-rw-r--r--.github/ISSUE_TEMPLATE.md13
-rw-r--r--.redhat-ci.inventory22
-rwxr-xr-x.redhat-ci.sh28
-rw-r--r--.redhat-ci.yml30
-rw-r--r--.tito/packages/openshift-ansible2
-rw-r--r--.travis.yml1
-rw-r--r--BUILD.md45
-rw-r--r--CONTRIBUTING.md201
-rw-r--r--README.md7
-rw-r--r--README_CONTAINER_IMAGE.md73
-rwxr-xr-xbin/cluster1
-rw-r--r--callback_plugins/aa_version_requirement.py20
-rw-r--r--docs/best_practices_guide.adoc39
-rw-r--r--docs/pull_requests.md95
-rw-r--r--docs/repo_structure.md54
-rw-r--r--examples/README.md26
-rw-r--r--examples/certificate-check-upload.yaml56
-rw-r--r--examples/certificate-check-volume.yaml66
-rw-r--r--filter_plugins/oo_filters.py22
-rw-r--r--filter_plugins/openshift_node.py1
-rw-r--r--filter_plugins/openshift_version.py2
-rw-r--r--hack/hooks/README.md37
-rw-r--r--hack/hooks/verify_generated_modules/README.md19
-rwxr-xr-xhack/hooks/verify_generated_modules/pre-commit55
-rw-r--r--images/installer/Dockerfile (renamed from Dockerfile)9
-rw-r--r--images/installer/Dockerfile.rhel7 (renamed from Dockerfile.rhel7)6
-rw-r--r--images/installer/system-container/README.md13
-rw-r--r--images/installer/system-container/root/exports/config.json.template223
-rw-r--r--images/installer/system-container/root/exports/manifest.json11
-rw-r--r--images/installer/system-container/root/exports/service.template6
-rw-r--r--images/installer/system-container/root/exports/tmpfiles.template2
-rwxr-xr-ximages/installer/system-container/root/usr/local/bin/run-system-container.sh4
-rw-r--r--inventory/byo/hosts.byo.native-glusterfs.example51
-rw-r--r--inventory/byo/hosts.origin.example31
-rw-r--r--inventory/byo/hosts.ose.example31
-rw-r--r--library/kubeclient_ca.py2
-rwxr-xr-xlibrary/modify_yaml.py2
-rw-r--r--lookup_plugins/oo_option.py2
-rw-r--r--openshift-ansible.spec310
-rw-r--r--playbooks/adhoc/create_pv/create_pv.yaml2
-rw-r--r--playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml2
-rwxr-xr-xplaybooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml2
-rw-r--r--playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml2
-rw-r--r--playbooks/adhoc/grow_docker_vg/filter_plugins/grow_docker_vg_filters.py1
-rw-r--r--playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml2
-rw-r--r--playbooks/adhoc/uninstall.yml15
-rw-r--r--playbooks/aws/openshift-cluster/cluster_hosts.yml2
-rw-r--r--playbooks/byo/openshift-cluster/cluster_hosts.yml4
-rw-r--r--playbooks/byo/openshift-cluster/config.yml4
-rw-r--r--playbooks/byo/openshift-cluster/enable_dnsmasq.yml24
-rw-r--r--playbooks/byo/openshift-cluster/initialize_groups.yml10
-rw-r--r--playbooks/byo/openshift-cluster/openshift-logging.yml24
-rw-r--r--playbooks/byo/openshift-cluster/openshift-metrics.yml4
-rw-r--r--playbooks/byo/openshift-cluster/redeploy-certificates.yml4
-rw-r--r--playbooks/byo/openshift-cluster/redeploy-etcd-certificates.yml4
-rw-r--r--playbooks/byo/openshift-cluster/redeploy-master-certificates.yml4
-rw-r--r--playbooks/byo/openshift-cluster/redeploy-node-certificates.yml4
-rw-r--r--playbooks/byo/openshift-cluster/redeploy-openshift-ca.yml4
-rw-r--r--playbooks/byo/openshift-cluster/redeploy-registry-certificates.yml4
-rw-r--r--playbooks/byo/openshift-cluster/redeploy-router-certificates.yml4
-rw-r--r--playbooks/byo/openshift-cluster/upgrades/README.md5
-rw-r--r--playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml24
-rw-r--r--playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml24
-rw-r--r--playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml104
-rw-r--r--playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml99
-rw-r--r--playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml101
-rw-r--r--playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml102
-rw-r--r--playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml99
-rw-r--r--playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml99
-rw-r--r--playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml108
-rw-r--r--playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml103
-rw-r--r--playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml99
-rw-r--r--playbooks/byo/openshift-cluster/upgrades/v3_6/README.md10
l---------playbooks/byo/openshift-cluster/upgrades/v3_6/roles1
-rw-r--r--playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml108
-rw-r--r--playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml103
-rw-r--r--playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml99
-rw-r--r--playbooks/byo/openshift-etcd/config.yml14
-rw-r--r--playbooks/byo/openshift-etcd/restart.yml4
-rw-r--r--playbooks/byo/openshift-glusterfs/README.md98
-rw-r--r--playbooks/byo/openshift-glusterfs/config.yml10
l---------playbooks/byo/openshift-glusterfs/filter_plugins1
l---------playbooks/byo/openshift-glusterfs/lookup_plugins1
-rw-r--r--playbooks/byo/openshift-glusterfs/registry.yml10
l---------playbooks/byo/openshift-glusterfs/roles1
-rw-r--r--playbooks/byo/openshift-master/restart.yml4
-rw-r--r--playbooks/byo/openshift-master/scaleup.yml24
-rw-r--r--playbooks/byo/openshift-node/network_manager.yml42
-rw-r--r--playbooks/byo/openshift-node/restart.yml4
-rw-r--r--playbooks/byo/openshift-node/scaleup.yml24
-rw-r--r--playbooks/byo/openshift-preflight/check.yml7
-rw-r--r--playbooks/byo/openshift_facts.yml5
-rw-r--r--playbooks/byo/rhel_subscribe.yml5
-rw-r--r--playbooks/common/openshift-cluster/config.yml20
-rw-r--r--playbooks/common/openshift-cluster/disable_excluder.yml17
-rw-r--r--playbooks/common/openshift-cluster/evaluate_groups.yml104
-rw-r--r--playbooks/common/openshift-cluster/initialize_openshift_version.yml5
-rw-r--r--playbooks/common/openshift-cluster/openshift_logging.yml4
-rw-r--r--playbooks/common/openshift-cluster/openshift_metrics.yml2
-rw-r--r--playbooks/common/openshift-cluster/redeploy-certificates/ca.yml6
-rw-r--r--playbooks/common/openshift-cluster/redeploy-certificates/router.yml61
-rw-r--r--playbooks/common/openshift-cluster/reset_excluder.yml8
-rw-r--r--playbooks/common/openshift-cluster/std_include.yml24
-rw-r--r--playbooks/common/openshift-cluster/upgrades/disable_excluder.yml21
-rw-r--r--playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml12
-rw-r--r--playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml12
-rw-r--r--playbooks/common/openshift-cluster/upgrades/etcd/backup.yml66
l---------playbooks/common/openshift-cluster/upgrades/etcd/files/etcdctl.sh1
-rw-r--r--playbooks/common/openshift-cluster/upgrades/etcd/main.yml32
-rw-r--r--playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml114
-rw-r--r--playbooks/common/openshift-cluster/upgrades/init.yml35
-rwxr-xr-xplaybooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py2
-rw-r--r--playbooks/common/openshift-cluster/upgrades/post_control_plane.yml30
-rw-r--r--playbooks/common/openshift-cluster/upgrades/pre/validate_excluder.yml29
-rw-r--r--playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml54
-rw-r--r--playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml56
-rw-r--r--playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml11
-rw-r--r--playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml7
-rw-r--r--playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml16
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml2
l---------playbooks/common/openshift-cluster/upgrades/v3_3/roles (renamed from playbooks/byo/openshift-cluster/upgrades/v3_3/roles)0
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml111
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml111
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml106
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml2
l---------playbooks/common/openshift-cluster/upgrades/v3_4/roles (renamed from playbooks/byo/openshift-cluster/upgrades/v3_4/roles)0
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml109
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml111
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml104
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml113
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml115
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml104
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml113
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml115
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml104
-rw-r--r--playbooks/common/openshift-cluster/upgrades/v3_6/validator.yml4
-rw-r--r--playbooks/common/openshift-glusterfs/config.yml23
l---------playbooks/common/openshift-glusterfs/filter_plugins1
l---------playbooks/common/openshift-glusterfs/lookup_plugins1
-rw-r--r--playbooks/common/openshift-glusterfs/registry.yml49
l---------playbooks/common/openshift-glusterfs/roles1
-rw-r--r--playbooks/common/openshift-master/scaleup.yml18
-rw-r--r--playbooks/common/openshift-node/network_manager.yml26
-rw-r--r--playbooks/common/openshift-node/scaleup.yml16
-rw-r--r--playbooks/gce/openshift-cluster/cluster_hosts.yml2
-rw-r--r--playbooks/libvirt/openshift-cluster/cluster_hosts.yml2
-rw-r--r--playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml2
-rw-r--r--playbooks/openstack/openshift-cluster/cluster_hosts.yml2
-rw-r--r--requirements.txt11
-rw-r--r--roles/calico/defaults/main.yaml4
-rw-r--r--roles/calico/handlers/main.yml4
-rw-r--r--roles/calico/tasks/main.yml32
-rw-r--r--roles/calico/templates/10-calico.cfg.j2 (renamed from roles/calico/templates/calico.cfg.j2)0
-rw-r--r--roles/calico/templates/calico.service.j24
-rw-r--r--roles/calico/templates/calicoctl.conf.j2 (renamed from roles/calico/templates/calico.conf.j2)0
-rw-r--r--roles/calico_master/defaults/main.yaml4
-rw-r--r--roles/calico_master/tasks/main.yml19
-rw-r--r--roles/contiv/tasks/netplugin.yml2
-rw-r--r--roles/contiv/templates/aci-gw.service2
-rw-r--r--roles/dns/templates/named.service.j28
-rw-r--r--roles/docker/README.md9
-rw-r--r--roles/docker/handlers/main.yml2
-rw-r--r--roles/docker/meta/main.yml1
-rw-r--r--roles/docker/tasks/main.yml122
-rw-r--r--roles/docker/tasks/package_docker.yml116
-rw-r--r--roles/docker/tasks/systemcontainer_docker.yml162
-rw-r--r--roles/docker/templates/daemon.json20
-rw-r--r--roles/docker/templates/systemcontainercustom.conf.j217
-rw-r--r--roles/docker/vars/main.yml4
-rw-r--r--roles/etcd/defaults/main.yaml1
-rw-r--r--roles/etcd/files/etcdctl.sh11
-rw-r--r--roles/etcd/meta/main.yml1
-rw-r--r--roles/etcd/tasks/main.yml116
-rw-r--r--roles/etcd/templates/etcd.conf.j22
-rw-r--r--roles/etcd/templates/etcd.docker.service12
-rw-r--r--roles/etcd_common/README.md37
-rw-r--r--roles/etcd_common/defaults/main.yml3
-rw-r--r--roles/etcd_common/tasks/etcdctl.yml (renamed from roles/etcd/tasks/etcdctl.yml)6
-rw-r--r--roles/etcd_common/templates/etcdctl.sh.j212
-rw-r--r--roles/etcd_server_certificates/meta/main.yml2
-rw-r--r--roles/flannel/handlers/main.yml4
-rw-r--r--roles/lib_openshift/library/oc_adm_ca_server_cert.py19
-rw-r--r--roles/lib_openshift/library/oc_adm_manage_node.py11
-rw-r--r--roles/lib_openshift/library/oc_adm_policy_group.py11
-rw-r--r--roles/lib_openshift/library/oc_adm_policy_user.py13
-rw-r--r--roles/lib_openshift/library/oc_adm_registry.py52
-rw-r--r--roles/lib_openshift/library/oc_adm_router.py11
-rw-r--r--roles/lib_openshift/library/oc_clusterrole.py25
-rw-r--r--roles/lib_openshift/library/oc_configmap.py11
-rw-r--r--roles/lib_openshift/library/oc_edit.py11
-rw-r--r--roles/lib_openshift/library/oc_env.py11
-rw-r--r--roles/lib_openshift/library/oc_group.py11
-rw-r--r--roles/lib_openshift/library/oc_image.py11
-rw-r--r--roles/lib_openshift/library/oc_label.py11
-rw-r--r--roles/lib_openshift/library/oc_obj.py22
-rw-r--r--roles/lib_openshift/library/oc_objectvalidator.py13
-rw-r--r--roles/lib_openshift/library/oc_process.py11
-rw-r--r--roles/lib_openshift/library/oc_project.py11
-rw-r--r--roles/lib_openshift/library/oc_pvc.py11
-rw-r--r--roles/lib_openshift/library/oc_route.py11
-rw-r--r--roles/lib_openshift/library/oc_scale.py11
-rw-r--r--roles/lib_openshift/library/oc_secret.py11
-rw-r--r--roles/lib_openshift/library/oc_service.py11
-rw-r--r--roles/lib_openshift/library/oc_serviceaccount.py11
-rw-r--r--roles/lib_openshift/library/oc_serviceaccount_secret.py11
-rw-r--r--roles/lib_openshift/library/oc_user.py11
-rw-r--r--roles/lib_openshift/library/oc_version.py11
-rw-r--r--roles/lib_openshift/library/oc_volume.py23
-rw-r--r--roles/lib_openshift/src/ansible/oc_adm_ca_server_cert.py4
-rw-r--r--roles/lib_openshift/src/class/oc_adm_ca_server_cert.py4
-rw-r--r--roles/lib_openshift/src/class/oc_adm_policy_user.py2
-rw-r--r--roles/lib_openshift/src/class/oc_adm_registry.py41
-rw-r--r--roles/lib_openshift/src/class/oc_clusterrole.py6
-rw-r--r--roles/lib_openshift/src/class/oc_obj.py9
-rw-r--r--roles/lib_openshift/src/class/oc_objectvalidator.py2
-rw-r--r--roles/lib_openshift/src/doc/obj2
-rw-r--r--roles/lib_openshift/src/doc/volume12
-rw-r--r--roles/lib_openshift/src/lib/base.py9
-rw-r--r--roles/lib_openshift/src/lib/rule.py8
-rw-r--r--roles/lib_openshift/src/test/integration/filter_plugins/filters.py1
-rwxr-xr-xroles/lib_openshift/src/test/integration/oc_label.yml2
-rwxr-xr-xroles/lib_openshift/src/test/integration/oc_user.yml2
-rwxr-xr-xroles/lib_openshift/src/test/unit/test_oc_adm_registry.py12
-rwxr-xr-xroles/lib_openshift/src/test/unit/test_oc_adm_router.py7
-rwxr-xr-xroles/lib_openshift/src/test/unit/test_oc_objectvalidator.py28
-rw-r--r--roles/lib_utils/library/repoquery.py30
-rw-r--r--roles/lib_utils/library/yedit.py3
-rw-r--r--roles/lib_utils/src/ansible/repoquery.py1
-rw-r--r--roles/lib_utils/src/class/repoquery.py28
-rw-r--r--roles/lib_utils/src/class/yedit.py2
-rw-r--r--roles/lib_utils/src/lib/import.py1
-rwxr-xr-xroles/lib_utils/src/test/unit/test_repoquery.py1
-rw-r--r--roles/openshift_ca/tasks/main.yml2
-rw-r--r--roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py3
-rw-r--r--roles/openshift_certificate_expiry/library/openshift_cert_expiry.py4
-rw-r--r--roles/openshift_certificate_expiry/tasks/main.yml6
-rw-r--r--roles/openshift_certificate_expiry/test/test_fakeopensslclasses.py3
-rw-r--r--roles/openshift_cli/library/openshift_container_binary_sync.py2
-rw-r--r--roles/openshift_cloud_provider/tasks/openstack.yml2
-rw-r--r--roles/openshift_common/tasks/main.yml10
-rw-r--r--roles/openshift_docker_facts/tasks/main.yml1
-rw-r--r--roles/openshift_etcd_ca/tasks/main.yml1
-rwxr-xr-xroles/openshift_examples/examples-sync.sh2
l---------roles/openshift_examples/files/examples/latest2
-rw-r--r--roles/openshift_examples/files/examples/v1.5/image-streams/image-streams-centos7.json2
-rw-r--r--roles/openshift_examples/files/examples/v1.5/image-streams/image-streams-rhel7.json2
-rw-r--r--roles/openshift_examples/files/examples/v1.5/quickstart-templates/dotnet-pgsql-persistent.json25
-rw-r--r--roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-app-example.yaml (renamed from roles/openshift_examples/files/examples/v1.6/cfme-templates/cfme-pv-app-example.yaml)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-example.yaml (renamed from roles/openshift_examples/files/examples/v1.6/cfme-templates/cfme-pv-example.yaml)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-template.yaml (renamed from roles/openshift_examples/files/examples/v1.6/cfme-templates/cfme-template.yaml)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/db-templates/README.md (renamed from roles/openshift_examples/files/examples/v1.6/db-templates/README.md)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/db-templates/mariadb-ephemeral-template.json (renamed from roles/openshift_examples/files/examples/v1.6/db-templates/mariadb-ephemeral-template.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/db-templates/mariadb-persistent-template.json (renamed from roles/openshift_examples/files/examples/v1.6/db-templates/mariadb-persistent-template.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/db-templates/mongodb-ephemeral-template.json (renamed from roles/openshift_examples/files/examples/v1.6/db-templates/mongodb-ephemeral-template.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/db-templates/mongodb-persistent-template.json (renamed from roles/openshift_examples/files/examples/v1.6/db-templates/mongodb-persistent-template.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/db-templates/mysql-ephemeral-template.json (renamed from roles/openshift_examples/files/examples/v1.6/db-templates/mysql-ephemeral-template.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/db-templates/mysql-persistent-template.json (renamed from roles/openshift_examples/files/examples/v1.6/db-templates/mysql-persistent-template.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/db-templates/postgresql-ephemeral-template.json (renamed from roles/openshift_examples/files/examples/v1.6/db-templates/postgresql-ephemeral-template.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/db-templates/postgresql-persistent-template.json (renamed from roles/openshift_examples/files/examples/v1.6/db-templates/postgresql-persistent-template.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/db-templates/redis-ephemeral-template.json (renamed from roles/openshift_examples/files/examples/v1.6/db-templates/redis-ephemeral-template.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/db-templates/redis-persistent-template.json (renamed from roles/openshift_examples/files/examples/v1.6/db-templates/redis-persistent-template.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/image-streams/dotnet_imagestreams.json (renamed from roles/openshift_examples/files/examples/v1.6/image-streams/dotnet_imagestreams.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/image-streams/image-streams-centos7.json (renamed from roles/openshift_examples/files/examples/v1.6/image-streams/image-streams-centos7.json)2
-rw-r--r--roles/openshift_examples/files/examples/v3.6/image-streams/image-streams-rhel7.json (renamed from roles/openshift_examples/files/examples/v1.6/image-streams/image-streams-rhel7.json)2
-rw-r--r--roles/openshift_examples/files/examples/v3.6/quickstart-templates/README.md (renamed from roles/openshift_examples/files/examples/v1.6/quickstart-templates/README.md)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/quickstart-templates/apicast-gateway-template.yml (renamed from roles/openshift_examples/files/examples/v1.6/quickstart-templates/apicast-gateway-template.yml)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/quickstart-templates/cakephp-mysql-persistent.json (renamed from roles/openshift_examples/files/examples/v1.6/quickstart-templates/cakephp-mysql-persistent.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/quickstart-templates/cakephp-mysql.json (renamed from roles/openshift_examples/files/examples/v1.6/quickstart-templates/cakephp-mysql.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql-persistent.json (renamed from roles/openshift_examples/files/examples/v1.6/quickstart-templates/dancer-mysql-persistent.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql.json (renamed from roles/openshift_examples/files/examples/v1.6/quickstart-templates/dancer-mysql.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql-persistent.json (renamed from roles/openshift_examples/files/examples/v1.6/quickstart-templates/django-postgresql-persistent.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql.json (renamed from roles/openshift_examples/files/examples/v1.6/quickstart-templates/django-postgresql.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/quickstart-templates/dotnet-example.json (renamed from roles/openshift_examples/files/examples/v1.6/quickstart-templates/dotnet-example.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/quickstart-templates/dotnet-pgsql-persistent.json (renamed from roles/openshift_examples/files/examples/v1.6/quickstart-templates/dotnet-pgsql-persistent.json)25
-rw-r--r--roles/openshift_examples/files/examples/v3.6/quickstart-templates/jenkins-ephemeral-template.json (renamed from roles/openshift_examples/files/examples/v1.6/quickstart-templates/jenkins-ephemeral-template.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/quickstart-templates/jenkins-persistent-template.json (renamed from roles/openshift_examples/files/examples/v1.6/quickstart-templates/jenkins-persistent-template.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb-persistent.json (renamed from roles/openshift_examples/files/examples/v1.6/quickstart-templates/nodejs-mongodb-persistent.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb.json (renamed from roles/openshift_examples/files/examples/v1.6/quickstart-templates/nodejs-mongodb.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/quickstart-templates/rails-postgresql-persistent.json (renamed from roles/openshift_examples/files/examples/v1.6/quickstart-templates/rails-postgresql-persistent.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/quickstart-templates/rails-postgresql.json (renamed from roles/openshift_examples/files/examples/v1.6/quickstart-templates/rails-postgresql.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-streams/fis-image-streams.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-streams/fis-image-streams.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-streams/jboss-image-streams.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-streams/jboss-image-streams.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/amq62-basic.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/amq62-basic.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/amq62-persistent-ssl.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/amq62-persistent-ssl.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/amq62-persistent.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/amq62-persistent.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/amq62-ssl.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/amq62-ssl.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-basic.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-basic.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-https.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-https.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-mysql-persistent.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-mysql-persistent.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-mysql.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-mysql.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-postgresql-persistent.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-postgresql-persistent.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-postgresql.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-postgresql.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/datavirt63-basic-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/datavirt63-basic-s2i.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/datavirt63-extensions-support-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/datavirt63-extensions-support-s2i.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/datavirt63-secure-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/datavirt63-secure-s2i.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver62-amq-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver62-amq-s2i.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver62-basic-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver62-basic-s2i.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver62-https-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver62-https-s2i.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver63-amq-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver63-amq-s2i.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver63-basic-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver63-basic-s2i.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver63-https-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver63-https-s2i.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-amq-persistent-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-amq-persistent-s2i.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-amq-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-amq-s2i.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-basic-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-basic-s2i.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-https-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-https-s2i.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-mongodb-persistent-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-mongodb-persistent-s2i.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-mongodb-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-mongodb-s2i.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-mysql-persistent-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-mysql-persistent-s2i.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-mysql-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-mysql-s2i.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-postgresql-persistent-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-postgresql-persistent-s2i.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-postgresql-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-postgresql-s2i.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-sso-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-sso-s2i.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-amq-persistent-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-amq-persistent-s2i.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-amq-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-amq-s2i.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-basic-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-basic-s2i.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-https-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-https-s2i.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-mongodb-persistent-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-mongodb-persistent-s2i.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-mongodb-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-mongodb-s2i.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-mysql-persistent-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-mysql-persistent-s2i.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-mysql-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-mysql-s2i.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-postgresql-persistent-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-postgresql-persistent-s2i.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-postgresql-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-postgresql-s2i.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-sso-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-sso-s2i.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-basic-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-basic-s2i.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-https-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-https-s2i.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-mongodb-persistent-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-mongodb-persistent-s2i.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-mongodb-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-mongodb-s2i.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-mysql-persistent-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-mysql-persistent-s2i.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-mysql-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-mysql-s2i.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-postgresql-persistent-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-postgresql-persistent-s2i.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-postgresql-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-postgresql-s2i.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-basic-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-basic-s2i.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-https-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-https-s2i.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-mongodb-persistent-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-mongodb-persistent-s2i.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-mongodb-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-mongodb-s2i.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-mysql-persistent-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-mysql-persistent-s2i.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-mysql-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-mysql-s2i.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-postgresql-persistent-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-postgresql-persistent-s2i.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-postgresql-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-postgresql-s2i.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/karaf2-camel-amq-template.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/karaf2-camel-amq-template.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/karaf2-camel-log-template.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/karaf2-camel-log-template.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/karaf2-camel-rest-sql-template.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/karaf2-camel-rest-sql-template.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/karaf2-cxf-rest-template.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/karaf2-cxf-rest-template.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/openjdk18-web-basic-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/openjdk18-web-basic-s2i.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-amq-mysql-persistent-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-amq-mysql-persistent-s2i.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-amq-mysql-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-amq-mysql-s2i.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-amq-postgresql-persistent-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-amq-postgresql-persistent-s2i.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-amq-postgresql-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-amq-postgresql-s2i.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-basic-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-basic-s2i.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-mysql-persistent-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-mysql-persistent-s2i.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-mysql-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-mysql-s2i.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-postgresql-persistent-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-postgresql-persistent-s2i.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-postgresql-s2i.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-postgresql-s2i.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-amq-template.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-amq-template.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-config-template.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-config-template.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-drools-template.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-drools-template.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-infinispan-template.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-infinispan-template.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-rest-sql-template.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-rest-sql-template.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-teiid-template.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-teiid-template.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-template.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-template.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-xml-template.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-xml-template.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-cxf-jaxrs-template.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-cxf-jaxrs-template.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-cxf-jaxws-template.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-cxf-jaxws-template.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso70-https.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-https.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso70-mysql-persistent.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-mysql-persistent.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso70-mysql.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-mysql.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso70-postgresql-persistent.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-postgresql-persistent.json)0
-rw-r--r--roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso70-postgresql.json (renamed from roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-postgresql.json)0
-rw-r--r--roles/openshift_excluder/README.md62
-rw-r--r--roles/openshift_excluder/defaults/main.yml19
-rw-r--r--roles/openshift_excluder/meta/main.yml5
-rw-r--r--roles/openshift_excluder/tasks/disable.yml65
-rw-r--r--roles/openshift_excluder/tasks/enable.yml20
-rw-r--r--roles/openshift_excluder/tasks/exclude.yml42
-rw-r--r--roles/openshift_excluder/tasks/init.yml12
-rw-r--r--roles/openshift_excluder/tasks/install.yml29
-rw-r--r--roles/openshift_excluder/tasks/main.yml38
-rw-r--r--roles/openshift_excluder/tasks/unexclude.yml38
-rw-r--r--roles/openshift_excluder/tasks/verify_excluder.yml32
-rw-r--r--roles/openshift_excluder/tasks/verify_upgrade.yml12
-rw-r--r--roles/openshift_expand_partition/tasks/main.yml2
-rwxr-xr-xroles/openshift_facts/library/openshift_facts.py37
-rw-r--r--roles/openshift_health_checker/callback_plugins/zz_failure_summary.py1
-rwxr-xr-xroles/openshift_health_checker/library/aos_version.py1
-rwxr-xr-xroles/openshift_health_checker/library/check_yum_update.py1
-rw-r--r--roles/openshift_health_checker/openshift_checks/disk_availability.py65
-rw-r--r--roles/openshift_health_checker/openshift_checks/memory_availability.py44
-rw-r--r--roles/openshift_health_checker/openshift_checks/mixins.py17
-rw-r--r--roles/openshift_health_checker/openshift_checks/package_version.py7
-rw-r--r--roles/openshift_health_checker/test/action_plugin_test.py4
-rw-r--r--roles/openshift_health_checker/test/disk_availability_test.py155
-rw-r--r--roles/openshift_health_checker/test/memory_availability_test.py91
-rw-r--r--roles/openshift_health_checker/test/package_version_test.py22
-rw-r--r--roles/openshift_hosted/README.md8
-rw-r--r--roles/openshift_hosted/defaults/main.yml4
-rw-r--r--roles/openshift_hosted/tasks/registry/registry.yml8
-rw-r--r--roles/openshift_hosted/tasks/registry/storage/glusterfs.yml92
-rw-r--r--roles/openshift_hosted/tasks/router/router.yml16
-rw-r--r--roles/openshift_hosted_logging/tasks/deploy_logging.yaml12
-rw-r--r--roles/openshift_hosted_metrics/tasks/install.yml2
-rw-r--r--roles/openshift_hosted_templates/files/v1.4/enterprise/logging-deployer.yaml2
-rw-r--r--roles/openshift_hosted_templates/files/v1.4/enterprise/metrics-deployer.yaml4
-rw-r--r--roles/openshift_hosted_templates/files/v1.5/enterprise/logging-deployer.yaml345
-rw-r--r--roles/openshift_hosted_templates/files/v1.5/enterprise/metrics-deployer.yaml168
-rw-r--r--roles/openshift_hosted_templates/files/v1.5/origin/logging-deployer.yaml342
-rw-r--r--roles/openshift_hosted_templates/files/v1.5/origin/metrics-deployer.yaml168
-rw-r--r--roles/openshift_hosted_templates/files/v1.6/enterprise/logging-deployer.yaml345
-rw-r--r--roles/openshift_hosted_templates/files/v1.6/enterprise/metrics-deployer.yaml168
-rw-r--r--roles/openshift_hosted_templates/files/v1.6/origin/logging-deployer.yaml342
-rw-r--r--roles/openshift_hosted_templates/files/v1.6/origin/metrics-deployer.yaml168
-rw-r--r--roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml (renamed from roles/openshift_hosted_templates/files/v1.6/enterprise/registry-console.yaml)4
-rw-r--r--roles/openshift_hosted_templates/files/v3.6/origin/registry-console.yaml (renamed from roles/openshift_hosted_templates/files/v1.6/origin/registry-console.yaml)0
-rw-r--r--roles/openshift_loadbalancer/templates/haproxy.docker.service.j28
-rw-r--r--roles/openshift_logging/README.md2
-rw-r--r--roles/openshift_logging/defaults/main.yml27
-rw-r--r--roles/openshift_logging/handlers/main.yml9
-rw-r--r--roles/openshift_logging/library/openshift_logging_facts.py2
-rw-r--r--roles/openshift_logging/tasks/delete_logging.yaml2
-rw-r--r--roles/openshift_logging/tasks/generate_certs.yaml23
-rw-r--r--roles/openshift_logging/tasks/generate_configmaps.yaml42
-rw-r--r--roles/openshift_logging/tasks/generate_routes.yaml6
-rw-r--r--roles/openshift_logging/tasks/generate_secrets.yaml32
-rw-r--r--roles/openshift_logging/tasks/generate_services.yaml32
-rw-r--r--roles/openshift_logging/tasks/install_elasticsearch.yaml192
-rw-r--r--roles/openshift_logging/tasks/install_fluentd.yaml4
-rw-r--r--roles/openshift_logging/tasks/install_logging.yaml4
-rw-r--r--roles/openshift_logging/tasks/install_mux.yaml67
-rw-r--r--roles/openshift_logging/tasks/install_support.yaml41
-rw-r--r--roles/openshift_logging/tasks/main.yaml3
-rw-r--r--roles/openshift_logging/tasks/oc_apply.yaml94
-rw-r--r--roles/openshift_logging/tasks/procure_shared_key.yaml25
-rw-r--r--roles/openshift_logging/tasks/set_es_storage.yaml80
-rw-r--r--roles/openshift_logging/tasks/start_cluster.yaml23
-rw-r--r--roles/openshift_logging/tasks/stop_cluster.yaml20
-rw-r--r--roles/openshift_logging/tasks/update_master_config.yaml7
-rw-r--r--roles/openshift_logging/templates/curator.j25
-rw-r--r--roles/openshift_logging/templates/elasticsearch.yml.j25
-rw-r--r--roles/openshift_logging/templates/es.j210
-rw-r--r--roles/openshift_logging/templates/fluentd.j218
-rw-r--r--roles/openshift_logging/templates/kibana.j235
-rw-r--r--roles/openshift_logging/templates/mux.j2121
-rw-r--r--roles/openshift_logging/templates/service.j26
-rw-r--r--roles/openshift_logging/vars/main.yaml12
-rw-r--r--roles/openshift_logging/vars/openshift-enterprise.yml2
-rw-r--r--roles/openshift_manageiq/tasks/main.yaml88
-rw-r--r--roles/openshift_manageiq/vars/main.yml64
-rw-r--r--roles/openshift_master/files/atomic-openshift-master.service23
-rw-r--r--roles/openshift_master/files/origin-master.service23
l---------roles/openshift_master/tasks/files1
-rw-r--r--roles/openshift_master/tasks/main.yml2
-rw-r--r--roles/openshift_master/tasks/systemd_units.yml10
-rw-r--r--roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j28
-rw-r--r--roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j28
-rw-r--r--roles/openshift_master/templates/master_docker/master.docker.service.j28
-rw-r--r--roles/openshift_master_certificates/tasks/main.yml8
-rw-r--r--roles/openshift_master_facts/defaults/main.yml22
-rw-r--r--roles/openshift_master_facts/filter_plugins/openshift_master.py1
-rw-r--r--roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py4
-rw-r--r--roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_priorities.py2
-rw-r--r--roles/openshift_master_facts/tasks/main.yml6
-rw-r--r--roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py7
-rw-r--r--roles/openshift_master_facts/test/openshift_master_facts_default_priorities_tests.py7
-rw-r--r--roles/openshift_metrics/README.md2
-rwxr-xr-xroles/openshift_metrics/files/import_jks_certs.sh52
-rw-r--r--roles/openshift_metrics/handlers/main.yml9
-rw-r--r--roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml83
-rw-r--r--roles/openshift_metrics/tasks/generate_heapster_certificates.yaml40
-rw-r--r--roles/openshift_metrics/tasks/generate_heapster_secrets.yaml14
-rw-r--r--roles/openshift_metrics/tasks/import_jks_certs.yaml37
-rw-r--r--roles/openshift_metrics/tasks/install_cassandra.yaml2
-rw-r--r--roles/openshift_metrics/tasks/install_heapster.yaml8
-rw-r--r--roles/openshift_metrics/tasks/install_metrics.yaml6
-rw-r--r--roles/openshift_metrics/tasks/main.yaml2
-rw-r--r--roles/openshift_metrics/tasks/start_metrics.yaml4
-rw-r--r--roles/openshift_metrics/tasks/stop_metrics.yaml4
-rw-r--r--roles/openshift_metrics/tasks/uninstall_metrics.yaml4
-rw-r--r--roles/openshift_metrics/tasks/update_master_config.yaml7
-rw-r--r--roles/openshift_metrics/templates/hawkular_metrics_rc.j224
-rw-r--r--roles/openshift_metrics/templates/heapster.j229
-rw-r--r--roles/openshift_metrics/templates/service.j26
-rw-r--r--roles/openshift_metrics/vars/openshift-enterprise.yml2
-rw-r--r--roles/openshift_node/defaults/main.yml2
-rw-r--r--roles/openshift_node/tasks/main.yml34
-rw-r--r--roles/openshift_node/tasks/systemd_units.yml8
-rw-r--r--roles/openshift_node/templates/atomic-openshift-node.service.j222
-rw-r--r--roles/openshift_node/templates/openshift.docker.node.dep.service4
-rw-r--r--roles/openshift_node/templates/openshift.docker.node.service10
-rw-r--r--roles/openshift_node/templates/openvswitch.docker.service8
-rw-r--r--roles/openshift_node/templates/origin-node.service.j221
-rw-r--r--roles/openshift_node_certificates/handlers/main.yml2
-rw-r--r--roles/openshift_node_upgrade/meta/main.yml1
-rw-r--r--roles/openshift_node_upgrade/tasks/docker/upgrade.yml17
-rw-r--r--roles/openshift_node_upgrade/tasks/main.yml78
-rw-r--r--roles/openshift_node_upgrade/tasks/restart.yml (renamed from roles/openshift_node_upgrade/tasks/docker/restart.yml)7
-rw-r--r--roles/openshift_node_upgrade/templates/openshift.docker.node.dep.service4
-rw-r--r--roles/openshift_node_upgrade/templates/openshift.docker.node.service10
-rw-r--r--roles/openshift_node_upgrade/templates/openvswitch.docker.service8
-rw-r--r--roles/openshift_provisioners/tasks/install_efs.yaml2
-rw-r--r--roles/openshift_repos/files/origin/repos/openshift-ansible-centos-paas-sig.repo2
-rw-r--r--roles/openshift_repos/tasks/main.yaml25
-rw-r--r--roles/openshift_storage_glusterfs/README.md94
-rw-r--r--roles/openshift_storage_glusterfs/defaults/main.yml36
-rw-r--r--roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml115
-rw-r--r--roles/openshift_storage_glusterfs/files/v3.6/glusterfs-registry-service.yml10
-rw-r--r--roles/openshift_storage_glusterfs/files/v3.6/glusterfs-template.yml128
-rw-r--r--roles/openshift_storage_glusterfs/files/v3.6/heketi-template.yml113
-rw-r--r--roles/openshift_storage_glusterfs/filter_plugins/openshift_storage_glusterfs.py23
-rw-r--r--roles/openshift_storage_glusterfs/meta/main.yml15
-rw-r--r--roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml166
-rw-r--r--roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml22
-rw-r--r--roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml107
-rw-r--r--roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml71
-rw-r--r--roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml41
-rw-r--r--roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml109
-rw-r--r--roles/openshift_storage_glusterfs/tasks/main.yml22
-rw-r--r--roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-endpoints.yml.j211
-rw-r--r--roles/openshift_storage_glusterfs/templates/v3.6/topology.json.j239
-rw-r--r--roles/openshift_version/meta/main.yml1
-rw-r--r--roles/openshift_version/tasks/main.yml77
-rw-r--r--roles/openshift_version/tasks/set_version_rpm.yml50
-rw-r--r--roles/os_firewall/README.md2
-rw-r--r--roles/os_firewall/defaults/main.yml2
-rwxr-xr-xroles/os_firewall/library/os_firewall_manage_iptables.py1
-rw-r--r--roles/os_firewall/tasks/firewall/firewalld.yml2
-rw-r--r--roles/os_firewall/tasks/firewall/iptables.yml2
-rw-r--r--roles/rhel_subscribe/tasks/enterprise.yml9
-rw-r--r--setup.py48
-rw-r--r--test-requirements.txt23
-rw-r--r--test/integration/README.md39
-rwxr-xr-xtest/integration/build-images.sh101
-rw-r--r--test/integration/openshift_health_checker/builds/aos-package-checks/Dockerfile30
-rw-r--r--test/integration/openshift_health_checker/builds/aos-package-checks/root/etc/yum.repos.d/break-yum.repo5
-rw-r--r--test/integration/openshift_health_checker/builds/aos-package-checks/root/etc/yum.repos.d/ose-3.2.repo5
-rw-r--r--test/integration/openshift_health_checker/builds/aos-package-checks/root/etc/yum.repos.d/ose-3.3.repo5
-rw-r--r--test/integration/openshift_health_checker/builds/aos-package-checks/root/root/break-yum-update-2.spec33
-rw-r--r--test/integration/openshift_health_checker/builds/aos-package-checks/root/root/break-yum-update.spec32
-rw-r--r--test/integration/openshift_health_checker/builds/aos-package-checks/root/root/ose-3.2.spec44
-rw-r--r--test/integration/openshift_health_checker/builds/aos-package-checks/root/root/ose-3.3.spec44
-rw-r--r--test/integration/openshift_health_checker/builds/test-target-base/Dockerfile2
-rw-r--r--test/integration/openshift_health_checker/common.go99
-rw-r--r--test/integration/openshift_health_checker/preflight/playbooks/package_availability_missing_required.yml20
-rw-r--r--test/integration/openshift_health_checker/preflight/playbooks/package_availability_succeeds.yml20
-rw-r--r--test/integration/openshift_health_checker/preflight/playbooks/package_update_dep_missing.yml24
-rw-r--r--test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_broken.yml31
-rw-r--r--test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_disabled.yml21
-rw-r--r--test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_unreachable.yml27
-rw-r--r--test/integration/openshift_health_checker/preflight/playbooks/package_version_matches.yml24
-rw-r--r--test/integration/openshift_health_checker/preflight/playbooks/package_version_mismatches.yml24
-rw-r--r--test/integration/openshift_health_checker/preflight/playbooks/package_version_multiple.yml26
-rw-r--r--test/integration/openshift_health_checker/preflight/playbooks/package_version_origin.yml20
l---------test/integration/openshift_health_checker/preflight/playbooks/roles (renamed from playbooks/byo/openshift-cluster/upgrades/v3_5/roles)0
-rw-r--r--test/integration/openshift_health_checker/preflight/playbooks/tasks/enable_repo.yml9
-rw-r--r--test/integration/openshift_health_checker/preflight/preflight_test.go105
-rw-r--r--test/integration/openshift_health_checker/setup_container.yml45
-rw-r--r--test/integration/openshift_health_checker/teardown_container.yml23
-rwxr-xr-xtest/integration/run-tests.sh80
-rw-r--r--test/openshift_version_tests.py2
-rw-r--r--test/unit/modify_yaml_tests.py (renamed from test/modify_yaml_tests.py)2
-rw-r--r--tox.ini12
-rw-r--r--utils/src/ooinstall/cli_installer.py6
-rw-r--r--utils/src/ooinstall/variants.py7
561 files changed, 8697 insertions, 5431 deletions
diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md
index cdfd93725..2a4f80a36 100644
--- a/.github/ISSUE_TEMPLATE.md
+++ b/.github/ISSUE_TEMPLATE.md
@@ -1,16 +1,3 @@
-### <HTPASSWD_AUTH>
-
-We are aware of the current issues related to htpasswd_auth failures
-Please downgrade to ansible 2.2.0.0 until a fix is released.
-You can track the status of the bug fix in this issue:
-https://github.com/openshift/openshift-ansible/issues/3111
-Please erase this <HTPASSWD_AUTH> section if it does not apply to you.
-
-Thanks - 2017-01-31
-
-### </HTPASSWD_AUTH>
-
-
#### Description
Provide a brief description of your issue here. For example:
diff --git a/.redhat-ci.inventory b/.redhat-ci.inventory
new file mode 100644
index 000000000..3c8296055
--- /dev/null
+++ b/.redhat-ci.inventory
@@ -0,0 +1,22 @@
+[OSEv3:children]
+masters
+nodes
+etcd
+
+[OSEv3:vars]
+ansible_ssh_user=root
+ansible_python_interpreter=/usr/bin/python3
+deployment_type=origin
+openshift_image_tag="{{ lookup('env', 'OPENSHIFT_IMAGE_TAG') }}"
+openshift_master_default_subdomain="{{ lookup('env', 'RHCI_ocp_node1_IP') }}.xip.io"
+
+[masters]
+ocp-master
+
+[etcd]
+ocp-master
+
+[nodes]
+ocp-master openshift_schedulable=false
+ocp-node1 openshift_node_labels="{'region':'infra'}"
+ocp-node2 openshift_node_labels="{'region':'infra'}"
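The two `lookup('env', ...)` expressions in this inventory resolve at run time from the CI job's environment. As a minimal sketch of how that is consumed (the exported values below are placeholders; only the playbook path and the variable names come from this change), the installer would be invoked like:

```
# Hypothetical invocation: export the variables the inventory looks up,
# then point ansible-playbook at the CI inventory (placeholder values).
export OPENSHIFT_IMAGE_TAG=v3.6.0-alpha.1
export RHCI_ocp_node1_IP=203.0.113.10
ansible-playbook -i .redhat-ci.inventory playbooks/byo/config.yml
```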
diff --git a/.redhat-ci.sh b/.redhat-ci.sh
new file mode 100755
index 000000000..fce8c1d52
--- /dev/null
+++ b/.redhat-ci.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+set -xeuo pipefail
+
+pip install -r requirements.txt
+
+# ping the nodes to check they're responding and register their ostree versions
+ansible -vvv -i .redhat-ci.inventory nodes -a 'rpm-ostree status'
+
+upload_journals() {
+ mkdir journals
+ for node in master node1 node2; do
+ ssh ocp-$node 'journalctl --no-pager || true' > journals/ocp-$node.log
+ done
+}
+
+trap upload_journals ERR
+
+# run the actual installer
+ansible-playbook -vvv -i .redhat-ci.inventory playbooks/byo/config.yml
+
+# run a small subset of origin conformance tests to sanity-check the
+# cluster. NB: we run it on the master since we may be in a different
+# OSP network.
+ssh ocp-master docker run --rm --net=host --privileged \
+ -v /etc/origin/master/admin.kubeconfig:/config fedora:25 sh -c \
+ '"dnf install -y origin-tests && \
+ KUBECONFIG=/config /usr/libexec/origin/extended.test --ginkgo.v=1 \
+ --ginkgo.noColor --ginkgo.focus=\"Services.*NodePort|EmptyDir\""'
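For readability, here is the focused conformance invocation unwrapped from the nested ssh/docker quoting above; a sketch only, assuming it runs inside the fedora:25 container with the admin kubeconfig mounted at /config exactly as the script sets it up:

```
# Same command the script runs via ssh + docker run, with the quoting removed.
dnf install -y origin-tests
KUBECONFIG=/config /usr/libexec/origin/extended.test \
  --ginkgo.v=1 --ginkgo.noColor \
  --ginkgo.focus="Services.*NodePort|EmptyDir"
```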
diff --git a/.redhat-ci.yml b/.redhat-ci.yml
new file mode 100644
index 000000000..6dac7b256
--- /dev/null
+++ b/.redhat-ci.yml
@@ -0,0 +1,30 @@
+---
+
+cluster:
+ hosts:
+ - name: ocp-master
+ distro: fedora/25/atomic
+ - name: ocp-node1
+ distro: fedora/25/atomic
+ - name: ocp-node2
+ distro: fedora/25/atomic
+ container:
+ image: fedora:25
+
+packages:
+ - gcc
+ - python-pip
+ - python-devel
+ - openssl-devel
+ - redhat-rpm-config
+
+context: 'fedora/25/atomic | origin/v3.6.0-alpha.1'
+
+env:
+ OPENSHIFT_IMAGE_TAG: v3.6.0-alpha.1
+
+tests:
+ - ./.redhat-ci.sh
+
+artifacts:
+ - journals/
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index 13ff44567..200f8d7f3 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.6.22-1 ./
+3.6.68-1 ./
diff --git a/.travis.yml b/.travis.yml
index 245202139..1c549cec9 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -13,6 +13,7 @@ python:
- "3.5"
install:
+ - pip install --upgrade pip
- pip install tox-travis coveralls
script:
diff --git a/BUILD.md b/BUILD.md
index 38f8f38e3..e6541ace3 100644
--- a/BUILD.md
+++ b/BUILD.md
@@ -1,12 +1,11 @@
-# openshift-ansible RPM Build instructions
+# openshift-ansible build instructions
+
+## Build openshift-ansible RPMs
We use tito to make building and tracking revisions easy.
For more information on tito, please see the [Tito home page](https://github.com/dgoodwin/tito "Tito home page").
-
-## Build openshift-ansible
-
- Change into openshift-ansible
```
cd openshift-ansible
@@ -24,3 +23,41 @@ tito tag
```
tito build --rpm
```
+
+## Build an openshift-ansible container image
+
+To build a container image of `openshift-ansible` using standalone **Docker**:
+
+ cd openshift-ansible
+ docker build -t openshift/openshift-ansible .
+
+Alternatively this can be built on **OpenShift** using a [build and image stream](https://docs.openshift.org/latest/architecture/core_concepts/builds_and_image_streams.html) with this command:
+
+ oc new-build docker.io/aweiteka/playbook2image~https://github.com/openshift/openshift-ansible
+
+The progress of the build can be monitored with:
+
+ oc logs -f bc/openshift-ansible
+
+Once built, the image will be visible in the Image Stream created by the same command:
+
+ oc describe imagestream openshift-ansible
+
+## Build the Atomic System Container
+
+A system container runs using runC instead of Docker and it is managed
+by the [atomic](https://github.com/projectatomic/atomic/) tool. As it
+doesn't require Docker to run, the installer can run on a node of the
+cluster without interfering with the Docker daemon that is configured
+by the installer itself.
+
+The first step is to build the [container image](#build-an-openshift-ansible-container-image)
+as described before. The container image already contains all the
+required files to run as a system container.
+
+Once the container image is built, we can import it into the OSTree
+storage:
+
+```
+atomic pull --storage ostree docker:openshift/openshift-ansible:latest
+```
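+
+Once the image is in OSTree storage it can be installed and started as a
+system container; a minimal sketch, assuming an inventory file named
+`inventory.origin` in the current directory (see
+[README_CONTAINER_IMAGE.md](README_CONTAINER_IMAGE.md) for details):
+
+```
+atomic install --system --set INVENTORY_FILE=$(pwd)/inventory.origin openshift/openshift-ansible
+systemctl start openshift-ansible
+```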
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index a3ae3fd10..1c0fa73ad 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -3,92 +3,103 @@
Thank you for contributing to OpenShift Ansible. This document explains how the
repository is organized, and how to submit contributions.
-## Introduction
+**Table of Contents**
-Before submitting code changes, get familiarized with these documents:
+<!-- TOC depthFrom:2 depthTo:4 withLinks:1 updateOnSave:1 orderedList:0 -->
-- [Core Concepts](https://github.com/openshift/openshift-ansible/blob/master/docs/core_concepts_guide.adoc)
-- [Best Practices Guide](https://github.com/openshift/openshift-ansible/blob/master/docs/best_practices_guide.adoc)
-- [Style Guide](https://github.com/openshift/openshift-ansible/blob/master/docs/style_guide.adoc)
+- [Introduction](#introduction)
+- [Submitting contributions](#submitting-contributions)
+- [Running tests and other verification tasks](#running-tests-and-other-verification-tasks)
+ - [Running only specific tasks](#running-only-specific-tasks)
+- [Appendix](#appendix)
+ - [Tricks](#tricks)
+ - [Activating a virtualenv managed by tox](#activating-a-virtualenv-managed-by-tox)
+ - [Limiting the unit tests that are run](#limiting-the-unit-tests-that-are-run)
+ - [Finding unused Python code](#finding-unused-python-code)
-## Repository structure
+<!-- /TOC -->
-### Ansible
+## Introduction
-```
-.
-├── inventory Contains dynamic inventory scripts, and examples of
-│ Ansible inventories.
-├── library Contains Python modules used by the playbooks.
-├── playbooks Contains Ansible playbooks targeting multiple use cases.
-└── roles Contains Ansible roles, units of shared behavior among
- playbooks.
-```
+Before submitting code changes, get familiarized with these documents:
-#### Ansible plugins
+- [Core Concepts](docs/core_concepts_guide.adoc)
+- [Best Practices Guide](docs/best_practices_guide.adoc)
+- [Style Guide](docs/style_guide.adoc)
+- [Repository Structure](docs/repo_structure.md)
-These are plugins used in playbooks and roles:
+Please consider opening an issue, or discussing on an existing one, if you are
+planning to work on something larger, to make sure your time investment results
+in something that can be merged into the repository.
-```
-.
-├── ansible-profile
-├── callback_plugins
-├── filter_plugins
-└── lookup_plugins
-```
+## Submitting contributions
-### Scripts
+1. [Fork](https://help.github.com/articles/fork-a-repo/) this repository and
+ [create a work branch in your fork](https://help.github.com/articles/github-flow/).
+2. Go through the documents mentioned in the [introduction](#introduction).
+3. Make changes and commit. You may want to review your changes and
+ [run tests](#running-tests-and-other-verification-tasks) before pushing your
+ branch.
+4. [Open a Pull Request](https://help.github.com/articles/creating-a-pull-request/).
+ Give it a meaningful title explaining the changes you are proposing, and
+ then add further details in the description.
+
+One of the repository maintainers will then review the PR and trigger tests, and
+possibly start a discussion that goes on until the PR is ready to be merged.
+This process is further explained in the
+[Pull Request process](docs/pull_requests.md) document.
+
+If you get no timely feedback from a project contributor / maintainer, sorry for
+the delay. You can help us speed up triaging, reviewing and eventually merging
+contributions by requesting a review or tagging in a comment
+[someone who has worked on the files](https://help.github.com/articles/tracing-changes-in-a-file/)
+you're proposing changes to.
-```
-.
-├── bin [DEPRECATED] Contains the `bin/cluster` script, a
-│ wrapper around the Ansible playbooks that ensures proper
-│ configuration, and facilitates installing, updating,
-│ destroying and configuring OpenShift clusters.
-│ Note: this tool is kept in the repository for legacy
-│ reasons and will be removed at some point.
-└── utils Contains the `atomic-openshift-installer` command, an
- interactive CLI utility to install OpenShift across a
- set of hosts.
-```
+---
-### Documentation
+**Note**: during the review process, you may add new commits to address review
+comments or change existing commits. However, before getting your PR merged,
+please [squash commits](https://help.github.com/articles/about-git-rebase/) to a
+minimum set of meaningful commits.
-```
-.
-└── docs Contains documentation for this repository.
-```
+If you've broken your work up into a set of sequential changes and each commit
+passes the tests on its own, then that's fine. If you've got commits fixing typos
+or other problems introduced by previous commits in the same PR, then those
+should be squashed before merging.
-### Tests
+If you are new to Git, these links might help:
-```
-.
-└── test Contains tests.
-```
+- https://git-scm.com/book/en/v2/Git-Tools-Rewriting-History
+- http://gitready.com/advanced/2009/02/10/squashing-commits-with-rebase.html
-## Building RPMs
+---
-See the [RPM build instructions](BUILD.md).
+## Running tests and other verification tasks
-## Running tests
+We use [`tox`](http://readthedocs.org/docs/tox/) to manage virtualenvs where
+tests and other verification tasks are run. We use
+[`pytest`](https://docs.pytest.org/) as our test runner.
-We use [tox](http://readthedocs.org/docs/tox/) to manage virtualenvs and run
-tests. Alternatively, tests can be run using
-[detox](https://pypi.python.org/pypi/detox/) which allows for running tests in
-parallel.
+As an alternative to `tox`, one can use
+[`detox`](https://pypi.python.org/pypi/detox/) for running verification tasks in
+parallel. Note that while `detox` may be useful in development to make use of
+multiple cores, it can be buggy at times and produce flakes, thus we do not use
+it in our [CI](docs/continuous_integration.md) jobs.
-Note: while `detox` may be useful in development to make use of multiple cores,
-it can be buggy at times and produce flakes, thus we do not use it in our CI.
+```
+pip install tox
+```
+To run all tests and verification tasks:
```
-pip install tox detox
+tox
```
---
-Note: before running `tox` or `detox`, ensure that the only virtualenvs within
-the repository root are the ones managed by `tox`, those in a `.tox`
+**Note**: before running `tox` or `detox`, ensure that the only virtualenvs
+within the repository root are the ones managed by `tox`, those in a `.tox`
subdirectory.
Use this command to list paths that are likely part of a virtualenv not managed
@@ -105,45 +116,52 @@ potentially fail.
---
-List the test environments available:
+### Running only specific tasks
+
+The [tox configuration](tox.ini) describes environments based on either Python 2
+or Python 3. Each environment is associated with a command that is executed in
+the context of a virtualenv, with a specific version of Python, installed
+dependencies, environment variables and so on. To list the environments
+available:
```
tox -l
```
-Run all of the tests and linters with:
+To run the command of a particular environment, e.g., `flake8` on Python 2.7:
```
-tox
+tox -e py27-flake8
```
-Run all of the tests linters in parallel (may flake):
+To run the command of a particular environment in a clean virtualenv, e.g.,
+`pylint` on Python 3.5:
```
-detox
+tox -re py35-pylint
```
-### Run only unit tests or some specific linter
+The `-r` flag recreates existing environments, useful to force dependencies to
+be reinstalled.
-Run a particular test environment (`flake8` on Python 2.7 in this case):
+## Appendix
-```
-tox -e py27-flake8
-```
+### Tricks
-Run a particular test environment in a clean virtualenv (`pylint` on Python 3.5
-in this case):
+Here are some useful tips that might improve your workflow while working on this repository.
-```
-tox -re py35-pylint
-```
+#### Git Hooks
-### Tricks
+Git hooks are included in this repository to aid in development. Check
+out the README in the
+[hack/hooks](http://github.com/openshift/openshift-ansible/blob/master/hack/hooks/README.md)
+directory for more information.
#### Activating a virtualenv managed by tox
-If you want to enter a virtualenv created by tox to do additional
-testing/debugging (py27-flake8 env in this case):
+If you want to enter a virtualenv created by tox to do additional debugging, you
+can activate it just like any other virtualenv (py27-flake8 environment in this
+example):
```
source .tox/py27-flake8/bin/activate
@@ -182,32 +200,7 @@ $ tox -e py27-unit -- roles/lib_openshift/src/test/unit/test_oc_project.py -k te
Among other things, this can be used for instance to see the coverage levels of
individual modules as we work on improving tests.
-## Submitting contributions
-
-1. Go through the guides from the [introduction](#Introduction).
-2. Fork this repository, and create a work branch in your fork.
-3. Make changes and commit. You may want to review your changes and run tests
- before pushing your branch.
-4. Open a Pull Request.
-
-One of the repository maintainers will then review the PR and submit it for
-testing.
-
-The `default` test job is publicly accessible at
-https://ci.openshift.redhat.com/jenkins/job/openshift-ansible/. The other jobs
-are run on a different Jenkins host that is not publicly accessible, however the
-test results are posted to S3 buckets when complete.
-
-The test output of each job is also posted to the Pull Request as comments.
-
-A trend of the time taken by merge jobs is available at
-https://ci.openshift.redhat.com/jenkins/job/merge_pull_request_openshift_ansible/buildTimeTrend.
-
----
-
-## Appendix
-
-### Finding unused Python code
+#### Finding unused Python code
If you are contributing with Python code, you can use the tool
[`vulture`](https://pypi.python.org/pypi/vulture) to verify that you are not
diff --git a/README.md b/README.md
index 3ec6555e8..1cf8d1156 100644
--- a/README.md
+++ b/README.md
@@ -54,7 +54,7 @@ you are not running a stable release.
***
Requirements:
- - Ansible >= 2.2.0
+ - Ansible >= 2.2.2.0
- Jinja >= 2.7
- pyOpenSSL
- python-lxml
@@ -83,7 +83,10 @@ See [README_CONTAINER_IMAGE.md](README_CONTAINER_IMAGE.md) for information on ho
See the [hooks documentation](HOOKS.md).
-
## Contributing
See the [contribution guide](CONTRIBUTING.md).
+
+## Building openshift-ansible RPMs and container images
+
+See the [build instructions](BUILD.md).
diff --git a/README_CONTAINER_IMAGE.md b/README_CONTAINER_IMAGE.md
index 2499e01d4..b78073100 100644
--- a/README_CONTAINER_IMAGE.md
+++ b/README_CONTAINER_IMAGE.md
@@ -1,43 +1,72 @@
# Containerized openshift-ansible to run playbooks
-The [Dockerfile](Dockerfile) in this repository uses the [playbook2image](https://github.com/aweiteka/playbook2image) source-to-image base image to containerize `openshift-ansible`. The resulting image can run any of the provided playbooks.
+The [Dockerfile](Dockerfile) in this repository uses the [playbook2image](https://github.com/aweiteka/playbook2image) source-to-image base image to containerize `openshift-ansible`. The resulting image can run any of the provided playbooks. See [BUILD.md](BUILD.md) for image build instructions.
-**Note**: at this time there are known issues that prevent to run this image for installation/upgrade purposes (i.e. run one of the config/upgrade playbooks) from within one of the hosts that is also an installation target at the same time: if the playbook you want to run attempts to manage the docker daemon and restart it (like install/upgrade playbooks do) this would kill the container itself during its operation.
-
-## Build
-
-To build a container image of `openshift-ansible`:
-
-1. Using standalone **Docker**:
+The image is designed to **run as a non-root user**. The container's UID is mapped to the username `default` at runtime. Therefore, the container's environment reflects that user's settings, and the configuration should match that. For example `$HOME` is `/opt/app-root/src`, so ssh keys are expected to be under `/opt/app-root/src/.ssh`. If you ran a container as `root` you would have to adjust the container's configuration accordingly, e.g. by placing ssh keys under `/root/.ssh` instead. Nevertheless, the expectation is that containers will be run as non-root; for example, this container image can be run inside OpenShift under the default `restricted` [security context constraint](https://docs.openshift.org/latest/architecture/additional_concepts/authorization.html#security-context-constraints).
- cd openshift-ansible
- docker build -t openshift/openshift-ansible .
-
-1. Using an **OpenShift** build:
-
- oc new-build docker.io/aweiteka/playbook2image~https://github.com/openshift/openshift-ansible
- oc describe imagestream openshift-ansible
+**Note**: at this time there are known issues that prevent running this image for installation/upgrade purposes (i.e. running one of the config/upgrade playbooks) from within one of the hosts that is also an installation target at the same time: if the playbook you want to run attempts to manage the docker daemon and restart it (like install/upgrade playbooks do), this would kill the container itself during its operation.
## Usage
The `playbook2image` base image provides several options to control the behaviour of the containers. For more details on these options see the [playbook2image](https://github.com/aweiteka/playbook2image) documentation.
-At the very least, when running a container using an image built this way you must specify:
+At the very least, when running a container you must specify:
-1. An **inventory** file. This can be mounted inside the container as a volume and specified with the `INVENTORY_FILE` environment variable. Alternatively you can serve the inventory file from a web server and use the `INVENTORY_URL` environment variable to fetch it.
-1. **ssh keys** so that Ansible can reach your hosts. These should be mounted as a volume under `/opt/app-root/src/.ssh`
-1. The **playbook** to run. This is set using the `PLAYBOOK_FILE` environment variable. If you don't specify a playbook the [`openshift_facts`](playbooks/byo/openshift_facts.yml) playbook will be run to collecting and show facts about your OpenShift environment.
+1. An **inventory**. This can be a location inside the container (possibly mounted as a volume) with a path referenced via the `INVENTORY_FILE` environment variable. Alternatively you can serve the inventory file from a web server and use the `INVENTORY_URL` environment variable to fetch it, or `DYNAMIC_SCRIPT_URL` to download a script that provides a dynamic inventory.
-Here is an example of how to run a containerized `openshift-ansible` playbook that will check the expiration dates of OpenShift's internal certificates using the [`openshift_certificate_expiry` role](roles/openshift_certificate_expiry). The inventory and ssh keys are mounted as volumes (the latter requires setting the uid in the container and SELinux label in the key file via `:Z` so they can be accessed) and the `PLAYBOOK_FILE` environment variable is set to point to an example certificate check playbook that is already part of the image:
+1. **ssh keys** so that Ansible can reach your hosts. These should be mounted as a volume under `/opt/app-root/src/.ssh` under normal usage (i.e. when running the container as non-root).
+
+1. The **playbook** to run. This is set using the `PLAYBOOK_FILE` environment variable. If you don't specify a playbook the [`openshift_facts`](playbooks/byo/openshift_facts.yml) playbook will be run to collect and show facts about your OpenShift environment.
+
+Here is an example of how to run a containerized `openshift-ansible` playbook that will check the expiration dates of OpenShift's internal certificates using the [`openshift_certificate_expiry` role](roles/openshift_certificate_expiry):
docker run -u `id -u` \
-v $HOME/.ssh/id_rsa:/opt/app-root/src/.ssh/id_rsa:Z \
-v /etc/ansible/hosts:/tmp/inventory \
-e INVENTORY_FILE=/tmp/inventory \
- -e OPTS="-v" \
-e PLAYBOOK_FILE=playbooks/certificate_expiry/default.yaml \
+ -e OPTS="-v" -t \
openshift/openshift-ansible
-Further usage examples are available in the [examples directory](examples/).
+You might want to adjust some of the options in the example to match your environment and/or preferences. For example: you might want to create a separate directory on the host where you'll copy the ssh key and inventory files prior to invocation to avoid unwanted SELinux re-labeling of the original files or paths (see below).
+
+Here is a detailed explanation of the options used in the command above:
+
+* ``-u `id -u` `` makes the container run with the same UID as the current user, which is required for permissions so that the ssh key can be read inside the container (ssh private keys are expected to be readable only by their owner). Usually you would invoke `docker run` as a non-root user that has privileges to run containers and leave that option as is.
+
+* `-v $HOME/.ssh/id_rsa:/opt/app-root/src/.ssh/id_rsa:Z` mounts your ssh key (`$HOME/.ssh/id_rsa`) under the `default` user's `$HOME/.ssh` in the container (as explained above, `/opt/app-root/src` is the `$HOME` of the `default` user in the container). If you mount the ssh key into a non-standard location you can add an environment variable with `-e ANSIBLE_PRIVATE_KEY_FILE=/the/mount/point` or set `ansible_ssh_private_key_file=/the/mount/point` as a variable in the inventory to point Ansible at it.
+
+ Note that the ssh key is mounted with the `:Z` flag: this is also required so that the container can read the ssh key from its restricted SELinux context; this means that *your original ssh key file will be re-labeled* to something like `system_u:object_r:container_file_t:s0:c113,c247`. For more details about `:Z` please check the `docker-run(1)` man page. Please keep this in mind when providing these volume mount specifications because this could have unexpected consequences: for example, if you mount (and therefore re-label) your whole `$HOME/.ssh` directory you will block `sshd` from accessing your keys. This is a reason why you might want to work on a separate copy of the ssh key, so that the original file's labels remain untouched.
+
+* `-v /etc/ansible/hosts:/tmp/inventory` and `-e INVENTORY_FILE=/tmp/inventory` mount the Ansible inventory file into the container as `/tmp/inventory` and set the corresponding environment variable to point at it respectively. The example uses `/etc/ansible/hosts` as the inventory file as this is a default location, but your inventory is likely to be elsewhere so please adjust as needed. Note that depending on the file you point to you might have to handle SELinux labels in a similar way as with the ssh keys, e.g. by adding a `:z` flag to the volume mount, so again you might prefer to copy the inventory to a dedicated location first.
+
+* `-e PLAYBOOK_FILE=playbooks/certificate_expiry/default.yaml` specifies the playbook to run as a relative path from the top level directory of openshift-ansible.
+
+* `-e OPTS="-v"` and `-t` make the output look nicer: the `default.yaml` playbook does not generate results and runs quietly unless we add the `-v` option to the `ansible-playbook` invocation, and a TTY is allocated via `-t` so that Ansible adds color to the output.
+
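+As a variation on the example above, the inventory can also be fetched over HTTP instead of being mounted as a volume. A minimal sketch, assuming a hypothetical URL that serves your inventory (the `INVENTORY_URL` variable is described above, and `playbooks/byo/openshift_facts.yml` is the default playbook mentioned earlier):
+
+    docker run -u `id -u` \
+        -v $HOME/.ssh/id_rsa:/opt/app-root/src/.ssh/id_rsa:Z \
+        -e INVENTORY_URL=https://example.com/inventory \
+        -e PLAYBOOK_FILE=playbooks/byo/openshift_facts.yml \
+        -e OPTS="-v" -t \
+        openshift/openshift-ansible
+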
+Further usage examples are available in the [examples directory](examples/) with samples of how to use the image from within OpenShift.
Additional usage information for images built from `playbook2image` like this one can be found in the [playbook2image examples](https://github.com/aweiteka/playbook2image/tree/master/examples).
+
+## Running openshift-ansible as a System Container
+
+To build the System Container, see [BUILD.md](BUILD.md).
+
+Copy the host machine's ssh public key to the master and node machines in the cluster.
+
+If the inventory file needs to reference additional files, it can use the path `/var/lib/openshift-installer` in the container, as that path is bind mounted from the host (controllable with `VAR_LIB_OPENSHIFT_INSTALLER`).
+
+Run the Ansible system container:
+
+```sh
+atomic install --system --set INVENTORY_FILE=$(pwd)/inventory.origin openshift/openshift-ansible
+systemctl start openshift-ansible
+```
+
+The `INVENTORY_FILE` variable tells the installer which inventory file on the host will be bind mounted inside the container. In the example above, a file called `inventory.origin` in the current directory is used as the inventory file for the installer.
+
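+Other defaults defined in the system container's `manifest.json` can be overridden the same way with additional `--set` options. For instance, a sketch of pointing the installer at a different playbook (the playbook path here is only an example):
+
+```sh
+atomic install --system \
+    --set INVENTORY_FILE=$(pwd)/inventory.origin \
+    --set PLAYBOOK_FILE=/usr/share/ansible/openshift-ansible/playbooks/byo/openshift_facts.yml \
+    openshift/openshift-ansible
+systemctl start openshift-ansible
+```
+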
+Finally, to clean up the container:
+
+```
+atomic uninstall openshift-ansible
+```
diff --git a/bin/cluster b/bin/cluster
index b9b2ab15f..f77eb36ad 100755
--- a/bin/cluster
+++ b/bin/cluster
@@ -1,5 +1,4 @@
#!/usr/bin/env python2
-# vim: expandtab:tabstop=4:shiftwidth=4
import argparse
import ConfigParser
diff --git a/callback_plugins/aa_version_requirement.py b/callback_plugins/aa_version_requirement.py
index f31445381..20bdd9056 100644
--- a/callback_plugins/aa_version_requirement.py
+++ b/callback_plugins/aa_version_requirement.py
@@ -7,7 +7,6 @@ The plugin is named with leading `aa_` to ensure this plugin is loaded
first (alphanumerically) by Ansible.
"""
import sys
-from subprocess import check_output
from ansible import __version__
if __version__ < '2.0':
@@ -30,13 +29,8 @@ else:
# Set to minimum required Ansible version
-REQUIRED_VERSION = '2.2.0.0'
-DESCRIPTION = "Supported versions: %s or newer (except 2.2.1.0)" % REQUIRED_VERSION
-FAIL_ON_2_2_1_0 = "There are known issues with Ansible version 2.2.1.0 which " \
- "are impacting OpenShift-Ansible. Please use Ansible " \
- "version 2.2.0.0 or a version greater than 2.2.1.0. " \
- "See this issue for more details: " \
- "https://github.com/openshift/openshift-ansible/issues/3111"
+REQUIRED_VERSION = '2.2.2.0'
+DESCRIPTION = "Supported versions: %s or newer" % REQUIRED_VERSION
def version_requirement(version):
@@ -64,13 +58,3 @@ class CallbackModule(CallbackBase):
'FATAL: Current Ansible version (%s) is not supported. %s'
% (__version__, DESCRIPTION), color='red')
sys.exit(1)
-
- if __version__ == '2.2.1.0':
- rpm_ver = str(check_output(["rpm", "-qa", "ansible"]))
- patched_ansible = '2.2.1.0-2'
-
- if patched_ansible not in rpm_ver:
- display(
- 'FATAL: Current Ansible version (%s) is not supported. %s'
- % (__version__, FAIL_ON_2_2_1_0), color='red')
- sys.exit(1)
diff --git a/docs/best_practices_guide.adoc b/docs/best_practices_guide.adoc
index 7f3d85d40..e66c5addb 100644
--- a/docs/best_practices_guide.adoc
+++ b/docs/best_practices_guide.adoc
@@ -11,44 +11,9 @@ All new pull requests created against this repository MUST comply with this guid
This guide complies with https://www.ietf.org/rfc/rfc2119.txt[RFC2119].
-== Pull Requests
-
-
-
-[[All-pull-requests-MUST-pass-the-build-bot-before-they-are-merged]]
-[cols="2v,v"]
-|===
-| <<All-pull-requests-MUST-pass-the-build-bot-before-they-are-merged, Rule>>
-| All pull requests MUST pass the build bot *before* they are merged.
-|===
-
-The purpose of this rule is to avoid cases where the build bot will fail pull requests for code modified in a previous pull request.
-
-The tooling is flexible enough that exceptions can be made so that the tool the build bot is running will ignore certain areas or certain checks, but the build bot itself must pass for the pull request to be merged.
-
-
== Python
-=== Python Source Files
-
-'''
-[[Python-source-files-MUST-contain-the-following-vim-mode-line]]
-[cols="2v,v"]
-|===
-| <<Python-source-files-MUST-contain-the-following-vim-mode-line, Rule>>
-| Python source files MUST contain the following vim mode line.
-|===
-
-[source]
-----
-# vim: expandtab:tabstop=4:shiftwidth=4
-----
-
-Since most developers contributing to this repository use vim, this rule helps to promote consistency.
-
-If mode lines for other editors are needed, please open a GitHub issue.
-
=== Method Signatures
'''
@@ -509,12 +474,12 @@ The Ansible `package` module calls the associated package manager for the underl
# tasks.yml
- name: Install etcd (for etcdctl)
yum: name=etcd state=latest
- when: "ansible_pkg_mgr == yum"
+ when: ansible_pkg_mgr == yum
register: install_result
- name: Install etcd (for etcdctl)
dnf: name=etcd state=latest
- when: "ansible_pkg_mgr == dnf"
+ when: ansible_pkg_mgr == dnf
register: install_result
----
diff --git a/docs/pull_requests.md b/docs/pull_requests.md
new file mode 100644
index 000000000..fcc3e275c
--- /dev/null
+++ b/docs/pull_requests.md
@@ -0,0 +1,95 @@
+# Pull Request process
+
+Pull Requests in the `openshift-ansible` project follow a
+[Continuous](https://en.wikipedia.org/wiki/Continuous_integration)
+[Integration](https://martinfowler.com/articles/continuousIntegration.html)
+process that is similar to the process observed in other repositories such as
+[`origin`](https://github.com/openshift/origin).
+
+Whenever a
+[Pull Request is opened](../CONTRIBUTING.md#submitting-contributions), some
+automated test jobs must be successfully run before the PR can be merged.
+
+Some of these jobs are automatically triggered, e.g., Travis and Coveralls.
+Other jobs need to be manually triggered by a member of the
+[Team OpenShift Ansible Contributors](https://github.com/orgs/openshift/teams/team-openshift-ansible-contributors).
+
+## Triggering tests
+
+We have two different Jenkins infrastructures and, as long as that is the case,
+there are two commands, each triggering a different set of test jobs. We are
+working on simplifying the workflow towards a single infrastructure in the future.
+
+- **Test jobs on the older infrastructure**
+
+ Members of the [OpenShift organization](https://github.com/orgs/openshift/people)
+ can trigger the set of test jobs in the older infrastructure by writing a
+ comment with the exact text `aos-ci-test` and nothing else.
+
+ The Jenkins host is not publicly accessible. Test results are posted to S3
+ buckets when complete, and links are available both at the bottom of the Pull
+ Request page and as comments posted by
+ [@openshift-bot](https://github.com/openshift-bot).
+
+- **Test jobs on the newer infrastructure**
+
+ Members of the
+ [Team OpenShift Ansible Contributors](https://github.com/orgs/openshift/teams/team-openshift-ansible-contributors)
+ can trigger the set of test jobs in the newer infrastructure by writing a
+ comment containing `[test]` anywhere in the comment body.
+
+ The [Jenkins host](https://ci.openshift.redhat.com/jenkins/job/test_pull_request_openshift_ansible/)
+ is publicly accessible. Like for the older infrastructure, the result of each
+ job is also posted to the Pull Request as comments and summarized at the
+ bottom of the Pull Request page.
+
+### Fedora tests
+
+There is a set of tests that run on Fedora infrastructure. They are started
+automatically with every pull request.
+
+They are implemented using the [`redhat-ci` framework](https://github.com/jlebon/redhat-ci).
+
+To re-run tests, write a comment containing `bot, retest this please`.
+
+## Triggering merge
+
+After a PR is properly reviewed and a set of
+[required jobs](https://github.com/openshift/aos-cd-jobs/blob/master/sjb/test_status_config.yml)
+reported successfully, it can be tagged for merge by a member of the
+[Team OpenShift Ansible Contributors](https://github.com/orgs/openshift/teams/team-openshift-ansible-contributors)
+by writing a comment containing `[merge]` anywhere in the comment body.
+
+Tagging a Pull Request for merge puts it in an automated merge queue. The
+[@openshift-bot](https://github.com/openshift-bot) monitors the queue and merges
+PRs that pass all of the required tests.
+
+### Manual merges
+
+The normal process described above should be followed: `aos-ci-test` and
+`[test]` / `[merge]`.
+
+In exceptional cases, such as when known problems with the merge queue prevent
+PRs from being merged, a PR may be manually merged if _all_ of these conditions
+are true:
+
+- [ ] Travis job must have passed (as enforced by GitHub)
+- [ ] Must have passed `aos-ci-test` (as enforced by GitHub)
+- [ ] Must have a positive review (as enforced by GitHub)
+- [ ] Must have failed the `[merge]` queue with a reported flake at least twice
+- [ ] Must have [issues labeled kind/test-flake](https://github.com/openshift/origin/issues?q=is%3Aopen+is%3Aissue+label%3Akind%2Ftest-flake) in [Origin](https://github.com/openshift/origin) linked in comments for the failures
+- [ ] Content must not have changed since all of the above conditions have been met (no rebases, no new commits)
+
+This exception is temporary and should be completely removed in the future once
+the merge queue has become more stable.
+
+Only members of the
+[Team OpenShift Ansible Committers](https://github.com/orgs/openshift/teams/team-openshift-ansible-committers)
+can perform manual merges.
+
+## Useful links
+
+- Repository containing Jenkins job definitions: https://github.com/openshift/aos-cd-jobs
+- List of required successful jobs before merge: https://github.com/openshift/aos-cd-jobs/blob/master/sjb/test_status_config.yml
+- Source code of the bot responsible for testing and merging PRs: https://github.com/openshift/test-pull-requests/
+- Trend of the time taken by merge jobs: https://ci.openshift.redhat.com/jenkins/job/merge_pull_request_openshift_ansible/buildTimeTrend
diff --git a/docs/repo_structure.md b/docs/repo_structure.md
new file mode 100644
index 000000000..693837fba
--- /dev/null
+++ b/docs/repo_structure.md
@@ -0,0 +1,54 @@
+# Repository structure
+
+### Ansible
+
+```
+.
+├── inventory Contains dynamic inventory scripts, and examples of
+│ Ansible inventories.
+├── library Contains Python modules used by the playbooks.
+├── playbooks Contains Ansible playbooks targeting multiple use cases.
+└── roles Contains Ansible roles, units of shared behavior among
+ playbooks.
+```
+
+#### Ansible plugins
+
+These are plugins used in playbooks and roles:
+
+```
+.
+├── ansible-profile
+├── callback_plugins
+├── filter_plugins
+└── lookup_plugins
+```
+
+### Scripts
+
+```
+.
+├── bin [DEPRECATED] Contains the `bin/cluster` script, a
+│ wrapper around the Ansible playbooks that ensures proper
+│ configuration, and facilitates installing, updating,
+│ destroying and configuring OpenShift clusters.
+│ Note: this tool is kept in the repository for legacy
+│ reasons and will be removed at some point.
+└── utils Contains the `atomic-openshift-installer` command, an
+ interactive CLI utility to install OpenShift across a
+ set of hosts.
+```
+
+### Documentation
+
+```
+.
+└── docs Contains documentation for this repository.
+```
+
+### Tests
+
+```
+.
+└── test Contains tests.
+```
diff --git a/examples/README.md b/examples/README.md
index 0e412244d..d54752fb9 100644
--- a/examples/README.md
+++ b/examples/README.md
@@ -69,19 +69,19 @@ To run these examples we prepare the inventory and ssh keys as in the other exam
Additionally we allocate a `PersistentVolumeClaim` to store the reports:
- oc create -f - <<PVC
- ---
- apiVersion: v1
- kind: PersistentVolumeClaim
- metadata:
- name: certcheck-reports
- spec:
- accessModes:
- - ReadWriteOnce
- resources:
- requests:
- storage: 1Gi
- PVC
+ oc create -f - <<PVC
+ ---
+ apiVersion: v1
+ kind: PersistentVolumeClaim
+ metadata:
+ name: certcheck-reports
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 1Gi
+ PVC
With that we can run the `Job` once:
diff --git a/examples/certificate-check-upload.yaml b/examples/certificate-check-upload.yaml
index b10a0b614..8b560447f 100644
--- a/examples/certificate-check-upload.yaml
+++ b/examples/certificate-check-upload.yaml
@@ -20,28 +20,34 @@ kind: Job
metadata:
name: certificate-check
spec:
- containers:
- - name: openshift-ansible
- image: openshift/openshift-ansible
- env:
- - name: PLAYBOOK_FILE
- value: playbooks/certificate_expiry/easy-mode-upload.yaml
- - name: INVENTORY_FILE
- value: /tmp/inventory/hosts # from configmap vol below
- - name: ANSIBLE_PRIVATE_KEY_FILE # from secret vol below
- value: /opt/app-root/src/.ssh/id_rsa/ssh-privatekey
- - name: CERT_EXPIRY_WARN_DAYS
- value: "45" # must be a string, don't forget the quotes
- volumeMounts:
- - name: sshkey
- mountPath: /opt/app-root/src/.ssh/id_rsa
- - name: inventory
- mountPath: /tmp/inventory
- volumes:
- - name: sshkey
- secret:
- secretName: sshkey
- - name: inventory
- configMap:
- name: inventory
- restartPolicy: Never
+ parallelism: 1
+ completions: 1
+ template:
+ metadata:
+ name: certificate-check
+ spec:
+ containers:
+ - name: openshift-ansible
+ image: openshift/openshift-ansible
+ env:
+ - name: PLAYBOOK_FILE
+ value: playbooks/certificate_expiry/easy-mode-upload.yaml
+ - name: INVENTORY_FILE
+ value: /tmp/inventory/hosts # from configmap vol below
+ - name: ANSIBLE_PRIVATE_KEY_FILE # from secret vol below
+ value: /opt/app-root/src/.ssh/id_rsa/ssh-privatekey
+ - name: CERT_EXPIRY_WARN_DAYS
+ value: "45" # must be a string, don't forget the quotes
+ volumeMounts:
+ - name: sshkey
+ mountPath: /opt/app-root/src/.ssh/id_rsa
+ - name: inventory
+ mountPath: /tmp/inventory
+ volumes:
+ - name: sshkey
+ secret:
+ secretName: sshkey
+ - name: inventory
+ configMap:
+ name: inventory
+ restartPolicy: Never
diff --git a/examples/certificate-check-volume.yaml b/examples/certificate-check-volume.yaml
index c19dc1f88..f6613bcd8 100644
--- a/examples/certificate-check-volume.yaml
+++ b/examples/certificate-check-volume.yaml
@@ -22,33 +22,39 @@ kind: Job
metadata:
name: certificate-check
spec:
- containers:
- - name: openshift-ansible
- image: openshift/openshift-ansible
- env:
- - name: PLAYBOOK_FILE
- value: playbooks/certificate_expiry/html_and_json_timestamp.yaml
- - name: INVENTORY_FILE
- value: /tmp/inventory/hosts # from configmap vol below
- - name: ANSIBLE_PRIVATE_KEY_FILE # from secret vol below
- value: /opt/app-root/src/.ssh/id_rsa/ssh-privatekey
- - name: CERT_EXPIRY_WARN_DAYS
- value: "45" # must be a string, don't forget the quotes
- volumeMounts:
- - name: sshkey
- mountPath: /opt/app-root/src/.ssh/id_rsa
- - name: inventory
- mountPath: /tmp/inventory
- - name: reports
- mountPath: /var/lib/certcheck
- volumes:
- - name: sshkey
- secret:
- secretName: sshkey
- - name: inventory
- configMap:
- name: inventory
- - name: reports
- persistentVolumeClaim:
- claimName: certcheck-reports
- restartPolicy: Never
+ parallelism: 1
+ completions: 1
+ template:
+ metadata:
+ name: certificate-check
+ spec:
+ containers:
+ - name: openshift-ansible
+ image: openshift/openshift-ansible
+ env:
+ - name: PLAYBOOK_FILE
+ value: playbooks/certificate_expiry/html_and_json_timestamp.yaml
+ - name: INVENTORY_FILE
+ value: /tmp/inventory/hosts # from configmap vol below
+ - name: ANSIBLE_PRIVATE_KEY_FILE # from secret vol below
+ value: /opt/app-root/src/.ssh/id_rsa/ssh-privatekey
+ - name: CERT_EXPIRY_WARN_DAYS
+ value: "45" # must be a string, don't forget the quotes
+ volumeMounts:
+ - name: sshkey
+ mountPath: /opt/app-root/src/.ssh/id_rsa
+ - name: inventory
+ mountPath: /tmp/inventory
+ - name: reports
+ mountPath: /var/lib/certcheck
+ volumes:
+ - name: sshkey
+ secret:
+ secretName: sshkey
+ - name: inventory
+ configMap:
+ name: inventory
+ - name: reports
+ persistentVolumeClaim:
+ claimName: certcheck-reports
+ restartPolicy: Never
diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py
index b550bd16a..8b279981d 100644
--- a/filter_plugins/oo_filters.py
+++ b/filter_plugins/oo_filters.py
@@ -1,6 +1,5 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
-# vim: expandtab:tabstop=4:shiftwidth=4
# pylint: disable=too-many-lines
"""
Custom filters for use in openshift-ansible
@@ -11,6 +10,7 @@ import pdb
import random
import re
+from base64 import b64encode
from collections import Mapping
# pylint no-name-in-module and import-error disabled here because pylint
# fails to properly detect the packages when installed in a virtualenv
@@ -672,8 +672,7 @@ def oo_generate_secret(num_bytes):
if not isinstance(num_bytes, int):
raise errors.AnsibleFilterError("|failed expects num_bytes is int")
- secret = os.urandom(num_bytes)
- return secret.encode('base-64').strip()
+ return b64encode(os.urandom(num_bytes)).decode('utf-8')
def to_padded_yaml(data, level=0, indent=2, **kw):
@@ -773,6 +772,23 @@ def oo_persistent_volumes(hostvars, groups, persistent_volumes=None):
fsType=filesystem,
volumeID=volume_id)))
persistent_volumes.append(persistent_volume)
+ elif kind == 'glusterfs':
+ volume = params['volume']['name']
+ size = params['volume']['size']
+ access_modes = params['access']['modes']
+ endpoints = params['glusterfs']['endpoints']
+ path = params['glusterfs']['path']
+ read_only = params['glusterfs']['readOnly']
+ persistent_volume = dict(
+ name="{0}-volume".format(volume),
+ capacity=size,
+ access_modes=access_modes,
+ storage=dict(
+ glusterfs=dict(
+ endpoints=endpoints,
+ path=path,
+ readOnly=read_only)))
+ persistent_volumes.append(persistent_volume)
elif not (kind == 'object' or kind == 'dynamic'):
msg = "|failed invalid storage kind '{0}' for component '{1}'".format(
kind,
diff --git a/filter_plugins/openshift_node.py b/filter_plugins/openshift_node.py
index 8c7302052..cad95ea6d 100644
--- a/filter_plugins/openshift_node.py
+++ b/filter_plugins/openshift_node.py
@@ -1,6 +1,5 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
-# vim: expandtab:tabstop=4:shiftwidth=4
'''
Custom filters for use in openshift-node
'''
diff --git a/filter_plugins/openshift_version.py b/filter_plugins/openshift_version.py
index 1403e9dcc..809e82488 100644
--- a/filter_plugins/openshift_version.py
+++ b/filter_plugins/openshift_version.py
@@ -1,7 +1,5 @@
#!/usr/bin/python
-
# -*- coding: utf-8 -*-
-# vim: expandtab:tabstop=4:shiftwidth=4
"""
Custom version comparison filters for use in openshift-ansible
"""
diff --git a/hack/hooks/README.md b/hack/hooks/README.md
new file mode 100644
index 000000000..ef870540a
--- /dev/null
+++ b/hack/hooks/README.md
@@ -0,0 +1,37 @@
+# OpenShift-Ansible Git Hooks
+
+## Introduction
+
+This `hack` sub-directory holds
+[git commit hooks](https://www.atlassian.com/git/tutorials/git-hooks#conceptual-overview)
+you may use when working on openshift-ansible contributions. See the
+README in each sub-directory for an overview of what each hook does
+and if the hook has any specific usage or setup instructions.
+
+## Usage
+
+Basic git hook usage is simple:
+
+1) Copy (or symlink) the hook to the `$REPO_ROOT/.git/hooks/` directory (see the example below)
+2) Make the hook executable (`chmod +x $PATH_TO_HOOK`)
+
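+For example, a sketch of installing the `verify_generated_modules`
+pre-commit hook from this repository (run from the repository root;
+the paths assume the default layout):
+
+```
+ln -s "$(pwd)/hack/hooks/verify_generated_modules/pre-commit" .git/hooks/pre-commit
+chmod +x hack/hooks/verify_generated_modules/pre-commit
+```
+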
+## Multiple Hooks of the Same Type
+
+If you want to install multiple hooks of the same type, for example
+multiple `pre-commit` hooks, you will need some kind of *hook
+dispatcher*. For an example of an easy-to-use hook dispatcher, check
+out this gist by carlos-jenkins:
+
+* [multihooks.py](https://gist.github.com/carlos-jenkins/89da9dcf9e0d528ac978311938aade43)
+
+## Contributing Hooks
+
+If you want to contribute a new hook there are only a few criteria
+that must be met:
+
+* The hook **MUST** include a README describing the purpose of the hook
+* The README **MUST** describe special setup instructions if they are required
+* The hook **MUST** be in a sub-directory of this directory
+* The hook file **MUST** be named following the standard git hook
+ naming pattern (i.e., pre-commit hooks **MUST** be called
+ `pre-commit`)
diff --git a/hack/hooks/verify_generated_modules/README.md b/hack/hooks/verify_generated_modules/README.md
new file mode 100644
index 000000000..093fcf76a
--- /dev/null
+++ b/hack/hooks/verify_generated_modules/README.md
@@ -0,0 +1,19 @@
+# Verify Generated Modules
+
+Pre-commit hook for verifying that generated library modules match
+their EXPECTED content. Library modules are generated from fragments
+under the `roles/lib_(openshift|utils)/src/` directories.
+
+If the attempted commit modified files under the
+`roles/lib_(openshift|utils)/` directories this script will run the
+`generate.py --verify` command.
+
+This script will **NOT RUN** if module source fragments are modified
+but *not part of the commit*. I.e., you can still make commits if you
+modified module fragments AND other files but are *not committing the
+module fragments*.
+
+# Setup Instructions
+
+Standard installation procedure. Copy the hook to the `.git/hooks/`
+directory and ensure it is executable.
diff --git a/hack/hooks/verify_generated_modules/pre-commit b/hack/hooks/verify_generated_modules/pre-commit
new file mode 100755
index 000000000..8a319fd7e
--- /dev/null
+++ b/hack/hooks/verify_generated_modules/pre-commit
@@ -0,0 +1,55 @@
+#!/bin/sh
+
+######################################################################
+# Pre-commit hook for verifying that generated library modules match
+# their EXPECTED content. Library modules are generated from fragments
+# under the 'roles/lib_(openshift|utils)/src/' directories.
+#
+# If the attempted commit modified files under the
+# 'roles/lib_(openshift|utils)/' directories this script will run the
+# 'generate.py --verify' command.
+#
+# This script will NOT RUN if module source fragments are modified but
+# not part of the commit. I.e., you can still make commits if you
+# modified module fragments AND other files but are not committing the
+# module fragments.
+
+# Did the commit modify any source module files?
+CHANGES=`git diff-index --stat --cached HEAD | grep -E '^ roles/lib_(openshift|utils)/src/(class|doc|ansible|lib)/'`
+RET_CODE=$?
+ABORT=0
+
+if [ "${RET_CODE}" -eq "0" ]; then
+ # Modifications detected. Run the verification scripts.
+
+ # Which was it?
+ if $(echo $CHANGES | grep -q 'roles/lib_openshift/'); then
+ echo "Validating lib_openshift..."
+ ./roles/lib_openshift/src/generate.py --verify
+ if [ "${?}" -ne "0" ]; then
+ ABORT=1
+ fi
+ fi
+
+ if $(echo $CHANGES | grep -q 'roles/lib_utils/'); then
+ echo "Validating lib_utils..."
+ ./roles/lib_utils/src/generate.py --verify
+ if [ "${?}" -ne "0" ]; then
+ ABORT=1
+ fi
+ fi
+
+ if [ "${ABORT}" -eq "1" ]; then
+ cat <<EOF
+
+ERROR: Module verification failed. Generated files do not match fragments.
+
+Choices to continue:
+ 1) Run './roles/lib_(openshift|utils)/src/generate.py' from the root of
+ the repo to regenerate the files
+ 2) Skip verification with '--no-verify' option to 'git commit'
+EOF
+ fi
+fi
+
+exit $ABORT
diff --git a/Dockerfile b/images/installer/Dockerfile
index eecf3630b..1df887f32 100644
--- a/Dockerfile
+++ b/images/installer/Dockerfile
@@ -16,6 +16,12 @@ LABEL name="openshift-ansible" \
USER root
+# Create a symlink to /opt/app-root/src so that files under /usr/share/ansible are accessible.
+# This is required since the system-container by default uses the playbook under
+# /usr/share/ansible/openshift-ansible. With this change we won't need to keep two different
+# configurations for the two images.
+RUN mkdir -p /usr/share/ansible/ && ln -s /opt/app-root/src /usr/share/ansible/openshift-ansible
+
RUN INSTALL_PKGS="skopeo" && \
yum install -y --setopt=tsflags=nodocs $INSTALL_PKGS && \
rpm -V $INSTALL_PKGS && \
@@ -39,4 +45,7 @@ ADD . /tmp/src
# as per the INSTALL_OC environment setting above
RUN /usr/libexec/s2i/assemble
+# Add files for running as a system container
+COPY system-container/root /
+
CMD [ "/usr/libexec/s2i/run" ]
diff --git a/Dockerfile.rhel7 b/images/installer/Dockerfile.rhel7
index 0d5a6038a..00841e660 100644
--- a/Dockerfile.rhel7
+++ b/images/installer/Dockerfile.rhel7
@@ -20,9 +20,10 @@ LABEL name="openshift3/openshift-ansible" \
# because all content and dependencies (like 'oc') is already
# installed via yum.
USER root
-RUN INSTALL_PKGS="atomic-openshift-utils atomic-openshift-clients" && \
+RUN INSTALL_PKGS="atomic-openshift-utils atomic-openshift-clients python-boto" && \
yum repolist > /dev/null && \
yum-config-manager --enable rhel-7-server-ose-3.4-rpms && \
+ yum-config-manager --enable rhel-7-server-rh-common-rpms && \
yum install -y $INSTALL_PKGS && \
yum clean all
@@ -38,4 +39,7 @@ ENV PLAYBOOK_FILE=playbooks/byo/openshift_facts.yml \
WORK_DIR=/usr/share/ansible/openshift-ansible \
OPTS="-v"
+# Add files for running as a system container
+COPY system-container/root /
+
CMD [ "/usr/libexec/s2i/run" ]
diff --git a/images/installer/system-container/README.md b/images/installer/system-container/README.md
new file mode 100644
index 000000000..dc95307e5
--- /dev/null
+++ b/images/installer/system-container/README.md
@@ -0,0 +1,13 @@
+# System container installer
+
+These files are needed to run the installer using an [Atomic System container](http://www.projectatomic.io/blog/2016/09/intro-to-system-containers/).
+
+* config.json.template - Template of the configuration file used for running containers.
+
+* manifest.json - Used to define various settings for the system container, such as the default values to use for the installation.
+
+* run-system-container.sh - Entrypoint to the container.
+
+* service.template - Template file for the systemd service.
+
+* tmpfiles.template - Template file for systemd-tmpfiles.
diff --git a/images/installer/system-container/root/exports/config.json.template b/images/installer/system-container/root/exports/config.json.template
new file mode 100644
index 000000000..383e3696e
--- /dev/null
+++ b/images/installer/system-container/root/exports/config.json.template
@@ -0,0 +1,223 @@
+{
+ "ociVersion": "1.0.0",
+ "platform": {
+ "os": "linux",
+ "arch": "amd64"
+ },
+ "process": {
+ "terminal": false,
+ "consoleSize": {
+ "height": 0,
+ "width": 0
+ },
+ "user": {
+ "uid": 0,
+ "gid": 0
+ },
+ "args": [
+ "/usr/local/bin/run-system-container.sh"
+ ],
+ "env": [
+ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
+ "TERM=xterm",
+ "OPTS=$OPTS",
+ "PLAYBOOK_FILE=$PLAYBOOK_FILE"
+ ],
+ "cwd": "/opt/app-root/src/",
+ "rlimits": [
+ {
+ "type": "RLIMIT_NOFILE",
+ "hard": 1024,
+ "soft": 1024
+ }
+ ],
+ "noNewPrivileges": true
+ },
+ "root": {
+ "path": "rootfs",
+ "readonly": true
+ },
+ "mounts": [
+ {
+ "destination": "/proc",
+ "type": "proc",
+ "source": "proc"
+ },
+ {
+ "destination": "/dev",
+ "type": "tmpfs",
+ "source": "tmpfs",
+ "options": [
+ "nosuid",
+ "strictatime",
+ "mode=755",
+ "size=65536k"
+ ]
+ },
+ {
+ "destination": "/dev/pts",
+ "type": "devpts",
+ "source": "devpts",
+ "options": [
+ "nosuid",
+ "noexec",
+ "newinstance",
+ "ptmxmode=0666",
+ "mode=0620",
+ "gid=5"
+ ]
+ },
+ {
+ "destination": "/dev/shm",
+ "type": "tmpfs",
+ "source": "shm",
+ "options": [
+ "nosuid",
+ "noexec",
+ "nodev",
+ "mode=1777",
+ "size=65536k"
+ ]
+ },
+ {
+ "destination": "/dev/mqueue",
+ "type": "mqueue",
+ "source": "mqueue",
+ "options": [
+ "nosuid",
+ "noexec",
+ "nodev"
+ ]
+ },
+ {
+ "destination": "/sys",
+ "type": "sysfs",
+ "source": "sysfs",
+ "options": [
+ "nosuid",
+ "noexec",
+ "nodev",
+ "ro"
+ ]
+ },
+ {
+ "type": "bind",
+ "source": "$SSH_ROOT",
+ "destination": "/opt/app-root/src/.ssh",
+ "options": [
+ "bind",
+ "rw",
+ "mode=755"
+ ]
+ },
+ {
+ "type": "bind",
+ "source": "$SSH_ROOT",
+ "destination": "/root/.ssh",
+ "options": [
+ "bind",
+ "rw",
+ "mode=755"
+ ]
+ },
+ {
+ "type": "bind",
+ "source": "$VAR_LIB_OPENSHIFT_INSTALLER",
+ "destination": "/var/lib/openshift-installer",
+ "options": [
+ "bind",
+ "rw",
+ "mode=755"
+ ]
+ },
+ {
+ "type": "bind",
+ "source": "$VAR_LOG_OPENSHIFT_LOG",
+ "destination": "/var/log/ansible.log",
+ "options": [
+ "bind",
+ "rw",
+ "mode=755"
+ ]
+ },
+ {
+ "destination": "/root/.ansible",
+ "type": "tmpfs",
+ "source": "tmpfs",
+ "options": [
+ "nosuid",
+ "strictatime",
+ "mode=755"
+ ]
+ },
+ {
+ "destination": "/tmp",
+ "type": "tmpfs",
+ "source": "tmpfs",
+ "options": [
+ "nosuid",
+ "strictatime",
+ "mode=755"
+ ]
+ },
+ {
+ "type": "bind",
+ "source": "$INVENTORY_FILE",
+ "destination": "/etc/ansible/hosts",
+ "options": [
+ "bind",
+ "rw",
+ "mode=755"
+ ]
+ },
+ {
+ "destination": "/sys/fs/cgroup",
+ "type": "cgroup",
+ "source": "cgroup",
+ "options": [
+ "nosuid",
+ "noexec",
+ "nodev",
+ "relatime",
+ "ro"
+ ]
+ }
+ ],
+ "hooks": {
+
+ },
+ "linux": {
+ "resources": {
+ "devices": [
+ {
+ "allow": false,
+ "access": "rwm"
+ }
+ ]
+ },
+ "namespaces": [
+ {
+ "type": "pid"
+ },
+ {
+ "type": "mount"
+ }
+ ],
+ "maskedPaths": [
+ "/proc/kcore",
+ "/proc/latency_stats",
+ "/proc/timer_list",
+ "/proc/timer_stats",
+ "/proc/sched_debug",
+ "/sys/firmware"
+ ],
+ "readonlyPaths": [
+ "/proc/asound",
+ "/proc/bus",
+ "/proc/fs",
+ "/proc/irq",
+ "/proc/sys",
+ "/proc/sysrq-trigger"
+ ]
+ }
+}
diff --git a/images/installer/system-container/root/exports/manifest.json b/images/installer/system-container/root/exports/manifest.json
new file mode 100644
index 000000000..1db845965
--- /dev/null
+++ b/images/installer/system-container/root/exports/manifest.json
@@ -0,0 +1,11 @@
+{
+ "version": "1.0",
+ "defaultValues": {
+ "OPTS": "",
+ "VAR_LIB_OPENSHIFT_INSTALLER" : "/var/lib/openshift-installer",
+ "VAR_LOG_OPENSHIFT_LOG": "/var/log/ansible.log",
+ "PLAYBOOK_FILE": "/usr/share/ansible/openshift-ansible/playbooks/byo/config.yml",
+ "SSH_ROOT": "/root/.ssh",
+ "INVENTORY_FILE": "/dev/null"
+ }
+}
diff --git a/images/installer/system-container/root/exports/service.template b/images/installer/system-container/root/exports/service.template
new file mode 100644
index 000000000..bf5316af6
--- /dev/null
+++ b/images/installer/system-container/root/exports/service.template
@@ -0,0 +1,6 @@
+[Service]
+ExecStart=$EXEC_START
+ExecStop=-$EXEC_STOP
+Restart=no
+WorkingDirectory=$DESTDIR
+Type=oneshot
diff --git a/images/installer/system-container/root/exports/tmpfiles.template b/images/installer/system-container/root/exports/tmpfiles.template
new file mode 100644
index 000000000..b1f6caf47
--- /dev/null
+++ b/images/installer/system-container/root/exports/tmpfiles.template
@@ -0,0 +1,2 @@
+d $VAR_LIB_OPENSHIFT_INSTALLER - - - - -
+f $VAR_LOG_OPENSHIFT_LOG - - - - -
diff --git a/images/installer/system-container/root/usr/local/bin/run-system-container.sh b/images/installer/system-container/root/usr/local/bin/run-system-container.sh
new file mode 100755
index 000000000..9ce7c7328
--- /dev/null
+++ b/images/installer/system-container/root/usr/local/bin/run-system-container.sh
@@ -0,0 +1,4 @@
+#!/bin/sh
+
+export ANSIBLE_LOG_PATH=/var/log/ansible.log
+exec ansible-playbook -i /etc/ansible/hosts ${OPTS} ${PLAYBOOK_FILE}
diff --git a/inventory/byo/hosts.byo.native-glusterfs.example b/inventory/byo/hosts.byo.native-glusterfs.example
new file mode 100644
index 000000000..2dbb57d40
--- /dev/null
+++ b/inventory/byo/hosts.byo.native-glusterfs.example
@@ -0,0 +1,51 @@
+# This is an example of a bring your own (byo) host inventory for a cluster
+# with natively hosted, containerized GlusterFS storage.
+#
+# This inventory may be used with the byo/config.yml playbook to deploy a new
+# cluster with GlusterFS storage, which will use that storage to create a
+# volume that will provide backend storage for a hosted Docker registry.
+#
+# This inventory may also be used with byo/openshift-glusterfs/config.yml to
+# deploy GlusterFS storage on an existing cluster. With this playbook, the
+# registry backend volume will be created but the administrator must then
+# either deploy a hosted registry or change an existing hosted registry to use
+# that volume.
+#
+# There are additional configuration parameters that can be specified to
+# control the deployment and state of a GlusterFS cluster. Please see the
+# documentation in playbooks/byo/openshift-glusterfs/README.md and
+# roles/openshift_storage_glusterfs/README.md for additional details.
+
+[OSEv3:children]
+masters
+nodes
+# Specify there will be GlusterFS nodes
+glusterfs
+
+[OSEv3:vars]
+ansible_ssh_user=root
+deployment_type=origin
+# Specify that we want to use GlusterFS storage for a hosted registry
+openshift_hosted_registry_storage_kind=glusterfs
+
+[masters]
+master node=True storage=True master=True
+
+[nodes]
+master node=True storage=True master=True openshift_schedulable=False
+# A hosted registry, by default, will only be deployed on nodes labeled
+# "region=infra".
+node0 node=True openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
+node1 node=True openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
+node2 node=True openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
+
+# Specify the glusterfs group, which contains the nodes that will host
+# GlusterFS storage pods. At a minimum, each node must have a
+# "glusterfs_devices" variable defined. This variable is a list of block
+# devices the node will have access to that are intended solely for use as
+# GlusterFS storage. These block devices must be bare (e.g. have no data, not
+# be marked as LVM PVs), and will be formatted.
+[glusterfs]
+node0 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
+node1 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
+node2 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
diff --git a/inventory/byo/hosts.origin.example b/inventory/byo/hosts.origin.example
index a99423411..20f342023 100644
--- a/inventory/byo/hosts.origin.example
+++ b/inventory/byo/hosts.origin.example
@@ -30,17 +30,17 @@ openshift_deployment_type=origin
# use this to lookup the latest exact version of the container images, which is the tag actually used to configure
# the cluster. For RPM installations we just verify the version detected in your configured repos matches this
# release.
-openshift_release=v1.5
+openshift_release=v3.6
# Specify an exact container image tag to install or configure.
# WARNING: This value will be used for all hosts in containerized environments, even those that have another version installed.
# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
-#openshift_image_tag=v1.5.0
+#openshift_image_tag=v3.6.0
# Specify an exact rpm version to install or configure.
# WARNING: This value will be used for all hosts in RPM based environments, even those that have another version installed.
# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
-#openshift_pkg_version=-1.5.0
+#openshift_pkg_version=-3.6.0
# Install the openshift examples
#openshift_install_examples=true
@@ -78,6 +78,18 @@ openshift_release=v1.5
#openshift_docker_blocked_registries=registry.hacker.com
# Disable pushing to dockerhub
#openshift_docker_disable_push_dockerhub=True
+# Use Docker inside a System Container. Note that this is a tech preview and should
+# not be used to upgrade!
+# The following options for docker are ignored:
+# - docker_version
+# - docker_upgrade
+# The following options must not be used:
+# - openshift_docker_options
+#openshift_docker_use_system_container=False
+# Force the registry used for the system container. By default the registry
+# will be derived from the deployment type and ansible_distribution. Only
+# use this option if you are sure you know what you are doing!
+#openshift_docker_systemcontainer_image_registry_override="registry.example.com"
# Items added, as is, to end of /etc/sysconfig/docker OPTIONS
# Default value: "--log-driver=journald"
#openshift_docker_options="-l warn --ipv6=false"
@@ -556,7 +568,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_hosted_logging_elasticsearch_cluster_size=1
# Configure the prefix and version for the component images
#openshift_hosted_logging_deployer_prefix=docker.io/openshift/origin-
-#openshift_hosted_logging_deployer_version=1.5.0
+#openshift_hosted_logging_deployer_version=3.6.0
# Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet')
# os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'
@@ -568,10 +580,17 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# network blocks should be private and should not conflict with network blocks
# in your infrastructure that pods may require access to. Can not be changed
# after deployment.
+#
+# WARNING: Do not pick subnets that overlap with the default Docker bridge subnet of
+# 172.17.0.0/16. Your installation will fail and/or your configuration change will
+# cause the Pod SDN or Cluster SDN to fail.
+#
+# WORKAROUND: If you must use an overlapping subnet, you can configure a non-conflicting
+# docker0 CIDR range by adding '--bip=192.168.2.1/24' to the DOCKER_NETWORK_OPTIONS
+# environment variable in /etc/sysconfig/docker-network.
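+#
+# For example, a non-conflicting docker0 bridge could be configured with a line
+# like the following in /etc/sysconfig/docker-network (illustrative value):
+#   DOCKER_NETWORK_OPTIONS='--bip=192.168.2.1/24'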
#osm_cluster_network_cidr=10.128.0.0/14
#openshift_portal_net=172.30.0.0/16
-
# ExternalIPNetworkCIDRs controls what values are acceptable for the
# service external IP field. If empty, no externalIP may be set. It
# may contain a list of CIDRs which are checked for access. If a CIDR
@@ -751,7 +770,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_node_env_vars={"ENABLE_HTTP2": "true"}
# Enable API service auditing, available as of 1.3
-#openshift_master_audit_config={"basicAuditEnabled": true}
+#openshift_master_audit_config={"enabled": true}
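+# An expanded configuration could, for example, also control audit log rotation
+# (illustrative file path and values; key names assumed from the master auditConfig schema):
+#openshift_master_audit_config={"enabled": true, "auditFilePath": "/var/log/openshift-audit/audit.log", "maximumFileRetentionDays": 14, "maximumFileSizeMegabytes": 500, "maximumRetainedFiles": 5}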
# Enable origin repos that point at Centos PAAS SIG, defaults to true, only used
# by deployment_type=origin
diff --git a/inventory/byo/hosts.ose.example b/inventory/byo/hosts.ose.example
index 9774aa66b..f75a47bb8 100644
--- a/inventory/byo/hosts.ose.example
+++ b/inventory/byo/hosts.ose.example
@@ -30,17 +30,17 @@ openshift_deployment_type=openshift-enterprise
# use this to lookup the latest exact version of the container images, which is the tag actually used to configure
# the cluster. For RPM installations we just verify the version detected in your configured repos matches this
# release.
-openshift_release=v3.5
+openshift_release=v3.6
# Specify an exact container image tag to install or configure.
# WARNING: This value will be used for all hosts in containerized environments, even those that have another version installed.
# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
-#openshift_image_tag=v3.5.0
+#openshift_image_tag=v3.6.0
# Specify an exact rpm version to install or configure.
# WARNING: This value will be used for all hosts in RPM based environments, even those that have another version installed.
# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
-#openshift_pkg_version=-3.5.0
+#openshift_pkg_version=-3.6.0
# Install the openshift examples
#openshift_install_examples=true
@@ -78,6 +78,18 @@ openshift_release=v3.5
#openshift_docker_blocked_registries=registry.hacker.com
# Disable pushing to dockerhub
#openshift_docker_disable_push_dockerhub=True
+# Use Docker inside a System Container. Note that this is a tech preview and should
+# not be used to upgrade!
+# The following options for docker are ignored:
+# - docker_version
+# - docker_upgrade
+# The following options must not be used:
+# - openshift_docker_options
+#openshift_docker_use_system_container=False
+# Force the registry used for the system container. By default the registry
+# will be derived from the deployment type and ansible_distribution. Only
+# use this option if you are sure you know what you are doing!
+#openshift_docker_systemcontainer_image_registry_override="registry.example.com"
# Items added, as is, to end of /etc/sysconfig/docker OPTIONS
# Default value: "--log-driver=journald"
#openshift_docker_options="-l warn --ipv6=false"
@@ -557,7 +569,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_hosted_logging_elasticsearch_cluster_size=1
# Configure the prefix and version for the component images
#openshift_hosted_logging_deployer_prefix=registry.example.com:8888/openshift3/
-#openshift_hosted_logging_deployer_version=3.5.0
+#openshift_hosted_logging_deployer_version=3.6.0
# Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet')
# os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'
@@ -569,10 +581,17 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# network blocks should be private and should not conflict with network blocks
# in your infrastructure that pods may require access to. Can not be changed
# after deployment.
+#
+# WARNING: Do not pick subnets that overlap with the default Docker bridge subnet of
+# 172.17.0.0/16. Your installation will fail and/or your configuration change will
+# cause the Pod SDN or Cluster SDN to fail.
+#
+# WORKAROUND: If you must use an overlapping subnet, you can configure a non-conflicting
+# docker0 CIDR range by adding '--bip=192.168.2.1/24' to the DOCKER_NETWORK_OPTIONS
+# environment variable in /etc/sysconfig/docker-network.
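+#
+# For example, a non-conflicting docker0 bridge could be configured with a line
+# like the following in /etc/sysconfig/docker-network (illustrative value):
+#   DOCKER_NETWORK_OPTIONS='--bip=192.168.2.1/24'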
#osm_cluster_network_cidr=10.128.0.0/14
#openshift_portal_net=172.30.0.0/16
-
# ExternalIPNetworkCIDRs controls what values are acceptable for the
# service external IP field. If empty, no externalIP may be set. It
# may contain a list of CIDRs which are checked for access. If a CIDR
@@ -752,7 +771,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_node_env_vars={"ENABLE_HTTP2": "true"}
# Enable API service auditing, available as of 3.2
-#openshift_master_audit_config={"basicAuditEnabled": true}
+#openshift_master_audit_config={"enabled": true}
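+# An expanded configuration could, for example, also control audit log rotation
+# (illustrative file path and values; key names assumed from the master auditConfig schema):
+#openshift_master_audit_config={"enabled": true, "auditFilePath": "/var/log/openshift-audit/audit.log", "maximumFileRetentionDays": 14, "maximumFileSizeMegabytes": 500, "maximumRetainedFiles": 5}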
# Validity of the auto-generated OpenShift certificates in days.
# See also openshift_hosted_registry_cert_expire_days above.
diff --git a/library/kubeclient_ca.py b/library/kubeclient_ca.py
index 163624a76..a89a5574f 100644
--- a/library/kubeclient_ca.py
+++ b/library/kubeclient_ca.py
@@ -1,7 +1,5 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
-# vim: expandtab:tabstop=4:shiftwidth=4
-
''' kubeclient_ca ansible module '''
import base64
diff --git a/library/modify_yaml.py b/library/modify_yaml.py
index 8706e80c2..9b8f9ba33 100755
--- a/library/modify_yaml.py
+++ b/library/modify_yaml.py
@@ -1,7 +1,5 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
-# vim: expandtab:tabstop=4:shiftwidth=4
-
''' modify_yaml ansible module '''
import yaml
diff --git a/lookup_plugins/oo_option.py b/lookup_plugins/oo_option.py
index 7909d0092..4581cb6b8 100644
--- a/lookup_plugins/oo_option.py
+++ b/lookup_plugins/oo_option.py
@@ -1,7 +1,5 @@
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
-# vim: expandtab:tabstop=4:shiftwidth=4
-
'''
oo_option lookup plugin for openshift-ansible
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index 166d21918..19e6356e7 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -9,7 +9,7 @@
%global __requires_exclude ^/usr/bin/ansible-playbook$
Name: openshift-ansible
-Version: 3.6.22
+Version: 3.6.68
Release: 1%{?dist}
Summary: Openshift and Atomic Enterprise Ansible
License: ASL 2.0
@@ -17,7 +17,7 @@ URL: https://github.com/openshift/openshift-ansible
Source0: https://github.com/openshift/openshift-ansible/archive/%{commit}/%{name}-%{version}.tar.gz
BuildArch: noarch
-Requires: ansible >= 2.2.0.0-1
+Requires: ansible >= 2.2.2.0
Requires: python2
Requires: python-six
Requires: tar
@@ -25,6 +25,7 @@ Requires: openshift-ansible-docs = %{version}
Requires: java-1.8.0-openjdk-headless
Requires: httpd-tools
Requires: libselinux-python
+Requires: python-passlib
%description
Openshift and Atomic Enterprise Ansible
@@ -76,6 +77,9 @@ find -L %{buildroot}%{_datadir}/ansible/%{name}/playbooks -name filter_plugins -
cp -rp roles %{buildroot}%{_datadir}/ansible/%{name}/
# remove contiv role
rm -rf %{buildroot}%{_datadir}/ansible/%{name}/roles/contiv/*
+# touch a file in contiv so that it can be added to SCMs
+touch %{buildroot}%{_datadir}/ansible/%{name}/roles/contiv/.empty_dir
+
# openshift_master_facts symlinks filter_plugins/oo_filters.py from ansible_plugins/filter_plugins
pushd %{buildroot}%{_datadir}/ansible/%{name}/roles/openshift_master_facts/filter_plugins
ln -sf ../../../../../ansible_plugins/filter_plugins/oo_filters.py oo_filters.py
@@ -270,6 +274,308 @@ Atomic OpenShift Utilities includes
%changelog
+* Sat May 13 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.68-1
+- Updating registry-console image version during a post_control_plane upgrade
+ (ewolinet@redhat.com)
+- Remove userland-proxy-path from daemon.json (smilner@redhat.com)
+- Fix whistespace issues in custom template (smilner@redhat.com)
+- Always add proxy items to atomic.conf (smilner@redhat.com)
+- Move container-engine systemd environment to updated location
+ (smilner@redhat.com)
+- doc: Add link to daemon.json upstream doc (smilner@redhat.com)
+- Remove unused daemon.json keys (smilner@redhat.com)
+- bug 1448860. Change recovery_after_nodes to match node_quorum
+ (jcantril@redhat.com)
+- bug 1441369. Kibana memory limits bug 1439451. Kibana crash
+ (jcantril@redhat.com)
+- Extend repoquery command (of lib_utils role) to ignore excluders
+ (jchaloup@redhat.com)
+- lower case in /etc/daemon.json and correct block-registry (ghuang@redhat.com)
+- Fix for yedit custom separators (mwoodson@redhat.com)
+- Updating 3.6 enterprise registry-console template image version
+ (ewolinet@redhat.com)
+- Default to iptables on master (sdodson@redhat.com)
+- Rename blocked-registries to block-registries (smilner@redhat.com)
+- Ensure true is lowercase in daemon.json (smilner@redhat.com)
+- use docker_log_driver and /etc/docker/daemon.json to determine log driver
+ (rmeggins@redhat.com)
+- Temporarily revert to OSEv3 host group usage (rteague@redhat.com)
+- Add service file templates for master and node (smilner@redhat.com)
+- Update systemd units to use proper container service name
+ (smilner@redhat.com)
+- polish etcd_common role (jchaloup@redhat.com)
+- Note existence of Fedora tests and how to rerun (rhcarvalho@gmail.com)
+- Fix for OpenShift SDN Check (vincent.schwarzer@yahoo.de)
+- Updating oc_obj to use get instead of getattr (ewolinet@redhat.com)
+- Updating size suffix for metrics in role (ewolinet@redhat.com)
+- GlusterFS: Allow swapping an existing registry's backend storage
+ (jarrpa@redhat.com)
+- GlusterFS: Allow for a separate registry-specific playbook
+ (jarrpa@redhat.com)
+- GlusterFS: Improve role documentation (jarrpa@redhat.com)
+- hosted_registry: Get correct pod selector for GlusterFS storage
+ (jarrpa@redhat.com)
+- hosted registry: Fix typo (jarrpa@redhat.com)
+- run excluders over selected set of hosts during control_plane/node upgrade
+ (jchaloup@redhat.com)
+- Reserve kubernetes and 'kubernetes-' prefixed namespaces
+ (jliggitt@redhat.com)
+- oc_volume: Add missing parameter documentation (jarrpa@redhat.com)
+
+* Wed May 10 2017 Scott Dodson <sdodson@redhat.com> 3.6.67-1
+- byo: correct option name (gscrivan@redhat.com)
+- Fail if rpm version != docker image version (jchaloup@redhat.com)
+- Perform package upgrades in one transaction (sdodson@redhat.com)
+- Properly fail if OpenShift RPM version is undefined (rteague@redhat.com)
+
+* Wed May 10 2017 Scott Dodson <sdodson@redhat.com> 3.6.66-1
+- Fix issue with Travis-CI using old pip version (rteague@redhat.com)
+- Remove vim configuration from Python files (rhcarvalho@gmail.com)
+- Use local variables for daemon.json template (smilner@redhat.com)
+- Fix additional master cert & client config creation. (abutcher@redhat.com)
+
+* Tue May 09 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.62-1
+-
+
+* Tue May 09 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.61-1
+-
+
+* Mon May 08 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.60-1
+-
+
+* Mon May 08 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.59-1
+- Updating logging and metrics to restart api, ha and controllers when updating
+ master config (ewolinet@redhat.com)
+- Adding defaults for es_indices (ewolinet@redhat.com)
+- Updating logic for generating pvcs and their counts to prevent reuse when
+ looping (ewolinet@redhat.com)
+
+* Mon May 08 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.58-1
+- Moving Dockerfile content to images dir (jupierce@redhat.com)
+
+* Mon May 08 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.57-1
+-
+
+* Sun May 07 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.56-1
+-
+
+* Sat May 06 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.55-1
+- Fix 1448368, and some other minors issues (ghuang@redhat.com)
+- mux startup is broken without this fix (rmeggins@redhat.com)
+- Dockerfile: create symlink for /opt/app-root/src (gscrivan@redhat.com)
+- docs: Add basic system container dev docs (smilner@redhat.com)
+- installer: Add system container variable for log saving (smilner@redhat.com)
+- installer: support running as a system container (gscrivan@redhat.com)
+
+* Fri May 05 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.54-1
+- Allow oc_ modules to pass unicode results (rteague@redhat.com)
+- Ensure repo cache is clean on the first run (rteague@redhat.com)
+- move etcdctl.yml from etcd to etcd_common role (jchaloup@redhat.com)
+- Modified pick from release-1.5 for updating hawkular htpasswd generation
+ (ewolinet@redhat.com)
+
+* Thu May 04 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.53-1
+- Correctly setting the primary and replica shard count settings
+ (ewolinet@redhat.com)
+- System container docker (smilner@redhat.com)
+- Stop logging AWS credentials in master role. (dgoodwin@redhat.com)
+- Remove set operations from openshift_master_certificates iteration.
+ (abutcher@redhat.com)
+- Refactor system fact gathering to avoid dictionary size change during
+ iteration. (abutcher@redhat.com)
+- Refactor secret generation for python3. (abutcher@redhat.com)
+- redhat-ci: use requirements.txt (jlebon@redhat.com)
+
+* Wed May 03 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.52-1
+- Making mux with_items list evaluate as empty if didnt get objects before
+ (ewolinet@redhat.com)
+- etcd Upgrade Refactor (rteague@redhat.com)
+- v3.3 Upgrade Refactor (rteague@redhat.com)
+- v3.4 Upgrade Refactor (rteague@redhat.com)
+- v3.5 Upgrade Refactor (rteague@redhat.com)
+- v3.6 Upgrade Refactor (rteague@redhat.com)
+- Fix variants for v3.6 (rteague@redhat.com)
+- Normalizing groups. (kwoodson@redhat.com)
+- Use openshift_ca_host's hostnames to sign the CA (sdodson@redhat.com)
+
+* Tue May 02 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.51-1
+- Remove std_include from playbooks/byo/rhel_subscribe.yml
+ (abutcher@redhat.com)
+- Adding way to add labels and nodeselectors to logging project
+ (ewolinet@redhat.com)
+
+* Tue May 02 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.50-1
+- Don't double quote when conditions (sdodson@redhat.com)
+- Remove jinja template delimeters from when conditions (sdodson@redhat.com)
+- move excluder upgrade validation tasks under openshift_excluder role
+ (jchaloup@redhat.com)
+- Fix test compatibility with OpenSSL 1.1.0 (pierre-
+ louis.bonicoli@libregerbil.fr)
+
+* Mon May 01 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.49-1
+- Warn users about conflicts with docker0 CIDR range (lpsantil@gmail.com)
+- Bump ansible rpm dependency to 2.2.2.0 (sdodson@redhat.com)
+
+* Mon May 01 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.48-1
+-
+
+* Mon May 01 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.47-1
+-
+
+* Mon May 01 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.46-1
+- Contrib: Hook to verify modules match assembled fragments
+ (tbielawa@redhat.com)
+
+* Mon May 01 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.45-1
+-
+
+* Sun Apr 30 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.44-1
+- Refactor etcd roles (jchaloup@redhat.com)
+
+* Sat Apr 29 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.43-1
+- Document the Pull Request process (rhcarvalho@gmail.com)
+- Add Table of Contents (rhcarvalho@gmail.com)
+- Improve Contribution Guide (rhcarvalho@gmail.com)
+- Replace absolute with relative URLs (rhcarvalho@gmail.com)
+- Move repo structure to a separate document (rhcarvalho@gmail.com)
+- Remove outdated information about PRs (rhcarvalho@gmail.com)
+- Move link to BUILD.md to README.md (rhcarvalho@gmail.com)
+- Adding checks for starting mux for 2.2.0 (ewolinet@redhat.com)
+- Fix OpenShift registry deployment on OSE 3.2 (lhuard@amadeus.com)
+
+* Fri Apr 28 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.42-1
+- Fix certificate check Job examples (pep@redhat.com)
+- Add python-boto requirement (pep@redhat.com)
+
+* Thu Apr 27 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.41-1
+- Add bool for proper conditional handling (rteague@redhat.com)
+
+* Thu Apr 27 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.40-1
+- Fix cluster creation with `bin/cluster` when there’s no glusterfs node
+ (lhuard@amadeus.com)
+
+* Thu Apr 27 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.39-1
+- Move container build instructions to BUILD.md (pep@redhat.com)
+- Elaborate container image usage instructions (pep@redhat.com)
+
+* Wed Apr 26 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.38-1
+- .redhat-ci.yml: also publish journal logs (jlebon@redhat.com)
+- Standardize all Origin versioning on 3.6 (rteague@redhat.com)
+- integration tests: add CI scripts (lmeyer@redhat.com)
+- preflight int tests: define image builds to support tests (lmeyer@redhat.com)
+- preflight int tests: generalize; add tests (lmeyer@redhat.com)
+- Add stub of preflight integration tests (rhcarvalho@gmail.com)
+- Move Python unit tests to subdirectory (rhcarvalho@gmail.com)
+- Revert "Add /etc/sysconfig/etcd to etcd_container" (sdodson@redhat.com)
+- Replace original router cert variable names. (abutcher@redhat.com)
+- oc_obj: Allow for multiple kinds in delete (jarrpa@redhat.com)
+- Update v1.5 content (sdodson@redhat.com)
+- Update v1.6 content (sdodson@redhat.com)
+- Make the rhel_subscribe role subscribe to OSE 3.5 channel by default
+ (lhuard@amadeus.com)
+- Addressing yamllint (ewolinet@redhat.com)
+- Updating kibana-proxy secret key for server-tls entry (ewolinet@redhat.com)
+- Pick from issue3896 (ewolinet@redhat.com)
+- Cleanup comments and remove extraneous tasks (sdodson@redhat.com)
+- Store backups in /var/lib/etcd/openshift-backup (sdodson@redhat.com)
+- Create member/snap directory encase it doesn't exist (sdodson@redhat.com)
+- Copy v3 data dir when performing backup (sdodson@redhat.com)
+
+* Tue Apr 25 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.37-1
+- Differentiate between service serving router certificate and custom
+ openshift_hosted_router_certificate when replacing the router certificate.
+ (abutcher@redhat.com)
+
+* Tue Apr 25 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.36-1
+- Update swap disable tasks (rteague@redhat.com)
+- Removing resource version to remove object conflicts caused by race
+ conditions. (kwoodson@redhat.com)
+- cast openshift_logging_use_mux_client to bool (rmeggins@redhat.com)
+- mux does not require privileged, only hostmount-anyuid (rmeggins@redhat.com)
+- Switched Heapster to use certificates generated by OpenShift
+ (juraci@kroehling.de)
+- Use metrics and logging deployer tag v3.4 for enterprise (sdodson@redhat.com)
+- Remove v1.5 and v1.6 metrics/logging templates (sdodson@redhat.com)
+
+* Sun Apr 23 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.35-1
+-
+
+* Fri Apr 21 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.34-1
+- GlusterFS: provide default for groups.oo_glusterfs_to_config in with_items
+ (jarrpa@redhat.com)
+
+* Fri Apr 21 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.33-1
+- Adding module calls instead of command for idempotency. (kwoodson@redhat.com)
+- Use return_value when value is constant (pierre-
+ louis.bonicoli@libregerbil.fr)
+- Add missing mock for locate_oc_binary method (pierre-
+ louis.bonicoli@libregerbil.fr)
+
+* Fri Apr 21 2017 Scott Dodson <sdodson@redhat.com> 3.6.32-1
+- Don't check excluder versions when they're not enabled (sdodson@redhat.com)
+
+* Fri Apr 21 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.31-1
+- Stop all services prior to upgrading, start all services after
+ (sdodson@redhat.com)
+
+* Thu Apr 20 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.30-1
+- Add Ansible syntax checks to tox (rteague@redhat.com)
+- Add /etc/sysconfig/etcd to etcd_container (me@fale.io)
+- openshift_version: improve messaging (lmeyer@redhat.com)
+- Simplify memory availability check, review tests (rhcarvalho@gmail.com)
+- Simplify mixin class (rhcarvalho@gmail.com)
+- Simplify disk availability check, review tests (rhcarvalho@gmail.com)
+- add disk and memory availability check tests (jvallejo@redhat.com)
+- add ram and storage preflight check (jvallejo@redhat.com)
+- Fix paths for file includes (rteague@redhat.com)
+- Fix instantiation of action plugin in test fixture (rhcarvalho@gmail.com)
+- Introduce Elasticsearch readiness probe (lukas.vlcek@gmail.com)
+- added a empty file to the contiv empty dir. This allows contiv to be vendored
+ in git (mwoodson@redhat.com)
+
+* Wed Apr 19 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.29-1
+- Create openshift-metrics entrypoint playbook (rteague@redhat.com)
+
+* Tue Apr 18 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.28-1
+- Minor v3.6 upgrade docs fixes (rteague@redhat.com)
+
+* Tue Apr 18 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.27-1
+- repo: start testing PRs on Fedora Atomic Host (jlebon@redhat.com)
+
+* Tue Apr 18 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.26-1
+- Correct role dependencies (rteague@redhat.com)
+- Allow for GlusterFS to provide registry storage (jarrpa@redhat.com)
+- Integrate GlusterFS into OpenShift installation (jarrpa@redhat.com)
+- GlusterFS playbook and role (jarrpa@redhat.com)
+
+* Mon Apr 17 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.25-1
+- Fix default image tag for enterprise (sdodson@redhat.com)
+- Cast etcd_debug to a boolean (skuznets@redhat.com)
+
+* Fri Apr 14 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.24-1
+- tox tests: pin test requirement versions (lmeyer@redhat.com)
+- This is no longer a widely encountered issue (sdodson@redhat.com)
+- Standardize use of byo and common for network_manager.yml
+ (rteague@redhat.com)
+- Disable swap space on nodes at install and upgrade (rteague@redhat.com)
+- Do not check package version on non-master/node (rhcarvalho@gmail.com)
+
+* Thu Apr 13 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.23-1
+- Refactor initialize groups tasks (rteague@redhat.com)
+- tox tests: pin test requirement versions (lmeyer@redhat.com)
+- skip PackageAvailability check if not yum (jvallejo@redhat.com)
+- Document service_type for openshift-enterprise (rhcarvalho@gmail.com)
+- Remove references to outdated deployment_type (rhcarvalho@gmail.com)
+- Update deployment_type documentation (rhcarvalho@gmail.com)
+- Document merge time trends page (rhcarvalho@gmail.com)
+- Remove outdated documentation (rhcarvalho@gmail.com)
+- Remove outdated build instructions (rhcarvalho@gmail.com)
+- openshift_sanitize_inventory: disallow conflicting deployment types
+ (lmeyer@redhat.com)
+- Refactor docker upgrade playbooks (rteague@redhat.com)
+- Changed Hawkular Metrics secrets to use a format similar to the one
+ automatically generated by OpenShift (juraci@kroehling.de)
+
* Wed Apr 12 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.22-1
- Fixed spelling mistake. (kwoodson@redhat.com)
- Remove unnecessary folder refs (rteague@redhat.com)
diff --git a/playbooks/adhoc/create_pv/create_pv.yaml b/playbooks/adhoc/create_pv/create_pv.yaml
index 81c1ee653..64f861c6a 100644
--- a/playbooks/adhoc/create_pv/create_pv.yaml
+++ b/playbooks/adhoc/create_pv/create_pv.yaml
@@ -20,7 +20,7 @@
pre_tasks:
- fail:
msg: "This playbook requires {{item}} to be set."
- when: "{{ item }} is not defined or {{ item }} == ''"
+ when: item is not defined or item == ''
with_items:
- cli_volume_size
- cli_device_name
diff --git a/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml b/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml
index f638fab83..507ac0f05 100644
--- a/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml
+++ b/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml
@@ -33,7 +33,7 @@
pre_tasks:
- fail:
msg: "This playbook requires {{item}} to be set."
- when: "{{ item }} is not defined or {{ item }} == ''"
+ when: item is not defined or item == ''
with_items:
- cli_tag_name
- cli_volume_size
diff --git a/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml b/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml
index d988a28b0..3059d3dc5 100755
--- a/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml
+++ b/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml
@@ -24,7 +24,7 @@
pre_tasks:
- fail:
msg: "This playbook requires {{item}} to be set."
- when: "{{ item }} is not defined or {{ item }} == ''"
+ when: item is not defined or item == ''
with_items:
- cli_docker_device
diff --git a/playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml b/playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml
index b6dde357e..5e12cd181 100644
--- a/playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml
+++ b/playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml
@@ -25,7 +25,7 @@
- fail:
msg: "This playbook requires {{item}} to be set."
- when: "{{ item }} is not defined or {{ item }} == ''"
+ when: item is not defined or item == ''
with_items:
- cli_tag_name
diff --git a/playbooks/adhoc/grow_docker_vg/filter_plugins/grow_docker_vg_filters.py b/playbooks/adhoc/grow_docker_vg/filter_plugins/grow_docker_vg_filters.py
index daff68fbe..cacd0b0f3 100644
--- a/playbooks/adhoc/grow_docker_vg/filter_plugins/grow_docker_vg_filters.py
+++ b/playbooks/adhoc/grow_docker_vg/filter_plugins/grow_docker_vg_filters.py
@@ -1,6 +1,5 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
-# vim: expandtab:tabstop=4:shiftwidth=4
'''
Custom filters for use in openshift-ansible
'''
diff --git a/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml b/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml
index 598f1966d..eb8440d1b 100644
--- a/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml
+++ b/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml
@@ -42,7 +42,7 @@
pre_tasks:
- fail:
msg: "This playbook requires {{item}} to be set."
- when: "{{ item }} is not defined or {{ item }} == ''"
+ when: item is not defined or item == ''
with_items:
- cli_tag_name
- cli_volume_size
diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml
index ffdcd0ce1..6119990fe 100644
--- a/playbooks/adhoc/uninstall.yml
+++ b/playbooks/adhoc/uninstall.yml
@@ -125,7 +125,7 @@
- name: Remove flannel package
package: name=flannel state=absent
when: openshift_use_flannel | default(false) | bool
- when: "{{ not is_atomic | bool }}"
+ when: not is_atomic | bool
- shell: systemctl reset-failed
changed_when: False
@@ -146,7 +146,7 @@
- lbr0
- vlinuxbr
- vovsbr
- when: "{{ openshift_remove_all | default(true) | bool }}"
+ when: openshift_remove_all | default(true) | bool
- shell: atomic uninstall "{{ item }}"-master-api
changed_when: False
@@ -239,7 +239,7 @@
changed_when: False
failed_when: False
with_items: "{{ images_to_delete.results }}"
- when: "{{ openshift_uninstall_images | default(True) | bool }}"
+ when: openshift_uninstall_images | default(True) | bool
- name: remove sdn drop files
file:
@@ -252,7 +252,7 @@
- /etc/sysconfig/openshift-node
- /etc/sysconfig/openvswitch
- /run/openshift-sdn
- when: "{{ openshift_remove_all | default(True) | bool }}"
+ when: openshift_remove_all | default(True) | bool
- find: path={{ item }} file_type=file
register: files
@@ -305,8 +305,15 @@
- shell: systemctl daemon-reload
changed_when: False
+ - name: restart container-engine
+ service: name=container-engine state=restarted
+ ignore_errors: true
+ register: container_engine
+
- name: restart docker
service: name=docker state=restarted
+ ignore_errors: true
+ when: "container_engine.state != 'started'"
- name: restart NetworkManager
service: name=NetworkManager state=restarted
diff --git a/playbooks/aws/openshift-cluster/cluster_hosts.yml b/playbooks/aws/openshift-cluster/cluster_hosts.yml
index fbaf81dec..119df9c7d 100644
--- a/playbooks/aws/openshift-cluster/cluster_hosts.yml
+++ b/playbooks/aws/openshift-cluster/cluster_hosts.yml
@@ -8,6 +8,8 @@ g_lb_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_lb'] | default([])
g_nfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_nfs'] | default([])) }}"
+g_glusterfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-glusterfs'] | default([])) }}"
+
g_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_master'] | default([])) }}"
g_new_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_new_master'] | default([])) }}"
diff --git a/playbooks/byo/openshift-cluster/cluster_hosts.yml b/playbooks/byo/openshift-cluster/cluster_hosts.yml
index cb464cf0d..9d086b7b6 100644
--- a/playbooks/byo/openshift-cluster/cluster_hosts.yml
+++ b/playbooks/byo/openshift-cluster/cluster_hosts.yml
@@ -13,6 +13,10 @@ g_new_node_hosts: "{{ groups.new_nodes | default([]) }}"
g_nfs_hosts: "{{ groups.nfs | default([]) }}"
+g_glusterfs_hosts: "{{ groups.glusterfs | default([]) }}"
+
+g_glusterfs_registry_hosts: "{{ groups.glusterfs_registry | default(g_glusterfs_hosts) }}"
+
g_all_hosts: "{{ g_master_hosts | union(g_node_hosts) | union(g_etcd_hosts)
| union(g_lb_hosts) | union(g_nfs_hosts)
| union(g_new_node_hosts)| union(g_new_master_hosts)
diff --git a/playbooks/byo/openshift-cluster/config.yml b/playbooks/byo/openshift-cluster/config.yml
index 4db0720d0..acf5469bf 100644
--- a/playbooks/byo/openshift-cluster/config.yml
+++ b/playbooks/byo/openshift-cluster/config.yml
@@ -1,4 +1,8 @@
---
+- include: initialize_groups.yml
+ tags:
+ - always
+
- include: ../../common/openshift-cluster/std_include.yml
tags:
- always
diff --git a/playbooks/byo/openshift-cluster/enable_dnsmasq.yml b/playbooks/byo/openshift-cluster/enable_dnsmasq.yml
index 32f9ebfd3..9ce8f0d3c 100644
--- a/playbooks/byo/openshift-cluster/enable_dnsmasq.yml
+++ b/playbooks/byo/openshift-cluster/enable_dnsmasq.yml
@@ -1,26 +1,4 @@
---
-- name: Create initial host groups for localhost
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tags:
- - always
- tasks:
- - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
- - name: Evaluate group l_oo_all_hosts
- add_host:
- name: "{{ item }}"
- groups: l_oo_all_hosts
- with_items: "{{ g_all_hosts | default([]) }}"
- changed_when: False
-
-- name: Create initial host groups for all hosts
- hosts: l_oo_all_hosts
- gather_facts: no
- tags:
- - always
- tasks:
- - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
+- include: initialize_groups.yml
- include: ../../common/openshift-cluster/enable_dnsmasq.yml
diff --git a/playbooks/byo/openshift-cluster/initialize_groups.yml b/playbooks/byo/openshift-cluster/initialize_groups.yml
new file mode 100644
index 000000000..2a725510a
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/initialize_groups.yml
@@ -0,0 +1,10 @@
+---
+- name: Create initial host groups for localhost
+ hosts: localhost
+ connection: local
+ become: no
+ gather_facts: no
+ tags:
+ - always
+ tasks:
+ - include_vars: cluster_hosts.yml
diff --git a/playbooks/byo/openshift-cluster/openshift-logging.yml b/playbooks/byo/openshift-cluster/openshift-logging.yml
index f8eebe898..76f165c6d 100644
--- a/playbooks/byo/openshift-cluster/openshift-logging.yml
+++ b/playbooks/byo/openshift-cluster/openshift-logging.yml
@@ -4,29 +4,7 @@
# Hosted logging on. See inventory/byo/hosts.*.example for the
# currently supported method.
#
-- name: Create initial host groups for localhost
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tags:
- - always
- tasks:
- - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
- - name: Evaluate group l_oo_all_hosts
- add_host:
- name: "{{ item }}"
- groups: l_oo_all_hosts
- with_items: "{{ g_all_hosts | default([]) }}"
- changed_when: False
-
-- name: Create initial host groups for all hosts
- hosts: l_oo_all_hosts
- gather_facts: no
- tags:
- - always
- tasks:
- - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
+- include: initialize_groups.yml
- include: ../../common/openshift-cluster/openshift_logging.yml
vars:
diff --git a/playbooks/byo/openshift-cluster/openshift-metrics.yml b/playbooks/byo/openshift-cluster/openshift-metrics.yml
new file mode 100644
index 000000000..5ad3a1a01
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/openshift-metrics.yml
@@ -0,0 +1,4 @@
+---
+- include: initialize_groups.yml
+
+- include: ../../common/openshift-cluster/openshift_metrics.yml
diff --git a/playbooks/byo/openshift-cluster/redeploy-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-certificates.yml
index ad24b9ad0..012ce69ec 100644
--- a/playbooks/byo/openshift-cluster/redeploy-certificates.yml
+++ b/playbooks/byo/openshift-cluster/redeploy-certificates.yml
@@ -1,4 +1,8 @@
---
+- include: initialize_groups.yml
+ tags:
+ - always
+
- include: ../../common/openshift-cluster/std_include.yml
tags:
- always
diff --git a/playbooks/byo/openshift-cluster/redeploy-etcd-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-etcd-certificates.yml
index ee49364fa..8516baee8 100644
--- a/playbooks/byo/openshift-cluster/redeploy-etcd-certificates.yml
+++ b/playbooks/byo/openshift-cluster/redeploy-etcd-certificates.yml
@@ -1,4 +1,8 @@
---
+- include: initialize_groups.yml
+ tags:
+ - always
+
- include: ../../common/openshift-cluster/std_include.yml
tags:
- always
diff --git a/playbooks/byo/openshift-cluster/redeploy-master-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-master-certificates.yml
index 9c8248c4e..566e8b261 100644
--- a/playbooks/byo/openshift-cluster/redeploy-master-certificates.yml
+++ b/playbooks/byo/openshift-cluster/redeploy-master-certificates.yml
@@ -1,4 +1,8 @@
---
+- include: initialize_groups.yml
+ tags:
+ - always
+
- include: ../../common/openshift-cluster/std_include.yml
tags:
- always
diff --git a/playbooks/byo/openshift-cluster/redeploy-node-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-node-certificates.yml
index 1695111d0..42777e5e6 100644
--- a/playbooks/byo/openshift-cluster/redeploy-node-certificates.yml
+++ b/playbooks/byo/openshift-cluster/redeploy-node-certificates.yml
@@ -1,4 +1,8 @@
---
+- include: initialize_groups.yml
+ tags:
+ - always
+
- include: ../../common/openshift-cluster/std_include.yml
tags:
- always
diff --git a/playbooks/byo/openshift-cluster/redeploy-openshift-ca.yml b/playbooks/byo/openshift-cluster/redeploy-openshift-ca.yml
index e44e95467..3b33e0d6f 100644
--- a/playbooks/byo/openshift-cluster/redeploy-openshift-ca.yml
+++ b/playbooks/byo/openshift-cluster/redeploy-openshift-ca.yml
@@ -1,4 +1,8 @@
---
+- include: initialize_groups.yml
+ tags:
+ - always
+
- include: ../../common/openshift-cluster/std_include.yml
tags:
- always
diff --git a/playbooks/byo/openshift-cluster/redeploy-registry-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-registry-certificates.yml
index 53ee68db9..30feabab3 100644
--- a/playbooks/byo/openshift-cluster/redeploy-registry-certificates.yml
+++ b/playbooks/byo/openshift-cluster/redeploy-registry-certificates.yml
@@ -1,4 +1,8 @@
---
+- include: initialize_groups.yml
+ tags:
+ - always
+
- include: ../../common/openshift-cluster/std_include.yml
tags:
- always
diff --git a/playbooks/byo/openshift-cluster/redeploy-router-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-router-certificates.yml
index f8c267569..2630fb234 100644
--- a/playbooks/byo/openshift-cluster/redeploy-router-certificates.yml
+++ b/playbooks/byo/openshift-cluster/redeploy-router-certificates.yml
@@ -1,4 +1,8 @@
---
+- include: initialize_groups.yml
+ tags:
+ - always
+
- include: ../../common/openshift-cluster/std_include.yml
tags:
- always
diff --git a/playbooks/byo/openshift-cluster/upgrades/README.md b/playbooks/byo/openshift-cluster/upgrades/README.md
index 0425ba518..0f64f40f3 100644
--- a/playbooks/byo/openshift-cluster/upgrades/README.md
+++ b/playbooks/byo/openshift-cluster/upgrades/README.md
@@ -4,5 +4,6 @@ cluster. Additional notes for the associated upgrade playbooks are
provided in their respective directories.
# Upgrades available
-- [OpenShift Enterprise 3.4 to 3.5](v3_5/README.md) (works also to upgrade OpenShift origin from 1.4.x to 1.5.x)
-- [OpenShift Enterprise 3.3 to 3.4](v3_4/README.md) (works also to upgrade OpenShift origin from 1.3.x to 1.4.x)
+- [OpenShift Container Platform 3.5 to 3.6](v3_6/README.md) (works also to upgrade OpenShift Origin from 1.5.x to 3.6.x)
+- [OpenShift Container Platform 3.4 to 3.5](v3_5/README.md) (works also to upgrade OpenShift Origin from 1.4.x to 1.5.x)
+- [OpenShift Container Platform 3.3 to 3.4](v3_4/README.md) (works also to upgrade OpenShift Origin from 1.3.x to 1.4.x)
diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml
index 5c1c29395..7f31e26e1 100644
--- a/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml
@@ -1,27 +1,5 @@
---
# Playbook to upgrade Docker to the max allowable version for an OpenShift cluster.
-- name: Create initial host groups for localhost
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tags:
- - always
- tasks:
- - include_vars: ../../cluster_hosts.yml
- - name: Evaluate group l_oo_all_hosts
- add_host:
- name: "{{ item }}"
- groups: l_oo_all_hosts
- with_items: "{{ g_all_hosts | default([]) }}"
- changed_when: False
-
-- name: Create initial host groups for all hosts
- hosts: l_oo_all_hosts
- gather_facts: no
- tags:
- - always
- tasks:
- - include_vars: ../../cluster_hosts.yml
+- include: ../../initialize_groups.yml
- include: ../../../../common/openshift-cluster/upgrades/docker/docker_upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml b/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml
index 106dcc12d..8005a17a3 100644
--- a/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml
@@ -1,26 +1,4 @@
---
-- name: Create initial host groups for localhost
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tags:
- - always
- tasks:
- - include_vars: ../cluster_hosts.yml
- - name: Evaluate group l_oo_all_hosts
- add_host:
- name: "{{ item }}"
- groups: l_oo_all_hosts
- with_items: "{{ g_all_hosts | default([]) }}"
- changed_when: False
-
-- name: Create initial host groups for all hosts
- hosts: l_oo_all_hosts
- gather_facts: no
- tags:
- - always
- tasks:
- - include_vars: ../cluster_hosts.yml
+- include: ../initialize_groups.yml
- include: ../../../common/openshift-cluster/upgrades/etcd/main.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
index d268850d8..697a18c4d 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
@@ -2,106 +2,6 @@
#
# Full Control Plane + Nodes Upgrade
#
-- include: ../../../../common/openshift-cluster/upgrades/init.yml
- tags:
- - pre_upgrade
+- include: ../../initialize_groups.yml
-- name: Configure the upgrade target for the common upgrade tasks
- hosts: l_oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
- openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
-
-# Pre-upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos and initialize facts on all hosts
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-master/validate_restart.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- tasks:
- - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml
- vars:
- master_config_hook: "v3_3/master_config_upgrade.yml"
-
-- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml
- vars:
- node_config_hook: "v3_3/node_config_upgrade.yml"
-
-- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml
+- include: ../../../../common/openshift-cluster/upgrades/v3_3/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
index d11e51640..4d284c279 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
@@ -11,101 +11,6 @@
#
# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
#
-- include: ../../../../common/openshift-cluster/upgrades/init.yml
- tags:
- - pre_upgrade
+- include: ../../initialize_groups.yml
-- name: Configure the upgrade target for the common upgrade tasks
- hosts: l_oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
- openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
-
-# Pre-upgrade
-- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on control plane hosts
- hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-master/validate_restart.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_etcd_to_config
- tasks:
- - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml
- vars:
- master_config_hook: "v3_3/master_config_upgrade.yml"
-
-- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml
+- include: ../../../../common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
index 5a0f143ac..180a2821f 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
@@ -4,103 +4,6 @@
#
# Upgrades nodes only, but requires the control plane to have already been upgraded.
#
-- include: ../../../../common/openshift-cluster/upgrades/init.yml
- tags:
- - pre_upgrade
+- include: ../../initialize_groups.yml
-- name: Configure the upgrade target for the common upgrade tasks
- hosts: l_oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
- openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
-
-# Pre-upgrade
-- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on nodes
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- roles:
- - openshift_repos
- tags:
- - pre_upgrade
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- name: Verify masters are already upgraded
- hosts: oo_masters_to_config
- tags:
- - pre_upgrade
- tasks:
- - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
- when: openshift.common.version != openshift_version
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml
- vars:
- node_config_hook: "v3_3/node_config_upgrade.yml"
+- include: ../../../../common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml
index 25d8cd2ba..8cce91b3f 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml
@@ -2,104 +2,6 @@
#
# Full Control Plane + Nodes Upgrade
#
-- include: ../../../../common/openshift-cluster/upgrades/init.yml
- tags:
- - pre_upgrade
+- include: ../../initialize_groups.yml
-- name: Configure the upgrade target for the common upgrade tasks
- hosts: l_oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
- openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
-
-# Pre-upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos and initialize facts on all hosts
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-master/validate_restart.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- tasks:
- - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml
- vars:
- master_config_hook: "v3_4/master_config_upgrade.yml"
-
-- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml
+- include: ../../../../common/openshift-cluster/upgrades/v3_4/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
index d52f3c111..8e5d0f5f9 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
@@ -11,101 +11,6 @@
#
# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
#
-- include: ../../../../common/openshift-cluster/upgrades/init.yml
- tags:
- - pre_upgrade
+- include: ../../initialize_groups.yml
-- name: Configure the upgrade target for the common upgrade tasks
- hosts: l_oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
- openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
-
-# Pre-upgrade
-- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on control plane hosts
- hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-master/validate_restart.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_etcd_to_config
- tasks:
- - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml
- vars:
- master_config_hook: "v3_4/master_config_upgrade.yml"
-
-- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml
+- include: ../../../../common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
index 07c734a40..d5329b858 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
@@ -4,101 +4,6 @@
#
# Upgrades nodes only, but requires the control plane to have already been upgraded.
#
-- include: ../../../../common/openshift-cluster/upgrades/init.yml
- tags:
- - pre_upgrade
+- include: ../../initialize_groups.yml
-- name: Configure the upgrade target for the common upgrade tasks
- hosts: l_oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
- openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
-
-# Pre-upgrade
-- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on nodes
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- roles:
- - openshift_repos
- tags:
- - pre_upgrade
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- name: Verify masters are already upgraded
- hosts: oo_masters_to_config
- tags:
- - pre_upgrade
- tasks:
- - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
- when: openshift.common.version != openshift_version
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml
+- include: ../../../../common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml
index 86f5a36ca..f44d55ad2 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml
@@ -2,110 +2,6 @@
#
# Full Control Plane + Nodes Upgrade
#
-- include: ../../../../common/openshift-cluster/upgrades/init.yml
- tags:
- - pre_upgrade
+- include: ../../initialize_groups.yml
-- name: Configure the upgrade target for the common upgrade tasks
- hosts: l_oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
- openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
-
-# Pre-upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos and initialize facts on all hosts
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml
- tags:
- - pre_upgrade
-
-# Note: During upgrade the openshift excluder is not unexcluded inside the initialize_openshift_version.yml play.
-# So it is necassary to run the play after running disable_excluder.yml.
-- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-master/validate_restart.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/v3_5/validator.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- tasks:
- - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/v3_5/storage_upgrade.yml
+- include: ../../../../common/openshift-cluster/upgrades/v3_5/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
index a2f1cd2b1..2377713fa 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
@@ -11,105 +11,6 @@
#
# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
#
-- include: ../../../../common/openshift-cluster/upgrades/init.yml
- tags:
- - pre_upgrade
+- include: ../../initialize_groups.yml
-# Configure the upgrade target for the common upgrade tasks:
-- hosts: l_oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
- openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
-
-# Pre-upgrade
-- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on control plane hosts
- hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-master/validate_restart.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/v3_5/validator.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_etcd_to_config
- tasks:
- - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/v3_5/storage_upgrade.yml
+- include: ../../../../common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
index f858de3d5..5b3f6ab06 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
@@ -4,101 +4,6 @@
#
# Upgrades nodes only, but requires the control plane to have already been upgraded.
#
-- include: ../../../../common/openshift-cluster/upgrades/init.yml
- tags:
- - pre_upgrade
+- include: ../../initialize_groups.yml
-# Configure the upgrade target for the common upgrade tasks:
-- hosts: l_oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
- openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
-
-# Pre-upgrade
-- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on nodes
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- roles:
- - openshift_repos
- tags:
- - pre_upgrade
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- name: Verify masters are already upgraded
- hosts: oo_masters_to_config
- tags:
- - pre_upgrade
- tasks:
- - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
- when: openshift.common.version != openshift_version
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml
+- include: ../../../../common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_6/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_6/README.md
index 930cc753c..797af671a 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_6/README.md
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_6/README.md
@@ -1,11 +1,10 @@
-# v3.5 Major and Minor Upgrade Playbook
+# v3.6 Major and Minor Upgrade Playbook
## Overview
-This playbook currently performs the
-following steps.
+This playbook currently performs the following steps.
* Upgrade and restart master services
- * Unschedule node.
+ * Unschedule node
* Upgrade and restart docker
* Upgrade and restart node services
* Modifies the subset of the configuration necessary
@@ -15,4 +14,7 @@ following steps.
* Updates image streams and quickstarts
## Usage
+
+```
ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml
+```
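+
+The control plane and nodes can also be upgraded separately; a sketch of the
+two-step invocation (assuming the same inventory layout as above):
+
+```
+ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
+ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
+```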
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_6/roles b/playbooks/byo/openshift-cluster/upgrades/v3_6/roles
deleted file mode 120000
index 6bc1a7aef..000000000
--- a/playbooks/byo/openshift-cluster/upgrades/v3_6/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../roles \ No newline at end of file
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml
index 900bbc8d8..40120b3e8 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml
@@ -2,110 +2,6 @@
#
# Full Control Plane + Nodes Upgrade
#
-- include: ../../../../common/openshift-cluster/upgrades/init.yml
- tags:
- - pre_upgrade
+- include: ../../initialize_groups.yml
-- name: Configure the upgrade target for the common upgrade tasks
- hosts: l_oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: '3.6'
- openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
-
-# Pre-upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos and initialize facts on all hosts
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml
- tags:
- - pre_upgrade
-
-# Note: During upgrade the openshift excluder is not unexcluded inside the initialize_openshift_version.yml play.
-# So it is necassary to run the play after running disable_excluder.yml.
-- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-master/validate_restart.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/v3_6/validator.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- tasks:
- - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/v3_6/storage_upgrade.yml
+- include: ../../../../common/openshift-cluster/upgrades/v3_6/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
index 5bd0f7ac5..408a4c631 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
@@ -11,105 +11,6 @@
#
# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
#
-- include: ../../../../common/openshift-cluster/upgrades/init.yml
- tags:
- - pre_upgrade
+- include: ../../initialize_groups.yml
-# Configure the upgrade target for the common upgrade tasks:
-- hosts: l_oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: '3.6'
- openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
-
-# Pre-upgrade
-- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on control plane hosts
- hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-master/validate_restart.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/v3_6/validator.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_etcd_to_config
- tasks:
- - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/v3_6/storage_upgrade.yml
+- include: ../../../../common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
index 96d89dbdd..b5f42b804 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
@@ -4,101 +4,6 @@
#
# Upgrades nodes only, but requires the control plane to have already been upgraded.
#
-- include: ../../../../common/openshift-cluster/upgrades/init.yml
- tags:
- - pre_upgrade
+- include: ../../initialize_groups.yml
-# Configure the upgrade target for the common upgrade tasks:
-- hosts: l_oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: '3.6'
- openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
-
-# Pre-upgrade
-- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on nodes
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- roles:
- - openshift_repos
- tags:
- - pre_upgrade
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- name: Verify masters are already upgraded
- hosts: oo_masters_to_config
- tags:
- - pre_upgrade
- tasks:
- - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
- when: openshift.common.version != openshift_version
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml
+- include: ../../../../common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
diff --git a/playbooks/byo/openshift-etcd/config.yml b/playbooks/byo/openshift-etcd/config.yml
new file mode 100644
index 000000000..dd3f47a4d
--- /dev/null
+++ b/playbooks/byo/openshift-etcd/config.yml
@@ -0,0 +1,14 @@
+---
+- include: ../openshift-cluster/initialize_groups.yml
+ tags:
+ - always
+
+- include: ../../common/openshift-cluster/std_include.yml
+ tags:
+ - always
+
+- include: ../../common/openshift-etcd/config.yml
+ vars:
+ openshift_cluster_id: "{{ cluster_id | default('default') }}"
+ openshift_debug_level: "{{ debug_level | default(2) }}"
+ openshift_deployment_subtype: "{{ deployment_subtype | default(none) }}"
diff --git a/playbooks/byo/openshift-etcd/restart.yml b/playbooks/byo/openshift-etcd/restart.yml
index 19403116d..d43533641 100644
--- a/playbooks/byo/openshift-etcd/restart.yml
+++ b/playbooks/byo/openshift-etcd/restart.yml
@@ -1,4 +1,8 @@
---
+- include: ../openshift-cluster/initialize_groups.yml
+ tags:
+ - always
+
- include: ../../common/openshift-cluster/std_include.yml
tags:
- always
diff --git a/playbooks/byo/openshift-glusterfs/README.md b/playbooks/byo/openshift-glusterfs/README.md
new file mode 100644
index 000000000..f62aea229
--- /dev/null
+++ b/playbooks/byo/openshift-glusterfs/README.md
@@ -0,0 +1,98 @@
+# OpenShift GlusterFS Playbooks
+
+These playbooks are intended to enable the use of GlusterFS volumes by pods in
+OpenShift. While they try to provide a sane set of defaults, they do cover a
+variety of scenarios and configurations, so read carefully. :)
+
+## Playbook: config.yml
+
+This is the main playbook that integrates GlusterFS into a new or existing
+OpenShift cluster. It will also, if specified, configure a hosted Docker
+registry with GlusterFS backend storage.
+
+This playbook requires the `glusterfs` group to exist in the Ansible inventory
+file. The hosts in this group are the nodes of the GlusterFS cluster.
+
+ * If this is a newly configured cluster, each host must have a
+ `glusterfs_devices` variable defined, which must be a list of block
+ storage devices intended for use only by the GlusterFS cluster. If this is
+ also an external GlusterFS cluster, you must specify
+ `openshift_storage_glusterfs_is_native=False`. If the cluster is to be
+ managed by an external heketi service you must also specify
+ `openshift_storage_glusterfs_heketi_is_native=False` and
+ `openshift_storage_glusterfs_heketi_url=<URL>` with the URL to the heketi
+ service. All these variables are specified in `[OSEv3:vars]` (an
+ illustrative snippet follows the inventory example below).
+ * If this is an existing cluster, you do not need to specify a list of block
+ devices but you must specify the following variables in `[OSEv3:vars]`:
+ * `openshift_storage_glusterfs_is_missing=False`
+ * `openshift_storage_glusterfs_heketi_is_missing=False`
+
+By default, pods for a native GlusterFS cluster will be created in the
+`default` namespace. To change this, specify
+`openshift_storage_glusterfs_namespace=<other namespace>` in `[OSEv3:vars]`.
+
+To configure the deployment of a Docker registry with GlusterFS backend
+storage, specify `openshift_hosted_registry_storage_kind=glusterfs` in
+`[OSEv3:vars]`. To create a separate GlusterFS cluster for use only by the
+registry, specify a `glusterfs_registry` group that is populated, like the
+`glusterfs` group, with the nodes for the separate cluster. If no
+`glusterfs_registry` group is specified, the cluster defined by the `glusterfs`
+group will be used.
+
+To swap an existing hosted registry's backend storage for a GlusterFS volume,
+specify `openshift_hosted_registry_storage_glusterfs_swap=True`. To
+additionally copy any existing contents from the existing hosted registry,
+specify `openshift_hosted_registry_storage_glusterfs_swapcopy=True`.
+
+**NOTE:** For each namespace that is to have access to GlusterFS volumes an
+Endpoints resource pointing to the GlusterFS cluster nodes and a corresponding
+Service resource must be created. If dynamic provisioning using StorageClasses
+is configured, these resources are created automatically in the namespaces that
+require them. This playbook also takes care of creating these resources in the
+namespaces used for deployment.
+
+An example of a minimal inventory file:
+```
+[OSEv3:children]
+masters
+nodes
+glusterfs
+
+[OSEv3:vars]
+ansible_ssh_user=root
+deployment_type=origin
+
+[masters]
+master
+
+[nodes]
+node0
+node1
+node2
+
+[glusterfs]
+node0 glusterfs_devices='[ "/dev/sdb" ]'
+node1 glusterfs_devices='[ "/dev/sdb", "/dev/sdc" ]'
+node2 glusterfs_devices='[ "/dev/sdd" ]'
+```
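+
+For an external GlusterFS cluster managed by an external heketi service, the
+variables described above would be combined roughly as follows (an illustrative
+sketch only; the heketi URL is a placeholder):
+
+```
+[OSEv3:vars]
+openshift_storage_glusterfs_is_native=False
+openshift_storage_glusterfs_heketi_is_native=False
+openshift_storage_glusterfs_heketi_url=http://heketi.example.com:8080
+```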
+
+## Playbook: registry.yml
+
+This playbook is intended for admins who want to deploy a hosted Docker
+registry with GlusterFS backend storage on an existing OpenShift cluster. It
+has all the same requirements and behaviors as `config.yml`.
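+
+Invocation mirrors the other BYO playbooks; for example (assuming your
+inventory lives at `~/ansible-inventory`):
+
+```
+ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-glusterfs/registry.yml
+```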
+
+## Role: openshift_storage_glusterfs
+
+The bulk of the work is done by the `openshift_storage_glusterfs` role. This
+role can handle the deployment of GlusterFS (if it is to be hosted on the
+OpenShift cluster), the registration of GlusterFS nodes (hosted or standalone),
+and (if specified) integration as backend storage for a hosted Docker registry.
+
+See the documentation in the role's directory for further details.
+
+## Role: openshift_hosted
+
+The `openshift_hosted` role recognizes `glusterfs` as a possible storage
+backend for a hosted docker registry. It will also, if configured, handle the
+swap of an existing registry's backend storage to a GlusterFS volume.
diff --git a/playbooks/byo/openshift-glusterfs/config.yml b/playbooks/byo/openshift-glusterfs/config.yml
new file mode 100644
index 000000000..3f11f3991
--- /dev/null
+++ b/playbooks/byo/openshift-glusterfs/config.yml
@@ -0,0 +1,10 @@
+---
+- include: ../openshift-cluster/initialize_groups.yml
+ tags:
+ - always
+
+- include: ../../common/openshift-cluster/std_include.yml
+ tags:
+ - always
+
+- include: ../../common/openshift-glusterfs/config.yml
diff --git a/playbooks/byo/openshift-glusterfs/filter_plugins b/playbooks/byo/openshift-glusterfs/filter_plugins
new file mode 120000
index 000000000..99a95e4ca
--- /dev/null
+++ b/playbooks/byo/openshift-glusterfs/filter_plugins
@@ -0,0 +1 @@
+../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/byo/openshift-glusterfs/lookup_plugins b/playbooks/byo/openshift-glusterfs/lookup_plugins
new file mode 120000
index 000000000..ac79701db
--- /dev/null
+++ b/playbooks/byo/openshift-glusterfs/lookup_plugins
@@ -0,0 +1 @@
+../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/byo/openshift-glusterfs/registry.yml b/playbooks/byo/openshift-glusterfs/registry.yml
new file mode 100644
index 000000000..6ee6febdb
--- /dev/null
+++ b/playbooks/byo/openshift-glusterfs/registry.yml
@@ -0,0 +1,10 @@
+---
+- include: ../openshift-cluster/initialize_groups.yml
+ tags:
+ - always
+
+- include: ../../common/openshift-cluster/std_include.yml
+ tags:
+ - always
+
+- include: ../../common/openshift-glusterfs/registry.yml
diff --git a/playbooks/byo/openshift-glusterfs/roles b/playbooks/byo/openshift-glusterfs/roles
new file mode 120000
index 000000000..20c4c58cf
--- /dev/null
+++ b/playbooks/byo/openshift-glusterfs/roles
@@ -0,0 +1 @@
+../../../roles \ No newline at end of file
diff --git a/playbooks/byo/openshift-master/restart.yml b/playbooks/byo/openshift-master/restart.yml
index 21e4cff1b..7988863f3 100644
--- a/playbooks/byo/openshift-master/restart.yml
+++ b/playbooks/byo/openshift-master/restart.yml
@@ -1,4 +1,8 @@
---
+- include: ../openshift-cluster/initialize_groups.yml
+ tags:
+ - always
+
- include: ../../common/openshift-cluster/std_include.yml
tags:
- always
diff --git a/playbooks/byo/openshift-master/scaleup.yml b/playbooks/byo/openshift-master/scaleup.yml
index a5705e990..8aa07a664 100644
--- a/playbooks/byo/openshift-master/scaleup.yml
+++ b/playbooks/byo/openshift-master/scaleup.yml
@@ -1,27 +1,5 @@
---
-- name: Create initial host groups for localhost
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tags:
- - always
- tasks:
- - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
- - name: Evaluate group l_oo_all_hosts
- add_host:
- name: "{{ item }}"
- groups: l_oo_all_hosts
- with_items: "{{ g_all_hosts | default([]) }}"
- changed_when: False
-
-- name: Create initial host groups for all hosts
- hosts: l_oo_all_hosts
- gather_facts: no
- tags:
- - always
- tasks:
- - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
+- include: ../openshift-cluster/initialize_groups.yml
- include: ../../common/openshift-master/scaleup.yml
vars:
diff --git a/playbooks/byo/openshift-node/network_manager.yml b/playbooks/byo/openshift-node/network_manager.yml
index 9bb3ea17f..b23692237 100644
--- a/playbooks/byo/openshift-node/network_manager.yml
+++ b/playbooks/byo/openshift-node/network_manager.yml
@@ -1,42 +1,4 @@
---
-- name: Create initial host groups for localhost
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tags:
- - always
- tasks:
- - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
- - name: Evaluate group l_oo_all_hosts
- add_host:
- name: "{{ item }}"
- groups: l_oo_all_hosts
- with_items: "{{ g_all_hosts | default([]) }}"
- changed_when: False
+- include: ../openshift-cluster/initialize_groups.yml
-- name: Install and configure NetworkManager
- hosts: l_oo_all_hosts
- become: yes
- tasks:
- - name: install NetworkManager
- package:
- name: 'NetworkManager'
- state: present
-
- - name: configure NetworkManager
- lineinfile:
- dest: "/etc/sysconfig/network-scripts/ifcfg-{{ ansible_default_ipv4['interface'] }}"
- regexp: '^{{ item }}='
- line: '{{ item }}=yes'
- state: present
- create: yes
- with_items:
- - 'USE_PEERDNS'
- - 'NM_CONTROLLED'
-
- - name: enable and start NetworkManager
- service:
- name: 'NetworkManager'
- state: started
- enabled: yes
+- include: ../../common/openshift-node/network_manager.yml
diff --git a/playbooks/byo/openshift-node/restart.yml b/playbooks/byo/openshift-node/restart.yml
index 6861625b9..92665d71d 100644
--- a/playbooks/byo/openshift-node/restart.yml
+++ b/playbooks/byo/openshift-node/restart.yml
@@ -1,4 +1,8 @@
---
+- include: ../openshift-cluster/initialize_groups.yml
+ tags:
+ - always
+
- include: ../../common/openshift-cluster/std_include.yml
tags:
- always
diff --git a/playbooks/byo/openshift-node/scaleup.yml b/playbooks/byo/openshift-node/scaleup.yml
index 88d236b53..c6965fd6f 100644
--- a/playbooks/byo/openshift-node/scaleup.yml
+++ b/playbooks/byo/openshift-node/scaleup.yml
@@ -1,27 +1,5 @@
---
-- name: Create initial host groups for localhost
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tags:
- - always
- tasks:
- - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
- - name: Evaluate group l_oo_all_hosts
- add_host:
- name: "{{ item }}"
- groups: l_oo_all_hosts
- with_items: "{{ g_all_hosts | default([]) }}"
- changed_when: False
-
-- name: Create initial host groups for all hosts
- hosts: l_oo_all_hosts
- gather_facts: no
- tags:
- - always
- tasks:
- - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
+- include: ../openshift-cluster/initialize_groups.yml
- include: ../../common/openshift-node/scaleup.yml
vars:
diff --git a/playbooks/byo/openshift-preflight/check.yml b/playbooks/byo/openshift-preflight/check.yml
index c5f05d0f0..eb763221f 100644
--- a/playbooks/byo/openshift-preflight/check.yml
+++ b/playbooks/byo/openshift-preflight/check.yml
@@ -1,6 +1,9 @@
---
-- hosts: OSEv3
- name: run OpenShift health checks
+- include: ../openshift-cluster/initialize_groups.yml
+
+- name: Run OpenShift health checks
+ # Temporarily reverting to OSEv3 until group standardization is complete
+ hosts: OSEv3
roles:
- openshift_health_checker
post_tasks:
diff --git a/playbooks/byo/openshift_facts.yml b/playbooks/byo/openshift_facts.yml
index a21b6a0a5..a8c1c3a88 100644
--- a/playbooks/byo/openshift_facts.yml
+++ b/playbooks/byo/openshift_facts.yml
@@ -1,9 +1,14 @@
---
+- include: openshift-cluster/initialize_groups.yml
+ tags:
+ - always
+
- include: ../common/openshift-cluster/std_include.yml
tags:
- always
- name: Gather Cluster facts
+ # Temporarily reverting to OSEv3 until group standardization is complete
hosts: OSEv3
roles:
- openshift_facts
diff --git a/playbooks/byo/rhel_subscribe.yml b/playbooks/byo/rhel_subscribe.yml
index 8c6d77024..1b14ff32e 100644
--- a/playbooks/byo/rhel_subscribe.yml
+++ b/playbooks/byo/rhel_subscribe.yml
@@ -1,10 +1,11 @@
---
-- include: ../common/openshift-cluster/std_include.yml
+- include: openshift-cluster/initialize_groups.yml
tags:
- always
- name: Subscribe hosts, update repos and update OS packages
- hosts: l_oo_all_hosts
+ # Temporarily reverting to OSEv3 until group standardization is complete
+ hosts: OSEv3
roles:
- role: rhel_subscribe
when: deployment_type in ['atomic-enterprise', 'enterprise', 'openshift-enterprise'] and
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
index 3c70db6a9..1482b3a3f 100644
--- a/playbooks/common/openshift-cluster/config.yml
+++ b/playbooks/common/openshift-cluster/config.yml
@@ -3,9 +3,15 @@
tags:
- always
-- include: disable_excluder.yml
+- name: Disable excluders
+ hosts: oo_masters_to_config:oo_nodes_to_config
tags:
- always
+ gather_facts: no
+ roles:
+ - role: openshift_excluder
+ r_openshift_excluder_action: disable
+ r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
- include: ../openshift-etcd/config.yml
tags:
@@ -31,10 +37,20 @@
tags:
- node
+- include: ../openshift-glusterfs/config.yml
+ tags:
+ - glusterfs
+
- include: openshift_hosted.yml
tags:
- hosted
-- include: reset_excluder.yml
+- name: Re-enable excluder if it was previously enabled
+ hosts: oo_masters_to_config:oo_nodes_to_config
tags:
- always
+ gather_facts: no
+ roles:
+ - role: openshift_excluder
+ r_openshift_excluder_action: enable
+ r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
diff --git a/playbooks/common/openshift-cluster/disable_excluder.yml b/playbooks/common/openshift-cluster/disable_excluder.yml
deleted file mode 100644
index f664c51c9..000000000
--- a/playbooks/common/openshift-cluster/disable_excluder.yml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-- name: Disable excluders
- hosts: oo_masters_to_config:oo_nodes_to_config
- gather_facts: no
- tasks:
-
- # During installation the excluders are installed with present state.
- # So no pre-validation check here as the excluders are either to be installed (present = latest)
- # or they are not going to be updated if already installed
-
- # disable excluders based on their status
- - include_role:
- name: openshift_excluder
- tasks_from: disable
- vars:
- openshift_excluder_package_state: present
- docker_excluder_package_state: present
diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml
index 45a4875a3..46932b27f 100644
--- a/playbooks/common/openshift-cluster/evaluate_groups.yml
+++ b/playbooks/common/openshift-cluster/evaluate_groups.yml
@@ -5,29 +5,40 @@
become: no
gather_facts: no
tasks:
- - fail:
+ - name: Evaluate groups - g_etcd_hosts required
+ fail:
msg: This playbook requires g_etcd_hosts to be set
- when: "{{ g_etcd_hosts is not defined }}"
+ when: g_etcd_hosts is not defined
- - fail:
+ - name: Evaluate groups - g_master_hosts or g_new_master_hosts required
+ fail:
msg: This playbook requires g_master_hosts or g_new_master_hosts to be set
- when: "{{ g_master_hosts is not defined and g_new_master_hosts is not defined }}"
+ when: g_master_hosts is not defined or g_new_master_hosts is not defined
- - fail:
+ - name: Evaluate groups - g_node_hosts or g_new_node_hosts required
+ fail:
msg: This playbook requires g_node_hosts or g_new_node_hosts to be set
- when: "{{ g_node_hosts is not defined and g_new_node_hosts is not defined }}"
+ when: g_node_hosts is not defined or g_new_node_hosts is not defined
- - fail:
+ - name: Evaluate groups - g_lb_hosts required
+ fail:
msg: This playbook requires g_lb_hosts to be set
- when: "{{ g_lb_hosts is not defined }}"
+ when: g_lb_hosts is not defined
- - fail:
+ - name: Evaluate groups - g_nfs_hosts required
+ fail:
msg: This playbook requires g_nfs_hosts to be set
- when: "{{ g_nfs_hosts is not defined }}"
+ when: g_nfs_hosts is not defined
- - fail:
+ - name: Evaluate groups - g_nfs_hosts is single host
+ fail:
msg: The nfs group must be limited to one host
- when: "{{ (groups[g_nfs_hosts] | default([])) | length > 1 }}"
+ when: (groups[g_nfs_hosts] | default([])) | length > 1
+
+ - name: Evaluate groups - g_glusterfs_hosts required
+ fail:
+ msg: This playbook requires g_glusterfs_hosts to be set
+ when: g_glusterfs_hosts is not defined
- name: Evaluate oo_all_hosts
add_host:
@@ -47,13 +58,13 @@
with_items: "{{ g_master_hosts | union(g_new_master_hosts) | default([]) }}"
changed_when: no
- - name: Evaluate oo_etcd_to_config
+ - name: Evaluate oo_first_master
add_host:
- name: "{{ item }}"
- groups: oo_etcd_to_config
+ name: "{{ g_master_hosts[0] }}"
+ groups: oo_first_master
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_become: "{{ g_sudo | default(omit) }}"
- with_items: "{{ g_etcd_hosts | default([]) }}"
+ when: g_master_hosts|length > 0
changed_when: no
- name: Evaluate oo_masters_to_config
@@ -65,41 +76,59 @@
with_items: "{{ g_new_master_hosts | default(g_master_hosts | default([], true), true) }}"
changed_when: no
- - name: Evaluate oo_nodes_to_config
+ - name: Evaluate oo_etcd_to_config
add_host:
name: "{{ item }}"
- groups: oo_nodes_to_config
+ groups: oo_etcd_to_config
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_become: "{{ g_sudo | default(omit) }}"
- with_items: "{{ g_new_node_hosts | default(g_node_hosts | default([], true), true) }}"
+ with_items: "{{ g_etcd_hosts | default([]) }}"
changed_when: no
- # Skip adding the master to oo_nodes_to_config when g_new_node_hosts is
- - name: Add master to oo_nodes_to_config
+ - name: Evaluate oo_first_etcd
add_host:
- name: "{{ item }}"
- groups: oo_nodes_to_config
+ name: "{{ g_etcd_hosts[0] }}"
+ groups: oo_first_etcd
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_become: "{{ g_sudo | default(omit) }}"
- with_items: "{{ g_master_hosts | default([]) }}"
- when: "{{ g_nodeonmaster | default(false) | bool and not g_new_node_hosts | default(false) | bool }}"
+ when: g_etcd_hosts|length > 0
changed_when: no
- - name: Evaluate oo_first_etcd
+ # We use two groups: one for the hosts we're upgrading, which doesn't include embedded etcd,
+ # and one for backing up, which includes the embedded etcd host. There's no need to
+ # upgrade embedded etcd; that happens automatically when the master is updated.
+ - name: Evaluate oo_etcd_hosts_to_upgrade
add_host:
- name: "{{ g_etcd_hosts[0] }}"
- groups: oo_first_etcd
+ name: "{{ item }}"
+ groups: oo_etcd_hosts_to_upgrade
+ with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else [] }}"
+ changed_when: False
+
+ - name: Evaluate oo_etcd_hosts_to_backup
+ add_host:
+ name: "{{ item }}"
+ groups: oo_etcd_hosts_to_backup
+ with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master }}"
+ changed_when: False
+
+ - name: Evaluate oo_nodes_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_nodes_to_config
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- when: "{{ g_etcd_hosts|length > 0 }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
+ with_items: "{{ g_new_node_hosts | default(g_node_hosts | default([], true), true) }}"
changed_when: no
- - name: Evaluate oo_first_master
+ # Skip adding the master to oo_nodes_to_config when g_new_node_hosts is
+ - name: Add master to oo_nodes_to_config
add_host:
- name: "{{ g_master_hosts[0] }}"
- groups: oo_first_master
+ name: "{{ item }}"
+ groups: oo_nodes_to_config
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_become: "{{ g_sudo | default(omit) }}"
- when: "{{ g_master_hosts|length > 0 }}"
+ with_items: "{{ g_master_hosts | default([]) }}"
+ when: g_nodeonmaster | default(false) | bool and not g_new_node_hosts | default(false) | bool
changed_when: no
- name: Evaluate oo_lb_to_config
@@ -119,3 +148,12 @@
ansible_become: "{{ g_sudo | default(omit) }}"
with_items: "{{ g_nfs_hosts | default([]) }}"
changed_when: no
+
+ - name: Evaluate oo_glusterfs_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_glusterfs_to_config
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
+ with_items: "{{ g_glusterfs_hosts | union(g_glusterfs_registry_hosts) | default([]) }}"
+ changed_when: no
diff --git a/playbooks/common/openshift-cluster/initialize_openshift_version.yml b/playbooks/common/openshift-cluster/initialize_openshift_version.yml
index 07b38920f..f4e52869e 100644
--- a/playbooks/common/openshift-cluster/initialize_openshift_version.yml
+++ b/playbooks/common/openshift-cluster/initialize_openshift_version.yml
@@ -1,13 +1,14 @@
---
# NOTE: requires openshift_facts be run
- name: Verify compatible yum/subscription-manager combination
- hosts: l_oo_all_hosts
+ hosts: oo_all_hosts
gather_facts: no
tasks:
# See:
# https://bugzilla.redhat.com/show_bug.cgi?id=1395047
# https://bugzilla.redhat.com/show_bug.cgi?id=1282961
# https://github.com/openshift/openshift-ansible/issues/1138
+ # Consider the repoquery module for this work
- name: Check for bad combinations of yum and subscription-manager
command: >
{{ repoquery_cmd }} --installed --qf '%{version}' "yum"
@@ -16,7 +17,7 @@
when: not openshift.common.is_atomic | bool
- fail:
msg: Incompatible versions of yum and subscription-manager found. You may need to update yum and yum-utils.
- when: "not openshift.common.is_atomic | bool and 'Plugin \"search-disabled-repos\" requires API 2.7. Supported API is 2.6.' in yum_ver_test.stdout"
+ when: not openshift.common.is_atomic | bool and 'Plugin \"search-disabled-repos\" requires API 2.7. Supported API is 2.6.' in yum_ver_test.stdout
- name: Determine openshift_version to configure on first master
hosts: oo_first_master
diff --git a/playbooks/common/openshift-cluster/openshift_logging.yml b/playbooks/common/openshift-cluster/openshift_logging.yml
index d96a78c4c..57580406c 100644
--- a/playbooks/common/openshift-cluster/openshift_logging.yml
+++ b/playbooks/common/openshift-cluster/openshift_logging.yml
@@ -1,11 +1,13 @@
---
+- include: evaluate_groups.yml
+
- name: OpenShift Aggregated Logging
hosts: oo_first_master
roles:
- openshift_logging
- name: Update Master configs
- hosts: masters:!oo_first_master
+ hosts: oo_masters:!oo_first_master
tasks:
- block:
- include_role:
diff --git a/playbooks/common/openshift-cluster/openshift_metrics.yml b/playbooks/common/openshift-cluster/openshift_metrics.yml
index 9f38ceea6..bcff4a1a1 100644
--- a/playbooks/common/openshift-cluster/openshift_metrics.yml
+++ b/playbooks/common/openshift-cluster/openshift_metrics.yml
@@ -1,4 +1,6 @@
---
+- include: evaluate_groups.yml
+
- name: OpenShift Metrics
hosts: oo_first_master
roles:
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/ca.yml b/playbooks/common/openshift-cluster/redeploy-certificates/ca.yml
index 3b26abcc7..4fa7f9cdf 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/ca.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/ca.yml
@@ -130,7 +130,7 @@
state: absent
changed_when: false
-- include: ../openshift-etcd/restart.yml
+- include: ../../openshift-etcd/restart.yml
# Update master config when ca-bundle not referenced. Services will be
# restarted below after new CA certificate has been distributed.
@@ -322,7 +322,7 @@
group: "{{ 'root' if item == 'root' else _ansible_ssh_user_gid.stdout }}"
with_items: "{{ client_users }}"
-- include: ../openshift-master/restart.yml
+- include: ../../openshift-master/restart.yml
- name: Distribute OpenShift CA certificate to nodes
hosts: oo_nodes_to_config
@@ -371,4 +371,4 @@
state: absent
changed_when: false
-- include: ../openshift-node/restart.yml
+- include: ../../openshift-node/restart.yml
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/router.yml b/playbooks/common/openshift-cluster/redeploy-certificates/router.yml
index a7b614341..9f14f2d69 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/router.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/router.yml
@@ -67,7 +67,66 @@
service.alpha.openshift.io/serving-cert-secret-name=router-certs
--config={{ mktemp.stdout }}/admin.kubeconfig
-n default
- when: l_router_dc.rc == 0 and 'router-certs' in router_secrets
+ when: l_router_dc.rc == 0 and 'router-certs' in router_secrets and openshift_hosted_router_certificate is undefined
+
+ - block:
+ - assert:
+ that:
+ - "'certfile' in openshift_hosted_router_certificate"
+ - "'keyfile' in openshift_hosted_router_certificate"
+ - "'cafile' in openshift_hosted_router_certificate"
+ msg: |-
+ openshift_hosted_router_certificate has been set in the inventory but is
+ missing one or more required keys. Ensure that 'certfile', 'keyfile',
+ and 'cafile' keys have been specified for the openshift_hosted_router_certificate
+ inventory variable.
+
+ - name: Read router certificate and key
+ become: no
+ local_action:
+ module: slurp
+ src: "{{ item }}"
+ register: openshift_router_certificate_output
+ # Defaulting dictionary keys to none to avoid deprecation warnings
+ # (future fatal errors) during template evaluation. Dictionary keys
+ # won't be accessed unless openshift_hosted_router_certificate is
+ # defined and has all keys (certfile, keyfile, cafile) which we
+ # check above.
+ with_items:
+ - "{{ (openshift_hosted_router_certificate | default({'certfile':none})).certfile }}"
+ - "{{ (openshift_hosted_router_certificate | default({'keyfile':none})).keyfile }}"
+ - "{{ (openshift_hosted_router_certificate | default({'cafile':none})).cafile }}"
+
+ - name: Write temporary router certificate file
+ copy:
+ content: "{% for certificate in openshift_router_certificate_output.results -%}{{ certificate.content | b64decode }}{% endfor -%}"
+ dest: "{{ mktemp.stdout }}/openshift-hosted-router-certificate.pem"
+ mode: 0600
+
+ - name: Write temporary router key file
+ copy:
+ content: "{{ (openshift_router_certificate_output.results
+ | oo_collect('content', {'source':(openshift_hosted_router_certificate | default({'keyfile':none})).keyfile}))[0] | b64decode }}"
+ dest: "{{ mktemp.stdout }}/openshift-hosted-router-certificate.key"
+ mode: 0600
+
+ - name: Replace router-certs secret
+ shell: >
+ {{ openshift.common.client_binary }} secrets new router-certs
+ tls.crt="{{ mktemp.stdout }}/openshift-hosted-router-certificate.pem"
+ tls.key="{{ mktemp.stdout }}/openshift-hosted-router-certificate.key"
+ --type=kubernetes.io/tls
+ --confirm
+ -o json | {{ openshift.common.client_binary }} replace -f -
+
+ - name: Remove temporary router certificate and key files
+ file:
+ path: "{{ item }}"
+ state: absent
+ with_items:
+ - "{{ mktemp.stdout }}/openshift-hosted-router-certificate.pem"
+ - "{{ mktemp.stdout }}/openshift-hosted-router-certificate.key"
+ when: l_router_dc.rc == 0 and 'router-certs' in router_secrets and openshift_hosted_router_certificate is defined
- name: Redeploy router
command: >
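
The new block above runs only when openshift_hosted_router_certificate is defined, and the assert requires exactly the certfile, keyfile and cafile keys. A hypothetical group_vars entry that would satisfy the check (the paths are examples only):

openshift_hosted_router_certificate:
  certfile: /path/to/custom-router.crt
  keyfile: /path/to/custom-router.key
  cafile: /path/to/custom-ca.crt

With that variable set, the router-certs secret is rebuilt from the supplied files instead of being regenerated from the service serving-cert annotation.
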
diff --git a/playbooks/common/openshift-cluster/reset_excluder.yml b/playbooks/common/openshift-cluster/reset_excluder.yml
deleted file mode 100644
index eaa8ce39c..000000000
--- a/playbooks/common/openshift-cluster/reset_excluder.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-- name: Re-enable excluder if it was previously enabled
- hosts: oo_masters_to_config:oo_nodes_to_config
- gather_facts: no
- tasks:
- - include_role:
- name: openshift_excluder
- tasks_from: enable
diff --git a/playbooks/common/openshift-cluster/std_include.yml b/playbooks/common/openshift-cluster/std_include.yml
index 74cc1d527..6ed31a644 100644
--- a/playbooks/common/openshift-cluster/std_include.yml
+++ b/playbooks/common/openshift-cluster/std_include.yml
@@ -1,28 +1,4 @@
---
-- name: Create initial host groups for localhost
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tags:
- - always
- tasks:
- - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
- - name: Evaluate group l_oo_all_hosts
- add_host:
- name: "{{ item }}"
- groups: l_oo_all_hosts
- with_items: "{{ g_all_hosts | default([]) }}"
- changed_when: no
-
-- name: Create initial host groups for all hosts
- hosts: l_oo_all_hosts
- gather_facts: no
- tags:
- - always
- tasks:
- - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
-
- include: evaluate_groups.yml
tags:
- always
diff --git a/playbooks/common/openshift-cluster/upgrades/disable_excluder.yml b/playbooks/common/openshift-cluster/upgrades/disable_excluder.yml
deleted file mode 100644
index d1e431c5e..000000000
--- a/playbooks/common/openshift-cluster/upgrades/disable_excluder.yml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-- name: Record excluder state and disable
- hosts: oo_masters_to_config:oo_nodes_to_config
- gather_facts: no
- tasks:
- - include: pre/validate_excluder.yml
- vars:
- #repoquery_cmd: repoquery_cmd
- #openshift_upgrade_target: openshift_upgrade_target
- excluder: "{{ item }}"
- with_items:
- - "{{ openshift.common.service_type }}-docker-excluder"
- - "{{ openshift.common.service_type }}-excluder"
-
- # disable excluders based on their status
- - include_role:
- name: openshift_excluder
- tasks_from: disable
- vars:
- openshift_excluder_package_state: latest
- docker_excluder_package_state: latest
diff --git a/playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml b/playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml
new file mode 100644
index 000000000..800621857
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml
@@ -0,0 +1,12 @@
+---
+- name: Disable excluders
+ hosts: oo_masters_to_config
+ gather_facts: no
+ roles:
+ - role: openshift_excluder
+ r_openshift_excluder_action: disable
+ r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
+ r_openshift_excluder_verify_upgrade: true
+ r_openshift_excluder_upgrade_target: "{{ openshift_upgrade_target }}"
+ r_openshift_excluder_package_state: latest
+ r_openshift_excluder_docker_package_state: latest
diff --git a/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml b/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml
new file mode 100644
index 000000000..7988e97ab
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml
@@ -0,0 +1,12 @@
+---
+- name: Disable excluders
+ hosts: oo_nodes_to_config
+ gather_facts: no
+ roles:
+ - role: openshift_excluder
+ r_openshift_excluder_action: disable
+ r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
+ r_openshift_excluder_verify_upgrade: true
+ r_openshift_excluder_upgrade_target: "{{ openshift_upgrade_target }}"
+ r_openshift_excluder_package_state: latest
+ r_openshift_excluder_docker_package_state: latest
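
These two new playbooks replace the deleted include-based excluder handling by driving the openshift_excluder role entirely through r_openshift_excluder_* variables; the matching enable calls appear further down in this patch (post_control_plane.yml and upgrade_nodes.yml). A minimal, hypothetical invocation of the enable side, using only variables that appear in this diff:

- name: Re-enable excluders (illustrative)
  hosts: oo_nodes_to_config
  gather_facts: no
  roles:
  - role: openshift_excluder
    r_openshift_excluder_action: enable
    r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
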
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
index 7ef79afa9..9d0333ca8 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
@@ -1,10 +1,10 @@
---
- name: Backup etcd
- hosts: etcd_hosts_to_backup
+ hosts: oo_etcd_hosts_to_backup
vars:
embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
- timestamp: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
etcdctl_command: "{{ 'etcdctl' if not openshift.common.is_containerized or embedded_etcd else 'docker exec etcd_container etcdctl' if not openshift.common.is_etcd_system_container else 'runc exec etcd etcdctl' }}"
+ timestamp: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
roles:
- openshift_facts
tasks:
@@ -13,29 +13,20 @@
role: etcd
local_facts: {}
when: "'etcd' not in openshift"
-
- - stat: path=/var/lib/openshift
- register: var_lib_openshift
-
- - stat: path=/var/lib/origin
- register: var_lib_origin
-
- - name: Create origin symlink if necessary
- file: src=/var/lib/openshift/ dest=/var/lib/origin state=link
- when: var_lib_openshift.stat.exists == True and var_lib_origin.stat.exists == False
+ - set_fact:
+ etcd_backup_dir: "{{ openshift.etcd.etcd_data_dir }}/openshift-backup-{{ backup_tag | default('') }}{{ timestamp }}"
# TODO: replace shell module with command and update later checks
- # We assume to be using the data dir for all backups.
- name: Check available disk space for etcd backup
- shell: df --output=avail -k {{ openshift.common.data_dir }} | tail -n 1
+ shell: df --output=avail -k {{ openshift.etcd.etcd_data_dir }} | tail -n 1
register: avail_disk
# AUDIT:changed_when: `false` because we are only inspecting
# state, not manipulating anything
changed_when: false
# TODO: replace shell module with command and update later checks
- - name: Check current embedded etcd disk usage
- shell: du -k {{ openshift.etcd.etcd_data_dir }} | tail -n 1 | cut -f1
+ - name: Check current etcd disk usage
+ shell: du --exclude='*openshift-backup*' -k {{ openshift.etcd.etcd_data_dir }} | tail -n 1 | cut -f1
register: etcd_disk_usage
when: embedded_etcd | bool
# AUDIT:changed_when: `false` because we are only inspecting
@@ -52,37 +43,42 @@
# For non containerized and non embedded we should have the correct version of
# etcd installed already. So don't do anything.
#
- # For embedded or containerized we need to use the latest because OCP 3.3 uses
- # a version of etcd that can only be backed up with etcd-3.x and if it's
- # containerized then etcd version may be newer than that on the host so
- # upgrade it.
- #
- # On atomic we have neither yum nor dnf so ansible throws a hard to debug error
- # if you use package there, like this: "Could not find a module for unknown."
- # see https://bugzilla.redhat.com/show_bug.cgi?id=1408668
+ # For containerized installs we now exec into etcd_container
#
- # TODO - We should refactor all containerized backups to use the containerized
- # version of etcd to perform the backup rather than relying on the host's
- # binaries. Until we do that we'll continue to have problems backing up etcd
- # when atomic host has an older version than the version that's running in the
- # container whether that's embedded or not
- - name: Install latest etcd for containerized or embedded
+ # For embedded, non-containerized installs we need to ensure we have the
+ # latest version of etcd on the host.
+ - name: Install latest etcd for embedded
package:
name: etcd
state: latest
- when: ( embedded_etcd | bool or openshift.common.is_containerized ) and not openshift.common.is_atomic
+ when:
+ - embedded_etcd | bool
+ - not openshift.common.is_atomic | bool
- name: Generate etcd backup
command: >
{{ etcdctl_command }} backup --data-dir={{ openshift.etcd.etcd_data_dir }}
- --backup-dir={{ openshift.common.data_dir }}/etcd-backup-{{ backup_tag | default('') }}{{ timestamp }}
+ --backup-dir={{ etcd_backup_dir }}
+
+ # According to the docs change you can simply copy snap/db
+ # https://github.com/openshift/openshift-docs/commit/b38042de02d9780842dce95cfa0ef45d53b58bc6
+ - name: Check for v3 data store
+ stat:
+ path: "{{ openshift.etcd.etcd_data_dir }}/member/snap/db"
+ register: v3_db
+
+ - name: Copy etcd v3 data store
+ command: >
+ cp -a {{ openshift.etcd.etcd_data_dir }}/member/snap/db
+ {{ etcd_backup_dir }}/member/snap/
+ when: v3_db.stat.exists
- set_fact:
etcd_backup_complete: True
- name: Display location of etcd backup
debug:
- msg: "Etcd backup created in {{ openshift.common.data_dir }}/etcd-backup-{{ backup_tag | default('') }}{{ timestamp }}"
+ msg: "Etcd backup created in {{ etcd_backup_dir }}"
- name: Gate on etcd backup
hosts: localhost
@@ -91,10 +87,10 @@
tasks:
- set_fact:
etcd_backup_completed: "{{ hostvars
- | oo_select_keys(groups.etcd_hosts_to_backup)
+ | oo_select_keys(groups.oo_etcd_hosts_to_backup)
| oo_collect('inventory_hostname', {'etcd_backup_complete': true}) }}"
- set_fact:
- etcd_backup_failed: "{{ groups.etcd_hosts_to_backup | difference(etcd_backup_completed) }}"
+ etcd_backup_failed: "{{ groups.oo_etcd_hosts_to_backup | difference(etcd_backup_completed) }}"
- fail:
msg: "Upgrade cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}"
when: etcd_backup_failed | length > 0
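
The df and du tasks above only register raw kilobyte counts; the comparison that consumes them sits outside this hunk. A hypothetical gate on those registered values (variable and fact names follow the tasks above; the assert itself is illustrative, not part of the patch):

- name: Abort when there is not enough free space for an etcd backup
  assert:
    that:
    # both stdout values are kilobytes, per the df -k / du -k commands above
    - (avail_disk.stdout | int) > (etcd_disk_usage.stdout | default(0) | int)
    msg: "Not enough free space in {{ openshift.etcd.etcd_data_dir }} for an etcd backup"
  when: embedded_etcd | bool
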
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/files/etcdctl.sh b/playbooks/common/openshift-cluster/upgrades/etcd/files/etcdctl.sh
deleted file mode 120000
index 641e04e44..000000000
--- a/playbooks/common/openshift-cluster/upgrades/etcd/files/etcdctl.sh
+++ /dev/null
@@ -1 +0,0 @@
-../roles/etcd/files/etcdctl.sh \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/main.yml b/playbooks/common/openshift-cluster/upgrades/etcd/main.yml
index fa86d29fb..d9b59edcb 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/main.yml
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/main.yml
@@ -5,32 +5,6 @@
# mirrored packages on your own because only the GA and latest versions are
# available in the repos. So for Fedora we'll simply skip this, sorry.
-- include: ../../evaluate_groups.yml
- tags:
- - always
-
-# We use two groups one for hosts we're upgrading which doesn't include embedded etcd
-# The other for backing up which includes the embedded etcd host, there's no need to
-# upgrade embedded etcd that just happens when the master is updated.
-- name: Evaluate additional groups for etcd
- hosts: localhost
- connection: local
- become: no
- tasks:
- - name: Evaluate etcd_hosts_to_upgrade
- add_host:
- name: "{{ item }}"
- groups: etcd_hosts_to_upgrade
- with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else [] }}"
- changed_when: False
-
- - name: Evaluate etcd_hosts_to_backup
- add_host:
- name: "{{ item }}"
- groups: etcd_hosts_to_backup
- with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master }}"
- changed_when: False
-
- name: Backup etcd before upgrading anything
include: backup.yml
vars:
@@ -38,9 +12,11 @@
when: openshift_etcd_backup | default(true) | bool
- name: Drop etcdctl profiles
- hosts: etcd_hosts_to_upgrade
+ hosts: oo_etcd_hosts_to_upgrade
tasks:
- - include: roles/etcd/tasks/etcdctl.yml
+ - include_role:
+ name: etcd_common
+ tasks_from: etcdctl.yml
- name: Perform etcd upgrade
include: ./upgrade.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
index a9b5b94e6..54f9e21a1 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
@@ -1,49 +1,67 @@
---
- name: Determine etcd version
- hosts: etcd_hosts_to_upgrade
+ hosts: oo_etcd_hosts_to_upgrade
tasks:
- - name: Record RPM based etcd version
- command: rpm -qa --qf '%{version}' etcd\*
- args:
- warn: no
- register: etcd_rpm_version
- failed_when: false
+ - block:
+ - name: Record RPM based etcd version
+ command: rpm -qa --qf '%{version}' etcd\*
+ args:
+ warn: no
+ register: etcd_rpm_version
+ failed_when: false
+ # AUDIT:changed_when: `false` because we are only inspecting
+ # state, not manipulating anything
+ changed_when: false
+
+ - debug:
+ msg: "Etcd rpm version {{ etcd_rpm_version.stdout }} detected"
when: not openshift.common.is_containerized | bool
- # AUDIT:changed_when: `false` because we are only inspecting
- # state, not manipulating anything
- changed_when: false
-
- - name: Record containerized etcd version
- command: docker exec etcd_container rpm -qa --qf '%{version}' etcd\*
- register: etcd_container_version
- failed_when: false
- when: openshift.common.is_containerized | bool
- # AUDIT:changed_when: `false` because we are only inspecting
- # state, not manipulating anything
- changed_when: false
-
- - name: Record containerized etcd version
- command: docker exec etcd_container rpm -qa --qf '%{version}' etcd\*
- register: etcd_container_version
- failed_when: false
- when: openshift.common.is_containerized | bool and not openshift.common.is_etcd_system_container | bool
- # AUDIT:changed_when: `false` because we are only inspecting
- # state, not manipulating anything
- changed_when: false
-
- - name: Record containerized etcd version
- command: runc exec etcd_container rpm -qa --qf '%{version}' etcd\*
- register: etcd_container_version
- failed_when: false
- when: openshift.common.is_containerized | bool and openshift.common.is_etcd_system_container | bool
- # AUDIT:changed_when: `false` because we are only inspecting
- # state, not manipulating anything
- changed_when: false
+
+ - block:
+ - name: Record containerized etcd version (docker)
+ command: docker exec etcd_container rpm -qa --qf '%{version}' etcd\*
+ register: etcd_container_version_docker
+ failed_when: false
+ # AUDIT:changed_when: `false` because we are only inspecting
+ # state, not manipulating anything
+ changed_when: false
+ when:
+ - not openshift.common.is_etcd_system_container | bool
+
+ # Given that a registered variable is set even if the when condition
+ # is false, we need to set etcd_container_version separately
+ - set_fact:
+ etcd_container_version: "{{ etcd_container_version_docker.stdout }}"
+ when:
+ - not openshift.common.is_etcd_system_container | bool
+
+ - name: Record containerized etcd version (runc)
+ command: runc exec etcd_container rpm -qa --qf '%{version}' etcd\*
+ register: etcd_container_version_runc
+ failed_when: false
+ # AUDIT:changed_when: `false` because we are only inspecting
+ # state, not manipulating anything
+ changed_when: false
+ when:
+ - openshift.common.is_etcd_system_container | bool
+
+ # Given that a registered variable is set even if the when condition
+ # is false, we need to set etcd_container_version separately
+ - set_fact:
+ etcd_container_version: "{{ etcd_container_version_runc.stdout }}"
+ when:
+ - openshift.common.is_etcd_system_container | bool
+
+ - debug:
+ msg: "Etcd containerized version {{ etcd_container_version }} detected"
+
+ when:
+ - openshift.common.is_containerized | bool
# I really dislike this copy/pasta but I wasn't able to find a way to get it to loop
# through hosts, then loop through tasks only when appropriate
- name: Upgrade to 2.1
- hosts: etcd_hosts_to_upgrade
+ hosts: oo_etcd_hosts_to_upgrade
serial: 1
vars:
upgrade_version: '2.1'
@@ -52,7 +70,7 @@
when: etcd_rpm_version.stdout | default('99') | version_compare('2.1','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool
- name: Upgrade RPM hosts to 2.2
- hosts: etcd_hosts_to_upgrade
+ hosts: oo_etcd_hosts_to_upgrade
serial: 1
vars:
upgrade_version: '2.2'
@@ -61,16 +79,16 @@
when: etcd_rpm_version.stdout | default('99') | version_compare('2.2','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool
- name: Upgrade containerized hosts to 2.2.5
- hosts: etcd_hosts_to_upgrade
+ hosts: oo_etcd_hosts_to_upgrade
serial: 1
vars:
upgrade_version: 2.2.5
tasks:
- include: containerized_tasks.yml
- when: etcd_container_version.stdout | default('99') | version_compare('2.2','<') and openshift.common.is_containerized | bool
+ when: etcd_container_version | default('99') | version_compare('2.2','<') and openshift.common.is_containerized | bool
- name: Upgrade RPM hosts to 2.3
- hosts: etcd_hosts_to_upgrade
+ hosts: oo_etcd_hosts_to_upgrade
serial: 1
vars:
upgrade_version: '2.3'
@@ -79,16 +97,16 @@
when: etcd_rpm_version.stdout | default('99') | version_compare('2.3','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool
- name: Upgrade containerized hosts to 2.3.7
- hosts: etcd_hosts_to_upgrade
+ hosts: oo_etcd_hosts_to_upgrade
serial: 1
vars:
upgrade_version: 2.3.7
tasks:
- include: containerized_tasks.yml
- when: etcd_container_version.stdout | default('99') | version_compare('2.3','<') and openshift.common.is_containerized | bool
+ when: etcd_container_version | default('99') | version_compare('2.3','<') and openshift.common.is_containerized | bool
- name: Upgrade RPM hosts to 3.0
- hosts: etcd_hosts_to_upgrade
+ hosts: oo_etcd_hosts_to_upgrade
serial: 1
vars:
upgrade_version: '3.0'
@@ -97,16 +115,16 @@
when: etcd_rpm_version.stdout | default('99') | version_compare('3.0','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool
- name: Upgrade containerized hosts to etcd3 image
- hosts: etcd_hosts_to_upgrade
+ hosts: oo_etcd_hosts_to_upgrade
serial: 1
vars:
upgrade_version: 3.0.15
tasks:
- include: containerized_tasks.yml
- when: etcd_container_version.stdout | default('99') | version_compare('3.0','<') and openshift.common.is_containerized | bool
+ when: etcd_container_version | default('99') | version_compare('3.0','<') and openshift.common.is_containerized | bool
- name: Upgrade fedora to latest
- hosts: etcd_hosts_to_upgrade
+ hosts: oo_etcd_hosts_to_upgrade
serial: 1
tasks:
- include: fedora_tasks.yml
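
The refactor above registers the docker and runc probes into separate variables and then copies whichever one actually ran into a single etcd_container_version fact, because a registered variable is populated (with a skip result) even when the task's when condition is false. A self-contained, hypothetical illustration of that pattern:

---
- hosts: localhost
  connection: local
  gather_facts: no
  vars:
    use_runc: false            # stand-in for openshift.common.is_etcd_system_container
  tasks:
  - command: echo 3.0.15       # stand-in for the docker exec probe
    register: version_docker
    changed_when: false
    when: not use_runc | bool

  - command: echo 3.1.0        # stand-in for the runc exec probe
    register: version_runc
    changed_when: false
    when: use_runc | bool

  # both variables exist at this point; only the one whose task ran has stdout
  - set_fact:
      etcd_container_version: "{{ (version_runc if use_runc | bool else version_docker).stdout }}"

  - debug:
      msg: "Detected etcd version {{ etcd_container_version }}"
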
diff --git a/playbooks/common/openshift-cluster/upgrades/init.yml b/playbooks/common/openshift-cluster/upgrades/init.yml
index cc1fa5a0a..0f421928b 100644
--- a/playbooks/common/openshift-cluster/upgrades/init.yml
+++ b/playbooks/common/openshift-cluster/upgrades/init.yml
@@ -1,28 +1,4 @@
---
-- name: Create initial host groups for localhost
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tags:
- - always
- tasks:
- - include_vars: ../../../byo/openshift-cluster/cluster_hosts.yml
- - name: Evaluate group l_oo_all_hosts
- add_host:
- name: "{{ item }}"
- groups: l_oo_all_hosts
- with_items: "{{ g_all_hosts | default([]) }}"
- changed_when: False
-
-- name: Create initial host groups for all hosts
- hosts: l_oo_all_hosts
- gather_facts: no
- tags:
- - always
- tasks:
- - include_vars: ../../../byo/openshift-cluster/cluster_hosts.yml
-
- include: ../evaluate_groups.yml
vars:
# Do not allow adding hosts during upgrade.
@@ -34,17 +10,6 @@
- include: ../initialize_facts.yml
-- name: Ensure clean repo cache in the event repos have been changed manually
- hosts: oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - name: Clean package cache
- command: "{{ ansible_pkg_mgr }} clean all"
- when: not openshift.common.is_atomic | bool
- args:
- warn: no
-
- name: Ensure firewall is not switched during upgrade
hosts: oo_all_hosts
tasks:
diff --git a/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py b/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py
index 673f11889..4eac8b067 100755
--- a/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py
+++ b/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py
@@ -1,7 +1,5 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
-# vim: expandtab:tabstop=4:shiftwidth=4
-
"""Ansible module for modifying OpenShift configs during an upgrade"""
import os
diff --git a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
index 0d7cdb227..9b76f1dd0 100644
--- a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
@@ -9,6 +9,8 @@
replace ( '${version}', openshift_image_tag ) }}"
router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) |
replace ( '${version}', openshift_image_tag ) }}"
+ registry_console_image: "{{ openshift.master.registry_url | replace ( '${component}', 'registry-console') |
+ replace ( '${version}', openshift.common.short_version ) }}"
pre_tasks:
- name: Load lib_openshift modules
@@ -61,6 +63,26 @@
when:
- _default_registry.results.results[0] != {}
+ - name: Check for registry-console
+ oc_obj:
+ state: list
+ kind: dc
+ name: registry-console
+ register: _registry_console
+ when:
+ - openshift.common.deployment_type != 'origin'
+
+ - name: Update registry-console image to current version
+ oc_edit:
+ kind: dc
+ name: registry-console
+ namespace: default
+ content:
+ spec.template.spec.containers[0].image: "{{ registry_console_image }}"
+ when:
+ - openshift.common.deployment_type != 'origin'
+ - _registry_console.results.results[0] != {}
+
roles:
- openshift_manageiq
# Create the new templates shipped in 3.2, existing templates are left
@@ -97,6 +119,12 @@
- not grep_plugin_order_override | skipped
- grep_plugin_order_override.rc == 0
-- include: ../reset_excluder.yml
+- name: Re-enable excluder if it was previously enabled
+ hosts: oo_masters_to_config
tags:
- always
+ gather_facts: no
+ roles:
+ - role: openshift_excluder
+ r_openshift_excluder_action: enable
+ r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/validate_excluder.yml b/playbooks/common/openshift-cluster/upgrades/pre/validate_excluder.yml
deleted file mode 100644
index 6de1ed061..000000000
--- a/playbooks/common/openshift-cluster/upgrades/pre/validate_excluder.yml
+++ /dev/null
@@ -1,29 +0,0 @@
----
-# input variables:
-# - repoquery_cmd
-# - excluder
-# - openshift_upgrade_target
-- block:
- - name: Get available excluder version
- command: >
- {{ repoquery_cmd }} --qf '%{version}' "{{ excluder }}"
- register: excluder_version
- failed_when: false
- changed_when: false
-
- - name: Docker excluder version detected
- debug:
- msg: "{{ excluder }}: {{ excluder_version.stdout }}"
-
- - name: Printing upgrade target version
- debug:
- msg: "{{ openshift_upgrade_target }}"
-
- - name: Check the available {{ excluder }} version is at most of the upgrade target version
- fail:
- msg: "Available {{ excluder }} version {{ excluder_version.stdout }} is higher than the upgrade target version"
- when:
- - "{{ excluder_version.stdout != '' }}"
- - "{{ excluder_version.stdout.split('.')[0:2] | join('.') | version_compare(openshift_upgrade_target.split('.')[0:2] | join('.'), '>', strict=True) }}"
- when:
- - not openshift.common.is_atomic | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
index c83923dae..6a9f88707 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
@@ -1,21 +1,13 @@
---
- name: Verify upgrade targets
hosts: oo_masters_to_config:oo_nodes_to_upgrade
- vars:
- openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
- pre_tasks:
- - fail:
+
+ tasks:
+ - name: Fail when OpenShift is not installed
+ fail:
msg: Verify OpenShift is already installed
when: openshift.common.version is not defined
- - fail:
- msg: Verify the correct version was found
- when: verify_upgrade_version is defined and openshift_version != verify_upgrade_version
-
- - set_fact:
- g_new_service_name: "{{ 'origin' if deployment_type =='origin' else 'atomic-openshift' }}"
- when: not openshift.common.is_containerized | bool
-
- name: Verify containers are available for upgrade
command: >
docker pull {{ openshift.common.cli_image }}:{{ openshift_image_tag }}
@@ -23,19 +15,31 @@
changed_when: "'Downloaded newer image' in pull_result.stdout"
when: openshift.common.is_containerized | bool
- - name: Check latest available OpenShift RPM version
- command: >
- {{ repoquery_cmd }} --qf '%{version}' "{{ openshift.common.service_type }}"
- failed_when: false
- changed_when: false
- register: avail_openshift_version
- when: not openshift.common.is_containerized | bool
+ - when: not openshift.common.is_containerized | bool
+ block:
+ - name: Check latest available OpenShift RPM version
+ command: >
+ {{ repoquery_cmd }} --qf '%{version}' "{{ openshift.common.service_type }}"
+ failed_when: false
+ changed_when: false
+ register: avail_openshift_version
- - name: Verify OpenShift RPMs are available for upgrade
- fail:
- msg: "OpenShift {{ avail_openshift_version.stdout }} is available, but {{ openshift_upgrade_target }} or greater is required"
- when: not openshift.common.is_containerized | bool and not avail_openshift_version | skipped and avail_openshift_version.stdout | default('0.0', True) | version_compare(openshift_release, '<')
+ - name: Fail when unable to determine available OpenShift RPM version
+ fail:
+ msg: "Unable to determine available OpenShift RPM version"
+ when:
+ - avail_openshift_version.stdout == ''
- - fail:
+ - name: Verify OpenShift RPMs are available for upgrade
+ fail:
+ msg: "OpenShift {{ avail_openshift_version.stdout }} is available, but {{ openshift_upgrade_target }} or greater is required"
+ when:
+ - not avail_openshift_version | skipped
+ - avail_openshift_version.stdout | default('0.0', True) | version_compare(openshift_release, '<')
+
+ - name: Fail when openshift version does not meet minimum requirement for Origin upgrade
+ fail:
msg: "This upgrade playbook must be run against OpenShift {{ openshift_upgrade_min }} or later"
- when: deployment_type == 'origin' and openshift.common.version | version_compare(openshift_upgrade_min,'<')
+ when:
+ - deployment_type == 'origin'
+ - openshift.common.version | version_compare(openshift_upgrade_min,'<')
diff --git a/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml
index 03ac02e9f..164baca81 100644
--- a/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml
@@ -1,27 +1,39 @@
---
-# We verified latest rpm available is suitable, so just yum update.
+# When we update package "a-${version}" and a requires b >= ${version}, if we
+# don't specify the version of b, yum will choose the latest version of b
+# available and the whole set of dependencies ends up at the latest version.
+# Since the package module, unlike the yum module, doesn't flatten a list
+# of packages into one transaction, we need to do that explicitly. The Ansible
+# core team tells us not to rely on yum module transaction flattening anyway.
+
+# TODO: If the sdn package isn't already installed this will install it, we
+# should fix that
-# Master package upgrade ends up depending on node and sdn packages, we need to be explicit
-# with all versions to avoid yum from accidentally jumping to something newer than intended:
- name: Upgrade master packages
- package: name={{ item }} state=present
- when: component == "master"
- with_items:
- - "{{ openshift.common.service_type }}{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-master{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}"
+ package: name={{ master_pkgs | join(',') }} state=present
+ vars:
+ master_pkgs:
+ - "{{ openshift.common.service_type }}{{ openshift_pkg_version }}"
+ - "{{ openshift.common.service_type }}-master{{ openshift_pkg_version }}"
+ - "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}"
+ - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version}}"
+ - "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}"
+ - "tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}"
+ - PyYAML
+ when:
+ - component == "master"
+ - not openshift.common.is_atomic | bool
- name: Upgrade node packages
- package: name={{ item }} state=present
- when: component == "node"
- with_items:
- - "{{ openshift.common.service_type }}{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}"
-
-- name: Ensure python-yaml present for config upgrade
- package: name=PyYAML state=present
- when: not openshift.common.is_atomic | bool
+ package: name={{ node_pkgs | join(',') }} state=present
+ vars:
+ node_pkgs:
+ - "{{ openshift.common.service_type }}{{ openshift_pkg_version }}"
+ - "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}"
+ - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version }}"
+ - "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}"
+ - "tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}"
+ - PyYAML
+ when:
+ - component == "node"
+ - not openshift.common.is_atomic | bool
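
As the comment at the top of this file explains, the point of join(',') is to hand yum one flattened transaction instead of one transaction per item, so the whole dependency set is resolved together. A hypothetical side-by-side of the two behaviors (package names and versions are illustrative only):

# one yum transaction per item; dependency versions can drift between items
- package: name={{ item }} state=present
  with_items:
  - origin-3.3.1.7
  - origin-node-3.3.1.7

# one yum transaction for the whole set; dependencies resolved together
- package: name={{ ['origin-3.3.1.7', 'origin-node-3.3.1.7'] | join(',') }} state=present
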
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index c6e799261..0ad934d2d 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -2,17 +2,6 @@
###############################################################################
# Upgrade Masters
###############################################################################
-- name: Evaluate additional groups for upgrade
- hosts: localhost
- connection: local
- become: no
- tasks:
- - name: Evaluate etcd_hosts_to_backup
- add_host:
- name: "{{ item }}"
- groups: etcd_hosts_to_backup
- with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master }}"
- changed_when: False
# If facts cache were for some reason deleted, this fact may not be set, and if not set
# it will always default to true. This causes problems for the etcd data dir fact detection
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
index e9f894942..4d455fe0a 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -34,6 +34,9 @@
- openshift_facts
- docker
- openshift_node_upgrade
+ - role: openshift_excluder
+ r_openshift_excluder_action: enable
+ r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
post_tasks:
- name: Set node schedulability
@@ -46,7 +49,3 @@
register: node_schedulable
until: node_schedulable|succeeded
when: node_unschedulable|changed
-
-- include: ../reset_excluder.yml
- tags:
- - always
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml
index 88f2ddc78..83d2cec81 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml
@@ -63,12 +63,12 @@
- block:
- debug:
msg: "WARNING: openshift_master_scheduler_predicates is set to defaults from an earlier release of OpenShift current defaults are: {{ openshift_master_scheduler_default_predicates }}"
- when: "{{ openshift_master_scheduler_predicates in older_predicates + older_predicates_no_region + [prev_predicates] + [prev_predicates_no_region] }}"
+ when: openshift_master_scheduler_predicates in older_predicates + older_predicates_no_region + [prev_predicates] + [prev_predicates_no_region]
- debug:
msg: "WARNING: openshift_master_scheduler_predicates does not match current defaults of: {{ openshift_master_scheduler_default_predicates }}"
- when: "{{ openshift_master_scheduler_predicates != openshift_master_scheduler_default_predicates }}"
- when: "{{ openshift_master_scheduler_predicates | default(none) is not none }}"
+ when: openshift_master_scheduler_predicates != openshift_master_scheduler_default_predicates
+ when: openshift_master_scheduler_predicates | default(none) is not none
# Handle cases where openshift_master_predicates is not defined
- block:
@@ -87,7 +87,7 @@
when: "{{ openshift_master_scheduler_current_predicates != default_predicates_no_region and
openshift_master_scheduler_current_predicates in older_predicates_no_region + [prev_predicates_no_region] }}"
- when: "{{ openshift_master_scheduler_predicates | default(none) is none }}"
+ when: openshift_master_scheduler_predicates | default(none) is none
# Upgrade priorities
@@ -120,12 +120,12 @@
- block:
- debug:
msg: "WARNING: openshift_master_scheduler_priorities is set to defaults from an earlier release of OpenShift current defaults are: {{ openshift_master_scheduler_default_priorities }}"
- when: "{{ openshift_master_scheduler_priorities in older_priorities + older_priorities_no_zone + [prev_priorities] + [prev_priorities_no_zone] }}"
+ when: openshift_master_scheduler_priorities in older_priorities + older_priorities_no_zone + [prev_priorities] + [prev_priorities_no_zone]
- debug:
msg: "WARNING: openshift_master_scheduler_priorities does not match current defaults of: {{ openshift_master_scheduler_default_priorities }}"
- when: "{{ openshift_master_scheduler_priorities != openshift_master_scheduler_default_priorities }}"
- when: "{{ openshift_master_scheduler_priorities | default(none) is not none }}"
+ when: openshift_master_scheduler_priorities != openshift_master_scheduler_default_priorities
+ when: openshift_master_scheduler_priorities | default(none) is not none
# Handle cases where openshift_master_priorities is not defined
- block:
@@ -144,7 +144,7 @@
when: "{{ openshift_master_scheduler_current_priorities != default_priorities_no_zone and
openshift_master_scheduler_current_priorities in older_priorities_no_zone + [prev_priorities_no_zone] }}"
- when: "{{ openshift_master_scheduler_priorities | default(none) is none }}"
+ when: openshift_master_scheduler_priorities | default(none) is none
# Update scheduler
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml
index 68c71a132..d69472fad 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml
@@ -53,7 +53,7 @@
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
yaml_key: 'admissionConfig.pluginConfig'
yaml_value: "{{ openshift.master.admission_plugin_config }}"
- when: "{{ 'admission_plugin_config' in openshift.master }}"
+ when: "'admission_plugin_config' in openshift.master"
- modify_yaml:
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/roles b/playbooks/common/openshift-cluster/upgrades/v3_3/roles
index 6bc1a7aef..6bc1a7aef 120000
--- a/playbooks/byo/openshift-cluster/upgrades/v3_3/roles
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/roles
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml
new file mode 100644
index 000000000..d81a13ef2
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml
@@ -0,0 +1,111 @@
+---
+#
+# Full Control Plane + Nodes Upgrade
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
+ openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
+
+# Pre-upgrade
+
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos and initialize facts on all hosts
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+ tags:
+ - pre_upgrade
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+ openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_master_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_node_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../openshift-master/validate_restart.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_3/master_config_upgrade.yml"
+
+- include: ../upgrade_nodes.yml
+ vars:
+ node_config_hook: "v3_3/node_config_upgrade.yml"
+
+- include: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
new file mode 100644
index 000000000..8a692d02b
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
@@ -0,0 +1,111 @@
+---
+#
+# Control Plane Upgrade Playbook
+#
+# Upgrades masters and Docker (only on standalone etcd hosts)
+#
+# This upgrade does not include:
+# - node service running on masters
+# - docker running on masters
+# - node service running on dedicated nodes
+#
+# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
+ openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
+
+# Pre-upgrade
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos on control plane hosts
+ hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
+ tags:
+ - pre_upgrade
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+ openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_master_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../openshift-master/validate_restart.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_3/master_config_upgrade.yml"
+
+- include: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
new file mode 100644
index 000000000..2d30bba94
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
@@ -0,0 +1,106 @@
+---
+#
+# Node Upgrade Playbook
+#
+# Upgrades nodes only, but requires the control plane to have already been upgraded.
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
+ openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
+
+# Pre-upgrade
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos on nodes
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+ roles:
+ - openshift_repos
+ tags:
+ - pre_upgrade
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+ openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_node_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- name: Verify masters are already upgraded
+ hosts: oo_masters_to_config
+ tags:
+ - pre_upgrade
+ tasks:
+ - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
+ when: openshift.common.version != openshift_version
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_nodes.yml
+ vars:
+ node_config_hook: "v3_3/node_config_upgrade.yml"
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml
index 43c2ffcd4..ed89dbe8d 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml
@@ -3,7 +3,7 @@
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
yaml_key: 'admissionConfig.pluginConfig'
yaml_value: "{{ openshift.master.admission_plugin_config }}"
- when: "{{ 'admission_plugin_config' in openshift.master }}"
+ when: "'admission_plugin_config' in openshift.master"
- modify_yaml:
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/roles b/playbooks/common/openshift-cluster/upgrades/v3_4/roles
index 6bc1a7aef..6bc1a7aef 120000
--- a/playbooks/byo/openshift-cluster/upgrades/v3_4/roles
+++ b/playbooks/common/openshift-cluster/upgrades/v3_4/roles
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml
new file mode 100644
index 000000000..e9ff47f32
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml
@@ -0,0 +1,109 @@
+---
+#
+# Full Control Plane + Nodes Upgrade
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
+ openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
+
+# Pre-upgrade
+
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos and initialize facts on all hosts
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+ tags:
+ - pre_upgrade
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+ openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_master_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_node_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../openshift-master/validate_restart.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_4/master_config_upgrade.yml"
+
+- include: ../upgrade_nodes.yml
+
+- include: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
new file mode 100644
index 000000000..d4ae8d8b4
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
@@ -0,0 +1,111 @@
+---
+#
+# Control Plane Upgrade Playbook
+#
+# Upgrades masters and Docker (only on standalone etcd hosts)
+#
+# This upgrade does not include:
+# - node service running on masters
+# - docker running on masters
+# - node service running on dedicated nodes
+#
+# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
+ openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
+
+# Pre-upgrade
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos on control plane hosts
+ hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
+ tags:
+ - pre_upgrade
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+ openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_master_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../openshift-master/validate_restart.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_4/master_config_upgrade.yml"
+
+- include: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
new file mode 100644
index 000000000..ae205b172
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
@@ -0,0 +1,104 @@
+---
+#
+# Node Upgrade Playbook
+#
+# Upgrades nodes only, but requires the control plane to have already been upgraded.
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
+ openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
+
+# Pre-upgrade
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos on nodes
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+ roles:
+ - openshift_repos
+ tags:
+ - pre_upgrade
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+ openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_node_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- name: Verify masters are already upgraded
+ hosts: oo_masters_to_config
+ tags:
+ - pre_upgrade
+ tasks:
+ - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
+ when: openshift.common.version != openshift_version
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml
new file mode 100644
index 000000000..1269634d1
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml
@@ -0,0 +1,113 @@
+---
+#
+# Full Control Plane + Nodes Upgrade
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
+ openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
+
+# Pre-upgrade
+
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos and initialize facts on all hosts
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+ tags:
+ - pre_upgrade
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+ openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_master_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_node_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../openshift-master/validate_restart.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: validator.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_control_plane.yml
+
+- include: ../upgrade_nodes.yml
+
+- include: ../post_control_plane.yml
+
+- include: storage_upgrade.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
new file mode 100644
index 000000000..21c075678
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
@@ -0,0 +1,115 @@
+---
+#
+# Control Plane Upgrade Playbook
+#
+# Upgrades masters and Docker (only on standalone etcd hosts)
+#
+# This upgrade does not include:
+# - node service running on masters
+# - docker running on masters
+# - node service running on dedicated nodes
+#
+# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
+ openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
+
+# Pre-upgrade
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos on control plane hosts
+ hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
+ tags:
+ - pre_upgrade
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+ openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_master_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../openshift-master/validate_restart.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: validator.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_control_plane.yml
+
+- include: ../post_control_plane.yml
+
+- include: storage_upgrade.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
new file mode 100644
index 000000000..e67e169fc
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
@@ -0,0 +1,104 @@
+---
+#
+# Node Upgrade Playbook
+#
+# Upgrades nodes only, but requires the control plane to have already been upgraded.
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
+ openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
+
+# Pre-upgrade
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos on nodes
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+ roles:
+ - openshift_repos
+ tags:
+ - pre_upgrade
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+ openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_node_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- name: Verify masters are already upgraded
+ hosts: oo_masters_to_config
+ tags:
+ - pre_upgrade
+ tasks:
+ - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
+ when: openshift.common.version != openshift_version
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
new file mode 100644
index 000000000..a1b1f3301
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
@@ -0,0 +1,113 @@
+---
+#
+# Full Control Plane + Nodes Upgrade
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: '3.6'
+ openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
+
+# Pre-upgrade
+
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos and initialize facts on all hosts
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+ tags:
+ - pre_upgrade
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+ openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_master_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_node_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../openshift-master/validate_restart.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: validator.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_control_plane.yml
+
+- include: ../upgrade_nodes.yml
+
+- include: ../post_control_plane.yml
+
+- include: storage_upgrade.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
new file mode 100644
index 000000000..af6e1f71b
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
@@ -0,0 +1,115 @@
+---
+#
+# Control Plane Upgrade Playbook
+#
+# Upgrades masters and Docker (only on standalone etcd hosts)
+#
+# This upgrade does not include:
+# - node service running on masters
+# - docker running on masters
+# - node service running on dedicated nodes
+#
+# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: '3.6'
+ openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
+
+# Pre-upgrade
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos on control plane hosts
+ hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
+ tags:
+ - pre_upgrade
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+ openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_master_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../openshift-master/validate_restart.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: validator.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_control_plane.yml
+
+- include: ../post_control_plane.yml
+
+- include: storage_upgrade.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
new file mode 100644
index 000000000..285c18b7b
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
@@ -0,0 +1,104 @@
+---
+#
+# Node Upgrade Playbook
+#
+# Upgrades nodes only, but requires the control plane to have already been upgraded.
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: '3.6'
+ openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
+
+# Pre-upgrade
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos on nodes
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+ roles:
+ - openshift_repos
+ tags:
+ - pre_upgrade
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+ openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_node_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- name: Verify masters are already upgraded
+ hosts: oo_masters_to_config
+ tags:
+ - pre_upgrade
+ tasks:
+ - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
+ when: openshift.common.version != openshift_version
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/validator.yml
index ac5704f69..78c1767b8 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/validator.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/validator.yml
@@ -7,4 +7,6 @@
hosts: oo_first_master
roles:
- { role: lib_openshift }
- tasks: []
+ tasks:
+ - name: Check for invalid namespaces and SDN errors
+ oc_objectvalidator:
diff --git a/playbooks/common/openshift-glusterfs/config.yml b/playbooks/common/openshift-glusterfs/config.yml
new file mode 100644
index 000000000..1efdfb336
--- /dev/null
+++ b/playbooks/common/openshift-glusterfs/config.yml
@@ -0,0 +1,23 @@
+---
+- name: Open firewall ports for GlusterFS
+ hosts: oo_glusterfs_to_config
+ vars:
+ os_firewall_allow:
+ - service: glusterfs_sshd
+ port: "2222/tcp"
+ - service: glusterfs_daemon
+ port: "24007/tcp"
+ - service: glusterfs_management
+ port: "24008/tcp"
+ - service: glusterfs_bricks
+ port: "49152-49251/tcp"
+ roles:
+ - role: os_firewall
+ when:
+ - openshift_storage_glusterfs_is_native | default(True)
+
+- name: Configure GlusterFS
+ hosts: oo_first_master
+ roles:
+ - role: openshift_storage_glusterfs
+ when: groups.oo_glusterfs_to_config | default([]) | count > 0
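The firewall play above only runs for a containerized ("native") GlusterFS deployment; an externally managed cluster can skip it by overriding the guard variable it checks. A minimal sketch, placed in inventory or group_vars:

    # group_vars sketch: GlusterFS runs outside the cluster, skip native firewall setup
    openshift_storage_glusterfs_is_native: False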
diff --git a/playbooks/common/openshift-glusterfs/filter_plugins b/playbooks/common/openshift-glusterfs/filter_plugins
new file mode 120000
index 000000000..99a95e4ca
--- /dev/null
+++ b/playbooks/common/openshift-glusterfs/filter_plugins
@@ -0,0 +1 @@
+../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-glusterfs/lookup_plugins b/playbooks/common/openshift-glusterfs/lookup_plugins
new file mode 120000
index 000000000..ac79701db
--- /dev/null
+++ b/playbooks/common/openshift-glusterfs/lookup_plugins
@@ -0,0 +1 @@
+../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-glusterfs/registry.yml b/playbooks/common/openshift-glusterfs/registry.yml
new file mode 100644
index 000000000..80cf7529e
--- /dev/null
+++ b/playbooks/common/openshift-glusterfs/registry.yml
@@ -0,0 +1,49 @@
+---
+- include: config.yml
+
+- name: Initialize GlusterFS registry PV and PVC vars
+ hosts: oo_first_master
+ tags: hosted
+ tasks:
+ - set_fact:
+ glusterfs_pv: []
+ glusterfs_pvc: []
+
+ - set_fact:
+ glusterfs_pv:
+ - name: "{{ openshift.hosted.registry.storage.volume.name }}-glusterfs-volume"
+ capacity: "{{ openshift.hosted.registry.storage.volume.size }}"
+ access_modes: "{{ openshift.hosted.registry.storage.access.modes }}"
+ storage:
+ glusterfs:
+ endpoints: "{{ openshift.hosted.registry.storage.glusterfs.endpoints }}"
+ path: "{{ openshift.hosted.registry.storage.glusterfs.path }}"
+ readOnly: "{{ openshift.hosted.registry.storage.glusterfs.readOnly }}"
+ glusterfs_pvc:
+ - name: "{{ openshift.hosted.registry.storage.volume.name }}-glusterfs-claim"
+ capacity: "{{ openshift.hosted.registry.storage.volume.size }}"
+ access_modes: "{{ openshift.hosted.registry.storage.access.modes }}"
+ when: openshift.hosted.registry.storage.glusterfs.swap
+
+- name: Create persistent volumes
+ hosts: oo_first_master
+ tags:
+ - hosted
+ vars:
+ persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups, glusterfs_pv) }}"
+ persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims(glusterfs_pvc) }}"
+ roles:
+ - role: openshift_persistent_volumes
+ when: persistent_volumes | union(glusterfs_pv) | length > 0 or persistent_volume_claims | union(glusterfs_pvc) | length > 0
+
+- name: Create Hosted Resources
+ hosts: oo_first_master
+ tags:
+ - hosted
+ pre_tasks:
+ - set_fact:
+ openshift_hosted_router_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}"
+ openshift_hosted_registry_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}"
+ when: "'master' in hostvars[groups.oo_first_master.0].openshift and 'registry_url' in hostvars[groups.oo_first_master.0].openshift.master"
+ roles:
+ - role: openshift_hosted
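The swap path above is driven by the openshift.hosted.registry.storage facts; a rough inventory sketch follows, where the variable names are assumptions inferred from those fact names and not confirmed here:

    # Inventory sketch (assumed variable names):
    openshift_hosted_registry_storage_kind: glusterfs
    openshift_hosted_registry_storage_glusterfs_swap: True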
diff --git a/playbooks/common/openshift-glusterfs/roles b/playbooks/common/openshift-glusterfs/roles
new file mode 120000
index 000000000..e2b799b9d
--- /dev/null
+++ b/playbooks/common/openshift-glusterfs/roles
@@ -0,0 +1 @@
+../../../roles/ \ No newline at end of file
diff --git a/playbooks/common/openshift-master/scaleup.yml b/playbooks/common/openshift-master/scaleup.yml
index 92f16dc47..bc61ee9bb 100644
--- a/playbooks/common/openshift-master/scaleup.yml
+++ b/playbooks/common/openshift-master/scaleup.yml
@@ -51,7 +51,7 @@
changed_when: false
- name: Configure docker hosts
- hosts: oo_masters_to-config:oo_nodes_to_config
+ hosts: oo_masters_to_config:oo_nodes_to_config
vars:
docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') | oo_split }}"
docker_insecure_registries: "{{ lookup('oo_option', 'docker_insecure_registries') | oo_split }}"
@@ -60,9 +60,15 @@
- openshift_facts
- openshift_docker
-- include: ../openshift-cluster/disable_excluder.yml
+- name: Disable excluders
+ hosts: oo_masters_to_config
tags:
- always
+ gather_facts: no
+ roles:
+ - role: openshift_excluder
+ r_openshift_excluder_action: disable
+ r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
- include: ../openshift-master/config.yml
@@ -70,6 +76,12 @@
- include: ../openshift-node/config.yml
-- include: ../openshift-cluster/reset_excluder.yml
+- name: Re-enable excluder if it was previously enabled
+ hosts: oo_masters_to_config
tags:
- always
+ gather_facts: no
+ roles:
+ - role: openshift_excluder
+ r_openshift_excluder_action: enable
+ r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
diff --git a/playbooks/common/openshift-node/network_manager.yml b/playbooks/common/openshift-node/network_manager.yml
new file mode 100644
index 000000000..0014a5dbd
--- /dev/null
+++ b/playbooks/common/openshift-node/network_manager.yml
@@ -0,0 +1,26 @@
+---
+- name: Install and configure NetworkManager
+ hosts: oo_all_hosts
+ become: yes
+ tasks:
+ - name: install NetworkManager
+ package:
+ name: 'NetworkManager'
+ state: present
+
+ - name: configure NetworkManager
+ lineinfile:
+ dest: "/etc/sysconfig/network-scripts/ifcfg-{{ ansible_default_ipv4['interface'] }}"
+ regexp: '^{{ item }}='
+ line: '{{ item }}=yes'
+ state: present
+ create: yes
+ with_items:
+ - 'USE_PEERDNS'
+ - 'NM_CONTROLLED'
+
+ - name: enable and start NetworkManager
+ service:
+ name: 'NetworkManager'
+ state: started
+ enabled: yes
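Given the lineinfile pattern above, a host whose default IPv4 interface is eth0 (the interface name is assumed only for illustration) would end up with:

    # /etc/sysconfig/network-scripts/ifcfg-eth0 -- lines ensured by the play
    # USE_PEERDNS=yes
    # NM_CONTROLLED=yes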
diff --git a/playbooks/common/openshift-node/scaleup.yml b/playbooks/common/openshift-node/scaleup.yml
index c31aca62b..40da8990d 100644
--- a/playbooks/common/openshift-node/scaleup.yml
+++ b/playbooks/common/openshift-node/scaleup.yml
@@ -27,12 +27,24 @@
- openshift_facts
- openshift_docker
-- include: ../openshift-cluster/disable_excluder.yml
+- name: Disable excluders
+ hosts: oo_nodes_to_config
tags:
- always
+ gather_facts: no
+ roles:
+ - role: openshift_excluder
+ r_openshift_excluder_action: disable
+ r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
- include: ../openshift-node/config.yml
-- include: ../openshift-cluster/reset_excluder.yml
+- name: Re-enable excluder if it was previously enabled
+ hosts: oo_nodes_to_config
tags:
- always
+ gather_facts: no
+ roles:
+ - role: openshift_excluder
+ r_openshift_excluder_action: enable
+ r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
diff --git a/playbooks/gce/openshift-cluster/cluster_hosts.yml b/playbooks/gce/openshift-cluster/cluster_hosts.yml
index 74e2420db..05a58db73 100644
--- a/playbooks/gce/openshift-cluster/cluster_hosts.yml
+++ b/playbooks/gce/openshift-cluster/cluster_hosts.yml
@@ -8,6 +8,8 @@ g_lb_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-lb'] | default([])
g_nfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-nfs'] | default([])) }}"
+g_glusterfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-glusterfs'] | default([])) }}"
+
g_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-master'] | default([])) }}"
g_new_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-new-master'] | default([])) }}"
diff --git a/playbooks/libvirt/openshift-cluster/cluster_hosts.yml b/playbooks/libvirt/openshift-cluster/cluster_hosts.yml
index 74e2420db..05a58db73 100644
--- a/playbooks/libvirt/openshift-cluster/cluster_hosts.yml
+++ b/playbooks/libvirt/openshift-cluster/cluster_hosts.yml
@@ -8,6 +8,8 @@ g_lb_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-lb'] | default([])
g_nfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-nfs'] | default([])) }}"
+g_glusterfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-glusterfs'] | default([])) }}"
+
g_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-master'] | default([])) }}"
g_new_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-new-master'] | default([])) }}"
diff --git a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
index 78581fdfe..ccd29be29 100644
--- a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
+++ b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
@@ -14,7 +14,7 @@
url: '{{ image_url }}'
sha256sum: '{{ image_sha256 }}'
dest: '{{ libvirt_storage_pool_path }}/{{ [image_name, image_compression] | difference([""]) | join(".") }}'
- when: '{{ ( lookup("oo_option", "skip_image_download") | default("no", True) | lower ) in ["false", "no"] }}'
+ when: ( lookup("oo_option", "skip_image_download") | default("no", True) | lower ) in ["false", "no"]
register: downloaded_image
- name: Uncompress xz compressed base cloud image
diff --git a/playbooks/openstack/openshift-cluster/cluster_hosts.yml b/playbooks/openstack/openshift-cluster/cluster_hosts.yml
index 98434439c..505f7b3a8 100644
--- a/playbooks/openstack/openshift-cluster/cluster_hosts.yml
+++ b/playbooks/openstack/openshift-cluster/cluster_hosts.yml
@@ -8,6 +8,8 @@ g_lb_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_lb'] | default([]
g_nfs_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_nfs'] | default([])) }}"
+g_glusterfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-glusterfs'] | default([])) }}"
+
g_master_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_master'] | default([])) }}"
g_new_master_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_new_master'] | default([])) }}"
diff --git a/requirements.txt b/requirements.txt
index 241313b6f..1996a967d 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,6 +1,9 @@
-ansible>=2.2
-click
-pyOpenSSL
+# Versions are pinned to prevent pypi releases arbitrarily breaking
+# tests with new APIs/semantics. We want to update versions deliberately.
+ansible==2.2.2.0
+boto==2.45.0
+click==6.7
+pyOpenSSL==16.2.0
# We need to disable ruamel.yaml for now because of test failures
#ruamel.yaml
-six
+six==1.10.0
diff --git a/roles/calico/defaults/main.yaml b/roles/calico/defaults/main.yaml
index a81fc3af7..a16a7da71 100644
--- a/roles/calico/defaults/main.yaml
+++ b/roles/calico/defaults/main.yaml
@@ -4,7 +4,11 @@ etcd_endpoints: "{{ hostvars[groups.oo_first_master.0].openshift.master.etcd_url
cni_conf_dir: "/etc/cni/net.d/"
cni_bin_dir: "/opt/cni/bin/"
+cni_url: "https://github.com/containernetworking/cni/releases/download/v0.4.0/cni-amd64-v0.4.0.tgz"
calico_etcd_ca_cert_file: "/etc/origin/calico/calico.etcd-ca.crt"
calico_etcd_cert_file: "/etc/origin/calico/calico.etcd-client.crt"
calico_etcd_key_file: "/etc/origin/calico/calico.etcd-client.key"
+
+calico_url_cni: "https://github.com/projectcalico/cni-plugin/releases/download/v1.5.5/calico"
+calico_url_ipam: "https://github.com/projectcalico/cni-plugin/releases/download/v1.5.5/calico-ipam"
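Because these are role defaults, a disconnected install can point them at a local mirror from the inventory or group_vars; the mirror URLs below are purely hypothetical:

    # group_vars sketch (hypothetical mirror):
    cni_url: "https://mirror.example.com/cni/cni-amd64-v0.4.0.tgz"
    calico_url_cni: "https://mirror.example.com/calico/v1.5.5/calico"
    calico_url_ipam: "https://mirror.example.com/calico/v1.5.5/calico-ipam"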
diff --git a/roles/calico/handlers/main.yml b/roles/calico/handlers/main.yml
index 65d75cf00..53cecfcc3 100644
--- a/roles/calico/handlers/main.yml
+++ b/roles/calico/handlers/main.yml
@@ -5,4 +5,6 @@
- name: restart docker
become: yes
- systemd: name=docker state=restarted
+ systemd:
+ name: "{{ openshift.docker.service_name }}"
+ state: restarted
diff --git a/roles/calico/tasks/main.yml b/roles/calico/tasks/main.yml
index 287fed321..fa5e338b3 100644
--- a/roles/calico/tasks/main.yml
+++ b/roles/calico/tasks/main.yml
@@ -7,7 +7,7 @@
etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
etcd_cert_subdir: "openshift-calico-{{ openshift.common.hostname }}"
-- name: Assure the calico certs have been generated
+- name: Calico Node | Assure the calico certs have been generated
stat:
path: "{{ item }}"
with_items:
@@ -15,12 +15,12 @@
- "{{ calico_etcd_cert_file}}"
- "{{ calico_etcd_key_file }}"
-- name: Configure Calico service unit file
+- name: Calico Node | Configure Calico service unit file
template:
dest: "/lib/systemd/system/calico.service"
src: calico.service.j2
-- name: Enable calico
+- name: Calico Node | Enable calico
become: yes
systemd:
name: calico
@@ -29,46 +29,46 @@
enabled: yes
register: start_result
-- name: Assure CNI conf dir exists
+- name: Calico Node | Assure CNI conf dir exists
become: yes
file: path="{{ cni_conf_dir }}" state=directory
-- name: Generate Calico CNI config
+- name: Calico Node | Generate Calico CNI config
become: yes
template:
- src: "calico.conf.j2"
+ src: "10-calico.conf.j2"
dest: "{{ cni_conf_dir }}/10-calico.conf"
-- name: Assures Kuberentes CNI bin dir exists
+- name: Calico Node | Assure Kubernetes CNI bin dir exists
become: yes
file: path="{{ cni_bin_dir }}" state=directory
-- name: Download Calico CNI Plugin
+- name: Calico Node | Download Calico CNI Plugin
become: yes
get_url:
- url: https://github.com/projectcalico/cni-plugin/releases/download/v1.5.5/calico
+ url: "{{ calico_url_cni }}"
dest: "{{ cni_bin_dir }}"
mode: a+x
-- name: Download Calico IPAM Plugin
+- name: Calico Node | Download Calico IPAM Plugin
become: yes
get_url:
- url: https://github.com/projectcalico/cni-plugin/releases/download/v1.5.5/calico-ipam
+ url: "{{ calico_url_ipam }}"
dest: "{{ cni_bin_dir }}"
mode: a+x
-- name: Download and unzip standard CNI plugins
+- name: Calico Node | Download and extract standard CNI plugins
become: yes
unarchive:
remote_src: True
- src: https://github.com/containernetworking/cni/releases/download/v0.4.0/cni-amd64-v0.4.0.tgz
+ src: "{{ cni_url }}"
dest: "{{ cni_bin_dir }}"
-- name: Assure Calico conf dir exists
+- name: Calico Node | Assure Calico conf dir exists
become: yes
file: path=/etc/calico/ state=directory
-- name: Set calicoctl.cfg
+- name: Calico Node | Set calicoctl.cfg
template:
- src: calico.cfg.j2
+ src: calicoctl.cfg.j2
dest: "/etc/calico/calicoctl.cfg"
diff --git a/roles/calico/templates/calico.conf.j2 b/roles/calico/templates/10-calico.conf.j2
index 722385ed8..722385ed8 100644
--- a/roles/calico/templates/calico.conf.j2
+++ b/roles/calico/templates/10-calico.conf.j2
diff --git a/roles/calico/templates/calico.service.j2 b/roles/calico/templates/calico.service.j2
index b882a5597..7a1236392 100644
--- a/roles/calico/templates/calico.service.j2
+++ b/roles/calico/templates/calico.service.j2
@@ -1,7 +1,7 @@
[Unit]
Description=calico
-After=docker.service
-Requires=docker.service
+After={{ openshift.docker.service_name }}.service
+Requires={{ openshift.docker.service_name }}.service
[Service]
Restart=always
diff --git a/roles/calico/templates/calico.cfg.j2 b/roles/calico/templates/calicoctl.cfg.j2
index 3c8c6b046..3c8c6b046 100644
--- a/roles/calico/templates/calico.cfg.j2
+++ b/roles/calico/templates/calicoctl.cfg.j2
diff --git a/roles/calico_master/defaults/main.yaml b/roles/calico_master/defaults/main.yaml
index db0d17884..5b324bce5 100644
--- a/roles/calico_master/defaults/main.yaml
+++ b/roles/calico_master/defaults/main.yaml
@@ -1,2 +1,6 @@
---
kubeconfig: "{{ openshift.common.config_base }}/master/openshift-master.kubeconfig"
+
+calicoctl_bin_dir: "/usr/local/bin/"
+
+calico_url_calicoctl: "https://github.com/projectcalico/calicoctl/releases/download/v1.1.3/calicoctl"
diff --git a/roles/calico_master/tasks/main.yml b/roles/calico_master/tasks/main.yml
index 3358abe23..8ddca26d6 100644
--- a/roles/calico_master/tasks/main.yml
+++ b/roles/calico_master/tasks/main.yml
@@ -1,5 +1,5 @@
---
-- name: Assure the calico certs have been generated
+- name: Calico Master | Assure the calico certs have been generated
stat:
path: "{{ item }}"
with_items:
@@ -7,17 +7,17 @@
- "{{ calico_etcd_cert_file}}"
- "{{ calico_etcd_key_file }}"
-- name: Create temp directory for policy controller definition
+- name: Calico Master | Create temp directory for policy controller definition
command: mktemp -d /tmp/openshift-ansible-XXXXXXX
register: mktemp
changed_when: False
-- name: Write Calico Policy Controller definition
+- name: Calico Master | Write Calico Policy Controller definition
template:
dest: "{{ mktemp.stdout }}/calico-policy-controller.yml"
src: calico-policy-controller.yml.j2
-- name: Launch Calico Policy Controller
+- name: Calico Master | Launch Calico Policy Controller
command: >
{{ openshift.common.client_binary }} create
-f {{ mktemp.stdout }}/calico-policy-controller.yml
@@ -26,16 +26,23 @@
failed_when: ('already exists' not in calico_create_output.stderr) and ('created' not in calico_create_output.stdout)
changed_when: ('created' in calico_create_output.stdout)
-- name: Delete temp directory
+- name: Calico Master | Delete temp directory
file:
name: "{{ mktemp.stdout }}"
state: absent
changed_when: False
-- name: oc adm policy add-scc-to-user privileged system:serviceaccount:kube-system:calico
+- name: Calico Master | oc adm policy add-scc-to-user privileged system:serviceaccount:kube-system:calico
oc_adm_policy_user:
user: system:serviceaccount:kube-system:calico
resource_kind: scc
resource_name: privileged
state: present
+
+- name: Download Calicoctl
+ become: yes
+ get_url:
+ url: "{{ calico_url_calicoctl }}"
+ dest: "{{ calicoctl_bin_dir }}"
+ mode: a+x
diff --git a/roles/contiv/tasks/netplugin.yml b/roles/contiv/tasks/netplugin.yml
index 97b9762df..0847c92bc 100644
--- a/roles/contiv/tasks/netplugin.yml
+++ b/roles/contiv/tasks/netplugin.yml
@@ -105,7 +105,7 @@
- name: Docker | Restart docker
service:
- name: docker
+ name: "{{ openshift.docker.service_name }}"
state: restarted
when: docker_updated|changed
diff --git a/roles/contiv/templates/aci-gw.service b/roles/contiv/templates/aci-gw.service
index 8e4b66fbe..4506d2231 100644
--- a/roles/contiv/templates/aci-gw.service
+++ b/roles/contiv/templates/aci-gw.service
@@ -1,6 +1,6 @@
[Unit]
Description=Contiv ACI gw
-After=auditd.service systemd-user-sessions.service time-sync.target docker.service
+After=auditd.service systemd-user-sessions.service time-sync.target {{ openshift.docker.service_name }}.service
[Service]
ExecStart={{ bin_dir }}/aci_gw.sh start
diff --git a/roles/dns/templates/named.service.j2 b/roles/dns/templates/named.service.j2
index 566739f25..6e0a7a640 100644
--- a/roles/dns/templates/named.service.j2
+++ b/roles/dns/templates/named.service.j2
@@ -1,7 +1,7 @@
[Unit]
-Requires=docker.service
-After=docker.service
-PartOf=docker.service
+Requires={{ openshift.docker.service_name }}.service
+After={{ openshift.docker.service_name }}.service
+PartOf={{ openshift.docker.service_name }}.service
[Service]
Type=simple
@@ -12,4 +12,4 @@ ExecStart=/usr/bin/docker run --name bind -p 53:53/udp -v /var/log:/var/log -v /
ExecStop=/usr/bin/docker stop bind
[Install]
-WantedBy=docker.service
+WantedBy={{ openshift.docker.service_name }}.service
diff --git a/roles/docker/README.md b/roles/docker/README.md
index ea06fd41a..4a9f21f22 100644
--- a/roles/docker/README.md
+++ b/roles/docker/README.md
@@ -1,7 +1,9 @@
Docker
=========
-Ensures docker package is installed, and optionally raises timeout for systemd-udevd.service to 5 minutes.
+Ensures docker package or system container is installed, and optionally raises timeout for systemd-udevd.service to 5 minutes.
+
+daemon.json items may be found at https://docs.docker.com/engine/reference/commandline/dockerd/#daemon-configuration-file
Requirements
------------
@@ -11,8 +13,10 @@ Ansible 2.2
Role Variables
--------------
-udevw_udevd_dir: location of systemd config for systemd-udevd.service
+docker_conf_dir: location of the Docker configuration directory
+docker_systemd_dir: location of the systemd directory for Docker
docker_udev_workaround: raises udevd timeout to 5 minutes (https://bugzilla.redhat.com/show_bug.cgi?id=1272446)
+udevw_udevd_dir: location of systemd config for systemd-udevd.service
Dependencies
------------
@@ -26,6 +30,7 @@ Example Playbook
roles:
- role: docker
docker_udev_workaround: "true"
+ docker_use_system_container: False
License
-------
diff --git a/roles/docker/handlers/main.yml b/roles/docker/handlers/main.yml
index 9ccb306fc..7f91afb37 100644
--- a/roles/docker/handlers/main.yml
+++ b/roles/docker/handlers/main.yml
@@ -2,7 +2,7 @@
- name: restart docker
systemd:
- name: docker
+ name: "{{ openshift.docker.service_name }}"
state: restarted
when: not docker_service_status_changed | default(false) | bool
diff --git a/roles/docker/meta/main.yml b/roles/docker/meta/main.yml
index ad28cece9..cd4083572 100644
--- a/roles/docker/meta/main.yml
+++ b/roles/docker/meta/main.yml
@@ -11,3 +11,4 @@ galaxy_info:
- 7
dependencies:
- role: os_firewall
+- role: lib_openshift
diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml
index c34700aeb..0c2b16acf 100644
--- a/roles/docker/tasks/main.yml
+++ b/roles/docker/tasks/main.yml
@@ -1,119 +1,17 @@
---
-- name: Get current installed Docker version
- command: "{{ repoquery_cmd }} --installed --qf '%{version}' docker"
- when: not openshift.common.is_atomic | bool
- register: curr_docker_version
- changed_when: false
-
-- name: Error out if Docker pre-installed but too old
- fail:
- msg: "Docker {{ curr_docker_version.stdout }} is installed, but >= 1.9.1 is required."
- when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and curr_docker_version.stdout | version_compare('1.9.1', '<') and not docker_version is defined
-
-- name: Error out if requested Docker is too old
- fail:
- msg: "Docker {{ docker_version }} requested, but >= 1.9.1 is required."
- when: docker_version is defined and docker_version | version_compare('1.9.1', '<')
-
-# If a docker_version was requested, sanity check that we can install or upgrade to it, and
-# no downgrade is required.
-- name: Fail if Docker version requested but downgrade is required
- fail:
- msg: "Docker {{ curr_docker_version.stdout }} is installed, but version {{ docker_version }} was requested."
- when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and docker_version is defined and curr_docker_version.stdout | version_compare(docker_version, '>')
-
-# This involves an extremely slow migration process, users should instead run the
-# Docker 1.10 upgrade playbook to accomplish this.
-- name: Error out if attempting to upgrade Docker across the 1.10 boundary
- fail:
- msg: "Cannot upgrade Docker to >= 1.10, please upgrade or remove Docker manually, or use the Docker upgrade playbook if OpenShift is already installed."
- when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and curr_docker_version.stdout | version_compare('1.10', '<') and docker_version is defined and docker_version | version_compare('1.10', '>=')
-
-# Make sure Docker is installed, but does not update a running version.
-# Docker upgrades are handled by a separate playbook.
-- name: Install Docker
- package: name=docker{{ '-' + docker_version if docker_version is defined else '' }} state=present
- when: not openshift.common.is_atomic | bool
-
-- block:
- # Extend the default Docker service unit file when using iptables-services
- - name: Ensure docker.service.d directory exists
- file:
- path: "{{ docker_systemd_dir }}"
- state: directory
-
- - name: Configure Docker service unit file
- template:
- dest: "{{ docker_systemd_dir }}/custom.conf"
- src: custom.conf.j2
- when: not os_firewall_use_firewalld | default(True) | bool
+# These tasks dispatch to the proper set of docker tasks based on the
+# inventory:openshift_docker_use_system_container variable
- include: udev_workaround.yml
when: docker_udev_workaround | default(False) | bool
-- stat: path=/etc/sysconfig/docker
- register: docker_check
-
-- name: Set registry params
- lineinfile:
- dest: /etc/sysconfig/docker
- regexp: '^{{ item.reg_conf_var }}=.*$'
- line: "{{ item.reg_conf_var }}='{{ item.reg_fact_val | oo_prepend_strings_in_list(item.reg_flag ~ ' ') | join(' ') }}'"
- when: item.reg_fact_val != '' and docker_check.stat.isreg is defined and docker_check.stat.isreg
- with_items:
- - reg_conf_var: ADD_REGISTRY
- reg_fact_val: "{{ docker_additional_registries | default(None, true)}}"
- reg_flag: --add-registry
- - reg_conf_var: BLOCK_REGISTRY
- reg_fact_val: "{{ docker_blocked_registries| default(None, true) }}"
- reg_flag: --block-registry
- - reg_conf_var: INSECURE_REGISTRY
- reg_fact_val: "{{ docker_insecure_registries| default(None, true) }}"
- reg_flag: --insecure-registry
- notify:
- - restart docker
-
-- name: Set Proxy Settings
- lineinfile:
- dest: /etc/sysconfig/docker
- regexp: '^{{ item.reg_conf_var }}=.*$'
- line: "{{ item.reg_conf_var }}='{{ item.reg_fact_val }}'"
- state: "{{ 'present' if item.reg_fact_val != '' else 'absent'}}"
- with_items:
- - reg_conf_var: HTTP_PROXY
- reg_fact_val: "{{ docker_http_proxy | default('') }}"
- - reg_conf_var: HTTPS_PROXY
- reg_fact_val: "{{ docker_https_proxy | default('') }}"
- - reg_conf_var: NO_PROXY
- reg_fact_val: "{{ docker_no_proxy | default('') }}"
- notify:
- - restart docker
- when:
- - docker_check.stat.isreg is defined and docker_check.stat.isreg and '"http_proxy" in openshift.common or "https_proxy" in openshift.common'
-
-- name: Set various Docker options
- lineinfile:
- dest: /etc/sysconfig/docker
- regexp: '^OPTIONS=.*$'
- line: "OPTIONS='\
- {% if ansible_selinux.status | default(None) == '''enabled''' and docker_selinux_enabled | default(true) %} --selinux-enabled {% endif %}\
- {% if docker_log_driver is defined %} --log-driver {{ docker_log_driver }}{% endif %}\
- {% if docker_log_options is defined %} {{ docker_log_options | oo_split() | oo_prepend_strings_in_list('--log-opt ') | join(' ')}}{% endif %}\
- {% if docker_options is defined %} {{ docker_options }}{% endif %}\
- {% if docker_disable_push_dockerhub is defined %} --confirm-def-push={{ docker_disable_push_dockerhub | bool }}{% endif %}'"
- when: docker_check.stat.isreg is defined and docker_check.stat.isreg
- notify:
- - restart docker
-
-- name: Start the Docker service
- systemd:
- name: docker
- enabled: yes
- state: started
- daemon_reload: yes
- register: start_result
-
- set_fact:
- docker_service_status_changed: start_result | changed
+ l_use_system_container: "{{ openshift.docker.use_system_container | default(False) }}"
+
+- name: Use Package Docker if Requested
+ include: package_docker.yml
+ when: not l_use_system_container
-- meta: flush_handlers
+- name: Use System Container Docker if Requested
+ include: systemcontainer_docker.yml
+ when: l_use_system_container
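The dispatch above keys off the inventory variable named in the comment; a minimal sketch of the two possible settings:

    # inventory / group_vars sketch
    openshift_docker_use_system_container: True    # run Docker as a system container
    # openshift_docker_use_system_container: False # default: install the docker package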
diff --git a/roles/docker/tasks/package_docker.yml b/roles/docker/tasks/package_docker.yml
new file mode 100644
index 000000000..e101730d2
--- /dev/null
+++ b/roles/docker/tasks/package_docker.yml
@@ -0,0 +1,116 @@
+---
+- name: Get current installed Docker version
+ command: "{{ repoquery_cmd }} --installed --qf '%{version}' docker"
+ when: not openshift.common.is_atomic | bool
+ register: curr_docker_version
+ changed_when: false
+
+- name: Error out if Docker pre-installed but too old
+ fail:
+ msg: "Docker {{ curr_docker_version.stdout }} is installed, but >= 1.9.1 is required."
+ when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and curr_docker_version.stdout | version_compare('1.9.1', '<') and not docker_version is defined
+
+- name: Error out if requested Docker is too old
+ fail:
+ msg: "Docker {{ docker_version }} requested, but >= 1.9.1 is required."
+ when: docker_version is defined and docker_version | version_compare('1.9.1', '<')
+
+# If a docker_version was requested, sanity check that we can install or upgrade to it, and
+# no downgrade is required.
+- name: Fail if Docker version requested but downgrade is required
+ fail:
+ msg: "Docker {{ curr_docker_version.stdout }} is installed, but version {{ docker_version }} was requested."
+ when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and docker_version is defined and curr_docker_version.stdout | version_compare(docker_version, '>')
+
+# This involves an extremely slow migration process, users should instead run the
+# Docker 1.10 upgrade playbook to accomplish this.
+- name: Error out if attempting to upgrade Docker across the 1.10 boundary
+ fail:
+ msg: "Cannot upgrade Docker to >= 1.10, please upgrade or remove Docker manually, or use the Docker upgrade playbook if OpenShift is already installed."
+ when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and curr_docker_version.stdout | version_compare('1.10', '<') and docker_version is defined and docker_version | version_compare('1.10', '>=')
+
+# Make sure Docker is installed, but does not update a running version.
+# Docker upgrades are handled by a separate playbook.
+- name: Install Docker
+ package: name=docker{{ '-' + docker_version if docker_version is defined else '' }} state=present
+ when: not openshift.common.is_atomic | bool
+
+- block:
+ # Extend the default Docker service unit file when using iptables-services
+ - name: Ensure docker.service.d directory exists
+ file:
+ path: "{{ docker_systemd_dir }}"
+ state: directory
+
+ - name: Configure Docker service unit file
+ template:
+ dest: "{{ docker_systemd_dir }}/custom.conf"
+ src: custom.conf.j2
+ when: not os_firewall_use_firewalld | default(False) | bool
+
+- stat: path=/etc/sysconfig/docker
+ register: docker_check
+
+- name: Set registry params
+ lineinfile:
+ dest: /etc/sysconfig/docker
+ regexp: '^{{ item.reg_conf_var }}=.*$'
+ line: "{{ item.reg_conf_var }}='{{ item.reg_fact_val | oo_prepend_strings_in_list(item.reg_flag ~ ' ') | join(' ') }}'"
+ when: item.reg_fact_val != '' and docker_check.stat.isreg is defined and docker_check.stat.isreg
+ with_items:
+ - reg_conf_var: ADD_REGISTRY
+ reg_fact_val: "{{ docker_additional_registries | default(None, true)}}"
+ reg_flag: --add-registry
+ - reg_conf_var: BLOCK_REGISTRY
+ reg_fact_val: "{{ docker_blocked_registries| default(None, true) }}"
+ reg_flag: --block-registry
+ - reg_conf_var: INSECURE_REGISTRY
+ reg_fact_val: "{{ docker_insecure_registries| default(None, true) }}"
+ reg_flag: --insecure-registry
+ notify:
+ - restart docker
+
+- name: Set Proxy Settings
+ lineinfile:
+ dest: /etc/sysconfig/docker
+ regexp: '^{{ item.reg_conf_var }}=.*$'
+ line: "{{ item.reg_conf_var }}='{{ item.reg_fact_val }}'"
+ state: "{{ 'present' if item.reg_fact_val != '' else 'absent'}}"
+ with_items:
+ - reg_conf_var: HTTP_PROXY
+ reg_fact_val: "{{ docker_http_proxy | default('') }}"
+ - reg_conf_var: HTTPS_PROXY
+ reg_fact_val: "{{ docker_https_proxy | default('') }}"
+ - reg_conf_var: NO_PROXY
+ reg_fact_val: "{{ docker_no_proxy | default('') }}"
+ notify:
+ - restart docker
+ when:
+ - docker_check.stat.isreg is defined and docker_check.stat.isreg and ('http_proxy' in openshift.common or 'https_proxy' in openshift.common)
+
+- name: Set various Docker options
+ lineinfile:
+ dest: /etc/sysconfig/docker
+ regexp: '^OPTIONS=.*$'
+ line: "OPTIONS='\
+ {% if ansible_selinux.status | default(None) == '''enabled''' and docker_selinux_enabled | default(true) %} --selinux-enabled {% endif %}\
+ {% if docker_log_driver is defined %} --log-driver {{ docker_log_driver }}{% endif %}\
+ {% if docker_log_options is defined %} {{ docker_log_options | oo_split() | oo_prepend_strings_in_list('--log-opt ') | join(' ')}}{% endif %}\
+ {% if docker_options is defined %} {{ docker_options }}{% endif %}\
+ {% if docker_disable_push_dockerhub is defined %} --confirm-def-push={{ docker_disable_push_dockerhub | bool }}{% endif %}'"
+ when: docker_check.stat.isreg is defined and docker_check.stat.isreg
+ notify:
+ - restart docker
+
+- name: Start the Docker service
+ systemd:
+ name: docker
+ enabled: yes
+ state: started
+ daemon_reload: yes
+ register: start_result
+
+- set_fact:
+ docker_service_status_changed: start_result | changed
+
+- meta: flush_handlers
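As a reference for the variables these tasks consume, a minimal sketch of role-level values (illustrative shapes only, taken from the variable names used above; not a recommended configuration):

```yaml
# Illustrative inputs for the package-based Docker install.
docker_log_driver: json-file
docker_log_options: "max-size=50m,max-file=3"   # oo_split turns this into --log-opt flags
docker_additional_registries:
  - registry.example.com:5000
docker_insecure_registries:
  - registry.example.com:5000
docker_selinux_enabled: true
```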
diff --git a/roles/docker/tasks/systemcontainer_docker.yml b/roles/docker/tasks/systemcontainer_docker.yml
new file mode 100644
index 000000000..3af3e00b2
--- /dev/null
+++ b/roles/docker/tasks/systemcontainer_docker.yml
@@ -0,0 +1,162 @@
+---
+# If docker_options are provided we should fail. We should not install docker and ignore
+# the users configuration. NOTE: docker_options == inventory:openshift_docker_options
+- name: Fail quickly if openshift_docker_options are set
+ assert:
+ that:
+ - docker_options | default('') == ""
+ msg: |
+ Docker via System Container does not allow for the use of the openshift_docker_options
+ variable. If you want to use openshift_docker_options you will need to use the
+ traditional docker package install. Otherwise, comment out openshift_docker_options
+ in your inventory file.
+
+# Used to pull and install the system container
+- name: Ensure atomic is installed
+ package:
+ name: atomic
+ state: present
+ when: not openshift.common.is_atomic | bool
+
+# At the time of writing the atomic command requires runc for its own use. This
+# task is here in the event that the atomic package ever removes the dependency.
+- name: Ensure runc is installed
+ package:
+ name: runc
+ state: present
+ when: not openshift.common.is_atomic | bool
+
+# Make sure Docker is installed so we are able to use the client
+- name: Install Docker so we can use the client
+ package: name=docker{{ '-' + docker_version if docker_version is defined else '' }} state=present
+ when: not openshift.common.is_atomic | bool
+
+# Make sure docker is disabled. Errors are ignored.
+- name: Disable Docker
+ systemd:
+ name: docker
+ enabled: no
+ state: stopped
+ daemon_reload: yes
+ ignore_errors: True
+
+# Set http_proxy, https_proxy, and no_proxy in /etc/atomic.conf
+# regexp: the line starts with or without #, followed by the string
+# http_proxy, then either : or =
+- block:
+
+ - name: Add http_proxy to /etc/atomic.conf
+ lineinfile:
+ dest: /etc/atomic.conf
+ regexp: "^#?http_proxy[:=]{1}"
+ line: "http_proxy: {{ openshift.common.http_proxy | default('') }}"
+ when:
+ - openshift.common.http_proxy is defined
+ - openshift.common.http_proxy != ''
+
+ - name: Add https_proxy to /etc/atomic.conf
+ lineinfile:
+ dest: /etc/atomic.conf
+ regexp: "^#?https_proxy[:=]{1}"
+ line: "https_proxy: {{ openshift.common.https_proxy | default('') }}"
+ when:
+ - openshift.common.https_proxy is defined
+ - openshift.common.https_proxy != ''
+
+ - name: Add no_proxy to /etc/atomic.conf
+ lineinfile:
+ dest: /etc/atomic.conf
+ regexp: "^#?no_proxy[:=]{1}"
+ line: "no_proxy: {{ openshift.common.no_proxy | default('') }}"
+ when:
+ - openshift.common.no_proxy is defined
+ - openshift.common.no_proxy != ''
+
+- block:
+
+ - name: Set to default prepend
+ set_fact:
+ l_docker_image_prepend: "gscrivano"
+
+ - name: Use Red Hat Registry for image when distribution is Red Hat
+ set_fact:
+ l_docker_image_prepend: "registry.access.redhat.com/openshift3"
+ when: ansible_distribution == 'RedHat'
+
+ - name: Use Fedora Registry for image when distribution is Fedora
+ set_fact:
+ l_docker_image_prepend: "registry.fedoraproject.org"
+ when: ansible_distribution == 'Fedora'
+
+ # For https://github.com/openshift/openshift-ansible/pull/4049#discussion_r114478504
+ - name: Use a testing registry if requested
+ set_fact:
+ l_docker_image_prepend: "{{ openshift_docker_systemcontainer_image_registry_override }}"
+ when:
+ - openshift_docker_systemcontainer_image_registry_override is defined
+ - openshift_docker_systemcontainer_image_registry_override != ""
+
+ - name: Set the full image name
+ set_fact:
+ l_docker_image: "{{ l_docker_image_prepend }}/{{ openshift.docker.service_name }}:latest"
+
+# NOTE: no_proxy added as a workaround until https://github.com/projectatomic/atomic/pull/999 is released
+- name: Pre-pull Container Engine System Container image
+ command: "atomic pull --storage ostree {{ l_docker_image }}"
+ changed_when: false
+ environment:
+ NO_PROXY: "{{ openshift.common.no_proxy | default('') }}"
+
+
+- name: Ensure container-engine.service.d directory exists
+ file:
+ path: "{{ container_engine_systemd_dir }}"
+ state: directory
+
+- name: Ensure /etc/docker directory exists
+ file:
+ path: "{{ docker_conf_dir }}"
+ state: directory
+
+- name: Install Container Engine System Container
+ oc_atomic_container:
+ name: "{{ openshift.docker.service_name }}"
+ image: "{{ l_docker_image }}"
+ state: latest
+ values:
+ - "system-package=no"
+
+- name: Configure Container Engine Service File
+ template:
+ dest: "{{ container_engine_systemd_dir }}/custom.conf"
+ src: systemcontainercustom.conf.j2
+
+# Set local versions of facts that must be in json format for daemon.json
+# NOTE: When jinja2.9+ is used the daemon.json file can move to using tojson
+- set_fact:
+ l_docker_insecure_registries: "{{ docker_insecure_registries | default([]) | to_json }}"
+ l_docker_log_options: "{{ docker_log_options | default({}) | to_json }}"
+ l_docker_additional_registries: "{{ docker_additional_registries | default([]) | to_json }}"
+ l_docker_blocked_registries: "{{ docker_blocked_registries | default([]) | to_json }}"
+ l_docker_selinux_enabled: "{{ docker_selinux_enabled | default(true) | to_json }}"
+
+# Configure container-engine using the daemon.json file
+- name: Configure Container Engine
+ template:
+ dest: "{{ docker_conf_dir }}/daemon.json"
+ src: daemon.json
+
+# Enable and start the container-engine service
+- name: Start the Container Engine service
+ systemd:
+ name: "{{ openshift.docker.service_name }}"
+ enabled: yes
+ state: started
+ daemon_reload: yes
+ register: start_result
+
+- set_fact:
+ docker_service_status_changed: start_result | changed
+
+- meta: flush_handlers
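For the testing-registry override referenced above, a minimal sketch of the value it expects; the registry host is a placeholder:

```yaml
# Illustrative only: pull the Container Engine system container image from a
# test registry via the override consumed by the tasks above.
openshift_docker_systemcontainer_image_registry_override: "registry.example.com/testing"
```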
diff --git a/roles/docker/templates/daemon.json b/roles/docker/templates/daemon.json
new file mode 100644
index 000000000..a41b7cdbd
--- /dev/null
+++ b/roles/docker/templates/daemon.json
@@ -0,0 +1,20 @@
+{
+ "authorization-plugins": ["rhel-push-plugin"],
+ "default-runtime": "oci",
+ "containerd": "/run/containerd.sock",
+ "disable-legacy-registry": false,
+ "exec-opts": ["native.cgroupdriver=systemd"],
+ "insecure-registries": {{ l_docker_insecure_registries }},
+{% if docker_log_driver is defined %}
+ "log-driver": "{{ docker_log_driver }}",
+{%- endif %}
+ "log-opts": {{ l_docker_log_options }},
+ "runtimes": {
+ "oci": {
+ "path": "/usr/libexec/docker/docker-runc-current"
+ }
+ },
+ "selinux-enabled": {{ l_docker_selinux_enabled | lower }},
+ "add-registry": {{ l_docker_additional_registries }},
+ "block-registry": {{ l_docker_blocked_registries }}
+}
diff --git a/roles/docker/templates/systemcontainercustom.conf.j2 b/roles/docker/templates/systemcontainercustom.conf.j2
new file mode 100644
index 000000000..86eebfba6
--- /dev/null
+++ b/roles/docker/templates/systemcontainercustom.conf.j2
@@ -0,0 +1,17 @@
+# {{ ansible_managed }}
+
+[Service]
+{% if "http_proxy" in openshift.common %}
+Environment=HTTP_PROXY={{ docker_http_proxy }}
+{% endif -%}
+{% if "https_proxy" in openshift.common %}
+Environment=HTTPS_PROXY={{ docker_https_proxy }}
+{% endif -%}
+{% if "no_proxy" in openshift.common %}
+Environment=NO_PROXY={{ docker_no_proxy }}
+{% endif %}
+{%- if os_firewall_use_firewalld|default(false) %}
+[Unit]
+Wants=iptables.service
+After=iptables.service
+{%- endif %}
diff --git a/roles/docker/vars/main.yml b/roles/docker/vars/main.yml
index 5237ed8f2..4e940b7f5 100644
--- a/roles/docker/vars/main.yml
+++ b/roles/docker/vars/main.yml
@@ -1,3 +1,5 @@
---
-udevw_udevd_dir: /etc/systemd/system/systemd-udevd.service.d
docker_systemd_dir: /etc/systemd/system/docker.service.d
+container_engine_systemd_dir: /etc/systemd/system/container-engine.service.d
+docker_conf_dir: /etc/docker/
+udevw_udevd_dir: /etc/systemd/system/systemd-udevd.service.d
diff --git a/roles/etcd/defaults/main.yaml b/roles/etcd/defaults/main.yaml
index 29153f4df..e45f53219 100644
--- a/roles/etcd/defaults/main.yaml
+++ b/roles/etcd/defaults/main.yaml
@@ -13,5 +13,4 @@ etcd_listen_peer_urls: "{{ etcd_peer_url_scheme }}://{{ etcd_ip }}:{{ etcd_peer_
etcd_advertise_client_urls: "{{ etcd_url_scheme }}://{{ etcd_ip }}:{{ etcd_client_port }}"
etcd_listen_client_urls: "{{ etcd_url_scheme }}://{{ etcd_ip }}:{{ etcd_client_port }}"
-etcd_data_dir: /var/lib/etcd/
etcd_systemd_dir: "/etc/systemd/system/{{ etcd_service }}.service.d"
diff --git a/roles/etcd/files/etcdctl.sh b/roles/etcd/files/etcdctl.sh
deleted file mode 100644
index 0e324a8a9..000000000
--- a/roles/etcd/files/etcdctl.sh
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/bin/bash
-# Sets up handy aliases for etcd, need etcdctl2 and etcdctl3 because
-# command flags are different between the two. Should work on stand
-# alone etcd hosts and master + etcd hosts too because we use the peer keys.
-etcdctl2() {
- /usr/bin/etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://`hostname`:2379 ${@}
-}
-
-etcdctl3() {
- ETCDCTL_API=3 /usr/bin/etcdctl --cert /etc/etcd/peer.crt --key /etc/etcd/peer.key --cacert /etc/etcd/ca.crt --endpoints https://`hostname`:2379 ${@}
-}
diff --git a/roles/etcd/meta/main.yml b/roles/etcd/meta/main.yml
index e0c70a181..689c07a84 100644
--- a/roles/etcd/meta/main.yml
+++ b/roles/etcd/meta/main.yml
@@ -24,3 +24,4 @@ dependencies:
- service: etcd peering
port: "{{ etcd_peer_port }}/tcp"
- role: etcd_server_certificates
+- role: etcd_common
diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml
index c09da3b61..fa2f44609 100644
--- a/roles/etcd/tasks/main.yml
+++ b/roles/etcd/tasks/main.yml
@@ -10,51 +10,45 @@
package: name=etcd{{ '-' + etcd_version if etcd_version is defined else '' }} state=present
when: not etcd_is_containerized | bool
-- name: Pull etcd container
- command: docker pull {{ openshift.etcd.etcd_image }}
- register: pull_result
- changed_when: "'Downloaded newer image' in pull_result.stdout"
+- block:
+ - name: Pull etcd container
+ command: docker pull {{ openshift.etcd.etcd_image }}
+ register: pull_result
+ changed_when: "'Downloaded newer image' in pull_result.stdout"
+
+ - name: Install etcd container service file
+ template:
+ dest: "/etc/systemd/system/etcd_container.service"
+ src: etcd.docker.service
when:
- etcd_is_containerized | bool
- not openshift.common.is_etcd_system_container | bool
-- name: Install etcd container service file
- template:
- dest: "/etc/systemd/system/etcd_container.service"
- src: etcd.docker.service
- when:
- - etcd_is_containerized | bool
- - not openshift.common.is_etcd_system_container | bool
-
-
# Start secondary etcd instance for third party integrations
# TODO: Determine an alternative to using thirdparty variable
-
-- name: Create configuration directory
- file:
- path: "{{ etcd_conf_dir }}"
- state: directory
- mode: 0700
- when: etcd_is_thirdparty | bool
+- block:
+ - name: Create configuration directory
+ file:
+ path: "{{ etcd_conf_dir }}"
+ state: directory
+ mode: 0700
# TODO: retest with symlink to confirm it does or does not function
-- name: Copy service file for etcd instance
- copy:
- src: /usr/lib/systemd/system/etcd.service
- dest: "/etc/systemd/system/{{ etcd_service }}.service"
- remote_src: True
- when: etcd_is_thirdparty | bool
-
-- name: Create third party etcd service.d directory exists
- file:
- path: "{{ etcd_systemd_dir }}"
- state: directory
- when: etcd_is_thirdparty | bool
-
-- name: Configure third part etcd service unit file
- template:
- dest: "{{ etcd_systemd_dir }}/custom.conf"
- src: custom.conf.j2
+ - name: Copy service file for etcd instance
+ copy:
+ src: /usr/lib/systemd/system/etcd.service
+ dest: "/etc/systemd/system/{{ etcd_service }}.service"
+ remote_src: True
+
+ - name: Ensure third party etcd service.d directory exists
+ file:
+ path: "{{ etcd_systemd_dir }}"
+ state: directory
+
+ - name: Configure third party etcd service unit file
+ template:
+ dest: "{{ etcd_systemd_dir }}/custom.conf"
+ src: custom.conf.j2
when: etcd_is_thirdparty
# TODO: this task may not be needed with Validate permissions
@@ -80,28 +74,28 @@
command: systemctl daemon-reload
when: etcd_is_thirdparty | bool
-- name: Disable system etcd when containerized
- systemd:
- name: etcd
- state: stopped
- enabled: no
- masked: yes
- daemon_reload: yes
- when:
- - etcd_is_containerized | bool
- - not openshift.common.is_etcd_system_container | bool
- register: task_result
- failed_when: "task_result|failed and 'could not' not in task_result.msg|lower"
-
-- name: Install etcd container service file
- template:
- dest: "/etc/systemd/system/etcd_container.service"
- src: etcd.docker.service
- when: etcd_is_containerized | bool and not openshift.common.is_etcd_system_container | bool
-
-- name: Install Etcd system container
- include: system_container.yml
- when: etcd_is_containerized | bool and openshift.common.is_etcd_system_container | bool
+- block:
+ - name: Disable system etcd when containerized
+ systemd:
+ name: etcd
+ state: stopped
+ enabled: no
+ masked: yes
+ daemon_reload: yes
+ when: not openshift.common.is_etcd_system_container | bool
+ register: task_result
+ failed_when: task_result|failed and 'could not' not in task_result.msg|lower
+
+ - name: Install etcd container service file
+ template:
+ dest: "/etc/systemd/system/etcd_container.service"
+ src: etcd.docker.service
+ when: not openshift.common.is_etcd_system_container | bool
+
+ - name: Install Etcd system container
+ include: system_container.yml
+ when: openshift.common.is_etcd_system_container | bool
+ when: etcd_is_containerized | bool
- name: Validate permissions on the config dir
file:
@@ -126,7 +120,9 @@
enabled: yes
register: start_result
-- include: etcdctl.yml
+- include_role:
+ name: etcd_common
+ tasks_from: etcdctl.yml
when: openshift_etcd_etcdctl_profile | default(true) | bool
- name: Set fact etcd_service_status_changed
diff --git a/roles/etcd/templates/etcd.conf.j2 b/roles/etcd/templates/etcd.conf.j2
index 9151dd0bd..1b5598f46 100644
--- a/roles/etcd/templates/etcd.conf.j2
+++ b/roles/etcd/templates/etcd.conf.j2
@@ -62,7 +62,7 @@ ETCD_PEER_KEY_FILE={{ etcd_peer_key_file }}
{% endif -%}
#[logging]
-ETCD_DEBUG="{{ etcd_debug | default(false) | string }}"
+ETCD_DEBUG="{{ etcd_debug | default(false) | bool | string }}"
{% if etcd_log_package_levels is defined %}
ETCD_LOG_PACKAGE_LEVELS="{{ etcd_log_package_levels }}"
{% endif %}
diff --git a/roles/etcd/templates/etcd.docker.service b/roles/etcd/templates/etcd.docker.service
index ae059b549..adeca7a91 100644
--- a/roles/etcd/templates/etcd.docker.service
+++ b/roles/etcd/templates/etcd.docker.service
@@ -1,17 +1,17 @@
[Unit]
Description=The Etcd Server container
-After=docker.service
-Requires=docker.service
-PartOf=docker.service
+After={{ openshift.docker.service_name }}.service
+Requires={{ openshift.docker.service_name }}.service
+PartOf={{ openshift.docker.service_name }}.service
[Service]
-EnvironmentFile=/etc/etcd/etcd.conf
+EnvironmentFile={{ etcd_conf_file }}
ExecStartPre=-/usr/bin/docker rm -f {{ etcd_service }}
-ExecStart=/usr/bin/docker run --name {{ etcd_service }} --rm -v /var/lib/etcd:/var/lib/etcd:z -v /etc/etcd:/etc/etcd:ro --env-file=/etc/etcd/etcd.conf --net=host --entrypoint=/usr/bin/etcd {{ openshift.etcd.etcd_image }}
+ExecStart=/usr/bin/docker run --name {{ etcd_service }} --rm -v {{ etcd_data_dir }}:{{ etcd_data_dir }}:z -v {{ etcd_conf_dir }}:{{ etcd_conf_dir }}:ro --env-file={{ etcd_conf_file }} --net=host --entrypoint=/usr/bin/etcd {{ openshift.etcd.etcd_image }}
ExecStop=/usr/bin/docker stop {{ etcd_service }}
SyslogIdentifier=etcd_container
Restart=always
RestartSec=5s
[Install]
-WantedBy=docker.service
+WantedBy={{ openshift.docker.service_name }}.service
diff --git a/roles/etcd_common/README.md b/roles/etcd_common/README.md
index 131a01490..d1c3a6602 100644
--- a/roles/etcd_common/README.md
+++ b/roles/etcd_common/README.md
@@ -1,17 +1,21 @@
etcd_common
========================
-TODO
+Common resources for dependent etcd roles. E.g. default variables for:
+* config directories
+* certificates
+* ports
+* other settings
-Requirements
-------------
-
-TODO
+It also provides the `delegated_serial_command` Ansible module for executing a command on a remote node, e.g.:
-Role Variables
---------------
+```yaml
+- delegated_serial_command:
+ command: /usr/bin/make_database.sh arg1 arg2
+ creates: /path/to/database
+```
-TODO
+It also provides the etcdctl.yml task file for installing `etcdctl` aliases on a node (see the example below).
Dependencies
------------
@@ -21,7 +25,22 @@ openshift-repos
Example Playbook
----------------
-TODO
+**Drop etcdctl aliases**
+
+```yaml
+- include_role:
+ name: etcd_common
+ tasks_from: etcdctl
+```
+
+**Get access to common variables**
+
+```yaml
+# meta.yml of etcd
+...
+dependencies:
+- { role: etcd_common }
+```
License
-------
diff --git a/roles/etcd_common/defaults/main.yml b/roles/etcd_common/defaults/main.yml
index c5efb0a0c..d12e6a07f 100644
--- a/roles/etcd_common/defaults/main.yml
+++ b/roles/etcd_common/defaults/main.yml
@@ -35,3 +35,6 @@ etcd_ip: "{{ ansible_default_ipv4.address }}"
etcd_is_atomic: False
etcd_is_containerized: False
etcd_is_thirdparty: False
+
+# etcd dir vars
+etcd_data_dir: /var/lib/etcd/
diff --git a/roles/etcd/tasks/etcdctl.yml b/roles/etcd_common/tasks/etcdctl.yml
index 649ad23c1..6cb456677 100644
--- a/roles/etcd/tasks/etcdctl.yml
+++ b/roles/etcd_common/tasks/etcdctl.yml
@@ -4,9 +4,9 @@
when: not openshift.common.is_atomic | bool
- name: Configure etcd profile.d aliases
- copy:
- src: etcdctl.sh
- dest: /etc/profile.d/etcdctl.sh
+ template:
+ dest: "/etc/profile.d/etcdctl.sh"
+ src: etcdctl.sh.j2
mode: 0755
owner: root
group: root
diff --git a/roles/etcd_common/templates/etcdctl.sh.j2 b/roles/etcd_common/templates/etcdctl.sh.j2
new file mode 100644
index 000000000..ac7d9c72f
--- /dev/null
+++ b/roles/etcd_common/templates/etcdctl.sh.j2
@@ -0,0 +1,12 @@
+#!/bin/bash
+# Sets up handy aliases for etcd; we need etcdctl2 and etcdctl3 because
+# command flags are different between the two. Should work on standalone
+# etcd hosts and on master + etcd hosts too because we use the peer keys.
+etcdctl2() {
+ /usr/bin/etcdctl --cert-file {{ etcd_peer_cert_file }} --key-file {{ etcd_peer_key_file }} --ca-file {{ etcd_peer_ca_file }} -C https://`hostname`:2379 ${@}
+
+}
+
+etcdctl3() {
+ ETCDCTL_API=3 /usr/bin/etcdctl --cert {{ etcd_peer_cert_file }} --key {{ etcd_peer_key_file }} --cacert {{ etcd_peer_ca_file }} --endpoints https://`hostname`:2379 ${@}
+}
diff --git a/roles/etcd_server_certificates/meta/main.yml b/roles/etcd_server_certificates/meta/main.yml
index 98c913dba..b453f2bd8 100644
--- a/roles/etcd_server_certificates/meta/main.yml
+++ b/roles/etcd_server_certificates/meta/main.yml
@@ -13,4 +13,4 @@ galaxy_info:
- cloud
- system
dependencies:
-- role: openshift_etcd_ca
+- role: etcd_ca
diff --git a/roles/flannel/handlers/main.yml b/roles/flannel/handlers/main.yml
index 94d1d18fb..c60c2115a 100644
--- a/roles/flannel/handlers/main.yml
+++ b/roles/flannel/handlers/main.yml
@@ -5,4 +5,6 @@
- name: restart docker
become: yes
- systemd: name=docker state=restarted
+ systemd:
+ name: "{{ openshift.docker.service_name }}"
+ state: restarted
diff --git a/roles/lib_openshift/library/oc_adm_ca_server_cert.py b/roles/lib_openshift/library/oc_adm_ca_server_cert.py
index 4d083c4d5..a6273cfe4 100644
--- a/roles/lib_openshift/library/oc_adm_ca_server_cert.py
+++ b/roles/lib_openshift/library/oc_adm_ca_server_cert.py
@@ -166,7 +166,7 @@ class YeditException(Exception): # pragma: no cover
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -900,6 +900,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -1073,7 +1080,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
@@ -1527,6 +1534,10 @@ class CAServerCert(OpenShiftCLI):
def run_ansible(params, check_mode):
'''run the idempotent ansible code'''
+ # Filter non-strings from hostnames list s.t. the omit filter
+ # may be used to conditionally add a hostname.
+ params['hostnames'] = [host for host in params['hostnames'] if isinstance(host, string_types)]
+
config = CAServerCertConfig(params['kubeconfig'],
params['debug'],
{'cert': {'value': params['cert'], 'include': True},
@@ -1576,6 +1587,10 @@ class CAServerCert(OpenShiftCLI):
# -*- -*- -*- Begin included fragment: ansible/oc_adm_ca_server_cert.py -*- -*- -*-
+
+# pylint: disable=wrong-import-position
+from ansible.module_utils.six import string_types
+
def main():
'''
ansible oc adm module for ca create-server-cert
diff --git a/roles/lib_openshift/library/oc_adm_manage_node.py b/roles/lib_openshift/library/oc_adm_manage_node.py
index 48e80a7cd..7493b5c3d 100644
--- a/roles/lib_openshift/library/oc_adm_manage_node.py
+++ b/roles/lib_openshift/library/oc_adm_manage_node.py
@@ -152,7 +152,7 @@ class YeditException(Exception): # pragma: no cover
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -886,6 +886,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -1059,7 +1066,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
diff --git a/roles/lib_openshift/library/oc_adm_policy_group.py b/roles/lib_openshift/library/oc_adm_policy_group.py
index 35168d1a3..5e72f5954 100644
--- a/roles/lib_openshift/library/oc_adm_policy_group.py
+++ b/roles/lib_openshift/library/oc_adm_policy_group.py
@@ -138,7 +138,7 @@ class YeditException(Exception): # pragma: no cover
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -872,6 +872,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -1045,7 +1052,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
diff --git a/roles/lib_openshift/library/oc_adm_policy_user.py b/roles/lib_openshift/library/oc_adm_policy_user.py
index 5f7e4b8fa..371a3953b 100644
--- a/roles/lib_openshift/library/oc_adm_policy_user.py
+++ b/roles/lib_openshift/library/oc_adm_policy_user.py
@@ -138,7 +138,7 @@ class YeditException(Exception): # pragma: no cover
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -872,6 +872,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -1045,7 +1052,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
@@ -1960,7 +1967,7 @@ class PolicyUser(OpenShiftCLI):
@property
def policybindings(self):
if self._policy_bindings is None:
- results = self._get('clusterpolicybindings', None)
+ results = self._get('policybindings', None)
if results['returncode'] != 0:
raise OpenShiftCLIError('Could not retrieve policybindings')
self._policy_bindings = results['results'][0]['items'][0]
diff --git a/roles/lib_openshift/library/oc_adm_registry.py b/roles/lib_openshift/library/oc_adm_registry.py
index a6718d921..7240521c6 100644
--- a/roles/lib_openshift/library/oc_adm_registry.py
+++ b/roles/lib_openshift/library/oc_adm_registry.py
@@ -256,7 +256,7 @@ class YeditException(Exception): # pragma: no cover
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -990,6 +990,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -1163,7 +1170,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
@@ -2531,25 +2538,34 @@ class Registry(OpenShiftCLI):
def run_ansible(params, check_mode):
'''run idempotent ansible code'''
+ registry_options = {'images': {'value': params['images'], 'include': True},
+ 'latest_images': {'value': params['latest_images'], 'include': True},
+ 'labels': {'value': params['labels'], 'include': True},
+ 'ports': {'value': ','.join(params['ports']), 'include': True},
+ 'replicas': {'value': params['replicas'], 'include': True},
+ 'selector': {'value': params['selector'], 'include': True},
+ 'service_account': {'value': params['service_account'], 'include': True},
+ 'mount_host': {'value': params['mount_host'], 'include': True},
+ 'env_vars': {'value': params['env_vars'], 'include': False},
+ 'volume_mounts': {'value': params['volume_mounts'], 'include': False},
+ 'edits': {'value': params['edits'], 'include': False},
+ 'tls_key': {'value': params['tls_key'], 'include': True},
+ 'tls_certificate': {'value': params['tls_certificate'], 'include': True},
+ }
+
+ # Do not always pass the daemonset and enforce-quota parameters because they are not understood
+ # by old versions of oc.
+ # Default value is false. So, it's safe to not pass an explicit false value to oc versions which
+ # understand these parameters.
+ if params['daemonset']:
+ registry_options['daemonset'] = {'value': params['daemonset'], 'include': True}
+ if params['enforce_quota']:
+ registry_options['enforce_quota'] = {'value': params['enforce_quota'], 'include': True}
+
rconfig = RegistryConfig(params['name'],
params['namespace'],
params['kubeconfig'],
- {'images': {'value': params['images'], 'include': True},
- 'latest_images': {'value': params['latest_images'], 'include': True},
- 'labels': {'value': params['labels'], 'include': True},
- 'ports': {'value': ','.join(params['ports']), 'include': True},
- 'replicas': {'value': params['replicas'], 'include': True},
- 'selector': {'value': params['selector'], 'include': True},
- 'service_account': {'value': params['service_account'], 'include': True},
- 'mount_host': {'value': params['mount_host'], 'include': True},
- 'env_vars': {'value': params['env_vars'], 'include': False},
- 'volume_mounts': {'value': params['volume_mounts'], 'include': False},
- 'edits': {'value': params['edits'], 'include': False},
- 'enforce_quota': {'value': params['enforce_quota'], 'include': True},
- 'daemonset': {'value': params['daemonset'], 'include': True},
- 'tls_key': {'value': params['tls_key'], 'include': True},
- 'tls_certificate': {'value': params['tls_certificate'], 'include': True},
- })
+ registry_options)
ocregistry = Registry(rconfig, params['debug'])
diff --git a/roles/lib_openshift/library/oc_adm_router.py b/roles/lib_openshift/library/oc_adm_router.py
index 0e4b336fb..a54c62cd4 100644
--- a/roles/lib_openshift/library/oc_adm_router.py
+++ b/roles/lib_openshift/library/oc_adm_router.py
@@ -281,7 +281,7 @@ class YeditException(Exception): # pragma: no cover
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -1015,6 +1015,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -1188,7 +1195,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
diff --git a/roles/lib_openshift/library/oc_clusterrole.py b/roles/lib_openshift/library/oc_clusterrole.py
index a34ce351e..78c72ef26 100644
--- a/roles/lib_openshift/library/oc_clusterrole.py
+++ b/roles/lib_openshift/library/oc_clusterrole.py
@@ -130,7 +130,7 @@ class YeditException(Exception): # pragma: no cover
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -864,6 +864,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -1037,7 +1044,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
@@ -1531,10 +1538,10 @@ class Rule(object):
results = []
for rule in inc_rules:
- results.append(Rule(rule['apiGroups'],
- rule['attributeRestrictions'],
- rule['resources'],
- rule['verbs']))
+ results.append(Rule(rule.get('apiGroups', ['']),
+ rule.get('attributeRestrictions', None),
+ rule.get('resources', []),
+ rule.get('verbs', [])))
return results
@@ -1633,7 +1640,7 @@ class OCClusterRole(OpenShiftCLI):
@property
def clusterrole(self):
''' property for clusterrole'''
- if not self._clusterrole:
+ if self._clusterrole is None:
self.get()
return self._clusterrole
@@ -1669,6 +1676,7 @@ class OCClusterRole(OpenShiftCLI):
elif 'clusterrole "{}" not found'.format(self.name) in result['stderr']:
result['returncode'] = 0
+ self.clusterrole = None
return result
@@ -1738,6 +1746,9 @@ class OCClusterRole(OpenShiftCLI):
# Create it here
api_rval = oc_clusterrole.create()
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
# return the created object
api_rval = oc_clusterrole.get()
diff --git a/roles/lib_openshift/library/oc_configmap.py b/roles/lib_openshift/library/oc_configmap.py
index 69dd23a0e..c88f56fc6 100644
--- a/roles/lib_openshift/library/oc_configmap.py
+++ b/roles/lib_openshift/library/oc_configmap.py
@@ -136,7 +136,7 @@ class YeditException(Exception): # pragma: no cover
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -870,6 +870,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -1043,7 +1050,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
diff --git a/roles/lib_openshift/library/oc_edit.py b/roles/lib_openshift/library/oc_edit.py
index 70329ccfe..17e3f7dde 100644
--- a/roles/lib_openshift/library/oc_edit.py
+++ b/roles/lib_openshift/library/oc_edit.py
@@ -180,7 +180,7 @@ class YeditException(Exception): # pragma: no cover
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -914,6 +914,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -1087,7 +1094,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
diff --git a/roles/lib_openshift/library/oc_env.py b/roles/lib_openshift/library/oc_env.py
index bda5eebc5..18ab97bc0 100644
--- a/roles/lib_openshift/library/oc_env.py
+++ b/roles/lib_openshift/library/oc_env.py
@@ -147,7 +147,7 @@ class YeditException(Exception): # pragma: no cover
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -881,6 +881,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -1054,7 +1061,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
diff --git a/roles/lib_openshift/library/oc_group.py b/roles/lib_openshift/library/oc_group.py
index 462e14868..88c6ef209 100644
--- a/roles/lib_openshift/library/oc_group.py
+++ b/roles/lib_openshift/library/oc_group.py
@@ -120,7 +120,7 @@ class YeditException(Exception): # pragma: no cover
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -854,6 +854,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -1027,7 +1034,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
diff --git a/roles/lib_openshift/library/oc_image.py b/roles/lib_openshift/library/oc_image.py
index 8aed060bb..45860cbe5 100644
--- a/roles/lib_openshift/library/oc_image.py
+++ b/roles/lib_openshift/library/oc_image.py
@@ -139,7 +139,7 @@ class YeditException(Exception): # pragma: no cover
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -873,6 +873,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -1046,7 +1053,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
diff --git a/roles/lib_openshift/library/oc_label.py b/roles/lib_openshift/library/oc_label.py
index 0d18a7afe..65923a698 100644
--- a/roles/lib_openshift/library/oc_label.py
+++ b/roles/lib_openshift/library/oc_label.py
@@ -156,7 +156,7 @@ class YeditException(Exception): # pragma: no cover
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -890,6 +890,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -1063,7 +1070,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
diff --git a/roles/lib_openshift/library/oc_obj.py b/roles/lib_openshift/library/oc_obj.py
index 0b01670c6..1d75a21b9 100644
--- a/roles/lib_openshift/library/oc_obj.py
+++ b/roles/lib_openshift/library/oc_obj.py
@@ -98,7 +98,7 @@ options:
aliases: []
kind:
description:
- - The kind attribute of the object. e.g. dc, bc, svc, route
+ - The kind attribute of the object. e.g. dc, bc, svc, route. May be a comma-separated list, e.g. "dc,po,svc".
required: True
default: None
aliases: []
@@ -159,7 +159,7 @@ class YeditException(Exception): # pragma: no cover
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -893,6 +893,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -1066,7 +1073,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
@@ -1539,12 +1546,9 @@ class OCObject(OpenShiftCLI):
# Delete
########
if state == 'absent':
- # if we were passed a name, verify its not in our results
- if params['name'] is not None and not Utils.exists(api_rval['results'], params['name']):
- return {'changed': False, 'state': state}
-
- # verify results are empty for the selector
- if params['selector'] is not None and len(api_rval['results']) == 0:
+ # verify its not in our results
+ if (params['name'] is not None or params['selector'] is not None) and \
+ (len(api_rval['results']) == 0 or len(api_rval['results'][0].get('items', [])) == 0):
return {'changed': False, 'state': state}
if check_mode:
diff --git a/roles/lib_openshift/library/oc_objectvalidator.py b/roles/lib_openshift/library/oc_objectvalidator.py
index 9b321b47c..72add01f4 100644
--- a/roles/lib_openshift/library/oc_objectvalidator.py
+++ b/roles/lib_openshift/library/oc_objectvalidator.py
@@ -91,7 +91,7 @@ class YeditException(Exception): # pragma: no cover
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -825,6 +825,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -998,7 +1005,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
@@ -1391,8 +1398,10 @@ class OCObjectValidator(OpenShiftCLI):
# check if it uses a reserved name
name = namespace['metadata']['name']
if not any((name == 'kube',
+ name == 'kubernetes',
name == 'openshift',
name.startswith('kube-'),
+ name.startswith('kubernetes-'),
name.startswith('openshift-'),)):
return False
diff --git a/roles/lib_openshift/library/oc_process.py b/roles/lib_openshift/library/oc_process.py
index 34f80ce13..8e1ffe90f 100644
--- a/roles/lib_openshift/library/oc_process.py
+++ b/roles/lib_openshift/library/oc_process.py
@@ -148,7 +148,7 @@ class YeditException(Exception): # pragma: no cover
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -882,6 +882,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -1055,7 +1062,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
diff --git a/roles/lib_openshift/library/oc_project.py b/roles/lib_openshift/library/oc_project.py
index 331f31e41..a06852fd8 100644
--- a/roles/lib_openshift/library/oc_project.py
+++ b/roles/lib_openshift/library/oc_project.py
@@ -145,7 +145,7 @@ class YeditException(Exception): # pragma: no cover
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -879,6 +879,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -1052,7 +1059,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
diff --git a/roles/lib_openshift/library/oc_pvc.py b/roles/lib_openshift/library/oc_pvc.py
index 3e4601cc3..79673452d 100644
--- a/roles/lib_openshift/library/oc_pvc.py
+++ b/roles/lib_openshift/library/oc_pvc.py
@@ -140,7 +140,7 @@ class YeditException(Exception): # pragma: no cover
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -874,6 +874,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -1047,7 +1054,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
diff --git a/roles/lib_openshift/library/oc_route.py b/roles/lib_openshift/library/oc_route.py
index 755ab3b02..ad705a6c5 100644
--- a/roles/lib_openshift/library/oc_route.py
+++ b/roles/lib_openshift/library/oc_route.py
@@ -190,7 +190,7 @@ class YeditException(Exception): # pragma: no cover
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -924,6 +924,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -1097,7 +1104,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
diff --git a/roles/lib_openshift/library/oc_scale.py b/roles/lib_openshift/library/oc_scale.py
index 0c83338b0..291ac8b19 100644
--- a/roles/lib_openshift/library/oc_scale.py
+++ b/roles/lib_openshift/library/oc_scale.py
@@ -134,7 +134,7 @@ class YeditException(Exception): # pragma: no cover
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -868,6 +868,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -1041,7 +1048,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
diff --git a/roles/lib_openshift/library/oc_secret.py b/roles/lib_openshift/library/oc_secret.py
index 26e52a926..df28df2bc 100644
--- a/roles/lib_openshift/library/oc_secret.py
+++ b/roles/lib_openshift/library/oc_secret.py
@@ -180,7 +180,7 @@ class YeditException(Exception): # pragma: no cover
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -914,6 +914,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -1087,7 +1094,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
diff --git a/roles/lib_openshift/library/oc_service.py b/roles/lib_openshift/library/oc_service.py
index 440cda1b3..e98f83cc3 100644
--- a/roles/lib_openshift/library/oc_service.py
+++ b/roles/lib_openshift/library/oc_service.py
@@ -186,7 +186,7 @@ class YeditException(Exception): # pragma: no cover
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -920,6 +920,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -1093,7 +1100,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
diff --git a/roles/lib_openshift/library/oc_serviceaccount.py b/roles/lib_openshift/library/oc_serviceaccount.py
index 5eb36ee32..f00e9e4f6 100644
--- a/roles/lib_openshift/library/oc_serviceaccount.py
+++ b/roles/lib_openshift/library/oc_serviceaccount.py
@@ -132,7 +132,7 @@ class YeditException(Exception): # pragma: no cover
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -866,6 +866,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -1039,7 +1046,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
diff --git a/roles/lib_openshift/library/oc_serviceaccount_secret.py b/roles/lib_openshift/library/oc_serviceaccount_secret.py
index 1bc788e87..6691495a6 100644
--- a/roles/lib_openshift/library/oc_serviceaccount_secret.py
+++ b/roles/lib_openshift/library/oc_serviceaccount_secret.py
@@ -132,7 +132,7 @@ class YeditException(Exception): # pragma: no cover
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -866,6 +866,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -1039,7 +1046,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
diff --git a/roles/lib_openshift/library/oc_user.py b/roles/lib_openshift/library/oc_user.py
index 3009e661a..72f2fbf03 100644
--- a/roles/lib_openshift/library/oc_user.py
+++ b/roles/lib_openshift/library/oc_user.py
@@ -192,7 +192,7 @@ class YeditException(Exception): # pragma: no cover
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -926,6 +926,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -1099,7 +1106,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
diff --git a/roles/lib_openshift/library/oc_version.py b/roles/lib_openshift/library/oc_version.py
index 88f295a74..bc3340a94 100644
--- a/roles/lib_openshift/library/oc_version.py
+++ b/roles/lib_openshift/library/oc_version.py
@@ -104,7 +104,7 @@ class YeditException(Exception): # pragma: no cover
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -838,6 +838,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -1011,7 +1018,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
diff --git a/roles/lib_openshift/library/oc_volume.py b/roles/lib_openshift/library/oc_volume.py
index 5f936fb49..9dec0a6d4 100644
--- a/roles/lib_openshift/library/oc_volume.py
+++ b/roles/lib_openshift/library/oc_volume.py
@@ -80,6 +80,18 @@ options:
required: false
default: False
aliases: []
+ name:
+ description:
+ - Name of the object that is being queried.
+ required: false
+ default: None
+ aliases: []
+ vol_name:
+ description:
+ - Name of the volume that is being queried.
+ required: false
+ default: None
+ aliases: []
namespace:
description:
- The name of the namespace where the object lives
@@ -169,7 +181,7 @@ class YeditException(Exception): # pragma: no cover
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -903,6 +915,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -1076,7 +1095,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
diff --git a/roles/lib_openshift/src/ansible/oc_adm_ca_server_cert.py b/roles/lib_openshift/src/ansible/oc_adm_ca_server_cert.py
index 10f1c9b4b..fc394cb43 100644
--- a/roles/lib_openshift/src/ansible/oc_adm_ca_server_cert.py
+++ b/roles/lib_openshift/src/ansible/oc_adm_ca_server_cert.py
@@ -1,6 +1,10 @@
# pylint: skip-file
# flake8: noqa
+
+# pylint: disable=wrong-import-position
+from ansible.module_utils.six import string_types
+
def main():
'''
ansible oc adm module for ca create-server-cert
diff --git a/roles/lib_openshift/src/class/oc_adm_ca_server_cert.py b/roles/lib_openshift/src/class/oc_adm_ca_server_cert.py
index cf99a6584..37a64e4ef 100644
--- a/roles/lib_openshift/src/class/oc_adm_ca_server_cert.py
+++ b/roles/lib_openshift/src/class/oc_adm_ca_server_cert.py
@@ -96,6 +96,10 @@ class CAServerCert(OpenShiftCLI):
def run_ansible(params, check_mode):
'''run the idempotent ansible code'''
+ # Filter non-strings from hostnames list s.t. the omit filter
+ # may be used to conditionally add a hostname.
+ params['hostnames'] = [host for host in params['hostnames'] if isinstance(host, string_types)]
+
config = CAServerCertConfig(params['kubeconfig'],
params['debug'],
{'cert': {'value': params['cert'], 'include': True},
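
The run_ansible change filters anything that is not a string out of the hostnames list, so a playbook can route a value through the omit filter (or leave a variable undefined) without the placeholder ending up in the certificate request. A small sketch of the same filter; the sample list is invented, and ansible.module_utils.six must be importable (falling back to six or plain str works the same way):

    from ansible.module_utils.six import string_types

    params = {'hostnames': ['api.example.com', None, 42, 'internal.example.com']}
    # Keep only real strings; non-string entries come from omitted or
    # defaulted inventory variables.
    params['hostnames'] = [host for host in params['hostnames']
                           if isinstance(host, string_types)]
    # -> ['api.example.com', 'internal.example.com']
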
diff --git a/roles/lib_openshift/src/class/oc_adm_policy_user.py b/roles/lib_openshift/src/class/oc_adm_policy_user.py
index 88fcc1ddc..37a685ebb 100644
--- a/roles/lib_openshift/src/class/oc_adm_policy_user.py
+++ b/roles/lib_openshift/src/class/oc_adm_policy_user.py
@@ -46,7 +46,7 @@ class PolicyUser(OpenShiftCLI):
@property
def policybindings(self):
if self._policy_bindings is None:
- results = self._get('clusterpolicybindings', None)
+ results = self._get('policybindings', None)
if results['returncode'] != 0:
raise OpenShiftCLIError('Could not retrieve policybindings')
self._policy_bindings = results['results'][0]['items'][0]
diff --git a/roles/lib_openshift/src/class/oc_adm_registry.py b/roles/lib_openshift/src/class/oc_adm_registry.py
index 720b44cdc..3c130fe28 100644
--- a/roles/lib_openshift/src/class/oc_adm_registry.py
+++ b/roles/lib_openshift/src/class/oc_adm_registry.py
@@ -331,25 +331,34 @@ class Registry(OpenShiftCLI):
def run_ansible(params, check_mode):
'''run idempotent ansible code'''
+ registry_options = {'images': {'value': params['images'], 'include': True},
+ 'latest_images': {'value': params['latest_images'], 'include': True},
+ 'labels': {'value': params['labels'], 'include': True},
+ 'ports': {'value': ','.join(params['ports']), 'include': True},
+ 'replicas': {'value': params['replicas'], 'include': True},
+ 'selector': {'value': params['selector'], 'include': True},
+ 'service_account': {'value': params['service_account'], 'include': True},
+ 'mount_host': {'value': params['mount_host'], 'include': True},
+ 'env_vars': {'value': params['env_vars'], 'include': False},
+ 'volume_mounts': {'value': params['volume_mounts'], 'include': False},
+ 'edits': {'value': params['edits'], 'include': False},
+ 'tls_key': {'value': params['tls_key'], 'include': True},
+ 'tls_certificate': {'value': params['tls_certificate'], 'include': True},
+ }
+
+ # Do not always pass the daemonset and enforce-quota parameters because they are not understood
+ # by old versions of oc.
+ # Default value is false. So, it's safe to not pass an explicit false value to oc versions which
+ # understand these parameters.
+ if params['daemonset']:
+ registry_options['daemonset'] = {'value': params['daemonset'], 'include': True}
+ if params['enforce_quota']:
+ registry_options['enforce_quota'] = {'value': params['enforce_quota'], 'include': True}
+
rconfig = RegistryConfig(params['name'],
params['namespace'],
params['kubeconfig'],
- {'images': {'value': params['images'], 'include': True},
- 'latest_images': {'value': params['latest_images'], 'include': True},
- 'labels': {'value': params['labels'], 'include': True},
- 'ports': {'value': ','.join(params['ports']), 'include': True},
- 'replicas': {'value': params['replicas'], 'include': True},
- 'selector': {'value': params['selector'], 'include': True},
- 'service_account': {'value': params['service_account'], 'include': True},
- 'mount_host': {'value': params['mount_host'], 'include': True},
- 'env_vars': {'value': params['env_vars'], 'include': False},
- 'volume_mounts': {'value': params['volume_mounts'], 'include': False},
- 'edits': {'value': params['edits'], 'include': False},
- 'enforce_quota': {'value': params['enforce_quota'], 'include': True},
- 'daemonset': {'value': params['daemonset'], 'include': True},
- 'tls_key': {'value': params['tls_key'], 'include': True},
- 'tls_certificate': {'value': params['tls_certificate'], 'include': True},
- })
+ registry_options)
ocregistry = Registry(rconfig, params['debug'])
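
Building registry_options as a dict first lets the module add --daemonset and --enforce-quota only when they are actually enabled, so older oc binaries that do not understand those flags keep working (their default is false anyway). Reduced to a toy sketch with made-up parameter values:

    def build_registry_options(params):
        options = {
            'images': {'value': params['images'], 'include': True},
            'replicas': {'value': params['replicas'], 'include': True},
        }
        # Only pass these to 'oc adm registry' when explicitly requested;
        # older oc releases reject the flags and false is already the default.
        if params.get('daemonset'):
            options['daemonset'] = {'value': params['daemonset'], 'include': True}
        if params.get('enforce_quota'):
            options['enforce_quota'] = {'value': params['enforce_quota'], 'include': True}
        return options

    build_registry_options({'images': 'openshift3/ose-${component}:${version}',
                            'replicas': 1, 'daemonset': False, 'enforce_quota': False})
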
diff --git a/roles/lib_openshift/src/class/oc_clusterrole.py b/roles/lib_openshift/src/class/oc_clusterrole.py
index 1d3d977db..ae6795446 100644
--- a/roles/lib_openshift/src/class/oc_clusterrole.py
+++ b/roles/lib_openshift/src/class/oc_clusterrole.py
@@ -22,7 +22,7 @@ class OCClusterRole(OpenShiftCLI):
@property
def clusterrole(self):
''' property for clusterrole'''
- if not self._clusterrole:
+ if self._clusterrole is None:
self.get()
return self._clusterrole
@@ -58,6 +58,7 @@ class OCClusterRole(OpenShiftCLI):
elif 'clusterrole "{}" not found'.format(self.name) in result['stderr']:
result['returncode'] = 0
+ self.clusterrole = None
return result
@@ -127,6 +128,9 @@ class OCClusterRole(OpenShiftCLI):
# Create it here
api_rval = oc_clusterrole.create()
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
# return the created object
api_rval = oc_clusterrole.get()
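
Switching the cache check from truthiness to "is None" distinguishes "never fetched" from "fetched but empty", get() now resets the cache when the clusterrole is missing, and the extra returncode check stops the module from reporting success when the create itself failed. The caching idea, cut down to a sketch (the attribute names mirror the diff, everything else is illustrative):

    class OCClusterRole(object):
        def __init__(self, name):
            self.name = name
            self._clusterrole = None

        @property
        def clusterrole(self):
            # Only None means "not fetched yet"; an empty dict is a valid
            # cached answer and must not trigger another lookup.
            if self._clusterrole is None:
                self._clusterrole = self.get()
            return self._clusterrole

        def get(self):
            return {}  # stand-in for the real 'oc get clusterrole' call
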
diff --git a/roles/lib_openshift/src/class/oc_obj.py b/roles/lib_openshift/src/class/oc_obj.py
index 667b98eac..6f0da3d5c 100644
--- a/roles/lib_openshift/src/class/oc_obj.py
+++ b/roles/lib_openshift/src/class/oc_obj.py
@@ -115,12 +115,9 @@ class OCObject(OpenShiftCLI):
# Delete
########
if state == 'absent':
- # if we were passed a name, verify its not in our results
- if params['name'] is not None and not Utils.exists(api_rval['results'], params['name']):
- return {'changed': False, 'state': state}
-
- # verify results are empty for the selector
- if params['selector'] is not None and len(api_rval['results']) == 0:
+ # verify its not in our results
+ if (params['name'] is not None or params['selector'] is not None) and \
+ (len(api_rval['results']) == 0 or len(api_rval['results'][0].get('items', [])) == 0):
return {'changed': False, 'state': state}
if check_mode:
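
For state=absent the two separate name/selector checks are collapsed into one condition that treats both an empty results list and an empty items list as "already absent". The same condition as a standalone helper (api_rval below is a made-up sample):

    def already_absent(api_rval, name=None, selector=None):
        if name is None and selector is None:
            return False
        results = api_rval.get('results', [])
        # Nothing returned, or the query returned an empty item list.
        return len(results) == 0 or len(results[0].get('items', [])) == 0

    api_rval = {'results': [{'items': []}]}
    print(already_absent(api_rval, name='myapp'))  # True -> report unchanged
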
diff --git a/roles/lib_openshift/src/class/oc_objectvalidator.py b/roles/lib_openshift/src/class/oc_objectvalidator.py
index 43f6cac67..c9fd3b532 100644
--- a/roles/lib_openshift/src/class/oc_objectvalidator.py
+++ b/roles/lib_openshift/src/class/oc_objectvalidator.py
@@ -35,8 +35,10 @@ class OCObjectValidator(OpenShiftCLI):
# check if it uses a reserved name
name = namespace['metadata']['name']
if not any((name == 'kube',
+ name == 'kubernetes',
name == 'openshift',
name.startswith('kube-'),
+ name.startswith('kubernetes-'),
name.startswith('openshift-'),)):
return False
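
The validator's reserved-name check now also covers "kubernetes" and the "kubernetes-" prefix. Extracted into a standalone function for illustration:

    def is_reserved_namespace(name):
        # Namespaces owned by the platform are skipped by the validator.
        return any((name == 'kube',
                    name == 'kubernetes',
                    name == 'openshift',
                    name.startswith('kube-'),
                    name.startswith('kubernetes-'),
                    name.startswith('openshift-')))

    print(is_reserved_namespace('kubernetes-dashboard'))  # True
    print(is_reserved_namespace('myproject'))             # False
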
diff --git a/roles/lib_openshift/src/doc/obj b/roles/lib_openshift/src/doc/obj
index e44843eb3..4ff912b2d 100644
--- a/roles/lib_openshift/src/doc/obj
+++ b/roles/lib_openshift/src/doc/obj
@@ -47,7 +47,7 @@ options:
aliases: []
kind:
description:
- - The kind attribute of the object. e.g. dc, bc, svc, route
+ - The kind attribute of the object. e.g. dc, bc, svc, route. May be a comma-separated list, e.g. "dc,po,svc".
required: True
default: None
aliases: []
diff --git a/roles/lib_openshift/src/doc/volume b/roles/lib_openshift/src/doc/volume
index 1d04afeef..43ff78c9f 100644
--- a/roles/lib_openshift/src/doc/volume
+++ b/roles/lib_openshift/src/doc/volume
@@ -29,6 +29,18 @@ options:
required: false
default: False
aliases: []
+ name:
+ description:
+ - Name of the object that is being queried.
+ required: false
+ default: None
+ aliases: []
+ vol_name:
+ description:
+ - Name of the volume that is being queried.
+ required: false
+ default: None
+ aliases: []
namespace:
description:
- The name of the namespace where the object lives
diff --git a/roles/lib_openshift/src/lib/base.py b/roles/lib_openshift/src/lib/base.py
index 1868b1420..2bf795e25 100644
--- a/roles/lib_openshift/src/lib/base.py
+++ b/roles/lib_openshift/src/lib/base.py
@@ -76,6 +76,13 @@ class OpenShiftCLI(object):
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
@@ -249,7 +256,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
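
src/lib/base.py is the template the generated library modules above are built from, which is why the same edits repeat there. The decode change makes the conversion of the oc subprocess output explicit instead of depending on the locale's default encoding, which can be ASCII under a C locale on Python 3. The subprocess pattern in isolation, with an illustrative command:

    import subprocess

    proc = subprocess.Popen(['echo', 'hello'],
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    stdout, stderr = proc.communicate()
    # Decode explicitly as UTF-8 so results do not vary with the host locale.
    print(proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8'))
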
diff --git a/roles/lib_openshift/src/lib/rule.py b/roles/lib_openshift/src/lib/rule.py
index 4590dcf90..fe5ed9723 100644
--- a/roles/lib_openshift/src/lib/rule.py
+++ b/roles/lib_openshift/src/lib/rule.py
@@ -136,9 +136,9 @@ class Rule(object):
results = []
for rule in inc_rules:
- results.append(Rule(rule['apiGroups'],
- rule['attributeRestrictions'],
- rule['resources'],
- rule['verbs']))
+ results.append(Rule(rule.get('apiGroups', ['']),
+ rule.get('attributeRestrictions', None),
+ rule.get('resources', []),
+ rule.get('verbs', [])))
return results
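
Using dict.get() with defaults lets rules that omit apiGroups, attributeRestrictions, resources or verbs be parsed without a KeyError. A short sketch (the Rule constructor and the sample rule are simplified stand-ins):

    class Rule(object):
        def __init__(self, api_groups, attribute_restrictions, resources, verbs):
            self.api_groups = api_groups
            self.attribute_restrictions = attribute_restrictions
            self.resources = resources
            self.verbs = verbs

    rule = {'resources': ['pods'], 'verbs': ['get', 'list']}  # no apiGroups key
    parsed = Rule(rule.get('apiGroups', ['']),
                  rule.get('attributeRestrictions', None),
                  rule.get('resources', []),
                  rule.get('verbs', []))
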
diff --git a/roles/lib_openshift/src/test/integration/filter_plugins/filters.py b/roles/lib_openshift/src/test/integration/filter_plugins/filters.py
index 6990a11a8..f350bd25d 100644
--- a/roles/lib_openshift/src/test/integration/filter_plugins/filters.py
+++ b/roles/lib_openshift/src/test/integration/filter_plugins/filters.py
@@ -1,6 +1,5 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
-# vim: expandtab:tabstop=4:shiftwidth=4
'''
Custom filters for use in testing
'''
diff --git a/roles/lib_openshift/src/test/integration/oc_label.yml b/roles/lib_openshift/src/test/integration/oc_label.yml
index b4e721407..22cf687c5 100755
--- a/roles/lib_openshift/src/test/integration/oc_label.yml
+++ b/roles/lib_openshift/src/test/integration/oc_label.yml
@@ -15,7 +15,7 @@
- name: ensure needed vars are defined
fail:
msg: "{{ item }} not defined"
- when: "{{ item }} is not defined"
+ when: item is not defined
with_items:
- cli_master_test # ansible inventory instance to run playbook against
diff --git a/roles/lib_openshift/src/test/integration/oc_user.yml b/roles/lib_openshift/src/test/integration/oc_user.yml
index ad1f9d188..9b4290052 100755
--- a/roles/lib_openshift/src/test/integration/oc_user.yml
+++ b/roles/lib_openshift/src/test/integration/oc_user.yml
@@ -14,7 +14,7 @@
- name: ensure needed vars are defined
fail:
msg: "{{ item }} no defined"
- when: "{{ item}} is not defined"
+ when: item is not defined
with_items:
- cli_master_test # ansible inventory instance to run playbook against
diff --git a/roles/lib_openshift/src/test/unit/test_oc_adm_registry.py b/roles/lib_openshift/src/test/unit/test_oc_adm_registry.py
index bab36fddc..97cf86170 100755
--- a/roles/lib_openshift/src/test/unit/test_oc_adm_registry.py
+++ b/roles/lib_openshift/src/test/unit/test_oc_adm_registry.py
@@ -205,10 +205,11 @@ class RegistryTest(unittest.TestCase):
}
]}'''
+ @mock.patch('oc_adm_registry.locate_oc_binary')
@mock.patch('oc_adm_registry.Utils._write')
@mock.patch('oc_adm_registry.Utils.create_tmpfile_copy')
@mock.patch('oc_adm_registry.Registry._run')
- def test_state_present(self, mock_cmd, mock_tmpfile_copy, mock_write):
+ def test_state_present(self, mock_cmd, mock_tmpfile_copy, mock_write, mock_oc_binary):
''' Testing state present '''
params = {'state': 'present',
'debug': False,
@@ -240,10 +241,9 @@ class RegistryTest(unittest.TestCase):
(0, '', ''),
]
- mock_tmpfile_copy.side_effect = [
- '/tmp/mocked_kubeconfig',
- '/tmp/mocked_kubeconfig',
- ]
+ mock_tmpfile_copy.return_value = '/tmp/mocked_kubeconfig'
+
+ mock_oc_binary.return_value = 'oc'
results = Registry.run_ansible(params, False)
@@ -254,7 +254,7 @@ class RegistryTest(unittest.TestCase):
mock_cmd.assert_has_calls([
mock.call(['oc', 'get', 'dc', 'docker-registry', '-o', 'json', '-n', 'default'], None),
mock.call(['oc', 'get', 'svc', 'docker-registry', '-o', 'json', '-n', 'default'], None),
- mock.call(['oc', 'adm', 'registry', '--daemonset=False', '--enforce-quota=False',
+ mock.call(['oc', 'adm', 'registry',
'--ports=5000', '--replicas=1', '--selector=type=infra',
'--service-account=registry', '--dry-run=True', '-o', 'json', '-n', 'default'], None),
mock.call(['oc', 'create', '-f', mock.ANY, '-n', 'default'], None),
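
The registry, router and objectvalidator unit tests now also patch locate_oc_binary, so they no longer require an oc binary on the machine running them; because decorators apply bottom-up, the new mock arrives as the last extra test argument. A minimal sketch of the same mocking with unittest.mock (the project itself imports the standalone mock package; the module and function below are placeholders, not the real test module):

    import unittest
    from unittest import mock

    def locate_oc_binary():
        raise RuntimeError('should be mocked in tests')

    class ExampleTest(unittest.TestCase):
        @mock.patch(__name__ + '.locate_oc_binary')
        def test_uses_mocked_binary(self, mock_oc_binary):
            mock_oc_binary.return_value = 'oc'
            self.assertEqual(locate_oc_binary(), 'oc')

    if __name__ == '__main__':
        unittest.main()
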
diff --git a/roles/lib_openshift/src/test/unit/test_oc_adm_router.py b/roles/lib_openshift/src/test/unit/test_oc_adm_router.py
index 51393dbaf..5481ac623 100755
--- a/roles/lib_openshift/src/test/unit/test_oc_adm_router.py
+++ b/roles/lib_openshift/src/test/unit/test_oc_adm_router.py
@@ -286,10 +286,11 @@ class RouterTest(unittest.TestCase):
]
}'''
+ @mock.patch('oc_adm_router.locate_oc_binary')
@mock.patch('oc_adm_router.Utils._write')
@mock.patch('oc_adm_router.Utils.create_tmpfile_copy')
@mock.patch('oc_adm_router.Router._run')
- def test_state_present(self, mock_cmd, mock_tmpfile_copy, mock_write):
+ def test_state_present(self, mock_cmd, mock_tmpfile_copy, mock_write, mock_oc_binary):
''' Testing a create '''
params = {'state': 'present',
'debug': False,
@@ -345,6 +346,10 @@ class RouterTest(unittest.TestCase):
'/tmp/mocked_kubeconfig',
]
+ mock_oc_binary.side_effect = [
+ 'oc',
+ ]
+
results = Router.run_ansible(params, False)
self.assertTrue(results['changed'])
diff --git a/roles/lib_openshift/src/test/unit/test_oc_objectvalidator.py b/roles/lib_openshift/src/test/unit/test_oc_objectvalidator.py
index da326742f..b19a5a880 100755
--- a/roles/lib_openshift/src/test/unit/test_oc_objectvalidator.py
+++ b/roles/lib_openshift/src/test/unit/test_oc_objectvalidator.py
@@ -25,9 +25,10 @@ class OCObjectValidatorTest(unittest.TestCase):
maxDiff = None
+ @mock.patch('oc_objectvalidator.locate_oc_binary')
@mock.patch('oc_objectvalidator.Utils.create_tmpfile_copy')
@mock.patch('oc_objectvalidator.OCObjectValidator._run')
- def test_no_data(self, mock_cmd, mock_tmpfile_copy):
+ def test_no_data(self, mock_cmd, mock_tmpfile_copy, mock_oc_binary):
''' Testing when both all objects are empty '''
# Arrange
@@ -62,6 +63,10 @@ class OCObjectValidatorTest(unittest.TestCase):
'/tmp/mocked_kubeconfig',
]
+ mock_oc_binary.side_effect = [
+ 'oc',
+ ]
+
# Act
results = OCObjectValidator.run_ansible(params)
@@ -76,9 +81,10 @@ class OCObjectValidatorTest(unittest.TestCase):
mock.call(['oc', 'get', 'namespace', '-o', 'json', '-n', 'default'], None),
])
+ @mock.patch('oc_objectvalidator.locate_oc_binary')
@mock.patch('oc_objectvalidator.Utils.create_tmpfile_copy')
@mock.patch('oc_objectvalidator.OCObjectValidator._run')
- def test_error_code(self, mock_cmd, mock_tmpfile_copy):
+ def test_error_code(self, mock_cmd, mock_tmpfile_copy, mock_oc_binary):
''' Testing when we fail to get objects '''
# Arrange
@@ -98,6 +104,10 @@ class OCObjectValidatorTest(unittest.TestCase):
'/tmp/mocked_kubeconfig',
]
+ mock_oc_binary.side_effect = [
+ 'oc'
+ ]
+
error_results = {
'returncode': 1,
'stderr': 'Error.',
@@ -120,9 +130,10 @@ class OCObjectValidatorTest(unittest.TestCase):
mock.call(['oc', 'get', 'hostsubnet', '-o', 'json', '-n', 'default'], None),
])
+ @mock.patch('oc_objectvalidator.locate_oc_binary')
@mock.patch('oc_objectvalidator.Utils.create_tmpfile_copy')
@mock.patch('oc_objectvalidator.OCObjectValidator._run')
- def test_valid_both(self, mock_cmd, mock_tmpfile_copy):
+ def test_valid_both(self, mock_cmd, mock_tmpfile_copy, mock_oc_binary):
''' Testing when both all objects are valid '''
# Arrange
@@ -427,6 +438,10 @@ class OCObjectValidatorTest(unittest.TestCase):
'/tmp/mocked_kubeconfig',
]
+ mock_oc_binary.side_effect = [
+ 'oc'
+ ]
+
# Act
results = OCObjectValidator.run_ansible(params)
@@ -441,9 +456,10 @@ class OCObjectValidatorTest(unittest.TestCase):
mock.call(['oc', 'get', 'namespace', '-o', 'json', '-n', 'default'], None),
])
+ @mock.patch('oc_objectvalidator.locate_oc_binary')
@mock.patch('oc_objectvalidator.Utils.create_tmpfile_copy')
@mock.patch('oc_objectvalidator.OCObjectValidator._run')
- def test_invalid_both(self, mock_cmd, mock_tmpfile_copy):
+ def test_invalid_both(self, mock_cmd, mock_tmpfile_copy, mock_oc_binary):
''' Testing when all objects are invalid '''
# Arrange
@@ -886,6 +902,10 @@ class OCObjectValidatorTest(unittest.TestCase):
'/tmp/mocked_kubeconfig',
]
+ mock_oc_binary.side_effect = [
+ 'oc'
+ ]
+
# Act
results = OCObjectValidator.run_ansible(params)
diff --git a/roles/lib_utils/library/repoquery.py b/roles/lib_utils/library/repoquery.py
index ee98470b0..cf33e48d5 100644
--- a/roles/lib_utils/library/repoquery.py
+++ b/roles/lib_utils/library/repoquery.py
@@ -34,6 +34,7 @@ import json # noqa: F401
import os # noqa: F401
import re # noqa: F401
import shutil # noqa: F401
+import tempfile # noqa: F401
try:
import ruamel.yaml as yaml # noqa: F401
@@ -421,15 +422,16 @@ class RepoqueryCLI(object):
class Repoquery(RepoqueryCLI):
''' Class to wrap the repoquery
'''
- # pylint: disable=too-many-arguments
+ # pylint: disable=too-many-arguments,too-many-instance-attributes
def __init__(self, name, query_type, show_duplicates,
- match_version, verbose):
+ match_version, ignore_excluders, verbose):
''' Constructor for YumList '''
super(Repoquery, self).__init__(None)
self.name = name
self.query_type = query_type
self.show_duplicates = show_duplicates
self.match_version = match_version
+ self.ignore_excluders = ignore_excluders
self.verbose = verbose
if self.match_version:
@@ -437,6 +439,8 @@ class Repoquery(RepoqueryCLI):
self.query_format = "%{version}|%{release}|%{arch}|%{repo}|%{version}-%{release}"
+ self.tmp_file = None
+
def build_cmd(self):
''' build the repoquery cmd options '''
@@ -448,6 +452,9 @@ class Repoquery(RepoqueryCLI):
if self.show_duplicates:
repo_cmd.append('--show-duplicates')
+ if self.ignore_excluders:
+ repo_cmd.append('--config=' + self.tmp_file.name)
+
repo_cmd.append(self.name)
return repo_cmd
@@ -519,6 +526,20 @@ class Repoquery(RepoqueryCLI):
def repoquery(self):
'''perform a repoquery '''
+ if self.ignore_excluders:
+ # Duplicate yum.conf and reset exclude= line to an empty string
+ # to clear a list of all excluded packages
+ self.tmp_file = tempfile.NamedTemporaryFile()
+
+ with open("/etc/yum.conf", "r") as file_handler:
+ yum_conf_lines = file_handler.readlines()
+
+ yum_conf_lines = ["exclude=" if l.startswith("exclude=") else l for l in yum_conf_lines]
+
+ with open(self.tmp_file.name, "w") as file_handler:
+ file_handler.writelines(yum_conf_lines)
+ file_handler.flush()
+
repoquery_cmd = self.build_cmd()
rval = self._repoquery_cmd(repoquery_cmd, True, 'raw')
@@ -541,6 +562,9 @@ class Repoquery(RepoqueryCLI):
else:
rval['package_found'] = False
+ if self.ignore_excluders:
+ self.tmp_file.close()
+
return rval
@staticmethod
@@ -552,6 +576,7 @@ class Repoquery(RepoqueryCLI):
params['query_type'],
params['show_duplicates'],
params['match_version'],
+ params['ignore_excluders'],
params['verbose'],
)
@@ -592,6 +617,7 @@ def main():
verbose=dict(default=False, required=False, type='bool'),
show_duplicates=dict(default=False, required=False, type='bool'),
match_version=dict(default=None, required=False, type='str'),
+ ignore_excluders=dict(default=False, required=False, type='bool'),
),
supports_check_mode=False,
required_if=[('show_duplicates', True, ['name'])],
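
The new ignore_excluders option copies /etc/yum.conf to a temporary file with every exclude= line blanked and points repoquery at that copy via --config, so packages locked out by the excluders still show up in the query. The config-rewriting step on its own (the helper name is made up; /etc/yum.conf is the real default path):

    import tempfile

    def write_unexcluded_yum_conf(src='/etc/yum.conf'):
        # Duplicate yum.conf but reset every exclude= line, so a repoquery
        # run with --config=<tmp_file.name> ignores the excluders.
        tmp_file = tempfile.NamedTemporaryFile()
        with open(src, 'r') as file_handler:
            lines = file_handler.readlines()
        lines = ['exclude=\n' if line.startswith('exclude=') else line
                 for line in lines]
        with open(tmp_file.name, 'w') as file_handler:
            file_handler.writelines(lines)
            file_handler.flush()
        return tmp_file  # caller closes it; close() also removes the file
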
diff --git a/roles/lib_utils/library/yedit.py b/roles/lib_utils/library/yedit.py
index 9adaeeb52..baf72fe47 100644
--- a/roles/lib_utils/library/yedit.py
+++ b/roles/lib_utils/library/yedit.py
@@ -34,6 +34,7 @@ import json # noqa: F401
import os # noqa: F401
import re # noqa: F401
import shutil # noqa: F401
+import tempfile # noqa: F401
try:
import ruamel.yaml as yaml # noqa: F401
@@ -212,7 +213,7 @@ class YeditException(Exception):
class Yedit(object):
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
diff --git a/roles/lib_utils/src/ansible/repoquery.py b/roles/lib_utils/src/ansible/repoquery.py
index cb4efa6c1..40773b1c1 100644
--- a/roles/lib_utils/src/ansible/repoquery.py
+++ b/roles/lib_utils/src/ansible/repoquery.py
@@ -18,6 +18,7 @@ def main():
verbose=dict(default=False, required=False, type='bool'),
show_duplicates=dict(default=False, required=False, type='bool'),
match_version=dict(default=None, required=False, type='str'),
+ ignore_excluders=dict(default=False, required=False, type='bool'),
),
supports_check_mode=False,
required_if=[('show_duplicates', True, ['name'])],
diff --git a/roles/lib_utils/src/class/repoquery.py b/roles/lib_utils/src/class/repoquery.py
index 82adcada5..28e3a3e89 100644
--- a/roles/lib_utils/src/class/repoquery.py
+++ b/roles/lib_utils/src/class/repoquery.py
@@ -5,15 +5,16 @@
class Repoquery(RepoqueryCLI):
''' Class to wrap the repoquery
'''
- # pylint: disable=too-many-arguments
+ # pylint: disable=too-many-arguments,too-many-instance-attributes
def __init__(self, name, query_type, show_duplicates,
- match_version, verbose):
+ match_version, ignore_excluders, verbose):
''' Constructor for YumList '''
super(Repoquery, self).__init__(None)
self.name = name
self.query_type = query_type
self.show_duplicates = show_duplicates
self.match_version = match_version
+ self.ignore_excluders = ignore_excluders
self.verbose = verbose
if self.match_version:
@@ -21,6 +22,8 @@ class Repoquery(RepoqueryCLI):
self.query_format = "%{version}|%{release}|%{arch}|%{repo}|%{version}-%{release}"
+ self.tmp_file = None
+
def build_cmd(self):
''' build the repoquery cmd options '''
@@ -32,6 +35,9 @@ class Repoquery(RepoqueryCLI):
if self.show_duplicates:
repo_cmd.append('--show-duplicates')
+ if self.ignore_excluders:
+ repo_cmd.append('--config=' + self.tmp_file.name)
+
repo_cmd.append(self.name)
return repo_cmd
@@ -103,6 +109,20 @@ class Repoquery(RepoqueryCLI):
def repoquery(self):
'''perform a repoquery '''
+ if self.ignore_excluders:
+ # Duplicate yum.conf and reset exclude= line to an empty string
+ # to clear a list of all excluded packages
+ self.tmp_file = tempfile.NamedTemporaryFile()
+
+ with open("/etc/yum.conf", "r") as file_handler:
+ yum_conf_lines = file_handler.readlines()
+
+ yum_conf_lines = ["exclude=" if l.startswith("exclude=") else l for l in yum_conf_lines]
+
+ with open(self.tmp_file.name, "w") as file_handler:
+ file_handler.writelines(yum_conf_lines)
+ file_handler.flush()
+
repoquery_cmd = self.build_cmd()
rval = self._repoquery_cmd(repoquery_cmd, True, 'raw')
@@ -125,6 +145,9 @@ class Repoquery(RepoqueryCLI):
else:
rval['package_found'] = False
+ if self.ignore_excluders:
+ self.tmp_file.close()
+
return rval
@staticmethod
@@ -136,6 +159,7 @@ class Repoquery(RepoqueryCLI):
params['query_type'],
params['show_duplicates'],
params['match_version'],
+ params['ignore_excluders'],
params['verbose'],
)
diff --git a/roles/lib_utils/src/class/yedit.py b/roles/lib_utils/src/class/yedit.py
index e0a27012f..957c35a06 100644
--- a/roles/lib_utils/src/class/yedit.py
+++ b/roles/lib_utils/src/class/yedit.py
@@ -11,7 +11,7 @@ class YeditException(Exception):
class Yedit(object):
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
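
The re_key class attribute is a template that gets the allowed separator characters substituted into its character class; this change switches that one template from %-style to str.format-style placeholders (re_valid_key keeps %s). How such a template expands, as a small sketch (the substitution call is illustrative, not Yedit's exact code path):

    import re

    re_key_template = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
    separators = '.#|:'   # mirrors Yedit.com_sep
    # str.format fills the {} placeholder; inside a character class the
    # separator characters need no escaping.
    re_key = re_key_template.format(separators)
    print(re.match(re_key, 'metadata').group(2))   # -> 'metadata'
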
diff --git a/roles/lib_utils/src/lib/import.py b/roles/lib_utils/src/lib/import.py
index b0ab7c92c..567f8c9e0 100644
--- a/roles/lib_utils/src/lib/import.py
+++ b/roles/lib_utils/src/lib/import.py
@@ -9,6 +9,7 @@ import json # noqa: F401
import os # noqa: F401
import re # noqa: F401
import shutil # noqa: F401
+import tempfile # noqa: F401
try:
import ruamel.yaml as yaml # noqa: F401
diff --git a/roles/lib_utils/src/test/unit/test_repoquery.py b/roles/lib_utils/src/test/unit/test_repoquery.py
index e39d9d83f..9991ecd14 100755
--- a/roles/lib_utils/src/test/unit/test_repoquery.py
+++ b/roles/lib_utils/src/test/unit/test_repoquery.py
@@ -37,6 +37,7 @@ class RepoQueryTest(unittest.TestCase):
'verbose': False,
'show_duplicates': False,
'match_version': None,
+ 'ignore_excluders': False,
}
valid_stderr = '''Repo rhel-7-server-extras-rpms forced skip_if_unavailable=True due to: /etc/pki/entitlement/3268107132875399464-key.pem
diff --git a/roles/openshift_ca/tasks/main.yml b/roles/openshift_ca/tasks/main.yml
index 3b17d9ed6..c7b906949 100644
--- a/roles/openshift_ca/tasks/main.yml
+++ b/roles/openshift_ca/tasks/main.yml
@@ -95,7 +95,7 @@
{% for legacy_ca_certificate in g_master_legacy_ca_result.files | default([]) | oo_collect('path') %}
--certificate-authority {{ legacy_ca_certificate }}
{% endfor %}
- --hostnames={{ openshift.common.all_hostnames | join(',') }}
+ --hostnames={{ hostvars[openshift_ca_host].openshift.common.all_hostnames | join(',') }}
--master={{ openshift.master.api_url }}
--public-master={{ openshift.master.public_api_url }}
--cert-dir={{ openshift_ca_config_dir }}
diff --git a/roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py b/roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py
index 5f102e960..a2bc9ecdb 100644
--- a/roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py
+++ b/roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py
@@ -1,6 +1,5 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
-# vim: expandtab:tabstop=4:shiftwidth=4
"""
Custom filters for use in openshift-ansible
"""
@@ -35,7 +34,7 @@ Example playbook usage:
become: no
run_once: yes
delegate_to: localhost
- when: "{{ openshift_certificate_expiry_save_json_results|bool }}"
+ when: openshift_certificate_expiry_save_json_results|bool
copy:
content: "{{ hostvars|oo_cert_expiry_results_to_json() }}"
dest: "{{ openshift_certificate_expiry_json_results_path }}"
diff --git a/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py b/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py
index c204b5341..0242f5b43 100644
--- a/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py
+++ b/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py
@@ -135,7 +135,7 @@ platforms missing the Python OpenSSL library.
continue
elif l.startswith('Subject:'):
- # O=system:nodes, CN=system:node:m01.example.com
+ # O = system:nodes, CN = system:node:m01.example.com
self.subject = FakeOpenSSLCertificateSubjects(l.partition(': ')[-1])
def get_serial_number(self):
@@ -202,7 +202,7 @@ object"""
"""
self.subjects = []
for s in subject_string.split(', '):
- name, _, value = s.partition('=')
+ name, _, value = s.partition(' = ')
self.subjects.append((name, value))
def get_components(self):
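
Newer OpenSSL releases print subject components as "O = system:nodes, CN = ...", with spaces around the equals sign, so the fake-certificate parser now splits on ' = ' and the test fixture (next hunk) requests that format explicitly with -nameopt oneline. The split in isolation (the sample subject string comes from the comment in the diff):

    subject_string = 'O = system:nodes, CN = system:node:m01.example.com'
    subjects = []
    for component in subject_string.split(', '):
        name, _, value = component.partition(' = ')
        subjects.append((name, value))
    print(subjects)
    # [('O', 'system:nodes'), ('CN', 'system:node:m01.example.com')]
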
diff --git a/roles/openshift_certificate_expiry/tasks/main.yml b/roles/openshift_certificate_expiry/tasks/main.yml
index 139d5de6e..b5234bd1e 100644
--- a/roles/openshift_certificate_expiry/tasks/main.yml
+++ b/roles/openshift_certificate_expiry/tasks/main.yml
@@ -13,12 +13,12 @@
src: cert-expiry-table.html.j2
dest: "{{ openshift_certificate_expiry_html_report_path }}"
delegate_to: localhost
- when: "{{ openshift_certificate_expiry_generate_html_report|bool }}"
+ when: openshift_certificate_expiry_generate_html_report|bool
- name: Generate the result JSON string
run_once: yes
set_fact: json_result_string="{{ hostvars|oo_cert_expiry_results_to_json(play_hosts) }}"
- when: "{{ openshift_certificate_expiry_save_json_results|bool }}"
+ when: openshift_certificate_expiry_save_json_results|bool
- name: Generate results JSON file
become: no
@@ -27,4 +27,4 @@
src: save_json_results.j2
dest: "{{ openshift_certificate_expiry_json_results_path }}"
delegate_to: localhost
- when: "{{ openshift_certificate_expiry_save_json_results|bool }}"
+ when: openshift_certificate_expiry_save_json_results|bool
diff --git a/roles/openshift_certificate_expiry/test/test_fakeopensslclasses.py b/roles/openshift_certificate_expiry/test/test_fakeopensslclasses.py
index ccdd48fa8..8a521a765 100644
--- a/roles/openshift_certificate_expiry/test/test_fakeopensslclasses.py
+++ b/roles/openshift_certificate_expiry/test/test_fakeopensslclasses.py
@@ -17,7 +17,8 @@ from openshift_cert_expiry import FakeOpenSSLCertificate # noqa: E402
@pytest.fixture(scope='module')
def fake_valid_cert(valid_cert):
- cmd = ['openssl', 'x509', '-in', str(valid_cert['cert_file']), '-text']
+ cmd = ['openssl', 'x509', '-in', str(valid_cert['cert_file']), '-text',
+ '-nameopt', 'oneline']
cert = subprocess.check_output(cmd)
return FakeOpenSSLCertificate(cert.decode('utf8'))
diff --git a/roles/openshift_cli/library/openshift_container_binary_sync.py b/roles/openshift_cli/library/openshift_container_binary_sync.py
index 4ed3e1f01..57ac16602 100644
--- a/roles/openshift_cli/library/openshift_container_binary_sync.py
+++ b/roles/openshift_cli/library/openshift_container_binary_sync.py
@@ -1,8 +1,6 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
-# vim: expandtab:tabstop=4:shiftwidth=4
# pylint: disable=missing-docstring,invalid-name
-#
import random
import tempfile
diff --git a/roles/openshift_cloud_provider/tasks/openstack.yml b/roles/openshift_cloud_provider/tasks/openstack.yml
index f22dd4520..5788e6d74 100644
--- a/roles/openshift_cloud_provider/tasks/openstack.yml
+++ b/roles/openshift_cloud_provider/tasks/openstack.yml
@@ -7,4 +7,4 @@
template:
dest: "{{ openshift.common.config_base }}/cloudprovider/openstack.conf"
src: openstack.conf.j2
- when: "openshift_cloudprovider_openstack_auth_url is defined and openshift_cloudprovider_openstack_username is defined and openshift_cloudprovider_openstack_password is defined and (openshift_cloudprovider_openstack_tenant_id is defined or openshift_cloudprovider_openstack_tenant_name is defined)"
+ when: openshift_cloudprovider_openstack_auth_url is defined and openshift_cloudprovider_openstack_username is defined and openshift_cloudprovider_openstack_password is defined and (openshift_cloudprovider_openstack_tenant_id is defined or openshift_cloudprovider_openstack_tenant_name is defined)
diff --git a/roles/openshift_common/tasks/main.yml b/roles/openshift_common/tasks/main.yml
index d9ccf87bc..51313a258 100644
--- a/roles/openshift_common/tasks/main.yml
+++ b/roles/openshift_common/tasks/main.yml
@@ -28,10 +28,18 @@
when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_calico | default(false) | bool
- fail:
- msg: Calico cannot currently be used with Flannel in Openshift. Set either openshift_use_calico or openshift_use_flannel, but not both
+ msg: The Calico playbook does not yet integrate with the Flannel playbook in Openshift. Set either openshift_use_calico or openshift_use_flannel, but not both.
when: openshift_use_calico | default(false) | bool and openshift_use_flannel | default(false) | bool
- fail:
+ msg: Calico can not be used with Nuage in Openshift. Set either openshift_use_calico or openshift_use_nuage, but not both
+ when: openshift_use_calico | default(false) | bool and openshift_use_nuage | default(false) | bool
+
+- fail:
+ msg: Calico can not be used with Contiv in Openshift. Set either openshift_use_calico or openshift_use_contiv, but not both
+ when: openshift_use_calico | default(false) | bool and openshift_use_contiv | default(false) | bool
+
+- fail:
msg: openshift_hostname must be 64 characters or less
when: openshift_hostname is defined and openshift_hostname | length > 64
diff --git a/roles/openshift_docker_facts/tasks/main.yml b/roles/openshift_docker_facts/tasks/main.yml
index 049ceffe0..350512452 100644
--- a/roles/openshift_docker_facts/tasks/main.yml
+++ b/roles/openshift_docker_facts/tasks/main.yml
@@ -16,6 +16,7 @@
disable_push_dockerhub: "{{ openshift_disable_push_dockerhub | default(None) }}"
hosted_registry_insecure: "{{ openshift_docker_hosted_registry_insecure | default(openshift.docker.hosted_registry_insecure | default(False)) }}"
hosted_registry_network: "{{ openshift_docker_hosted_registry_network | default(None) }}"
+ use_system_container: "{{ openshift_docker_use_system_container | default(False) }}"
- set_fact:
docker_additional_registries: "{{ openshift.docker.additional_registries
diff --git a/roles/openshift_etcd_ca/tasks/main.yml b/roles/openshift_etcd_ca/tasks/main.yml
deleted file mode 100644
index ed97d539c..000000000
--- a/roles/openshift_etcd_ca/tasks/main.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/roles/openshift_examples/examples-sync.sh b/roles/openshift_examples/examples-sync.sh
index 0f2bec6d3..c7e51bbfc 100755
--- a/roles/openshift_examples/examples-sync.sh
+++ b/roles/openshift_examples/examples-sync.sh
@@ -6,7 +6,7 @@
# This script should be run from openshift-ansible/roles/openshift_examples
XPAAS_VERSION=ose-v1.3.6
-ORIGIN_VERSION=${1:-v1.6}
+ORIGIN_VERSION=${1:-v3.6}
RHAMP_TAG=1.0.0.GA
RHAMP_TEMPLATE=https://raw.githubusercontent.com/3scale/rhamp-openshift-templates/${RHAMP_TAG}/apicast-gateway/apicast-gateway-template.yml
EXAMPLES_BASE=$(pwd)/files/examples/${ORIGIN_VERSION}
diff --git a/roles/openshift_examples/files/examples/latest b/roles/openshift_examples/files/examples/latest
index 536385712..08751d131 120000
--- a/roles/openshift_examples/files/examples/latest
+++ b/roles/openshift_examples/files/examples/latest
@@ -1 +1 @@
-v1.6
\ No newline at end of file
+v3.6
\ No newline at end of file
diff --git a/roles/openshift_examples/files/examples/v1.5/image-streams/image-streams-centos7.json b/roles/openshift_examples/files/examples/v1.5/image-streams/image-streams-centos7.json
index 1a90a9409..a81dbb654 100644
--- a/roles/openshift_examples/files/examples/v1.5/image-streams/image-streams-centos7.json
+++ b/roles/openshift_examples/files/examples/v1.5/image-streams/image-streams-centos7.json
@@ -800,7 +800,7 @@
"openshift.io/display-name": "Jenkins 1.X",
"description": "Provides a Jenkins 1.X server on CentOS 7. For more information about using this container image, including OpenShift considerations, see https://github.com/openshift/jenkins/blob/master/README.md.",
"iconClass": "icon-jenkins",
- "tags": "jenkins",
+ "tags": "hidden,jenkins",
"version": "1.x"
},
"from": {
diff --git a/roles/openshift_examples/files/examples/v1.5/image-streams/image-streams-rhel7.json b/roles/openshift_examples/files/examples/v1.5/image-streams/image-streams-rhel7.json
index eb94c3bb4..2ed0efe1e 100644
--- a/roles/openshift_examples/files/examples/v1.5/image-streams/image-streams-rhel7.json
+++ b/roles/openshift_examples/files/examples/v1.5/image-streams/image-streams-rhel7.json
@@ -707,7 +707,7 @@
"openshift.io/display-name": "Jenkins 1.X",
"description": "Provides a Jenkins 1.X server on RHEL 7. For more information about using this container image, including OpenShift considerations, see https://github.com/openshift/jenkins/blob/master/README.md.",
"iconClass": "icon-jenkins",
- "tags": "jenkins",
+ "tags": "hidden,jenkins",
"version": "1.x"
},
"from": {
diff --git a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/dotnet-pgsql-persistent.json b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/dotnet-pgsql-persistent.json
index fa31f7f61..a2b59c2d3 100644
--- a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/dotnet-pgsql-persistent.json
+++ b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/dotnet-pgsql-persistent.json
@@ -19,6 +19,17 @@
},
"objects": [
{
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}"
+ },
+ "stringData": {
+ "database-password": "${DATABASE_PASSWORD}",
+ "connect-string": "Host=${DATABASE_SERVICE_NAME};Database=${DATABASE_NAME};Username=${DATABASE_USER};Password=${DATABASE_PASSWORD}"
+ }
+ },
+ {
"kind": "Service",
"apiVersion": "v1",
"metadata": {
@@ -209,7 +220,12 @@
"env": [
{
"name": "ConnectionString",
- "value": "Host=${DATABASE_SERVICE_NAME};Database=${DATABASE_NAME};Username=${DATABASE_USER};Password=${DATABASE_PASSWORD}"
+ "valueFrom": {
+ "secretKeyRef": {
+ "name": "${NAME}",
+ "key": "connect-string"
+ }
+ }
}
],
"resources": {
@@ -373,7 +389,12 @@
},
{
"name": "POSTGRESQL_PASSWORD",
- "value": "${DATABASE_PASSWORD}"
+ "valueFrom": {
+ "secretKeyRef": {
+ "name": "${NAME}",
+ "key": "database-password"
+ }
+ }
},
{
"name": "POSTGRESQL_DATABASE",
diff --git a/roles/openshift_examples/files/examples/v1.6/cfme-templates/cfme-pv-app-example.yaml b/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-app-example.yaml
index 14bdd1dca..14bdd1dca 100644
--- a/roles/openshift_examples/files/examples/v1.6/cfme-templates/cfme-pv-app-example.yaml
+++ b/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-app-example.yaml
diff --git a/roles/openshift_examples/files/examples/v1.6/cfme-templates/cfme-pv-example.yaml b/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-example.yaml
index 709d8d976..709d8d976 100644
--- a/roles/openshift_examples/files/examples/v1.6/cfme-templates/cfme-pv-example.yaml
+++ b/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-example.yaml
diff --git a/roles/openshift_examples/files/examples/v1.6/cfme-templates/cfme-template.yaml b/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-template.yaml
index 4f25a9c8f..4f25a9c8f 100644
--- a/roles/openshift_examples/files/examples/v1.6/cfme-templates/cfme-template.yaml
+++ b/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-template.yaml
diff --git a/roles/openshift_examples/files/examples/v1.6/db-templates/README.md b/roles/openshift_examples/files/examples/v3.6/db-templates/README.md
index a36d7ba7d..a36d7ba7d 100644
--- a/roles/openshift_examples/files/examples/v1.6/db-templates/README.md
+++ b/roles/openshift_examples/files/examples/v3.6/db-templates/README.md
diff --git a/roles/openshift_examples/files/examples/v1.6/db-templates/mariadb-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/mariadb-ephemeral-template.json
index f347f1f9f..f347f1f9f 100644
--- a/roles/openshift_examples/files/examples/v1.6/db-templates/mariadb-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/db-templates/mariadb-ephemeral-template.json
diff --git a/roles/openshift_examples/files/examples/v1.6/db-templates/mariadb-persistent-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/mariadb-persistent-template.json
index 6ed744777..6ed744777 100644
--- a/roles/openshift_examples/files/examples/v1.6/db-templates/mariadb-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/db-templates/mariadb-persistent-template.json
diff --git a/roles/openshift_examples/files/examples/v1.6/db-templates/mongodb-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/mongodb-ephemeral-template.json
index 97a8abf6d..97a8abf6d 100644
--- a/roles/openshift_examples/files/examples/v1.6/db-templates/mongodb-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/db-templates/mongodb-ephemeral-template.json
diff --git a/roles/openshift_examples/files/examples/v1.6/db-templates/mongodb-persistent-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/mongodb-persistent-template.json
index 0656219fb..0656219fb 100644
--- a/roles/openshift_examples/files/examples/v1.6/db-templates/mongodb-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/db-templates/mongodb-persistent-template.json
diff --git a/roles/openshift_examples/files/examples/v1.6/db-templates/mysql-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/mysql-ephemeral-template.json
index d60b4647d..d60b4647d 100644
--- a/roles/openshift_examples/files/examples/v1.6/db-templates/mysql-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/db-templates/mysql-ephemeral-template.json
diff --git a/roles/openshift_examples/files/examples/v1.6/db-templates/mysql-persistent-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/mysql-persistent-template.json
index c2bfa40fd..c2bfa40fd 100644
--- a/roles/openshift_examples/files/examples/v1.6/db-templates/mysql-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/db-templates/mysql-persistent-template.json
diff --git a/roles/openshift_examples/files/examples/v1.6/db-templates/postgresql-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/postgresql-ephemeral-template.json
index 7a16e742a..7a16e742a 100644
--- a/roles/openshift_examples/files/examples/v1.6/db-templates/postgresql-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/db-templates/postgresql-ephemeral-template.json
diff --git a/roles/openshift_examples/files/examples/v1.6/db-templates/postgresql-persistent-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/postgresql-persistent-template.json
index 242212d6f..242212d6f 100644
--- a/roles/openshift_examples/files/examples/v1.6/db-templates/postgresql-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/db-templates/postgresql-persistent-template.json
diff --git a/roles/openshift_examples/files/examples/v1.6/db-templates/redis-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/redis-ephemeral-template.json
index e9af50937..e9af50937 100644
--- a/roles/openshift_examples/files/examples/v1.6/db-templates/redis-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/db-templates/redis-ephemeral-template.json
diff --git a/roles/openshift_examples/files/examples/v1.6/db-templates/redis-persistent-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/redis-persistent-template.json
index aa27578a9..aa27578a9 100644
--- a/roles/openshift_examples/files/examples/v1.6/db-templates/redis-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/db-templates/redis-persistent-template.json
diff --git a/roles/openshift_examples/files/examples/v1.6/image-streams/dotnet_imagestreams.json b/roles/openshift_examples/files/examples/v3.6/image-streams/dotnet_imagestreams.json
index 857ffa980..857ffa980 100644
--- a/roles/openshift_examples/files/examples/v1.6/image-streams/dotnet_imagestreams.json
+++ b/roles/openshift_examples/files/examples/v3.6/image-streams/dotnet_imagestreams.json
diff --git a/roles/openshift_examples/files/examples/v1.6/image-streams/image-streams-centos7.json b/roles/openshift_examples/files/examples/v3.6/image-streams/image-streams-centos7.json
index 1a90a9409..a81dbb654 100644
--- a/roles/openshift_examples/files/examples/v1.6/image-streams/image-streams-centos7.json
+++ b/roles/openshift_examples/files/examples/v3.6/image-streams/image-streams-centos7.json
@@ -800,7 +800,7 @@
"openshift.io/display-name": "Jenkins 1.X",
"description": "Provides a Jenkins 1.X server on CentOS 7. For more information about using this container image, including OpenShift considerations, see https://github.com/openshift/jenkins/blob/master/README.md.",
"iconClass": "icon-jenkins",
- "tags": "jenkins",
+ "tags": "hidden,jenkins",
"version": "1.x"
},
"from": {
diff --git a/roles/openshift_examples/files/examples/v1.6/image-streams/image-streams-rhel7.json b/roles/openshift_examples/files/examples/v3.6/image-streams/image-streams-rhel7.json
index eb94c3bb4..2ed0efe1e 100644
--- a/roles/openshift_examples/files/examples/v1.6/image-streams/image-streams-rhel7.json
+++ b/roles/openshift_examples/files/examples/v3.6/image-streams/image-streams-rhel7.json
@@ -707,7 +707,7 @@
"openshift.io/display-name": "Jenkins 1.X",
"description": "Provides a Jenkins 1.X server on RHEL 7. For more information about using this container image, including OpenShift considerations, see https://github.com/openshift/jenkins/blob/master/README.md.",
"iconClass": "icon-jenkins",
- "tags": "jenkins",
+ "tags": "hidden,jenkins",
"version": "1.x"
},
"from": {
diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/README.md b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/README.md
index f48d8d4a8..f48d8d4a8 100644
--- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/README.md
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/README.md
diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/apicast-gateway-template.yml b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/apicast-gateway-template.yml
index 34f5fcbcc..34f5fcbcc 100644
--- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/apicast-gateway-template.yml
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/apicast-gateway-template.yml
diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/cakephp-mysql-persistent.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/cakephp-mysql-persistent.json
index eb3d296be..eb3d296be 100644
--- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/cakephp-mysql-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/cakephp-mysql-persistent.json
diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/cakephp-mysql.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/cakephp-mysql.json
index da2454d2e..da2454d2e 100644
--- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/cakephp-mysql.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/cakephp-mysql.json
diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/dancer-mysql-persistent.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql-persistent.json
index 81ae63416..81ae63416 100644
--- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/dancer-mysql-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql-persistent.json
diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/dancer-mysql.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql.json
index 7a285dba8..7a285dba8 100644
--- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/dancer-mysql.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql.json
diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/django-postgresql-persistent.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql-persistent.json
index 9f982c286..9f982c286 100644
--- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/django-postgresql-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql-persistent.json
diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/django-postgresql.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql.json
index 7bee85ddd..7bee85ddd 100644
--- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/django-postgresql.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql.json
diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/dotnet-example.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dotnet-example.json
index a09d71a00..a09d71a00 100644
--- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/dotnet-example.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dotnet-example.json
diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/dotnet-pgsql-persistent.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dotnet-pgsql-persistent.json
index fa31f7f61..a2b59c2d3 100644
--- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/dotnet-pgsql-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dotnet-pgsql-persistent.json
@@ -19,6 +19,17 @@
},
"objects": [
{
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}"
+ },
+ "stringData": {
+ "database-password": "${DATABASE_PASSWORD}",
+ "connect-string": "Host=${DATABASE_SERVICE_NAME};Database=${DATABASE_NAME};Username=${DATABASE_USER};Password=${DATABASE_PASSWORD}"
+ }
+ },
+ {
"kind": "Service",
"apiVersion": "v1",
"metadata": {
@@ -209,7 +220,12 @@
"env": [
{
"name": "ConnectionString",
- "value": "Host=${DATABASE_SERVICE_NAME};Database=${DATABASE_NAME};Username=${DATABASE_USER};Password=${DATABASE_PASSWORD}"
+ "valueFrom": {
+ "secretKeyRef": {
+ "name": "${NAME}",
+ "key": "connect-string"
+ }
+ }
}
],
"resources": {
@@ -373,7 +389,12 @@
},
{
"name": "POSTGRESQL_PASSWORD",
- "value": "${DATABASE_PASSWORD}"
+ "valueFrom": {
+ "secretKeyRef": {
+ "name": "${NAME}",
+ "key": "database-password"
+ }
+ }
},
{
"name": "POSTGRESQL_DATABASE",
diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/jenkins-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/jenkins-ephemeral-template.json
index 264e4b2de..264e4b2de 100644
--- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/jenkins-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/jenkins-ephemeral-template.json
diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/jenkins-persistent-template.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/jenkins-persistent-template.json
index b47bdf353..b47bdf353 100644
--- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/jenkins-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/jenkins-persistent-template.json
diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/nodejs-mongodb-persistent.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb-persistent.json
index 6ee999cb1..6ee999cb1 100644
--- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/nodejs-mongodb-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb-persistent.json
diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/nodejs-mongodb.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb.json
index 5c177a7e0..5c177a7e0 100644
--- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/nodejs-mongodb.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb.json
diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/rails-postgresql-persistent.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/rails-postgresql-persistent.json
index b400cfdb3..b400cfdb3 100644
--- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/rails-postgresql-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/rails-postgresql-persistent.json
diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/rails-postgresql.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/rails-postgresql.json
index fa67412ff..fa67412ff 100644
--- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/rails-postgresql.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/rails-postgresql.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-streams/fis-image-streams.json b/roles/openshift_examples/files/examples/v3.6/xpaas-streams/fis-image-streams.json
index 9d99973be..9d99973be 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-streams/fis-image-streams.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-streams/fis-image-streams.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-streams/jboss-image-streams.json b/roles/openshift_examples/files/examples/v3.6/xpaas-streams/jboss-image-streams.json
index 049f3f884..049f3f884 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-streams/jboss-image-streams.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-streams/jboss-image-streams.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/amq62-basic.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/amq62-basic.json
index ab35afead..ab35afead 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/amq62-basic.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/amq62-basic.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/amq62-persistent-ssl.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/amq62-persistent-ssl.json
index c12f06dec..c12f06dec 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/amq62-persistent-ssl.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/amq62-persistent-ssl.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/amq62-persistent.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/amq62-persistent.json
index 897ce0395..897ce0395 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/amq62-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/amq62-persistent.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/amq62-ssl.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/amq62-ssl.json
index 97d110286..97d110286 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/amq62-ssl.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/amq62-ssl.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-basic.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-basic.json
index 56e76016f..56e76016f 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-basic.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-basic.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-https.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-https.json
index 639ac2e11..639ac2e11 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-https.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-https.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-mysql-persistent.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-mysql-persistent.json
index 22ca3f0a0..22ca3f0a0 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-mysql-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-mysql-persistent.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-mysql.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-mysql.json
index e1a585d24..e1a585d24 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-mysql.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-mysql.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-postgresql-persistent.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-postgresql-persistent.json
index 12720eb19..12720eb19 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-postgresql-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-postgresql-persistent.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-postgresql.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-postgresql.json
index da8015fb0..da8015fb0 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-postgresql.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-postgresql.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datavirt63-basic-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datavirt63-basic-s2i.json
index 7d64dac98..7d64dac98 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datavirt63-basic-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datavirt63-basic-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datavirt63-extensions-support-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datavirt63-extensions-support-s2i.json
index 1e7c03b99..1e7c03b99 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datavirt63-extensions-support-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datavirt63-extensions-support-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datavirt63-secure-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datavirt63-secure-s2i.json
index 07f926ff3..07f926ff3 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datavirt63-secure-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datavirt63-secure-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver62-amq-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver62-amq-s2i.json
index 754a3b4c0..754a3b4c0 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver62-amq-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver62-amq-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver62-basic-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver62-basic-s2i.json
index 8be4ac90b..8be4ac90b 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver62-basic-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver62-basic-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver62-https-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver62-https-s2i.json
index bf9047599..bf9047599 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver62-https-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver62-https-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver63-amq-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver63-amq-s2i.json
index 51e667e02..51e667e02 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver63-amq-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver63-amq-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver63-basic-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver63-basic-s2i.json
index c5f0d006a..c5f0d006a 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver63-basic-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver63-basic-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver63-https-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver63-https-s2i.json
index 3db0e4c84..3db0e4c84 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver63-https-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver63-https-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-amq-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-amq-persistent-s2i.json
index 72dbb4302..72dbb4302 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-amq-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-amq-persistent-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-amq-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-amq-s2i.json
index 9dd847451..9dd847451 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-amq-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-amq-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-basic-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-basic-s2i.json
index 7b1800b7b..7b1800b7b 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-basic-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-basic-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-https-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-https-s2i.json
index 31716d84c..31716d84c 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-https-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-https-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-mongodb-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-mongodb-persistent-s2i.json
index 212431056..212431056 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-mongodb-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-mongodb-persistent-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-mongodb-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-mongodb-s2i.json
index 13fbbdd93..13fbbdd93 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-mongodb-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-mongodb-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-mysql-persistent-s2i.json
index 69fdec206..69fdec206 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-mysql-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-mysql-persistent-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-mysql-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-mysql-s2i.json
index 2bd3c249f..2bd3c249f 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-mysql-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-mysql-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-postgresql-persistent-s2i.json
index 31f245950..31f245950 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-postgresql-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-postgresql-persistent-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-postgresql-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-postgresql-s2i.json
index eac964697..eac964697 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-postgresql-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-postgresql-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-sso-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-sso-s2i.json
index 09023be71..09023be71 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-sso-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-sso-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-amq-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-amq-persistent-s2i.json
index f08cdf2f9..f08cdf2f9 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-amq-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-amq-persistent-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-amq-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-amq-s2i.json
index 3ca9e9fab..3ca9e9fab 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-amq-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-amq-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-basic-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-basic-s2i.json
index 83b4d5b24..83b4d5b24 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-basic-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-basic-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-https-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-https-s2i.json
index 1292442a4..1292442a4 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-https-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-https-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-mongodb-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-mongodb-persistent-s2i.json
index 99db77d58..99db77d58 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-mongodb-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-mongodb-persistent-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-mongodb-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-mongodb-s2i.json
index c8150c231..c8150c231 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-mongodb-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-mongodb-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-mysql-persistent-s2i.json
index f8e5c2b04..f8e5c2b04 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-mysql-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-mysql-persistent-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-mysql-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-mysql-s2i.json
index 1edeb62e7..1edeb62e7 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-mysql-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-mysql-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-postgresql-persistent-s2i.json
index d11df06ee..d11df06ee 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-postgresql-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-postgresql-persistent-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-postgresql-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-postgresql-s2i.json
index 6b7f6d707..6b7f6d707 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-postgresql-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-postgresql-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-sso-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-sso-s2i.json
index 811602220..811602220 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-sso-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-sso-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-basic-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-basic-s2i.json
index 413a6de87..413a6de87 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-basic-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-basic-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-https-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-https-s2i.json
index 610ea9441..610ea9441 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-https-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-https-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-mongodb-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-mongodb-persistent-s2i.json
index 6ef9d6e4c..6ef9d6e4c 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-mongodb-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-mongodb-persistent-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-mongodb-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-mongodb-s2i.json
index 9b48f8ae7..9b48f8ae7 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-mongodb-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-mongodb-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-mysql-persistent-s2i.json
index 30af703ce..30af703ce 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-mysql-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-mysql-persistent-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-mysql-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-mysql-s2i.json
index c2843af63..c2843af63 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-mysql-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-mysql-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-postgresql-persistent-s2i.json
index b8372f374..b8372f374 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-postgresql-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-postgresql-persistent-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-postgresql-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-postgresql-s2i.json
index cd5bb9fa4..cd5bb9fa4 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-postgresql-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-postgresql-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-basic-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-basic-s2i.json
index cb1e49d29..cb1e49d29 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-basic-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-basic-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-https-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-https-s2i.json
index 21d5662c7..21d5662c7 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-https-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-https-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-mongodb-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-mongodb-persistent-s2i.json
index 34657d826..34657d826 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-mongodb-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-mongodb-persistent-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-mongodb-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-mongodb-s2i.json
index 974cfaddb..974cfaddb 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-mongodb-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-mongodb-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-mysql-persistent-s2i.json
index 7a8231cc5..7a8231cc5 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-mysql-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-mysql-persistent-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-mysql-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-mysql-s2i.json
index cda21f237..cda21f237 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-mysql-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-mysql-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-postgresql-persistent-s2i.json
index 4dfc98015..4dfc98015 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-postgresql-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-postgresql-persistent-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-postgresql-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-postgresql-s2i.json
index f6c85668c..f6c85668c 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-postgresql-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-postgresql-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/karaf2-camel-amq-template.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/karaf2-camel-amq-template.json
index cd0bec3c1..cd0bec3c1 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/karaf2-camel-amq-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/karaf2-camel-amq-template.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/karaf2-camel-log-template.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/karaf2-camel-log-template.json
index 2ecce08a9..2ecce08a9 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/karaf2-camel-log-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/karaf2-camel-log-template.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/karaf2-camel-rest-sql-template.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/karaf2-camel-rest-sql-template.json
index d80939efb..d80939efb 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/karaf2-camel-rest-sql-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/karaf2-camel-rest-sql-template.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/karaf2-cxf-rest-template.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/karaf2-cxf-rest-template.json
index f99099868..f99099868 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/karaf2-cxf-rest-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/karaf2-cxf-rest-template.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/openjdk18-web-basic-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/openjdk18-web-basic-s2i.json
index 143e16756..143e16756 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/openjdk18-web-basic-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/openjdk18-web-basic-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-amq-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-amq-mysql-persistent-s2i.json
index 1dea463ac..1dea463ac 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-amq-mysql-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-amq-mysql-persistent-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-amq-mysql-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-amq-mysql-s2i.json
index 42264585b..42264585b 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-amq-mysql-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-amq-mysql-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-amq-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-amq-postgresql-persistent-s2i.json
index f6d0c99ed..f6d0c99ed 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-amq-postgresql-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-amq-postgresql-persistent-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-amq-postgresql-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-amq-postgresql-s2i.json
index 41c726cf0..41c726cf0 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-amq-postgresql-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-amq-postgresql-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-basic-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-basic-s2i.json
index 170c919cb..170c919cb 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-basic-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-basic-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-mysql-persistent-s2i.json
index 89d0db1a6..89d0db1a6 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-mysql-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-mysql-persistent-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-mysql-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-mysql-s2i.json
index 26cab29f8..26cab29f8 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-mysql-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-mysql-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-postgresql-persistent-s2i.json
index 32a512829..32a512829 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-postgresql-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-postgresql-persistent-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-postgresql-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-postgresql-s2i.json
index 55e2199bb..55e2199bb 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-postgresql-s2i.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-postgresql-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-amq-template.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-amq-template.json
index 8b3cd6ed0..8b3cd6ed0 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-amq-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-amq-template.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-config-template.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-config-template.json
index bc5bbad22..bc5bbad22 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-config-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-config-template.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-drools-template.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-drools-template.json
index e54fa0d59..e54fa0d59 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-drools-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-drools-template.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-infinispan-template.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-infinispan-template.json
index 20ba97dac..20ba97dac 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-infinispan-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-infinispan-template.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-rest-sql-template.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-rest-sql-template.json
index 555647fab..555647fab 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-rest-sql-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-rest-sql-template.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-teiid-template.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-teiid-template.json
index cf9a4e903..cf9a4e903 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-teiid-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-teiid-template.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-template.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-template.json
index c78a96f7c..c78a96f7c 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-template.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-xml-template.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-xml-template.json
index 620425902..620425902 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-xml-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-xml-template.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-cxf-jaxrs-template.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-cxf-jaxrs-template.json
index 15cfc93fd..15cfc93fd 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-cxf-jaxrs-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-cxf-jaxrs-template.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-cxf-jaxws-template.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-cxf-jaxws-template.json
index c70ee7726..c70ee7726 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-cxf-jaxws-template.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-cxf-jaxws-template.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-https.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso70-https.json
index fb0578a67..fb0578a67 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-https.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso70-https.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-mysql-persistent.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso70-mysql-persistent.json
index dcbb24bf1..dcbb24bf1 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-mysql-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso70-mysql-persistent.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-mysql.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso70-mysql.json
index 1768f7a1b..1768f7a1b 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-mysql.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso70-mysql.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-postgresql-persistent.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso70-postgresql-persistent.json
index 4c2f81f2e..4c2f81f2e 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-postgresql-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso70-postgresql-persistent.json
diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-postgresql.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso70-postgresql.json
index d8402ef72..d8402ef72 100644
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-postgresql.json
+++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso70-postgresql.json
diff --git a/roles/openshift_excluder/README.md b/roles/openshift_excluder/README.md
index e048bd107..80cb88d45 100644
--- a/roles/openshift_excluder/README.md
+++ b/roles/openshift_excluder/README.md
@@ -1,47 +1,69 @@
OpenShift Excluder
-================
+==================
Manages the excluder packages which add yum and dnf exclusions ensuring that
-the packages we care about are not inadvertantly updated. See
+the packages we care about are not inadvertently updated. See
https://github.com/openshift/origin/tree/master/contrib/excluder
Requirements
------------
-openshift_facts
+None
-Facts
------
+Inventory Variables
+-------------------
-| Name | Default Value | Description |
------------------------------|---------------|----------------------------------------|
-| enable_docker_excluder | enable_excluders | Enable docker excluder. If not set, the docker excluder is ignored. |
-| enable_openshift_excluder | enable_excluders | Enable openshift excluder. If not set, the openshift excluder is ignored. |
-| enable_excluders | None | Enable all excluders
+| Name | Default Value | Description |
+---------------------------------------|----------------------------|----------------------------------------|
+| openshift_enable_excluders | True | Enable all excluders |
+| openshift_enable_docker_excluder | openshift_enable_excluders | Enable docker excluder. If not set, the docker excluder is ignored. |
+| openshift_enable_openshift_excluder | openshift_enable_excluders | Enable openshift excluder. If not set, the openshift excluder is ignored. |
Role Variables
--------------
-None
+
+| Name | Default | Choices | Description |
+|-------------------------------------------|---------|-----------------|---------------------------------------------------------------------------|
+| r_openshift_excluder_action | enable | enable, disable | Action to perform when calling this role |
+| r_openshift_excluder_verify_upgrade | false | true, false | When upgrading, this variable should be set to true when calling the role |
+| r_openshift_excluder_package_state | present | present, latest | Use 'latest' to upgrade openshift_excluder package |
+| r_openshift_excluder_docker_package_state | present | present, latest | Use 'latest' to upgrade docker_excluder package |
+| r_openshift_excluder_service_type | None | | (Required) Defined as openshift.common.service_type e.g. atomic-openshift |
+| r_openshift_excluder_upgrade_target | None | | Required when r_openshift_excluder_verify_upgrade is true, defined as openshift_upgrade_target by Upgrade playbooks e.g. '3.6'|
Dependencies
------------
-Tasks to include
-----------------
-
-- exclude: enable excluders (assuming excluders are installed)
-- unexclude: disable excluders (assuming excluders are installed)
-- install: install excluders (installation is followed by excluder enabling)
-- enable: enable excluders (optionally with installation step)
-- disabled: disable excluders (optionally with installation and status step, the status check that can override which excluder gets enabled/disabled)
-- status: determine status of excluders
+- lib_utils
Example Playbook
----------------
+```yaml
+- name: Demonstrate OpenShift Excluder usage
+ hosts: oo_masters_to_config:oo_nodes_to_config
+ roles:
+ # Disable all excluders
+ - role: openshift_excluder
+ r_openshift_excluder_action: disable
+ r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
+ # Enable all excluders
+ - role: openshift_excluder
+ r_openshift_excluder_action: enable
+ r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
+ # Disable all excluders and verify appropriate excluder packages are available for upgrade
+ - role: openshift_excluder
+ r_openshift_excluder_action: disable
+ r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
+ r_openshift_excluder_verify_upgrade: true
+ r_openshift_excluder_upgrade_target: "{{ openshift_upgrade_target }}"
+ r_openshift_excluder_package_state: latest
+ r_openshift_excluder_docker_package_state: latest
+```
TODO
----
+
It should be possible to manage the two excluders independently though that's not a hard requirement. However it should be done to manage docker on RHEL Containerized hosts.
License
diff --git a/roles/openshift_excluder/defaults/main.yml b/roles/openshift_excluder/defaults/main.yml
index 7c3ae2a86..d4f151142 100644
--- a/roles/openshift_excluder/defaults/main.yml
+++ b/roles/openshift_excluder/defaults/main.yml
@@ -1,6 +1,19 @@
---
# keep the 'current' package or update to 'latest' if available?
-openshift_excluder_package_state: present
-docker_excluder_package_state: present
+r_openshift_excluder_package_state: present
+r_openshift_excluder_docker_package_state: present
-enable_excluders: true
+# Legacy variables are included for backwards compatibility with v3.5
+# Inventory variables Legacy
+# openshift_enable_excluders enable_excluders
+# openshift_enable_openshift_excluder enable_openshift_excluder
+# openshift_enable_docker_excluder enable_docker_excluder
+r_openshift_excluder_enable_excluders: "{{ openshift_enable_excluders | default(enable_excluders) | default(true) }}"
+r_openshift_excluder_enable_openshift_excluder: "{{ openshift_enable_openshift_excluder | default(enable_openshift_excluder) | default(r_openshift_excluder_enable_excluders) }}"
+r_openshift_excluder_enable_docker_excluder: "{{ openshift_enable_docker_excluder | default(enable_docker_excluder) | default(r_openshift_excluder_enable_excluders) }}"
+
+# Default action when calling this role
+r_openshift_excluder_action: enable
+
+# When upgrading, this variable should be set to true when calling the role
+r_openshift_excluder_verify_upgrade: false
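
The chained `default()` filters above are what keep the role compatible with the legacy (pre-`openshift_`-prefixed) inventory variables: each lookup falls through new name, then legacy name, then the hard-coded default. A small, runnable sketch of the same fallback chain outside this role (variable names are illustrative):

```yaml
---
# Only the legacy variable is set, so the chain falls back to it.
- hosts: localhost
  gather_facts: false
  vars:
    enable_excluders: false                      # legacy inventory name
  tasks:
    - debug:
        msg: "{{ openshift_enable_excluders | default(enable_excluders) | default(true) }}"
      # Prints "False": the new openshift_* name is unset, the legacy value wins,
      # and the literal default(true) is never reached.
```
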
diff --git a/roles/openshift_excluder/meta/main.yml b/roles/openshift_excluder/meta/main.yml
index 4d1c1efca..871081c19 100644
--- a/roles/openshift_excluder/meta/main.yml
+++ b/roles/openshift_excluder/meta/main.yml
@@ -1,7 +1,7 @@
---
galaxy_info:
author: Scott Dodson
- description: OpenShift Examples
+ description: OpenShift Excluder
company: Red Hat, Inc.
license: Apache License, Version 2.0
min_ansible_version: 2.2
@@ -12,5 +12,4 @@ galaxy_info:
categories:
- cloud
dependencies:
-- { role: openshift_facts }
-- { role: openshift_repos }
+- role: lib_utils
diff --git a/roles/openshift_excluder/tasks/disable.yml b/roles/openshift_excluder/tasks/disable.yml
index 97044fff6..8d5a08874 100644
--- a/roles/openshift_excluder/tasks/disable.yml
+++ b/roles/openshift_excluder/tasks/disable.yml
@@ -1,47 +1,38 @@
---
-# input variables
-# - excluder_package_state
-# - docker_excluder_package_state
-- include: init.yml
+- when: r_openshift_excluder_verify_upgrade
+ block:
+ - name: Include verify_upgrade.yml when upgrading
+ include: verify_upgrade.yml
# unexclude the current openshift/origin-excluder if it is installed so it can be updated
-- include: unexclude.yml
+- name: Disable OpenShift excluder so it can be updated
+ include: unexclude.yml
vars:
unexclude_docker_excluder: false
- unexclude_openshift_excluder: "{{ openshift_excluder_on | bool }}"
- when:
- - not openshift.common.is_atomic | bool
+ unexclude_openshift_excluder: "{{ r_openshift_excluder_enable_openshift_excluder }}"
# Install any excluder that is enabled
-- include: install.yml
- vars:
- # Both docker_excluder_on and openshift_excluder_on are set in openshift_excluder->init task
- install_docker_excluder: "{{ docker_excluder_on | bool }}"
- install_openshift_excluder: "{{ openshift_excluder_on | bool }}"
- when: docker_excluder_on or openshift_excluder_on
-
- # if the docker excluder is not enabled, we don't care about its status
- # it the docker excluder is enabled, we install it and in case its status is non-zero
- # it is enabled no matter what
+- name: Include install.yml
+ include: install.yml
# And finally adjust an excluder in order to update host components correctly. First
# exclude then unexclude
-- block:
- - include: exclude.yml
- vars:
- # Enable the docker excluder only if it is overrided
- # BZ #1430612: docker excluders should be enabled even during installation and upgrade
- exclude_docker_excluder: "{{ docker_excluder_on | bool }}"
- # excluder is to be disabled by default
- exclude_openshift_excluder: false
- # All excluders that are to be disabled are disabled
- - include: unexclude.yml
- vars:
- # If the docker override is not set, default to the generic behaviour
- # BZ #1430612: docker excluders should be enabled even during installation and upgrade
- unexclude_docker_excluder: false
- # disable openshift excluder is never overrided to be enabled
- # disable it if the docker excluder is enabled
- unexclude_openshift_excluder: "{{ openshift_excluder_on | bool }}"
- when:
- - not openshift.common.is_atomic | bool
+- name: Include exclude.yml
+ include: exclude.yml
+ vars:
+ # Enable the docker excluder only if it is overridden
+ # BZ #1430612: docker excluders should be enabled even during installation and upgrade
+ exclude_docker_excluder: "{{ r_openshift_excluder_enable_docker_excluder }}"
+ # excluder is to be disabled by default
+ exclude_openshift_excluder: false
+
+# All excluders that are to be disabled are disabled
+- name: Include unexclude.yml
+ include: unexclude.yml
+ vars:
+ # If the docker override is not set, default to the generic behaviour
+ # BZ #1430612: docker excluders should be enabled even during installation and upgrade
+ unexclude_docker_excluder: false
+ # disable openshift excluder is never overridden to be enabled
+ # disable it if the docker excluder is enabled
+ unexclude_openshift_excluder: "{{ r_openshift_excluder_enable_openshift_excluder }}"
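
Worth noting in the rewrite above: the `when` conditions that used to be repeated on individual includes are now attached to a single `block`, so the condition is evaluated once for the whole group of tasks. A minimal standalone sketch of that idiom (the flag name is illustrative):

```yaml
---
- when: some_feature_enabled | bool      # illustrative flag set by the caller
  block:
    - name: First task that only runs when the flag is set
      debug:
        msg: feature step one
    - name: Second task under the same condition
      debug:
        msg: feature step two
```
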
diff --git a/roles/openshift_excluder/tasks/enable.yml b/roles/openshift_excluder/tasks/enable.yml
index e719325bc..fce44cfb5 100644
--- a/roles/openshift_excluder/tasks/enable.yml
+++ b/roles/openshift_excluder/tasks/enable.yml
@@ -1,18 +1,6 @@
---
-# input variables:
-- block:
- - include: init.yml
+- name: Install excluders
+ include: install.yml
- - include: install.yml
- vars:
- install_docker_excluder: "{{ docker_excluder_on | bool }}"
- install_openshift_excluder: "{{ openshift_excluder_on | bool }}"
- when: docker_excluder_on or openshift_excluder_on | bool
-
- - include: exclude.yml
- vars:
- exclude_docker_excluder: "{{ docker_excluder_on | bool }}"
- exclude_openshift_excluder: "{{ openshift_excluder_on | bool }}"
-
- when:
- - not openshift.common.is_atomic | bool
+- name: Enable excluders
+ include: exclude.yml
diff --git a/roles/openshift_excluder/tasks/exclude.yml b/roles/openshift_excluder/tasks/exclude.yml
index ca18d343f..934f1b2d2 100644
--- a/roles/openshift_excluder/tasks/exclude.yml
+++ b/roles/openshift_excluder/tasks/exclude.yml
@@ -1,30 +1,22 @@
---
-# input variables:
-# - exclude_docker_excluder
-# - exclude_openshift_excluder
-- block:
+- name: Check for docker-excluder
+ stat:
+ path: /sbin/{{ r_openshift_excluder_service_type }}-docker-excluder
+ register: docker_excluder_stat
- - name: Check for docker-excluder
- stat:
- path: /sbin/{{ openshift.common.service_type }}-docker-excluder
- register: docker_excluder_stat
- - name: Enable docker excluder
- command: "{{ openshift.common.service_type }}-docker-excluder exclude"
- when:
- - exclude_docker_excluder | default(false) | bool
- - docker_excluder_stat.stat.exists
+- name: Enable docker excluder
+ command: "{{ r_openshift_excluder_service_type }}-docker-excluder exclude"
+ when:
+ - r_openshift_excluder_enable_docker_excluder | bool
+ - docker_excluder_stat.stat.exists
- - name: Check for openshift excluder
- stat:
- path: /sbin/{{ openshift.common.service_type }}-excluder
- register: openshift_excluder_stat
- - name: Enable openshift excluder
- command: "{{ openshift.common.service_type }}-excluder exclude"
- # if the openshift override is set, it means the openshift excluder is disabled no matter what
- # if the openshift override is not set, the excluder is set based on enable_openshift_excluder
- when:
- - exclude_openshift_excluder | default(false) | bool
- - openshift_excluder_stat.stat.exists
+- name: Check for openshift excluder
+ stat:
+ path: /sbin/{{ r_openshift_excluder_service_type }}-excluder
+ register: openshift_excluder_stat
+- name: Enable openshift excluder
+ command: "{{ r_openshift_excluder_service_type }}-excluder exclude"
when:
- - not openshift.common.is_atomic | bool
+ - r_openshift_excluder_enable_openshift_excluder | bool
+ - openshift_excluder_stat.stat.exists
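
The exclude tasks keep the same guard pattern as before, just with the new `r_openshift_excluder_service_type` variable: `stat` the excluder binary, register the result, and only run the command when it exists, so hosts without the package are a no-op. A standalone sketch of that guard (the binary path assumes an `origin` service type and is illustrative):

```yaml
---
- name: Check for the excluder binary
  stat:
    path: /sbin/origin-docker-excluder
  register: excluder_stat

- name: Apply the exclusion only when the binary is present
  command: /sbin/origin-docker-excluder exclude
  when: excluder_stat.stat.exists
```
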
diff --git a/roles/openshift_excluder/tasks/init.yml b/roles/openshift_excluder/tasks/init.yml
deleted file mode 100644
index 1ea18f363..000000000
--- a/roles/openshift_excluder/tasks/init.yml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-- name: Evalute if docker excluder is to be enabled
- set_fact:
- docker_excluder_on: "{{ enable_docker_excluder | default(enable_excluders) | bool }}"
-
-- debug: var=docker_excluder_on
-
-- name: Evalute if openshift excluder is to be enabled
- set_fact:
- openshift_excluder_on: "{{ enable_openshift_excluder | default(enable_excluders) | bool }}"
-
-- debug: var=openshift_excluder_on
diff --git a/roles/openshift_excluder/tasks/install.yml b/roles/openshift_excluder/tasks/install.yml
index 3490a613e..d09358bee 100644
--- a/roles/openshift_excluder/tasks/install.yml
+++ b/roles/openshift_excluder/tasks/install.yml
@@ -1,21 +1,14 @@
---
-# input Variables
-# - install_docker_excluder
-# - install_openshift_excluder
-- block:
-
- - name: Install docker excluder
- package:
- name: "{{ openshift.common.service_type }}-docker-excluder{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) + '*' }}"
- state: "{{ docker_excluder_package_state }}"
- when:
- - install_docker_excluder | default(true) | bool
+- name: Install docker excluder
+ package:
+ name: "{{ r_openshift_excluder_service_type }}-docker-excluder{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) + '*' }}"
+ state: "{{ r_openshift_excluder_docker_package_state }}"
+ when:
+ - r_openshift_excluder_enable_docker_excluder | bool
- - name: Install openshift excluder
- package:
- name: "{{ openshift.common.service_type }}-excluder{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) + '*' }}"
- state: "{{ openshift_excluder_package_state }}"
- when:
- - install_openshift_excluder | default(true) | bool
+- name: Install openshift excluder
+ package:
+ name: "{{ r_openshift_excluder_service_type }}-excluder{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) + '*' }}"
+ state: "{{ r_openshift_excluder_package_state }}"
when:
- - not openshift.common.is_atomic | bool
+ - r_openshift_excluder_enable_openshift_excluder | bool
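A rough standalone sketch of the package spec assembled above. The real tag-to-RPM-version mapping is done by the `oo_image_tag_to_rpm_version` filter in filter_plugins/oo_filters.py, so the `lstrip('v')` behaviour and the helper name below are assumptions made only for illustration:

```python
def excluder_package_spec(service_type, openshift_pkg_version=''):
    """Illustrative only: approximate the name passed to the package module above."""
    version = openshift_pkg_version.lstrip('v')       # assumed filter behaviour
    suffix = '-%s' % version if version else ''
    return '%s-docker-excluder%s*' % (service_type, suffix)

print(excluder_package_spec('origin', 'v1.5.0'))   # origin-docker-excluder-1.5.0*
print(excluder_package_spec('origin'))             # origin-docker-excluder*
```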
diff --git a/roles/openshift_excluder/tasks/main.yml b/roles/openshift_excluder/tasks/main.yml
new file mode 100644
index 000000000..db20b4012
--- /dev/null
+++ b/roles/openshift_excluder/tasks/main.yml
@@ -0,0 +1,38 @@
+---
+- name: Detecting Atomic Host Operating System
+ stat:
+ path: /run/ostree-booted
+ register: ostree_booted
+
+- block:
+
+ - name: Debug r_openshift_excluder_enable_docker_excluder
+ debug:
+ var: r_openshift_excluder_enable_docker_excluder
+
+ - name: Debug r_openshift_excluder_enable_openshift_excluder
+ debug:
+ var: r_openshift_excluder_enable_openshift_excluder
+
+ - name: Fail if invalid openshift_excluder_action provided
+ fail:
+ msg: "openshift_excluder role can only be called with 'enable' or 'disable'"
+ when: r_openshift_excluder_action not in ['enable', 'disable']
+
+ - name: Fail if r_openshift_excluder_service_type is not defined
+ fail:
+ msg: "r_openshift_excluder_service_type must be specified for this role"
+ when: r_openshift_excluder_service_type is not defined
+
+ - name: Fail if r_openshift_excluder_upgrade_target is not defined
+ fail:
+ msg: "r_openshift_excluder_upgrade_target must be provided when using this role for upgrades"
+ when:
+ - r_openshift_excluder_verify_upgrade | bool
+ - r_openshift_excluder_upgrade_target is not defined
+
+ - name: Include main action task file
+ include: "{{ r_openshift_excluder_action }}.yml"
+
+ when:
+ - not ostree_booted.stat.exists | bool
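A condensed Python sketch of the new entry-point logic in tasks/main.yml: skip everything on Atomic Host (detected via the /run/ostree-booted marker), validate the requested action, then dispatch to the matching task file. The function name and return values are invented for illustration:

```python
import os

def plan_excluder_run(action, service_type):
    """Mirror of the guards in tasks/main.yml (illustrative, not the role itself)."""
    if os.path.isfile('/run/ostree-booted'):
        return None  # Atomic Host detected: the whole block is skipped
    if action not in ('enable', 'disable'):
        raise ValueError("openshift_excluder role can only be called with 'enable' or 'disable'")
    if not service_type:
        raise ValueError("r_openshift_excluder_service_type must be specified for this role")
    return '%s.yml' % action  # the task file that would be included

# e.g. plan_excluder_run('enable', 'origin') -> 'enable.yml'
```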
diff --git a/roles/openshift_excluder/tasks/unexclude.yml b/roles/openshift_excluder/tasks/unexclude.yml
index 4df7f14b4..a5ce8d5c7 100644
--- a/roles/openshift_excluder/tasks/unexclude.yml
+++ b/roles/openshift_excluder/tasks/unexclude.yml
@@ -2,27 +2,25 @@
# input variables:
# - unexclude_docker_excluder
# - unexclude_openshift_excluder
-- block:
- - name: Check for docker-excluder
- stat:
- path: /sbin/{{ openshift.common.service_type }}-docker-excluder
- register: docker_excluder_stat
- - name: disable docker excluder
- command: "{{ openshift.common.service_type }}-docker-excluder unexclude"
- when:
- - unexclude_docker_excluder | default(false) | bool
- - docker_excluder_stat.stat.exists
+- name: Check for docker-excluder
+ stat:
+ path: /sbin/{{ r_openshift_excluder_service_type }}-docker-excluder
+ register: docker_excluder_stat
- - name: Check for openshift excluder
- stat:
- path: /sbin/{{ openshift.common.service_type }}-excluder
- register: openshift_excluder_stat
- - name: disable openshift excluder
- command: "{{ openshift.common.service_type }}-excluder unexclude"
- when:
- - unexclude_openshift_excluder | default(false) | bool
- - openshift_excluder_stat.stat.exists
+- name: disable docker excluder
+ command: "{{ r_openshift_excluder_service_type }}-docker-excluder unexclude"
+ when:
+ - unexclude_docker_excluder | default(false) | bool
+ - docker_excluder_stat.stat.exists
+
+- name: Check for openshift excluder
+ stat:
+ path: /sbin/{{ r_openshift_excluder_service_type }}-excluder
+ register: openshift_excluder_stat
+- name: disable openshift excluder
+ command: "{{ r_openshift_excluder_service_type }}-excluder unexclude"
when:
- - not openshift.common.is_atomic | bool
+ - unexclude_openshift_excluder | default(false) | bool
+ - openshift_excluder_stat.stat.exists
diff --git a/roles/openshift_excluder/tasks/verify_excluder.yml b/roles/openshift_excluder/tasks/verify_excluder.yml
new file mode 100644
index 000000000..c35639c1b
--- /dev/null
+++ b/roles/openshift_excluder/tasks/verify_excluder.yml
@@ -0,0 +1,32 @@
+---
+# input variables:
+# - excluder
+- name: Get available excluder version
+ repoquery:
+ name: "{{ excluder }}"
+ ignore_excluders: true
+ register: repoquery_out
+
+- name: Fail when excluder package is not found
+ fail:
+ msg: "Package {{ excluder }} not found"
+ when: not repoquery_out.results.package_found
+
+- name: Set fact excluder_version
+ set_fact:
+ excluder_version: "{{ repoquery_out.results.versions.available_versions.0 }}"
+
+- name: "{{ excluder }} version detected"
+ debug:
+ msg: "{{ excluder }}: {{ excluder_version }}"
+
+- name: Printing upgrade target version
+ debug:
+ msg: "{{ r_openshift_excluder_upgrade_target }}"
+
+- name: Check that the available {{ excluder }} version is at most the upgrade target version
+ fail:
+ msg: "Available {{ excluder }} version {{ excluder_version }} is higher than the upgrade target version"
+ when:
+ - excluder_version != ''
+ - excluder_version.split('.')[0:2] | join('.') | version_compare(r_openshift_excluder_upgrade_target.split('.')[0:2] | join('.'), '>', strict=True)
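The failure condition above compares only the major.minor parts of the two versions. A minimal standalone sketch, assuming `version_compare` orders dotted version strings the way LooseVersion does:

```python
from distutils.version import LooseVersion

def excluder_blocks_upgrade(excluder_version, upgrade_target):
    """True when the available excluder is newer (major.minor) than the upgrade target."""
    if not excluder_version:
        return False
    def major_minor(version):
        return '.'.join(version.split('.')[0:2])
    return LooseVersion(major_minor(excluder_version)) > LooseVersion(major_minor(upgrade_target))

assert excluder_blocks_upgrade('3.6.173', '3.5')       # a 3.6 excluder blocks a 3.5 upgrade target
assert not excluder_blocks_upgrade('3.5.5', '3.5.0')   # same minor stream is fine
```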
diff --git a/roles/openshift_excluder/tasks/verify_upgrade.yml b/roles/openshift_excluder/tasks/verify_upgrade.yml
new file mode 100644
index 000000000..42026664a
--- /dev/null
+++ b/roles/openshift_excluder/tasks/verify_upgrade.yml
@@ -0,0 +1,12 @@
+---
+- name: Verify Docker Excluder version
+ include: verify_excluder.yml
+ vars:
+ excluder: "{{ r_openshift_excluder_service_type }}-docker-excluder"
+ when: r_openshift_excluder_enable_docker_excluder | bool
+
+- name: Verify OpenShift Excluder version
+ include: verify_excluder.yml
+ vars:
+ excluder: "{{ r_openshift_excluder_service_type }}-excluder"
+ when: r_openshift_excluder_enable_openshift_excluder | bool
diff --git a/roles/openshift_expand_partition/tasks/main.yml b/roles/openshift_expand_partition/tasks/main.yml
index 00603f4fa..4cb5418c6 100644
--- a/roles/openshift_expand_partition/tasks/main.yml
+++ b/roles/openshift_expand_partition/tasks/main.yml
@@ -6,7 +6,7 @@
- name: Determine if growpart is installed
command: "rpm -q cloud-utils-growpart"
register: has_growpart
- failed_when: "has_growpart.cr != 0 and 'package cloud-utils-growpart is not installed' not in has_growpart.stdout"
+ failed_when: has_growpart.cr != 0 and 'package cloud-utils-growpart is not installed' not in has_growpart.stdout
changed_when: false
when: openshift.common.is_containerized | bool
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 7edf141e5..514c06500 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -1,7 +1,6 @@
#!/usr/bin/python
# pylint: disable=too-many-lines
# -*- coding: utf-8 -*-
-# vim: expandtab:tabstop=4:shiftwidth=4
# Reason: Disable pylint too-many-lines because we don't want to split up this file.
# Status: Permanently disabled to keep this module as self-contained as possible.
@@ -911,7 +910,7 @@ def set_version_facts_if_unset(facts):
version_gte_3_3_or_1_3 = version >= LooseVersion('1.3.0')
version_gte_3_4_or_1_4 = version >= LooseVersion('1.4.0')
version_gte_3_5_or_1_5 = version >= LooseVersion('1.5.0')
- version_gte_3_6_or_1_6 = version >= LooseVersion('3.6.0') or version >= LooseVersion('1.6.0')
+ version_gte_3_6 = version >= LooseVersion('3.6.0')
else:
version_gte_3_1_or_1_1 = version >= LooseVersion('3.0.2.905')
version_gte_3_1_1_or_1_1_1 = version >= LooseVersion('3.1.1')
@@ -919,25 +918,26 @@ def set_version_facts_if_unset(facts):
version_gte_3_3_or_1_3 = version >= LooseVersion('3.3.0')
version_gte_3_4_or_1_4 = version >= LooseVersion('3.4.0')
version_gte_3_5_or_1_5 = version >= LooseVersion('3.5.0')
- version_gte_3_6_or_1_6 = version >= LooseVersion('3.6.0')
+ version_gte_3_6 = version >= LooseVersion('3.6.0')
else:
+ # 'Latest' version is set to True, 'Next' versions set to False
version_gte_3_1_or_1_1 = True
version_gte_3_1_1_or_1_1_1 = True
version_gte_3_2_or_1_2 = True
version_gte_3_3_or_1_3 = True
version_gte_3_4_or_1_4 = True
version_gte_3_5_or_1_5 = True
- version_gte_3_6_or_1_6 = False
+ version_gte_3_6 = True
facts['common']['version_gte_3_1_or_1_1'] = version_gte_3_1_or_1_1
facts['common']['version_gte_3_1_1_or_1_1_1'] = version_gte_3_1_1_or_1_1_1
facts['common']['version_gte_3_2_or_1_2'] = version_gte_3_2_or_1_2
facts['common']['version_gte_3_3_or_1_3'] = version_gte_3_3_or_1_3
facts['common']['version_gte_3_4_or_1_4'] = version_gte_3_4_or_1_4
facts['common']['version_gte_3_5_or_1_5'] = version_gte_3_5_or_1_5
- facts['common']['version_gte_3_6_or_1_6'] = version_gte_3_6_or_1_6
+ facts['common']['version_gte_3_6'] = version_gte_3_6
- if version_gte_3_6_or_1_6:
- examples_content_version = 'v1.6'
+ if version_gte_3_6:
+ examples_content_version = 'v3.6'
elif version_gte_3_5_or_1_5:
examples_content_version = 'v1.5'
elif version_gte_3_4_or_1_4:
@@ -1302,7 +1302,7 @@ def get_version_output(binary, version_cmd):
def get_docker_version_info():
""" Parses and returns the docker version info """
result = None
- if is_service_running('docker'):
+ if is_service_running('docker') or is_service_running('container-engine'):
version_info = yaml.safe_load(get_version_output('/usr/bin/docker', 'version'))
if 'Server' in version_info:
result = {
@@ -1791,6 +1791,12 @@ def set_container_facts_if_unset(facts):
deployer_image = 'openshift/origin-deployer'
facts['common']['is_atomic'] = os.path.isfile('/run/ostree-booted')
+ # If openshift_docker_use_system_container is set and is True ....
+ if 'use_system_container' in list(facts['docker'].keys()):
+ if facts['docker']['use_system_container']:
+ # ... set the service name to container-engine
+ facts['docker']['service_name'] = 'container-engine'
+
if 'is_containerized' not in facts['common']:
facts['common']['is_containerized'] = facts['common']['is_atomic']
if 'cli_image' not in facts['common']:
@@ -1910,14 +1916,16 @@ class OpenShiftFacts(object):
)
self.role = role
+ # Collect system facts and preface each fact with 'ansible_'.
try:
- # ansible-2.1
# pylint: disable=too-many-function-args,invalid-name
self.system_facts = ansible_facts(module, ['hardware', 'network', 'virtual', 'facter']) # noqa: F405
+ additional_facts = {}
for (k, v) in self.system_facts.items():
- self.system_facts["ansible_%s" % k.replace('-', '_')] = v
+ additional_facts["ansible_%s" % k.replace('-', '_')] = v
+ self.system_facts.update(additional_facts)
except UnboundLocalError:
- # ansible-2.2
+ # ansible-2.2,2.3
self.system_facts = get_all_facts(module)['ansible_facts'] # noqa: F405
self.facts = self.generate_facts(local_facts,
@@ -2071,6 +2079,7 @@ class OpenShiftFacts(object):
hosted_registry_insecure = get_hosted_registry_insecure()
if hosted_registry_insecure is not None:
docker['hosted_registry_insecure'] = hosted_registry_insecure
+ docker['service_name'] = 'docker'
defaults['docker'] = docker
if 'clock' in roles:
@@ -2155,6 +2164,12 @@ class OpenShiftFacts(object):
nfs=dict(
directory='/exports',
options='*(rw,root_squash)'),
+ glusterfs=dict(
+ endpoints='glusterfs-registry-endpoints',
+ path='glusterfs-registry-volume',
+ readOnly=False,
+ swap=False,
+ swapcopy=True),
host=None,
access=dict(
modes=['ReadWriteMany']
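A condensed sketch of the renamed gate in openshift_facts.py: `version_gte_3_6` now comes from a single LooseVersion comparison and selects the v3.6 examples content. Only the branches visible in this hunk are reproduced, and the helper name is invented:

```python
from distutils.version import LooseVersion

def pick_examples_content_version(openshift_version):
    version = LooseVersion(openshift_version)
    if version >= LooseVersion('3.6.0'):
        return 'v3.6'          # the old "or 1.6" alias is gone
    if version >= LooseVersion('3.5.0') or version >= LooseVersion('1.5.0'):
        return 'v1.5'
    return 'older'             # earlier branches elided in this sketch

print(pick_examples_content_version('3.6.0'))  # v3.6
print(pick_examples_content_version('1.5.1'))  # v1.5
```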
diff --git a/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py b/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py
index 208e81048..7bce7f107 100644
--- a/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py
+++ b/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py
@@ -1,4 +1,3 @@
-# vim: expandtab:tabstop=4:shiftwidth=4
'''
Ansible callback plugin.
'''
diff --git a/roles/openshift_health_checker/library/aos_version.py b/roles/openshift_health_checker/library/aos_version.py
index a46589443..4460ec324 100755
--- a/roles/openshift_health_checker/library/aos_version.py
+++ b/roles/openshift_health_checker/library/aos_version.py
@@ -1,5 +1,4 @@
#!/usr/bin/python
-# vim: expandtab:tabstop=4:shiftwidth=4
'''
Ansible module for yum-based systems determining if multiple releases
of an OpenShift package are available, and if the release requested
diff --git a/roles/openshift_health_checker/library/check_yum_update.py b/roles/openshift_health_checker/library/check_yum_update.py
index 630ebc848..433795b67 100755
--- a/roles/openshift_health_checker/library/check_yum_update.py
+++ b/roles/openshift_health_checker/library/check_yum_update.py
@@ -1,5 +1,4 @@
#!/usr/bin/python
-# vim: expandtab:tabstop=4:shiftwidth=4
'''
Ansible module to test whether a yum update or install will succeed,
without actually performing it or running yum.
diff --git a/roles/openshift_health_checker/openshift_checks/disk_availability.py b/roles/openshift_health_checker/openshift_checks/disk_availability.py
new file mode 100644
index 000000000..c2792a0fe
--- /dev/null
+++ b/roles/openshift_health_checker/openshift_checks/disk_availability.py
@@ -0,0 +1,65 @@
+# pylint: disable=missing-docstring
+from openshift_checks import OpenShiftCheck, OpenShiftCheckException, get_var
+from openshift_checks.mixins import NotContainerizedMixin
+
+
+class DiskAvailability(NotContainerizedMixin, OpenShiftCheck):
+ """Check that recommended disk space is available before a first-time install."""
+
+ name = "disk_availability"
+ tags = ["preflight"]
+
+ # Values taken from the official installation documentation:
+ # https://docs.openshift.org/latest/install_config/install/prerequisites.html#system-requirements
+ recommended_disk_space_bytes = {
+ "masters": 40 * 10**9,
+ "nodes": 15 * 10**9,
+ "etcd": 20 * 10**9,
+ }
+
+ @classmethod
+ def is_active(cls, task_vars):
+ """Skip hosts that do not have recommended disk space requirements."""
+ group_names = get_var(task_vars, "group_names", default=[])
+ has_disk_space_recommendation = bool(set(group_names).intersection(cls.recommended_disk_space_bytes))
+ return super(DiskAvailability, cls).is_active(task_vars) and has_disk_space_recommendation
+
+ def run(self, tmp, task_vars):
+ group_names = get_var(task_vars, "group_names")
+ ansible_mounts = get_var(task_vars, "ansible_mounts")
+
+ min_free_bytes = max(self.recommended_disk_space_bytes.get(name, 0) for name in group_names)
+ free_bytes = self.openshift_available_disk(ansible_mounts)
+
+ if free_bytes < min_free_bytes:
+ return {
+ 'failed': True,
+ 'msg': (
+ 'Available disk space ({:.1f} GB) for the volume containing '
+ '"/var" is below minimum recommended space ({:.1f} GB)'
+ ).format(float(free_bytes) / 10**9, float(min_free_bytes) / 10**9)
+ }
+
+ return {}
+
+ @staticmethod
+ def openshift_available_disk(ansible_mounts):
+ """Determine the available disk space for an OpenShift installation.
+
+ ansible_mounts should be a list of dicts like the 'setup' Ansible module
+ returns.
+ """
+ # priority list in descending order
+ supported_mnt_paths = ["/var", "/"]
+ available_mnts = {mnt.get("mount"): mnt for mnt in ansible_mounts}
+
+ try:
+ for path in supported_mnt_paths:
+ if path in available_mnts:
+ return available_mnts[path]["size_available"]
+ except KeyError:
+ pass
+
+ paths = ''.join(sorted(available_mnts)) or 'none'
+ msg = "Unable to determine available disk space. Paths mounted: {}.".format(paths)
+ raise OpenShiftCheckException(msg)
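The mount-selection rule in openshift_available_disk is the interesting part: /var wins over /, and a missing size_available fact means the check cannot decide. A standalone sketch of that rule with invented sample data:

```python
def available_disk_bytes(ansible_mounts):
    """Pick free space from /var if mounted, else /, mirroring the check above."""
    mounts = {mnt.get('mount'): mnt for mnt in ansible_mounts}
    try:
        for path in ('/var', '/'):          # priority order, highest first
            if path in mounts:
                return mounts[path]['size_available']
    except KeyError:
        pass
    raise ValueError('Unable to determine available disk space. Paths mounted: %s.'
                     % (''.join(sorted(mounts)) or 'none'))

print(available_disk_bytes([{'mount': '/', 'size_available': 45 * 10**9},
                            {'mount': '/var', 'size_available': 10 * 10**9}]))  # 10000000000
```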
diff --git a/roles/openshift_health_checker/openshift_checks/memory_availability.py b/roles/openshift_health_checker/openshift_checks/memory_availability.py
new file mode 100644
index 000000000..28805dc37
--- /dev/null
+++ b/roles/openshift_health_checker/openshift_checks/memory_availability.py
@@ -0,0 +1,44 @@
+# pylint: disable=missing-docstring
+from openshift_checks import OpenShiftCheck, get_var
+
+
+class MemoryAvailability(OpenShiftCheck):
+ """Check that recommended memory is available."""
+
+ name = "memory_availability"
+ tags = ["preflight"]
+
+ # Values taken from the official installation documentation:
+ # https://docs.openshift.org/latest/install_config/install/prerequisites.html#system-requirements
+ recommended_memory_bytes = {
+ "masters": 16 * 10**9,
+ "nodes": 8 * 10**9,
+ "etcd": 20 * 10**9,
+ }
+
+ @classmethod
+ def is_active(cls, task_vars):
+ """Skip hosts that do not have recommended memory requirements."""
+ group_names = get_var(task_vars, "group_names", default=[])
+ has_memory_recommendation = bool(set(group_names).intersection(cls.recommended_memory_bytes))
+ return super(MemoryAvailability, cls).is_active(task_vars) and has_memory_recommendation
+
+ def run(self, tmp, task_vars):
+ group_names = get_var(task_vars, "group_names")
+ total_memory_bytes = get_var(task_vars, "ansible_memtotal_mb") * 10**6
+
+ min_memory_bytes = max(self.recommended_memory_bytes.get(name, 0) for name in group_names)
+
+ if total_memory_bytes < min_memory_bytes:
+ return {
+ 'failed': True,
+ 'msg': (
+ 'Available memory ({available:.1f} GB) '
+ 'below recommended value ({recommended:.1f} GB)'
+ ).format(
+ available=float(total_memory_bytes) / 10**9,
+ recommended=float(min_memory_bytes) / 10**9,
+ ),
+ }
+
+ return {}
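The memory check's only wrinkle is the unit conversion: ansible_memtotal_mb is reported in megabytes, so it is scaled by 10**6 before comparison against the per-group byte thresholds. A standalone sketch (thresholds copied from the check above, helper name invented):

```python
RECOMMENDED_MEMORY_BYTES = {'masters': 16 * 10**9, 'nodes': 8 * 10**9, 'etcd': 20 * 10**9}

def memory_is_sufficient(group_names, ansible_memtotal_mb):
    """True when total memory meets the strictest recommendation for the host's groups."""
    min_memory_bytes = max(RECOMMENDED_MEMORY_BYTES.get(name, 0) for name in group_names)
    return ansible_memtotal_mb * 10**6 >= min_memory_bytes

print(memory_is_sufficient(['nodes'], 8200))              # True
print(memory_is_sufficient(['masters', 'nodes'], 11000))  # False: masters need 16 GB
```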
diff --git a/roles/openshift_health_checker/openshift_checks/mixins.py b/roles/openshift_health_checker/openshift_checks/mixins.py
index 657e15160..20d160eaf 100644
--- a/roles/openshift_health_checker/openshift_checks/mixins.py
+++ b/roles/openshift_health_checker/openshift_checks/mixins.py
@@ -1,4 +1,8 @@
-# pylint: disable=missing-docstring
+# pylint: disable=missing-docstring,too-few-public-methods
+"""
+Mixin classes meant to be used with subclasses of OpenShiftCheck.
+"""
+
from openshift_checks import get_var
@@ -7,12 +11,5 @@ class NotContainerizedMixin(object):
@classmethod
def is_active(cls, task_vars):
- return (
- # This mixin is meant to be used with subclasses of OpenShiftCheck.
- super(NotContainerizedMixin, cls).is_active(task_vars) and
- not cls.is_containerized(task_vars)
- )
-
- @staticmethod
- def is_containerized(task_vars):
- return get_var(task_vars, "openshift", "common", "is_containerized")
+ is_containerized = get_var(task_vars, "openshift", "common", "is_containerized")
+ return super(NotContainerizedMixin, cls).is_active(task_vars) and not is_containerized
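The simplified mixin relies on cooperative super() calls, so several mixins can each veto is_active independently. A minimal toy illustration of that pattern; every class name here except NotContainerizedMixin is invented:

```python
class OpenShiftCheckStub(object):
    """Stands in for OpenShiftCheck just to terminate the super() chain."""
    @classmethod
    def is_active(cls, task_vars):
        return True

class NotContainerizedMixin(object):
    @classmethod
    def is_active(cls, task_vars):
        is_containerized = task_vars.get('is_containerized', False)
        return super(NotContainerizedMixin, cls).is_active(task_vars) and not is_containerized

class SomeCheck(NotContainerizedMixin, OpenShiftCheckStub):
    pass

print(SomeCheck.is_active({'is_containerized': True}))   # False
print(SomeCheck.is_active({'is_containerized': False}))  # True
```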
diff --git a/roles/openshift_health_checker/openshift_checks/package_version.py b/roles/openshift_health_checker/openshift_checks/package_version.py
index cca2d8b75..682f6bd40 100644
--- a/roles/openshift_health_checker/openshift_checks/package_version.py
+++ b/roles/openshift_health_checker/openshift_checks/package_version.py
@@ -9,6 +9,13 @@ class PackageVersion(NotContainerizedMixin, OpenShiftCheck):
name = "package_version"
tags = ["preflight"]
+ @classmethod
+ def is_active(cls, task_vars):
+ """Skip hosts that do not have package requirements."""
+ group_names = get_var(task_vars, "group_names", default=[])
+ master_or_node = 'masters' in group_names or 'nodes' in group_names
+ return super(PackageVersion, cls).is_active(task_vars) and master_or_node
+
def run(self, tmp, task_vars):
args = {
"requested_openshift_release": get_var(task_vars, "openshift_release", default=''),
diff --git a/roles/openshift_health_checker/test/action_plugin_test.py b/roles/openshift_health_checker/test/action_plugin_test.py
index a877246f4..2693ae37b 100644
--- a/roles/openshift_health_checker/test/action_plugin_test.py
+++ b/roles/openshift_health_checker/test/action_plugin_test.py
@@ -1,5 +1,7 @@
import pytest
+from ansible.playbook.play_context import PlayContext
+
from openshift_health_check import ActionModule, resolve_checks
from openshift_checks import OpenShiftCheckException
@@ -34,7 +36,7 @@ def fake_check(name='fake_check', tags=None, is_active=True, run_return=None, ru
@pytest.fixture
def plugin():
task = FakeTask('openshift_health_check', {'checks': ['fake_check']})
- plugin = ActionModule(task, None, None, None, None, None)
+ plugin = ActionModule(task, None, PlayContext(), None, None, None)
return plugin
diff --git a/roles/openshift_health_checker/test/disk_availability_test.py b/roles/openshift_health_checker/test/disk_availability_test.py
new file mode 100644
index 000000000..970b474d7
--- /dev/null
+++ b/roles/openshift_health_checker/test/disk_availability_test.py
@@ -0,0 +1,155 @@
+import pytest
+
+from openshift_checks.disk_availability import DiskAvailability, OpenShiftCheckException
+
+
+@pytest.mark.parametrize('group_names,is_containerized,is_active', [
+ (['masters'], False, True),
+ # ensure check is skipped on containerized installs
+ (['masters'], True, False),
+ (['nodes'], False, True),
+ (['etcd'], False, True),
+ (['masters', 'nodes'], False, True),
+ (['masters', 'etcd'], False, True),
+ ([], False, False),
+ (['lb'], False, False),
+ (['nfs'], False, False),
+])
+def test_is_active(group_names, is_containerized, is_active):
+ task_vars = dict(
+ group_names=group_names,
+ openshift=dict(common=dict(is_containerized=is_containerized)),
+ )
+ assert DiskAvailability.is_active(task_vars=task_vars) == is_active
+
+
+@pytest.mark.parametrize('ansible_mounts,extra_words', [
+ ([], ['none']), # empty ansible_mounts
+ ([{'mount': '/mnt'}], ['/mnt']), # missing relevant mount paths
+ ([{'mount': '/var'}], ['/var']), # missing size_available
+])
+def test_cannot_determine_available_disk(ansible_mounts, extra_words):
+ task_vars = dict(
+ group_names=['masters'],
+ ansible_mounts=ansible_mounts,
+ )
+ check = DiskAvailability(execute_module=fake_execute_module)
+
+ with pytest.raises(OpenShiftCheckException) as excinfo:
+ check.run(tmp=None, task_vars=task_vars)
+
+ for word in 'determine available disk'.split() + extra_words:
+ assert word in str(excinfo.value)
+
+
+@pytest.mark.parametrize('group_names,ansible_mounts', [
+ (
+ ['masters'],
+ [{
+ 'mount': '/',
+ 'size_available': 40 * 10**9 + 1,
+ }],
+ ),
+ (
+ ['nodes'],
+ [{
+ 'mount': '/',
+ 'size_available': 15 * 10**9 + 1,
+ }],
+ ),
+ (
+ ['etcd'],
+ [{
+ 'mount': '/',
+ 'size_available': 20 * 10**9 + 1,
+ }],
+ ),
+ (
+ ['etcd'],
+ [{
+ # not enough space on / ...
+ 'mount': '/',
+ 'size_available': 0,
+ }, {
+ # ... but enough on /var
+ 'mount': '/var',
+ 'size_available': 20 * 10**9 + 1,
+ }],
+ ),
+])
+def test_succeeds_with_recommended_disk_space(group_names, ansible_mounts):
+ task_vars = dict(
+ group_names=group_names,
+ ansible_mounts=ansible_mounts,
+ )
+
+ check = DiskAvailability(execute_module=fake_execute_module)
+ result = check.run(tmp=None, task_vars=task_vars)
+
+ assert not result.get('failed', False)
+
+
+@pytest.mark.parametrize('group_names,ansible_mounts,extra_words', [
+ (
+ ['masters'],
+ [{
+ 'mount': '/',
+ 'size_available': 1,
+ }],
+ ['0.0 GB'],
+ ),
+ (
+ ['nodes'],
+ [{
+ 'mount': '/',
+ 'size_available': 1 * 10**9,
+ }],
+ ['1.0 GB'],
+ ),
+ (
+ ['etcd'],
+ [{
+ 'mount': '/',
+ 'size_available': 1,
+ }],
+ ['0.0 GB'],
+ ),
+ (
+ ['nodes', 'masters'],
+ [{
+ 'mount': '/',
+ # enough space for a node, not enough for a master
+ 'size_available': 15 * 10**9 + 1,
+ }],
+ ['15.0 GB'],
+ ),
+ (
+ ['etcd'],
+ [{
+ # enough space on / ...
+ 'mount': '/',
+ 'size_available': 20 * 10**9 + 1,
+ }, {
+ # .. but not enough on /var
+ 'mount': '/var',
+ 'size_available': 0,
+ }],
+ ['0.0 GB'],
+ ),
+])
+def test_fails_with_insufficient_disk_space(group_names, ansible_mounts, extra_words):
+ task_vars = dict(
+ group_names=group_names,
+ ansible_mounts=ansible_mounts,
+ )
+
+ check = DiskAvailability(execute_module=fake_execute_module)
+ result = check.run(tmp=None, task_vars=task_vars)
+
+ assert result['failed']
+ for word in 'below recommended'.split() + extra_words:
+ assert word in result['msg']
+
+
+def fake_execute_module(*args):
+ raise AssertionError('this function should not be called')
diff --git a/roles/openshift_health_checker/test/memory_availability_test.py b/roles/openshift_health_checker/test/memory_availability_test.py
new file mode 100644
index 000000000..e161a5b9e
--- /dev/null
+++ b/roles/openshift_health_checker/test/memory_availability_test.py
@@ -0,0 +1,91 @@
+import pytest
+
+from openshift_checks.memory_availability import MemoryAvailability
+
+
+@pytest.mark.parametrize('group_names,is_active', [
+ (['masters'], True),
+ (['nodes'], True),
+ (['etcd'], True),
+ (['masters', 'nodes'], True),
+ (['masters', 'etcd'], True),
+ ([], False),
+ (['lb'], False),
+ (['nfs'], False),
+])
+def test_is_active(group_names, is_active):
+ task_vars = dict(
+ group_names=group_names,
+ )
+ assert MemoryAvailability.is_active(task_vars=task_vars) == is_active
+
+
+@pytest.mark.parametrize('group_names,ansible_memtotal_mb', [
+ (
+ ['masters'],
+ 17200,
+ ),
+ (
+ ['nodes'],
+ 8200,
+ ),
+ (
+ ['etcd'],
+ 22200,
+ ),
+ (
+ ['masters', 'nodes'],
+ 17000,
+ ),
+])
+def test_succeeds_with_recommended_memory(group_names, ansible_memtotal_mb):
+ task_vars = dict(
+ group_names=group_names,
+ ansible_memtotal_mb=ansible_memtotal_mb,
+ )
+
+ check = MemoryAvailability(execute_module=fake_execute_module)
+ result = check.run(tmp=None, task_vars=task_vars)
+
+ assert not result.get('failed', False)
+
+
+@pytest.mark.parametrize('group_names,ansible_memtotal_mb,extra_words', [
+ (
+ ['masters'],
+ 0,
+ ['0.0 GB'],
+ ),
+ (
+ ['nodes'],
+ 100,
+ ['0.1 GB'],
+ ),
+ (
+ ['etcd'],
+ -1,
+ ['0.0 GB'],
+ ),
+ (
+ ['nodes', 'masters'],
+ # enough memory for a node, not enough for a master
+ 11000,
+ ['11.0 GB'],
+ ),
+])
+def test_fails_with_insufficient_memory(group_names, ansible_memtotal_mb, extra_words):
+ task_vars = dict(
+ group_names=group_names,
+ ansible_memtotal_mb=ansible_memtotal_mb,
+ )
+
+ check = MemoryAvailability(execute_module=fake_execute_module)
+ result = check.run(tmp=None, task_vars=task_vars)
+
+ assert result['failed']
+ for word in 'below recommended'.split() + extra_words:
+ assert word in result['msg']
+
+
+def fake_execute_module(*args):
+ raise AssertionError('this function should not be called')
diff --git a/roles/openshift_health_checker/test/package_version_test.py b/roles/openshift_health_checker/test/package_version_test.py
index c6889ee9b..196d9816a 100644
--- a/roles/openshift_health_checker/test/package_version_test.py
+++ b/roles/openshift_health_checker/test/package_version_test.py
@@ -1,3 +1,5 @@
+import pytest
+
from openshift_checks.package_version import PackageVersion
@@ -22,3 +24,23 @@ def test_package_version():
check = PackageVersion(execute_module=execute_module)
result = check.run(tmp=None, task_vars=task_vars)
assert result is return_value
+
+
+@pytest.mark.parametrize('group_names,is_containerized,is_active', [
+ (['masters'], False, True),
+ # ensure check is skipped on containerized installs
+ (['masters'], True, False),
+ (['nodes'], False, True),
+ (['masters', 'nodes'], False, True),
+ (['masters', 'etcd'], False, True),
+ ([], False, False),
+ (['etcd'], False, False),
+ (['lb'], False, False),
+ (['nfs'], False, False),
+])
+def test_package_version_skip_when_not_master_nor_node(group_names, is_containerized, is_active):
+ task_vars = dict(
+ group_names=group_names,
+ openshift=dict(common=dict(is_containerized=is_containerized)),
+ )
+ assert PackageVersion.is_active(task_vars=task_vars) == is_active
diff --git a/roles/openshift_hosted/README.md b/roles/openshift_hosted/README.md
index 6d576df71..3e5d7f860 100644
--- a/roles/openshift_hosted/README.md
+++ b/roles/openshift_hosted/README.md
@@ -28,6 +28,14 @@ From this role:
| openshift_hosted_registry_selector | region=infra | Node selector used when creating registry. The OpenShift registry will only be deployed to nodes matching this selector. |
| openshift_hosted_registry_cert_expire_days | `730` (2 years) | Validity of the certificates in days. Works only with OpenShift version 1.5 (3.5) and later. |
+If you specify `openshift_hosted_registry_kind=glusterfs`, the following
+variables also control configuration behavior:
+
+| Name | Default value | Description |
+|----------------------------------------------|---------------|------------------------------------------------------------------------------|
+| openshift_hosted_registry_glusterfs_swap | False | Whether to swap an existing registry's storage volume for a GlusterFS volume |
+| openshift_hosted_registry_glusterfs_swapcopy | True | If swapping, also copy the current contents of the registry volume |
+
Dependencies
------------
diff --git a/roles/openshift_hosted/defaults/main.yml b/roles/openshift_hosted/defaults/main.yml
index 596b36239..e7e62e5e4 100644
--- a/roles/openshift_hosted/defaults/main.yml
+++ b/roles/openshift_hosted/defaults/main.yml
@@ -24,9 +24,9 @@ openshift_hosted_routers:
ports:
- 80:80
- 443:443
- certificates: "{{ openshift_hosted_router_certificates | default({}) }}"
+ certificate: "{{ openshift_hosted_router_certificate | default({}) }}"
-openshift_hosted_router_certificates: {}
+openshift_hosted_router_certificate: {}
openshift_hosted_registry_cert_expire_days: 730
openshift_hosted_router_create_certificate: False
diff --git a/roles/openshift_hosted/tasks/registry/registry.yml b/roles/openshift_hosted/tasks/registry/registry.yml
index 0b8042473..751489958 100644
--- a/roles/openshift_hosted/tasks/registry/registry.yml
+++ b/roles/openshift_hosted/tasks/registry/registry.yml
@@ -61,7 +61,7 @@
name: "{{ openshift_hosted_registry_serviceaccount }}"
namespace: "{{ openshift_hosted_registry_namespace }}"
-- name: Grant the registry serivce account access to the appropriate scc
+- name: Grant the registry service account access to the appropriate scc
oc_adm_policy_user:
user: "system:serviceaccount:{{ openshift_hosted_registry_namespace }}:{{ openshift_hosted_registry_serviceaccount }}"
namespace: "{{ openshift_hosted_registry_namespace }}"
@@ -109,7 +109,7 @@
type: persistentVolumeClaim
claim_name: "{{ openshift.hosted.registry.storage.volume.name }}-claim"
when:
- - openshift.hosted.registry.storage.kind | default(none) in ['nfs', 'openstack']
+ - openshift.hosted.registry.storage.kind | default(none) in ['nfs', 'openstack', 'glusterfs']
- name: Create OpenShift registry
oc_adm_registry:
@@ -123,3 +123,7 @@
volume_mounts: "{{ openshift_hosted_registry_volumes }}"
edits: "{{ openshift_hosted_registry_edits }}"
force: "{{ True|bool in openshift_hosted_registry_force }}"
+
+- include: storage/glusterfs.yml
+ when:
+ - openshift.hosted.registry.storage.kind | default(none) == 'glusterfs' or openshift.hosted.registry.storage.glusterfs.swap
diff --git a/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml b/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml
new file mode 100644
index 000000000..e6bb196b8
--- /dev/null
+++ b/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml
@@ -0,0 +1,92 @@
+---
+- name: Get registry DeploymentConfig
+ oc_obj:
+ namespace: "{{ openshift_hosted_registry_namespace }}"
+ state: list
+ kind: dc
+ name: "{{ openshift_hosted_registry_name }}"
+ register: registry_dc
+
+- name: Wait for registry pods
+ oc_obj:
+ namespace: "{{ openshift_hosted_registry_namespace }}"
+ state: list
+ kind: pod
+ selector: "{% for label, value in registry_dc.results.results[0].spec.selector.iteritems() %}{{ label }}={{ value }}{% if not loop.last %},{% endif %}{% endfor %}"
+ register: registry_pods
+ until:
+ - "registry_pods.results.results[0]['items'] | count > 0"
+ # There must be as many matching pods with 'Ready' status True as there are expected replicas
+ - "registry_pods.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == openshift_hosted_registry_replicas | int"
+ delay: 10
+ retries: "{{ (600 / 10) | int }}"
+
+- name: Determine registry fsGroup
+ set_fact:
+ openshift_hosted_registry_fsgroup: "{{ registry_pods.results.results[0]['items'][0].spec.securityContext.fsGroup }}"
+
+- name: Create temp mount directory
+ command: mktemp -d /tmp/openshift-glusterfs-registry-XXXXXX
+ register: mktemp
+ changed_when: False
+ check_mode: no
+
+- name: Mount registry volume
+ mount:
+ state: mounted
+ fstype: glusterfs
+ src: "{{ groups.oo_glusterfs_to_config[0] }}:/{{ openshift.hosted.registry.storage.glusterfs.path }}"
+ name: "{{ mktemp.stdout }}"
+
+- name: Set registry volume permissions
+ file:
+ dest: "{{ mktemp.stdout }}"
+ state: directory
+ group: "{{ openshift_hosted_registry_fsgroup }}"
+ mode: "2775"
+ recurse: True
+
+- block:
+ - name: Activate registry maintenance mode
+ oc_env:
+ namespace: "{{ openshift_hosted_registry_namespace }}"
+ name: "{{ openshift_hosted_registry_name }}"
+ env_vars:
+ - REGISTRY_STORAGE_MAINTENANCE_READONLY_ENABLED: 'true'
+
+ - name: Get first registry pod name
+ set_fact:
+ registry_pod_name: "{{ registry_pods.results.results[0]['items'][0].metadata.name }}"
+
+ - name: Copy current registry contents to new GlusterFS volume
+ command: "oc rsync {{ registry_pod_name }}:/registry/ {{ mktemp.stdout }}/"
+ when: openshift.hosted.registry.storage.glusterfs.swapcopy
+
+ - name: Swap new GlusterFS registry volume
+ oc_volume:
+ namespace: "{{ openshift_hosted_registry_namespace }}"
+ name: "{{ openshift_hosted_registry_name }}"
+ vol_name: registry-storage
+ mount_type: pvc
+ claim_name: "{{ openshift.hosted.registry.storage.volume.name }}-glusterfs-claim"
+
+ - name: Deactivate registry maintenance mode
+ oc_env:
+ namespace: "{{ openshift_hosted_registry_namespace }}"
+ name: "{{ openshift_hosted_registry_name }}"
+ state: absent
+ env_vars:
+ - REGISTRY_STORAGE_MAINTENANCE_READONLY_ENABLED: 'true'
+ when: openshift.hosted.registry.storage.glusterfs.swap
+
+- name: Unmount registry volume
+ mount:
+ state: unmounted
+ name: "{{ mktemp.stdout }}"
+
+- name: Delete temp mount directory
+ file:
+ dest: "{{ mktemp.stdout }}"
+ state: absent
+ changed_when: False
+ check_mode: no
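The `until` condition in "Wait for registry pods" is dense: it keeps polling until every expected replica reports a Ready=True status condition. A standalone sketch of that predicate, with the pod structures reduced to invented sample data:

```python
def all_replicas_ready(pods, expected_replicas):
    """True once each expected replica has a Ready=True status condition."""
    ready = sum(
        1 for pod in pods
        if any(cond.get('type') == 'Ready' and cond.get('status') == 'True'
               for cond in pod.get('status', {}).get('conditions', []))
    )
    return len(pods) > 0 and ready == int(expected_replicas)

pods = [{'status': {'conditions': [{'type': 'Ready', 'status': 'True'}]}}]
print(all_replicas_ready(pods, 1))  # True
print(all_replicas_ready([], 1))    # False -> keep retrying
```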
diff --git a/roles/openshift_hosted/tasks/router/router.yml b/roles/openshift_hosted/tasks/router/router.yml
index c71d0a34f..e75e3b16f 100644
--- a/roles/openshift_hosted/tasks/router/router.yml
+++ b/roles/openshift_hosted/tasks/router/router.yml
@@ -25,13 +25,13 @@
hostnames:
- "{{ openshift_master_default_subdomain }}"
- "*.{{ openshift_master_default_subdomain }}"
- cert: "{{ ('/etc/origin/master/' ~ (item.certificates.certfile | basename)) if 'certfile' in item.certificates else ((openshift_master_config_dir) ~ '/openshift-router.crt') }}"
- key: "{{ ('/etc/origin/master/' ~ (item.certificates.keyfile | basename)) if 'keyfile' in item.certificates else ((openshift_master_config_dir) ~ '/openshift-router.key') }}"
+ cert: "{{ ('/etc/origin/master/' ~ (item.certificate.certfile | basename)) if 'certfile' in item.certificate else ((openshift_master_config_dir) ~ '/openshift-router.crt') }}"
+ key: "{{ ('/etc/origin/master/' ~ (item.certificate.keyfile | basename)) if 'keyfile' in item.certificate else ((openshift_master_config_dir) ~ '/openshift-router.key') }}"
with_items: "{{ openshift_hosted_routers }}"
- - name: set the openshift_hosted_router_certificates
+ - name: set the openshift_hosted_router_certificate
set_fact:
- openshift_hosted_router_certificates:
+ openshift_hosted_router_certificate:
certfile: "{{ openshift_master_config_dir ~ '/openshift-router.crt' }}"
keyfile: "{{ openshift_master_config_dir ~ '/openshift-router.key' }}"
cafile: "{{ openshift_master_config_dir ~ '/ca.crt' }}"
@@ -44,7 +44,7 @@
backup: True
dest: "/etc/origin/master/{{ item | basename }}"
src: "{{ item }}"
- with_items: "{{ openshift_hosted_routers | oo_collect(attribute='certificates') |
+ with_items: "{{ openshift_hosted_routers | oo_collect(attribute='certificate') |
oo_select_keys_from_list(['keyfile', 'certfile', 'cafile']) }}"
when: not openshift_hosted_router_create_certificate
@@ -82,9 +82,9 @@
service_account: "{{ item.serviceaccount | default('router') }}"
selector: "{{ item.selector | default(none) }}"
images: "{{ item.images | default(omit) }}"
- cert_file: "{{ ('/etc/origin/master/' ~ (item.certificates.certfile | basename)) if 'certfile' in item.certificates else omit }}"
- key_file: "{{ ('/etc/origin/master/' ~ (item.certificates.keyfile | basename)) if 'keyfile' in item.certificates else omit }}"
- cacert_file: "{{ ('/etc/origin/master/' ~ (item.certificates.cafile | basename)) if 'cafile' in item.certificates else omit }}"
+ cert_file: "{{ ('/etc/origin/master/' ~ (item.certificate.certfile | basename)) if 'certfile' in item.certificate else omit }}"
+ key_file: "{{ ('/etc/origin/master/' ~ (item.certificate.keyfile | basename)) if 'keyfile' in item.certificate else omit }}"
+ cacert_file: "{{ ('/etc/origin/master/' ~ (item.certificate.cafile | basename)) if 'cafile' in item.certificate else omit }}"
edits: "{{ openshift_hosted_router_edits | union(item.edits) }}"
ports: "{{ item.ports }}"
stats_port: "{{ item.stats_port }}"
diff --git a/roles/openshift_hosted_logging/tasks/deploy_logging.yaml b/roles/openshift_hosted_logging/tasks/deploy_logging.yaml
index afd82766f..78b624109 100644
--- a/roles/openshift_hosted_logging/tasks/deploy_logging.yaml
+++ b/roles/openshift_hosted_logging/tasks/deploy_logging.yaml
@@ -36,7 +36,7 @@
command: >
{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig secrets new logging-deployer {{ openshift_hosted_logging_secret_vars | default('nothing=/dev/null') }}
register: secret_output
- failed_when: "secret_output.rc == 1 and 'exists' not in secret_output.stderr"
+ failed_when: secret_output.rc == 1 and 'exists' not in secret_output.stderr
- name: "Create templates for logging accounts and the deployer"
command: >
@@ -60,21 +60,21 @@
{{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig
policy add-cluster-role-to-user oauth-editor system:serviceaccount:logging:logging-deployer
register: permiss_output
- failed_when: "permiss_output.rc == 1 and 'exists' not in permiss_output.stderr"
+ failed_when: permiss_output.rc == 1 and 'exists' not in permiss_output.stderr
- name: "Set permissions for fluentd"
command: >
{{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig
policy add-scc-to-user privileged system:serviceaccount:logging:aggregated-logging-fluentd
register: fluentd_output
- failed_when: "fluentd_output.rc == 1 and 'exists' not in fluentd_output.stderr"
+ failed_when: fluentd_output.rc == 1 and 'exists' not in fluentd_output.stderr
- name: "Set additional permissions for fluentd"
command: >
{{ openshift.common.client_binary }} adm policy --config={{ mktemp.stdout }}/admin.kubeconfig
add-cluster-role-to-user cluster-reader system:serviceaccount:logging:aggregated-logging-fluentd
register: fluentd2_output
- failed_when: "fluentd2_output.rc == 1 and 'exists' not in fluentd2_output.stderr"
+ failed_when: fluentd2_output.rc == 1 and 'exists' not in fluentd2_output.stderr
- name: "Add rolebinding-reader to aggregated-logging-elasticsearch"
command: >
@@ -82,13 +82,13 @@
policy add-cluster-role-to-user rolebinding-reader \
system:serviceaccount:logging:aggregated-logging-elasticsearch
register: rolebinding_reader_output
- failed_when: "rolebinding_reader_output == 1 and 'exists' not in rolebinding_reader_output.stderr"
+ failed_when: rolebinding_reader_output == 1 and 'exists' not in rolebinding_reader_output.stderr
- name: "Create ConfigMap for deployer parameters"
command: >
{{ openshift.common.client_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig create configmap logging-deployer {{ deployer_cmap_params }}
register: deployer_configmap_output
- failed_when: "deployer_configmap_output.rc == 1 and 'exists' not in deployer_configmap_output.stderr"
+ failed_when: deployer_configmap_output.rc == 1 and 'exists' not in deployer_configmap_output.stderr
- name: "Process the deployer template"
shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-app logging-deployer-template {{ oc_new_app_values }}"
diff --git a/roles/openshift_hosted_metrics/tasks/install.yml b/roles/openshift_hosted_metrics/tasks/install.yml
index 6a442cefc..15dd1bd54 100644
--- a/roles/openshift_hosted_metrics/tasks/install.yml
+++ b/roles/openshift_hosted_metrics/tasks/install.yml
@@ -81,7 +81,7 @@
secrets new metrics-deployer nothing=/dev/null
register: metrics_deployer_secret
changed_when: metrics_deployer_secret.rc == 0
- failed_when: "metrics_deployer_secret.rc == 1 and 'already exists' not in metrics_deployer_secret.stderr"
+ failed_when: metrics_deployer_secret.rc == 1 and 'already exists' not in metrics_deployer_secret.stderr
# TODO: extend this to allow user passed in certs or generating cert with
# OpenShift CA
diff --git a/roles/openshift_hosted_templates/files/v1.4/enterprise/logging-deployer.yaml b/roles/openshift_hosted_templates/files/v1.4/enterprise/logging-deployer.yaml
index c67058696..5abb2ef83 100644
--- a/roles/openshift_hosted_templates/files/v1.4/enterprise/logging-deployer.yaml
+++ b/roles/openshift_hosted_templates/files/v1.4/enterprise/logging-deployer.yaml
@@ -223,7 +223,7 @@ items:
-
description: 'Specify version for logging components; e.g. for "registry.access.redhat.com/openshift3/logging-deployer:3.4.0", set version "3.4.0"'
name: IMAGE_VERSION
- value: "3.4.0"
+ value: "v3.4"
-
description: "(Deprecated) Specify the name of an existing pull secret to be used for pulling component images from an authenticated registry."
name: IMAGE_PULL_SECRET
diff --git a/roles/openshift_hosted_templates/files/v1.4/enterprise/metrics-deployer.yaml b/roles/openshift_hosted_templates/files/v1.4/enterprise/metrics-deployer.yaml
index 6ead122c5..1d319eab8 100644
--- a/roles/openshift_hosted_templates/files/v1.4/enterprise/metrics-deployer.yaml
+++ b/roles/openshift_hosted_templates/files/v1.4/enterprise/metrics-deployer.yaml
@@ -105,7 +105,7 @@ parameters:
-
description: 'Specify version for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set version "latest"'
name: IMAGE_VERSION
- value: "3.4.0"
+ value: "v3.4"
-
description: "Internal URL for the master, for authentication retrieval"
name: MASTER_URL
@@ -118,7 +118,7 @@ parameters:
description: "Can be set to: 'preflight' to perform validation before a deployment; 'deploy' to perform an initial deployment; 'refresh' to delete and redeploy all components but to keep persisted data and routes; 'redeploy' to delete and redeploy everything (losing all data in the process); 'validate' to re-run validations after a deployment"
name: MODE
value: "deploy"
--
+-
description: "Set to true to continue even if the deployer runs into an error."
name: CONTINUE_ON_ERROR
value: "false"
diff --git a/roles/openshift_hosted_templates/files/v1.5/enterprise/logging-deployer.yaml b/roles/openshift_hosted_templates/files/v1.5/enterprise/logging-deployer.yaml
deleted file mode 100644
index fdfc285ca..000000000
--- a/roles/openshift_hosted_templates/files/v1.5/enterprise/logging-deployer.yaml
+++ /dev/null
@@ -1,345 +0,0 @@
-apiVersion: "v1"
-kind: "List"
-items:
--
- apiVersion: "v1"
- kind: "Template"
- metadata:
- name: logging-deployer-account-template
- annotations:
- description: "Template for creating the deployer account and roles needed for the aggregated logging deployer. Create as cluster-admin."
- tags: "infrastructure"
- objects:
- -
- apiVersion: v1
- kind: ServiceAccount
- name: logging-deployer
- metadata:
- name: logging-deployer
- labels:
- logging-infra: deployer
- provider: openshift
- component: deployer
- -
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: aggregated-logging-kibana
- -
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: aggregated-logging-elasticsearch
- -
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: aggregated-logging-fluentd
- -
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: aggregated-logging-curator
- - apiVersion: v1
- kind: ClusterRole
- metadata:
- name: oauth-editor
- rules:
- - resources:
- - oauthclients
- verbs:
- - create
- - delete
- - apiVersion: v1
- kind: ClusterRole
- metadata:
- name: daemonset-admin
- rules:
- - resources:
- - daemonsets
- apiGroups:
- - extensions
- verbs:
- - create
- - get
- - list
- - watch
- - delete
- - update
- - apiVersion: v1
- kind: ClusterRole
- metadata:
- name: rolebinding-reader
- rules:
- - resources:
- - clusterrolebindings
- verbs:
- - get
- -
- apiVersion: v1
- kind: RoleBinding
- metadata:
- name: logging-deployer-edit-role
- roleRef:
- kind: ClusterRole
- name: edit
- subjects:
- - kind: ServiceAccount
- name: logging-deployer
- -
- apiVersion: v1
- kind: RoleBinding
- metadata:
- name: logging-deployer-dsadmin-role
- roleRef:
- kind: ClusterRole
- name: daemonset-admin
- subjects:
- - kind: ServiceAccount
- name: logging-deployer
- -
- apiVersion: v1
- kind: RoleBinding
- metadata:
- name: logging-elasticsearch-view-role
- roleRef:
- kind: ClusterRole
- name: view
- subjects:
- - kind: ServiceAccount
- name: aggregated-logging-elasticsearch
--
- apiVersion: "v1"
- kind: "Template"
- metadata:
- name: logging-deployer-template
- annotations:
- description: "Template for running the aggregated logging deployer in a pod. Requires empowered 'logging-deployer' service account."
- tags: "infrastructure"
- labels:
- logging-infra: deployer
- provider: openshift
- objects:
- -
- apiVersion: v1
- kind: Pod
- metadata:
- generateName: logging-deployer-
- spec:
- containers:
- - image: ${IMAGE_PREFIX}logging-deployer:${IMAGE_VERSION}
- imagePullPolicy: Always
- name: deployer
- volumeMounts:
- - name: empty
- mountPath: /etc/deploy
- env:
- - name: PROJECT
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- - name: IMAGE_PREFIX
- value: ${IMAGE_PREFIX}
- - name: IMAGE_VERSION
- value: ${IMAGE_VERSION}
- - name: IMAGE_PULL_SECRET
- value: ${IMAGE_PULL_SECRET}
- - name: INSECURE_REGISTRY
- value: ${INSECURE_REGISTRY}
- - name: ENABLE_OPS_CLUSTER
- value: ${ENABLE_OPS_CLUSTER}
- - name: KIBANA_HOSTNAME
- value: ${KIBANA_HOSTNAME}
- - name: KIBANA_OPS_HOSTNAME
- value: ${KIBANA_OPS_HOSTNAME}
- - name: PUBLIC_MASTER_URL
- value: ${PUBLIC_MASTER_URL}
- - name: MASTER_URL
- value: ${MASTER_URL}
- - name: ES_INSTANCE_RAM
- value: ${ES_INSTANCE_RAM}
- - name: ES_PVC_SIZE
- value: ${ES_PVC_SIZE}
- - name: ES_PVC_PREFIX
- value: ${ES_PVC_PREFIX}
- - name: ES_PVC_DYNAMIC
- value: ${ES_PVC_DYNAMIC}
- - name: ES_CLUSTER_SIZE
- value: ${ES_CLUSTER_SIZE}
- - name: ES_NODE_QUORUM
- value: ${ES_NODE_QUORUM}
- - name: ES_RECOVER_AFTER_NODES
- value: ${ES_RECOVER_AFTER_NODES}
- - name: ES_RECOVER_EXPECTED_NODES
- value: ${ES_RECOVER_EXPECTED_NODES}
- - name: ES_RECOVER_AFTER_TIME
- value: ${ES_RECOVER_AFTER_TIME}
- - name: ES_OPS_INSTANCE_RAM
- value: ${ES_OPS_INSTANCE_RAM}
- - name: ES_OPS_PVC_SIZE
- value: ${ES_OPS_PVC_SIZE}
- - name: ES_OPS_PVC_PREFIX
- value: ${ES_OPS_PVC_PREFIX}
- - name: ES_OPS_PVC_DYNAMIC
- value: ${ES_OPS_PVC_DYNAMIC}
- - name: ES_OPS_CLUSTER_SIZE
- value: ${ES_OPS_CLUSTER_SIZE}
- - name: ES_OPS_NODE_QUORUM
- value: ${ES_OPS_NODE_QUORUM}
- - name: ES_OPS_RECOVER_AFTER_NODES
- value: ${ES_OPS_RECOVER_AFTER_NODES}
- - name: ES_OPS_RECOVER_EXPECTED_NODES
- value: ${ES_OPS_RECOVER_EXPECTED_NODES}
- - name: ES_OPS_RECOVER_AFTER_TIME
- value: ${ES_OPS_RECOVER_AFTER_TIME}
- - name: FLUENTD_NODESELECTOR
- value: ${FLUENTD_NODESELECTOR}
- - name: ES_NODESELECTOR
- value: ${ES_NODESELECTOR}
- - name: ES_OPS_NODESELECTOR
- value: ${ES_OPS_NODESELECTOR}
- - name: KIBANA_NODESELECTOR
- value: ${KIBANA_NODESELECTOR}
- - name: KIBANA_OPS_NODESELECTOR
- value: ${KIBANA_OPS_NODESELECTOR}
- - name: CURATOR_NODESELECTOR
- value: ${CURATOR_NODESELECTOR}
- - name: CURATOR_OPS_NODESELECTOR
- value: ${CURATOR_OPS_NODESELECTOR}
- - name: MODE
- value: ${MODE}
- dnsPolicy: ClusterFirst
- restartPolicy: Never
- serviceAccount: logging-deployer
- volumes:
- - name: empty
- emptyDir: {}
- parameters:
- -
- description: "The mode that the deployer runs in."
- name: MODE
- value: "install"
- -
- description: 'Specify prefix for logging components; e.g. for "registry.access.redhat.com/openshift3/logging-deployer:3.3.0", set prefix "registry.access.redhat.com/openshift3/"'
- name: IMAGE_PREFIX
- value: "registry.access.redhat.com/openshift3/"
- -
- description: 'Specify version for logging components; e.g. for "registry.access.redhat.com/openshift3/logging-deployer:3.3.0", set version "3.3.0"'
- name: IMAGE_VERSION
- value: "3.4.0"
- -
- description: "(Deprecated) Specify the name of an existing pull secret to be used for pulling component images from an authenticated registry."
- name: IMAGE_PULL_SECRET
- -
- description: "(Deprecated) Allow the registry for logging component images to be non-secure (not secured with a certificate signed by a known CA)"
- name: INSECURE_REGISTRY
- value: "false"
- -
- description: "(Deprecated) If true, set up to use a second ES cluster for ops logs."
- name: ENABLE_OPS_CLUSTER
- value: "false"
- -
- description: "(Deprecated) External hostname where clients will reach kibana"
- name: KIBANA_HOSTNAME
- value: "kibana.example.com"
- -
- description: "(Deprecated) External hostname at which admins will visit the ops Kibana."
- name: KIBANA_OPS_HOSTNAME
- value: kibana-ops.example.com
- -
- description: "(Deprecated) External URL for the master, for OAuth purposes"
- name: PUBLIC_MASTER_URL
- value: "https://localhost:8443"
- -
- description: "(Deprecated) Internal URL for the master, for authentication retrieval"
- name: MASTER_URL
- value: "https://kubernetes.default.svc.cluster.local"
- -
- description: "(Deprecated) How many instances of ElasticSearch to deploy."
- name: ES_CLUSTER_SIZE
- value: "1"
- -
- description: "(Deprecated) Amount of RAM to reserve per ElasticSearch instance."
- name: ES_INSTANCE_RAM
- value: "8G"
- -
- description: "(Deprecated) Size of the PersistentVolumeClaim to create per ElasticSearch instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead."
- name: ES_PVC_SIZE
- -
- description: "(Deprecated) Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_PVC_SIZE."
- name: ES_PVC_PREFIX
- value: "logging-es-"
- -
- description: '(Deprecated) Set to "true" to request dynamic provisioning (if enabled for your cluster) of a PersistentVolume for the ES PVC. '
- name: ES_PVC_DYNAMIC
- -
- description: "(Deprecated) Number of nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1."
- name: ES_NODE_QUORUM
- -
- description: "(Deprecated) Number of nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_CLUSTER_SIZE."
- name: ES_RECOVER_AFTER_NODES
- -
- description: "(Deprecated) Number of nodes desired to be present before the cluster will recover from a full restart. By default, ES_CLUSTER_SIZE."
- name: ES_RECOVER_EXPECTED_NODES
- -
- description: "(Deprecated) Timeout for *expected* nodes to be present when cluster is recovering from a full restart."
- name: ES_RECOVER_AFTER_TIME
- value: "5m"
- -
- description: "(Deprecated) How many ops instances of ElasticSearch to deploy. By default, ES_CLUSTER_SIZE."
- name: ES_OPS_CLUSTER_SIZE
- -
- description: "(Deprecated) Amount of RAM to reserve per ops ElasticSearch instance."
- name: ES_OPS_INSTANCE_RAM
- value: "8G"
- -
- description: "(Deprecated) Size of the PersistentVolumeClaim to create per ElasticSearch ops instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead."
- name: ES_OPS_PVC_SIZE
- -
- description: "(Deprecated) Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_OPS_PVC_SIZE."
- name: ES_OPS_PVC_PREFIX
- value: "logging-es-ops-"
- -
- description: '(Deprecated) Set to "true" to request dynamic provisioning (if enabled for your cluster) of a PersistentVolume for the ES ops PVC. '
- name: ES_OPS_PVC_DYNAMIC
- -
- description: "(Deprecated) Number of ops nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1."
- name: ES_OPS_NODE_QUORUM
- -
- description: "(Deprecated) Number of ops nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_OPS_CLUSTER_SIZE."
- name: ES_OPS_RECOVER_AFTER_NODES
- -
- description: "(Deprecated) Number of ops nodes desired to be present before the cluster will recover from a full restart. By default, ES_OPS_CLUSTER_SIZE."
- name: ES_OPS_RECOVER_EXPECTED_NODES
- -
- description: "(Deprecated) Timeout for *expected* ops nodes to be present when cluster is recovering from a full restart."
- name: ES_OPS_RECOVER_AFTER_TIME
- value: "5m"
- -
- description: "(Deprecated) The nodeSelector used for the Fluentd DaemonSet."
- name: FLUENTD_NODESELECTOR
- value: "logging-infra-fluentd=true"
- -
- description: "(Deprecated) Node selector Elasticsearch cluster (label=value)."
- name: ES_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector Elasticsearch operations cluster (label=value)."
- name: ES_OPS_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector Kibana cluster (label=value)."
- name: KIBANA_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector Kibana operations cluster (label=value)."
- name: KIBANA_OPS_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector Curator (label=value)."
- name: CURATOR_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector operations Curator (label=value)."
- name: CURATOR_OPS_NODESELECTOR
- value: ""
diff --git a/roles/openshift_hosted_templates/files/v1.5/enterprise/metrics-deployer.yaml b/roles/openshift_hosted_templates/files/v1.5/enterprise/metrics-deployer.yaml
deleted file mode 100644
index c4ab794ae..000000000
--- a/roles/openshift_hosted_templates/files/v1.5/enterprise/metrics-deployer.yaml
+++ /dev/null
@@ -1,168 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2014-2015 Red Hat, Inc. and/or its affiliates
-# and other contributors as indicated by the @author tags.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-apiVersion: "v1"
-kind: "Template"
-metadata:
- name: metrics-deployer-template
- annotations:
- description: "Template for deploying the required Metrics integration. Requires cluster-admin 'metrics-deployer' service account and 'metrics-deployer' secret."
- tags: "infrastructure"
-labels:
- metrics-infra: deployer
- provider: openshift
- component: deployer
-objects:
--
- apiVersion: v1
- kind: Pod
- metadata:
- generateName: metrics-deployer-
- spec:
- securityContext: {}
- containers:
- - image: ${IMAGE_PREFIX}metrics-deployer:${IMAGE_VERSION}
- name: deployer
- securityContext: {}
- volumeMounts:
- - name: secret
- mountPath: /secret
- readOnly: true
- - name: empty
- mountPath: /etc/deploy
- env:
- - name: PROJECT
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- - name: POD_NAME
- valueFrom:
- fieldRef:
- fieldPath: metadata.name
- - name: IMAGE_PREFIX
- value: ${IMAGE_PREFIX}
- - name: IMAGE_VERSION
- value: ${IMAGE_VERSION}
- - name: MASTER_URL
- value: ${MASTER_URL}
- - name: MODE
- value: ${MODE}
- - name: CONTINUE_ON_ERROR
- value: ${CONTINUE_ON_ERROR}
- - name: REDEPLOY
- value: ${REDEPLOY}
- - name: IGNORE_PREFLIGHT
- value: ${IGNORE_PREFLIGHT}
- - name: USE_PERSISTENT_STORAGE
- value: ${USE_PERSISTENT_STORAGE}
- - name: DYNAMICALLY_PROVISION_STORAGE
- value: ${DYNAMICALLY_PROVISION_STORAGE}
- - name: HAWKULAR_METRICS_HOSTNAME
- value: ${HAWKULAR_METRICS_HOSTNAME}
- - name: CASSANDRA_NODES
- value: ${CASSANDRA_NODES}
- - name: CASSANDRA_PV_SIZE
- value: ${CASSANDRA_PV_SIZE}
- - name: METRIC_DURATION
- value: ${METRIC_DURATION}
- - name: USER_WRITE_ACCESS
- value: ${USER_WRITE_ACCESS}
- - name: HEAPSTER_NODE_ID
- value: ${HEAPSTER_NODE_ID}
- - name: METRIC_RESOLUTION
- value: ${METRIC_RESOLUTION}
- - name: STARTUP_TIMEOUT
- value: ${STARTUP_TIMEOUT}
- dnsPolicy: ClusterFirst
- restartPolicy: Never
- serviceAccount: metrics-deployer
- volumes:
- - name: empty
- emptyDir: {}
- - name: secret
- secret:
- secretName: metrics-deployer
-parameters:
--
- description: 'Specify prefix for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set prefix "openshift/origin-"'
- name: IMAGE_PREFIX
- value: "registry.access.redhat.com/openshift3/"
--
- description: 'Specify version for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set version "latest"'
- name: IMAGE_VERSION
- value: "v3.5"
--
- description: "Internal URL for the master, for authentication retrieval"
- name: MASTER_URL
- value: "https://kubernetes.default.svc:443"
--
- description: "External hostname where clients will reach Hawkular Metrics"
- name: HAWKULAR_METRICS_HOSTNAME
- required: true
--
- description: "Can be set to: 'preflight' to perform validation before a deployment; 'deploy' to perform an initial deployment; 'refresh' to delete and redeploy all components but to keep persisted data and routes; 'redeploy' to delete and redeploy everything (losing all data in the process); 'validate' to re-run validations after a deployment"
- name: MODE
- value: "deploy"
--
- description: "Set to true to continue even if the deployer runs into an error."
- name: CONTINUE_ON_ERROR
- value: "false"
--
- description: "(Deprecated) Turns 'deploy' mode into 'redeploy' mode, deleting and redeploying everything (losing all data in the process)"
- name: REDEPLOY
- value: "false"
--
- description: "If preflight validation is blocking deployment and you're sure you don't care about it, this will ignore the results and proceed to deploy."
- name: IGNORE_PREFLIGHT
- value: "false"
--
- description: "Set to true for persistent storage, set to false to use non persistent storage"
- name: USE_PERSISTENT_STORAGE
- value: "true"
--
- description: "Set to true to dynamically provision storage, set to false to use use pre-created persistent volumes"
- name: DYNAMICALLY_PROVISION_STORAGE
- value: "false"
--
- description: "The number of Cassandra Nodes to deploy for the initial cluster"
- name: CASSANDRA_NODES
- value: "1"
--
- description: "The persistent volume size for each of the Cassandra nodes"
- name: CASSANDRA_PV_SIZE
- value: "10Gi"
--
- description: "How many days metrics should be stored for."
- name: METRIC_DURATION
- value: "7"
--
- description: "If a user accounts should be allowed to write metrics."
- name: USER_WRITE_ACCESS
- value: "false"
--
- description: "The identifier used when generating metric ids in Hawkular"
- name: HEAPSTER_NODE_ID
- value: "nodename"
--
- description: "How often metrics should be gathered. Defaults value of '30s' for 30 seconds"
- name: METRIC_RESOLUTION
- value: "30s"
--
- description: "How long in seconds we should wait until Hawkular Metrics and Heapster starts up before attempting a restart"
- name: STARTUP_TIMEOUT
- value: "500"
diff --git a/roles/openshift_hosted_templates/files/v1.5/origin/logging-deployer.yaml b/roles/openshift_hosted_templates/files/v1.5/origin/logging-deployer.yaml
deleted file mode 100644
index 5b5503500..000000000
--- a/roles/openshift_hosted_templates/files/v1.5/origin/logging-deployer.yaml
+++ /dev/null
@@ -1,342 +0,0 @@
-apiVersion: "v1"
-kind: "List"
-items:
--
- apiVersion: "v1"
- kind: "Template"
- metadata:
- name: logging-deployer-account-template
- annotations:
- description: "Template for creating the deployer account and roles needed for the aggregated logging deployer. Create as cluster-admin."
- tags: "infrastructure"
- objects:
- -
- apiVersion: v1
- kind: ServiceAccount
- name: logging-deployer
- metadata:
- name: logging-deployer
- labels:
- logging-infra: deployer
- provider: openshift
- component: deployer
- -
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: aggregated-logging-kibana
- -
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: aggregated-logging-elasticsearch
- -
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: aggregated-logging-fluentd
- -
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: aggregated-logging-curator
- - apiVersion: v1
- kind: ClusterRole
- metadata:
- name: oauth-editor
- rules:
- - resources:
- - oauthclients
- verbs:
- - create
- - delete
- - apiVersion: v1
- kind: ClusterRole
- metadata:
- name: daemonset-admin
- rules:
- - resources:
- - daemonsets
- apiGroups:
- - extensions
- verbs:
- - create
- - get
- - list
- - watch
- - delete
- - update
- - apiVersion: v1
- kind: ClusterRole
- metadata:
- name: rolebinding-reader
- rules:
- - resources:
- - clusterrolebindings
- verbs:
- - get
- -
- apiVersion: v1
- kind: RoleBinding
- metadata:
- name: logging-deployer-edit-role
- roleRef:
- name: edit
- subjects:
- - kind: ServiceAccount
- name: logging-deployer
- -
- apiVersion: v1
- kind: RoleBinding
- metadata:
- name: logging-deployer-dsadmin-role
- roleRef:
- name: daemonset-admin
- subjects:
- - kind: ServiceAccount
- name: logging-deployer
- -
- apiVersion: v1
- kind: RoleBinding
- metadata:
- name: logging-elasticsearch-view-role
- roleRef:
- name: view
- subjects:
- - kind: ServiceAccount
- name: aggregated-logging-elasticsearch
--
- apiVersion: "v1"
- kind: "Template"
- metadata:
- name: logging-deployer-template
- annotations:
- description: "Template for running the aggregated logging deployer in a pod. Requires empowered 'logging-deployer' service account."
- tags: "infrastructure"
- labels:
- logging-infra: deployer
- provider: openshift
- objects:
- -
- apiVersion: v1
- kind: Pod
- metadata:
- generateName: logging-deployer-
- spec:
- containers:
- - image: ${IMAGE_PREFIX}logging-deployment:${IMAGE_VERSION}
- imagePullPolicy: Always
- name: deployer
- volumeMounts:
- - name: empty
- mountPath: /etc/deploy
- env:
- - name: PROJECT
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- - name: IMAGE_PREFIX
- value: ${IMAGE_PREFIX}
- - name: IMAGE_VERSION
- value: ${IMAGE_VERSION}
- - name: IMAGE_PULL_SECRET
- value: ${IMAGE_PULL_SECRET}
- - name: INSECURE_REGISTRY
- value: ${INSECURE_REGISTRY}
- - name: ENABLE_OPS_CLUSTER
- value: ${ENABLE_OPS_CLUSTER}
- - name: KIBANA_HOSTNAME
- value: ${KIBANA_HOSTNAME}
- - name: KIBANA_OPS_HOSTNAME
- value: ${KIBANA_OPS_HOSTNAME}
- - name: PUBLIC_MASTER_URL
- value: ${PUBLIC_MASTER_URL}
- - name: MASTER_URL
- value: ${MASTER_URL}
- - name: ES_INSTANCE_RAM
- value: ${ES_INSTANCE_RAM}
- - name: ES_PVC_SIZE
- value: ${ES_PVC_SIZE}
- - name: ES_PVC_PREFIX
- value: ${ES_PVC_PREFIX}
- - name: ES_PVC_DYNAMIC
- value: ${ES_PVC_DYNAMIC}
- - name: ES_CLUSTER_SIZE
- value: ${ES_CLUSTER_SIZE}
- - name: ES_NODE_QUORUM
- value: ${ES_NODE_QUORUM}
- - name: ES_RECOVER_AFTER_NODES
- value: ${ES_RECOVER_AFTER_NODES}
- - name: ES_RECOVER_EXPECTED_NODES
- value: ${ES_RECOVER_EXPECTED_NODES}
- - name: ES_RECOVER_AFTER_TIME
- value: ${ES_RECOVER_AFTER_TIME}
- - name: ES_OPS_INSTANCE_RAM
- value: ${ES_OPS_INSTANCE_RAM}
- - name: ES_OPS_PVC_SIZE
- value: ${ES_OPS_PVC_SIZE}
- - name: ES_OPS_PVC_PREFIX
- value: ${ES_OPS_PVC_PREFIX}
- - name: ES_OPS_PVC_DYNAMIC
- value: ${ES_OPS_PVC_DYNAMIC}
- - name: ES_OPS_CLUSTER_SIZE
- value: ${ES_OPS_CLUSTER_SIZE}
- - name: ES_OPS_NODE_QUORUM
- value: ${ES_OPS_NODE_QUORUM}
- - name: ES_OPS_RECOVER_AFTER_NODES
- value: ${ES_OPS_RECOVER_AFTER_NODES}
- - name: ES_OPS_RECOVER_EXPECTED_NODES
- value: ${ES_OPS_RECOVER_EXPECTED_NODES}
- - name: ES_OPS_RECOVER_AFTER_TIME
- value: ${ES_OPS_RECOVER_AFTER_TIME}
- - name: FLUENTD_NODESELECTOR
- value: ${FLUENTD_NODESELECTOR}
- - name: ES_NODESELECTOR
- value: ${ES_NODESELECTOR}
- - name: ES_OPS_NODESELECTOR
- value: ${ES_OPS_NODESELECTOR}
- - name: KIBANA_NODESELECTOR
- value: ${KIBANA_NODESELECTOR}
- - name: KIBANA_OPS_NODESELECTOR
- value: ${KIBANA_OPS_NODESELECTOR}
- - name: CURATOR_NODESELECTOR
- value: ${CURATOR_NODESELECTOR}
- - name: CURATOR_OPS_NODESELECTOR
- value: ${CURATOR_OPS_NODESELECTOR}
- - name: MODE
- value: ${MODE}
- dnsPolicy: ClusterFirst
- restartPolicy: Never
- serviceAccount: logging-deployer
- volumes:
- - name: empty
- emptyDir: {}
- parameters:
- -
- description: "The mode that the deployer runs in."
- name: MODE
- value: "install"
- -
- description: 'Specify prefix for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set prefix "openshift/origin-"'
- name: IMAGE_PREFIX
- value: "docker.io/openshift/origin-"
- -
- description: 'Specify version for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set version "v1.1"'
- name: IMAGE_VERSION
- value: "latest"
- -
- description: "(Deprecated) Specify the name of an existing pull secret to be used for pulling component images from an authenticated registry."
- name: IMAGE_PULL_SECRET
- -
- description: "(Deprecated) Allow the registry for logging component images to be non-secure (not secured with a certificate signed by a known CA)"
- name: INSECURE_REGISTRY
- value: "false"
- -
- description: "(Deprecated) If true, set up to use a second ES cluster for ops logs."
- name: ENABLE_OPS_CLUSTER
- value: "false"
- -
- description: "(Deprecated) External hostname where clients will reach kibana"
- name: KIBANA_HOSTNAME
- value: "kibana.example.com"
- -
- description: "(Deprecated) External hostname at which admins will visit the ops Kibana."
- name: KIBANA_OPS_HOSTNAME
- value: kibana-ops.example.com
- -
- description: "(Deprecated) External URL for the master, for OAuth purposes"
- name: PUBLIC_MASTER_URL
- value: "https://localhost:8443"
- -
- description: "(Deprecated) Internal URL for the master, for authentication retrieval"
- name: MASTER_URL
- value: "https://kubernetes.default.svc.cluster.local"
- -
- description: "(Deprecated) How many instances of ElasticSearch to deploy."
- name: ES_CLUSTER_SIZE
- value: "1"
- -
- description: "(Deprecated) Amount of RAM to reserve per ElasticSearch instance."
- name: ES_INSTANCE_RAM
- value: "8G"
- -
- description: "(Deprecated) Size of the PersistentVolumeClaim to create per ElasticSearch instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead."
- name: ES_PVC_SIZE
- -
- description: "(Deprecated) Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_PVC_SIZE."
- name: ES_PVC_PREFIX
- value: "logging-es-"
- -
- description: '(Deprecated) Set to "true" to request dynamic provisioning (if enabled for your cluster) of a PersistentVolume for the ES PVC. '
- name: ES_PVC_DYNAMIC
- -
- description: "(Deprecated) Number of nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1."
- name: ES_NODE_QUORUM
- -
- description: "(Deprecated) Number of nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_CLUSTER_SIZE."
- name: ES_RECOVER_AFTER_NODES
- -
- description: "(Deprecated) Number of nodes desired to be present before the cluster will recover from a full restart. By default, ES_CLUSTER_SIZE."
- name: ES_RECOVER_EXPECTED_NODES
- -
- description: "(Deprecated) Timeout for *expected* nodes to be present when cluster is recovering from a full restart."
- name: ES_RECOVER_AFTER_TIME
- value: "5m"
- -
- description: "(Deprecated) How many ops instances of ElasticSearch to deploy. By default, ES_CLUSTER_SIZE."
- name: ES_OPS_CLUSTER_SIZE
- -
- description: "(Deprecated) Amount of RAM to reserve per ops ElasticSearch instance."
- name: ES_OPS_INSTANCE_RAM
- value: "8G"
- -
- description: "(Deprecated) Size of the PersistentVolumeClaim to create per ElasticSearch ops instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead."
- name: ES_OPS_PVC_SIZE
- -
- description: "(Deprecated) Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_OPS_PVC_SIZE."
- name: ES_OPS_PVC_PREFIX
- value: "logging-es-ops-"
- -
- description: '(Deprecated) Set to "true" to request dynamic provisioning (if enabled for your cluster) of a PersistentVolume for the ES ops PVC. '
- name: ES_OPS_PVC_DYNAMIC
- -
- description: "(Deprecated) Number of ops nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1."
- name: ES_OPS_NODE_QUORUM
- -
- description: "(Deprecated) Number of ops nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_OPS_CLUSTER_SIZE."
- name: ES_OPS_RECOVER_AFTER_NODES
- -
- description: "(Deprecated) Number of ops nodes desired to be present before the cluster will recover from a full restart. By default, ES_OPS_CLUSTER_SIZE."
- name: ES_OPS_RECOVER_EXPECTED_NODES
- -
- description: "(Deprecated) Timeout for *expected* ops nodes to be present when cluster is recovering from a full restart."
- name: ES_OPS_RECOVER_AFTER_TIME
- value: "5m"
- -
- description: "(Deprecated) The nodeSelector used for the Fluentd DaemonSet."
- name: FLUENTD_NODESELECTOR
- value: "logging-infra-fluentd=true"
- -
- description: "(Deprecated) Node selector Elasticsearch cluster (label=value)."
- name: ES_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector Elasticsearch operations cluster (label=value)."
- name: ES_OPS_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector Kibana cluster (label=value)."
- name: KIBANA_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector Kibana operations cluster (label=value)."
- name: KIBANA_OPS_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector Curator (label=value)."
- name: CURATOR_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector operations Curator (label=value)."
- name: CURATOR_OPS_NODESELECTOR
- value: ""
diff --git a/roles/openshift_hosted_templates/files/v1.5/origin/metrics-deployer.yaml b/roles/openshift_hosted_templates/files/v1.5/origin/metrics-deployer.yaml
deleted file mode 100644
index d191c0439..000000000
--- a/roles/openshift_hosted_templates/files/v1.5/origin/metrics-deployer.yaml
+++ /dev/null
@@ -1,168 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2014-2015 Red Hat, Inc. and/or its affiliates
-# and other contributors as indicated by the @author tags.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-apiVersion: "v1"
-kind: "Template"
-metadata:
- name: metrics-deployer-template
- annotations:
- description: "Template for deploying the required Metrics integration. Requires cluster-admin 'metrics-deployer' service account and 'metrics-deployer' secret."
- tags: "infrastructure"
-labels:
- metrics-infra: deployer
- provider: openshift
- component: deployer
-objects:
--
- apiVersion: v1
- kind: Pod
- metadata:
- generateName: metrics-deployer-
- spec:
- securityContext: {}
- containers:
- - image: ${IMAGE_PREFIX}metrics-deployer:${IMAGE_VERSION}
- name: deployer
- securityContext: {}
- volumeMounts:
- - name: secret
- mountPath: /secret
- readOnly: true
- - name: empty
- mountPath: /etc/deploy
- env:
- - name: PROJECT
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- - name: POD_NAME
- valueFrom:
- fieldRef:
- fieldPath: metadata.name
- - name: IMAGE_PREFIX
- value: ${IMAGE_PREFIX}
- - name: IMAGE_VERSION
- value: ${IMAGE_VERSION}
- - name: MASTER_URL
- value: ${MASTER_URL}
- - name: MODE
- value: ${MODE}
- - name: CONTINUE_ON_ERROR
- value: ${CONTINUE_ON_ERROR}
- - name: REDEPLOY
- value: ${REDEPLOY}
- - name: IGNORE_PREFLIGHT
- value: ${IGNORE_PREFLIGHT}
- - name: USE_PERSISTENT_STORAGE
- value: ${USE_PERSISTENT_STORAGE}
- - name: DYNAMICALLY_PROVISION_STORAGE
- value: ${DYNAMICALLY_PROVISION_STORAGE}
- - name: HAWKULAR_METRICS_HOSTNAME
- value: ${HAWKULAR_METRICS_HOSTNAME}
- - name: CASSANDRA_NODES
- value: ${CASSANDRA_NODES}
- - name: CASSANDRA_PV_SIZE
- value: ${CASSANDRA_PV_SIZE}
- - name: METRIC_DURATION
- value: ${METRIC_DURATION}
- - name: USER_WRITE_ACCESS
- value: ${USER_WRITE_ACCESS}
- - name: HEAPSTER_NODE_ID
- value: ${HEAPSTER_NODE_ID}
- - name: METRIC_RESOLUTION
- value: ${METRIC_RESOLUTION}
- - name: STARTUP_TIMEOUT
- value: ${STARTUP_TIMEOUT}
- dnsPolicy: ClusterFirst
- restartPolicy: Never
- serviceAccount: metrics-deployer
- volumes:
- - name: empty
- emptyDir: {}
- - name: secret
- secret:
- secretName: metrics-deployer
-parameters:
--
- description: 'Specify prefix for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set prefix "openshift/origin-"'
- name: IMAGE_PREFIX
- value: "openshift/origin-"
--
- description: 'Specify version for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set version "latest"'
- name: IMAGE_VERSION
- value: "latest"
--
- description: "Internal URL for the master, for authentication retrieval"
- name: MASTER_URL
- value: "https://kubernetes.default.svc:443"
--
- description: "External hostname where clients will reach Hawkular Metrics"
- name: HAWKULAR_METRICS_HOSTNAME
- required: true
--
- description: "Can be set to: 'preflight' to perform validation before a deployment; 'deploy' to perform an initial deployment; 'refresh' to delete and redeploy all components but to keep persisted data and routes; 'redeploy' to delete and redeploy everything (losing all data in the process); 'validate' to re-run validations after a deployment"
- name: MODE
- value: "deploy"
--
- description: "Set to true to continue even if the deployer runs into an error."
- name: CONTINUE_ON_ERROR
- value: "false"
--
- description: "(Deprecated) Turns 'deploy' mode into 'redeploy' mode, deleting and redeploying everything (losing all data in the process)"
- name: REDEPLOY
- value: "false"
--
- description: "If preflight validation is blocking deployment and you're sure you don't care about it, this will ignore the results and proceed to deploy."
- name: IGNORE_PREFLIGHT
- value: "false"
--
- description: "Set to true for persistent storage, set to false to use non persistent storage"
- name: USE_PERSISTENT_STORAGE
- value: "true"
--
- description: "Set to true to dynamically provision storage, set to false to use use pre-created persistent volumes"
- name: DYNAMICALLY_PROVISION_STORAGE
- value: "false"
--
- description: "The number of Cassandra Nodes to deploy for the initial cluster"
- name: CASSANDRA_NODES
- value: "1"
--
- description: "The persistent volume size for each of the Cassandra nodes"
- name: CASSANDRA_PV_SIZE
- value: "10Gi"
--
- description: "How many days metrics should be stored for."
- name: METRIC_DURATION
- value: "7"
--
- description: "If a user accounts should be allowed to write metrics."
- name: USER_WRITE_ACCESS
- value: "false"
--
- description: "The identifier used when generating metric ids in Hawkular"
- name: HEAPSTER_NODE_ID
- value: "nodename"
--
- description: "How often metrics should be gathered. Defaults value of '30s' for 30 seconds"
- name: METRIC_RESOLUTION
- value: "30s"
--
- description: "How long in seconds we should wait until Hawkular Metrics and Heapster starts up before attempting a restart"
- name: STARTUP_TIMEOUT
- value: "500"
diff --git a/roles/openshift_hosted_templates/files/v1.6/enterprise/logging-deployer.yaml b/roles/openshift_hosted_templates/files/v1.6/enterprise/logging-deployer.yaml
deleted file mode 100644
index fdfc285ca..000000000
--- a/roles/openshift_hosted_templates/files/v1.6/enterprise/logging-deployer.yaml
+++ /dev/null
@@ -1,345 +0,0 @@
-apiVersion: "v1"
-kind: "List"
-items:
--
- apiVersion: "v1"
- kind: "Template"
- metadata:
- name: logging-deployer-account-template
- annotations:
- description: "Template for creating the deployer account and roles needed for the aggregated logging deployer. Create as cluster-admin."
- tags: "infrastructure"
- objects:
- -
- apiVersion: v1
- kind: ServiceAccount
- name: logging-deployer
- metadata:
- name: logging-deployer
- labels:
- logging-infra: deployer
- provider: openshift
- component: deployer
- -
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: aggregated-logging-kibana
- -
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: aggregated-logging-elasticsearch
- -
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: aggregated-logging-fluentd
- -
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: aggregated-logging-curator
- - apiVersion: v1
- kind: ClusterRole
- metadata:
- name: oauth-editor
- rules:
- - resources:
- - oauthclients
- verbs:
- - create
- - delete
- - apiVersion: v1
- kind: ClusterRole
- metadata:
- name: daemonset-admin
- rules:
- - resources:
- - daemonsets
- apiGroups:
- - extensions
- verbs:
- - create
- - get
- - list
- - watch
- - delete
- - update
- - apiVersion: v1
- kind: ClusterRole
- metadata:
- name: rolebinding-reader
- rules:
- - resources:
- - clusterrolebindings
- verbs:
- - get
- -
- apiVersion: v1
- kind: RoleBinding
- metadata:
- name: logging-deployer-edit-role
- roleRef:
- kind: ClusterRole
- name: edit
- subjects:
- - kind: ServiceAccount
- name: logging-deployer
- -
- apiVersion: v1
- kind: RoleBinding
- metadata:
- name: logging-deployer-dsadmin-role
- roleRef:
- kind: ClusterRole
- name: daemonset-admin
- subjects:
- - kind: ServiceAccount
- name: logging-deployer
- -
- apiVersion: v1
- kind: RoleBinding
- metadata:
- name: logging-elasticsearch-view-role
- roleRef:
- kind: ClusterRole
- name: view
- subjects:
- - kind: ServiceAccount
- name: aggregated-logging-elasticsearch
--
- apiVersion: "v1"
- kind: "Template"
- metadata:
- name: logging-deployer-template
- annotations:
- description: "Template for running the aggregated logging deployer in a pod. Requires empowered 'logging-deployer' service account."
- tags: "infrastructure"
- labels:
- logging-infra: deployer
- provider: openshift
- objects:
- -
- apiVersion: v1
- kind: Pod
- metadata:
- generateName: logging-deployer-
- spec:
- containers:
- - image: ${IMAGE_PREFIX}logging-deployer:${IMAGE_VERSION}
- imagePullPolicy: Always
- name: deployer
- volumeMounts:
- - name: empty
- mountPath: /etc/deploy
- env:
- - name: PROJECT
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- - name: IMAGE_PREFIX
- value: ${IMAGE_PREFIX}
- - name: IMAGE_VERSION
- value: ${IMAGE_VERSION}
- - name: IMAGE_PULL_SECRET
- value: ${IMAGE_PULL_SECRET}
- - name: INSECURE_REGISTRY
- value: ${INSECURE_REGISTRY}
- - name: ENABLE_OPS_CLUSTER
- value: ${ENABLE_OPS_CLUSTER}
- - name: KIBANA_HOSTNAME
- value: ${KIBANA_HOSTNAME}
- - name: KIBANA_OPS_HOSTNAME
- value: ${KIBANA_OPS_HOSTNAME}
- - name: PUBLIC_MASTER_URL
- value: ${PUBLIC_MASTER_URL}
- - name: MASTER_URL
- value: ${MASTER_URL}
- - name: ES_INSTANCE_RAM
- value: ${ES_INSTANCE_RAM}
- - name: ES_PVC_SIZE
- value: ${ES_PVC_SIZE}
- - name: ES_PVC_PREFIX
- value: ${ES_PVC_PREFIX}
- - name: ES_PVC_DYNAMIC
- value: ${ES_PVC_DYNAMIC}
- - name: ES_CLUSTER_SIZE
- value: ${ES_CLUSTER_SIZE}
- - name: ES_NODE_QUORUM
- value: ${ES_NODE_QUORUM}
- - name: ES_RECOVER_AFTER_NODES
- value: ${ES_RECOVER_AFTER_NODES}
- - name: ES_RECOVER_EXPECTED_NODES
- value: ${ES_RECOVER_EXPECTED_NODES}
- - name: ES_RECOVER_AFTER_TIME
- value: ${ES_RECOVER_AFTER_TIME}
- - name: ES_OPS_INSTANCE_RAM
- value: ${ES_OPS_INSTANCE_RAM}
- - name: ES_OPS_PVC_SIZE
- value: ${ES_OPS_PVC_SIZE}
- - name: ES_OPS_PVC_PREFIX
- value: ${ES_OPS_PVC_PREFIX}
- - name: ES_OPS_PVC_DYNAMIC
- value: ${ES_OPS_PVC_DYNAMIC}
- - name: ES_OPS_CLUSTER_SIZE
- value: ${ES_OPS_CLUSTER_SIZE}
- - name: ES_OPS_NODE_QUORUM
- value: ${ES_OPS_NODE_QUORUM}
- - name: ES_OPS_RECOVER_AFTER_NODES
- value: ${ES_OPS_RECOVER_AFTER_NODES}
- - name: ES_OPS_RECOVER_EXPECTED_NODES
- value: ${ES_OPS_RECOVER_EXPECTED_NODES}
- - name: ES_OPS_RECOVER_AFTER_TIME
- value: ${ES_OPS_RECOVER_AFTER_TIME}
- - name: FLUENTD_NODESELECTOR
- value: ${FLUENTD_NODESELECTOR}
- - name: ES_NODESELECTOR
- value: ${ES_NODESELECTOR}
- - name: ES_OPS_NODESELECTOR
- value: ${ES_OPS_NODESELECTOR}
- - name: KIBANA_NODESELECTOR
- value: ${KIBANA_NODESELECTOR}
- - name: KIBANA_OPS_NODESELECTOR
- value: ${KIBANA_OPS_NODESELECTOR}
- - name: CURATOR_NODESELECTOR
- value: ${CURATOR_NODESELECTOR}
- - name: CURATOR_OPS_NODESELECTOR
- value: ${CURATOR_OPS_NODESELECTOR}
- - name: MODE
- value: ${MODE}
- dnsPolicy: ClusterFirst
- restartPolicy: Never
- serviceAccount: logging-deployer
- volumes:
- - name: empty
- emptyDir: {}
- parameters:
- -
- description: "The mode that the deployer runs in."
- name: MODE
- value: "install"
- -
- description: 'Specify prefix for logging components; e.g. for "registry.access.redhat.com/openshift3/logging-deployer:3.3.0", set prefix "registry.access.redhat.com/openshift3/"'
- name: IMAGE_PREFIX
- value: "registry.access.redhat.com/openshift3/"
- -
- description: 'Specify version for logging components; e.g. for "registry.access.redhat.com/openshift3/logging-deployer:3.3.0", set version "3.3.0"'
- name: IMAGE_VERSION
- value: "3.4.0"
- -
- description: "(Deprecated) Specify the name of an existing pull secret to be used for pulling component images from an authenticated registry."
- name: IMAGE_PULL_SECRET
- -
- description: "(Deprecated) Allow the registry for logging component images to be non-secure (not secured with a certificate signed by a known CA)"
- name: INSECURE_REGISTRY
- value: "false"
- -
- description: "(Deprecated) If true, set up to use a second ES cluster for ops logs."
- name: ENABLE_OPS_CLUSTER
- value: "false"
- -
- description: "(Deprecated) External hostname where clients will reach kibana"
- name: KIBANA_HOSTNAME
- value: "kibana.example.com"
- -
- description: "(Deprecated) External hostname at which admins will visit the ops Kibana."
- name: KIBANA_OPS_HOSTNAME
- value: kibana-ops.example.com
- -
- description: "(Deprecated) External URL for the master, for OAuth purposes"
- name: PUBLIC_MASTER_URL
- value: "https://localhost:8443"
- -
- description: "(Deprecated) Internal URL for the master, for authentication retrieval"
- name: MASTER_URL
- value: "https://kubernetes.default.svc.cluster.local"
- -
- description: "(Deprecated) How many instances of ElasticSearch to deploy."
- name: ES_CLUSTER_SIZE
- value: "1"
- -
- description: "(Deprecated) Amount of RAM to reserve per ElasticSearch instance."
- name: ES_INSTANCE_RAM
- value: "8G"
- -
- description: "(Deprecated) Size of the PersistentVolumeClaim to create per ElasticSearch instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead."
- name: ES_PVC_SIZE
- -
- description: "(Deprecated) Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_PVC_SIZE."
- name: ES_PVC_PREFIX
- value: "logging-es-"
- -
- description: '(Deprecated) Set to "true" to request dynamic provisioning (if enabled for your cluster) of a PersistentVolume for the ES PVC. '
- name: ES_PVC_DYNAMIC
- -
- description: "(Deprecated) Number of nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1."
- name: ES_NODE_QUORUM
- -
- description: "(Deprecated) Number of nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_CLUSTER_SIZE."
- name: ES_RECOVER_AFTER_NODES
- -
- description: "(Deprecated) Number of nodes desired to be present before the cluster will recover from a full restart. By default, ES_CLUSTER_SIZE."
- name: ES_RECOVER_EXPECTED_NODES
- -
- description: "(Deprecated) Timeout for *expected* nodes to be present when cluster is recovering from a full restart."
- name: ES_RECOVER_AFTER_TIME
- value: "5m"
- -
- description: "(Deprecated) How many ops instances of ElasticSearch to deploy. By default, ES_CLUSTER_SIZE."
- name: ES_OPS_CLUSTER_SIZE
- -
- description: "(Deprecated) Amount of RAM to reserve per ops ElasticSearch instance."
- name: ES_OPS_INSTANCE_RAM
- value: "8G"
- -
- description: "(Deprecated) Size of the PersistentVolumeClaim to create per ElasticSearch ops instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead."
- name: ES_OPS_PVC_SIZE
- -
- description: "(Deprecated) Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_OPS_PVC_SIZE."
- name: ES_OPS_PVC_PREFIX
- value: "logging-es-ops-"
- -
- description: '(Deprecated) Set to "true" to request dynamic provisioning (if enabled for your cluster) of a PersistentVolume for the ES ops PVC. '
- name: ES_OPS_PVC_DYNAMIC
- -
- description: "(Deprecated) Number of ops nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1."
- name: ES_OPS_NODE_QUORUM
- -
- description: "(Deprecated) Number of ops nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_OPS_CLUSTER_SIZE."
- name: ES_OPS_RECOVER_AFTER_NODES
- -
- description: "(Deprecated) Number of ops nodes desired to be present before the cluster will recover from a full restart. By default, ES_OPS_CLUSTER_SIZE."
- name: ES_OPS_RECOVER_EXPECTED_NODES
- -
- description: "(Deprecated) Timeout for *expected* ops nodes to be present when cluster is recovering from a full restart."
- name: ES_OPS_RECOVER_AFTER_TIME
- value: "5m"
- -
- description: "(Deprecated) The nodeSelector used for the Fluentd DaemonSet."
- name: FLUENTD_NODESELECTOR
- value: "logging-infra-fluentd=true"
- -
- description: "(Deprecated) Node selector Elasticsearch cluster (label=value)."
- name: ES_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector Elasticsearch operations cluster (label=value)."
- name: ES_OPS_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector Kibana cluster (label=value)."
- name: KIBANA_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector Kibana operations cluster (label=value)."
- name: KIBANA_OPS_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector Curator (label=value)."
- name: CURATOR_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector operations Curator (label=value)."
- name: CURATOR_OPS_NODESELECTOR
- value: ""
diff --git a/roles/openshift_hosted_templates/files/v1.6/enterprise/metrics-deployer.yaml b/roles/openshift_hosted_templates/files/v1.6/enterprise/metrics-deployer.yaml
deleted file mode 100644
index c4ab794ae..000000000
--- a/roles/openshift_hosted_templates/files/v1.6/enterprise/metrics-deployer.yaml
+++ /dev/null
@@ -1,168 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2014-2015 Red Hat, Inc. and/or its affiliates
-# and other contributors as indicated by the @author tags.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-apiVersion: "v1"
-kind: "Template"
-metadata:
- name: metrics-deployer-template
- annotations:
- description: "Template for deploying the required Metrics integration. Requires cluster-admin 'metrics-deployer' service account and 'metrics-deployer' secret."
- tags: "infrastructure"
-labels:
- metrics-infra: deployer
- provider: openshift
- component: deployer
-objects:
--
- apiVersion: v1
- kind: Pod
- metadata:
- generateName: metrics-deployer-
- spec:
- securityContext: {}
- containers:
- - image: ${IMAGE_PREFIX}metrics-deployer:${IMAGE_VERSION}
- name: deployer
- securityContext: {}
- volumeMounts:
- - name: secret
- mountPath: /secret
- readOnly: true
- - name: empty
- mountPath: /etc/deploy
- env:
- - name: PROJECT
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- - name: POD_NAME
- valueFrom:
- fieldRef:
- fieldPath: metadata.name
- - name: IMAGE_PREFIX
- value: ${IMAGE_PREFIX}
- - name: IMAGE_VERSION
- value: ${IMAGE_VERSION}
- - name: MASTER_URL
- value: ${MASTER_URL}
- - name: MODE
- value: ${MODE}
- - name: CONTINUE_ON_ERROR
- value: ${CONTINUE_ON_ERROR}
- - name: REDEPLOY
- value: ${REDEPLOY}
- - name: IGNORE_PREFLIGHT
- value: ${IGNORE_PREFLIGHT}
- - name: USE_PERSISTENT_STORAGE
- value: ${USE_PERSISTENT_STORAGE}
- - name: DYNAMICALLY_PROVISION_STORAGE
- value: ${DYNAMICALLY_PROVISION_STORAGE}
- - name: HAWKULAR_METRICS_HOSTNAME
- value: ${HAWKULAR_METRICS_HOSTNAME}
- - name: CASSANDRA_NODES
- value: ${CASSANDRA_NODES}
- - name: CASSANDRA_PV_SIZE
- value: ${CASSANDRA_PV_SIZE}
- - name: METRIC_DURATION
- value: ${METRIC_DURATION}
- - name: USER_WRITE_ACCESS
- value: ${USER_WRITE_ACCESS}
- - name: HEAPSTER_NODE_ID
- value: ${HEAPSTER_NODE_ID}
- - name: METRIC_RESOLUTION
- value: ${METRIC_RESOLUTION}
- - name: STARTUP_TIMEOUT
- value: ${STARTUP_TIMEOUT}
- dnsPolicy: ClusterFirst
- restartPolicy: Never
- serviceAccount: metrics-deployer
- volumes:
- - name: empty
- emptyDir: {}
- - name: secret
- secret:
- secretName: metrics-deployer
-parameters:
--
- description: 'Specify prefix for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set prefix "openshift/origin-"'
- name: IMAGE_PREFIX
- value: "registry.access.redhat.com/openshift3/"
--
- description: 'Specify version for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set version "latest"'
- name: IMAGE_VERSION
- value: "v3.5"
--
- description: "Internal URL for the master, for authentication retrieval"
- name: MASTER_URL
- value: "https://kubernetes.default.svc:443"
--
- description: "External hostname where clients will reach Hawkular Metrics"
- name: HAWKULAR_METRICS_HOSTNAME
- required: true
--
- description: "Can be set to: 'preflight' to perform validation before a deployment; 'deploy' to perform an initial deployment; 'refresh' to delete and redeploy all components but to keep persisted data and routes; 'redeploy' to delete and redeploy everything (losing all data in the process); 'validate' to re-run validations after a deployment"
- name: MODE
- value: "deploy"
--
- description: "Set to true to continue even if the deployer runs into an error."
- name: CONTINUE_ON_ERROR
- value: "false"
--
- description: "(Deprecated) Turns 'deploy' mode into 'redeploy' mode, deleting and redeploying everything (losing all data in the process)"
- name: REDEPLOY
- value: "false"
--
- description: "If preflight validation is blocking deployment and you're sure you don't care about it, this will ignore the results and proceed to deploy."
- name: IGNORE_PREFLIGHT
- value: "false"
--
- description: "Set to true for persistent storage, set to false to use non persistent storage"
- name: USE_PERSISTENT_STORAGE
- value: "true"
--
- description: "Set to true to dynamically provision storage, set to false to use use pre-created persistent volumes"
- name: DYNAMICALLY_PROVISION_STORAGE
- value: "false"
--
- description: "The number of Cassandra Nodes to deploy for the initial cluster"
- name: CASSANDRA_NODES
- value: "1"
--
- description: "The persistent volume size for each of the Cassandra nodes"
- name: CASSANDRA_PV_SIZE
- value: "10Gi"
--
- description: "How many days metrics should be stored for."
- name: METRIC_DURATION
- value: "7"
--
- description: "If a user accounts should be allowed to write metrics."
- name: USER_WRITE_ACCESS
- value: "false"
--
- description: "The identifier used when generating metric ids in Hawkular"
- name: HEAPSTER_NODE_ID
- value: "nodename"
--
- description: "How often metrics should be gathered. Defaults value of '30s' for 30 seconds"
- name: METRIC_RESOLUTION
- value: "30s"
--
- description: "How long in seconds we should wait until Hawkular Metrics and Heapster starts up before attempting a restart"
- name: STARTUP_TIMEOUT
- value: "500"
diff --git a/roles/openshift_hosted_templates/files/v1.6/origin/logging-deployer.yaml b/roles/openshift_hosted_templates/files/v1.6/origin/logging-deployer.yaml
deleted file mode 100644
index 5b5503500..000000000
--- a/roles/openshift_hosted_templates/files/v1.6/origin/logging-deployer.yaml
+++ /dev/null
@@ -1,342 +0,0 @@
-apiVersion: "v1"
-kind: "List"
-items:
--
- apiVersion: "v1"
- kind: "Template"
- metadata:
- name: logging-deployer-account-template
- annotations:
- description: "Template for creating the deployer account and roles needed for the aggregated logging deployer. Create as cluster-admin."
- tags: "infrastructure"
- objects:
- -
- apiVersion: v1
- kind: ServiceAccount
- name: logging-deployer
- metadata:
- name: logging-deployer
- labels:
- logging-infra: deployer
- provider: openshift
- component: deployer
- -
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: aggregated-logging-kibana
- -
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: aggregated-logging-elasticsearch
- -
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: aggregated-logging-fluentd
- -
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: aggregated-logging-curator
- - apiVersion: v1
- kind: ClusterRole
- metadata:
- name: oauth-editor
- rules:
- - resources:
- - oauthclients
- verbs:
- - create
- - delete
- - apiVersion: v1
- kind: ClusterRole
- metadata:
- name: daemonset-admin
- rules:
- - resources:
- - daemonsets
- apiGroups:
- - extensions
- verbs:
- - create
- - get
- - list
- - watch
- - delete
- - update
- - apiVersion: v1
- kind: ClusterRole
- metadata:
- name: rolebinding-reader
- rules:
- - resources:
- - clusterrolebindings
- verbs:
- - get
- -
- apiVersion: v1
- kind: RoleBinding
- metadata:
- name: logging-deployer-edit-role
- roleRef:
- name: edit
- subjects:
- - kind: ServiceAccount
- name: logging-deployer
- -
- apiVersion: v1
- kind: RoleBinding
- metadata:
- name: logging-deployer-dsadmin-role
- roleRef:
- name: daemonset-admin
- subjects:
- - kind: ServiceAccount
- name: logging-deployer
- -
- apiVersion: v1
- kind: RoleBinding
- metadata:
- name: logging-elasticsearch-view-role
- roleRef:
- name: view
- subjects:
- - kind: ServiceAccount
- name: aggregated-logging-elasticsearch
--
- apiVersion: "v1"
- kind: "Template"
- metadata:
- name: logging-deployer-template
- annotations:
- description: "Template for running the aggregated logging deployer in a pod. Requires empowered 'logging-deployer' service account."
- tags: "infrastructure"
- labels:
- logging-infra: deployer
- provider: openshift
- objects:
- -
- apiVersion: v1
- kind: Pod
- metadata:
- generateName: logging-deployer-
- spec:
- containers:
- - image: ${IMAGE_PREFIX}logging-deployment:${IMAGE_VERSION}
- imagePullPolicy: Always
- name: deployer
- volumeMounts:
- - name: empty
- mountPath: /etc/deploy
- env:
- - name: PROJECT
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- - name: IMAGE_PREFIX
- value: ${IMAGE_PREFIX}
- - name: IMAGE_VERSION
- value: ${IMAGE_VERSION}
- - name: IMAGE_PULL_SECRET
- value: ${IMAGE_PULL_SECRET}
- - name: INSECURE_REGISTRY
- value: ${INSECURE_REGISTRY}
- - name: ENABLE_OPS_CLUSTER
- value: ${ENABLE_OPS_CLUSTER}
- - name: KIBANA_HOSTNAME
- value: ${KIBANA_HOSTNAME}
- - name: KIBANA_OPS_HOSTNAME
- value: ${KIBANA_OPS_HOSTNAME}
- - name: PUBLIC_MASTER_URL
- value: ${PUBLIC_MASTER_URL}
- - name: MASTER_URL
- value: ${MASTER_URL}
- - name: ES_INSTANCE_RAM
- value: ${ES_INSTANCE_RAM}
- - name: ES_PVC_SIZE
- value: ${ES_PVC_SIZE}
- - name: ES_PVC_PREFIX
- value: ${ES_PVC_PREFIX}
- - name: ES_PVC_DYNAMIC
- value: ${ES_PVC_DYNAMIC}
- - name: ES_CLUSTER_SIZE
- value: ${ES_CLUSTER_SIZE}
- - name: ES_NODE_QUORUM
- value: ${ES_NODE_QUORUM}
- - name: ES_RECOVER_AFTER_NODES
- value: ${ES_RECOVER_AFTER_NODES}
- - name: ES_RECOVER_EXPECTED_NODES
- value: ${ES_RECOVER_EXPECTED_NODES}
- - name: ES_RECOVER_AFTER_TIME
- value: ${ES_RECOVER_AFTER_TIME}
- - name: ES_OPS_INSTANCE_RAM
- value: ${ES_OPS_INSTANCE_RAM}
- - name: ES_OPS_PVC_SIZE
- value: ${ES_OPS_PVC_SIZE}
- - name: ES_OPS_PVC_PREFIX
- value: ${ES_OPS_PVC_PREFIX}
- - name: ES_OPS_PVC_DYNAMIC
- value: ${ES_OPS_PVC_DYNAMIC}
- - name: ES_OPS_CLUSTER_SIZE
- value: ${ES_OPS_CLUSTER_SIZE}
- - name: ES_OPS_NODE_QUORUM
- value: ${ES_OPS_NODE_QUORUM}
- - name: ES_OPS_RECOVER_AFTER_NODES
- value: ${ES_OPS_RECOVER_AFTER_NODES}
- - name: ES_OPS_RECOVER_EXPECTED_NODES
- value: ${ES_OPS_RECOVER_EXPECTED_NODES}
- - name: ES_OPS_RECOVER_AFTER_TIME
- value: ${ES_OPS_RECOVER_AFTER_TIME}
- - name: FLUENTD_NODESELECTOR
- value: ${FLUENTD_NODESELECTOR}
- - name: ES_NODESELECTOR
- value: ${ES_NODESELECTOR}
- - name: ES_OPS_NODESELECTOR
- value: ${ES_OPS_NODESELECTOR}
- - name: KIBANA_NODESELECTOR
- value: ${KIBANA_NODESELECTOR}
- - name: KIBANA_OPS_NODESELECTOR
- value: ${KIBANA_OPS_NODESELECTOR}
- - name: CURATOR_NODESELECTOR
- value: ${CURATOR_NODESELECTOR}
- - name: CURATOR_OPS_NODESELECTOR
- value: ${CURATOR_OPS_NODESELECTOR}
- - name: MODE
- value: ${MODE}
- dnsPolicy: ClusterFirst
- restartPolicy: Never
- serviceAccount: logging-deployer
- volumes:
- - name: empty
- emptyDir: {}
- parameters:
- -
- description: "The mode that the deployer runs in."
- name: MODE
- value: "install"
- -
- description: 'Specify prefix for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set prefix "openshift/origin-"'
- name: IMAGE_PREFIX
- value: "docker.io/openshift/origin-"
- -
- description: 'Specify version for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set version "v1.1"'
- name: IMAGE_VERSION
- value: "latest"
- -
- description: "(Deprecated) Specify the name of an existing pull secret to be used for pulling component images from an authenticated registry."
- name: IMAGE_PULL_SECRET
- -
- description: "(Deprecated) Allow the registry for logging component images to be non-secure (not secured with a certificate signed by a known CA)"
- name: INSECURE_REGISTRY
- value: "false"
- -
- description: "(Deprecated) If true, set up to use a second ES cluster for ops logs."
- name: ENABLE_OPS_CLUSTER
- value: "false"
- -
- description: "(Deprecated) External hostname where clients will reach kibana"
- name: KIBANA_HOSTNAME
- value: "kibana.example.com"
- -
- description: "(Deprecated) External hostname at which admins will visit the ops Kibana."
- name: KIBANA_OPS_HOSTNAME
- value: kibana-ops.example.com
- -
- description: "(Deprecated) External URL for the master, for OAuth purposes"
- name: PUBLIC_MASTER_URL
- value: "https://localhost:8443"
- -
- description: "(Deprecated) Internal URL for the master, for authentication retrieval"
- name: MASTER_URL
- value: "https://kubernetes.default.svc.cluster.local"
- -
- description: "(Deprecated) How many instances of ElasticSearch to deploy."
- name: ES_CLUSTER_SIZE
- value: "1"
- -
- description: "(Deprecated) Amount of RAM to reserve per ElasticSearch instance."
- name: ES_INSTANCE_RAM
- value: "8G"
- -
- description: "(Deprecated) Size of the PersistentVolumeClaim to create per ElasticSearch instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead."
- name: ES_PVC_SIZE
- -
- description: "(Deprecated) Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_PVC_SIZE."
- name: ES_PVC_PREFIX
- value: "logging-es-"
- -
- description: '(Deprecated) Set to "true" to request dynamic provisioning (if enabled for your cluster) of a PersistentVolume for the ES PVC. '
- name: ES_PVC_DYNAMIC
- -
- description: "(Deprecated) Number of nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1."
- name: ES_NODE_QUORUM
- -
- description: "(Deprecated) Number of nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_CLUSTER_SIZE."
- name: ES_RECOVER_AFTER_NODES
- -
- description: "(Deprecated) Number of nodes desired to be present before the cluster will recover from a full restart. By default, ES_CLUSTER_SIZE."
- name: ES_RECOVER_EXPECTED_NODES
- -
- description: "(Deprecated) Timeout for *expected* nodes to be present when cluster is recovering from a full restart."
- name: ES_RECOVER_AFTER_TIME
- value: "5m"
- -
- description: "(Deprecated) How many ops instances of ElasticSearch to deploy. By default, ES_CLUSTER_SIZE."
- name: ES_OPS_CLUSTER_SIZE
- -
- description: "(Deprecated) Amount of RAM to reserve per ops ElasticSearch instance."
- name: ES_OPS_INSTANCE_RAM
- value: "8G"
- -
- description: "(Deprecated) Size of the PersistentVolumeClaim to create per ElasticSearch ops instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead."
- name: ES_OPS_PVC_SIZE
- -
- description: "(Deprecated) Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_OPS_PVC_SIZE."
- name: ES_OPS_PVC_PREFIX
- value: "logging-es-ops-"
- -
- description: '(Deprecated) Set to "true" to request dynamic provisioning (if enabled for your cluster) of a PersistentVolume for the ES ops PVC. '
- name: ES_OPS_PVC_DYNAMIC
- -
- description: "(Deprecated) Number of ops nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1."
- name: ES_OPS_NODE_QUORUM
- -
- description: "(Deprecated) Number of ops nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_OPS_CLUSTER_SIZE."
- name: ES_OPS_RECOVER_AFTER_NODES
- -
- description: "(Deprecated) Number of ops nodes desired to be present before the cluster will recover from a full restart. By default, ES_OPS_CLUSTER_SIZE."
- name: ES_OPS_RECOVER_EXPECTED_NODES
- -
- description: "(Deprecated) Timeout for *expected* ops nodes to be present when cluster is recovering from a full restart."
- name: ES_OPS_RECOVER_AFTER_TIME
- value: "5m"
- -
- description: "(Deprecated) The nodeSelector used for the Fluentd DaemonSet."
- name: FLUENTD_NODESELECTOR
- value: "logging-infra-fluentd=true"
- -
- description: "(Deprecated) Node selector Elasticsearch cluster (label=value)."
- name: ES_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector Elasticsearch operations cluster (label=value)."
- name: ES_OPS_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector Kibana cluster (label=value)."
- name: KIBANA_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector Kibana operations cluster (label=value)."
- name: KIBANA_OPS_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector Curator (label=value)."
- name: CURATOR_NODESELECTOR
- value: ""
- -
- description: "(Deprecated) Node selector operations Curator (label=value)."
- name: CURATOR_OPS_NODESELECTOR
- value: ""
diff --git a/roles/openshift_hosted_templates/files/v1.6/origin/metrics-deployer.yaml b/roles/openshift_hosted_templates/files/v1.6/origin/metrics-deployer.yaml
deleted file mode 100644
index d191c0439..000000000
--- a/roles/openshift_hosted_templates/files/v1.6/origin/metrics-deployer.yaml
+++ /dev/null
@@ -1,168 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2014-2015 Red Hat, Inc. and/or its affiliates
-# and other contributors as indicated by the @author tags.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-apiVersion: "v1"
-kind: "Template"
-metadata:
- name: metrics-deployer-template
- annotations:
- description: "Template for deploying the required Metrics integration. Requires cluster-admin 'metrics-deployer' service account and 'metrics-deployer' secret."
- tags: "infrastructure"
-labels:
- metrics-infra: deployer
- provider: openshift
- component: deployer
-objects:
--
- apiVersion: v1
- kind: Pod
- metadata:
- generateName: metrics-deployer-
- spec:
- securityContext: {}
- containers:
- - image: ${IMAGE_PREFIX}metrics-deployer:${IMAGE_VERSION}
- name: deployer
- securityContext: {}
- volumeMounts:
- - name: secret
- mountPath: /secret
- readOnly: true
- - name: empty
- mountPath: /etc/deploy
- env:
- - name: PROJECT
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- - name: POD_NAME
- valueFrom:
- fieldRef:
- fieldPath: metadata.name
- - name: IMAGE_PREFIX
- value: ${IMAGE_PREFIX}
- - name: IMAGE_VERSION
- value: ${IMAGE_VERSION}
- - name: MASTER_URL
- value: ${MASTER_URL}
- - name: MODE
- value: ${MODE}
- - name: CONTINUE_ON_ERROR
- value: ${CONTINUE_ON_ERROR}
- - name: REDEPLOY
- value: ${REDEPLOY}
- - name: IGNORE_PREFLIGHT
- value: ${IGNORE_PREFLIGHT}
- - name: USE_PERSISTENT_STORAGE
- value: ${USE_PERSISTENT_STORAGE}
- - name: DYNAMICALLY_PROVISION_STORAGE
- value: ${DYNAMICALLY_PROVISION_STORAGE}
- - name: HAWKULAR_METRICS_HOSTNAME
- value: ${HAWKULAR_METRICS_HOSTNAME}
- - name: CASSANDRA_NODES
- value: ${CASSANDRA_NODES}
- - name: CASSANDRA_PV_SIZE
- value: ${CASSANDRA_PV_SIZE}
- - name: METRIC_DURATION
- value: ${METRIC_DURATION}
- - name: USER_WRITE_ACCESS
- value: ${USER_WRITE_ACCESS}
- - name: HEAPSTER_NODE_ID
- value: ${HEAPSTER_NODE_ID}
- - name: METRIC_RESOLUTION
- value: ${METRIC_RESOLUTION}
- - name: STARTUP_TIMEOUT
- value: ${STARTUP_TIMEOUT}
- dnsPolicy: ClusterFirst
- restartPolicy: Never
- serviceAccount: metrics-deployer
- volumes:
- - name: empty
- emptyDir: {}
- - name: secret
- secret:
- secretName: metrics-deployer
-parameters:
--
- description: 'Specify prefix for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set prefix "openshift/origin-"'
- name: IMAGE_PREFIX
- value: "openshift/origin-"
--
- description: 'Specify version for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set version "latest"'
- name: IMAGE_VERSION
- value: "latest"
--
- description: "Internal URL for the master, for authentication retrieval"
- name: MASTER_URL
- value: "https://kubernetes.default.svc:443"
--
- description: "External hostname where clients will reach Hawkular Metrics"
- name: HAWKULAR_METRICS_HOSTNAME
- required: true
--
- description: "Can be set to: 'preflight' to perform validation before a deployment; 'deploy' to perform an initial deployment; 'refresh' to delete and redeploy all components but to keep persisted data and routes; 'redeploy' to delete and redeploy everything (losing all data in the process); 'validate' to re-run validations after a deployment"
- name: MODE
- value: "deploy"
--
- description: "Set to true to continue even if the deployer runs into an error."
- name: CONTINUE_ON_ERROR
- value: "false"
--
- description: "(Deprecated) Turns 'deploy' mode into 'redeploy' mode, deleting and redeploying everything (losing all data in the process)"
- name: REDEPLOY
- value: "false"
--
- description: "If preflight validation is blocking deployment and you're sure you don't care about it, this will ignore the results and proceed to deploy."
- name: IGNORE_PREFLIGHT
- value: "false"
--
- description: "Set to true for persistent storage, set to false to use non persistent storage"
- name: USE_PERSISTENT_STORAGE
- value: "true"
--
- description: "Set to true to dynamically provision storage, set to false to use use pre-created persistent volumes"
- name: DYNAMICALLY_PROVISION_STORAGE
- value: "false"
--
- description: "The number of Cassandra Nodes to deploy for the initial cluster"
- name: CASSANDRA_NODES
- value: "1"
--
- description: "The persistent volume size for each of the Cassandra nodes"
- name: CASSANDRA_PV_SIZE
- value: "10Gi"
--
- description: "How many days metrics should be stored for."
- name: METRIC_DURATION
- value: "7"
--
- description: "If a user accounts should be allowed to write metrics."
- name: USER_WRITE_ACCESS
- value: "false"
--
- description: "The identifier used when generating metric ids in Hawkular"
- name: HEAPSTER_NODE_ID
- value: "nodename"
--
- description: "How often metrics should be gathered. Defaults value of '30s' for 30 seconds"
- name: METRIC_RESOLUTION
- value: "30s"
--
- description: "How long in seconds we should wait until Hawkular Metrics and Heapster starts up before attempting a restart"
- name: STARTUP_TIMEOUT
- value: "500"
diff --git a/roles/openshift_hosted_templates/files/v1.6/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml
index 28feac4e6..8fe02444e 100644
--- a/roles/openshift_hosted_templates/files/v1.6/enterprise/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml
@@ -103,9 +103,9 @@ parameters:
- description: 'Specify "registry/repository" prefix for container image; e.g. for "registry.access.redhat.com/openshift3/registry-console:latest", set prefix "registry.access.redhat.com/openshift3/"'
name: IMAGE_PREFIX
value: "registry.access.redhat.com/openshift3/"
- - description: 'Specify image version; e.g. for "registry.access.redhat.com/openshift3/registry-console:3.5", set version "3.5"'
+ - description: 'Specify image version; e.g. for "registry.access.redhat.com/openshift3/registry-console:3.6", set version "3.6"'
name: IMAGE_VERSION
- value: "3.5"
+ value: "3.6"
- description: "The public URL for the Openshift OAuth Provider, e.g. https://openshift.example.com:8443"
name: OPENSHIFT_OAUTH_PROVIDER_URL
required: true
diff --git a/roles/openshift_hosted_templates/files/v1.6/origin/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.6/origin/registry-console.yaml
index 80cc4233b..80cc4233b 100644
--- a/roles/openshift_hosted_templates/files/v1.6/origin/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v3.6/origin/registry-console.yaml
diff --git a/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2 b/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2
index 5385df3b7..72182fcdd 100644
--- a/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2
+++ b/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2
@@ -1,7 +1,7 @@
[Unit]
-After=docker.service
-Requires=docker.service
-PartOf=docker.service
+After={{ openshift.docker.service_name }}.service
+Requires={{ openshift.docker.service_name }}.service
+PartOf={{ openshift.docker.service_name }}.service
[Service]
ExecStartPre=-/usr/bin/docker rm -f openshift_loadbalancer
@@ -14,4 +14,4 @@ Restart=always
RestartSec=5s
[Install]
-WantedBy=docker.service
+WantedBy={{ openshift.docker.service_name }}.service
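
The load balancer unit template now derives its docker dependency from the openshift.docker.service_name fact instead of hard-coding docker.service. A sketch of the effect, assuming the fact resolves to container-engine when the docker system container is in use and to plain docker otherwise:

    # Illustrative inventory switch assumed to drive the fact:
    openshift_docker_use_system_container: true
    # With service_name == "container-engine", the rendered unit reads:
    #   After=container-engine.service
    #   Requires=container-engine.service
    #   PartOf=container-engine.service
    #   ...
    #   WantedBy=container-engine.service
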
diff --git a/roles/openshift_logging/README.md b/roles/openshift_logging/README.md
index 42f4fc72e..cba0f2de8 100644
--- a/roles/openshift_logging/README.md
+++ b/roles/openshift_logging/README.md
@@ -91,8 +91,6 @@ same as above for their non-ops counterparts, but apply to the OPS cluster insta
- `openshift_logging_es_ops_pvc_prefix`: logging-es-ops
- `openshift_logging_es_ops_recover_after_time`: 5m
- `openshift_logging_es_ops_storage_group`: 65534
-- `openshift_logging_es_ops_number_of_shards`: The number of primary shards for every new index created in ES. Defaults to '1'.
-- `openshift_logging_es_ops_number_of_replicas`: The number of replica shards per primary shard for every new index. Defaults to '0'.
- `openshift_logging_kibana_ops_hostname`: The Operations Kibana hostname. Defaults to 'kibana-ops.example.com'.
- `openshift_logging_kibana_ops_cpu_limit`: The amount of CPU to allocate to Kibana or unset if not specified.
- `openshift_logging_kibana_ops_memory_limit`: The amount of memory to allocate to Kibana or unset if not specified.
diff --git a/roles/openshift_logging/defaults/main.yml b/roles/openshift_logging/defaults/main.yml
index 96ed44011..f43336dc4 100644
--- a/roles/openshift_logging/defaults/main.yml
+++ b/roles/openshift_logging/defaults/main.yml
@@ -3,6 +3,10 @@ openshift_logging_use_ops: "{{ openshift_hosted_logging_enable_ops_cluster | def
openshift_logging_master_url: "https://kubernetes.default.svc.{{ openshift.common.dns_domain }}"
openshift_logging_master_public_url: "{{ openshift_hosted_logging_master_public_url | default('https://' + openshift.common.public_hostname + ':' ~ (openshift_master_api_port | default('8443', true))) }}"
openshift_logging_namespace: logging
+openshift_logging_nodeselector: null
+openshift_logging_labels: {}
+openshift_logging_label_key: ""
+openshift_logging_label_value: ""
openshift_logging_install_logging: True
openshift_logging_image_pull_secret: "{{ openshift_hosted_logging_image_pull_secret | default('') }}"
@@ -22,10 +26,10 @@ openshift_logging_curator_ops_nodeselector: "{{ openshift_hosted_logging_curator
openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}"
openshift_logging_kibana_cpu_limit: null
-openshift_logging_kibana_memory_limit: null
+openshift_logging_kibana_memory_limit: 736Mi
openshift_logging_kibana_proxy_debug: false
openshift_logging_kibana_proxy_cpu_limit: null
-openshift_logging_kibana_proxy_memory_limit: null
+openshift_logging_kibana_proxy_memory_limit: 96Mi
openshift_logging_kibana_replica_count: 1
openshift_logging_kibana_edge_term_policy: Redirect
@@ -46,10 +50,10 @@ openshift_logging_kibana_ca: ""
openshift_logging_kibana_ops_hostname: "{{ openshift_hosted_logging_ops_hostname | default('kibana-ops.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}"
openshift_logging_kibana_ops_cpu_limit: null
-openshift_logging_kibana_ops_memory_limit: null
+openshift_logging_kibana_ops_memory_limit: 736Mi
openshift_logging_kibana_ops_proxy_debug: false
openshift_logging_kibana_ops_proxy_cpu_limit: null
-openshift_logging_kibana_ops_proxy_memory_limit: null
+openshift_logging_kibana_ops_proxy_memory_limit: 96Mi
openshift_logging_kibana_ops_replica_count: 1
#The absolute path on the control node to the cert file to use
@@ -68,7 +72,7 @@ openshift_logging_fluentd_nodeselector: "{{ openshift_hosted_logging_fluentd_nod
openshift_logging_fluentd_cpu_limit: 100m
openshift_logging_fluentd_memory_limit: 512Mi
openshift_logging_fluentd_es_copy: false
-openshift_logging_fluentd_use_journal: "{{ openshift_hosted_logging_use_journal | default('') }}"
+openshift_logging_fluentd_use_journal: "{{ openshift_hosted_logging_use_journal if openshift_hosted_logging_use_journal is defined else (docker_log_driver == 'journald') | ternary(True, False) if docker_log_driver is defined else (openshift.docker.log_driver == 'journald') | ternary(True, False) if openshift.docker.log_driver is defined else openshift.docker.options | search('--log-driver=journald') if openshift.docker.options is defined else default(omit) }}"
openshift_logging_fluentd_journal_source: "{{ openshift_hosted_logging_journal_source | default('') }}"
openshift_logging_fluentd_journal_read_from_head: "{{ openshift_hosted_logging_journal_read_from_head | default('') }}"
openshift_logging_fluentd_hosts: ['--all']
@@ -113,12 +117,19 @@ openshift_logging_es_ops_pvc_prefix: "{{ openshift_hosted_logging_elasticsearch_
openshift_logging_es_ops_recover_after_time: 5m
openshift_logging_es_ops_storage_group: "{{ openshift_hosted_logging_elasticsearch_storage_group | default('65534') }}"
openshift_logging_es_ops_nodeselector: "{{ openshift_hosted_logging_elasticsearch_ops_nodeselector | default('') | map_from_pairs }}"
-openshift_logging_es_ops_number_of_shards: 1
-openshift_logging_es_ops_number_of_replicas: 0
# storage related defaults
openshift_logging_storage_access_modes: "{{ openshift_hosted_logging_storage_access_modes | default(['ReadWriteOnce']) }}"
+# mux - secure_forward listener service
+openshift_logging_mux_allow_external: False
+openshift_logging_use_mux: "{{ openshift_logging_mux_allow_external | default(False) }}"
+# this tells the fluentd node agent to use mux instead of sending directly to Elasticsearch
+openshift_logging_use_mux_client: False
+openshift_logging_mux_hostname: "{{ 'mux.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}"
+openshift_logging_mux_port: 24284
+openshift_logging_mux_cpu_limit: 100m
+openshift_logging_mux_memory_limit: 512Mi
# following can be uncommented to provide values for configmaps -- take care when providing file contents as it may cause your cluster to not operate correctly
#es_logging_contents:
@@ -127,3 +138,5 @@ openshift_logging_storage_access_modes: "{{ openshift_hosted_logging_storage_acc
#fluentd_config_contents:
#fluentd_throttle_contents:
#fluentd_secureforward_contents:
+#fluentd_mux_config_contents:
+#fluentd_mux_secureforward_contents:
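Note: every mux default added above can be overridden from the inventory. A minimal, illustrative sketch (values are examples, not the defaults) of enabling the secure_forward listener and pointing the node fluentd agents at it:

openshift_logging_use_mux: True
openshift_logging_use_mux_client: True
openshift_logging_mux_allow_external: True
openshift_logging_mux_hostname: "mux.apps.example.com"
openshift_logging_mux_port: 24284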
diff --git a/roles/openshift_logging/handlers/main.yml b/roles/openshift_logging/handlers/main.yml
index ffb812271..69c5a1663 100644
--- a/roles/openshift_logging/handlers/main.yml
+++ b/roles/openshift_logging/handlers/main.yml
@@ -4,6 +4,15 @@
when: (openshift.master.ha is not defined or not openshift.master.ha | bool) and (not (master_service_status_changed | default(false) | bool))
notify: Verify API Server
+- name: restart master api
+ systemd: name={{ openshift.common.service_type }}-master-api state=restarted
+ when: (openshift.master.ha is defined and openshift.master.ha | bool) and (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
+ notify: Verify API Server
+
+- name: restart master controllers
+ systemd: name={{ openshift.common.service_type }}-master-controllers state=restarted
+ when: (openshift.master.ha is defined and openshift.master.ha | bool) and (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
+
- name: Verify API Server
# Using curl here since the uri module requires python-httplib2 and
# wait_for port doesn't provide health information.
diff --git a/roles/openshift_logging/library/openshift_logging_facts.py b/roles/openshift_logging/library/openshift_logging_facts.py
index 64bc33435..a55e72725 100644
--- a/roles/openshift_logging/library/openshift_logging_facts.py
+++ b/roles/openshift_logging/library/openshift_logging_facts.py
@@ -37,7 +37,7 @@ LOGGING_INFRA_KEY = "logging-infra"
# selectors for filtering resources
DS_FLUENTD_SELECTOR = LOGGING_INFRA_KEY + "=" + "fluentd"
LOGGING_SELECTOR = LOGGING_INFRA_KEY + "=" + "support"
-ROUTE_SELECTOR = "component=support, logging-infra=support, provider=openshift"
+ROUTE_SELECTOR = "component=support,logging-infra=support,provider=openshift"
COMPONENTS = ["kibana", "curator", "elasticsearch", "fluentd", "kibana_ops", "curator_ops", "elasticsearch_ops"]
diff --git a/roles/openshift_logging/tasks/delete_logging.yaml b/roles/openshift_logging/tasks/delete_logging.yaml
index 188ea246c..2f5b68b4d 100644
--- a/roles/openshift_logging/tasks/delete_logging.yaml
+++ b/roles/openshift_logging/tasks/delete_logging.yaml
@@ -44,6 +44,7 @@
- logging-kibana
- logging-kibana-proxy
- logging-curator
+ - logging-mux
ignore_errors: yes
register: delete_result
changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
@@ -109,5 +110,6 @@
- logging-curator
- logging-elasticsearch
- logging-fluentd
+ - logging-mux
register: delete_result
changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
diff --git a/roles/openshift_logging/tasks/generate_certs.yaml b/roles/openshift_logging/tasks/generate_certs.yaml
index 740e490e1..b34df018d 100644
--- a/roles/openshift_logging/tasks/generate_certs.yaml
+++ b/roles/openshift_logging/tasks/generate_certs.yaml
@@ -45,6 +45,21 @@
- procure_component: kibana-internal
hostnames: "kibana, kibana-ops, {{openshift_logging_kibana_hostname}}, {{openshift_logging_kibana_ops_hostname}}"
+- include: procure_server_certs.yaml
+ loop_control:
+ loop_var: cert_info
+ with_items:
+ - procure_component: mux
+ hostnames: "logging-mux, {{openshift_logging_mux_hostname}}"
+ when: openshift_logging_use_mux
+
+- include: procure_shared_key.yaml
+ loop_control:
+ loop_var: shared_key_info
+ with_items:
+ - procure_component: mux
+ when: openshift_logging_use_mux
+
- name: Copy proxy TLS configuration file
copy: src=server-tls.json dest={{generated_certs_dir}}/server-tls.json
when: server_tls_json is undefined
@@ -85,6 +100,14 @@
loop_control:
loop_var: node_name
+- name: Generate PEM cert for mux
+ include: generate_pems.yaml component={{node_name}}
+ with_items:
+ - system.logging.mux
+ loop_control:
+ loop_var: node_name
+ when: openshift_logging_use_mux
+
- name: Creating necessary JKS certs
include: generate_jks.yaml
diff --git a/roles/openshift_logging/tasks/generate_configmaps.yaml b/roles/openshift_logging/tasks/generate_configmaps.yaml
index 253543f54..b047eb35a 100644
--- a/roles/openshift_logging/tasks/generate_configmaps.yaml
+++ b/roles/openshift_logging/tasks/generate_configmaps.yaml
@@ -21,6 +21,8 @@
dest="{{local_tmp.stdout}}/elasticsearch-gen-template.yml"
vars:
- allow_cluster_reader: "{{openshift_logging_es_ops_allow_cluster_reader | lower | default('false')}}"
+ - es_number_of_shards: "{{ openshift_logging_es_number_of_shards | default(1) }}"
+ - es_number_of_replicas: "{{ openshift_logging_es_number_of_replicas | default(0) }}"
when: es_config_contents is undefined
changed_when: no
@@ -134,3 +136,43 @@
when: fluentd_configmap.stdout is defined
changed_when: no
check_mode: no
+
+- block:
+ - copy:
+ src: fluent.conf
+ dest: "{{mktemp.stdout}}/fluent-mux.conf"
+ when: fluentd_mux_config_contents is undefined
+ changed_when: no
+
+ - copy:
+ src: secure-forward.conf
+ dest: "{{mktemp.stdout}}/secure-forward-mux.conf"
+    when: fluentd_mux_secureforward_contents is undefined
+ changed_when: no
+
+ - copy:
+ content: "{{fluentd_mux_config_contents}}"
+ dest: "{{mktemp.stdout}}/fluent-mux.conf"
+ when: fluentd_mux_config_contents is defined
+ changed_when: no
+
+ - copy:
+ content: "{{fluentd_mux_secureforward_contents}}"
+ dest: "{{mktemp.stdout}}/secure-forward-mux.conf"
+ when: fluentd_mux_secureforward_contents is defined
+ changed_when: no
+
+ - command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create configmap logging-mux
+ --from-file=fluent.conf={{mktemp.stdout}}/fluent-mux.conf
+ --from-file=secure-forward.conf={{mktemp.stdout}}/secure-forward-mux.conf -o yaml --dry-run
+ register: mux_configmap
+ changed_when: no
+
+ - copy:
+ content: "{{mux_configmap.stdout}}"
+ dest: "{{mktemp.stdout}}/templates/logging-mux-configmap.yaml"
+ when: mux_configmap.stdout is defined
+ changed_when: no
+ check_mode: no
+ when: openshift_logging_use_mux
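The block above only copies the role's bundled fluent.conf/secure-forward.conf when the override variables are undefined; a hedged sketch of supplying custom mux configuration instead (file paths are hypothetical):

fluentd_mux_config_contents: "{{ lookup('file', 'files/custom-fluent-mux.conf') }}"
fluentd_mux_secureforward_contents: "{{ lookup('file', 'files/custom-secure-forward-mux.conf') }}"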
diff --git a/roles/openshift_logging/tasks/generate_routes.yaml b/roles/openshift_logging/tasks/generate_routes.yaml
index e77da7a24..f76bb3a0a 100644
--- a/roles/openshift_logging/tasks/generate_routes.yaml
+++ b/roles/openshift_logging/tasks/generate_routes.yaml
@@ -1,14 +1,14 @@
---
- set_fact: kibana_key={{ lookup('file', openshift_logging_kibana_key) | b64encode }}
- when: "{{ openshift_logging_kibana_key | trim | length > 0 }}"
+ when: openshift_logging_kibana_key | trim | length > 0
changed_when: false
- set_fact: kibana_cert={{ lookup('file', openshift_logging_kibana_cert)| b64encode }}
- when: "{{openshift_logging_kibana_cert | trim | length > 0}}"
+ when: openshift_logging_kibana_cert | trim | length > 0
changed_when: false
- set_fact: kibana_ca={{ lookup('file', openshift_logging_kibana_ca)| b64encode }}
- when: "{{openshift_logging_kibana_ca | trim | length > 0}}"
+ when: openshift_logging_kibana_ca | trim | length > 0
changed_when: false
- set_fact: kibana_ca={{key_pairs | entry_from_named_pair('ca_file') }}
diff --git a/roles/openshift_logging/tasks/generate_secrets.yaml b/roles/openshift_logging/tasks/generate_secrets.yaml
index f396bcc6d..c1da49fd8 100644
--- a/roles/openshift_logging/tasks/generate_secrets.yaml
+++ b/roles/openshift_logging/tasks/generate_secrets.yaml
@@ -34,6 +34,36 @@
check_mode: no
changed_when: no
+- name: Retrieving the cert to use when generating secrets for mux
+ slurp: src="{{generated_certs_dir}}/{{item.file}}"
+ register: mux_key_pairs
+ with_items:
+ - { name: "ca_file", file: "ca.crt" }
+ - { name: "mux_key", file: "system.logging.mux.key"}
+ - { name: "mux_cert", file: "system.logging.mux.crt"}
+ - { name: "mux_shared_key", file: "mux_shared_key"}
+ when: openshift_logging_use_mux
+
+- name: Generating secrets for mux
+ template: src=secret.j2 dest={{mktemp.stdout}}/templates/{{secret_name}}-secret.yaml
+ vars:
+ secret_name: "logging-{{component}}"
+ secret_key_file: "{{component}}_key"
+ secret_cert_file: "{{component}}_cert"
+ secrets:
+ - {key: ca, value: "{{mux_key_pairs | entry_from_named_pair('ca_file')| b64decode }}"}
+ - {key: key, value: "{{mux_key_pairs | entry_from_named_pair(secret_key_file)| b64decode }}"}
+ - {key: cert, value: "{{mux_key_pairs | entry_from_named_pair(secret_cert_file)| b64decode }}"}
+ - {key: shared_key, value: "{{mux_key_pairs | entry_from_named_pair('mux_shared_key')| b64decode }}"}
+ secret_keys: ["ca", "cert", "key", "shared_key"]
+ with_items:
+ - mux
+ loop_control:
+ loop_var: component
+ check_mode: no
+ changed_when: no
+ when: openshift_logging_use_mux
+
- name: Generating secrets for kibana proxy
template: src=secret.j2 dest={{mktemp.stdout}}/templates/{{secret_name}}-secret.yaml
vars:
@@ -43,7 +73,7 @@
- {key: session-secret, value: "{{session_secret}}"}
- {key: server-key, value: "{{kibana_key_file}}"}
- {key: server-cert, value: "{{kibana_cert_file}}"}
- - {key: server-tls, value: "{{server_tls_file}}"}
+ - {key: server-tls.json, value: "{{server_tls_file}}"}
secret_keys: ["server-tls.json", "server-key", "session-secret", "oauth-secret", "server-cert"]
kibana_key_file: "{{key_pairs | entry_from_named_pair('kibana_internal_key')| b64decode }}"
kibana_cert_file: "{{key_pairs | entry_from_named_pair('kibana_internal_cert')| b64decode }}"
diff --git a/roles/openshift_logging/tasks/generate_services.yaml b/roles/openshift_logging/tasks/generate_services.yaml
index 5091c1209..e3a5c5eb3 100644
--- a/roles/openshift_logging/tasks/generate_services.yaml
+++ b/roles/openshift_logging/tasks/generate_services.yaml
@@ -85,3 +85,35 @@
when: openshift_logging_use_ops | bool
check_mode: no
changed_when: no
+
+- name: Generating logging-mux service for external connections
+ template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-mux-svc.yaml
+ vars:
+ obj_name: logging-mux
+ ports:
+ - {port: "{{openshift_logging_mux_port}}", targetPort: mux-forward, name: mux-forward}
+ labels:
+ logging-infra: support
+ selector:
+ provider: openshift
+ component: mux
+ externalIPs:
+ - "{{ ansible_eth0.ipv4.address }}"
+ check_mode: no
+ changed_when: no
+ when: openshift_logging_mux_allow_external
+
+- name: Generating logging-mux service for intra-cluster connections
+ template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-mux-svc.yaml
+ vars:
+ obj_name: logging-mux
+ ports:
+ - {port: "{{openshift_logging_mux_port}}", targetPort: mux-forward, name: mux-forward}
+ labels:
+ logging-infra: support
+ selector:
+ provider: openshift
+ component: mux
+ check_mode: no
+ changed_when: no
+ when: openshift_logging_use_mux and not openshift_logging_mux_allow_external
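For reference, the external-connection task above should render roughly the following Service object (a sketch only; the node address and the exact service.j2 output are assumptions):

apiVersion: v1
kind: Service
metadata:
  name: logging-mux
  labels:
    logging-infra: support
spec:
  ports:
  - name: mux-forward
    port: 24284
    targetPort: mux-forward
  selector:
    provider: openshift
    component: mux
  externalIPs:
  - 192.0.2.10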
diff --git a/roles/openshift_logging/tasks/install_elasticsearch.yaml b/roles/openshift_logging/tasks/install_elasticsearch.yaml
index 28fad420b..a981e7f7f 100644
--- a/roles/openshift_logging/tasks/install_elasticsearch.yaml
+++ b/roles/openshift_logging/tasks/install_elasticsearch.yaml
@@ -3,62 +3,51 @@
set_fact: openshift_logging_current_es_size={{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | length }}
- set_fact: openshift_logging_es_pvc_prefix="logging-es"
- when: "not openshift_logging_es_pvc_prefix or openshift_logging_es_pvc_prefix == ''"
+ when: not openshift_logging_es_pvc_prefix or openshift_logging_es_pvc_prefix == ''
-- set_fact: es_pvc_pool={{[]}}
+- set_fact: es_indices={{ es_indices | default([]) + [item | int - 1] }}
+ with_sequence: count={{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | count }}
-- set_fact: openshift_logging_es_pvc_prefix="{{ openshift_logging_es_pvc_prefix | default('logging-es') }}"
-
-- name: Generate PersistentVolumeClaims
- include: "{{ role_path}}/tasks/generate_pvcs.yaml"
+### evaluate if the PVC attached to the dc currently matches the provided vars
+## if it does then we reuse that pvc in the DC
+- include: set_es_storage.yaml
vars:
- es_pv_selector: "{{openshift_logging_es_pv_selector}}"
- es_pvc_dynamic: "{{openshift_logging_es_pvc_dynamic | bool}}"
- es_pvc_names: "{{openshift_logging_facts.elasticsearch.pvcs.keys()}}"
- es_pvc_prefix: "{{openshift_logging_es_pvc_prefix}}"
- es_pvc_size: "{{openshift_logging_es_pvc_size}}"
- es_dc_names: "{{openshift_logging_facts.elasticsearch.deploymentconfigs.keys()}}"
- es_cluster_size: "{{openshift_logging_es_cluster_size}}"
- es_access_modes: "{{ openshift_logging_storage_access_modes }}"
-
-# we should initialize the es_dc_pool with the current keys
-- name: Init pool of DeploymentConfig names for Elasticsearch
- set_fact: es_dc_pool={{ es_dc_pool | default([]) + [deploy_name] }}
- with_items: "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() }}"
+ es_component: es
+ es_name: "{{ deployment.0 }}"
+ es_spec: "{{ deployment.1 }}"
+ es_pvc_count: "{{ deployment.2 | int }}"
+ es_node_selector: "{{ openshift_logging_es_nodeselector | default({}) }}"
+ es_pvc_names_count: "{{ openshift_logging_facts.elasticsearch.pvcs.keys() | count }}"
+ es_pvc_size: "{{ openshift_logging_es_pvc_size }}"
+ es_pvc_prefix: "{{ openshift_logging_es_pvc_prefix }}"
+ es_pvc_dynamic: "{{ openshift_logging_es_pvc_dynamic | bool }}"
+ es_pv_selector: "{{ openshift_logging_es_pv_selector }}"
+ es_cpu_limit: "{{ openshift_logging_es_cpu_limit }}"
+ es_memory_limit: "{{ openshift_logging_es_memory_limit }}"
+ with_together:
+ - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() }}"
+ - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.values() }}"
+ - "{{ es_indices | default([]) }}"
loop_control:
- loop_var: deploy_name
-
-# This should be used to generate new DC names if necessary
-- name: Create new DeploymentConfig names for Elasticsearch
- set_fact: es_dc_pool={{es_dc_pool|default([]) + [deploy_name]}}
- vars:
- component: es
- es_cluster_name: "{{component}}"
- deploy_name_prefix: "logging-{{component}}"
- deploy_name: "{{deploy_name_prefix}}-{{'abcdefghijklmnopqrstuvwxyz0123456789'|random_word(8)}}"
- with_sequence: count={{ openshift_logging_es_cluster_size | int - openshift_logging_current_es_size | int }}
- check_mode: no
+ loop_var: deployment
+## if it does not then we should create one that does and attach it
-- name: Generate Elasticsearch DeploymentConfig
- template: src=es.j2 dest={{mktemp.stdout}}/templates/logging-{{deploy_name}}-dc.yaml
+## create a new dc/pvc if needed
+- include: set_es_storage.yaml
vars:
- component: es
- logging_component: elasticsearch
- deploy_name_prefix: "logging-{{component}}"
- image: "{{openshift_logging_image_prefix}}logging-elasticsearch:{{openshift_logging_image_version}}"
- es_cluster_name: "{{component}}"
- es_cpu_limit: "{{openshift_logging_es_cpu_limit }}"
- es_memory_limit: "{{openshift_logging_es_memory_limit}}"
- pvc_claim: "{{(es_pvc_pool | length > item.0) | ternary(es_pvc_pool[item.0], None)}}"
- deploy_name: "{{item.1}}"
- es_node_selector: "{{openshift_logging_es_nodeselector | default({}) }}"
- es_storage: "{{openshift_logging_facts|es_storage(deploy_name, pvc_claim)}}"
- es_number_of_shards: "{{ openshift_logging_es_number_of_shards }}"
- es_number_of_replicas: "{{ openshift_logging_es_number_of_replicas }}"
- with_indexed_items:
- - "{{ es_dc_pool }}"
- check_mode: no
- changed_when: no
+ es_component: es
+ es_name: "logging-es-{{'abcdefghijklmnopqrstuvwxyz0123456789'|random_word(8)}}"
+ es_spec: "{}"
+ es_pvc_count: "{{ item | int - 1 }}"
+ es_node_selector: "{{ openshift_logging_es_nodeselector | default({}) }}"
+ es_pvc_names_count: "{{ [openshift_logging_facts.elasticsearch.pvcs.keys() | count, openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | count] | max }}"
+ es_pvc_size: "{{ openshift_logging_es_pvc_size }}"
+ es_pvc_prefix: "{{ openshift_logging_es_pvc_prefix }}"
+ es_pvc_dynamic: "{{ openshift_logging_es_pvc_dynamic | bool }}"
+ es_pv_selector: "{{ openshift_logging_es_pv_selector }}"
+ es_cpu_limit: "{{ openshift_logging_es_cpu_limit }}"
+ es_memory_limit: "{{ openshift_logging_es_memory_limit }}"
+ with_sequence: count={{ openshift_logging_es_cluster_size | int - openshift_logging_facts.elasticsearch.deploymentconfigs | count }}
# --------- Tasks for Operation clusters ---------
@@ -73,74 +62,57 @@
es_dcs: "{{openshift_logging_facts.elasticsearch_ops.deploymentconfigs}}"
cluster_size: "{{openshift_logging_es_ops_cluster_size|int}}"
when:
- - openshift_logging_use_ops | bool
- - "{{es_dcs | length - openshift_logging_es_ops_cluster_size|int | abs > 1}}"
+ - openshift_logging_use_ops | bool
+ - "{{es_dcs | length - openshift_logging_es_ops_cluster_size|int | abs > 1}}"
check_mode: no
- set_fact: openshift_logging_es_ops_pvc_prefix="logging-es-ops"
- when: "not openshift_logging_es_ops_pvc_prefix or openshift_logging_es_ops_pvc_prefix == ''"
-
-- set_fact: es_pvc_pool={{[]}}
-
-- name: Generate PersistentVolumeClaims for Ops
- include: "{{ role_path}}/tasks/generate_pvcs.yaml"
- vars:
- es_pvc_names: "{{openshift_logging_facts.elasticsearch_ops.pvcs.keys()}}"
- es_dc_names: "{{openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys()}}"
- es_pvc_size: "{{openshift_logging_es_ops_pvc_size}}"
- es_pvc_prefix: "{{openshift_logging_es_ops_pvc_prefix}}"
- es_cluster_size: "{{openshift_logging_es_ops_cluster_size|int}}"
- es_pvc_dynamic: "{{openshift_logging_es_ops_pvc_dynamic | bool}}"
- es_pv_selector: "{{openshift_logging_es_ops_pv_selector}}"
- es_access_modes: "{{ openshift_logging_storage_access_modes }}"
- when:
- - openshift_logging_use_ops | bool
- check_mode: no
+ when: not openshift_logging_es_ops_pvc_prefix or openshift_logging_es_ops_pvc_prefix == ''
-- name: Init pool of DeploymentConfig names for Elasticsearch Ops
- set_fact: es_ops_dc_pool={{ es_ops_dc_pool | default([]) + [deploy_name] }}
- with_items: "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() }}"
- loop_control:
- loop_var: deploy_name
+- set_fact: es_ops_indices={{ es_ops_indices | default([]) + [item | int - 1] }}
+ with_sequence: count={{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | count }}
when:
- - openshift_logging_use_ops | bool
+ - openshift_logging_use_ops | bool
-- name: Create new DeploymentConfig names for Elasticsearch Ops
- set_fact: es_ops_dc_pool={{es_ops_dc_pool | default([]) + [deploy_name]}}
+- include: set_es_storage.yaml
vars:
- component: es-ops
- es_cluster_name: "{{component}}"
- deploy_name_prefix: "logging-{{component}}"
- deploy_name: "{{deploy_name_prefix}}-{{'abcdefghijklmnopqrstuvwxyz0123456789'|random_word(8)}}"
- cluster_size: "{{openshift_logging_es_ops_cluster_size|int}}"
- with_sequence: count={{ openshift_logging_es_ops_cluster_size | int - openshift_logging_current_es_ops_size | int }}
+ es_component: es-ops
+ es_name: "{{ deployment.0 }}"
+ es_spec: "{{ deployment.1 }}"
+ es_pvc_count: "{{ deployment.2 | int }}"
+ es_node_selector: "{{ openshift_logging_es_ops_nodeselector | default({}) }}"
+ es_pvc_names_count: "{{ openshift_logging_facts.elasticsearch_ops.pvcs.keys() | count }}"
+ es_pvc_size: "{{ openshift_logging_es_ops_pvc_size }}"
+ es_pvc_prefix: "{{ openshift_logging_es_ops_pvc_prefix }}"
+ es_pvc_dynamic: "{{ openshift_logging_es_ops_pvc_dynamic | bool }}"
+ es_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}"
+ es_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}"
+ es_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}"
+ with_together:
+ - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() }}"
+ - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.values() }}"
+ - "{{ es_ops_indices | default([]) }}"
+ loop_control:
+ loop_var: deployment
when:
- - openshift_logging_use_ops | bool
- check_mode: no
+ - openshift_logging_use_ops | bool
+## if it does not then we should create one that does and attach it
-- name: Generate Elasticsearch DeploymentConfig for Ops
- template: src=es.j2 dest={{mktemp.stdout}}/templates/logging-{{deploy_name}}-dc.yaml
+## create a new dc/pvc if needed
+- include: set_es_storage.yaml
vars:
- component: es-ops
- logging_component: elasticsearch
- deploy_name_prefix: "logging-{{component}}"
- image: "{{openshift_logging_image_prefix}}logging-elasticsearch:{{openshift_logging_image_version}}"
- pvc_claim: "{{(es_pvc_pool | length > item.0) | ternary(es_pvc_pool[item.0], None)}}"
- deploy_name: "{{item.1}}"
- es_cluster_name: "{{component}}"
- es_cpu_limit: "{{openshift_logging_es_ops_cpu_limit }}"
- es_memory_limit: "{{openshift_logging_es_ops_memory_limit}}"
- es_node_quorum: "{{es_ops_node_quorum}}"
- es_recover_after_nodes: "{{es_ops_recover_after_nodes}}"
- es_recover_expected_nodes: "{{es_ops_recover_expected_nodes}}"
- openshift_logging_es_recover_after_time: "{{openshift_logging_es_ops_recover_after_time}}"
- es_node_selector: "{{openshift_logging_es_ops_nodeselector | default({}) }}"
- es_storage: "{{openshift_logging_facts|es_storage(deploy_name, pvc_claim,root='elasticsearch_ops')}}"
- es_number_of_shards: "{{ openshift_logging_es_ops_number_of_shards }}"
- es_number_of_replicas: "{{ openshift_logging_es_ops_number_of_replicas }}"
- with_indexed_items:
- - "{{ es_ops_dc_pool | default([]) }}"
+ es_component: es-ops
+ es_name: "logging-es-ops-{{'abcdefghijklmnopqrstuvwxyz0123456789'|random_word(8)}}"
+ es_spec: "{}"
+ es_pvc_count: "{{ item | int - 1 }}"
+ es_node_selector: "{{ openshift_logging_es_ops_nodeselector | default({}) }}"
+ es_pvc_names_count: "{{ [openshift_logging_facts.elasticsearch_ops.pvcs.keys() | count, openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | count] | max }}"
+ es_pvc_size: "{{ openshift_logging_es_ops_pvc_size }}"
+ es_pvc_prefix: "{{ openshift_logging_es_ops_pvc_prefix }}"
+ es_pvc_dynamic: "{{ openshift_logging_es_ops_pvc_dynamic | bool }}"
+ es_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}"
+ es_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}"
+ es_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}"
+ with_sequence: count={{ openshift_logging_es_ops_cluster_size | int - openshift_logging_facts.elasticsearch_ops.deploymentconfigs | count }}
when:
- - openshift_logging_use_ops | bool
- check_mode: no
- changed_when: no
+ - openshift_logging_use_ops | bool
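To make the new loop shape concrete: with_together pairs each existing DeploymentConfig name with its spec and a zero-based index, so each set_es_storage.yaml include receives a three-element "deployment" item roughly like the following (DC name and claim name are hypothetical):

deployment:
- logging-es-abc123de            # deployment.0 -> es_name
- volumes:
    elasticsearch-storage:
      persistentVolumeClaim:
        claimName: logging-es-0  # deployment.1 -> es_spec
- 0                              # deployment.2 -> es_pvc_count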
diff --git a/roles/openshift_logging/tasks/install_fluentd.yaml b/roles/openshift_logging/tasks/install_fluentd.yaml
index 35273829c..6bc405819 100644
--- a/roles/openshift_logging/tasks/install_fluentd.yaml
+++ b/roles/openshift_logging/tasks/install_fluentd.yaml
@@ -32,7 +32,7 @@
{{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy
add-scc-to-user privileged system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd
register: fluentd_output
- failed_when: "fluentd_output.rc == 1 and 'exists' not in fluentd_output.stderr"
+ failed_when: fluentd_output.rc == 1 and 'exists' not in fluentd_output.stderr
check_mode: no
when: fluentd_privileged.stdout.find("system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd") == -1
@@ -49,6 +49,6 @@
{{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy
add-cluster-role-to-user cluster-reader system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd
register: fluentd2_output
- failed_when: "fluentd2_output.rc == 1 and 'exists' not in fluentd2_output.stderr"
+ failed_when: fluentd2_output.rc == 1 and 'exists' not in fluentd2_output.stderr
check_mode: no
when: fluentd_cluster_reader.stdout.find("system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd") == -1
diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml
index 83b68fa77..aec455c22 100644
--- a/roles/openshift_logging/tasks/install_logging.yaml
+++ b/roles/openshift_logging/tasks/install_logging.yaml
@@ -27,6 +27,10 @@
loop_control:
loop_var: install_component
+- name: Install logging mux
+ include: "{{ role_path }}/tasks/install_mux.yaml"
+ when: openshift_logging_use_mux
+
- find: paths={{ mktemp.stdout }}/templates patterns=*.yaml
register: object_def_files
changed_when: no
diff --git a/roles/openshift_logging/tasks/install_mux.yaml b/roles/openshift_logging/tasks/install_mux.yaml
new file mode 100644
index 000000000..91eeb95a1
--- /dev/null
+++ b/roles/openshift_logging/tasks/install_mux.yaml
@@ -0,0 +1,67 @@
+---
+- set_fact: mux_ops_host={{ (openshift_logging_use_ops | bool) | ternary(openshift_logging_es_ops_host, openshift_logging_es_host) }}
+ check_mode: no
+
+- set_fact: mux_ops_port={{ (openshift_logging_use_ops | bool) | ternary(openshift_logging_es_ops_port, openshift_logging_es_port) }}
+ check_mode: no
+
+- name: Check mux current replica count
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get dc/logging-mux
+ -o jsonpath='{.spec.replicas}' -n {{openshift_logging_namespace}}
+ register: mux_replica_count
+ when: not ansible_check_mode
+ ignore_errors: yes
+ changed_when: no
+
+- name: Generating mux deploymentconfig
+ template: src=mux.j2 dest={{mktemp.stdout}}/templates/logging-mux-dc.yaml
+ vars:
+ component: mux
+ logging_component: mux
+ deploy_name: "logging-{{component}}"
+ image: "{{openshift_logging_image_prefix}}logging-fluentd:{{openshift_logging_image_version}}"
+ es_host: logging-es
+ es_port: "{{openshift_logging_es_port}}"
+ ops_host: "{{ mux_ops_host }}"
+ ops_port: "{{ mux_ops_port }}"
+ mux_cpu_limit: "{{openshift_logging_mux_cpu_limit}}"
+ mux_memory_limit: "{{openshift_logging_mux_memory_limit}}"
+ replicas: "{{mux_replica_count.stdout | default (0)}}"
+ mux_node_selector: "{{openshift_logging_mux_nodeselector | default({})}}"
+ check_mode: no
+ changed_when: no
+
+- name: "Check mux hostmount-anyuid permissions"
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ get scc/hostmount-anyuid -o jsonpath='{.users}'
+ register: mux_hostmount_anyuid
+ check_mode: no
+ changed_when: no
+
+- name: "Set hostmount-anyuid permissions for mux"
+ command: >
+ {{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy
+ add-scc-to-user hostmount-anyuid system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd
+ register: mux_output
+ failed_when: mux_output.rc == 1 and 'exists' not in mux_output.stderr
+ check_mode: no
+ when: mux_hostmount_anyuid.stdout.find("system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd") == -1
+
+- name: "Check mux cluster-reader permissions"
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ get clusterrolebinding/cluster-readers -o jsonpath='{.userNames}'
+ register: mux_cluster_reader
+ check_mode: no
+ changed_when: no
+
+- name: "Set cluster-reader permissions for mux"
+ command: >
+ {{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy
+ add-cluster-role-to-user cluster-reader system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd
+ register: mux2_output
+ failed_when: mux2_output.rc == 1 and 'exists' not in mux2_output.stderr
+ check_mode: no
+ when: mux_cluster_reader.stdout.find("system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd") == -1
diff --git a/roles/openshift_logging/tasks/install_support.yaml b/roles/openshift_logging/tasks/install_support.yaml
index da0bbb627..877ce3149 100644
--- a/roles/openshift_logging/tasks/install_support.yaml
+++ b/roles/openshift_logging/tasks/install_support.yaml
@@ -1,17 +1,36 @@
---
# This is the base configuration for installing the other components
-- name: Check for logging project already exists
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get project {{openshift_logging_namespace}} --no-headers
- register: logging_project_result
- ignore_errors: yes
- when: not ansible_check_mode
- changed_when: no
+- name: Set logging project
+ oc_project:
+ state: present
+ name: "{{ openshift_logging_namespace }}"
+ node_selector: "{{ openshift_logging_nodeselector | default(null) }}"
+
+- name: Labelling logging project
+ oc_label:
+ state: present
+ kind: namespace
+ name: "{{ openshift_logging_namespace }}"
+ labels:
+ - key: "{{ item.key }}"
+ value: "{{ item.value }}"
+ with_dict: "{{ openshift_logging_labels | default({}) }}"
+ when:
+ - openshift_logging_labels is defined
+ - openshift_logging_labels is dict
-- name: "Create logging project"
- command: >
- {{ openshift.common.admin_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-project {{openshift_logging_namespace}}
- when: not ansible_check_mode and "not found" in logging_project_result.stderr
+- name: Labelling logging project
+ oc_label:
+ state: present
+ kind: namespace
+ name: "{{ openshift_logging_namespace }}"
+ labels:
+ - key: "{{ openshift_logging_label_key }}"
+ value: "{{ openshift_logging_label_value }}"
+ when:
+ - openshift_logging_label_key is defined
+ - openshift_logging_label_key != ""
+ - openshift_logging_label_value is defined
- name: Create logging cert directory
file: path={{openshift.common.config_base}}/logging state=directory mode=0755
diff --git a/roles/openshift_logging/tasks/main.yaml b/roles/openshift_logging/tasks/main.yaml
index c7f4a2f93..3d8cd3410 100644
--- a/roles/openshift_logging/tasks/main.yaml
+++ b/roles/openshift_logging/tasks/main.yaml
@@ -1,7 +1,7 @@
---
- fail:
msg: Only one Fluentd nodeselector key pair should be provided
- when: "{{ openshift_logging_fluentd_nodeselector.keys() | count }} > 1"
+ when: openshift_logging_fluentd_nodeselector.keys() | count > 1
- name: Set default image variables based on deployment_type
include_vars: "{{ item }}"
@@ -28,6 +28,7 @@
register: local_tmp
changed_when: False
check_mode: no
+ become: no
- debug: msg="Created local temp dir {{local_tmp.stdout}}"
diff --git a/roles/openshift_logging/tasks/oc_apply.yaml b/roles/openshift_logging/tasks/oc_apply.yaml
index cb9509de1..a0ed56ebd 100644
--- a/roles/openshift_logging/tasks/oc_apply.yaml
+++ b/roles/openshift_logging/tasks/oc_apply.yaml
@@ -1,52 +1,52 @@
---
-- name: Checking generation of {{file_content.kind}} {{file_content.metadata.name}}
- command: >
- {{ openshift.common.client_binary }}
- --config={{ kubeconfig }}
- get {{file_content.kind}} {{file_content.metadata.name}}
- -o jsonpath='{.metadata.resourceVersion}'
- -n {{namespace}}
- register: generation_init
- failed_when: "'not found' not in generation_init.stderr and generation_init.stdout == ''"
- changed_when: no
+- oc_obj:
+ kind: "{{ file_content.kind }}"
+ name: "{{ file_content.metadata.name }}"
+ state: present
+ namespace: "{{ namespace }}"
+ files:
+ - "{{ file_name }}"
+ when: file_content.kind not in ["Service", "Route"]
-- name: Applying {{file_name}}
- command: >
- {{ openshift.common.client_binary }} --config={{ kubeconfig }}
- apply -f {{ file_name }}
- -n {{ namespace }}
- register: generation_apply
- failed_when: "'error' in generation_apply.stderr"
- changed_when: no
+## still need to do this for services until the template logic is replaced by oc_*
+- block:
+ - name: Checking generation of {{file_content.kind}} {{file_content.metadata.name}}
+ command: >
+ {{ openshift.common.client_binary }}
+ --config={{ kubeconfig }}
+ get {{file_content.kind}} {{file_content.metadata.name}}
+ -o jsonpath='{.metadata.resourceVersion}'
+ -n {{namespace}}
+ register: generation_init
+ failed_when: "'not found' not in generation_init.stderr and generation_init.stdout == ''"
+ changed_when: no
-- name: Determine change status of {{file_content.kind}} {{file_content.metadata.name}}
- command: >
- {{ openshift.common.client_binary }} --config={{ kubeconfig }}
- get {{file_content.kind}} {{file_content.metadata.name}}
- -o jsonpath='{.metadata.resourceVersion}'
- -n {{namespace}}
- register: generation_changed
- failed_when: "'not found' not in generation_changed.stderr and generation_changed.stdout == ''"
- changed_when: generation_changed.stdout | default (0) | int > generation_init.stdout | default(0) | int
- when:
- - "'field is immutable' not in generation_apply.stderr"
+ - name: Applying {{file_name}}
+ command: >
+ {{ openshift.common.client_binary }} --config={{ kubeconfig }}
+ apply -f {{ file_name }}
+ -n {{ namespace }}
+ register: generation_apply
+ failed_when: "'error' in generation_apply.stderr"
+ changed_when: no
-- name: Removing previous {{file_name}}
- command: >
- {{ openshift.common.client_binary }} --config={{ kubeconfig }}
- delete -f {{ file_name }}
- -n {{ namespace }}
- register: generation_delete
- failed_when: "'error' in generation_delete.stderr"
- changed_when: generation_delete.rc == 0
- when: "'field is immutable' in generation_apply.stderr"
+ - name: Removing previous {{file_name}}
+ command: >
+ {{ openshift.common.client_binary }} --config={{ kubeconfig }}
+ delete -f {{ file_name }}
+ -n {{ namespace }}
+ register: generation_delete
+ failed_when: "'error' in generation_delete.stderr"
+ changed_when: generation_delete.rc == 0
+ when: "'field is immutable' in generation_apply.stderr"
-- name: Recreating {{file_name}}
- command: >
- {{ openshift.common.client_binary }} --config={{ kubeconfig }}
- apply -f {{ file_name }}
- -n {{ namespace }}
- register: generation_apply
- failed_when: "'error' in generation_apply.stderr"
- changed_when: generation_apply.rc == 0
- when: "'field is immutable' in generation_apply.stderr"
+ - name: Recreating {{file_name}}
+ command: >
+ {{ openshift.common.client_binary }} --config={{ kubeconfig }}
+ apply -f {{ file_name }}
+ -n {{ namespace }}
+ register: generation_apply
+ failed_when: "'error' in generation_apply.stderr"
+ changed_when: generation_apply.rc == 0
+ when: "'field is immutable' in generation_apply.stderr"
+ when: file_content.kind in ["Service", "Route"]
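In short, the Service/Route block kept above behaves as follows (a sketch of the existing flow; the clusterIP example is an assumption about the usual immutable field):

# 1. record .metadata.resourceVersion of the existing object
# 2. oc apply the new definition
# 3. if stderr reports "field is immutable" (e.g. a changed clusterIP),
#    delete the object and apply it again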
diff --git a/roles/openshift_logging/tasks/procure_shared_key.yaml b/roles/openshift_logging/tasks/procure_shared_key.yaml
new file mode 100644
index 000000000..056ff6b98
--- /dev/null
+++ b/roles/openshift_logging/tasks/procure_shared_key.yaml
@@ -0,0 +1,25 @@
+---
+- name: Checking for {{ shared_key_info.procure_component }}_shared_key
+ stat: path="{{generated_certs_dir}}/{{ shared_key_info.procure_component }}_shared_key"
+ register: component_shared_key_file
+ check_mode: no
+
+- name: Trying to discover shared key variable name for {{ shared_key_info.procure_component }}
+ set_fact: procure_component_shared_key={{ lookup('env', '{{shared_key_info.procure_component}}' + '_shared_key') }}
+ when:
+ - shared_key_info[ shared_key_info.procure_component + '_shared_key' ] is defined
+ check_mode: no
+
+- name: Creating shared_key for {{ shared_key_info.procure_component }}
+ copy: content="{{'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'|random_word(64)}}"
+ dest="{{generated_certs_dir}}/{{shared_key_info.procure_component}}_shared_key"
+ check_mode: no
+ when:
+ - not component_shared_key_file.stat.exists
+
+- name: Copying shared key for {{ shared_key_info.procure_component }} to generated certs directory
+ copy: content="{{procure_component_shared_key}}" dest="{{generated_certs_dir}}/{{shared_key_info.procure_component}}_shared_key"
+ check_mode: no
+ when:
+ - shared_key_info[ shared_key_info.procure_component + '_shared_key' ] is defined
+ - not component_shared_key_file.stat.exists
diff --git a/roles/openshift_logging/tasks/set_es_storage.yaml b/roles/openshift_logging/tasks/set_es_storage.yaml
new file mode 100644
index 000000000..4afe4e641
--- /dev/null
+++ b/roles/openshift_logging/tasks/set_es_storage.yaml
@@ -0,0 +1,80 @@
+---
+- set_fact: es_storage_type="{{ es_spec.volumes['elasticsearch-storage'] }}"
+ when: es_spec.volumes is defined
+
+- set_fact: es_storage_claim="{{ es_spec.volumes['elasticsearch-storage'].persistentVolumeClaim.claimName }}"
+ when:
+ - es_spec.volumes is defined
+ - es_storage_type.persistentVolumeClaim is defined
+
+- set_fact: es_storage_claim=""
+ when:
+ - not es_spec.volumes is defined or not es_storage_type.persistentVolumeClaim is defined
+
+## take an ES dc and evaluate its storage option
+# if it is a hostmount or emptydir we don't do anything with it
+# if it's a pvc we see if the corresponding pvc matches the provided specs (if they exist)
+- oc_obj:
+ state: list
+ kind: pvc
+ name: "{{ es_storage_claim }}"
+ namespace: "{{ openshift_logging_namespace }}"
+ register: pvc_spec
+ failed_when: pvc_spec.results.stderr is defined
+ when:
+ - es_spec.volumes is defined
+ - es_storage_type.persistentVolumeClaim is defined
+
+- set_fact: pvc_size="{{ pvc_spec.results.results[0].spec.resources.requests.storage }}"
+ when:
+ - pvc_spec.results is defined
+ - pvc_spec.results.results[0].spec is defined
+
+# if not create the pvc and use it
+- block:
+
+ - name: Generating PersistentVolumeClaims
+ template: src=pvc.j2 dest={{mktemp.stdout}}/templates/logging-{{obj_name}}-pvc.yaml
+ vars:
+ obj_name: "{{ es_pvc_prefix }}-{{ es_pvc_names_count | int + es_pvc_count | int }}"
+ size: "{{ es_pvc_size }}"
+ access_modes: "{{ openshift_logging_storage_access_modes }}"
+ pv_selector: "{{ es_pv_selector }}"
+ when: not es_pvc_dynamic | bool
+ check_mode: no
+ changed_when: no
+
+ - name: Generating PersistentVolumeClaims - Dynamic
+ template: src=pvc.j2 dest={{mktemp.stdout}}/templates/logging-{{obj_name}}-pvc.yaml
+ vars:
+ obj_name: "{{ es_pvc_prefix }}-{{ es_pvc_names_count | int + es_pvc_count | int }}"
+ annotations:
+ volume.alpha.kubernetes.io/storage-class: "dynamic"
+ size: "{{ es_pvc_size }}"
+ access_modes: "{{ openshift_logging_storage_access_modes }}"
+ pv_selector: "{{ es_pv_selector }}"
+ when: es_pvc_dynamic | bool
+ check_mode: no
+ changed_when: no
+
+ - set_fact: es_storage_claim="{{ es_pvc_prefix }}-{{ es_pvc_names_count | int + es_pvc_count | int }}"
+
+ when:
+ - es_pvc_size | search('^\d.*')
+ - not es_spec.volumes is defined or not es_storage_claim | search( es_pvc_prefix ) or ( not pvc_size | search( es_pvc_size ) and not es_pvc_size | search( pvc_size ) )
+
+- name: Generate Elasticsearch DeploymentConfig
+ template: src=es.j2 dest={{mktemp.stdout}}/templates/logging-{{deploy_name}}-dc.yaml
+ vars:
+ component: "{{ es_component }}"
+ deploy_name: "{{ es_name }}"
+ logging_component: elasticsearch
+ deploy_name_prefix: "logging-{{ es_component }}"
+ image: "{{openshift_logging_image_prefix}}logging-elasticsearch:{{openshift_logging_image_version}}"
+ es_cluster_name: "{{component}}"
+ es_cpu_limit: "{{ es_cpu_limit }}"
+ es_memory_limit: "{{ es_memory_limit }}"
+ es_node_selector: "{{ es_node_selector }}"
+ es_storage: "{{ openshift_logging_facts | es_storage( es_name, es_storage_claim ) }}"
+ check_mode: no
+ changed_when: no
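A worked pass through the reuse check above, with hypothetical values:

# es_pvc_prefix:    logging-es
# es_storage_claim: logging-es-0   -> matches the prefix
# pvc_size:         10Gi
# es_pvc_size:      10Gi           -> sizes match, so the existing claim is kept
# If the prefix or size did not match (and es_pvc_size starts with a digit, i.e.
# looks like a real size), the block instead templates a new PVC named
# "{{ es_pvc_prefix }}-{{ es_pvc_names_count | int + es_pvc_count | int }}"
# and the DeploymentConfig is generated against that claim.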
diff --git a/roles/openshift_logging/tasks/start_cluster.yaml b/roles/openshift_logging/tasks/start_cluster.yaml
index edbb62c3e..c1592b830 100644
--- a/roles/openshift_logging/tasks/start_cluster.yaml
+++ b/roles/openshift_logging/tasks/start_cluster.yaml
@@ -21,6 +21,29 @@
loop_control:
loop_var: fluentd_host
+- name: Retrieve mux
+ oc_obj:
+ state: list
+ kind: dc
+ selector: "component=mux"
+ namespace: "{{openshift_logging_namespace}}"
+ register: mux_dc
+ when: openshift_logging_use_mux
+
+- name: start mux
+ oc_scale:
+ kind: dc
+ name: "{{ object }}"
+ namespace: "{{openshift_logging_namespace}}"
+ replicas: "{{ openshift_logging_mux_replica_count | default (1) }}"
+ with_items: "{{ mux_dc.results.results[0]['items'] | map(attribute='metadata.name') | list if 'results' in mux_dc else [] }}"
+ loop_control:
+ loop_var: object
+ when:
+ - mux_dc.results is defined
+ - mux_dc.results.results is defined
+ - openshift_logging_use_mux
+
- name: Retrieve elasticsearch
oc_obj:
state: list
diff --git a/roles/openshift_logging/tasks/stop_cluster.yaml b/roles/openshift_logging/tasks/stop_cluster.yaml
index 4b3722e29..f4b419d84 100644
--- a/roles/openshift_logging/tasks/stop_cluster.yaml
+++ b/roles/openshift_logging/tasks/stop_cluster.yaml
@@ -21,6 +21,26 @@
loop_control:
loop_var: fluentd_host
+- name: Retrieve mux
+ oc_obj:
+ state: list
+ kind: dc
+ selector: "component=mux"
+ namespace: "{{openshift_logging_namespace}}"
+ register: mux_dc
+ when: openshift_logging_use_mux
+
+- name: stop mux
+ oc_scale:
+ kind: dc
+ name: "{{ object }}"
+ namespace: "{{openshift_logging_namespace}}"
+ replicas: 0
+ with_items: "{{ mux_dc.results.results[0]['items'] | map(attribute='metadata.name') | list if 'results' in mux_dc else [] }}"
+ loop_control:
+ loop_var: object
+ when: openshift_logging_use_mux
+
- name: Retrieve elasticsearch
oc_obj:
state: list
diff --git a/roles/openshift_logging/tasks/update_master_config.yaml b/roles/openshift_logging/tasks/update_master_config.yaml
index cef835668..10f522b61 100644
--- a/roles/openshift_logging/tasks/update_master_config.yaml
+++ b/roles/openshift_logging/tasks/update_master_config.yaml
@@ -4,6 +4,9 @@
dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
yaml_key: assetConfig.loggingPublicURL
yaml_value: "https://{{ openshift_logging_kibana_hostname }}"
- notify: restart master
+ notify:
+ - restart master
+ - restart master api
+ - restart master controllers
tags:
- - update_master_config
+ - update_master_config
diff --git a/roles/openshift_logging/templates/curator.j2 b/roles/openshift_logging/templates/curator.j2
index a0fefd882..c6284166b 100644
--- a/roles/openshift_logging/templates/curator.j2
+++ b/roles/openshift_logging/templates/curator.j2
@@ -89,9 +89,6 @@ spec:
- name: config
mountPath: /etc/curator/settings
readOnly: true
- - name: elasticsearch-storage
- mountPath: /elasticsearch/persistent
- readOnly: true
volumes:
- name: certs
secret:
@@ -99,5 +96,3 @@ spec:
- name: config
configMap:
name: logging-curator
- - name: elasticsearch-storage
- emptyDir: {}
diff --git a/roles/openshift_logging/templates/elasticsearch.yml.j2 b/roles/openshift_logging/templates/elasticsearch.yml.j2
index 93c4d854c..355642cb7 100644
--- a/roles/openshift_logging/templates/elasticsearch.yml.j2
+++ b/roles/openshift_logging/templates/elasticsearch.yml.j2
@@ -28,11 +28,10 @@ cloud:
discovery:
type: kubernetes
zen.ping.multicast.enabled: false
- zen.minimum_master_nodes: {{es_min_masters}}
+ zen.minimum_master_nodes: ${NODE_QUORUM}
gateway:
- expected_master_nodes: ${NODE_QUORUM}
- recover_after_nodes: ${RECOVER_AFTER_NODES}
+ recover_after_nodes: ${NODE_QUORUM}
expected_nodes: ${RECOVER_EXPECTED_NODES}
recover_after_time: ${RECOVER_AFTER_TIME}
diff --git a/roles/openshift_logging/templates/es.j2 b/roles/openshift_logging/templates/es.j2
index 16185fc1d..680c16cf4 100644
--- a/roles/openshift_logging/templates/es.j2
+++ b/roles/openshift_logging/templates/es.j2
@@ -78,9 +78,6 @@ spec:
name: "NODE_QUORUM"
value: "{{es_node_quorum | int}}"
-
- name: "RECOVER_AFTER_NODES"
- value: "{{es_recover_after_nodes}}"
- -
name: "RECOVER_EXPECTED_NODES"
value: "{{es_recover_expected_nodes}}"
-
@@ -95,6 +92,13 @@ spec:
readOnly: true
- name: elasticsearch-storage
mountPath: /elasticsearch/persistent
+ readinessProbe:
+ exec:
+ command:
+ - "/usr/share/elasticsearch/probe/readiness.sh"
+ initialDelaySeconds: 5
+ timeoutSeconds: 4
+ periodSeconds: 5
volumes:
- name: elasticsearch
secret:
diff --git a/roles/openshift_logging/templates/fluentd.j2 b/roles/openshift_logging/templates/fluentd.j2
index 0bf1686ad..5c93d823e 100644
--- a/roles/openshift_logging/templates/fluentd.j2
+++ b/roles/openshift_logging/templates/fluentd.j2
@@ -59,6 +59,14 @@ spec:
- name: dockercfg
mountPath: /etc/sysconfig/docker
readOnly: true
+ - name: dockerdaemoncfg
+ mountPath: /etc/docker
+ readOnly: true
+{% if openshift_logging_use_mux_client | bool %}
+ - name: muxcerts
+ mountPath: /etc/fluent/muxkeys
+ readOnly: true
+{% endif %}
env:
- name: "K8S_HOST_URL"
value: "{{openshift_logging_master_url}}"
@@ -122,6 +130,8 @@ spec:
value: "{{openshift_logging_fluentd_journal_source | default('')}}"
- name: "JOURNAL_READ_FROM_HEAD"
value: "{{openshift_logging_fluentd_journal_read_from_head|lower}}"
+ - name: "USE_MUX_CLIENT"
+ value: "{{openshift_logging_use_mux_client| default('false')}}"
volumes:
- name: runlogjournal
hostPath:
@@ -147,3 +157,11 @@ spec:
- name: dockercfg
hostPath:
path: /etc/sysconfig/docker
+ - name: dockerdaemoncfg
+ hostPath:
+ path: /etc/docker
+{% if openshift_logging_use_mux_client | bool %}
+ - name: muxcerts
+ secret:
+ secretName: logging-mux
+{% endif %}
diff --git a/roles/openshift_logging/templates/kibana.j2 b/roles/openshift_logging/templates/kibana.j2
index e6ecf82ff..25fab9ac4 100644
--- a/roles/openshift_logging/templates/kibana.j2
+++ b/roles/openshift_logging/templates/kibana.j2
@@ -44,15 +44,19 @@ spec:
{% if kibana_cpu_limit is not none %}
cpu: "{{kibana_cpu_limit}}"
{% endif %}
-{% if kibana_memory_limit is not none %}
- memory: "{{kibana_memory_limit}}"
-{% endif %}
+ memory: "{{kibana_memory_limit | default('736Mi') }}"
{% endif %}
env:
- name: "ES_HOST"
value: "{{es_host}}"
- name: "ES_PORT"
value: "{{es_port}}"
+ -
+ name: "KIBANA_MEMORY_LIMIT"
+ valueFrom:
+ resourceFieldRef:
+ containerName: kibana
+ resource: limits.memory
volumeMounts:
- name: kibana
mountPath: /etc/kibana/keys
@@ -67,9 +71,7 @@ spec:
{% if kibana_proxy_cpu_limit is not none %}
cpu: "{{kibana_proxy_cpu_limit}}"
{% endif %}
-{% if kibana_proxy_memory_limit is not none %}
- memory: "{{kibana_proxy_memory_limit}}"
-{% endif %}
+ memory: "{{kibana_proxy_memory_limit | default('96Mi') }}"
{% endif %}
ports:
-
@@ -103,6 +105,27 @@ spec:
-
name: "OAP_DEBUG"
value: "{{openshift_logging_kibana_proxy_debug}}"
+ -
+ name: "OAP_OAUTH_SECRET_FILE"
+ value: "/secret/oauth-secret"
+ -
+ name: "OAP_SERVER_CERT_FILE"
+ value: "/secret/server-cert"
+ -
+ name: "OAP_SERVER_KEY_FILE"
+ value: "/secret/server-key"
+ -
+ name: "OAP_SERVER_TLS_FILE"
+ value: "/secret/server-tls.json"
+ -
+ name: "OAP_SESSION_SECRET_FILE"
+ value: "/secret/session-secret"
+ -
+ name: "OCP_AUTH_PROXY_MEMORY_LIMIT"
+ valueFrom:
+ resourceFieldRef:
+ containerName: kibana-proxy
+ resource: limits.memory
volumeMounts:
- name: kibana-proxy
mountPath: /secret
diff --git a/roles/openshift_logging/templates/mux.j2 b/roles/openshift_logging/templates/mux.j2
new file mode 100644
index 000000000..41e6abd52
--- /dev/null
+++ b/roles/openshift_logging/templates/mux.j2
@@ -0,0 +1,121 @@
+apiVersion: "v1"
+kind: "DeploymentConfig"
+metadata:
+ name: "{{deploy_name}}"
+ labels:
+ provider: openshift
+ component: "{{component}}"
+ logging-infra: "{{logging_component}}"
+spec:
+ replicas: {{replicas|default(0)}}
+ selector:
+ provider: openshift
+ component: "{{component}}"
+ logging-infra: "{{logging_component}}"
+ strategy:
+ rollingParams:
+ intervalSeconds: 1
+ timeoutSeconds: 600
+ updatePeriodSeconds: 1
+ type: Rolling
+ template:
+ metadata:
+ name: "{{deploy_name}}"
+ labels:
+ logging-infra: "{{logging_component}}"
+ provider: openshift
+ component: "{{component}}"
+ spec:
+ serviceAccountName: aggregated-logging-fluentd
+{% if mux_node_selector is iterable and mux_node_selector | length > 0 %}
+ nodeSelector:
+{% for key, value in mux_node_selector.iteritems() %}
+ {{key}}: "{{value}}"
+{% endfor %}
+{% endif %}
+ containers:
+ - name: "mux"
+ image: {{image}}
+ imagePullPolicy: Always
+{% if (mux_memory_limit is defined and mux_memory_limit is not none) or (mux_cpu_limit is defined and mux_cpu_limit is not none) %}
+ resources:
+ limits:
+{% if mux_cpu_limit is not none %}
+ cpu: "{{mux_cpu_limit}}"
+{% endif %}
+{% if mux_memory_limit is not none %}
+ memory: "{{mux_memory_limit}}"
+{% endif %}
+{% endif %}
+ ports:
+ - containerPort: "{{ openshift_logging_mux_port }}"
+ name: mux-forward
+ volumeMounts:
+ - name: config
+ mountPath: /etc/fluent/configs.d/user
+ readOnly: true
+ - name: certs
+ mountPath: /etc/fluent/keys
+ readOnly: true
+ - name: dockerhostname
+ mountPath: /etc/docker-hostname
+ readOnly: true
+ - name: localtime
+ mountPath: /etc/localtime
+ readOnly: true
+ - name: muxcerts
+ mountPath: /etc/fluent/muxkeys
+ readOnly: true
+ env:
+ - name: "K8S_HOST_URL"
+ value: "{{openshift_logging_master_url}}"
+ - name: "ES_HOST"
+ value: "{{openshift_logging_es_host}}"
+ - name: "ES_PORT"
+ value: "{{openshift_logging_es_port}}"
+ - name: "ES_CLIENT_CERT"
+ value: "{{openshift_logging_es_client_cert}}"
+ - name: "ES_CLIENT_KEY"
+ value: "{{openshift_logging_es_client_key}}"
+ - name: "ES_CA"
+ value: "{{openshift_logging_es_ca}}"
+ - name: "OPS_HOST"
+ value: "{{ops_host}}"
+ - name: "OPS_PORT"
+ value: "{{ops_port}}"
+ - name: "OPS_CLIENT_CERT"
+ value: "{{openshift_logging_es_ops_client_cert}}"
+ - name: "OPS_CLIENT_KEY"
+ value: "{{openshift_logging_es_ops_client_key}}"
+ - name: "OPS_CA"
+ value: "{{openshift_logging_es_ops_ca}}"
+ - name: "USE_JOURNAL"
+ value: "false"
+ - name: "JOURNAL_SOURCE"
+ value: "{{openshift_logging_fluentd_journal_source | default('')}}"
+ - name: "JOURNAL_READ_FROM_HEAD"
+ value: "{{openshift_logging_fluentd_journal_read_from_head|lower}}"
+ - name: FORWARD_LISTEN_HOST
+ value: "{{ openshift_logging_mux_hostname }}"
+ - name: FORWARD_LISTEN_PORT
+ value: "{{ openshift_logging_mux_port }}"
+ - name: USE_MUX
+ value: "true"
+ - name: MUX_ALLOW_EXTERNAL
+ value: "{{ openshift_logging_mux_allow_external| default('false') }}"
+ volumes:
+ - name: config
+ configMap:
+ name: logging-mux
+ - name: certs
+ secret:
+ secretName: logging-fluentd
+ - name: dockerhostname
+ hostPath:
+ path: /etc/hostname
+ - name: localtime
+ hostPath:
+ path: /etc/localtime
+ - name: muxcerts
+ secret:
+ secretName: logging-mux
diff --git a/roles/openshift_logging/templates/service.j2 b/roles/openshift_logging/templates/service.j2
index 6c4ec0c76..70644a39c 100644
--- a/roles/openshift_logging/templates/service.j2
+++ b/roles/openshift_logging/templates/service.j2
@@ -26,3 +26,9 @@ spec:
{% for key, value in selector.iteritems() %}
{{key}}: {{value}}
{% endfor %}
+{% if externalIPs is defined -%}
+ externalIPs:
+{% for ip in externalIPs %}
+ - {{ ip }}
+{% endfor %}
+{% endif %}
diff --git a/roles/openshift_logging/vars/main.yaml b/roles/openshift_logging/vars/main.yaml
index e06625e3f..e561b41e2 100644
--- a/roles/openshift_logging/vars/main.yaml
+++ b/roles/openshift_logging/vars/main.yaml
@@ -1,12 +1,8 @@
---
openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
-es_node_quorum: "{{openshift_logging_es_cluster_size|int/2 + 1}}"
-es_min_masters_default: "{{ (openshift_logging_es_cluster_size | int / 2 | round(0,'floor') + 1) | int }}"
-es_min_masters: "{{ (openshift_logging_es_cluster_size == 1) | ternary(1, es_min_masters_default)}}"
-es_recover_after_nodes: "{{openshift_logging_es_cluster_size|int - 1}}"
-es_recover_expected_nodes: "{{openshift_logging_es_cluster_size|int}}"
-es_ops_node_quorum: "{{openshift_logging_es_ops_cluster_size|int/2 + 1}}"
-es_ops_recover_after_nodes: "{{openshift_logging_es_ops_cluster_size|int - 1}}"
-es_ops_recover_expected_nodes: "{{openshift_logging_es_ops_cluster_size|int}}"
+es_node_quorum: "{{ (openshift_logging_es_cluster_size | int/2 | round(0,'floor') + 1) | int}}"
+es_recover_expected_nodes: "{{openshift_logging_es_cluster_size | int}}"
+es_ops_node_quorum: "{{ (openshift_logging_es_ops_cluster_size | int/2 | round(0,'floor') + 1) | int}}"
+es_ops_recover_expected_nodes: "{{openshift_logging_es_ops_cluster_size | int}}"
es_log_appenders: ['file', 'console']
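The consolidated quorum expressions above implement the usual majority formula, floor(cluster_size / 2) + 1. Worked examples with hypothetical cluster sizes:

# cluster_size 3 -> floor(3/2) + 1 = 2
# cluster_size 1 -> floor(1/2) + 1 = 1
# cluster_size 5 -> floor(5/2) + 1 = 3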
diff --git a/roles/openshift_logging/vars/openshift-enterprise.yml b/roles/openshift_logging/vars/openshift-enterprise.yml
index 9679d209a..92e68a0a3 100644
--- a/roles/openshift_logging/vars/openshift-enterprise.yml
+++ b/roles/openshift_logging/vars/openshift-enterprise.yml
@@ -1,3 +1,3 @@
---
__openshift_logging_image_prefix: "{{ openshift_hosted_logging_deployer_prefix | default('registry.access.redhat.com/openshift3/') }}"
-__openshift_logging_image_version: "{{ openshift_hosted_logging_deployer_version | default(openshift_release | default ('3.5.0') ) }}"
+__openshift_logging_image_version: "{{ openshift_hosted_logging_deployer_version | default ('3.6.0') }}"
diff --git a/roles/openshift_manageiq/tasks/main.yaml b/roles/openshift_manageiq/tasks/main.yaml
index f202486a5..cfc4e2722 100644
--- a/roles/openshift_manageiq/tasks/main.yaml
+++ b/roles/openshift_manageiq/tasks/main.yaml
@@ -3,24 +3,13 @@
msg: "The openshift_manageiq role requires OpenShift Enterprise 3.1 or Origin 1.1."
when: not openshift.common.version_gte_3_1_or_1_1 | bool
-- name: Copy Configuration to temporary conf
- command: >
- cp {{ openshift.common.config_base }}/master/admin.kubeconfig {{manage_iq_tmp_conf}}
- changed_when: false
-
- name: Add Management Infrastructure project
- command: >
- {{ openshift.common.client_binary }} adm new-project
- management-infra
- --description="Management Infrastructure"
- --config={{manage_iq_tmp_conf}}
- register: osmiq_create_mi_project
- failed_when: "'already exists' not in osmiq_create_mi_project.stderr and osmiq_create_mi_project.rc != 0"
- changed_when: osmiq_create_mi_project.rc == 0
+ oc_project:
+ name: management-infra
+ description: Management Infrastructure
- name: Create Admin and Image Inspector Service Account
oc_serviceaccount:
- kubeconfig: "{{ openshift_master_config_dir }}/admin.kubeconfig"
name: "{{ item }}"
namespace: management-infra
state: present
@@ -28,51 +17,42 @@
- management-admin
- inspector-admin
-- name: Create Cluster Role
- shell: >
- echo {{ manageiq_cluster_role | to_json | quote }} |
- {{ openshift.common.client_binary }} create
- --config={{manage_iq_tmp_conf}}
- -f -
- register: osmiq_create_cluster_role
- failed_when: "'already exists' not in osmiq_create_cluster_role.stderr and osmiq_create_cluster_role.rc != 0"
- changed_when: osmiq_create_cluster_role.rc == 0
+- name: Create manageiq cluster role
+ oc_clusterrole:
+ name: management-infra-admin
+ rules:
+ - apiGroups:
+ - ""
+ resources:
+ - pods/proxy
+ verbs:
+ - "*"
- name: Create Hawkular Metrics Admin Cluster Role
- shell: >
- echo {{ manageiq_metrics_admin_clusterrole | to_json | quote }} |
- {{ openshift.common.client_binary }}
- --config={{manage_iq_tmp_conf}}
- create -f -
- register: oshawkular_create_cluster_role
- failed_when: "'already exists' not in oshawkular_create_cluster_role.stderr and oshawkular_create_cluster_role.rc != 0"
- changed_when: oshawkular_create_cluster_role.rc == 0
- # AUDIT:changed_when_note: Checking the return code is insufficient
- # here. We really need to verify the if the role even exists before
- # we run this task.
+ oc_clusterrole:
+ name: hawkular-metrics-admin
+ rules:
+ - apiGroups:
+ - ""
+ resources:
+ - hawkular-alerts
+ - hawkular-metrics
+ verbs:
+ - "*"
- name: Configure role/user permissions
- command: >
- {{ openshift.common.client_binary }} adm {{item}}
- --config={{manage_iq_tmp_conf}}
- with_items: "{{manage_iq_tasks}}"
- register: osmiq_perm_task
- failed_when: "'already exists' not in osmiq_perm_task.stderr and osmiq_perm_task.rc != 0"
- changed_when: osmiq_perm_task.rc == 0
- # AUDIT:changed_when_note: Checking the return code is insufficient
- # here. We really need to compare the current role/user permissions
- # with their expected state. I think we may have a module for this?
-
+ oc_adm_policy_user:
+ namespace: management-infra
+ resource_name: "{{ item.resource_name }}"
+ resource_kind: "{{ item.resource_kind }}"
+ user: "{{ item.user }}"
+ with_items: "{{ manage_iq_tasks }}"
- name: Configure 3_2 role/user permissions
- command: >
- {{ openshift.common.client_binary }} adm {{item}}
- --config={{manage_iq_tmp_conf}}
+ oc_adm_policy_user:
+ namespace: management-infra
+ resource_name: "{{ item.resource_name }}"
+ resource_kind: "{{ item.resource_kind }}"
+ user: "{{ item.user }}"
with_items: "{{manage_iq_openshift_3_2_tasks}}"
- register: osmiq_perm_3_2_task
- failed_when: osmiq_perm_3_2_task.rc != 0
- changed_when: osmiq_perm_3_2_task.rc == 0
when: openshift.common.version_gte_3_2_or_1_2 | bool
-
-- name: Clean temporary configuration file
- file: path={{manage_iq_tmp_conf}} state=absent
diff --git a/roles/openshift_manageiq/vars/main.yml b/roles/openshift_manageiq/vars/main.yml
index 9936bb126..15d667628 100644
--- a/roles/openshift_manageiq/vars/main.yml
+++ b/roles/openshift_manageiq/vars/main.yml
@@ -1,41 +1,31 @@
---
-openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
-manageiq_cluster_role:
- apiVersion: v1
- kind: ClusterRole
- metadata:
- name: management-infra-admin
- rules:
- - resources:
- - pods/proxy
- verbs:
- - '*'
-
-manageiq_metrics_admin_clusterrole:
- apiVersion: v1
- kind: ClusterRole
- metadata:
- name: hawkular-metrics-admin
- rules:
- - apiGroups:
- - ""
- resources:
- - hawkular-metrics
- - hawkular-alerts
- verbs:
- - '*'
-
-manage_iq_tmp_conf: /tmp/manageiq_admin.kubeconfig
-
manage_iq_tasks:
-- policy add-role-to-user -n management-infra admin -z management-admin
-- policy add-role-to-user -n management-infra management-infra-admin -z management-admin
-- policy add-cluster-role-to-user cluster-reader system:serviceaccount:management-infra:management-admin
-- policy add-scc-to-user privileged system:serviceaccount:management-infra:management-admin
-- policy add-cluster-role-to-user system:image-puller system:serviceaccount:management-infra:inspector-admin
-- policy add-scc-to-user privileged system:serviceaccount:management-infra:inspector-admin
-- policy add-cluster-role-to-user self-provisioner system:serviceaccount:management-infra:management-admin
-- policy add-cluster-role-to-user hawkular-metrics-admin system:serviceaccount:management-infra:management-admin
+- resource_kind: role
+ resource_name: admin
+ user: management-admin
+- resource_kind: role
+ resource_name: management-infra-admin
+ user: management-admin
+- resource_kind: cluster-role
+ resource_name: cluster-reader
+ user: system:serviceaccount:management-infra:management-admin
+- resource_kind: scc
+ resource_name: privileged
+ user: system:serviceaccount:management-infra:management-admin
+- resource_kind: cluster-role
+ resource_name: system:image-puller
+ user: system:serviceaccount:management-infra:inspector-admin
+- resource_kind: scc
+ resource_name: privileged
+ user: system:serviceaccount:management-infra:inspector-admin
+- resource_kind: cluster-role
+ resource_name: self-provisioner
+ user: system:serviceaccount:management-infra:management-admin
+- resource_kind: cluster-role
+ resource_name: hawkular-metrics-admin
+ user: system:serviceaccount:management-infra:management-admin
manage_iq_openshift_3_2_tasks:
-- policy add-cluster-role-to-user system:image-auditor system:serviceaccount:management-infra:management-admin
+- resource_kind: cluster-role
+ resource_name: system:image-auditor
+ user: system:serviceaccount:management-infra:management-admin
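
A rough sketch, not part of the patch: the `oc adm policy` invocations the removed shell tasks used to run, reconstructed from the new structured entries so the mapping to oc_adm_policy_user is easier to follow. The CLI strings mirror the deleted lines above.

    # Reconstructs the old CLI form of each structured manage_iq_tasks entry.
    def to_cli(task):
        kind, name, user = task["resource_kind"], task["resource_name"], task["user"]
        if kind == "role":
            return "oc adm policy add-role-to-user -n management-infra {} -z {}".format(name, user)
        if kind == "cluster-role":
            return "oc adm policy add-cluster-role-to-user {} {}".format(name, user)
        return "oc adm policy add-scc-to-user {} {}".format(name, user)  # scc

    tasks = [
        {"resource_kind": "role", "resource_name": "admin", "user": "management-admin"},
        {"resource_kind": "cluster-role", "resource_name": "cluster-reader",
         "user": "system:serviceaccount:management-infra:management-admin"},
        {"resource_kind": "scc", "resource_name": "privileged",
         "user": "system:serviceaccount:management-infra:management-admin"},
    ]

    for t in tasks:
        print(to_cli(t))
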
diff --git a/roles/openshift_master/files/atomic-openshift-master.service b/roles/openshift_master/files/atomic-openshift-master.service
new file mode 100644
index 000000000..02af4dd16
--- /dev/null
+++ b/roles/openshift_master/files/atomic-openshift-master.service
@@ -0,0 +1,23 @@
+[Unit]
+Description=Atomic OpenShift Master
+Documentation=https://github.com/openshift/origin
+After=network-online.target
+After=etcd.service
+Before=atomic-openshift-node.service
+Requires=network-online.target
+
+[Service]
+Type=notify
+EnvironmentFile=/etc/sysconfig/atomic-openshift-master
+Environment=GOTRACEBACK=crash
+ExecStart=/usr/bin/openshift start master --config=${CONFIG_FILE} $OPTIONS
+LimitNOFILE=131072
+LimitCORE=infinity
+WorkingDirectory=/var/lib/origin/
+SyslogIdentifier=atomic-openshift-master
+Restart=always
+RestartSec=5s
+
+[Install]
+WantedBy=multi-user.target
+WantedBy=atomic-openshift-node.service
diff --git a/roles/openshift_master/files/origin-master.service b/roles/openshift_master/files/origin-master.service
new file mode 100644
index 000000000..cf79dda02
--- /dev/null
+++ b/roles/openshift_master/files/origin-master.service
@@ -0,0 +1,23 @@
+[Unit]
+Description=Origin Master Service
+Documentation=https://github.com/openshift/origin
+After=network-online.target
+After=etcd.service
+Before=origin-node.service
+Requires=network-online.target
+
+[Service]
+Type=notify
+EnvironmentFile=/etc/sysconfig/origin-master
+Environment=GOTRACEBACK=crash
+ExecStart=/usr/bin/openshift start master --config=${CONFIG_FILE} $OPTIONS
+LimitNOFILE=131072
+LimitCORE=infinity
+WorkingDirectory=/var/lib/origin/
+SyslogIdentifier=origin-master
+Restart=always
+RestartSec=5s
+
+[Install]
+WantedBy=multi-user.target
+WantedBy=origin-node.service
diff --git a/roles/openshift_master/tasks/files b/roles/openshift_master/tasks/files
new file mode 120000
index 000000000..feb122881
--- /dev/null
+++ b/roles/openshift_master/tasks/files
@@ -0,0 +1 @@
+../files \ No newline at end of file
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index 98e0da1a2..5522fef26 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -194,7 +194,7 @@
state: stopped
when: openshift_master_ha | bool
register: task_result
- failed_when: "task_result|failed and 'could not' not in task_result.msg|lower"
+ failed_when: task_result|failed and 'could not' not in task_result.msg|lower
- set_fact:
master_service_status_changed: "{{ start_result | changed }}"
diff --git a/roles/openshift_master/tasks/systemd_units.yml b/roles/openshift_master/tasks/systemd_units.yml
index 506c8b129..dfc255b3d 100644
--- a/roles/openshift_master/tasks/systemd_units.yml
+++ b/roles/openshift_master/tasks/systemd_units.yml
@@ -32,6 +32,15 @@
- not openshift.common.is_master_system_container | bool
register: create_master_unit_file
+- name: Install Master service file
+ copy:
+ dest: "/etc/systemd/system/{{ openshift.common.service_type }}-master.service"
+ src: "{{ openshift.common.service_type }}-master.service"
+ register: create_master_unit_file
+ when:
+ - not openshift.common.is_containerized | bool
+ - (openshift.master.ha is not defined or not openshift.master.ha) | bool
+
- command: systemctl daemon-reload
when: create_master_unit_file | changed
@@ -90,6 +99,7 @@
dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-api
line: "{{ item }}"
with_items: "{{ master_api_aws.stdout_lines | default([]) }}"
+ no_log: True
- name: Preserve Master Controllers Proxy Config options
command: grep PROXY /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
index 155abd970..897ee7285 100644
--- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
+++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
@@ -4,9 +4,9 @@ Documentation=https://github.com/openshift/origin
After=etcd_container.service
Wants=etcd_container.service
Before={{ openshift.common.service_type }}-node.service
-After=docker.service
-PartOf=docker.service
-Requires=docker.service
+After={{ openshift.docker.service_name }}.service
+PartOf={{ openshift.docker.service_name }}.service
+Requires={{ openshift.docker.service_name }}.service
[Service]
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-api
@@ -23,5 +23,5 @@ Restart=always
RestartSec=5s
[Install]
-WantedBy=docker.service
+WantedBy={{ openshift.docker.service_name }}.service
WantedBy={{ openshift.common.service_type }}-node.service
diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
index 088e8db43..451f3436a 100644
--- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
+++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
@@ -3,9 +3,9 @@ Description=Atomic OpenShift Master Controllers
Documentation=https://github.com/openshift/origin
Wants={{ openshift.common.service_type }}-master-api.service
After={{ openshift.common.service_type }}-master-api.service
-After=docker.service
-Requires=docker.service
-PartOf=docker.service
+After={{ openshift.docker.service_name }}.service
+Requires={{ openshift.docker.service_name }}.service
+PartOf={{ openshift.docker.service_name }}.service
[Service]
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
@@ -22,4 +22,4 @@ Restart=always
RestartSec=5s
[Install]
-WantedBy=docker.service
+WantedBy={{ openshift.docker.service_name }}.service
diff --git a/roles/openshift_master/templates/master_docker/master.docker.service.j2 b/roles/openshift_master/templates/master_docker/master.docker.service.j2
index 13381cd1a..7f40cb042 100644
--- a/roles/openshift_master/templates/master_docker/master.docker.service.j2
+++ b/roles/openshift_master/templates/master_docker/master.docker.service.j2
@@ -1,7 +1,7 @@
[Unit]
-After=docker.service
-Requires=docker.service
-PartOf=docker.service
+After={{ openshift.docker.service_name }}.service
+Requires={{ openshift.docker.service_name }}.service
+PartOf={{ openshift.docker.service_name }}.service
After=etcd_container.service
Wants=etcd_container.service
@@ -15,4 +15,4 @@ Restart=always
RestartSec=5s
[Install]
-WantedBy=docker.service
+WantedBy={{ openshift.docker.service_name }}.service
diff --git a/roles/openshift_master_certificates/tasks/main.yml b/roles/openshift_master_certificates/tasks/main.yml
index d4c9a96ca..2617efaf1 100644
--- a/roles/openshift_master_certificates/tasks/main.yml
+++ b/roles/openshift_master_certificates/tasks/main.yml
@@ -64,10 +64,10 @@
--signer-key={{ openshift_ca_key }}
--signer-serial={{ openshift_ca_serial }}
--overwrite=false
+ when: item != openshift_ca_host
with_items: "{{ hostvars
| oo_select_keys(groups['oo_masters_to_config'])
- | oo_collect(attribute='inventory_hostname', filters={'master_certs_missing':True})
- | difference([openshift_ca_host])}}"
+ | oo_collect(attribute='inventory_hostname', filters={'master_certs_missing':True}) }}"
delegate_to: "{{ openshift_ca_host }}"
run_once: true
@@ -94,8 +94,8 @@
creates: "{{ openshift_generated_configs_dir }}/master-{{ hostvars[item].openshift.common.hostname }}/openshift-master.kubeconfig"
with_items: "{{ hostvars
| oo_select_keys(groups['oo_masters_to_config'])
- | oo_collect(attribute='inventory_hostname', filters={'master_certs_missing':True})
- | difference([openshift_ca_host])}}"
+ | oo_collect(attribute='inventory_hostname', filters={'master_certs_missing':True}) }}"
+ when: item != openshift_ca_host
delegate_to: "{{ openshift_ca_host }}"
run_once: true
diff --git a/roles/openshift_master_facts/defaults/main.yml b/roles/openshift_master_facts/defaults/main.yml
index f1cbbeb2d..a80313505 100644
--- a/roles/openshift_master_facts/defaults/main.yml
+++ b/roles/openshift_master_facts/defaults/main.yml
@@ -1,2 +1,24 @@
---
openshift_master_default_subdomain: "{{ lookup('oo_option', 'openshift_master_default_subdomain') | default(None, true) }}"
+openshift_master_admission_plugin_config:
+ openshift.io/ImagePolicy:
+ configuration:
+ kind: ImagePolicyConfig
+ apiVersion: v1
+ # To require that all images running on the platform be imported first, you may uncomment the
+      # following rule. Any image that refers to a registry outside of OpenShift will be rejected
+ # unless it points directly to an image digest (myregistry.com/myrepo/image@sha256:ea83bcf...) and that
+ # digest has been imported via the import-image flow.
+ #resolveImages: Required
+ executionRules:
+ - name: execution-denied
+ # Reject all images that have the annotation images.openshift.io/deny-execution set to true.
+ # This annotation may be set by infrastructure that wishes to flag particular images as dangerous
+ onResources:
+ - resource: pods
+ - resource: builds
+ reject: true
+ matchImageAnnotations:
+ - key: images.openshift.io/deny-execution
+ value: "true"
+ skipOnResolutionFailure: true
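
A toy sketch, not the actual admission plugin, of the check the execution-denied rule above configures: reject pods and builds whose image carries the annotation images.openshift.io/deny-execution set to "true".

    # Simplified illustration of the execution-denied rule's matching logic.
    def execution_denied(resource: str, image_annotations: dict) -> bool:
        if resource not in ("pods", "builds"):
            return False
        return image_annotations.get("images.openshift.io/deny-execution") == "true"

    print(execution_denied("pods", {"images.openshift.io/deny-execution": "true"}))  # True -> reject
    print(execution_denied("builds", {}))                                            # False -> allow
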
diff --git a/roles/openshift_master_facts/filter_plugins/openshift_master.py b/roles/openshift_master_facts/filter_plugins/openshift_master.py
index 386f544ea..b5be193d2 100644
--- a/roles/openshift_master_facts/filter_plugins/openshift_master.py
+++ b/roles/openshift_master_facts/filter_plugins/openshift_master.py
@@ -1,6 +1,5 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
-# vim: expandtab:tabstop=4:shiftwidth=4
'''
Custom filters for use in openshift-master
'''
diff --git a/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py b/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py
index 7f7bc4316..b50d6d9db 100644
--- a/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py
+++ b/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py
@@ -40,7 +40,7 @@ class LookupModule(LookupBase):
# pylint: disable=line-too-long
raise AnsibleError("Either OpenShift needs to be installed or openshift_release needs to be specified")
if deployment_type == 'origin':
- if short_version not in ['1.1', '1.2', '1.3', '1.4', '1.5', '1.6', '3.6', 'latest']:
+ if short_version not in ['1.1', '1.2', '1.3', '1.4', '1.5', '3.6', 'latest']:
raise AnsibleError("Unknown short_version %s" % short_version)
elif deployment_type == 'openshift-enterprise':
if short_version not in ['3.1', '3.2', '3.3', '3.4', '3.5', '3.6', 'latest']:
@@ -49,7 +49,7 @@ class LookupModule(LookupBase):
raise AnsibleError("Unknown deployment_type %s" % deployment_type)
if deployment_type == 'origin':
- # convert short_version to enterpise short_version
+ # convert short_version to enterprise short_version
short_version = re.sub('^1.', '3.', short_version)
if short_version == 'latest':
diff --git a/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_priorities.py b/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_priorities.py
index 66e6ecea3..a66cb3c88 100644
--- a/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_priorities.py
+++ b/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_priorities.py
@@ -41,7 +41,7 @@ class LookupModule(LookupBase):
raise AnsibleError("Either OpenShift needs to be installed or openshift_release needs to be specified")
if deployment_type == 'origin':
- if short_version not in ['1.1', '1.2', '1.3', '1.4', '1.5', '1.6', '3.6', 'latest']:
+ if short_version not in ['1.1', '1.2', '1.3', '1.4', '1.5', '3.6', 'latest']:
raise AnsibleError("Unknown short_version %s" % short_version)
elif deployment_type == 'openshift-enterprise':
if short_version not in ['3.1', '3.2', '3.3', '3.4', '3.5', '3.6', 'latest']:
diff --git a/roles/openshift_master_facts/tasks/main.yml b/roles/openshift_master_facts/tasks/main.yml
index 6f8f09b22..79f054b42 100644
--- a/roles/openshift_master_facts/tasks/main.yml
+++ b/roles/openshift_master_facts/tasks/main.yml
@@ -92,7 +92,7 @@
master_count: "{{ openshift_master_count | default(None) }}"
controller_lease_ttl: "{{ osm_controller_lease_ttl | default(None) }}"
master_image: "{{ osm_image | default(None) }}"
- admission_plugin_config: "{{openshift_master_admission_plugin_config | default(None) }}"
+ admission_plugin_config: "{{openshift_master_admission_plugin_config }}"
kube_admission_plugin_config: "{{openshift_master_kube_admission_plugin_config | default(None) }}" # deprecated, merged with admission_plugin_config
oauth_template: "{{ openshift_master_oauth_template | default(None) }}" # deprecated in origin 1.2 / OSE 3.2
oauth_templates: "{{ openshift_master_oauth_templates | default(None) }}"
@@ -128,10 +128,10 @@
- name: Test if scheduler config is readable
fail:
msg: "Unknown scheduler config apiVersion {{ openshift_master_scheduler_config.apiVersion }}"
- when: "{{ openshift_master_scheduler_current_config.apiVersion | default(None) != 'v1' }}"
+ when: openshift_master_scheduler_current_config.apiVersion | default(None) != 'v1'
- name: Set current scheduler predicates and priorities
set_fact:
openshift_master_scheduler_current_predicates: "{{ openshift_master_scheduler_current_config.predicates }}"
openshift_master_scheduler_current_priorities: "{{ openshift_master_scheduler_current_config.priorities }}"
- when: "{{ scheduler_config_stat.stat.exists }}"
+ when: scheduler_config_stat.stat.exists
diff --git a/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py b/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py
index 1fab84c71..4a28fb8f8 100644
--- a/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py
+++ b/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py
@@ -55,6 +55,8 @@ DEFAULT_PREDICATES_1_5 = [
{'name': 'CheckNodeDiskPressure'},
]
+DEFAULT_PREDICATES_3_6 = DEFAULT_PREDICATES_1_5
+
REGION_PREDICATE = {
'name': 'Region',
'argument': {
@@ -75,9 +77,8 @@ TEST_VARS = [
('3.4', 'openshift-enterprise', DEFAULT_PREDICATES_1_4),
('1.5', 'origin', DEFAULT_PREDICATES_1_5),
('3.5', 'openshift-enterprise', DEFAULT_PREDICATES_1_5),
- ('1.6', 'origin', DEFAULT_PREDICATES_1_5),
- ('3.6', 'origin', DEFAULT_PREDICATES_1_5),
- ('3.6', 'openshift-enterprise', DEFAULT_PREDICATES_1_5),
+ ('3.6', 'origin', DEFAULT_PREDICATES_3_6),
+ ('3.6', 'openshift-enterprise', DEFAULT_PREDICATES_3_6),
]
diff --git a/roles/openshift_master_facts/test/openshift_master_facts_default_priorities_tests.py b/roles/openshift_master_facts/test/openshift_master_facts_default_priorities_tests.py
index 1098f9391..97ef2387e 100644
--- a/roles/openshift_master_facts/test/openshift_master_facts_default_priorities_tests.py
+++ b/roles/openshift_master_facts/test/openshift_master_facts_default_priorities_tests.py
@@ -42,6 +42,8 @@ DEFAULT_PRIORITIES_1_5 = [
{'name': 'TaintTolerationPriority', 'weight': 1}
]
+DEFAULT_PRIORITIES_3_6 = DEFAULT_PRIORITIES_1_5
+
ZONE_PRIORITY = {
'name': 'Zone',
'argument': {
@@ -63,9 +65,8 @@ TEST_VARS = [
('3.4', 'openshift-enterprise', DEFAULT_PRIORITIES_1_4),
('1.5', 'origin', DEFAULT_PRIORITIES_1_5),
('3.5', 'openshift-enterprise', DEFAULT_PRIORITIES_1_5),
- ('1.6', 'origin', DEFAULT_PRIORITIES_1_5),
- ('3.6', 'origin', DEFAULT_PRIORITIES_1_5),
- ('3.6', 'openshift-enterprise', DEFAULT_PRIORITIES_1_5),
+ ('3.6', 'origin', DEFAULT_PRIORITIES_3_6),
+ ('3.6', 'openshift-enterprise', DEFAULT_PRIORITIES_3_6),
]
diff --git a/roles/openshift_metrics/README.md b/roles/openshift_metrics/README.md
index f4c61a75e..84503217b 100644
--- a/roles/openshift_metrics/README.md
+++ b/roles/openshift_metrics/README.md
@@ -76,7 +76,7 @@ openshift_metrics_<COMPONENT>_(limits|requests)_(memory|cpu): <VALUE>
```
e.g
```
-openshift_metrics_cassandra_limits_memory: 1G
+openshift_metrics_cassandra_limits_memory: 1Gi
openshift_metrics_hawkular_requests_cpu: 100
```
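
An aside, not part of the patch, on why the README example switches 1G to 1Gi: Kubernetes resource quantities treat the G suffix as decimal (10^9 bytes) and Gi as binary (2^30 bytes).

    # Decimal vs binary memory suffixes used in Kubernetes quantities.
    DECIMAL_G = 10 ** 9
    BINARY_GI = 2 ** 30
    print(DECIMAL_G, BINARY_GI, BINARY_GI - DECIMAL_G)  # 1Gi is ~7% larger than 1G
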
diff --git a/roles/openshift_metrics/files/import_jks_certs.sh b/roles/openshift_metrics/files/import_jks_certs.sh
deleted file mode 100755
index f977b6dd6..000000000
--- a/roles/openshift_metrics/files/import_jks_certs.sh
+++ /dev/null
@@ -1,52 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2014-2015 Red Hat, Inc. and/or its affiliates
-# and other contributors as indicated by the @author tags.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-set -ex
-
-function import_certs() {
- dir=$CERT_DIR
- hawkular_metrics_keystore_password=$(echo $METRICS_KEYSTORE_PASSWD | base64 --decode)
- hawkular_metrics_truststore_password=$(echo $METRICS_TRUSTSTORE_PASSWD | base64 --decode)
- hawkular_alias=`keytool -noprompt -list -keystore $dir/hawkular-metrics.truststore -storepass ${hawkular_metrics_truststore_password} | sed -n '7~2s/,.*$//p'`
-
- if [ ! -f $dir/hawkular-metrics.keystore ]; then
- echo "Creating the Hawkular Metrics keystore from the PEM file"
- keytool -importkeystore -v \
- -srckeystore $dir/hawkular-metrics.pkcs12 \
- -destkeystore $dir/hawkular-metrics.keystore \
- -srcstoretype PKCS12 \
- -deststoretype JKS \
- -srcstorepass $hawkular_metrics_keystore_password \
- -deststorepass $hawkular_metrics_keystore_password
- fi
-
- cert_alias_names=(ca metricca)
-
- for cert_alias in ${cert_alias_names[*]}; do
- if [[ ! ${hawkular_alias[*]} =~ "$cert_alias" ]]; then
- echo "Importing the CA Certificate with alias $cert_alias into the Hawkular Metrics Truststore"
- keytool -noprompt -import -v -trustcacerts -alias $cert_alias \
- -file ${dir}/ca.crt \
- -keystore $dir/hawkular-metrics.truststore \
- -trustcacerts \
- -storepass $hawkular_metrics_truststore_password
- fi
- done
-}
-
-import_certs
diff --git a/roles/openshift_metrics/handlers/main.yml b/roles/openshift_metrics/handlers/main.yml
index ffb812271..69c5a1663 100644
--- a/roles/openshift_metrics/handlers/main.yml
+++ b/roles/openshift_metrics/handlers/main.yml
@@ -4,6 +4,15 @@
when: (openshift.master.ha is not defined or not openshift.master.ha | bool) and (not (master_service_status_changed | default(false) | bool))
notify: Verify API Server
+- name: restart master api
+ systemd: name={{ openshift.common.service_type }}-master-api state=restarted
+ when: (openshift.master.ha is defined and openshift.master.ha | bool) and (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
+ notify: Verify API Server
+
+- name: restart master controllers
+ systemd: name={{ openshift.common.service_type }}-master-controllers state=restarted
+ when: (openshift.master.ha is defined and openshift.master.ha | bool) and (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
+
- name: Verify API Server
# Using curl here since the uri module requires python-httplib2 and
# wait_for port doesn't provide health information.
diff --git a/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml b/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml
index 01fc1ef64..fb4fe2f03 100644
--- a/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml
+++ b/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml
@@ -13,42 +13,27 @@
hostnames: hawkular-cassandra
changed_when: no
-- slurp: src={{ mktemp.stdout }}/hawkular-metrics-truststore.pwd
- register: hawkular_truststore_password
-
-- stat: path="{{mktemp.stdout}}/{{item}}"
- register: pwd_file_stat
- with_items:
- - hawkular-metrics.pwd
- - hawkular-metrics.htpasswd
- changed_when: no
-
-- set_fact:
- pwd_files: "{{pwd_files | default({}) | combine ({item.item: item.stat}) }}"
- with_items: "{{pwd_file_stat.results}}"
- changed_when: no
-
- name: generate password for hawkular metrics
- local_action: copy dest="{{ local_tmp.stdout}}/{{ item }}.pwd" content="{{ 15 | oo_random_word }}"
+ local_action: copy dest="{{ local_tmp.stdout }}/{{ item }}.pwd" content="{{ 15 | oo_random_word }}"
with_items:
- hawkular-metrics
+- local_action: slurp src="{{ local_tmp.stdout }}/hawkular-metrics.pwd"
+ register: hawkular_metrics_pwd
+ no_log: true
+
- name: generate htpasswd file for hawkular metrics
- local_action: >
- shell htpasswd -ci
- '{{ local_tmp.stdout }}/hawkular-metrics.htpasswd' hawkular
- < '{{ local_tmp.stdout }}/hawkular-metrics.pwd'
+ local_action: htpasswd path="{{ local_tmp.stdout }}/hawkular-metrics.htpasswd" name=hawkular password="{{ hawkular_metrics_pwd.content | b64decode }}"
+ no_log: true
- name: copy local generated passwords to target
copy:
- src: "{{local_tmp.stdout}}/{{item}}"
- dest: "{{mktemp.stdout}}/{{item}}"
+ src: "{{ local_tmp.stdout }}/{{ item }}"
+ dest: "{{ mktemp.stdout }}/{{ item }}"
with_items:
- hawkular-metrics.pwd
- hawkular-metrics.htpasswd
-- include: import_jks_certs.yaml
-
- name: read files for the hawkular-metrics secret
shell: >
printf '%s: ' '{{ item }}'
@@ -56,13 +41,11 @@
register: hawkular_secrets
with_items:
- ca.crt
- - hawkular-metrics.crt
- - hawkular-metrics.keystore
- - hawkular-metrics-keystore.pwd
- - hawkular-metrics.truststore
- - hawkular-metrics-truststore.pwd
- hawkular-metrics.pwd
- hawkular-metrics.htpasswd
+ - hawkular-metrics.crt
+ - hawkular-metrics.key
+ - hawkular-metrics.pem
- hawkular-cassandra.crt
- hawkular-cassandra.key
- hawkular-cassandra.pem
@@ -73,42 +56,23 @@
{{ hawkular_secrets.results|map(attribute='stdout')|join('
')|from_yaml }}
-- name: generate hawkular-metrics-secrets secret template
+- name: generate hawkular-metrics-certs secret template
template:
src: secret.j2
- dest: "{{ mktemp.stdout }}/templates/hawkular_metrics_secrets.yaml"
+ dest: "{{ mktemp.stdout }}/templates/hawkular-metrics-certs.yaml"
vars:
- name: hawkular-metrics-secrets
+ name: hawkular-metrics-certs
labels:
- metrics-infra: hawkular-metrics
- data:
- hawkular-metrics.keystore: >
- {{ hawkular_secrets['hawkular-metrics.keystore'] }}
- hawkular-metrics.keystore.password: >
- {{ hawkular_secrets['hawkular-metrics-keystore.pwd'] }}
- hawkular-metrics.truststore: >
- {{ hawkular_secrets['hawkular-metrics.truststore'] }}
- hawkular-metrics.truststore.password: >
- {{ hawkular_secrets['hawkular-metrics-truststore.pwd'] }}
- hawkular-metrics.keystore.alias: "{{ 'hawkular-metrics'|b64encode }}"
- hawkular-metrics.htpasswd.file: >
- {{ hawkular_secrets['hawkular-metrics.htpasswd'] }}
- when: name not in metrics_secrets.stdout_lines
- changed_when: no
-
-- name: generate hawkular-metrics-certificate secret template
- template:
- src: secret.j2
- dest: "{{ mktemp.stdout }}/templates/hawkular_metrics_certificate.yaml"
- vars:
- name: hawkular-metrics-certificate
- labels:
- metrics-infra: hawkular-metrics
+ metrics-infra: hawkular-metrics-certs
+ annotations:
+ service.alpha.openshift.io/originating-service-name: hawkular-metrics
data:
- hawkular-metrics.certificate: >
+ tls.crt: >
{{ hawkular_secrets['hawkular-metrics.crt'] }}
- hawkular-metrics-ca.certificate: >
- {{ hawkular_secrets['ca.crt'] }}
+ tls.key: >
+ {{ hawkular_secrets['hawkular-metrics.key'] }}
+ tls.truststore.crt: >
+ {{ hawkular_secrets['hawkular-cassandra.crt'] }}
when: name not in metrics_secrets.stdout_lines
changed_when: no
@@ -122,6 +86,7 @@
metrics-infra: hawkular-metrics
data:
hawkular-metrics.username: "{{ 'hawkular'|b64encode }}"
+ hawkular-metrics.htpasswd: "{{ hawkular_secrets['hawkular-metrics.htpasswd'] }}"
hawkular-metrics.password: >
{{ hawkular_secrets['hawkular-metrics.pwd'] }}
when: name not in metrics_secrets.stdout_lines
diff --git a/roles/openshift_metrics/tasks/generate_heapster_certificates.yaml b/roles/openshift_metrics/tasks/generate_heapster_certificates.yaml
deleted file mode 100644
index ced2df1d0..000000000
--- a/roles/openshift_metrics/tasks/generate_heapster_certificates.yaml
+++ /dev/null
@@ -1,40 +0,0 @@
----
-- name: generate heapster key/cert
- command: >
- {{ openshift.common.admin_binary }} ca create-server-cert
- --config={{ mktemp.stdout }}/admin.kubeconfig
- --key='{{ mktemp.stdout }}/heapster.key'
- --cert='{{ mktemp.stdout }}/heapster.cert'
- --hostnames=heapster
- --signer-cert='{{ mktemp.stdout }}/ca.crt'
- --signer-key='{{ mktemp.stdout }}/ca.key'
- --signer-serial='{{ mktemp.stdout }}/ca.serial.txt'
-
-- when: "'secret/heapster-secrets' not in metrics_secrets.stdout_lines"
- block:
- - name: read files for the heapster secret
- slurp: src={{ item }}
- register: heapster_secret
- with_items:
- - "{{ mktemp.stdout }}/heapster.cert"
- - "{{ mktemp.stdout }}/heapster.key"
- - "{{ client_ca }}"
- vars:
- custom_ca: "{{ mktemp.stdout }}/heapster_client_ca.crt"
- default_ca: "{{ openshift.common.config_base }}/master/ca-bundle.crt"
- client_ca: "{{ custom_ca|exists|ternary(custom_ca, default_ca) }}"
- - name: generate heapster secret template
- template:
- src: secret.j2
- dest: "{{ mktemp.stdout }}/templates/heapster_secrets.yaml"
- force: no
- vars:
- name: heapster-secrets
- labels:
- metrics-infra: heapster
- data:
- heapster.cert: "{{ heapster_secret.results[0].content }}"
- heapster.key: "{{ heapster_secret.results[1].content }}"
- heapster.client-ca: "{{ heapster_secret.results[2].content }}"
- heapster.allowed-users: >
- {{ openshift_metrics_heapster_allowed_users|b64encode }}
diff --git a/roles/openshift_metrics/tasks/generate_heapster_secrets.yaml b/roles/openshift_metrics/tasks/generate_heapster_secrets.yaml
new file mode 100644
index 000000000..e81d90ae7
--- /dev/null
+++ b/roles/openshift_metrics/tasks/generate_heapster_secrets.yaml
@@ -0,0 +1,14 @@
+---
+- name: generate heapster secret template
+ template:
+ src: secret.j2
+ dest: "{{ mktemp.stdout }}/templates/heapster_secrets.yaml"
+ force: no
+ vars:
+ name: heapster-secrets
+ labels:
+ metrics-infra: heapster
+ data:
+ heapster.allowed-users: >
+ {{ openshift_metrics_heapster_allowed_users|b64encode }}
+ when: "'secret/heapster-secrets' not in metrics_secrets.stdout_lines"
diff --git a/roles/openshift_metrics/tasks/import_jks_certs.yaml b/roles/openshift_metrics/tasks/import_jks_certs.yaml
deleted file mode 100644
index e098145e9..000000000
--- a/roles/openshift_metrics/tasks/import_jks_certs.yaml
+++ /dev/null
@@ -1,37 +0,0 @@
----
-- stat: path="{{mktemp.stdout}}/hawkular-metrics.keystore"
- register: metrics_keystore
- check_mode: no
-
-- stat: path="{{mktemp.stdout}}/hawkular-metrics.truststore"
- register: metrics_truststore
- check_mode: no
-
-- block:
- - slurp: src={{ mktemp.stdout }}/hawkular-metrics-keystore.pwd
- register: metrics_keystore_password
-
- - fetch:
- dest: "{{local_tmp.stdout}}/"
- src: "{{ mktemp.stdout }}/{{item}}"
- flat: yes
- changed_when: False
- with_items:
- - hawkular-metrics.pkcs12
- - hawkular-metrics.crt
- - ca.crt
-
- - local_action: command {{role_path}}/files/import_jks_certs.sh
- environment:
- CERT_DIR: "{{local_tmp.stdout}}"
- METRICS_KEYSTORE_PASSWD: "{{metrics_keystore_password.content}}"
- METRICS_TRUSTSTORE_PASSWD: "{{hawkular_truststore_password.content}}"
- changed_when: False
-
- - copy:
- dest: "{{mktemp.stdout}}/"
- src: "{{item}}"
- with_fileglob: "{{local_tmp.stdout}}/*.*store"
-
- when: not metrics_keystore.stat.exists or
- not metrics_truststore.stat.exists
diff --git a/roles/openshift_metrics/tasks/install_cassandra.yaml b/roles/openshift_metrics/tasks/install_cassandra.yaml
index a467c1a51..3b4e8560f 100644
--- a/roles/openshift_metrics/tasks/install_cassandra.yaml
+++ b/roles/openshift_metrics/tasks/install_cassandra.yaml
@@ -23,7 +23,7 @@
changed_when: false
- set_fact: openshift_metrics_cassandra_pvc_prefix="hawkular-metrics"
- when: "not openshift_metrics_cassandra_pvc_prefix or openshift_metrics_cassandra_pvc_prefix == ''"
+ when: not openshift_metrics_cassandra_pvc_prefix or openshift_metrics_cassandra_pvc_prefix == ''
- name: generate hawkular-cassandra persistent volume claims
template:
diff --git a/roles/openshift_metrics/tasks/install_heapster.yaml b/roles/openshift_metrics/tasks/install_heapster.yaml
index c490bcdd3..0eb852d91 100644
--- a/roles/openshift_metrics/tasks/install_heapster.yaml
+++ b/roles/openshift_metrics/tasks/install_heapster.yaml
@@ -20,9 +20,9 @@
- set_fact:
heapster_sa_secrets: "{{ heapster_sa_secrets + [item] }}"
with_items:
- - hawkular-metrics-certificate
+ - hawkular-metrics-certs
- hawkular-metrics-account
- when: "not {{ openshift_metrics_heapster_standalone | bool }}"
+ when: not openshift_metrics_heapster_standalone | bool
- name: Generating serviceaccount for heapster
template: src=serviceaccount.j2 dest={{mktemp.stdout}}/templates/metrics-{{obj_name}}-sa.yaml
@@ -41,6 +41,8 @@
- {port: 80, targetPort: http-endpoint}
selector:
name: "{{obj_name}}"
+ annotations:
+ service.alpha.openshift.io/serving-cert-secret-name: heapster-certs
labels:
metrics-infra: "{{obj_name}}"
name: "{{obj_name}}"
@@ -64,4 +66,4 @@
namespace: "{{ openshift_metrics_project }}"
changed_when: no
-- include: generate_heapster_certificates.yaml
+- include: generate_heapster_secrets.yaml
diff --git a/roles/openshift_metrics/tasks/install_metrics.yaml b/roles/openshift_metrics/tasks/install_metrics.yaml
index ffe6f63a2..74eb56713 100644
--- a/roles/openshift_metrics/tasks/install_metrics.yaml
+++ b/roles/openshift_metrics/tasks/install_metrics.yaml
@@ -10,11 +10,11 @@
- cassandra
loop_control:
loop_var: include_file
- when: "not {{ openshift_metrics_heapster_standalone | bool }}"
+ when: not openshift_metrics_heapster_standalone | bool
- name: Install Heapster Standalone
include: install_heapster.yaml
- when: "{{ openshift_metrics_heapster_standalone | bool }}"
+ when: openshift_metrics_heapster_standalone | bool
- find: paths={{ mktemp.stdout }}/templates patterns=*.yaml
register: object_def_files
@@ -48,7 +48,7 @@
- name: Scaling down cluster to recognize changes
include: stop_metrics.yaml
- when: "{{ existing_metrics_rc.stdout_lines | length > 0 }}"
+ when: existing_metrics_rc.stdout_lines | length > 0
- name: Scaling up cluster
include: start_metrics.yaml
diff --git a/roles/openshift_metrics/tasks/main.yaml b/roles/openshift_metrics/tasks/main.yaml
index c8d222c60..e8b7bea5c 100644
--- a/roles/openshift_metrics/tasks/main.yaml
+++ b/roles/openshift_metrics/tasks/main.yaml
@@ -19,7 +19,7 @@
- name: Create temp directory for all our templates
file: path={{mktemp.stdout}}/templates state=directory mode=0755
changed_when: False
- when: "{{ openshift_metrics_install_metrics | bool }}"
+ when: openshift_metrics_install_metrics | bool
- name: Create temp directory local on control node
local_action: command mktemp -d
diff --git a/roles/openshift_metrics/tasks/start_metrics.yaml b/roles/openshift_metrics/tasks/start_metrics.yaml
index b5a1c8f06..2037e8dc3 100644
--- a/roles/openshift_metrics/tasks/start_metrics.yaml
+++ b/roles/openshift_metrics/tasks/start_metrics.yaml
@@ -20,7 +20,7 @@
loop_control:
loop_var: object
when: metrics_cassandra_rc is defined
- changed_when: "{{metrics_cassandra_rc | length > 0 }}"
+ changed_when: metrics_cassandra_rc | length > 0
- command: >
{{openshift.common.client_binary}}
@@ -42,7 +42,7 @@
with_items: "{{metrics_metrics_rc.stdout_lines}}"
loop_control:
loop_var: object
- changed_when: "{{metrics_metrics_rc | length > 0 }}"
+ changed_when: metrics_metrics_rc | length > 0
- command: >
{{openshift.common.client_binary}}
diff --git a/roles/openshift_metrics/tasks/stop_metrics.yaml b/roles/openshift_metrics/tasks/stop_metrics.yaml
index f69bb0f11..9a2ce9267 100644
--- a/roles/openshift_metrics/tasks/stop_metrics.yaml
+++ b/roles/openshift_metrics/tasks/stop_metrics.yaml
@@ -41,7 +41,7 @@
with_items: "{{metrics_hawkular_rc.stdout_lines}}"
loop_control:
loop_var: object
- changed_when: "{{metrics_hawkular_rc | length > 0 }}"
+ changed_when: metrics_hawkular_rc | length > 0
- command: >
{{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig
@@ -63,4 +63,4 @@
loop_control:
loop_var: object
when: metrics_cassandra_rc is defined
- changed_when: "{{metrics_cassandra_rc | length > 0 }}"
+ changed_when: metrics_cassandra_rc | length > 0
diff --git a/roles/openshift_metrics/tasks/uninstall_metrics.yaml b/roles/openshift_metrics/tasks/uninstall_metrics.yaml
index 8a6be6237..9a5d52eb6 100644
--- a/roles/openshift_metrics/tasks/uninstall_metrics.yaml
+++ b/roles/openshift_metrics/tasks/uninstall_metrics.yaml
@@ -8,7 +8,7 @@
delete --ignore-not-found --selector=metrics-infra
all,sa,secrets,templates,routes,pvc,rolebindings,clusterrolebindings
register: delete_metrics
- changed_when: "delete_metrics.stdout != 'No resources found'"
+ changed_when: delete_metrics.stdout != 'No resources found'
- name: remove rolebindings
command: >
@@ -16,4 +16,4 @@
delete --ignore-not-found
rolebinding/hawkular-view
clusterrolebinding/heapster-cluster-reader
- changed_when: "delete_metrics.stdout != 'No resources found'"
+ changed_when: delete_metrics.stdout != 'No resources found'
diff --git a/roles/openshift_metrics/tasks/update_master_config.yaml b/roles/openshift_metrics/tasks/update_master_config.yaml
index 20fc45fd4..be1e3c3a0 100644
--- a/roles/openshift_metrics/tasks/update_master_config.yaml
+++ b/roles/openshift_metrics/tasks/update_master_config.yaml
@@ -4,6 +4,9 @@
dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
yaml_key: assetConfig.metricsPublicURL
yaml_value: "https://{{ openshift_metrics_hawkular_hostname}}/hawkular/metrics"
- notify: restart master
+ notify:
+ - restart master
+ - restart master api
+ - restart master controllers
tags:
- - update_master_config
+ - update_master_config
diff --git a/roles/openshift_metrics/templates/hawkular_metrics_rc.j2 b/roles/openshift_metrics/templates/hawkular_metrics_rc.j2
index 361378df3..401db4e58 100644
--- a/roles/openshift_metrics/templates/hawkular_metrics_rc.j2
+++ b/roles/openshift_metrics/templates/hawkular_metrics_rc.j2
@@ -40,24 +40,20 @@ spec:
- "-Dhawkular.metrics.cassandra.nodes=hawkular-cassandra"
- "-Dhawkular.metrics.cassandra.use-ssl"
- "-Dhawkular.metrics.openshift.auth-methods=openshift-oauth,htpasswd"
- - "-Dhawkular.metrics.openshift.htpasswd-file=/secrets/hawkular-metrics.htpasswd.file"
+ - "-Dhawkular.metrics.openshift.htpasswd-file=/hawkular-account/hawkular-metrics.htpasswd"
- "-Dhawkular.metrics.allowed-cors-access-control-allow-headers=authorization"
- "-Dhawkular.metrics.default-ttl={{openshift_metrics_duration}}"
- "-Dhawkular.metrics.admin-tenant=_hawkular_admin"
- "-Dhawkular-alerts.cassandra-nodes=hawkular-cassandra"
- "-Dhawkular-alerts.cassandra-use-ssl"
- "-Dhawkular.alerts.openshift.auth-methods=openshift-oauth,htpasswd"
- - "-Dhawkular.alerts.openshift.htpasswd-file=/secrets/hawkular-metrics.htpasswd.file"
+ - "-Dhawkular.alerts.openshift.htpasswd-file=/hawkular-account/hawkular-metrics.htpasswd"
- "-Dhawkular.alerts.allowed-cors-access-control-allow-headers=authorization"
- "-Dorg.apache.tomcat.util.buf.UDecoder.ALLOW_ENCODED_SLASH=true"
- "-Dorg.apache.catalina.connector.CoyoteAdapter.ALLOW_BACKSLASH=true"
- "-Dcom.datastax.driver.FORCE_NIO=true"
- "-DKUBERNETES_MASTER_URL={{openshift_metrics_master_url}}"
- "-DUSER_WRITE_ACCESS={{openshift_metrics_hawkular_user_write_access}}"
- - "--hmw.keystore=/secrets/hawkular-metrics.keystore"
- - "--hmw.truststore=/secrets/hawkular-metrics.truststore"
- - "--hmw.keystore_password_file=/secrets/hawkular-metrics.keystore.password"
- - "--hmw.truststore_password_file=/secrets/hawkular-metrics.truststore.password"
env:
- name: POD_NAMESPACE
valueFrom:
@@ -67,6 +63,8 @@ spec:
value: "{{ openshift_metrics_master_url }}"
- name: JGROUPS_PASSWORD
value: "{{ 17 | oo_random_word }}"
+ - name: TRUSTSTORE_AUTHORITIES
+ value: "/hawkular-metrics-certs/tls.truststore.crt"
- name: OPENSHIFT_KUBE_PING_NAMESPACE
valueFrom:
fieldRef:
@@ -76,10 +74,10 @@ spec:
- name: STARTUP_TIMEOUT
value: "{{ openshift_metrics_startup_timeout }}"
volumeMounts:
- - name: hawkular-metrics-secrets
- mountPath: "/secrets"
- - name: hawkular-metrics-client-secrets
- mountPath: "/client-secrets"
+ - name: hawkular-metrics-certs
+ mountPath: "/hawkular-metrics-certs"
+ - name: hawkular-metrics-account
+ mountPath: "/hawkular-account"
{% if ((openshift_metrics_hawkular_limits_cpu is defined and openshift_metrics_hawkular_limits_cpu is not none)
or (openshift_metrics_hawkular_limits_memory is defined and openshift_metrics_hawkular_limits_memory is not none)
or (openshift_metrics_hawkular_requests_cpu is defined and openshift_metrics_hawkular_requests_cpu is not none)
@@ -118,9 +116,9 @@ spec:
command:
- "/opt/hawkular/scripts/hawkular-metrics-liveness.py"
volumes:
- - name: hawkular-metrics-secrets
+ - name: hawkular-metrics-certs
secret:
- secretName: hawkular-metrics-secrets
- - name: hawkular-metrics-client-secrets
+ secretName: hawkular-metrics-certs
+ - name: hawkular-metrics-account
secret:
secretName: hawkular-metrics-account
diff --git a/roles/openshift_metrics/templates/heapster.j2 b/roles/openshift_metrics/templates/heapster.j2
index 7c837db4d..ab998c2fb 100644
--- a/roles/openshift_metrics/templates/heapster.j2
+++ b/roles/openshift_metrics/templates/heapster.j2
@@ -34,24 +34,24 @@ spec:
- "heapster-wrapper.sh"
- "--wrapper.allowed_users_file=/secrets/heapster.allowed-users"
- "--source=kubernetes.summary_api:${MASTER_URL}?useServiceAccount=true&kubeletHttps=true&kubeletPort=10250"
- - "--tls_cert=/secrets/heapster.cert"
- - "--tls_key=/secrets/heapster.key"
- - "--tls_client_ca=/secrets/heapster.client-ca"
+ - "--tls_cert=/heapster-certs/tls.crt"
+ - "--tls_key=/heapster-certs/tls.key"
+ - "--tls_client_ca=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
- "--allowed_users=%allowed_users%"
- "--metric_resolution={{openshift_metrics_resolution}}"
{% if not openshift_metrics_heapster_standalone %}
- "--wrapper.username_file=/hawkular-account/hawkular-metrics.username"
- "--wrapper.password_file=/hawkular-account/hawkular-metrics.password"
- "--wrapper.endpoint_check=https://hawkular-metrics:443/hawkular/metrics/status"
- - "--sink=hawkular:https://hawkular-metrics:443?tenant=_system&labelToTenant=pod_namespace&labelNodeId={{openshift_metrics_node_id}}&caCert=/hawkular-cert/hawkular-metrics-ca.certificate&user=%username%&pass=%password%&filter=label(container_name:^system.slice.*|^user.slice)"
+ - "--sink=hawkular:https://hawkular-metrics:443?tenant=_system&labelToTenant=pod_namespace&labelNodeId={{openshift_metrics_node_id}}&caCert=/hawkular-metrics-certs/tls.crt&user=%username%&pass=%password%&filter=label(container_name:^system.slice.*|^user.slice)"
{% endif %}
env:
- name: STARTUP_TIMEOUT
value: "{{ openshift_metrics_startup_timeout }}"
-{% if ((openshift_metrics_heapster_limits_cpu is defined and openshift_metrics_heapster_limits_cpu is not none)
+{% if ((openshift_metrics_heapster_limits_cpu is defined and openshift_metrics_heapster_limits_cpu is not none)
or (openshift_metrics_heapster_limits_memory is defined and openshift_metrics_heapster_limits_memory is not none)
or (openshift_metrics_heapster_requests_cpu is defined and openshift_metrics_heapster_requests_cpu is not none)
- or (openshift_metrics_heapster_requests_memory is defined and openshift_metrics_heapster_requests_memory is not none))
+ or (openshift_metrics_heapster_requests_memory is defined and openshift_metrics_heapster_requests_memory is not none))
%}
resources:
{% if (openshift_metrics_heapster_limits_cpu is not none
@@ -65,8 +65,8 @@ spec:
memory: "{{openshift_metrics_heapster_limits_memory}}"
{% endif %}
{% endif %}
-{% if (openshift_metrics_heapster_requests_cpu is not none
- or openshift_metrics_heapster_requests_memory is not none)
+{% if (openshift_metrics_heapster_requests_cpu is not none
+ or openshift_metrics_heapster_requests_memory is not none)
%}
requests:
{% if openshift_metrics_heapster_requests_cpu is not none %}
@@ -80,9 +80,11 @@ spec:
volumeMounts:
- name: heapster-secrets
mountPath: "/secrets"
+ - name: heapster-certs
+ mountPath: "/heapster-certs"
{% if not openshift_metrics_heapster_standalone %}
- - name: hawkular-metrics-certificate
- mountPath: "/hawkular-cert"
+ - name: hawkular-metrics-certs
+ mountPath: "/hawkular-metrics-certs"
- name: hawkular-metrics-account
mountPath: "/hawkular-account"
readinessProbe:
@@ -94,10 +96,13 @@ spec:
- name: heapster-secrets
secret:
secretName: heapster-secrets
+ - name: heapster-certs
+ secret:
+ secretName: heapster-certs
{% if not openshift_metrics_heapster_standalone %}
- - name: hawkular-metrics-certificate
+ - name: hawkular-metrics-certs
secret:
- secretName: hawkular-metrics-certificate
+ secretName: hawkular-metrics-certs
- name: hawkular-metrics-account
secret:
secretName: hawkular-metrics-account
diff --git a/roles/openshift_metrics/templates/service.j2 b/roles/openshift_metrics/templates/service.j2
index 8df89127b..ce0bc2eec 100644
--- a/roles/openshift_metrics/templates/service.j2
+++ b/roles/openshift_metrics/templates/service.j2
@@ -2,6 +2,12 @@ apiVersion: "v1"
kind: "Service"
metadata:
name: "{{obj_name}}"
+{% if annotations is defined%}
+ annotations:
+{% for key, value in annotations.iteritems() %}
+ {{key}}: {{value}}
+{% endfor %}
+{% endif %}
{% if labels is defined%}
labels:
{% for key, value in labels.iteritems() %}
diff --git a/roles/openshift_metrics/vars/openshift-enterprise.yml b/roles/openshift_metrics/vars/openshift-enterprise.yml
index f28c3ce48..b20957550 100644
--- a/roles/openshift_metrics/vars/openshift-enterprise.yml
+++ b/roles/openshift_metrics/vars/openshift-enterprise.yml
@@ -1,3 +1,3 @@
---
__openshift_metrics_image_prefix: "{{ openshift_hosted_metrics_deployer_prefix | default('registry.access.redhat.com/openshift3/') }}"
-__openshift_metrics_image_version: "{{ openshift_hosted_metrics_deployer_version | default(openshift_release | default ('3.5.0') ) }}"
+__openshift_metrics_image_version: "{{ openshift_hosted_metrics_deployer_version | default ('3.6.0') }}"
diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml
index bd95f8526..bf66ef1d6 100644
--- a/roles/openshift_node/defaults/main.yml
+++ b/roles/openshift_node/defaults/main.yml
@@ -8,4 +8,4 @@ os_firewall_allow:
port: 443/tcp
- service: OpenShift OVS sdn
port: 4789/udp
- when: openshift.node.use_openshift_sdn | bool
+ when: openshift.common.use_openshift_sdn | bool
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index 626248306..656874f56 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -34,6 +34,38 @@
dns_ip: "{{ openshift_dns_ip | default(none) | get_dns_ip(hostvars[inventory_hostname])}}"
env_vars: "{{ openshift_node_env_vars | default(None) }}"
+# https://docs.openshift.com/container-platform/3.4/admin_guide/overcommit.html#disabling-swap-memory
+- name: Check for swap usage
+ command: grep "^[^#].*swap" /etc/fstab
+ # grep: match any lines which don't begin with '#' and contain 'swap'
+ changed_when: false
+ failed_when: false
+ register: swap_result
+
+# Disable Swap Block
+- block:
+
+ - name: Disable swap
+ command: swapoff --all
+
+ - name: Remove swap entries from /etc/fstab
+ replace:
+ dest: /etc/fstab
+ regexp: '(^[^#].*swap.*)'
+ replace: '# \1'
+ backup: yes
+
+ - name: Add notice about disabling swap
+ lineinfile:
+ dest: /etc/fstab
+ line: '# OpenShift-Ansible Installer disabled swap per overcommit guidelines'
+ state: present
+
+ when:
+ - swap_result.stdout_lines | length > 0
+ - openshift_disable_swap | default(true) | bool
+# End Disable Swap Block
+
# We have to add tuned-profiles in the same transaction otherwise we run into depsolving
# problems because the rpms don't pin the version properly. This was fixed in 3.1 packaging.
- name: Install Node package
@@ -115,7 +147,7 @@
- regex: '^AWS_SECRET_ACCESS_KEY='
line: "AWS_SECRET_ACCESS_KEY={{ openshift_cloudprovider_aws_secret_key | default('') }}"
no_log: True
- when: "openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined"
+ when: openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined
notify:
- restart node
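
A small sketch, not part of the patch, of the same match the new "Check for swap usage" task performs with grep "^[^#].*swap" against /etc/fstab: find active (non-commented) swap entries, which the block then comments out.

    # Mirrors the grep pattern used to detect active swap entries in /etc/fstab.
    import re

    FSTAB = """\
    UUID=abcd / xfs defaults 0 0
    /dev/mapper/rhel-swap swap swap defaults 0 0
    # /dev/mapper/old-swap swap swap defaults 0 0
    """

    swap_lines = [l for l in FSTAB.splitlines() if re.match(r"^[^#].*swap", l.strip())]
    print(swap_lines)  # only the uncommented swap entry matches
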
diff --git a/roles/openshift_node/tasks/systemd_units.yml b/roles/openshift_node/tasks/systemd_units.yml
index 52482d09b..f58c803c4 100644
--- a/roles/openshift_node/tasks/systemd_units.yml
+++ b/roles/openshift_node/tasks/systemd_units.yml
@@ -25,6 +25,13 @@
- openshift.common.is_containerized | bool
- not openshift.common.is_node_system_container | bool
+- name: Install Node service file
+ template:
+ dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service"
+ src: "{{ openshift.common.service_type }}-node.service.j2"
+ register: install_node_result
+ when: not openshift.common.is_containerized | bool
+
- name: Create the openvswitch service env file
template:
src: openvswitch.sysconfig.j2
@@ -115,6 +122,5 @@
- name: Reload systemd units
command: systemctl daemon-reload
- when: (openshift.common.is_containerized | bool and (install_node_result | changed or install_ovs_sysconfig | changed or install_node_dep_result | changed)) or install_oom_fix_result | changed
notify:
- restart node
diff --git a/roles/openshift_node/templates/atomic-openshift-node.service.j2 b/roles/openshift_node/templates/atomic-openshift-node.service.j2
new file mode 100644
index 000000000..80232094a
--- /dev/null
+++ b/roles/openshift_node/templates/atomic-openshift-node.service.j2
@@ -0,0 +1,22 @@
+[Unit]
+Description=Atomic OpenShift Node
+After={{ openshift.docker.service_name }}.service
+After=openvswitch.service
+Wants={{ openshift.docker.service_name }}.service
+Documentation=https://github.com/openshift/origin
+
+[Service]
+Type=notify
+EnvironmentFile=/etc/sysconfig/atomic-openshift-node
+Environment=GOTRACEBACK=crash
+ExecStart=/usr/bin/openshift start node --config=${CONFIG_FILE} $OPTIONS
+LimitNOFILE=65536
+LimitCORE=infinity
+WorkingDirectory=/var/lib/origin/
+SyslogIdentifier=atomic-openshift-node
+Restart=always
+RestartSec=5s
+OOMScoreAdjust=-999
+
+[Install]
+WantedBy=multi-user.target
diff --git a/roles/openshift_node/templates/openshift.docker.node.dep.service b/roles/openshift_node/templates/openshift.docker.node.dep.service
index 0fb34cffd..4c47f8c0d 100644
--- a/roles/openshift_node/templates/openshift.docker.node.dep.service
+++ b/roles/openshift_node/templates/openshift.docker.node.dep.service
@@ -1,6 +1,6 @@
[Unit]
-Requires=docker.service
-After=docker.service
+Requires={{ openshift.docker.service_name }}.service
+After={{ openshift.docker.service_name }}.service
PartOf={{ openshift.common.service_type }}-node.service
Before={{ openshift.common.service_type }}-node.service
diff --git a/roles/openshift_node/templates/openshift.docker.node.service b/roles/openshift_node/templates/openshift.docker.node.service
index c42bdb7c3..d89b64b06 100644
--- a/roles/openshift_node/templates/openshift.docker.node.service
+++ b/roles/openshift_node/templates/openshift.docker.node.service
@@ -1,11 +1,11 @@
[Unit]
After={{ openshift.common.service_type }}-master.service
-After=docker.service
+After={{ openshift.docker.service_name }}.service
After=openvswitch.service
-PartOf=docker.service
-Requires=docker.service
+PartOf={{ openshift.docker.service_name }}.service
+Requires={{ openshift.docker.service_name }}.service
{% if openshift.common.use_openshift_sdn %}
-Requires=openvswitch.service
+Wants=openvswitch.service
After=ovsdb-server.service
After=ovs-vswitchd.service
{% endif %}
@@ -25,4 +25,4 @@ Restart=always
RestartSec=5s
[Install]
-WantedBy=docker.service
+WantedBy={{ openshift.docker.service_name }}.service
diff --git a/roles/openshift_node/templates/openvswitch.docker.service b/roles/openshift_node/templates/openvswitch.docker.service
index 1e1f8967d..34aaaabd6 100644
--- a/roles/openshift_node/templates/openvswitch.docker.service
+++ b/roles/openshift_node/templates/openvswitch.docker.service
@@ -1,7 +1,7 @@
[Unit]
-After=docker.service
-Requires=docker.service
-PartOf=docker.service
+After={{ openshift.docker.service_name }}.service
+Requires={{ openshift.docker.service_name }}.service
+PartOf={{ openshift.docker.service_name }}.service
[Service]
EnvironmentFile=/etc/sysconfig/openvswitch
@@ -14,4 +14,4 @@ Restart=always
RestartSec=5s
[Install]
-WantedBy=docker.service
+WantedBy={{ openshift.docker.service_name }}.service
diff --git a/roles/openshift_node/templates/origin-node.service.j2 b/roles/openshift_node/templates/origin-node.service.j2
new file mode 100644
index 000000000..8047301e6
--- /dev/null
+++ b/roles/openshift_node/templates/origin-node.service.j2
@@ -0,0 +1,21 @@
+[Unit]
+Description=Origin Node
+After={{ openshift.docker.service_name }}.service
+Wants={{ openshift.docker.service_name }}.service
+Documentation=https://github.com/openshift/origin
+
+[Service]
+Type=notify
+EnvironmentFile=/etc/sysconfig/origin-node
+Environment=GOTRACEBACK=crash
+ExecStart=/usr/bin/openshift start node --config=${CONFIG_FILE} $OPTIONS
+LimitNOFILE=65536
+LimitCORE=infinity
+WorkingDirectory=/var/lib/origin/
+SyslogIdentifier=origin-node
+Restart=always
+RestartSec=5s
+OOMScoreAdjust=-999
+
+[Install]
+WantedBy=multi-user.target
diff --git a/roles/openshift_node_certificates/handlers/main.yml b/roles/openshift_node_certificates/handlers/main.yml
index 1aa826c09..502f80434 100644
--- a/roles/openshift_node_certificates/handlers/main.yml
+++ b/roles/openshift_node_certificates/handlers/main.yml
@@ -6,6 +6,6 @@
- name: restart docker after updating ca trust
systemd:
- name: docker
+ name: "{{ openshift.docker.service_name }}"
state: restarted
when: not openshift_certificates_redeploy | default(false) | bool
diff --git a/roles/openshift_node_upgrade/meta/main.yml b/roles/openshift_node_upgrade/meta/main.yml
index cd2f362aa..2a36d8945 100644
--- a/roles/openshift_node_upgrade/meta/main.yml
+++ b/roles/openshift_node_upgrade/meta/main.yml
@@ -10,4 +10,5 @@ galaxy_info:
versions:
- 7
dependencies:
+- role: lib_utils
- role: openshift_common
diff --git a/roles/openshift_node_upgrade/tasks/docker/upgrade.yml b/roles/openshift_node_upgrade/tasks/docker/upgrade.yml
index e91891ca9..416cf605a 100644
--- a/roles/openshift_node_upgrade/tasks/docker/upgrade.yml
+++ b/roles/openshift_node_upgrade/tasks/docker/upgrade.yml
@@ -6,20 +6,6 @@
# - docker_version
# - skip_docker_restart
-# We need docker service up to remove all the images, but these services will keep
-# trying to re-start and thus re-pull the images we're trying to delete.
-- name: Stop containerized services
- service: name={{ item }} state=stopped
- with_items:
- - "{{ openshift.common.service_type }}-master"
- - "{{ openshift.common.service_type }}-master-api"
- - "{{ openshift.common.service_type }}-master-controllers"
- - "{{ openshift.common.service_type }}-node"
- - etcd_container
- - openvswitch
- failed_when: false
- when: openshift.common.is_containerized | bool
-
- name: Check Docker image count
shell: "docker images -aq | wc -l"
register: docker_image_count
@@ -45,5 +31,4 @@
- name: Upgrade Docker
package: name=docker{{ '-' + docker_version }} state=present
-- include: restart.yml
- when: not skip_docker_restart | default(False) | bool
+# starting docker happens back in ../main.yml where it calls ../restart.yml
diff --git a/roles/openshift_node_upgrade/tasks/main.yml b/roles/openshift_node_upgrade/tasks/main.yml
index 6ae8dbc12..a41a97e01 100644
--- a/roles/openshift_node_upgrade/tasks/main.yml
+++ b/roles/openshift_node_upgrade/tasks/main.yml
@@ -9,6 +9,28 @@
# - openshift_release
# tasks file for openshift_node_upgrade
+
+- name: Stop node and openvswitch services
+ service:
+ name: "{{ item }}"
+ state: stopped
+ with_items:
+ - "{{ openshift.common.service_type }}-node"
+ - openvswitch
+ failed_when: false
+
+- name: Stop additional containerized services
+ service:
+ name: "{{ item }}"
+ state: stopped
+ with_items:
+ - "{{ openshift.common.service_type }}-master"
+ - "{{ openshift.common.service_type }}-master-controllers"
+ - "{{ openshift.common.service_type }}-master-api"
+ - etcd_container
+ failed_when: false
+ when: openshift.common.is_containerized | bool
+
- include: docker/upgrade.yml
vars:
# We will restart Docker ourselves after everything is ready:
@@ -16,7 +38,6 @@
when:
- l_docker_upgrade is defined
- l_docker_upgrade | bool
- - not openshift.common.is_containerized | bool
- include: "{{ node_config_hook }}"
when: node_config_hook is defined
@@ -67,16 +88,6 @@
state: latest
when: not openshift.common.is_containerized | bool
-- name: Restart openvswitch
- systemd:
- name: openvswitch
- state: started
- when:
- - not openshift.common.is_containerized | bool
-
-# Mandatory Docker restart, ensure all containerized services are running:
-- include: docker/restart.yml
-
- name: Update oreg value
yedit:
src: "{{ openshift.common.config_base }}/node/node-config.yaml"
@@ -84,11 +95,46 @@
value: "{{ oreg_url }}"
when: oreg_url is defined
-- name: Restart rpm node service
- service:
- name: "{{ openshift.common.service_type }}-node"
- state: restarted
- when: not openshift.common.is_containerized | bool
+# https://docs.openshift.com/container-platform/3.4/admin_guide/overcommit.html#disabling-swap-memory
+- name: Check for swap usage
+ command: grep "^[^#].*swap" /etc/fstab
+ # grep: match any lines which don't begin with '#' and contain 'swap'
+ changed_when: false
+ failed_when: false
+ register: swap_result
+
+ # Disable Swap Block
+- block:
+
+ - name: Disable swap
+ command: swapoff --all
+
+ - name: Remove swap entries from /etc/fstab
+ replace:
+ dest: /etc/fstab
+ regexp: '(^[^#].*swap.*)'
+ replace: '# \1'
+ backup: yes
+
+ - name: Add notice about disabling swap
+ lineinfile:
+ dest: /etc/fstab
+ line: '# OpenShift-Ansible Installer disabled swap per overcommit guidelines'
+ state: present
+
+ when:
+ - swap_result.stdout_lines | length > 0
+ - openshift_disable_swap | default(true) | bool
+ # End Disable Swap Block
+
+- name: Reset selinux context
+ command: restorecon -RF {{ openshift.common.data_dir }}/openshift.local.volumes
+ when:
+ - ansible_selinux is defined
+ - ansible_selinux.status == 'enabled'
+
+# Restart all services
+- include: restart.yml
- name: Wait for node to be ready
oc_obj:
diff --git a/roles/openshift_node_upgrade/tasks/docker/restart.yml b/roles/openshift_node_upgrade/tasks/restart.yml
index 176fc3c0b..e576228ba 100644
--- a/roles/openshift_node_upgrade/tasks/docker/restart.yml
+++ b/roles/openshift_node_upgrade/tasks/restart.yml
@@ -6,13 +6,15 @@
# - openshift.master.api_port
- name: Restart docker
- service: name=docker state=restarted
+ service:
+ name: "{{ openshift.docker.service_name }}"
+ state: restarted
- name: Update docker facts
openshift_facts:
role: docker
-- name: Restart containerized services
+- name: Start services
service: name={{ item }} state=started
with_items:
- etcd_container
@@ -22,7 +24,6 @@
- "{{ openshift.common.service_type }}-master-controllers"
- "{{ openshift.common.service_type }}-node"
failed_when: false
- when: openshift.common.is_containerized | bool
- name: Wait for master API to come back online
wait_for:
diff --git a/roles/openshift_node_upgrade/templates/openshift.docker.node.dep.service b/roles/openshift_node_upgrade/templates/openshift.docker.node.dep.service
index 0fb34cffd..4c47f8c0d 100644
--- a/roles/openshift_node_upgrade/templates/openshift.docker.node.dep.service
+++ b/roles/openshift_node_upgrade/templates/openshift.docker.node.dep.service
@@ -1,6 +1,6 @@
[Unit]
-Requires=docker.service
-After=docker.service
+Requires={{ openshift.docker.service_name }}.service
+After={{ openshift.docker.service_name }}.service
PartOf={{ openshift.common.service_type }}-node.service
Before={{ openshift.common.service_type }}-node.service
diff --git a/roles/openshift_node_upgrade/templates/openshift.docker.node.service b/roles/openshift_node_upgrade/templates/openshift.docker.node.service
index 0ff398152..2a099301a 100644
--- a/roles/openshift_node_upgrade/templates/openshift.docker.node.service
+++ b/roles/openshift_node_upgrade/templates/openshift.docker.node.service
@@ -1,11 +1,11 @@
[Unit]
After={{ openshift.common.service_type }}-master.service
-After=docker.service
+After={{ openshift.docker.service_name }}.service
After=openvswitch.service
-PartOf=docker.service
-Requires=docker.service
+PartOf={{ openshift.docker.service_name }}.service
+Requires={{ openshift.docker.service_name }}.service
{% if openshift.common.use_openshift_sdn %}
-Requires=openvswitch.service
+Wants=openvswitch.service
{% endif %}
Wants={{ openshift.common.service_type }}-master.service
Requires={{ openshift.common.service_type }}-node-dep.service
@@ -23,4 +23,4 @@ Restart=always
RestartSec=5s
[Install]
-WantedBy=docker.service
+WantedBy={{ openshift.docker.service_name }}.service
diff --git a/roles/openshift_node_upgrade/templates/openvswitch.docker.service b/roles/openshift_node_upgrade/templates/openvswitch.docker.service
index 1e1f8967d..34aaaabd6 100644
--- a/roles/openshift_node_upgrade/templates/openvswitch.docker.service
+++ b/roles/openshift_node_upgrade/templates/openvswitch.docker.service
@@ -1,7 +1,7 @@
[Unit]
-After=docker.service
-Requires=docker.service
-PartOf=docker.service
+After={{ openshift.docker.service_name }}.service
+Requires={{ openshift.docker.service_name }}.service
+PartOf={{ openshift.docker.service_name }}.service
[Service]
EnvironmentFile=/etc/sysconfig/openvswitch
@@ -14,4 +14,4 @@ Restart=always
RestartSec=5s
[Install]
-WantedBy=docker.service
+WantedBy={{ openshift.docker.service_name }}.service
diff --git a/roles/openshift_provisioners/tasks/install_efs.yaml b/roles/openshift_provisioners/tasks/install_efs.yaml
index 57279c665..b53b6afa1 100644
--- a/roles/openshift_provisioners/tasks/install_efs.yaml
+++ b/roles/openshift_provisioners/tasks/install_efs.yaml
@@ -65,6 +65,6 @@
{{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy
add-scc-to-user anyuid system:serviceaccount:{{openshift_provisioners_project}}:provisioners-efs
register: efs_output
- failed_when: "efs_output.rc == 1 and 'exists' not in efs_output.stderr"
+ failed_when: efs_output.rc == 1 and 'exists' not in efs_output.stderr
check_mode: no
when: efs_anyuid.stdout.find("system:serviceaccount:{{openshift_provisioners_project}}:provisioners-efs") == -1
diff --git a/roles/openshift_repos/files/origin/repos/openshift-ansible-centos-paas-sig.repo b/roles/openshift_repos/files/origin/repos/openshift-ansible-centos-paas-sig.repo
index 124bff09d..09364c26f 100644
--- a/roles/openshift_repos/files/origin/repos/openshift-ansible-centos-paas-sig.repo
+++ b/roles/openshift_repos/files/origin/repos/openshift-ansible-centos-paas-sig.repo
@@ -3,7 +3,7 @@ name=CentOS OpenShift Origin
baseurl=http://mirror.centos.org/centos/7/paas/x86_64/openshift-origin/
enabled=1
gpgcheck=1
-gpgkey=file:///etc/pki/rpm-gpg/openshift-ansible-CentOS-SIG-PaaS
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS
[centos-openshift-origin-testing]
name=CentOS OpenShift Origin Testing
diff --git a/roles/openshift_repos/tasks/main.yaml b/roles/openshift_repos/tasks/main.yaml
index 84a0905cc..023b1a9b7 100644
--- a/roles/openshift_repos/tasks/main.yaml
+++ b/roles/openshift_repos/tasks/main.yaml
@@ -24,15 +24,19 @@
- openshift_additional_repos | length == 0
notify: refresh cache
+ # Note: OpenShift repositories under CentOS may be shipped through the
+ # "centos-release-openshift-origin" package which configures the repository.
+  # This task uses the same file names as that package so that the repository and
+  # GPG key are not installed twice under different file names, and the task
+  # remains idempotent.
- name: Configure origin gpg keys if needed
copy:
src: "{{ item.src }}"
dest: "{{ item.dest }}"
with_items:
- src: origin/gpg_keys/openshift-ansible-CentOS-SIG-PaaS
- dest: /etc/pki/rpm-gpg/
+ dest: /etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS
- src: origin/repos/openshift-ansible-centos-paas-sig.repo
- dest: /etc/yum.repos.d/
+ dest: /etc/yum.repos.d/CentOS-OpenShift-Origin.repo
notify: refresh cache
when:
- ansible_os_family == "RedHat"
@@ -40,4 +44,21 @@
- openshift_deployment_type == 'origin'
- openshift_enable_origin_repo | default(true) | bool
+ # Singleton block
+ - when: r_osr_first_run | default(true)
+ block:
+ - name: Ensure clean repo cache in the event repos have been changed manually
+ debug:
+ msg: "First run of openshift_repos"
+ changed_when: true
+ notify: refresh cache
+
+ - name: Set fact r_osr_first_run false
+ set_fact:
+ r_osr_first_run: false
+
+ # Force running ALL handlers now, because we expect repo cache to be cleared
+ # if changes have been made.
+ - meta: flush_handlers
+
when: not ostree_booted.stat.exists
diff --git a/roles/openshift_storage_glusterfs/README.md b/roles/openshift_storage_glusterfs/README.md
new file mode 100644
index 000000000..7b310dbf8
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/README.md
@@ -0,0 +1,94 @@
+OpenShift GlusterFS Cluster
+===========================
+
+OpenShift GlusterFS Cluster Installation
+
+Requirements
+------------
+
+* Ansible 2.2
+
+Host Groups
+-----------
+
+The following group is expected to be populated for this role to run:
+
+* `[glusterfs]`
+
+Additionally, the following group may be specified either in addition to or
+instead of the above group to deploy a GlusterFS cluster for use by a natively
+hosted Docker registry:
+
+* `[glusterfs_registry]`
+
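+Each host in these groups is expected to supply a `glusterfs_devices` variable
+listing the bare block devices the role may manage, and at least three such
+hosts are required. As a rough sketch (hostnames and device paths below are
+placeholders), an inventory snippet might look like:
+
+```
+[glusterfs]
+node1.example.com glusterfs_devices='[ "/dev/vdb" ]'
+node2.example.com glusterfs_devices='[ "/dev/vdb" ]'
+node3.example.com glusterfs_devices='[ "/dev/vdb" ]'
+```
+
+Optional per-host variables such as `glusterfs_ip`, `glusterfs_hostname` and
+`glusterfs_zone` are also consulted when the heketi topology is generated.
+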
+Role Variables
+--------------
+
+This role has the following variables that control the integration of a
+GlusterFS cluster into a new or existing OpenShift cluster:
+
+| Name                                              | Default value           | Description                             |
+|--------------------------------------------------|-------------------------|-----------------------------------------|
+| openshift_storage_glusterfs_timeout | 300 | Seconds to wait for pods to become ready
+| openshift_storage_glusterfs_namespace | 'default' | Namespace in which to create GlusterFS resources
+| openshift_storage_glusterfs_is_native | True | GlusterFS should be containerized
+| openshift_storage_glusterfs_nodeselector | 'storagenode=glusterfs' | Selector to determine which nodes will host GlusterFS pods in native mode
+| openshift_storage_glusterfs_image | 'gluster/gluster-centos'| Container image to use for GlusterFS pods, enterprise default is 'rhgs3/rhgs-server-rhel7'
+| openshift_storage_glusterfs_version | 'latest' | Container image version to use for GlusterFS pods
+| openshift_storage_glusterfs_wipe | False | Destroy any existing GlusterFS resources and wipe storage devices. **WARNING: THIS WILL DESTROY ANY DATA ON THOSE DEVICES.**
+| openshift_storage_glusterfs_heketi_is_native | True | heketi should be containerized
+| openshift_storage_glusterfs_heketi_image | 'heketi/heketi' | Container image to use for heketi pods, enterprise default is 'rhgs3/rhgs-volmanager-rhel7'
+| openshift_storage_glusterfs_heketi_version | 'latest' | Container image version to use for heketi pods
+| openshift_storage_glusterfs_heketi_admin_key | '' | String to use as secret key for performing heketi commands as admin
+| openshift_storage_glusterfs_heketi_user_key | '' | String to use as secret key for performing heketi commands as user that can only view or modify volumes
+| openshift_storage_glusterfs_heketi_topology_load | True | Load the GlusterFS topology information into heketi
+| openshift_storage_glusterfs_heketi_url | Undefined | URL for the heketi REST API, dynamically determined in native mode
+| openshift_storage_glusterfs_heketi_wipe | False | Destroy any existing heketi resources, defaults to the value of `openshift_storage_glusterfs_wipe`
+
+Each role variable also has a corresponding variable to optionally configure a
+separate GlusterFS cluster for use as storage for an integrated Docker
+registry. These variables start with the prefix
+`openshift_storage_glusterfs_registry_` and, for the most part, default to the
+values in their corresponding non-registry variables. The following variables
+are an exception:
+
+| Name                                               | Default value         | Description                             |
+|---------------------------------------------------|-----------------------|-----------------------------------------|
+| openshift_storage_glusterfs_registry_namespace | registry namespace | Default is to use the hosted registry's namespace, otherwise 'default'
+| openshift_storage_glusterfs_registry_nodeselector | 'storagenode=registry'| This allows for the logical separation of the registry GlusterFS cluster from any regular-use GlusterFS clusters
+
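+For example, to place the registry's GlusterFS pods on nodes labeled for
+registry storage, an inventory might set (illustrative values, not defaults):
+
+```
+openshift_storage_glusterfs_registry_namespace=infra
+openshift_storage_glusterfs_registry_nodeselector='storagenode=registry'
+```
+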
+Additionally, this role's behavior responds to the following registry-specific
+variable:
+
+| Name | Default value | Description |
+|----------------------------------------------|---------------|------------------------------------------------------------------------------|
+| openshift_hosted_registry_glusterfs_swap | False | Whether to swap an existing registry's storage volume for a GlusterFS volume |
+
+Dependencies
+------------
+
+* os_firewall
+* openshift_hosted_facts
+* openshift_repos
+* lib_openshift
+
+Example Playbook
+----------------
+
+```
+- name: Configure GlusterFS hosts
+ hosts: oo_first_master
+ roles:
+ - role: openshift_storage_glusterfs
+ when: groups.oo_glusterfs_to_config | default([]) | count > 0
+```
+
+License
+-------
+
+Apache License, Version 2.0
+
+Author Information
+------------------
+
+Jose A. Rivera (jarrpa@redhat.com)
diff --git a/roles/openshift_storage_glusterfs/defaults/main.yml b/roles/openshift_storage_glusterfs/defaults/main.yml
new file mode 100644
index 000000000..ebe9ca30b
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/defaults/main.yml
@@ -0,0 +1,36 @@
+---
+openshift_storage_glusterfs_timeout: 300
+openshift_storage_glusterfs_namespace: 'default'
+openshift_storage_glusterfs_is_native: True
+openshift_storage_glusterfs_nodeselector: 'storagenode=glusterfs'
+openshift_storage_glusterfs_image: "{{ 'rhgs3/rhgs-server-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'gluster/gluster-centos' | quote }}"
+openshift_storage_glusterfs_version: 'latest'
+openshift_storage_glusterfs_wipe: False
+openshift_storage_glusterfs_heketi_is_native: True
+openshift_storage_glusterfs_heketi_is_missing: True
+openshift_storage_glusterfs_heketi_deploy_is_missing: True
+openshift_storage_glusterfs_heketi_image: "{{ 'rhgs3/rhgs-volmanager-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'heketi/heketi' | quote }}"
+openshift_storage_glusterfs_heketi_version: 'latest'
+openshift_storage_glusterfs_heketi_admin_key: ''
+openshift_storage_glusterfs_heketi_user_key: ''
+openshift_storage_glusterfs_heketi_topology_load: True
+openshift_storage_glusterfs_heketi_wipe: "{{ openshift_storage_glusterfs_wipe }}"
+openshift_storage_glusterfs_heketi_url: "{{ omit }}"
+
+openshift_storage_glusterfs_registry_timeout: "{{ openshift_storage_glusterfs_timeout }}"
+openshift_storage_glusterfs_registry_namespace: "{{ openshift.hosted.registry.namespace | default('default') }}"
+openshift_storage_glusterfs_registry_is_native: "{{ openshift_storage_glusterfs_is_native }}"
+openshift_storage_glusterfs_registry_nodeselector: 'storagenode=registry'
+openshift_storage_glusterfs_registry_image: "{{ openshift_storage_glusterfs_image }}"
+openshift_storage_glusterfs_registry_version: "{{ openshift_storage_glusterfs_version }}"
+openshift_storage_glusterfs_registry_wipe: "{{ openshift_storage_glusterfs_wipe }}"
+openshift_storage_glusterfs_registry_heketi_is_native: "{{ openshift_storage_glusterfs_heketi_is_native }}"
+openshift_storage_glusterfs_registry_heketi_is_missing: "{{ openshift_storage_glusterfs_heketi_is_missing }}"
+openshift_storage_glusterfs_registry_heketi_deploy_is_missing: "{{ openshift_storage_glusterfs_heketi_deploy_is_missing }}"
+openshift_storage_glusterfs_registry_heketi_image: "{{ openshift_storage_glusterfs_heketi_image }}"
+openshift_storage_glusterfs_registry_heketi_version: "{{ openshift_storage_glusterfs_heketi_version }}"
+openshift_storage_glusterfs_registry_heketi_admin_key: "{{ openshift_storage_glusterfs_heketi_admin_key }}"
+openshift_storage_glusterfs_registry_heketi_user_key: "{{ openshift_storage_glusterfs_heketi_user_key }}"
+openshift_storage_glusterfs_registry_heketi_topology_load: "{{ openshift_storage_glusterfs_heketi_topology_load }}"
+openshift_storage_glusterfs_registry_heketi_wipe: "{{ openshift_storage_glusterfs_heketi_wipe }}"
+openshift_storage_glusterfs_registry_heketi_url: "{{ openshift_storage_glusterfs_heketi_url | default(omit) }}"
diff --git a/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml
new file mode 100644
index 000000000..c9945be13
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml
@@ -0,0 +1,115 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: deploy-heketi
+ labels:
+ glusterfs: heketi-template
+ deploy-heketi: support
+ annotations:
+ description: Bootstrap Heketi installation
+ tags: glusterfs,heketi,installation
+labels:
+ template: deploy-heketi
+objects:
+- kind: Service
+ apiVersion: v1
+ metadata:
+ name: deploy-heketi
+ labels:
+ glusterfs: deploy-heketi-service
+ deploy-heketi: support
+ annotations:
+ description: Exposes Heketi service
+ spec:
+ ports:
+ - name: deploy-heketi
+ port: 8080
+ targetPort: 8080
+ selector:
+ name: deploy-heketi
+- kind: Route
+ apiVersion: v1
+ metadata:
+ name: deploy-heketi
+ labels:
+ glusterfs: deploy-heketi-route
+ deploy-heketi: support
+ spec:
+ to:
+ kind: Service
+ name: deploy-heketi
+- kind: DeploymentConfig
+ apiVersion: v1
+ metadata:
+ name: deploy-heketi
+ labels:
+ glusterfs: deploy-heketi-dc
+ deploy-heketi: support
+ annotations:
+ description: Defines how to deploy Heketi
+ spec:
+ replicas: 1
+ selector:
+ name: deploy-heketi
+ triggers:
+ - type: ConfigChange
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: deploy-heketi
+ labels:
+ name: deploy-heketi
+ glusterfs: deploy-heketi-pod
+ deploy-heketi: support
+ spec:
+ serviceAccountName: heketi-service-account
+ containers:
+ - name: deploy-heketi
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ env:
+ - name: HEKETI_USER_KEY
+ value: ${HEKETI_USER_KEY}
+ - name: HEKETI_ADMIN_KEY
+ value: ${HEKETI_ADMIN_KEY}
+ - name: HEKETI_EXECUTOR
+ value: kubernetes
+ - name: HEKETI_FSTAB
+ value: /var/lib/heketi/fstab
+ - name: HEKETI_SNAPSHOT_LIMIT
+ value: '14'
+ - name: HEKETI_KUBE_GLUSTER_DAEMONSET
+ value: '1'
+ ports:
+ - containerPort: 8080
+ volumeMounts:
+ - name: db
+ mountPath: /var/lib/heketi
+ readinessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 3
+ httpGet:
+ path: /hello
+ port: 8080
+ livenessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 30
+ httpGet:
+ path: /hello
+ port: 8080
+ volumes:
+ - name: db
+parameters:
+- name: HEKETI_USER_KEY
+ displayName: Heketi User Secret
+ description: Set secret for those creating volumes as type _user_
+- name: HEKETI_ADMIN_KEY
+ displayName: Heketi Administrator Secret
+ description: Set secret for administration of the Heketi service as user _admin_
+- name: IMAGE_NAME
+  displayName: heketi container name
+ required: True
+- name: IMAGE_VERSION
+  displayName: heketi container version
+ required: True
diff --git a/roles/openshift_storage_glusterfs/files/v3.6/glusterfs-registry-service.yml b/roles/openshift_storage_glusterfs/files/v3.6/glusterfs-registry-service.yml
new file mode 100644
index 000000000..3f8d8f507
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.6/glusterfs-registry-service.yml
@@ -0,0 +1,10 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: glusterfs-registry-endpoints
+spec:
+ ports:
+ - port: 1
+status:
+ loadBalancer: {}
diff --git a/roles/openshift_storage_glusterfs/files/v3.6/glusterfs-template.yml b/roles/openshift_storage_glusterfs/files/v3.6/glusterfs-template.yml
new file mode 100644
index 000000000..c66705752
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.6/glusterfs-template.yml
@@ -0,0 +1,128 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: glusterfs
+ labels:
+ glusterfs: template
+ annotations:
+ description: GlusterFS DaemonSet template
+ tags: glusterfs
+objects:
+- kind: DaemonSet
+ apiVersion: extensions/v1beta1
+ metadata:
+ name: glusterfs
+ labels:
+ glusterfs: daemonset
+ annotations:
+ description: GlusterFS DaemonSet
+ tags: glusterfs
+ spec:
+ selector:
+ matchLabels:
+ glusterfs-node: pod
+ template:
+ metadata:
+ name: glusterfs
+ labels:
+ glusterfs-node: pod
+ spec:
+ nodeSelector:
+ storagenode: glusterfs
+ hostNetwork: true
+ containers:
+ - name: glusterfs
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ imagePullPolicy: IfNotPresent
+ volumeMounts:
+ - name: glusterfs-heketi
+ mountPath: "/var/lib/heketi"
+ - name: glusterfs-run
+ mountPath: "/run"
+ - name: glusterfs-lvm
+ mountPath: "/run/lvm"
+ - name: glusterfs-etc
+ mountPath: "/etc/glusterfs"
+ - name: glusterfs-logs
+ mountPath: "/var/log/glusterfs"
+ - name: glusterfs-config
+ mountPath: "/var/lib/glusterd"
+ - name: glusterfs-dev
+ mountPath: "/dev"
+ - name: glusterfs-misc
+ mountPath: "/var/lib/misc/glusterfsd"
+ - name: glusterfs-cgroup
+ mountPath: "/sys/fs/cgroup"
+ readOnly: true
+ - name: glusterfs-ssl
+ mountPath: "/etc/ssl"
+ readOnly: true
+ securityContext:
+ capabilities: {}
+ privileged: true
+ readinessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 100
+ exec:
+ command:
+ - "/bin/bash"
+ - "-c"
+ - systemctl status glusterd.service
+ periodSeconds: 10
+ successThreshold: 1
+ failureThreshold: 3
+ livenessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 100
+ exec:
+ command:
+ - "/bin/bash"
+ - "-c"
+ - systemctl status glusterd.service
+ periodSeconds: 10
+ successThreshold: 1
+ failureThreshold: 3
+ resources: {}
+ terminationMessagePath: "/dev/termination-log"
+ volumes:
+ - name: glusterfs-heketi
+ hostPath:
+ path: "/var/lib/heketi"
+ - name: glusterfs-run
+ emptyDir: {}
+ - name: glusterfs-lvm
+ hostPath:
+ path: "/run/lvm"
+ - name: glusterfs-etc
+ hostPath:
+ path: "/etc/glusterfs"
+ - name: glusterfs-logs
+ hostPath:
+ path: "/var/log/glusterfs"
+ - name: glusterfs-config
+ hostPath:
+ path: "/var/lib/glusterd"
+ - name: glusterfs-dev
+ hostPath:
+ path: "/dev"
+ - name: glusterfs-misc
+ hostPath:
+ path: "/var/lib/misc/glusterfsd"
+ - name: glusterfs-cgroup
+ hostPath:
+ path: "/sys/fs/cgroup"
+ - name: glusterfs-ssl
+ hostPath:
+ path: "/etc/ssl"
+ restartPolicy: Always
+ terminationGracePeriodSeconds: 30
+ dnsPolicy: ClusterFirst
+ securityContext: {}
+parameters:
+- name: IMAGE_NAME
+ displayName: GlusterFS container name
+ required: True
+- name: IMAGE_VERSION
+  displayName: GlusterFS container version
+ required: True
diff --git a/roles/openshift_storage_glusterfs/files/v3.6/heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.6/heketi-template.yml
new file mode 100644
index 000000000..df045c170
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.6/heketi-template.yml
@@ -0,0 +1,113 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: heketi
+ labels:
+ glusterfs: heketi-template
+ annotations:
+ description: Heketi service deployment template
+ tags: glusterfs,heketi
+labels:
+ template: heketi
+objects:
+- kind: Service
+ apiVersion: v1
+ metadata:
+ name: heketi
+ labels:
+ glusterfs: heketi-service
+ annotations:
+ description: Exposes Heketi service
+ spec:
+ ports:
+ - name: heketi
+ port: 8080
+ targetPort: 8080
+ selector:
+ glusterfs: heketi-pod
+- kind: Route
+ apiVersion: v1
+ metadata:
+ name: heketi
+ labels:
+ glusterfs: heketi-route
+ spec:
+ to:
+ kind: Service
+ name: heketi
+- kind: DeploymentConfig
+ apiVersion: v1
+ metadata:
+ name: heketi
+ labels:
+ glusterfs: heketi-dc
+ annotations:
+ description: Defines how to deploy Heketi
+ spec:
+ replicas: 1
+ selector:
+ glusterfs: heketi-pod
+ triggers:
+ - type: ConfigChange
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: heketi
+ labels:
+ glusterfs: heketi-pod
+ spec:
+ serviceAccountName: heketi-service-account
+ containers:
+ - name: heketi
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: HEKETI_USER_KEY
+ value: ${HEKETI_USER_KEY}
+ - name: HEKETI_ADMIN_KEY
+ value: ${HEKETI_ADMIN_KEY}
+ - name: HEKETI_EXECUTOR
+ value: kubernetes
+ - name: HEKETI_FSTAB
+ value: /var/lib/heketi/fstab
+ - name: HEKETI_SNAPSHOT_LIMIT
+ value: '14'
+ - name: HEKETI_KUBE_GLUSTER_DAEMONSET
+ value: '1'
+ ports:
+ - containerPort: 8080
+ volumeMounts:
+ - name: db
+ mountPath: /var/lib/heketi
+ readinessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 3
+ httpGet:
+ path: /hello
+ port: 8080
+ livenessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 30
+ httpGet:
+ path: /hello
+ port: 8080
+ volumes:
+ - name: db
+ glusterfs:
+ endpoints: heketi-storage-endpoints
+ path: heketidbstorage
+parameters:
+- name: HEKETI_USER_KEY
+ displayName: Heketi User Secret
+ description: Set secret for those creating volumes as type _user_
+- name: HEKETI_ADMIN_KEY
+ displayName: Heketi Administrator Secret
+ description: Set secret for administration of the Heketi service as user _admin_
+- name: IMAGE_NAME
+  displayName: heketi container name
+ required: True
+- name: IMAGE_VERSION
+  displayName: heketi container version
+ required: True
diff --git a/roles/openshift_storage_glusterfs/filter_plugins/openshift_storage_glusterfs.py b/roles/openshift_storage_glusterfs/filter_plugins/openshift_storage_glusterfs.py
new file mode 100644
index 000000000..88801e487
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/filter_plugins/openshift_storage_glusterfs.py
@@ -0,0 +1,23 @@
+'''
+ Openshift Storage GlusterFS class that provides useful filters used in GlusterFS
+'''
+
+
+def map_from_pairs(source, delim="="):
+    ''' Return a dict from a comma-separated string of delim-separated key/value pairs '''
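+    # Illustrative example: map_from_pairs('storagenode=glusterfs') returns
+    # {'storagenode': 'glusterfs'}; multiple pairs are separated by commas.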
+ if source == '':
+ return dict()
+
+    return dict(item.split(delim) for item in source.split(","))
+
+
+# pylint: disable=too-few-public-methods
+class FilterModule(object):
+ ''' OpenShift Storage GlusterFS Filters '''
+
+ # pylint: disable=no-self-use, too-few-public-methods
+ def filters(self):
+ ''' Returns the names of the filters provided by this class '''
+ return {
+ 'map_from_pairs': map_from_pairs
+ }
diff --git a/roles/openshift_storage_glusterfs/meta/main.yml b/roles/openshift_storage_glusterfs/meta/main.yml
new file mode 100644
index 000000000..aab9851f9
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/meta/main.yml
@@ -0,0 +1,15 @@
+---
+galaxy_info:
+ author: Jose A. Rivera
+ description: OpenShift GlusterFS Cluster
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 2.2
+ platforms:
+ - name: EL
+ versions:
+ - 7
+dependencies:
+- role: openshift_hosted_facts
+- role: openshift_repos
+- role: lib_openshift
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
new file mode 100644
index 000000000..fa5fa2cb0
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
@@ -0,0 +1,166 @@
+---
+- name: Verify target namespace exists
+ oc_project:
+ state: present
+ name: "{{ glusterfs_namespace }}"
+ when: glusterfs_is_native or glusterfs_heketi_is_native
+
+- include: glusterfs_deploy.yml
+ when: glusterfs_is_native
+
+- name: Make sure heketi-client is installed
+ package: name=heketi-client state=present
+
+- name: Delete pre-existing heketi resources
+ oc_obj:
+ namespace: "{{ glusterfs_namespace }}"
+ kind: "{{ item.kind }}"
+ name: "{{ item.name | default(omit) }}"
+ selector: "{{ item.selector | default(omit) }}"
+ state: absent
+ with_items:
+ - kind: "template,route,service,dc,jobs,secret"
+ selector: "deploy-heketi"
+ - kind: "template,route,service,dc"
+ name: "heketi"
+ - kind: "svc,ep"
+ name: "heketi-storage-endpoints"
+ - kind: "sa"
+ name: "heketi-service-account"
+ failed_when: False
+ when: glusterfs_heketi_wipe
+
+- name: Wait for deploy-heketi pods to terminate
+ oc_obj:
+ namespace: "{{ glusterfs_namespace }}"
+ kind: pod
+ state: list
+ selector: "glusterfs=deploy-heketi-pod"
+ register: heketi_pod
+ until: "heketi_pod.results.results[0]['items'] | count == 0"
+ delay: 10
+ retries: "{{ (glusterfs_timeout / 10) | int }}"
+ when: glusterfs_heketi_wipe
+
+- name: Wait for heketi pods to terminate
+ oc_obj:
+ namespace: "{{ glusterfs_namespace }}"
+ kind: pod
+ state: list
+ selector: "glusterfs=heketi-pod"
+ register: heketi_pod
+ until: "heketi_pod.results.results[0]['items'] | count == 0"
+ delay: 10
+ retries: "{{ (glusterfs_timeout / 10) | int }}"
+ when: glusterfs_heketi_wipe
+
+- name: Create heketi service account
+ oc_serviceaccount:
+ namespace: "{{ glusterfs_namespace }}"
+ name: heketi-service-account
+ state: present
+ when: glusterfs_heketi_is_native
+
+- name: Add heketi service account to privileged SCC
+ oc_adm_policy_user:
+ user: "system:serviceaccount:{{ glusterfs_namespace }}:heketi-service-account"
+ resource_kind: scc
+ resource_name: privileged
+ state: present
+ when: glusterfs_heketi_is_native
+
+- name: Allow heketi service account to view/edit pods
+ oc_adm_policy_user:
+ user: "system:serviceaccount:{{ glusterfs_namespace }}:heketi-service-account"
+ resource_kind: role
+ resource_name: edit
+ state: present
+ when: glusterfs_heketi_is_native
+
+- name: Check for existing deploy-heketi pod
+ oc_obj:
+ namespace: "{{ glusterfs_namespace }}"
+ state: list
+ kind: pod
+ selector: "glusterfs=deploy-heketi-pod,deploy-heketi=support"
+ register: heketi_pod
+ when: glusterfs_heketi_is_native
+
+- name: Check if need to deploy deploy-heketi
+ set_fact:
+ glusterfs_heketi_deploy_is_missing: False
+ when:
+ - "glusterfs_heketi_is_native"
+ - "heketi_pod.results.results[0]['items'] | count > 0"
+ # deploy-heketi is not missing when there are one or more pods with matching labels whose 'Ready' status is True
+ - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0"
+
+- name: Check for existing heketi pod
+ oc_obj:
+ namespace: "{{ glusterfs_namespace }}"
+ state: list
+ kind: pod
+ selector: "glusterfs=heketi-pod"
+ register: heketi_pod
+ when: glusterfs_heketi_is_native
+
+- name: Check if need to deploy heketi
+ set_fact:
+ glusterfs_heketi_is_missing: False
+ when:
+ - "glusterfs_heketi_is_native"
+ - "heketi_pod.results.results[0]['items'] | count > 0"
+ # heketi is not missing when there are one or more pods with matching labels whose 'Ready' status is True
+ - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0"
+
+- include: heketi_deploy_part1.yml
+ when:
+ - glusterfs_heketi_is_native
+ - glusterfs_heketi_deploy_is_missing
+ - glusterfs_heketi_is_missing
+
+- name: Determine heketi URL
+ oc_obj:
+ namespace: "{{ glusterfs_namespace }}"
+ state: list
+ kind: ep
+ selector: "glusterfs in (deploy-heketi-service, heketi-service)"
+ register: heketi_url
+ until:
+ - "heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip != ''"
+ - "heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port != ''"
+ delay: 10
+ retries: "{{ (glusterfs_timeout / 10) | int }}"
+ when:
+ - glusterfs_heketi_is_native
+ - glusterfs_heketi_url is undefined
+
+- name: Set heketi URL
+ set_fact:
+ glusterfs_heketi_url: "{{ heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip }}:{{ heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port }}"
+ when:
+ - glusterfs_heketi_is_native
+ - glusterfs_heketi_url is undefined
+
+- name: Verify heketi service
+ command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' cluster list"
+ changed_when: False
+
+- name: Generate topology file
+ template:
+ src: "{{ openshift.common.examples_content_version }}/topology.json.j2"
+ dest: "{{ mktemp.stdout }}/topology.json"
+ when:
+ - glusterfs_heketi_topology_load
+
+- name: Load heketi topology
+ command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' topology load --json={{ mktemp.stdout }}/topology.json 2>&1"
+ register: topology_load
+ failed_when: "topology_load.rc != 0 or 'Unable' in topology_load.stdout"
+ when:
+ - glusterfs_heketi_topology_load
+
+- include: heketi_deploy_part2.yml
+ when:
+ - glusterfs_heketi_is_native
+ - glusterfs_heketi_is_missing
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
new file mode 100644
index 000000000..451990240
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
@@ -0,0 +1,22 @@
+---
+- set_fact:
+ glusterfs_timeout: "{{ openshift_storage_glusterfs_timeout }}"
+ glusterfs_namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ glusterfs_is_native: "{{ openshift_storage_glusterfs_is_native }}"
+ glusterfs_nodeselector: "{{ openshift_storage_glusterfs_nodeselector | map_from_pairs }}"
+ glusterfs_image: "{{ openshift_storage_glusterfs_image }}"
+ glusterfs_version: "{{ openshift_storage_glusterfs_version }}"
+ glusterfs_wipe: "{{ openshift_storage_glusterfs_wipe }}"
+ glusterfs_heketi_is_native: "{{ openshift_storage_glusterfs_heketi_is_native }}"
+ glusterfs_heketi_is_missing: "{{ openshift_storage_glusterfs_heketi_is_missing }}"
+ glusterfs_heketi_deploy_is_missing: "{{ openshift_storage_glusterfs_heketi_deploy_is_missing }}"
+ glusterfs_heketi_image: "{{ openshift_storage_glusterfs_heketi_image }}"
+ glusterfs_heketi_version: "{{ openshift_storage_glusterfs_heketi_version }}"
+ glusterfs_heketi_admin_key: "{{ openshift_storage_glusterfs_heketi_admin_key }}"
+ glusterfs_heketi_user_key: "{{ openshift_storage_glusterfs_heketi_user_key }}"
+ glusterfs_heketi_topology_load: "{{ openshift_storage_glusterfs_heketi_topology_load }}"
+ glusterfs_heketi_wipe: "{{ openshift_storage_glusterfs_heketi_wipe }}"
+ glusterfs_heketi_url: "{{ openshift_storage_glusterfs_heketi_url }}"
+ glusterfs_nodes: "{{ g_glusterfs_hosts }}"
+
+- include: glusterfs_common.yml
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
new file mode 100644
index 000000000..579112349
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
@@ -0,0 +1,107 @@
+---
+- assert:
+ that: "glusterfs_nodeselector.keys() | count == 1"
+ msg: Only one GlusterFS nodeselector key pair should be provided
+
+- assert:
+ that: "glusterfs_nodes | count >= 3"
+ msg: There must be at least three GlusterFS nodes specified
+
+- name: Delete pre-existing GlusterFS resources
+ oc_obj:
+ namespace: "{{ glusterfs_namespace }}"
+ kind: "template,daemonset"
+ name: glusterfs
+ state: absent
+ when: glusterfs_wipe
+
+- name: Unlabel any existing GlusterFS nodes
+ oc_label:
+ name: "{{ item }}"
+ kind: node
+ state: absent
+ labels: "{{ glusterfs_nodeselector | oo_dict_to_list_of_dict }}"
+ with_items: "{{ groups.all }}"
+ when: glusterfs_wipe
+
+- name: Delete pre-existing GlusterFS config
+ file:
+ path: /var/lib/glusterd
+ state: absent
+ delegate_to: "{{ item }}"
+ with_items: "{{ glusterfs_nodes | default([]) }}"
+ when: glusterfs_wipe
+
+- name: Get GlusterFS storage devices state
+ command: "pvdisplay -C --noheadings -o pv_name,vg_name {% for device in hostvars[item].glusterfs_devices %}{{ device }} {% endfor %}"
+ register: devices_info
+ delegate_to: "{{ item }}"
+ with_items: "{{ glusterfs_nodes | default([]) }}"
+ failed_when: False
+ when: glusterfs_wipe
+
+ # Runs "vgremove -fy <vg>; pvremove -fy <pv>" for every device found to be a physical volume.
+- name: Clear GlusterFS storage device contents
+ shell: "{% for line in item.stdout_lines %}{% set fields = line.split() %}{% if fields | count > 1 %}vgremove -fy {{ fields[1] }}; {% endif %}pvremove -fy {{ fields[0] }}; {% endfor %}"
+ delegate_to: "{{ item.item }}"
+ with_items: "{{ devices_info.results }}"
+ when:
+ - glusterfs_wipe
+ - item.stdout_lines | count > 0
+
+- name: Add service accounts to privileged SCC
+ oc_adm_policy_user:
+ user: "system:serviceaccount:{{ glusterfs_namespace }}:{{ item }}"
+ resource_kind: scc
+ resource_name: privileged
+ state: present
+ with_items:
+ - 'default'
+ - 'router'
+
+- name: Label GlusterFS nodes
+ oc_label:
+ name: "{{ glusterfs_host }}"
+ kind: node
+ state: add
+ labels: "{{ glusterfs_nodeselector | oo_dict_to_list_of_dict }}"
+ with_items: "{{ glusterfs_nodes | default([]) }}"
+ loop_control:
+ loop_var: glusterfs_host
+
+- name: Copy GlusterFS DaemonSet template
+ copy:
+ src: "{{ openshift.common.examples_content_version }}/glusterfs-template.yml"
+ dest: "{{ mktemp.stdout }}/glusterfs-template.yml"
+
+- name: Create GlusterFS template
+ oc_obj:
+ namespace: "{{ glusterfs_namespace }}"
+ kind: template
+ name: glusterfs
+ state: present
+ files:
+ - "{{ mktemp.stdout }}/glusterfs-template.yml"
+
+- name: Deploy GlusterFS pods
+ oc_process:
+ namespace: "{{ glusterfs_namespace }}"
+ template_name: "glusterfs"
+ create: True
+ params:
+ IMAGE_NAME: "{{ glusterfs_image }}"
+ IMAGE_VERSION: "{{ glusterfs_version }}"
+
+- name: Wait for GlusterFS pods
+ oc_obj:
+ namespace: "{{ glusterfs_namespace }}"
+ kind: pod
+ state: list
+ selector: "glusterfs-node=pod"
+ register: glusterfs_pods
+ until:
+ - "glusterfs_pods.results.results[0]['items'] | count > 0"
+    # There must be as many pods with 'Ready' status True as there are nodes expecting those pods
+ - "glusterfs_pods.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == glusterfs_nodes | count"
+ delay: 10
+ retries: "{{ (glusterfs_timeout / 10) | int }}"
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
new file mode 100644
index 000000000..392f4b65b
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
@@ -0,0 +1,71 @@
+---
+- set_fact:
+ glusterfs_timeout: "{{ openshift_storage_glusterfs_registry_timeout }}"
+ glusterfs_namespace: "{{ openshift_storage_glusterfs_registry_namespace }}"
+ glusterfs_is_native: "{{ openshift_storage_glusterfs_registry_is_native }}"
+ glusterfs_nodeselector: "{{ openshift_storage_glusterfs_registry_nodeselector | map_from_pairs }}"
+ glusterfs_image: "{{ openshift_storage_glusterfs_registry_image }}"
+ glusterfs_version: "{{ openshift_storage_glusterfs_registry_version }}"
+ glusterfs_wipe: "{{ openshift_storage_glusterfs_registry_wipe }}"
+ glusterfs_heketi_is_native: "{{ openshift_storage_glusterfs_registry_heketi_is_native }}"
+ glusterfs_heketi_is_missing: "{{ openshift_storage_glusterfs_registry_heketi_is_missing }}"
+ glusterfs_heketi_deploy_is_missing: "{{ openshift_storage_glusterfs_registry_heketi_deploy_is_missing }}"
+ glusterfs_heketi_image: "{{ openshift_storage_glusterfs_registry_heketi_image }}"
+ glusterfs_heketi_version: "{{ openshift_storage_glusterfs_registry_heketi_version }}"
+ glusterfs_heketi_admin_key: "{{ openshift_storage_glusterfs_registry_heketi_admin_key }}"
+ glusterfs_heketi_user_key: "{{ openshift_storage_glusterfs_registry_heketi_user_key }}"
+ glusterfs_heketi_topology_load: "{{ openshift_storage_glusterfs_registry_heketi_topology_load }}"
+ glusterfs_heketi_wipe: "{{ openshift_storage_glusterfs_registry_heketi_wipe }}"
+ glusterfs_heketi_url: "{{ openshift_storage_glusterfs_registry_heketi_url }}"
+ glusterfs_nodes: "{{ g_glusterfs_registry_hosts }}"
+
+- include: glusterfs_common.yml
+ when: g_glusterfs_registry_hosts != g_glusterfs_hosts
+
+- name: Delete pre-existing GlusterFS registry resources
+ oc_obj:
+ namespace: "{{ glusterfs_namespace }}"
+ kind: "{{ item.kind }}"
+ name: "{{ item.name | default(omit) }}"
+ selector: "{{ item.selector | default(omit) }}"
+ state: absent
+ with_items:
+ - kind: "svc,ep"
+ name: "glusterfs-registry-endpoints"
+ failed_when: False
+
+- name: Generate GlusterFS registry endpoints
+ template:
+ src: "{{ openshift.common.examples_content_version }}/glusterfs-registry-endpoints.yml.j2"
+ dest: "{{ mktemp.stdout }}/glusterfs-registry-endpoints.yml"
+
+- name: Copy GlusterFS registry service
+ copy:
+ src: "{{ openshift.common.examples_content_version }}/glusterfs-registry-service.yml"
+ dest: "{{ mktemp.stdout }}/glusterfs-registry-service.yml"
+
+- name: Create GlusterFS registry endpoints
+ oc_obj:
+ namespace: "{{ glusterfs_namespace }}"
+ state: present
+ kind: endpoints
+ name: glusterfs-registry-endpoints
+ files:
+ - "{{ mktemp.stdout }}/glusterfs-registry-endpoints.yml"
+
+- name: Create GlusterFS registry service
+ oc_obj:
+ namespace: "{{ glusterfs_namespace }}"
+ state: present
+ kind: service
+ name: glusterfs-registry-endpoints
+ files:
+ - "{{ mktemp.stdout }}/glusterfs-registry-service.yml"
+
+- name: Check if GlusterFS registry volume exists
+ command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' volume list"
+ register: registry_volume
+
+- name: Create GlusterFS registry volume
+ command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' volume create --size={{ openshift.hosted.registry.storage.volume.size | replace('Gi','') }} --name={{ openshift.hosted.registry.storage.glusterfs.path }}"
+ when: "openshift.hosted.registry.storage.glusterfs.path not in registry_volume.stdout"
diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml
new file mode 100644
index 000000000..c14fcfb15
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml
@@ -0,0 +1,41 @@
+---
+- name: Copy initial heketi resource files
+ copy:
+ src: "{{ openshift.common.examples_content_version }}/{{ item }}"
+ dest: "{{ mktemp.stdout }}/{{ item }}"
+ with_items:
+ - "deploy-heketi-template.yml"
+
+- name: Create deploy-heketi resources
+ oc_obj:
+ namespace: "{{ glusterfs_namespace }}"
+ kind: template
+ name: deploy-heketi
+ state: present
+ files:
+ - "{{ mktemp.stdout }}/deploy-heketi-template.yml"
+
+- name: Deploy deploy-heketi pod
+ oc_process:
+ namespace: "{{ glusterfs_namespace }}"
+ template_name: "deploy-heketi"
+ create: True
+ params:
+ IMAGE_NAME: "{{ glusterfs_heketi_image }}"
+ IMAGE_VERSION: "{{ glusterfs_heketi_version }}"
+ HEKETI_USER_KEY: "{{ glusterfs_heketi_user_key }}"
+ HEKETI_ADMIN_KEY: "{{ glusterfs_heketi_admin_key }}"
+
+- name: Wait for deploy-heketi pod
+ oc_obj:
+ namespace: "{{ glusterfs_namespace }}"
+ kind: pod
+ state: list
+ selector: "glusterfs=deploy-heketi-pod,deploy-heketi=support"
+ register: heketi_pod
+ until:
+ - "heketi_pod.results.results[0]['items'] | count > 0"
+ # Pod's 'Ready' status must be True
+ - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"
+ delay: 10
+ retries: "{{ (glusterfs_timeout / 10) | int }}"
diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
new file mode 100644
index 000000000..64410a9ab
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
@@ -0,0 +1,109 @@
+---
+- name: Create heketi DB volume
+ command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' setup-openshift-heketi-storage --listfile {{ mktemp.stdout }}/heketi-storage.json"
+ register: setup_storage
+ failed_when: False
+
+# This is used in the subsequent task
+- name: Copy the admin client config
+ command: >
+ cp {{ openshift.common.config_base }}/master/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
+ changed_when: False
+ check_mode: no
+
+# Need `command` here because heketi-storage.json contains multiple objects.
+- name: Copy heketi DB to GlusterFS volume
+ command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f {{ mktemp.stdout }}/heketi-storage.json -n {{ glusterfs_namespace }}"
+ when: setup_storage.rc == 0
+
+- name: Wait for copy job to finish
+ oc_obj:
+ namespace: "{{ glusterfs_namespace }}"
+ kind: job
+ state: list
+ name: "heketi-storage-copy-job"
+ register: heketi_job
+ until:
+ - "'results' in heketi_job.results and heketi_job.results.results | count > 0"
+ # Pod's 'Complete' status must be True
+ - "heketi_job.results.results | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Complete'}) | map('bool') | select | list | count == 1"
+ delay: 10
+ retries: "{{ (glusterfs_timeout / 10) | int }}"
+ failed_when:
+ - "'results' in heketi_job.results"
+ - "heketi_job.results.results | count > 0"
+ # Fail when pod's 'Failed' status is True
+ - "heketi_job.results.results | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Failed'}) | map('bool') | select | list | count == 1"
+ when: setup_storage.rc == 0
+
+- name: Delete deploy resources
+ oc_obj:
+ namespace: "{{ glusterfs_namespace }}"
+ kind: "{{ item.kind }}"
+ name: "{{ item.name | default(omit) }}"
+ selector: "{{ item.selector | default(omit) }}"
+ state: absent
+ with_items:
+ - kind: "template,route,service,jobs,dc,secret"
+ selector: "deploy-heketi"
+ failed_when: False
+
+- name: Copy heketi template
+ copy:
+ src: "{{ openshift.common.examples_content_version }}/heketi-template.yml"
+ dest: "{{ mktemp.stdout }}/heketi-template.yml"
+
+- name: Create heketi resources
+ oc_obj:
+ namespace: "{{ glusterfs_namespace }}"
+ kind: template
+ name: heketi
+ state: present
+ files:
+ - "{{ mktemp.stdout }}/heketi-template.yml"
+
+- name: Deploy heketi pod
+ oc_process:
+ namespace: "{{ glusterfs_namespace }}"
+ template_name: "heketi"
+ create: True
+ params:
+ IMAGE_NAME: "{{ glusterfs_heketi_image }}"
+ IMAGE_VERSION: "{{ glusterfs_heketi_version }}"
+ HEKETI_USER_KEY: "{{ glusterfs_heketi_user_key }}"
+ HEKETI_ADMIN_KEY: "{{ glusterfs_heketi_admin_key }}"
+
+- name: Wait for heketi pod
+ oc_obj:
+ namespace: "{{ glusterfs_namespace }}"
+ kind: pod
+ state: list
+ selector: "glusterfs=heketi-pod"
+ register: heketi_pod
+ until:
+ - "heketi_pod.results.results[0]['items'] | count > 0"
+ # Pod's 'Ready' status must be True
+ - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"
+ delay: 10
+ retries: "{{ (glusterfs_timeout / 10) | int }}"
+
+- name: Determine heketi URL
+ oc_obj:
+ namespace: "{{ glusterfs_namespace }}"
+ state: list
+ kind: ep
+ selector: "glusterfs=heketi-service"
+ register: heketi_url
+ until:
+ - "heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip != ''"
+ - "heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port != ''"
+ delay: 10
+ retries: "{{ (glusterfs_timeout / 10) | int }}"
+
+- name: Set heketi URL
+ set_fact:
+ glusterfs_heketi_url: "{{ heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip }}:{{ heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port }}"
+
+- name: Verify heketi service
+ command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' cluster list"
+ changed_when: False
diff --git a/roles/openshift_storage_glusterfs/tasks/main.yml b/roles/openshift_storage_glusterfs/tasks/main.yml
new file mode 100644
index 000000000..ebd8db453
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/tasks/main.yml
@@ -0,0 +1,22 @@
+---
+- name: Create temp directory for doing work in
+ command: mktemp -d /tmp/openshift-glusterfs-ansible-XXXXXX
+ register: mktemp
+ changed_when: False
+ check_mode: no
+
+- include: glusterfs_config.yml
+ when:
+ - g_glusterfs_hosts | default([]) | count > 0
+
+- include: glusterfs_registry.yml
+ when:
+ - g_glusterfs_registry_hosts | default([]) | count > 0
+ - "openshift.hosted.registry.storage.kind == 'glusterfs' or openshift.hosted.registry.glusterfs.swap"
+
+- name: Delete temp directory
+ file:
+ name: "{{ mktemp.stdout }}"
+ state: absent
+ changed_when: False
+ check_mode: no
diff --git a/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-endpoints.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-endpoints.yml.j2
new file mode 100644
index 000000000..605627ab5
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-endpoints.yml.j2
@@ -0,0 +1,11 @@
+apiVersion: v1
+kind: Endpoints
+metadata:
+ name: glusterfs-registry-endpoints
+subsets:
+- addresses:
+{% for node in glusterfs_nodes %}
+ - ip: {{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }}
+{% endfor %}
+ ports:
+ - port: 1
diff --git a/roles/openshift_storage_glusterfs/templates/v3.6/topology.json.j2 b/roles/openshift_storage_glusterfs/templates/v3.6/topology.json.j2
new file mode 100644
index 000000000..33d8f9b36
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.6/topology.json.j2
@@ -0,0 +1,39 @@
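+{#-
+  Renders the heketi topology for the hosts in glusterfs_nodes. Per-host
+  variables consulted from hostvars: glusterfs_cluster (optional grouping key),
+  glusterfs_hostname, glusterfs_ip, glusterfs_zone and glusterfs_devices.
+  This Jinja comment is not rendered into the JSON output.
+-#}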
+{
+ "clusters": [
+{%- set clusters = {} -%}
+{%- for node in glusterfs_nodes -%}
+    {%- set cluster = hostvars[node].glusterfs_cluster if 'glusterfs_cluster' in hostvars[node] else '1' -%}
+ {%- if cluster in clusters -%}
+ {%- set _dummy = clusters[cluster].append(node) -%}
+ {%- else -%}
+ {%- set _dummy = clusters.update({cluster: [ node, ]}) -%}
+ {%- endif -%}
+{%- endfor -%}
+{%- for cluster in clusters -%}
+ {
+ "nodes": [
+{%- for node in clusters[cluster] -%}
+ {
+ "node": {
+ "hostnames": {
+ "manage": [
+ "{{ hostvars[node].glusterfs_hostname | default(hostvars[node].openshift.common.hostname) }}"
+ ],
+ "storage": [
+ "{{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }}"
+ ]
+ },
+ "zone": {{ hostvars[node].glusterfs_zone | default(1) }}
+ },
+ "devices": [
+{%- for device in hostvars[node].glusterfs_devices -%}
+ "{{ device }}"{% if not loop.last %},{% endif %}
+{%- endfor -%}
+ ]
+ }{% if not loop.last %},{% endif %}
+{%- endfor -%}
+ ]
+ }{% if not loop.last %},{% endif %}
+{%- endfor -%}
+ ]
+}
diff --git a/roles/openshift_version/meta/main.yml b/roles/openshift_version/meta/main.yml
index 37c80c29e..ca896addd 100644
--- a/roles/openshift_version/meta/main.yml
+++ b/roles/openshift_version/meta/main.yml
@@ -16,3 +16,4 @@ dependencies:
- role: openshift_docker_facts
- role: docker
when: openshift.common.is_containerized | default(False) | bool and not skip_docker_role | default(False) | bool
+- role: lib_utils
diff --git a/roles/openshift_version/tasks/main.yml b/roles/openshift_version/tasks/main.yml
index c3d001bb4..f2f4d16f0 100644
--- a/roles/openshift_version/tasks/main.yml
+++ b/roles/openshift_version/tasks/main.yml
@@ -3,12 +3,18 @@
- set_fact:
is_containerized: "{{ openshift.common.is_containerized | default(False) | bool }}"
+ is_atomic: "{{ openshift.common.is_atomic | default(False) | bool }}"
# Block attempts to install origin without specifying some kind of version information.
# This is because the latest tags for origin are usually alpha builds, which should not
# be used by default. Users must indicate what they want.
-- fail:
- msg: "Must specify openshift_release or openshift_image_tag in inventory to install origin. (suggestion: add openshift_release=\"1.2\" to inventory)"
+- name: Abort when we cannot safely guess what Origin image version the user wanted
+ fail:
+ msg: |-
+ To install a containerized Origin release, you must set openshift_release or
+ openshift_image_tag in your inventory to specify which version of the OpenShift
+ component images to use. You may want the latest (usually alpha) releases or
+ a more stable release. (Suggestion: add openshift_release="x.y" to inventory.)
when:
- is_containerized | bool
- openshift.common.deployment_type == 'origin'
@@ -27,7 +33,10 @@
when: openshift_release is defined
# Verify that the image tag is in a valid format
-- block:
+- when:
+ - openshift_image_tag is defined
+ - openshift_image_tag != "latest"
+ block:
# Verifies that when the deployment type is origin the version:
# - starts with a v
@@ -35,12 +44,14 @@
# It also allows for optional trailing data which:
# - must start with a dash
# - may contain numbers, letters, dashes and dots.
- - name: Verify Origin openshift_image_tag is valid
+ - name: (Origin) Verify openshift_image_tag is valid
+ when: openshift.common.deployment_type == 'origin'
assert:
that:
- "{{ openshift_image_tag|match('(^v?\\d+\\.\\d+\\.\\d+(-[\\w\\-\\.]*)?$)') }}"
- msg: "openshift_image_tag must be in the format v#.#.#[-optional.#]. Examples: v1.2.3, v3.5.1-alpha.1"
- when: openshift.common.deployment_type == 'origin'
+ msg: |-
+ openshift_image_tag must be in the format v#.#.#[-optional.#]. Examples: v1.2.3, v3.5.1-alpha.1
+ You specified openshift_image_tag={{ openshift_image_tag }}
# Verifies that when the deployment type is openshift-enterprise the version:
# - starts with a v
@@ -48,16 +59,14 @@
# It also allows for optional trailing data which:
# - must start with a dash
# - may contain numbers
- - name: Verify Enterprise openshift_image_tag is valid
+ - name: (Enterprise) Verify openshift_image_tag is valid
+ when: openshift.common.deployment_type == 'openshift-enterprise'
assert:
that:
- "{{ openshift_image_tag|match('(^v\\d+\\.\\d+[\\.\\d+]*(-\\d+)?$)') }}"
- msg: "openshift_image_tag must be in the format v#.#[.#[.#]]. Examples: v1.2, v3.4.1, v3.5.1.3, v1.2-1, v1.2.3-4"
- when: openshift.common.deployment_type == 'openshift-enterprise'
-
- when:
- - openshift_image_tag is defined
- - openshift_image_tag != "latest"
+ msg: |-
+ openshift_image_tag must be in the format v#.#[.#[.#]]. Examples: v1.2, v3.4.1, v3.5.1.3, v1.2-1, v1.2.3-4
+ You specified openshift_image_tag={{ openshift_image_tag }}
# Make sure we copy this to a fact if given a var:
- set_fact:
@@ -82,6 +91,26 @@
include: set_version_containerized.yml
when: is_containerized | bool
+- block:
+ - name: Get available {{ openshift.common.service_type}} version
+ repoquery:
+ name: "{{ openshift.common.service_type}}"
+ ignore_excluders: true
+ register: rpm_results
+ - fail:
+ msg: "Package {{ openshift.common.service_type}} not found"
+ when: not rpm_results.results.package_found
+ - set_fact:
+ openshift_rpm_version: "{{ rpm_results.results.versions.available_versions.0 | default('0.0', True) }}"
+ - name: Fail if rpm version and docker image version are different
+ fail:
+ msg: "OCP rpm version {{ openshift_rpm_version }} is different from OCP image version {{ openshift_version }}"
+    # Both versions are plain version strings, so a direct string comparison is sufficient
+ when: openshift_rpm_version != openshift_version
+ when:
+ - is_containerized | bool
+ - not is_atomic | bool
+
# Warn if the user has provided an openshift_image_tag but is not doing a containerized install
# NOTE: This will need to be modified/removed for future container + rpm installations work.
- name: Warn if openshift_image_tag is defined when not doing a containerized install
@@ -119,30 +148,42 @@
- fail:
msg: openshift_version role was unable to set openshift_version
+ name: Abort if openshift_version was not set
when: openshift_version is not defined
- fail:
msg: openshift_version role was unable to set openshift_image_tag
+ name: Abort if openshift_image_tag was not set
when: openshift_image_tag is not defined
- fail:
msg: openshift_version role was unable to set openshift_pkg_version
+ name: Abort if openshift_pkg_version was not set
when: openshift_pkg_version is not defined
- fail:
- msg: "No OpenShift version available, please ensure your systems are fully registered and have access to appropriate yum repositories."
+ msg: "No OpenShift version available; please ensure your systems are fully registered and have access to appropriate yum repositories."
+ name: Abort if no OpenShift version is available
when:
- not is_containerized | bool
- openshift_version == '0.0'
-# We can't map an openshift_release to full rpm version like we can with containers, make sure
+# We can't map an openshift_release to full rpm version like we can with containers; make sure
# the rpm version we looked up matches the release requested and error out if not.
-- fail:
- msg: "Detected OpenShift version {{ openshift_version }} does not match requested openshift_release {{ openshift_release }}. You may need to adjust your yum repositories, inventory, or run the appropriate OpenShift upgrade playbook."
+- name: For an RPM install, abort when the release requested does not match the available version.
when:
- not is_containerized | bool
- openshift_release is defined
- - not openshift_version.startswith(openshift_release) | bool
+ assert:
+ that:
+ - openshift_version.startswith(openshift_release) | bool
+ msg: |-
+ You requested openshift_release {{ openshift_release }}, which is not matched by
+ the latest OpenShift RPM we detected as {{ openshift.common.service_type }}-{{ openshift_version }}
+ on host {{ inventory_hostname }}.
+ We will only install the latest RPMs, so please ensure you are getting the release
+ you expect. You may need to adjust your Ansible inventory, modify the repositories
+ available on the host, or run the appropriate OpenShift upgrade playbook.
# The end result of these three variables is quite important so make sure they are displayed and logged:
- debug: var=openshift_release
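
The net effect of the stricter checks above is that `openshift_release`, `openshift_image_tag`, and the packages visible to yum all have to agree. A minimal sketch of inventory variables that would satisfy these asserts for an Origin install (the file path and version numbers are illustrative, not defaults):

```
# group_vars/OSEv3.yml (illustrative path and values)
openshift_deployment_type: origin
openshift_release: "3.5"      # prefix-matched against the detected RPM/image version
openshift_image_tag: v3.5.1   # must match v#.#.#[-optional.#] for Origin
```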
diff --git a/roles/openshift_version/tasks/set_version_rpm.yml b/roles/openshift_version/tasks/set_version_rpm.yml
index c7604af1a..c40777bf1 100644
--- a/roles/openshift_version/tasks/set_version_rpm.yml
+++ b/roles/openshift_version/tasks/set_version_rpm.yml
@@ -7,42 +7,18 @@
- openshift_pkg_version is defined
- openshift_version is not defined
-# if {{ openshift.common.service_type}}-excluder is enabled,
-# the repoquery for {{ openshift.common.service_type}} will not work.
-# Thus, create a temporary yum,conf file where exclude= is set to an empty list
-- name: Create temporary yum.conf file
- command: mktemp -d /tmp/yum.conf.XXXXXX
- register: yum_conf_temp_file_result
+- block:
+ - name: Get available {{ openshift.common.service_type}} version
+ repoquery:
+ name: "{{ openshift.common.service_type}}"
+ ignore_excluders: true
+ register: rpm_results
-- set_fact:
- yum_conf_temp_file: "{{yum_conf_temp_file_result.stdout}}/yum.conf"
+ - fail:
+ msg: "Package {{ openshift.common.service_type}} not found"
+ when: not rpm_results.results.package_found
-- name: Copy yum.conf into the temporary file
- copy:
- src: /etc/yum.conf
- dest: "{{ yum_conf_temp_file }}"
- remote_src: True
-
-- name: Clear the exclude= list in the temporary yum.conf
- lineinfile:
- # since ansible 2.3 s/dest/path
- dest: "{{ yum_conf_temp_file }}"
- regexp: '^exclude='
- line: 'exclude='
-
-- name: Gather common package version
- command: >
- {{ repoquery_cmd }} --config "{{ yum_conf_temp_file }}" --qf '%{version}' "{{ openshift.common.service_type}}"
- register: common_version
- failed_when: false
- changed_when: false
- when: openshift_version is not defined
-
-- name: Delete the temporary yum.conf
- file:
- path: "{{ yum_conf_temp_file_result.stdout }}"
- state: absent
-
-- set_fact:
- openshift_version: "{{ common_version.stdout | default('0.0', True) }}"
- when: openshift_version is not defined
+ - set_fact:
+ openshift_version: "{{ rpm_results.results.versions.available_versions.0 | default('0.0', True) }}"
+ when:
+ - openshift_version is not defined
diff --git a/roles/os_firewall/README.md b/roles/os_firewall/README.md
index 43db3cc74..e7ef544f4 100644
--- a/roles/os_firewall/README.md
+++ b/roles/os_firewall/README.md
@@ -17,7 +17,7 @@ Role Variables
| Name | Default | |
|---------------------------|---------|----------------------------------------|
-| os_firewall_use_firewalld | True | If false, use iptables |
+| os_firewall_use_firewalld | False | If false, use iptables |
| os_firewall_allow | [] | List of service,port mappings to allow |
| os_firewall_deny | [] | List of service, port mappings to deny |
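
Since the role default now resolves to iptables everywhere, firewalld becomes strictly opt-in. A sketch of the override in inventory group variables (path is illustrative; firewalld remains unsupported on Atomic Host per the Bugzilla link in defaults/main.yml below):

```
# group_vars/all.yml (illustrative)
os_firewall_enabled: true
os_firewall_use_firewalld: true   # opt back in to firewalld; not supported on Atomic Host
```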
diff --git a/roles/os_firewall/defaults/main.yml b/roles/os_firewall/defaults/main.yml
index 4c544122f..01859e5fc 100644
--- a/roles/os_firewall/defaults/main.yml
+++ b/roles/os_firewall/defaults/main.yml
@@ -2,6 +2,6 @@
os_firewall_enabled: True
# firewalld is not supported on Atomic Host
# https://bugzilla.redhat.com/show_bug.cgi?id=1403331
-os_firewall_use_firewalld: "{{ False if openshift.common.is_atomic | bool else True }}"
+os_firewall_use_firewalld: "{{ False }}"
os_firewall_allow: []
os_firewall_deny: []
diff --git a/roles/os_firewall/library/os_firewall_manage_iptables.py b/roles/os_firewall/library/os_firewall_manage_iptables.py
index 8d4878fa7..aeee3ede8 100755
--- a/roles/os_firewall/library/os_firewall_manage_iptables.py
+++ b/roles/os_firewall/library/os_firewall_manage_iptables.py
@@ -1,6 +1,5 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
-# vim: expandtab:tabstop=4:shiftwidth=4
# pylint: disable=fixme, missing-docstring
import subprocess
diff --git a/roles/os_firewall/tasks/firewall/firewalld.yml b/roles/os_firewall/tasks/firewall/firewalld.yml
index 4b2979887..509655b0c 100644
--- a/roles/os_firewall/tasks/firewall/firewalld.yml
+++ b/roles/os_firewall/tasks/firewall/firewalld.yml
@@ -14,7 +14,7 @@
- iptables
- ip6tables
register: task_result
- failed_when: "task_result|failed and 'could not' not in task_result.msg|lower"
+ failed_when: task_result|failed and 'could not' not in task_result.msg|lower
- name: Wait 10 seconds after disabling iptables
pause:
diff --git a/roles/os_firewall/tasks/firewall/iptables.yml b/roles/os_firewall/tasks/firewall/iptables.yml
index 38ea2477c..55f2fc471 100644
--- a/roles/os_firewall/tasks/firewall/iptables.yml
+++ b/roles/os_firewall/tasks/firewall/iptables.yml
@@ -7,7 +7,7 @@
enabled: no
masked: yes
register: task_result
- failed_when: "task_result|failed and 'could not' not in task_result.msg|lower"
+ failed_when: task_result|failed and 'could not' not in task_result.msg|lower
- name: Wait 10 seconds after disabling firewalld
pause:
diff --git a/roles/rhel_subscribe/tasks/enterprise.yml b/roles/rhel_subscribe/tasks/enterprise.yml
index 41673ee40..ea0c42150 100644
--- a/roles/rhel_subscribe/tasks/enterprise.yml
+++ b/roles/rhel_subscribe/tasks/enterprise.yml
@@ -7,7 +7,7 @@
when: deployment_type == 'enterprise'
- set_fact:
- default_ose_version: '3.4'
+ default_ose_version: '3.5'
when: deployment_type in ['atomic-enterprise', 'openshift-enterprise']
- set_fact:
@@ -16,10 +16,13 @@
- fail:
msg: "{{ ose_version }} is not a valid version for {{ deployment_type }} deployment type"
when: ( deployment_type == 'enterprise' and ose_version not in ['3.0'] ) or
- ( deployment_type in ['atomic-enterprise', 'openshift-enterprise'] and ose_version not in ['3.1', '3.2', '3.3', '3.4'] )
+ ( deployment_type in ['atomic-enterprise', 'openshift-enterprise'] and ose_version not in ['3.1', '3.2', '3.3', '3.4', '3.5'] )
- name: Enable RHEL repositories
command: subscription-manager repos \
--enable="rhel-7-server-rpms" \
--enable="rhel-7-server-extras-rpms" \
- --enable="rhel-7-server-ose-{{ ose_version }}-rpms"
+ --enable="rhel-7-server-ose-{{ ose_version }}-rpms" \
+ --enable="rhel-7-fast-datapath-rpms"
+ register: subscribe_repos
+ until: subscribe_repos | succeeded
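
Registering the result and looping with `until: subscribe_repos | succeeded` retries the subscription-manager call using Ansible's defaults (3 attempts, 5 seconds apart). If the entitlement servers are slow to respond, the window can be widened explicitly; a hedged sketch with illustrative retry values:

```
- name: Enable RHEL repositories
  command: >
    subscription-manager repos
    --enable="rhel-7-server-rpms"
    --enable="rhel-7-server-extras-rpms"
    --enable="rhel-7-server-ose-{{ ose_version }}-rpms"
    --enable="rhel-7-fast-datapath-rpms"
  register: subscribe_repos
  until: subscribe_repos | succeeded
  retries: 10   # illustrative; Ansible's default is 3
  delay: 30     # seconds between attempts; default is 5
```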
diff --git a/setup.py b/setup.py
index 2ad26110b..c6a132ae2 100644
--- a/setup.py
+++ b/setup.py
@@ -7,6 +7,7 @@ import os
import fnmatch
import re
import sys
+import subprocess
import yaml
# Always prefer setuptools over distutils
@@ -199,6 +200,52 @@ class OpenShiftAnsibleGenerateValidation(Command):
print('\nAll generate scripts passed.\n')
+class OpenShiftAnsibleSyntaxCheck(Command):
+ ''' Command to run Ansible syntax check '''
+ description = "Run Ansible syntax check"
+ user_options = []
+
+ # Colors
+ FAIL = '\033[91m' # Red
+ ENDC = '\033[0m' # Reset
+
+ def initialize_options(self):
+ ''' initialize_options '''
+ pass
+
+ def finalize_options(self):
+ ''' finalize_options '''
+ pass
+
+ def run(self):
+ ''' run command '''
+
+ has_errors = False
+
+ for yaml_file in find_files(
+ os.path.join(os.getcwd(), 'playbooks', 'byo'),
+ None, None, r'\.ya?ml$'):
+ with open(yaml_file, 'r') as contents:
+ for line in contents:
+ # initialize_groups.yml is used to identify entry point playbooks
+ if re.search(r'initialize_groups\.yml', line):
+ print('-' * 60)
+ print('Syntax checking playbook: %s' % yaml_file)
+ try:
+ subprocess.check_output(
+ ['ansible-playbook', '-i localhost,',
+ '--syntax-check', yaml_file]
+ )
+ except subprocess.CalledProcessError as cpe:
+ print('{}Execution failed: {}{}'.format(
+ self.FAIL, cpe, self.ENDC))
+ has_errors = True
+ # Break for loop, no need to continue looping lines
+ break
+ if has_errors:
+ raise SystemExit(1)
+
+
class UnsupportedCommand(Command):
''' Basic Command to override unsupported commands '''
user_options = []
@@ -242,6 +289,7 @@ setup(
'lint': OpenShiftAnsiblePylint,
'yamllint': OpenShiftAnsibleYamlLint,
'generate_validation': OpenShiftAnsibleGenerateValidation,
+ 'ansible_syntax': OpenShiftAnsibleSyntaxCheck,
},
packages=[],
)
diff --git a/test-requirements.txt b/test-requirements.txt
index 805828e1c..585cca0b9 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -1,11 +1,14 @@
+# Versions are pinned to prevent pypi releases arbitrarily breaking
+# tests with new APIs/semantics. We want to update versions deliberately.
+
# flake8 must be listed before pylint to avoid dependency conflicts
-flake8
-flake8-mutable
-flake8-print
-pylint
-setuptools-lint
-yamllint
-coverage
-mock
-pytest
-pytest-cov
+flake8==3.3.0
+flake8-mutable==1.1.0
+flake8-print==2.0.2
+pylint==1.6.5
+setuptools-lint==0.5.2
+yamllint==1.6.1
+coverage==4.3.4
+mock==2.0.0
+pytest==3.0.7
+pytest-cov==2.4.0
diff --git a/test/integration/README.md b/test/integration/README.md
new file mode 100644
index 000000000..948e44c50
--- /dev/null
+++ b/test/integration/README.md
@@ -0,0 +1,39 @@
+# Integration tests
+
+Integration tests exercise the OpenShift Ansible playbooks by running them
+against an inventory with Docker containers as hosts.
+
+## Requirements
+
+The tests assume that:
+
+* docker is running on localhost and the current user has access to use it.
+* golang is installed and the go binary is in PATH.
+* python and tox are installed.
+
+## Building images
+
+The tests rely on images built in the local docker index. You can build them
+from the repository root with:
+
+```
+./test/integration/build-images.sh
+```
+
+Use the `--help` option to view available options.
+
+## Running the tests
+
+From the repository root, run the integration tests with:
+
+```
+./test/integration/run-tests.sh
+```
+
+Use the `--help` option to view available options.
+
+You can also run tests more directly, for example to run a specific check:
+
+```
+go test ./test/integration/... -run TestPackageUpdateDepMissing
+```
diff --git a/test/integration/build-images.sh b/test/integration/build-images.sh
new file mode 100755
index 000000000..74a55fa51
--- /dev/null
+++ b/test/integration/build-images.sh
@@ -0,0 +1,101 @@
+#!/bin/bash
+
+# This is intended to run either locally (in which case a push is not
+# necessary) or in a CI job (where the results should be pushed to a
+# registry for use in later CI test jobs). Images are tagged locally with
+# both the base name (e.g. "test-target-base") and with the prefix given;
+# then only the prefixed name is pushed if --push is specified, assuming
+# any necessary credentials are available for the push. The same prefix
+# can then be used for the testing script. By default a local (non-registry)
+# prefix is used and no push can occur. To push to e.g. dockerhub:
+#
+# ./build-images.sh --push --prefix=docker.io/openshift/ansible-integration-
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+STARTTIME=$(date +%s)
+source_root=$(dirname "${0}")
+
+prefix="${PREFIX:-openshift-ansible-integration-}"
+push=false
+verbose=false
+build_options="${DOCKER_BUILD_OPTIONS:-}"
+help=false
+
+for args in "$@"
+do
+ case $args in
+ --prefix=*)
+ prefix="${args#*=}"
+ ;;
+ --push)
+ push=true
+ ;;
+ --no-cache)
+ build_options="${build_options} --no-cache"
+ ;;
+ --verbose)
+ verbose=true
+ ;;
+ --help)
+ help=true
+ ;;
+ esac
+done
+
+if [ "$help" = true ]; then
+ echo "Builds the docker images for openshift-ansible integration tests"
+ echo "and pushes them to a central registry."
+ echo
+ echo "Options: "
+ echo " --prefix=PREFIX"
+ echo " The prefix to use for the image names."
+ echo " default: openshift-ansible-integration-"
+ echo
+ echo " --push"
+ echo " If set will push the tagged image"
+ echo
+ echo " --no-cache"
+ echo " If set will perform the build without a cache."
+ echo
+ echo " --verbose"
+ echo " Enables printing of the commands as they run."
+ echo
+ echo " --help"
+ echo " Prints this help message"
+ echo
+ exit 0
+fi
+
+if [ "$verbose" = true ]; then
+ set -x
+fi
+
+
+declare -a build_order ; declare -A images
+build_order+=( test-target-base ) ; images[test-target-base]=openshift_health_checker/builds/test-target-base
+build_order+=( preflight-aos-package-checks ); images[preflight-aos-package-checks]=openshift_health_checker/builds/aos-package-checks
+for image in "${build_order[@]}"; do
+ BUILD_STARTTIME=$(date +%s)
+ docker_tag=${prefix}${image}
+ echo
+ echo "--- Building component '$image' with docker tag '$docker_tag' ---"
+ docker build ${build_options} -t $image -t $docker_tag "$source_root/${images[$image]}"
+ echo
+ BUILD_ENDTIME=$(date +%s); echo "--- build $docker_tag took $(($BUILD_ENDTIME - $BUILD_STARTTIME)) seconds ---"
+ if [ "$push" = true ]; then
+ docker push $docker_tag
+ PUSH_ENDTIME=$(date +%s); echo "--- push $docker_tag took $(($PUSH_ENDTIME - $BUILD_ENDTIME)) seconds ---"
+ fi
+done
+
+echo
+echo
+echo "++ Active images"
+docker images | grep ${prefix} | sort
+echo
+
+
+ret=$?; ENDTIME=$(date +%s); echo "$0 took $(($ENDTIME - $STARTTIME)) seconds"; exit "$ret"
diff --git a/test/integration/openshift_health_checker/builds/aos-package-checks/Dockerfile b/test/integration/openshift_health_checker/builds/aos-package-checks/Dockerfile
new file mode 100644
index 000000000..8542029f6
--- /dev/null
+++ b/test/integration/openshift_health_checker/builds/aos-package-checks/Dockerfile
@@ -0,0 +1,30 @@
+FROM test-target-base
+
+RUN yum install -y rpm-build rpmdevtools createrepo && \
+ rpmdev-setuptree && \
+ mkdir -p /mnt/localrepo
+ADD root /
+
+# we will build some RPMs that can be used to break yum update in tests.
+RUN cd /root/rpmbuild/SOURCES && \
+ mkdir break-yum-update-1.0 && \
+ tar zfc foo.tgz break-yum-update-1.0 && \
+ rpmbuild -bb /root/break-yum-update.spec && \
+ yum install -y /root/rpmbuild/RPMS/noarch/break-yum-update-1.0-1.noarch.rpm && \
+ rpmbuild -bb /root/break-yum-update-2.spec && \
+ mkdir /mnt/localrepo/break-yum && \
+ cp /root/rpmbuild/RPMS/noarch/break-yum-update-1.0-2.noarch.rpm /mnt/localrepo/break-yum && \
+ createrepo /mnt/localrepo/break-yum
+
+# we'll also build some RPMs that can be used to exercise OCP package version tests.
+RUN cd /root/rpmbuild/SOURCES && \
+ mkdir atomic-openshift-3.2 && \
+ mkdir atomic-openshift-3.3 && \
+ tar zfc ose.tgz atomic-openshift-3.{2,3} && \
+ rpmbuild -bb /root/ose-3.2.spec && \
+ rpmbuild -bb /root/ose-3.3.spec && \
+ mkdir /mnt/localrepo/ose-3.{2,3} && \
+ cp /root/rpmbuild/RPMS/noarch/atomic-openshift*-3.2-1.noarch.rpm /mnt/localrepo/ose-3.2 && \
+ createrepo /mnt/localrepo/ose-3.2 && \
+ cp /root/rpmbuild/RPMS/noarch/atomic-openshift*-3.3-1.noarch.rpm /mnt/localrepo/ose-3.3 && \
+ createrepo /mnt/localrepo/ose-3.3
diff --git a/test/integration/openshift_health_checker/builds/aos-package-checks/root/etc/yum.repos.d/break-yum.repo b/test/integration/openshift_health_checker/builds/aos-package-checks/root/etc/yum.repos.d/break-yum.repo
new file mode 100644
index 000000000..f5ccd2d19
--- /dev/null
+++ b/test/integration/openshift_health_checker/builds/aos-package-checks/root/etc/yum.repos.d/break-yum.repo
@@ -0,0 +1,5 @@
+[break-yum]
+name=break-yum
+baseurl=file:///mnt/localrepo/break-yum
+enabled=0
+gpgcheck=0
diff --git a/test/integration/openshift_health_checker/builds/aos-package-checks/root/etc/yum.repos.d/ose-3.2.repo b/test/integration/openshift_health_checker/builds/aos-package-checks/root/etc/yum.repos.d/ose-3.2.repo
new file mode 100644
index 000000000..3064d6dbb
--- /dev/null
+++ b/test/integration/openshift_health_checker/builds/aos-package-checks/root/etc/yum.repos.d/ose-3.2.repo
@@ -0,0 +1,5 @@
+[ose-3.2]
+name=ose-3.2
+baseurl=file:///mnt/localrepo/ose-3.2
+enabled=0
+gpgcheck=0
diff --git a/test/integration/openshift_health_checker/builds/aos-package-checks/root/etc/yum.repos.d/ose-3.3.repo b/test/integration/openshift_health_checker/builds/aos-package-checks/root/etc/yum.repos.d/ose-3.3.repo
new file mode 100644
index 000000000..1466da476
--- /dev/null
+++ b/test/integration/openshift_health_checker/builds/aos-package-checks/root/etc/yum.repos.d/ose-3.3.repo
@@ -0,0 +1,5 @@
+[ose-3.3]
+name=ose-3.3
+baseurl=file:///mnt/localrepo/ose-3.3
+enabled=0
+gpgcheck=0
diff --git a/test/integration/openshift_health_checker/builds/aos-package-checks/root/root/break-yum-update-2.spec b/test/integration/openshift_health_checker/builds/aos-package-checks/root/root/break-yum-update-2.spec
new file mode 100644
index 000000000..ebd7eb443
--- /dev/null
+++ b/test/integration/openshift_health_checker/builds/aos-package-checks/root/root/break-yum-update-2.spec
@@ -0,0 +1,33 @@
+Name: break-yum-update
+Version: 1.0
+Release: 2
+Summary: Package for breaking updates by requiring things that don't exist
+
+License: NA
+
+Requires: package-that-does-not-exist
+Source0: http://example.com/foo.tgz
+BuildArch: noarch
+
+%description
+Package for breaking updates by requiring things that don't exist
+
+
+%prep
+%setup -q
+
+
+%build
+
+
+%install
+rm -rf $RPM_BUILD_ROOT
+mkdir -p $RPM_BUILD_ROOT
+
+
+%files
+%doc
+
+
+
+%changelog
diff --git a/test/integration/openshift_health_checker/builds/aos-package-checks/root/root/break-yum-update.spec b/test/integration/openshift_health_checker/builds/aos-package-checks/root/root/break-yum-update.spec
new file mode 100644
index 000000000..c40675f90
--- /dev/null
+++ b/test/integration/openshift_health_checker/builds/aos-package-checks/root/root/break-yum-update.spec
@@ -0,0 +1,32 @@
+Name: break-yum-update
+Version: 1.0
+Release: 1
+Summary: Package for breaking updates by requiring things that don't exist
+
+License: NA
+
+Source0: http://example.com/foo.tgz
+BuildArch: noarch
+
+%description
+Package for breaking updates by requiring things that don't exist
+
+
+%prep
+%setup -q
+
+
+%build
+
+
+%install
+rm -rf $RPM_BUILD_ROOT
+mkdir -p $RPM_BUILD_ROOT
+
+
+%files
+%doc
+
+
+
+%changelog
diff --git a/test/integration/openshift_health_checker/builds/aos-package-checks/root/root/ose-3.2.spec b/test/integration/openshift_health_checker/builds/aos-package-checks/root/root/ose-3.2.spec
new file mode 100644
index 000000000..dbc9f0c8e
--- /dev/null
+++ b/test/integration/openshift_health_checker/builds/aos-package-checks/root/root/ose-3.2.spec
@@ -0,0 +1,44 @@
+Name: atomic-openshift
+Version: 3.2
+Release: 1
+Summary: package the critical aos packages
+
+License: NA
+
+Source0: http://example.com/ose.tgz
+BuildArch: noarch
+
+%package master
+Summary: package the critical aos packages
+%package node
+Summary: package the critical aos packages
+
+%description
+Package for pretending to provide AOS
+
+%description master
+Package for pretending to provide AOS
+
+%description node
+Package for pretending to provide AOS
+
+%prep
+%setup -q
+
+
+%build
+
+
+%install
+rm -rf $RPM_BUILD_ROOT
+mkdir -p $RPM_BUILD_ROOT
+
+
+%files
+%files master
+%files node
+%doc
+
+
+
+%changelog
diff --git a/test/integration/openshift_health_checker/builds/aos-package-checks/root/root/ose-3.3.spec b/test/integration/openshift_health_checker/builds/aos-package-checks/root/root/ose-3.3.spec
new file mode 100644
index 000000000..9546e8430
--- /dev/null
+++ b/test/integration/openshift_health_checker/builds/aos-package-checks/root/root/ose-3.3.spec
@@ -0,0 +1,44 @@
+Name: atomic-openshift
+Version: 3.3
+Release: 1
+Summary: package the critical aos packages
+
+License: NA
+
+Source0: http://example.com/ose.tgz
+BuildArch: noarch
+
+%package master
+Summary: package the critical aos packages
+%package node
+Summary: package the critical aos packages
+
+%description
+Package for pretending to provide AOS
+
+%description master
+Package for pretending to provide AOS
+
+%description node
+Package for pretending to provide AOS
+
+%prep
+%setup -q
+
+
+%build
+
+
+%install
+rm -rf $RPM_BUILD_ROOT
+mkdir -p $RPM_BUILD_ROOT
+
+
+%files
+%files master
+%files node
+%doc
+
+
+
+%changelog
diff --git a/test/integration/openshift_health_checker/builds/test-target-base/Dockerfile b/test/integration/openshift_health_checker/builds/test-target-base/Dockerfile
new file mode 100644
index 000000000..39b33c057
--- /dev/null
+++ b/test/integration/openshift_health_checker/builds/test-target-base/Dockerfile
@@ -0,0 +1,2 @@
+FROM centos/systemd
+RUN yum install -y iproute python-dbus PyYAML yum-utils
diff --git a/test/integration/openshift_health_checker/common.go b/test/integration/openshift_health_checker/common.go
new file mode 100644
index 000000000..a92d6861d
--- /dev/null
+++ b/test/integration/openshift_health_checker/common.go
@@ -0,0 +1,99 @@
+package test
+
+import (
+ "bytes"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+ "syscall"
+ "testing"
+)
+
+// A PlaybookTest executes a given Ansible playbook and checks the exit code and
+// output contents.
+type PlaybookTest struct {
+ // inputs
+ Path string
+ // expected outputs
+ ExitCode int
+ Output []string // zero or more strings that should be in the output
+}
+
+// Run runs the PlaybookTest.
+func (p PlaybookTest) Run(t *testing.T) {
+ // A PlaybookTest is intended to be run in parallel with other tests.
+ t.Parallel()
+
+ cmd := exec.Command("ansible-playbook", "-i", "/dev/null", p.Path)
+ cmd.Env = append(os.Environ(), "ANSIBLE_FORCE_COLOR=1")
+ b, err := cmd.CombinedOutput()
+
+ // Check exit code.
+ if (err == nil) && (p.ExitCode != 0) {
+ p.checkExitCode(t, 0, p.ExitCode, cmd, b)
+ }
+ if (err != nil) && (p.ExitCode == 0) {
+ got, ok := getExitCode(err)
+ if !ok {
+ t.Logf("unexpected error (%T): %[1]v", err)
+ p.logCmdAndOutput(t, cmd, b)
+ t.FailNow()
+ }
+ p.checkExitCode(t, got, p.ExitCode, cmd, b)
+ }
+
+ // Check output contents.
+ var missing []string
+ for _, s := range p.Output {
+ if !bytes.Contains(b, []byte(s)) {
+ missing = append(missing, s)
+ }
+ }
+ if len(missing) > 0 {
+ t.Logf("missing in output: %q", missing)
+ p.logCmdAndOutput(t, cmd, b)
+ t.FailNow()
+ }
+}
+
+// getExitCode returns an exit code and true if the exit code could be taken
+// from err, false otherwise.
+// The implementation is GOOS-specific, and currently only supports Linux.
+func getExitCode(err error) (int, bool) {
+ exitErr, ok := err.(*exec.ExitError)
+ if !ok {
+ return -1, false
+ }
+ waitStatus, ok := exitErr.Sys().(syscall.WaitStatus)
+ if !ok {
+ return -1, false
+ }
+ return waitStatus.ExitStatus(), true
+}
+
+// checkExitCode marks the test as failed when got is different than want.
+func (p PlaybookTest) checkExitCode(t *testing.T, got, want int, cmd *exec.Cmd, output []byte) {
+ if got == want {
+ return
+ }
+ t.Logf("got exit code %v, want %v", got, want)
+ p.logCmdAndOutput(t, cmd, output)
+ t.FailNow()
+}
+
+// logCmdAndOutput logs how to re-run a command and a summary of the output of
+// its last execution for debugging.
+func (p PlaybookTest) logCmdAndOutput(t *testing.T, cmd *exec.Cmd, output []byte) {
+ const maxLines = 10
+ lines := bytes.Split(bytes.TrimRight(output, "\n"), []byte("\n"))
+ if len(lines) > maxLines {
+ lines = append([][]byte{[]byte("...")}, lines[len(lines)-maxLines:len(lines)]...)
+ }
+ output = bytes.Join(lines, []byte("\n"))
+ dir, err := filepath.Abs(cmd.Dir)
+ if err != nil {
+ panic(err)
+ }
+ t.Logf("\n$ (cd %s && %s)\n%s", dir, strings.Join(cmd.Args, " "), output)
+}
diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_availability_missing_required.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_availability_missing_required.yml
new file mode 100644
index 000000000..31d0d521e
--- /dev/null
+++ b/test/integration/openshift_health_checker/preflight/playbooks/package_availability_missing_required.yml
@@ -0,0 +1,20 @@
+---
+- include: ../../setup_container.yml
+ vars:
+ image: preflight-aos-package-checks
+ l_host_vars:
+ deployment_type: openshift-enterprise
+
+- name: Fail as required packages cannot be installed
+ hosts: all
+ roles:
+ - openshift_health_checker
+ tasks:
+ - block:
+
+ - action: openshift_health_check
+ args:
+ checks: [ 'package_availability' ]
+
+ always: # destroy the container whether check passed or not
+ - include: ../../teardown_container.yml
diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_availability_succeeds.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_availability_succeeds.yml
new file mode 100644
index 000000000..16ff41673
--- /dev/null
+++ b/test/integration/openshift_health_checker/preflight/playbooks/package_availability_succeeds.yml
@@ -0,0 +1,20 @@
+---
+- include: ../../setup_container.yml
+ vars:
+ image: preflight-aos-package-checks
+ l_host_vars:
+ deployment_type: origin
+
+- name: Succeeds as Origin packages are public
+ hosts: all
+ roles:
+ - openshift_health_checker
+ tasks:
+ - block:
+
+ - action: openshift_health_check
+ args:
+ checks: [ 'package_availability' ]
+
+ always: # destroy the container whether check passed or not
+ - include: ../../teardown_container.yml
diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_update_dep_missing.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_update_dep_missing.yml
new file mode 100644
index 000000000..7b6e71f91
--- /dev/null
+++ b/test/integration/openshift_health_checker/preflight/playbooks/package_update_dep_missing.yml
@@ -0,0 +1,24 @@
+---
+- include: ../../setup_container.yml
+ vars:
+ image: preflight-aos-package-checks
+ l_host_vars:
+ openshift_deployment_type: openshift-enterprise
+ openshift_release: 3.2
+
+- name: Fails when a dependency required for update is missing
+ hosts: all
+ roles:
+ - openshift_health_checker
+ tasks:
+ - block:
+
+ - include: tasks/enable_repo.yml
+ vars: { repo_name: "break-yum" }
+
+ - action: openshift_health_check
+ args:
+ checks: [ 'package_update' ]
+
+ always: # destroy the container whether check passed or not
+ - include: ../../teardown_container.yml
diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_broken.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_broken.yml
new file mode 100644
index 000000000..c2e9c3866
--- /dev/null
+++ b/test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_broken.yml
@@ -0,0 +1,31 @@
+---
+- include: ../../setup_container.yml
+ vars:
+ image: preflight-aos-package-checks
+ l_host_vars:
+ openshift_deployment_type: openshift-enterprise
+ openshift_release: 3.2
+
+- name: Fails when a repo definition is completely broken
+ hosts: all
+ roles:
+ - openshift_health_checker
+ tasks:
+ - block:
+
+ - include: tasks/enable_repo.yml
+ vars: { repo_name: "break-yum" }
+
+ - name: Break the break-yum repo
+ replace:
+ dest: /etc/yum.repos.d/break-yum.repo
+ backup: no
+ regexp: "^baseurl"
+ replace: "#baseurl"
+
+ - action: openshift_health_check
+ args:
+ checks: [ 'package_update' ]
+
+ always: # destroy the container whether check passed or not
+ - include: ../../teardown_container.yml
diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_disabled.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_disabled.yml
new file mode 100644
index 000000000..98d41aad4
--- /dev/null
+++ b/test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_disabled.yml
@@ -0,0 +1,21 @@
+---
+- include: ../../setup_container.yml
+ vars:
+ image: preflight-aos-package-checks
+ l_host_vars:
+ openshift_deployment_type: openshift-enterprise
+ openshift_release: 3.2
+
+- name: Succeeds when nothing blocks a yum update
+ hosts: all
+ roles:
+ - openshift_health_checker
+ tasks:
+ - block:
+
+ - action: openshift_health_check
+ args:
+ checks: [ 'package_update' ]
+
+ always: # destroy the container whether check passed or not
+ - include: ../../teardown_container.yml
diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_unreachable.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_unreachable.yml
new file mode 100644
index 000000000..60ab9942a
--- /dev/null
+++ b/test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_unreachable.yml
@@ -0,0 +1,27 @@
+---
+- include: ../../setup_container.yml
+ vars:
+ image: preflight-aos-package-checks
+ l_host_vars:
+ openshift_deployment_type: openshift-enterprise
+ openshift_release: 3.2
+
+- name: Fails when repo content is not available
+ hosts: all
+ roles:
+ - openshift_health_checker
+ tasks:
+ - block:
+
+ - include: tasks/enable_repo.yml
+ vars: { repo_name: "break-yum" }
+
+ - name: Remove the local repo entirely
+ file: path=/mnt/localrepo state=absent
+
+ - action: openshift_health_check
+ args:
+ checks: [ 'package_update' ]
+
+ always: # destroy the container whether check passed or not
+ - include: ../../teardown_container.yml
diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_version_matches.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_version_matches.yml
new file mode 100644
index 000000000..cd60dee5a
--- /dev/null
+++ b/test/integration/openshift_health_checker/preflight/playbooks/package_version_matches.yml
@@ -0,0 +1,24 @@
+---
+- include: ../../setup_container.yml
+ vars:
+ image: preflight-aos-package-checks
+ l_host_vars:
+ deployment_type: openshift-enterprise
+ openshift_release: 3.2
+
+- name: Success when AOS version matches openshift_release
+ hosts: all
+ roles:
+ - openshift_health_checker
+ tasks:
+ - block:
+
+ - include: tasks/enable_repo.yml
+ vars: { repo_name: "ose-3.2" }
+
+ - action: openshift_health_check
+ args:
+ checks: [ 'package_version' ]
+
+ always: # destroy the container whether check passed or not
+ - include: ../../teardown_container.yml
diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_version_mismatches.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_version_mismatches.yml
new file mode 100644
index 000000000..5939a1ef1
--- /dev/null
+++ b/test/integration/openshift_health_checker/preflight/playbooks/package_version_mismatches.yml
@@ -0,0 +1,24 @@
+---
+- include: ../../setup_container.yml
+ vars:
+ image: preflight-aos-package-checks
+ l_host_vars:
+ deployment_type: openshift-enterprise
+ openshift_release: 3.3
+
+- name: Failure when AOS version doesn't match openshift_release
+ hosts: all
+ roles:
+ - openshift_health_checker
+ tasks:
+ - block:
+
+ - include: tasks/enable_repo.yml
+ vars: { repo_name: "ose-3.2" }
+
+ - action: openshift_health_check
+ args:
+ checks: [ 'package_version' ]
+
+ always: # destroy the container whether check passed or not
+ - include: ../../teardown_container.yml
diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_version_multiple.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_version_multiple.yml
new file mode 100644
index 000000000..be0f9bc7a
--- /dev/null
+++ b/test/integration/openshift_health_checker/preflight/playbooks/package_version_multiple.yml
@@ -0,0 +1,26 @@
+---
+- include: ../../setup_container.yml
+ vars:
+ image: preflight-aos-package-checks
+ l_host_vars:
+ openshift_deployment_type: openshift-enterprise
+
+- name: Fails when multiple AOS versions are available
+ hosts: all
+ roles:
+ - openshift_health_checker
+ tasks:
+ - block:
+
+ - include: tasks/enable_repo.yml
+ vars: { repo_name: "ose-3.2" }
+
+ - include: tasks/enable_repo.yml
+ vars: { repo_name: "ose-3.3" }
+
+ - action: openshift_health_check
+ args:
+ checks: [ 'package_version' ]
+
+ always: # destroy the container whether check passed or not
+ - include: ../../teardown_container.yml
diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_version_origin.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_version_origin.yml
new file mode 100644
index 000000000..da3f6b844
--- /dev/null
+++ b/test/integration/openshift_health_checker/preflight/playbooks/package_version_origin.yml
@@ -0,0 +1,20 @@
+---
+- include: ../../setup_container.yml
+ vars:
+ image: preflight-aos-package-checks
+ l_host_vars:
+ openshift_deployment_type: origin
+
+- name: Succeeds with Origin although multiple versions are available
+ hosts: all
+ roles:
+ - openshift_health_checker
+ tasks:
+ - block:
+
+ - action: openshift_health_check
+ args:
+ checks: [ 'package_version' ]
+
+ always: # destroy the container whether check passed or not
+ - include: ../../teardown_container.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_5/roles b/test/integration/openshift_health_checker/preflight/playbooks/roles
index 6bc1a7aef..6bc1a7aef 120000
--- a/playbooks/byo/openshift-cluster/upgrades/v3_5/roles
+++ b/test/integration/openshift_health_checker/preflight/playbooks/roles
diff --git a/test/integration/openshift_health_checker/preflight/playbooks/tasks/enable_repo.yml b/test/integration/openshift_health_checker/preflight/playbooks/tasks/enable_repo.yml
new file mode 100644
index 000000000..aaacf205e
--- /dev/null
+++ b/test/integration/openshift_health_checker/preflight/playbooks/tasks/enable_repo.yml
@@ -0,0 +1,9 @@
+---
+- name: Enable {{ repo_name }} repo
+ # believe it or not we can't use the yum_repository module for this.
+ # https://github.com/ansible/ansible-modules-extras/issues/2384
+ ini_file:
+ dest: /etc/yum.repos.d/{{ repo_name }}.repo
+ section: "{{ repo_name }}"
+ option: enabled
+ value: 1
diff --git a/test/integration/openshift_health_checker/preflight/preflight_test.go b/test/integration/openshift_health_checker/preflight/preflight_test.go
new file mode 100644
index 000000000..05ddf139f
--- /dev/null
+++ b/test/integration/openshift_health_checker/preflight/preflight_test.go
@@ -0,0 +1,105 @@
+package preflight
+
+import (
+ "testing"
+
+ . ".."
+)
+
+func TestPackageUpdateDepMissing(t *testing.T) {
+ PlaybookTest{
+ Path: "playbooks/package_update_dep_missing.yml",
+ ExitCode: 2,
+ Output: []string{
+ "check \"package_update\":",
+ "Could not perform a yum update.",
+ "break-yum-update-1.0-2.noarch requires package-that-does-not-exist",
+ },
+ }.Run(t)
+}
+
+func TestPackageUpdateRepoBroken(t *testing.T) {
+ PlaybookTest{
+ Path: "playbooks/package_update_repo_broken.yml",
+ ExitCode: 2,
+ Output: []string{
+ "check \"package_update\":",
+ "Error with yum repository configuration: Cannot find a valid baseurl for repo",
+ },
+ }.Run(t)
+}
+
+func TestPackageUpdateRepoDisabled(t *testing.T) {
+ PlaybookTest{
+ Path: "playbooks/package_update_repo_disabled.yml",
+ ExitCode: 0,
+ Output: []string{
+ "CHECK [package_update",
+ },
+ }.Run(t)
+}
+
+func TestPackageUpdateRepoUnreachable(t *testing.T) {
+ PlaybookTest{
+ Path: "playbooks/package_update_repo_unreachable.yml",
+ ExitCode: 2,
+ Output: []string{
+ "check \"package_update\":",
+ "Error getting data from at least one yum repository",
+ },
+ }.Run(t)
+}
+
+func TestPackageVersionMatches(t *testing.T) {
+ PlaybookTest{
+ Path: "playbooks/package_version_matches.yml",
+ ExitCode: 0,
+ Output: []string{
+ "CHECK [package_version",
+ },
+ }.Run(t)
+}
+
+func TestPackageVersionMismatches(t *testing.T) {
+ PlaybookTest{
+ Path: "playbooks/package_version_mismatches.yml",
+ ExitCode: 2,
+ Output: []string{
+ "check \"package_version\":",
+ "Not all of the required packages are available at requested version",
+ },
+ }.Run(t)
+}
+
+func TestPackageVersionMultiple(t *testing.T) {
+ PlaybookTest{
+ Path: "playbooks/package_version_multiple.yml",
+ ExitCode: 2,
+ Output: []string{
+ "check \"package_version\":",
+ "Multiple minor versions of these packages are available",
+ },
+ }.Run(t)
+}
+
+func TestPackageAvailabilityMissingRequired(t *testing.T) {
+ PlaybookTest{
+ Path: "playbooks/package_availability_missing_required.yml",
+ ExitCode: 2,
+ Output: []string{
+ "check \"package_availability\":",
+ "Cannot install all of the necessary packages.",
+ "atomic-openshift",
+ },
+ }.Run(t)
+}
+
+func TestPackageAvailabilitySucceeds(t *testing.T) {
+ PlaybookTest{
+ Path: "playbooks/package_availability_succeeds.yml",
+ ExitCode: 0,
+ Output: []string{
+ "CHECK [package_availability",
+ },
+ }.Run(t)
+}
diff --git a/test/integration/openshift_health_checker/setup_container.yml b/test/integration/openshift_health_checker/setup_container.yml
new file mode 100644
index 000000000..8793d954e
--- /dev/null
+++ b/test/integration/openshift_health_checker/setup_container.yml
@@ -0,0 +1,45 @@
+---
+# Include this play once for each container you want to create and use as a test host.
+#
+# Optional parameters on the include are as follows:
+# * scenario = unique name for the container to be started
+# * image = name of the image to start in the container
+# * command = command to run in the container
+# * l_groups = host groups that the container should be added to
+# * l_host_vars = any variables that should be added to the host
+
+- name: Start container for specified test host
+ gather_facts: no
+ hosts: localhost
+ connection: local
+ tasks:
+
+ - set_fact:
+ # This is a little weird but if we use a var instead of a fact,
+ # a different random value is generated for each task. See:
+ # https://opensolitude.com/2015/05/27/ansible-lookups-variables-vs-facts.html
+ container_name: openshift_ansible_test_{{ scenario | default(100000000000000 | random) }}
+
+ - name: start container
+ docker_container:
+ name: "{{ container_name }}"
+ image: "{{ lookup('env', 'IMAGE_PREFIX') | default('openshift-ansible-integration-', true) }}{{ image | default('test-target-base') }}"
+ command: "{{ command | default('sleep 1800') }}"
+ recreate: yes
+ # NOTE: When/if we need to run containers that are docker hosts as well:
+ # volumes: [ "/var/run/docker.sock:/var/run/docker.sock:z" ]
+
+ - name: add container as host in inventory
+ add_host:
+ ansible_connection: docker
+ name: "{{ container_name }}"
+ groups: '{{ l_groups | default("masters,nodes,etcd") }}'
+
+ # There ought to be a better way to transfer the host vars, but see:
+ # https://groups.google.com/forum/#!topic/Ansible-project/Jwx8RYhqxPA
+ - name: set host facts per test parameters
+ set_fact:
+ "{{ item.key }}": "{{ item.value }}"
+ delegate_facts: True
+ delegate_to: "{{ container_name }}"
+ with_dict: "{{ l_host_vars | default({}) }}"
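
The parameter list at the top of setup_container.yml is the whole interface for these tests: each preflight playbook includes it with whatever image and host variables the scenario needs. A minimal sketch of such an include (scenario name and variable values are illustrative):

```
- include: ../../setup_container.yml
  vars:
    scenario: my_check
    image: preflight-aos-package-checks
    l_groups: masters,nodes
    l_host_vars:
      openshift_deployment_type: openshift-enterprise
      openshift_release: 3.2
```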
diff --git a/test/integration/openshift_health_checker/teardown_container.yml b/test/integration/openshift_health_checker/teardown_container.yml
new file mode 100644
index 000000000..fe11e2617
--- /dev/null
+++ b/test/integration/openshift_health_checker/teardown_container.yml
@@ -0,0 +1,23 @@
+---
+
+# Include this to delete the current test host container.
+#
+# In order to recover from test exceptions, this cleanup is expected to
+# be done in an "always:" task on the same block as the test task(s). So
+# it happens in a task "on" the host being tested. In order to delete the
+# host's container, the task uses its own hostname (which is the same as the
+# container name) but delegates the docker action to localhost.
+
+- block:
+
+ # so handlers don't break the test by trying to run after teardown:
+ - meta: flush_handlers
+
+ always:
+
+ - name: delete test container
+ delegate_to: localhost
+ connection: local
+ docker_container:
+ name: "{{ inventory_hostname }}"
+ state: absent
diff --git a/test/integration/run-tests.sh b/test/integration/run-tests.sh
new file mode 100755
index 000000000..680b64602
--- /dev/null
+++ b/test/integration/run-tests.sh
@@ -0,0 +1,80 @@
+#!/bin/bash
+
+# This script runs the golang integration tests in the directories underneath.
+# It should be run from the same directory it is in, or in a directory above.
+# Specify the same image prefix used (if any) with build-images.sh
+#
+# Example:
+# ./run-tests.sh --prefix=docker.io/openshift/ansible-integration- --parallel=16
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+source_root=$(dirname "${0}")
+
+prefix="${PREFIX:-openshift-ansible-integration-}"
+gotest_options="${GOTEST_OPTIONS:--v}"
+push=false
+verbose=false
+help=false
+
+for args in "$@"
+do
+ case $args in
+ --prefix=*)
+ prefix="${args#*=}"
+ ;;
+ --parallel=*)
+ gotest_options="${gotest_options} -parallel ${args#*=}"
+ ;;
+ --verbose)
+ verbose=true
+ ;;
+ --help)
+ help=true
+ ;;
+ esac
+done
+
+if [ "$help" = true ]; then
+ echo "Runs the openshift-ansible integration tests."
+ echo
+ echo "Options: "
+ echo " --prefix=PREFIX"
+ echo " The prefix to use for the image names."
+ echo " default: openshift-ansible-integration-"
+ echo
+ echo " --parallel=NUMBER"
+ echo " Number of tests to run in parallel."
+ echo " default: GOMAXPROCS (typically, number of processors)"
+ echo
+ echo " --verbose"
+ echo " Enables printing of the commands as they run."
+ echo
+ echo " --help"
+ echo " Prints this help message"
+ echo
+ exit 0
+fi
+
+
+
+if ! [ -d $source_root/../../.tox/integration ]; then
+ # have tox create a consistent virtualenv
+ pushd $source_root/../..; tox -e integration; popd
+fi
+# use the virtualenv from tox
+set +o nounset; source $source_root/../../.tox/integration/bin/activate; set -o nounset
+
+if [ "$verbose" = true ]; then
+ set -x
+fi
+
+# Run the tests. NOTE: "go test" requires a relative path for this purpose.
+# The PWD trick below will only work if cwd is in/above where this script lives.
+retval=0
+IMAGE_PREFIX="${prefix}" env -u GOPATH \
+ go test ./${source_root#$PWD}/... ${gotest_options}
+
+
diff --git a/test/openshift_version_tests.py b/test/openshift_version_tests.py
index 52e9a9888..393a4d6ba 100644
--- a/test/openshift_version_tests.py
+++ b/test/openshift_version_tests.py
@@ -44,7 +44,7 @@ class OpenShiftVersionTests(unittest.TestCase):
{'name': 'oo_version_gte_3_5_or_1_5',
'positive_enterprise_version': '3.6.0',
'negative_enterprise_version': '3.4.0',
- 'positive_origin_version': '1.6.0',
+ 'positive_origin_version': '3.6.0',
'negative_origin_version': '1.4.0'}]
def test_legacy_gte_filters(self):
diff --git a/test/modify_yaml_tests.py b/test/unit/modify_yaml_tests.py
index 0dc25df82..65b2db44c 100644
--- a/test/modify_yaml_tests.py
+++ b/test/unit/modify_yaml_tests.py
@@ -5,7 +5,7 @@ import os
import sys
import unittest
-sys.path = [os.path.abspath(os.path.dirname(__file__) + "/../library/")] + sys.path
+sys.path = [os.path.abspath(os.path.dirname(__file__) + "/../../library/")] + sys.path
# pylint: disable=import-error
from modify_yaml import set_key # noqa: E402
diff --git a/tox.ini b/tox.ini
index 643fa774d..bd27ba313 100644
--- a/tox.ini
+++ b/tox.ini
@@ -3,6 +3,7 @@ minversion=2.3.1
envlist =
py{27,35}-{flake8,pylint,unit}
py27-{yamllint,ansible_syntax,generate_validation}
+ integration
skipsdist=True
skip_missing_interpreters=True
@@ -11,7 +12,8 @@ skip_install=True
deps =
-rrequirements.txt
-rtest-requirements.txt
- py35-flake8: flake8-bugbear
+ py35-flake8: flake8-bugbear==17.3.0
+ integration: docker-py==1.10.6
commands =
unit: pip install -e utils
@@ -21,4 +23,10 @@ commands =
yamllint: python setup.py yamllint
generate_validation: python setup.py generate_validation
# TODO(rhcarvalho): check syntax of other important entrypoint playbooks
- ansible_syntax: ansible-playbook --syntax-check playbooks/byo/config.yml
+ ansible_syntax: python setup.py ansible_syntax
+ # ansible 2.2.2+ unfortunately breaks the integration test runner
+ # because it can no longer set facts on the test docker hosts.
+ # So for now, install separate ansible version for integration.
+ # PR that fixes it: https://github.com/ansible/ansible/pull/23599
+ # Once that PR is available, drop this and use same ansible.
+ integration: pip install ansible==2.2.1.0
diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py
index a6d784dea..71dcf87aa 100644
--- a/utils/src/ooinstall/cli_installer.py
+++ b/utils/src/ooinstall/cli_installer.py
@@ -34,6 +34,12 @@ UPGRADE_MAPPINGS = {
'3.5': {
'minor_version': '3.5',
'minor_playbook': 'v3_5/upgrade.yml',
+ 'major_playbook': 'v3_6/upgrade.yml',
+ 'major_version': '3.6',
+ },
+ '3.6': {
+ 'minor_version': '3.6',
+ 'minor_playbook': 'v3_6/upgrade.yml',
},
}
diff --git a/utils/src/ooinstall/variants.py b/utils/src/ooinstall/variants.py
index f25266f29..1574d447a 100644
--- a/utils/src/ooinstall/variants.py
+++ b/utils/src/ooinstall/variants.py
@@ -39,18 +39,19 @@ class Variant(object):
# WARNING: Keep the versions ordered, most recent first:
OSE = Variant('openshift-enterprise', 'OpenShift Container Platform', [
- Version('3.5', 'openshift-enterprise'),
+ Version('3.6', 'openshift-enterprise'),
])
REG = Variant('openshift-enterprise', 'Registry', [
- Version('3.4', 'openshift-enterprise', 'registry'),
+ Version('3.6', 'openshift-enterprise', 'registry'),
])
origin = Variant('origin', 'OpenShift Origin', [
- Version('1.4', 'origin'),
+ Version('3.6', 'origin'),
])
LEGACY = Variant('openshift-enterprise', 'OpenShift Container Platform', [
+ Version('3.5', 'openshift-enterprise'),
Version('3.4', 'openshift-enterprise'),
Version('3.3', 'openshift-enterprise'),
Version('3.2', 'openshift-enterprise'),