Diffstat (limited to 'roles')
-rw-r--r--  roles/ansible_service_broker/meta/main.yml | 1
-rw-r--r--  roles/ansible_service_broker/tasks/install.yml | 19
-rw-r--r--  roles/ansible_service_broker/vars/default_images.yml | 2
-rw-r--r--  roles/calico/meta/main.yml | 1
-rw-r--r--  roles/calico/tasks/main.yml | 2
-rw-r--r--  roles/calico_master/meta/main.yml | 1
-rw-r--r--  roles/calico_master/tasks/main.yml | 4
-rw-r--r--  roles/cockpit-ui/meta/main.yml | 2
-rw-r--r--  roles/cockpit-ui/tasks/main.yml | 2
-rw-r--r--  roles/cockpit/meta/main.yml | 2
-rw-r--r--  roles/cockpit/tasks/main.yml | 4
-rw-r--r--  roles/container_runtime/README.md | 4
-rw-r--r--  roles/container_runtime/defaults/main.yml | 20
-rw-r--r--  roles/container_runtime/meta/main.yml | 2
-rw-r--r--  roles/container_runtime/tasks/common/post.yml | 4
-rw-r--r--  roles/container_runtime/tasks/common/syscontainer_packages.yml | 6
-rw-r--r--  roles/container_runtime/tasks/docker_storage_setup_overlay.yml | 10
-rw-r--r--  roles/container_runtime/tasks/docker_upgrade_check.yml | 29
-rw-r--r--  roles/container_runtime/tasks/main.yml | 2
-rw-r--r--  roles/container_runtime/tasks/package_docker.yml | 8
-rw-r--r--  roles/container_runtime/tasks/registry_auth.yml | 2
-rw-r--r--  roles/container_runtime/tasks/systemcontainer_crio.yml | 15
-rw-r--r--  roles/container_runtime/tasks/systemcontainer_docker.yml | 8
-rw-r--r--  roles/container_runtime/templates/crio-network.j2 | 9
-rw-r--r--  roles/container_runtime/templates/docker_storage_setup.j2 | 12
-rw-r--r--  roles/contiv/README.md | 4
-rw-r--r--  roles/contiv/defaults/main.yml | 149
-rw-r--r--  roles/contiv/meta/main.yml | 15
-rw-r--r--  roles/contiv/tasks/aci.yml | 2
-rw-r--r--  roles/contiv/tasks/api_proxy.yml | 120
-rw-r--r--  roles/contiv/tasks/default_network.yml | 58
-rw-r--r--  roles/contiv/tasks/download_bins.yml | 20
-rw-r--r--  roles/contiv/tasks/etcd.yml | 114
-rw-r--r--  roles/contiv/tasks/main.yml | 9
-rw-r--r--  roles/contiv/tasks/netmaster.yml | 30
-rw-r--r--  roles/contiv/tasks/netmaster_firewalld.yml | 23
-rw-r--r--  roles/contiv/tasks/netmaster_iptables.yml | 51
-rw-r--r--  roles/contiv/tasks/netplugin.yml | 33
-rw-r--r--  roles/contiv/tasks/netplugin_firewalld.yml | 39
-rw-r--r--  roles/contiv/tasks/netplugin_iptables.yml | 98
-rw-r--r--  roles/contiv/tasks/old_version_cleanup.yml | 43
-rw-r--r--  roles/contiv/tasks/old_version_cleanup_firewalld.yml | 11
-rw-r--r--  roles/contiv/tasks/old_version_cleanup_iptables.yml | 44
-rw-r--r--  roles/contiv/tasks/ovs.yml | 2
-rw-r--r--  roles/contiv/tasks/packageManagerInstall.yml | 5
-rw-r--r--  roles/contiv/tasks/pkgMgrInstallers/centos-install.yml | 12
-rw-r--r--  roles/contiv/templates/aci-gw.service | 6
-rw-r--r--  roles/contiv/templates/aci_gw.j2 | 14
-rw-r--r--  roles/contiv/templates/api-proxy-daemonset.yml.j2 | 57
-rw-r--r--  roles/contiv/templates/api-proxy-secrets.yml.j2 | 12
-rw-r--r--  roles/contiv/templates/contiv.cfg.j2 | 2
-rw-r--r--  roles/contiv/templates/contiv.cfg.master.j2 | 2
-rw-r--r--  roles/contiv/templates/etcd-daemonset.yml.j2 | 83
-rw-r--r--  roles/contiv/templates/etcd-proxy-daemonset.yml.j2 | 55
-rw-r--r--  roles/contiv/templates/etcd-scc.yml.j2 | 42
-rw-r--r--  roles/contiv/templates/netmaster.env.j2 | 2
-rw-r--r--  roles/contiv/templates/netmaster.j2 | 1
-rw-r--r--  roles/contiv/templates/netmaster.service | 2
-rw-r--r--  roles/contiv/templates/netplugin.j2 | 5
-rw-r--r--  roles/contiv/templates/netplugin.service | 2
-rw-r--r--  roles/contiv_auth_proxy/README.md | 29
-rw-r--r--  roles/contiv_auth_proxy/defaults/main.yml | 12
-rw-r--r--  roles/contiv_auth_proxy/files/auth-proxy.service | 13
-rw-r--r--  roles/contiv_auth_proxy/files/cert.pem | 33
-rw-r--r--  roles/contiv_auth_proxy/files/key.pem | 51
-rw-r--r--  roles/contiv_auth_proxy/handlers/main.yml | 2
-rw-r--r--  roles/contiv_auth_proxy/tasks/cleanup.yml | 10
-rw-r--r--  roles/contiv_auth_proxy/tasks/main.yml | 37
-rw-r--r--  roles/contiv_auth_proxy/templates/auth_proxy.j2 | 36
-rw-r--r--  roles/contiv_auth_proxy/tests/inventory | 1
-rw-r--r--  roles/contiv_auth_proxy/tests/test.yml | 5
-rw-r--r--  roles/contiv_auth_proxy/vars/main.yml | 2
-rw-r--r--  roles/contiv_facts/defaults/main.yaml | 9
-rw-r--r--  roles/contiv_facts/tasks/fedora-install.yml | 12
-rw-r--r--  roles/contiv_facts/tasks/main.yml | 65
-rw-r--r--  roles/contiv_facts/tasks/rpm.yml | 8
-rw-r--r--  roles/etcd/defaults/main.yaml | 4
-rw-r--r--  roles/etcd/meta/main.yml | 1
-rw-r--r--  roles/etcd/tasks/auxiliary/drop_etcdctl.yml | 2
-rw-r--r--  roles/etcd/tasks/certificates/fetch_client_certificates_from_ca.yml | 26
-rw-r--r--  roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml | 22
-rw-r--r--  roles/etcd/tasks/migration/add_ttls.yml | 2
-rw-r--r--  roles/etcd/tasks/migration/migrate.yml | 2
-rw-r--r--  roles/etcd/tasks/version_detect.yml | 4
-rw-r--r--  roles/flannel/defaults/main.yaml | 2
-rw-r--r--  roles/flannel/handlers/main.yml | 4
-rw-r--r--  roles/flannel/meta/main.yml | 3
-rw-r--r--  roles/flannel/tasks/main.yml | 12
-rw-r--r--  roles/flannel_register/meta/main.yml | 3
-rw-r--r--  roles/installer_checkpoint/callback_plugins/installer_checkpoint.py | 5
-rw-r--r--  roles/kuryr/meta/main.yml | 5
-rw-r--r--  roles/kuryr/tasks/node.yaml | 2
-rw-r--r--  roles/kuryr/templates/cni-daemonset.yaml.j2 | 19
-rw-r--r--  roles/kuryr/templates/configmap.yaml.j2 | 357
-rw-r--r--  roles/kuryr/templates/controller-deployment.yaml.j2 | 7
-rw-r--r--  roles/lib_openshift/library/conditional_set_fact.py (renamed from roles/openshift_sanitize_inventory/library/conditional_set_fact.py) | 18
-rwxr-xr-x  roles/lib_openshift/src/test/unit/test_oc_scale.py | 351
-rw-r--r--  roles/lib_os_firewall/README.md | 63
-rw-r--r--  roles/lib_utils/action_plugins/generate_pv_pvcs_list.py (renamed from roles/openshift_persistent_volumes/action_plugins/generate_pv_pvcs_list.py) | 0
-rw-r--r--  roles/lib_utils/action_plugins/sanity_checks.py | 181
-rw-r--r--  roles/lib_utils/callback_plugins/aa_version_requirement.py | 60
-rw-r--r--  roles/lib_utils/callback_plugins/openshift_quick_installer.py | 360
-rw-r--r--  roles/lib_utils/filter_plugins/oo_cert_expiry.py (renamed from roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py) | 1
-rw-r--r--  roles/lib_utils/filter_plugins/oo_filters.py | 627
-rw-r--r--  roles/lib_utils/filter_plugins/openshift_aws_filters.py (renamed from roles/openshift_aws/filter_plugins/openshift_aws_filters.py) | 0
-rw-r--r--  roles/lib_utils/filter_plugins/openshift_hosted_filters.py (renamed from roles/openshift_hosted/filter_plugins/openshift_hosted_filters.py) | 0
-rw-r--r--  roles/lib_utils/filter_plugins/openshift_master.py (renamed from roles/openshift_master_facts/filter_plugins/openshift_master.py) | 6
-rwxr-xr-x  roles/lib_utils/library/delegated_serial_command.py (renamed from roles/etcd/library/delegated_serial_command.py) | 0
-rw-r--r--  roles/lib_utils/library/kubeclient_ca.py | 88
-rw-r--r--  roles/lib_utils/library/modify_yaml.py | 117
-rw-r--r--  roles/lib_utils/library/openshift_cert_expiry.py (renamed from roles/openshift_certificate_expiry/library/openshift_cert_expiry.py) | 0
-rw-r--r--  roles/lib_utils/library/openshift_container_binary_sync.py (renamed from roles/openshift_cli/library/openshift_container_binary_sync.py) | 0
-rw-r--r-- [-rwxr-xr-x]  roles/lib_utils/library/os_firewall_manage_iptables.py (renamed from roles/lib_os_firewall/library/os_firewall_manage_iptables.py) | 0
-rw-r--r--  roles/lib_utils/library/rpm_q.py | 72
-rw-r--r--  roles/lib_utils/lookup_plugins/openshift_master_facts_default_predicates.py | 143
-rw-r--r--  roles/lib_utils/lookup_plugins/openshift_master_facts_default_priorities.py | 117
-rw-r--r--  roles/lib_utils/test/conftest.py (renamed from roles/openshift_certificate_expiry/test/conftest.py) | 53
-rw-r--r--  roles/lib_utils/test/openshift_master_facts_bad_input_tests.py (renamed from roles/openshift_master_facts/test/openshift_master_facts_bad_input_tests.py) | 0
-rw-r--r--  roles/lib_utils/test/openshift_master_facts_conftest.py (renamed from roles/openshift_master_facts/test/conftest.py) | 0
-rw-r--r--  roles/lib_utils/test/openshift_master_facts_default_predicates_tests.py (renamed from roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py) | 0
-rw-r--r--  roles/lib_utils/test/openshift_master_facts_default_priorities_tests.py (renamed from roles/openshift_master_facts/test/openshift_master_facts_default_priorities_tests.py) | 0
-rw-r--r--  roles/lib_utils/test/test_fakeopensslclasses.py (renamed from roles/openshift_certificate_expiry/test/test_fakeopensslclasses.py) | 0
-rw-r--r--  roles/lib_utils/test/test_load_and_handle_cert.py (renamed from roles/openshift_certificate_expiry/test/test_load_and_handle_cert.py) | 0
-rw-r--r--  roles/nickhammond.logrotate/tasks/main.yml | 2
-rw-r--r--  roles/nuage_ca/meta/main.yml | 2
-rw-r--r--  roles/nuage_ca/tasks/main.yaml | 2
-rw-r--r--  roles/nuage_common/tasks/main.yml | 6
-rw-r--r--  roles/nuage_master/meta/main.yml | 2
-rw-r--r--  roles/nuage_master/tasks/main.yaml | 18
-rw-r--r--  roles/nuage_master/tasks/serviceaccount.yml | 2
-rw-r--r--  roles/nuage_node/meta/main.yml | 2
-rw-r--r--  roles/nuage_node/tasks/main.yaml | 8
-rw-r--r--  roles/openshift_aws/README.md | 6
-rw-r--r--  roles/openshift_aws/defaults/main.yml | 35
-rw-r--r--  roles/openshift_aws/tasks/build_node_group.yml | 3
-rw-r--r--  roles/openshift_aws/tasks/provision.yml | 17
-rw-r--r--  roles/openshift_aws/tasks/provision_elb.yml | 15
-rw-r--r--  roles/openshift_aws/tasks/provision_instance.yml | 8
-rw-r--r--  roles/openshift_aws/tasks/provision_nodes.yml | 17
-rw-r--r--  roles/openshift_aws/tasks/scale_group.yml | 2
-rw-r--r--  roles/openshift_aws/tasks/wait_for_groups.yml | 1
-rw-r--r--  roles/openshift_builddefaults/meta/main.yml | 1
-rw-r--r--  roles/openshift_buildoverrides/meta/main.yml | 1
-rw-r--r--  roles/openshift_buildoverrides/vars/main.yml | 1
-rw-r--r--  roles/openshift_ca/meta/main.yml | 1
-rw-r--r--  roles/openshift_ca/tasks/main.yml | 23
-rw-r--r--  roles/openshift_certificate_expiry/meta/main.yml | 3
-rw-r--r--  roles/openshift_certificate_expiry/tasks/main.yml | 6
-rw-r--r--  roles/openshift_cli/defaults/main.yml | 2
-rw-r--r--  roles/openshift_cli/meta/main.yml | 1
-rw-r--r--  roles/openshift_cli/tasks/main.yml | 10
-rw-r--r--  roles/openshift_cloud_provider/meta/main.yml | 1
-rw-r--r--  roles/openshift_cloud_provider/tasks/main.yml | 3
-rw-r--r--  roles/openshift_cloud_provider/tasks/vsphere.yml | 6
-rw-r--r--  roles/openshift_cloud_provider/templates/openstack.conf.j2 | 4
-rw-r--r--  roles/openshift_cloud_provider/templates/vsphere.conf.j2 | 15
-rw-r--r--  roles/openshift_cloud_provider/vars/main.yml | 1
-rw-r--r--  roles/openshift_cluster_autoscaler/README.md | 2
-rw-r--r--  roles/openshift_cluster_autoscaler/meta/main.yml | 1
-rw-r--r--  roles/openshift_daemonset_config/defaults/main.yml | 19
-rw-r--r--  roles/openshift_daemonset_config/meta/main.yml | 3
-rw-r--r--  roles/openshift_daemonset_config/tasks/main.yml | 58
-rw-r--r--  roles/openshift_daemonset_config/templates/daemonset.yml.j2 | 142
-rw-r--r--  roles/openshift_default_storage_class/meta/main.yml | 1
-rw-r--r--  roles/openshift_docker_gc/meta/main.yml | 1
-rw-r--r--  roles/openshift_etcd/meta/main.yml | 1
-rw-r--r--  roles/openshift_etcd_client_certificates/meta/main.yml | 3
-rw-r--r--  roles/openshift_etcd_client_certificates/tasks/main.yml | 2
-rw-r--r--  roles/openshift_etcd_facts/meta/main.yml | 1
-rw-r--r--  roles/openshift_etcd_facts/vars/main.yml | 4
-rw-r--r--  roles/openshift_examples/defaults/main.yml | 2
-rwxr-xr-x  roles/openshift_examples/examples-sync.sh | 2
-rw-r--r--  roles/openshift_examples/files/examples/v3.9/db-templates/mariadb-persistent-template.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/v3.9/db-templates/mongodb-persistent-template.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/v3.9/db-templates/mysql-persistent-template.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/v3.9/db-templates/postgresql-persistent-template.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/v3.9/db-templates/redis-persistent-template.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/v3.9/image-streams/image-streams-centos7.json | 23
-rw-r--r--  roles/openshift_examples/files/examples/v3.9/image-streams/image-streams-rhel7.json | 23
-rw-r--r--  roles/openshift_examples/files/examples/v3.9/quickstart-templates/cakephp-mysql-persistent.json | 5
-rw-r--r--  roles/openshift_examples/files/examples/v3.9/quickstart-templates/cakephp-mysql.json | 3
-rw-r--r--  roles/openshift_examples/files/examples/v3.9/quickstart-templates/dancer-mysql-persistent.json | 5
-rw-r--r--  roles/openshift_examples/files/examples/v3.9/quickstart-templates/dancer-mysql.json | 3
-rw-r--r--  roles/openshift_examples/files/examples/v3.9/quickstart-templates/django-postgresql-persistent.json | 5
-rw-r--r--  roles/openshift_examples/files/examples/v3.9/quickstart-templates/django-postgresql.json | 3
-rw-r--r--  roles/openshift_examples/files/examples/v3.9/quickstart-templates/httpd.json | 10
-rw-r--r--  roles/openshift_examples/files/examples/v3.9/quickstart-templates/jenkins-ephemeral-template.json | 11
-rw-r--r--  roles/openshift_examples/files/examples/v3.9/quickstart-templates/jenkins-persistent-template.json | 13
-rw-r--r--  roles/openshift_examples/files/examples/v3.9/quickstart-templates/nodejs-mongodb-persistent.json | 5
-rw-r--r--  roles/openshift_examples/files/examples/v3.9/quickstart-templates/nodejs-mongodb.json | 3
-rw-r--r--  roles/openshift_examples/files/examples/v3.9/quickstart-templates/rails-postgresql-persistent.json | 5
-rw-r--r--  roles/openshift_examples/files/examples/v3.9/quickstart-templates/rails-postgresql.json | 3
-rw-r--r--  roles/openshift_examples/meta/main.yml | 4
-rw-r--r--  roles/openshift_examples/tasks/main.yml | 28
-rw-r--r--  roles/openshift_excluder/tasks/install.yml | 10
-rw-r--r--  roles/openshift_excluder/tasks/verify_excluder.yml | 2
-rw-r--r--  roles/openshift_expand_partition/README.md | 2
-rw-r--r--  roles/openshift_expand_partition/tasks/main.yml | 6
-rw-r--r--  roles/openshift_facts/defaults/main.yml | 10
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py | 195
-rw-r--r--  roles/openshift_grafana/defaults/main.yml | 12
-rw-r--r--  roles/openshift_grafana/files/grafana-ocp-oauth.yml | 661
-rw-r--r--  roles/openshift_grafana/files/grafana-ocp.yml | 76
-rw-r--r--  roles/openshift_grafana/files/openshift-cluster-monitoring.json | 5138
-rw-r--r--  roles/openshift_grafana/meta/main.yml | 13
-rw-r--r--  roles/openshift_grafana/tasks/gf-permissions.yml | 12
-rw-r--r--  roles/openshift_grafana/tasks/main.yml | 122
-rw-r--r--  roles/openshift_health_checker/callback_plugins/zz_failure_summary.py | 2
-rw-r--r--  roles/openshift_health_checker/meta/main.yml | 1
-rw-r--r--  roles/openshift_health_checker/openshift_checks/__init__.py | 50
-rw-r--r--  roles/openshift_health_checker/openshift_checks/disk_availability.py | 2
-rw-r--r--  roles/openshift_health_checker/openshift_checks/docker_image_availability.py | 14
-rw-r--r--  roles/openshift_health_checker/openshift_checks/etcd_traffic.py | 4
-rw-r--r--  roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py | 2
-rw-r--r--  roles/openshift_health_checker/openshift_checks/logging/kibana.py | 13
-rw-r--r--  roles/openshift_health_checker/openshift_checks/mixins.py | 8
-rw-r--r--  roles/openshift_health_checker/openshift_checks/ovs_version.py | 27
-rw-r--r--  roles/openshift_health_checker/openshift_checks/package_version.py | 58
-rw-r--r--  roles/openshift_health_checker/test/docker_image_availability_test.py | 41
-rw-r--r--  roles/openshift_health_checker/test/docker_storage_test.py | 8
-rw-r--r--  roles/openshift_health_checker/test/etcd_traffic_test.py | 12
-rw-r--r--  roles/openshift_health_checker/test/kibana_test.py | 12
-rw-r--r--  roles/openshift_health_checker/test/mixins_test.py | 6
-rw-r--r--  roles/openshift_health_checker/test/ovs_version_test.py | 29
-rw-r--r--  roles/openshift_health_checker/test/package_availability_test.py | 6
-rw-r--r--  roles/openshift_health_checker/test/package_version_test.py | 11
-rw-r--r--  roles/openshift_hosted/defaults/main.yml | 2
-rw-r--r--  roles/openshift_hosted/meta/main.yml | 2
-rw-r--r--  roles/openshift_hosted/tasks/main.yml | 4
-rw-r--r--  roles/openshift_hosted/tasks/registry.yml | 25
-rw-r--r--  roles/openshift_hosted/tasks/registry_storage.yml | 4
-rw-r--r--  roles/openshift_hosted/tasks/router.yml | 15
-rw-r--r--  roles/openshift_hosted/tasks/storage/glusterfs.yml | 2
-rw-r--r--  roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml | 15
-rw-r--r--  roles/openshift_hosted/tasks/wait_for_pod.yml | 12
-rw-r--r--  roles/openshift_hosted_templates/defaults/main.yml | 2
-rw-r--r--  roles/openshift_hosted_templates/meta/main.yml | 4
-rw-r--r--  roles/openshift_hosted_templates/tasks/main.yml | 14
-rw-r--r--  roles/openshift_loadbalancer/defaults/main.yml | 8
-rw-r--r--  roles/openshift_loadbalancer/meta/main.yml | 2
-rw-r--r--  roles/openshift_loadbalancer/tasks/main.yml | 16
-rw-r--r--  roles/openshift_loadbalancer/templates/haproxy.cfg.j2 | 2
-rw-r--r--  roles/openshift_loadbalancer/templates/haproxy.docker.service.j2 | 2
-rw-r--r--  roles/openshift_logging/README.md | 3
-rw-r--r--  roles/openshift_logging/filter_plugins/openshift_logging.py | 34
-rw-r--r--  roles/openshift_logging/library/logging_patch.py | 112
-rw-r--r--  roles/openshift_logging/library/openshift_logging_facts.py | 17
-rw-r--r--  roles/openshift_logging/meta/main.yaml | 1
-rw-r--r--  roles/openshift_logging/tasks/annotate_ops_projects.yaml | 2
-rw-r--r--  roles/openshift_logging/tasks/delete_logging.yaml | 17
-rw-r--r--  roles/openshift_logging/tasks/generate_certs.yaml | 6
-rw-r--r--  roles/openshift_logging/tasks/generate_jks.yaml | 6
-rw-r--r--  roles/openshift_logging/tasks/install_logging.yaml | 48
-rw-r--r--  roles/openshift_logging/tasks/main.yaml | 7
-rw-r--r--  roles/openshift_logging/tasks/patch_configmap_file.yaml | 35
-rw-r--r--  roles/openshift_logging/tasks/patch_configmap_files.yaml | 31
-rw-r--r--  roles/openshift_logging/tasks/procure_server_certs.yaml | 2
-rw-r--r--  roles/openshift_logging/tasks/set_defaults_from_current.yml | 34
-rw-r--r--  roles/openshift_logging/tasks/update_master_config.yaml | 1
-rw-r--r--  roles/openshift_logging_curator/meta/main.yaml | 1
-rw-r--r--  roles/openshift_logging_curator/tasks/main.yaml | 17
-rw-r--r--  roles/openshift_logging_curator/vars/main.yml | 4
-rw-r--r--  roles/openshift_logging_elasticsearch/meta/main.yaml | 1
-rw-r--r--  roles/openshift_logging_elasticsearch/tasks/determine_version.yaml | 2
-rw-r--r--  roles/openshift_logging_elasticsearch/tasks/get_es_version.yml | 42
-rw-r--r--  roles/openshift_logging_elasticsearch/tasks/main.yaml | 54
-rw-r--r--  roles/openshift_logging_elasticsearch/tasks/restart_cluster.yml | 62
-rw-r--r--  roles/openshift_logging_elasticsearch/tasks/restart_es_node.yml | 2
-rw-r--r--  roles/openshift_logging_elasticsearch/templates/es.j2 | 2
-rw-r--r--  roles/openshift_logging_elasticsearch/vars/main.yml | 6
-rw-r--r--  roles/openshift_logging_eventrouter/meta/main.yaml (renamed from roles/openshift_node_facts/meta/main.yml) | 8
-rw-r--r--  roles/openshift_logging_eventrouter/tasks/main.yaml | 4
-rw-r--r--  roles/openshift_logging_fluentd/defaults/main.yml | 1
-rw-r--r--  roles/openshift_logging_fluentd/meta/main.yaml | 1
-rw-r--r--  roles/openshift_logging_fluentd/tasks/label_and_wait.yaml | 3
-rw-r--r--  roles/openshift_logging_fluentd/tasks/main.yaml | 44
-rw-r--r--  roles/openshift_logging_fluentd/vars/main.yml | 4
-rw-r--r--  roles/openshift_logging_kibana/meta/main.yaml | 1
-rw-r--r--  roles/openshift_logging_kibana/tasks/main.yaml | 8
-rw-r--r--  roles/openshift_logging_kibana/vars/main.yml | 4
-rw-r--r--  roles/openshift_logging_mux/defaults/main.yml | 2
-rw-r--r--  roles/openshift_logging_mux/meta/main.yaml | 1
-rw-r--r--  roles/openshift_logging_mux/tasks/main.yaml | 30
-rw-r--r--  roles/openshift_logging_mux/vars/main.yml | 4
-rw-r--r--  roles/openshift_manage_node/meta/main.yml | 1
-rw-r--r--  roles/openshift_manage_node/tasks/main.yml | 7
-rw-r--r--  roles/openshift_manageiq/meta/main.yml | 1
-rw-r--r--  roles/openshift_management/tasks/add_container_provider.yml | 4
-rw-r--r--  roles/openshift_management/tasks/main.yml | 2
-rw-r--r--  roles/openshift_management/tasks/storage/nfs.yml | 6
-rw-r--r--  roles/openshift_master/defaults/main.yml | 19
-rw-r--r--  roles/openshift_master/meta/main.yml | 1
-rw-r--r--  roles/openshift_master/tasks/main.yml | 15
-rw-r--r--  roles/openshift_master/tasks/registry_auth.yml | 2
-rw-r--r--  roles/openshift_master/tasks/set_loopback_context.yml | 8
-rw-r--r--  roles/openshift_master/tasks/system_container.yml | 6
-rw-r--r--  roles/openshift_master/tasks/systemd_units.yml | 6
-rw-r--r--  roles/openshift_master/tasks/upgrade.yml | 2
-rw-r--r--  roles/openshift_master/tasks/upgrade/rpm_upgrade.yml | 23
-rw-r--r--  roles/openshift_master/tasks/upgrade/upgrade_scheduler.yml | 2
-rw-r--r--  roles/openshift_master/templates/atomic-openshift-master.j2 | 2
-rw-r--r--  roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 | 2
-rw-r--r--  roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 | 2
-rw-r--r--  roles/openshift_master/templates/master.yaml.v1.j2 | 24
-rw-r--r--  roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2 | 2
-rw-r--r--  roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2 | 2
-rw-r--r--  roles/openshift_master_certificates/meta/main.yml | 3
-rw-r--r--  roles/openshift_master_certificates/tasks/main.yml | 28
l---------  roles/openshift_master_facts/filter_plugins/oo_filters.py | 1
-rw-r--r--  roles/openshift_master_facts/meta/main.yml | 1
-rw-r--r--  roles/openshift_master_facts/tasks/main.yml | 8
-rw-r--r--  roles/openshift_metrics/meta/main.yaml | 5
-rw-r--r--  roles/openshift_metrics/tasks/generate_certificates.yaml | 2
-rw-r--r--  roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml | 2
-rw-r--r--  roles/openshift_metrics/tasks/install_cassandra.yaml | 2
-rw-r--r--  roles/openshift_metrics/tasks/install_hawkular.yaml | 2
-rw-r--r--  roles/openshift_metrics/tasks/install_heapster.yaml | 2
-rw-r--r--  roles/openshift_metrics/tasks/install_metrics.yaml | 19
-rw-r--r--  roles/openshift_metrics/tasks/main.yaml | 4
-rw-r--r--  roles/openshift_metrics/tasks/oc_apply.yaml | 14
-rw-r--r--  roles/openshift_metrics/tasks/pre_install.yaml | 2
-rw-r--r--  roles/openshift_metrics/tasks/setup_certificate.yaml | 6
-rw-r--r--  roles/openshift_metrics/tasks/start_metrics.yaml | 6
-rw-r--r--  roles/openshift_metrics/tasks/stop_metrics.yaml | 6
-rw-r--r--  roles/openshift_metrics/tasks/uninstall_hosa.yaml | 4
-rw-r--r--  roles/openshift_metrics/tasks/uninstall_metrics.yaml | 15
-rw-r--r--  roles/openshift_metrics/tasks/update_master_config.yaml | 1
-rw-r--r--  roles/openshift_metrics/templates/hawkular_metrics_rc.j2 | 2
-rw-r--r--  roles/openshift_named_certificates/filter_plugins/openshift_named_certificates.py | 21
-rw-r--r--  roles/openshift_named_certificates/meta/main.yml | 1
-rw-r--r--  roles/openshift_named_certificates/tasks/main.yml | 5
-rw-r--r--  roles/openshift_nfs/meta/main.yml | 2
-rw-r--r--  roles/openshift_nfs/tasks/create_export.yml | 2
-rw-r--r--  roles/openshift_node/defaults/main.yml | 77
-rw-r--r--  roles/openshift_node/handlers/main.yml | 2
-rw-r--r--  roles/openshift_node/meta/main.yml | 7
-rw-r--r--  roles/openshift_node/tasks/config.yml | 12
-rw-r--r--  roles/openshift_node/tasks/container_images.yml | 2
-rw-r--r--  roles/openshift_node/tasks/dnsmasq_install.yml | 2
-rw-r--r--  roles/openshift_node/tasks/install.yml | 42
-rw-r--r--  roles/openshift_node/tasks/main.yml | 16
-rw-r--r--  roles/openshift_node/tasks/node_system_container.yml | 4
-rw-r--r--  roles/openshift_node/tasks/openvswitch_system_container.yml | 8
-rw-r--r--  roles/openshift_node/tasks/registry_auth.yml | 2
-rw-r--r--  roles/openshift_node/tasks/storage_plugins/ceph.yml | 2
-rw-r--r--  roles/openshift_node/tasks/storage_plugins/glusterfs.yml | 2
-rw-r--r--  roles/openshift_node/tasks/storage_plugins/iscsi.yml | 2
-rw-r--r--  roles/openshift_node/tasks/storage_plugins/nfs.yml | 2
-rw-r--r--  roles/openshift_node/tasks/systemd_units.yml | 4
-rw-r--r--  roles/openshift_node/tasks/upgrade.yml | 7
-rw-r--r--  roles/openshift_node/tasks/upgrade/config_changes.yml | 10
-rw-r--r--  roles/openshift_node/tasks/upgrade/containerized_upgrade_pull.yml | 6
-rw-r--r--  roles/openshift_node/tasks/upgrade/restart.yml | 2
-rw-r--r--  roles/openshift_node/tasks/upgrade/rpm_upgrade.yml | 4
-rw-r--r--  roles/openshift_node/tasks/upgrade/rpm_upgrade_install.yml | 4
-rw-r--r--  roles/openshift_node/tasks/upgrade/stop_services.yml | 4
-rw-r--r--  roles/openshift_node/tasks/upgrade_pre.yml | 6
-rw-r--r--  roles/openshift_node/templates/node.service.j2 | 2
-rw-r--r--  roles/openshift_node/templates/node.yaml.v1.j2 | 18
-rw-r--r--  roles/openshift_node/templates/openshift.docker.node.dep.service | 10
-rw-r--r--  roles/openshift_node/templates/openshift.docker.node.service | 2
-rw-r--r--  roles/openshift_node/templates/openvswitch.docker.service | 2
-rw-r--r--  roles/openshift_node_certificates/defaults/main.yml | 2
-rw-r--r--  roles/openshift_node_certificates/meta/main.yml | 3
-rw-r--r--  roles/openshift_node_certificates/tasks/main.yml | 33
-rw-r--r--  roles/openshift_node_facts/filter_plugins/openshift_node_facts_filters.py | 32
-rw-r--r--  roles/openshift_node_facts/tasks/main.yml | 25
-rw-r--r--  roles/openshift_node_group/defaults/main.yml | 8
-rw-r--r--  roles/openshift_openstack/defaults/main.yml | 5
-rw-r--r--  roles/openshift_openstack/tasks/check-prerequisites.yml | 2
-rw-r--r--  roles/openshift_openstack/templates/heat_stack.yaml.j2 | 95
-rw-r--r--  roles/openshift_openstack/templates/heat_stack_server.yaml.j2 | 3
-rw-r--r--  roles/openshift_persistent_volumes/meta/main.yml | 1
-rw-r--r--  roles/openshift_persistent_volumes/tasks/main.yml | 3
-rw-r--r--  roles/openshift_persistent_volumes/tasks/pv.yml | 4
-rw-r--r--  roles/openshift_persistent_volumes/tasks/pvc.yml | 4
-rw-r--r--  roles/openshift_persistent_volumes/templates/persistent-volume-claim.yml.j2 | 2
-rw-r--r--  roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2 | 4
-rw-r--r--  roles/openshift_project_request_template/tasks/main.yml | 4
-rw-r--r--  roles/openshift_prometheus/meta/main.yaml | 5
-rw-r--r--  roles/openshift_prometheus/tasks/install_prometheus.yaml | 10
-rw-r--r--  roles/openshift_prometheus/tasks/main.yaml | 2
-rw-r--r--  roles/openshift_provisioners/meta/main.yaml | 1
-rw-r--r--  roles/openshift_provisioners/tasks/install_efs.yaml | 6
-rw-r--r--  roles/openshift_provisioners/tasks/oc_apply.yaml | 22
-rw-r--r--  roles/openshift_provisioners/tasks/uninstall_provisioners.yaml | 6
-rw-r--r--  roles/openshift_repos/tasks/main.yaml | 4
-rw-r--r--  roles/openshift_repos/tasks/rhel_repos.yml | 5
-rw-r--r--  roles/openshift_repos/templates/CentOS-OpenShift-Origin37.repo.j2 | 27
-rw-r--r--  roles/openshift_sanitize_inventory/filter_plugins/openshift_sanitize_inventory.py | 10
-rw-r--r--  roles/openshift_sanitize_inventory/meta/main.yml | 4
-rw-r--r--  roles/openshift_sanitize_inventory/tasks/main.yml | 26
-rw-r--r--  roles/openshift_sanitize_inventory/vars/main.yml | 3
-rw-r--r--  roles/openshift_service_catalog/defaults/main.yml | 1
-rw-r--r--  roles/openshift_service_catalog/tasks/generate_certs.yml | 8
-rw-r--r--  roles/openshift_service_catalog/tasks/install.yml | 26
-rw-r--r--  roles/openshift_service_catalog/tasks/remove.yml | 6
-rw-r--r--  roles/openshift_service_catalog/tasks/start_api_server.yml | 2
-rw-r--r--  roles/openshift_service_catalog/templates/api_server.j2 | 2
-rw-r--r--  roles/openshift_service_catalog/templates/controller_manager.j2 | 6
-rw-r--r--  roles/openshift_storage_glusterfs/defaults/main.yml | 8
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.9/deploy-heketi-template.yml | 133
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.9/gluster-s3-pvcs-template.yml | 67
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.9/gluster-s3-template.yml | 140
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.9/glusterblock-provisioner.yml | 104
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.9/glusterfs-template.yml | 154
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.9/heketi-template.yml | 136
-rw-r--r--  roles/openshift_storage_glusterfs/filter_plugins/openshift_storage_glusterfs.py | 23
-rw-r--r--  roles/openshift_storage_glusterfs/meta/main.yml | 2
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/gluster_s3_deploy.yml | 4
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterblock_deploy.yml | 2
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml | 16
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml | 1
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml | 6
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml | 1
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml | 12
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.9/glusterfs-registry-endpoints.yml.j2 | 12
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.9/glusterfs-registry-service.yml.j2 | 10
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.9/glusterfs-storageclass.yml.j2 | 17
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.9/heketi-endpoints.yml.j2 | 12
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.9/heketi-service.yml.j2 | 10
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.9/heketi.json.j2 | 42
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.9/topology.json.j2 | 49
-rw-r--r--  roles/openshift_storage_nfs/meta/main.yml | 2
-rw-r--r--  roles/openshift_storage_nfs_lvm/README.md | 8
-rw-r--r--  roles/openshift_storage_nfs_lvm/meta/main.yml | 1
-rw-r--r--  roles/openshift_storage_nfs_lvm/tasks/main.yml | 2
-rw-r--r--  roles/openshift_storage_nfs_lvm/tasks/nfs.yml | 2
-rw-r--r--  roles/openshift_version/defaults/main.yml | 2
-rw-r--r--  roles/openshift_version/tasks/check_available_rpms.yml | 10
-rw-r--r--  roles/openshift_version/tasks/first_master.yml | 30
-rw-r--r--  roles/openshift_version/tasks/first_master_containerized_version.yml (renamed from roles/openshift_version/tasks/set_version_containerized.yml) | 7
-rw-r--r--  roles/openshift_version/tasks/first_master_rpm_version.yml | 20
-rw-r--r--  roles/openshift_version/tasks/main.yml | 210
-rw-r--r--  roles/openshift_version/tasks/masters_and_nodes.yml | 42
-rw-r--r--  roles/openshift_version/tasks/set_version_rpm.yml | 24
-rw-r--r--  roles/openshift_web_console/defaults/main.yml | 3
-rw-r--r--  roles/openshift_web_console/meta/main.yaml | 19
-rw-r--r--  roles/openshift_web_console/tasks/install.yml | 111
-rw-r--r--  roles/openshift_web_console/tasks/main.yml | 8
-rw-r--r--  roles/openshift_web_console/tasks/remove.yml | 5
-rw-r--r--  roles/openshift_web_console/tasks/update_console_config.yml | 71
-rw-r--r--  roles/openshift_web_console/vars/default_images.yml | 4
-rw-r--r--  roles/openshift_web_console/vars/main.yml | 6
-rw-r--r--  roles/openshift_web_console/vars/openshift-enterprise.yml | 4
-rw-r--r--  roles/os_firewall/README.md | 4
-rw-r--r--  roles/template_service_broker/meta/main.yml | 2
-rw-r--r--  roles/template_service_broker/tasks/install.yml | 14
-rw-r--r--  roles/template_service_broker/tasks/remove.yml | 6
-rw-r--r--  roles/template_service_broker/vars/default_images.yml | 4
-rw-r--r--  roles/template_service_broker/vars/openshift-enterprise.yml | 4
-rw-r--r--  roles/tuned/tasks/main.yml | 2
452 files changed, 12710 insertions, 2251 deletions
diff --git a/roles/ansible_service_broker/meta/main.yml b/roles/ansible_service_broker/meta/main.yml
index ec4aafb79..65b736500 100644
--- a/roles/ansible_service_broker/meta/main.yml
+++ b/roles/ansible_service_broker/meta/main.yml
@@ -12,4 +12,5 @@ galaxy_info:
categories:
- cloud
dependencies:
+- role: lib_utils
- role: lib_openshift
diff --git a/roles/ansible_service_broker/tasks/install.yml b/roles/ansible_service_broker/tasks/install.yml
index 4ca47d074..f869b5fae 100644
--- a/roles/ansible_service_broker/tasks/install.yml
+++ b/roles/ansible_service_broker/tasks/install.yml
@@ -4,7 +4,7 @@
- name: Set default image variables based on deployment type
include_vars: "{{ item }}"
with_first_found:
- - "{{ openshift_deployment_type | default(deployment_type) }}.yml"
+ - "{{ openshift_deployment_type }}.yml"
- "default_images.yml"
- name: set ansible_service_broker facts
@@ -72,6 +72,15 @@
- apiGroups: ["image.openshift.io", ""]
resources: ["images"]
verbs: ["get", "list"]
+ - apiGroups: ["network.openshift.io"]
+ resources: ["clusternetworks", "netnamespaces"]
+ verbs: ["get"]
+ - apiGroups: ["network.openshift.io"]
+ resources: ["netnamespaces"]
+ verbs: ["update"]
+ - apiGroups: ["networking.k8s.io"]
+ resources: ["networkpolicies"]
+ verbs: ["create", "delete"]
- name: Create asb-access cluster role
oc_clusterrole:
@@ -366,6 +375,11 @@
secret:
secretName: etcd-auth-secret
+- name: set auth name and type facts if needed
+ set_fact:
+ ansible_service_broker_registry_auth_type: "secret"
+ ansible_service_broker_registry_auth_name: "asb-registry-auth"
+ when: ansible_service_broker_registry_user != "" and ansible_service_broker_registry_password != ""
# TODO: saw a oc_configmap in the library, but didn't understand how to get it to do the following:
- name: Create config map for ansible-service-broker
@@ -393,6 +407,8 @@
org: {{ ansible_service_broker_registry_organization }}
tag: {{ ansible_service_broker_registry_tag }}
white_list: {{ ansible_service_broker_registry_whitelist | to_yaml }}
+      auth_type: "{{ ansible_service_broker_registry_auth_type | default('') }}"
+      auth_name: "{{ ansible_service_broker_registry_auth_name | default('') }}"
- type: local_openshift
name: localregistry
namespaces: ['openshift']
@@ -438,6 +454,7 @@
data: "{{ ansible_service_broker_registry_user }}"
- path: password
data: "{{ ansible_service_broker_registry_password }}"
+ when: ansible_service_broker_registry_user != "" and ansible_service_broker_registry_password != ""
- name: Create the Broker resource in the catalog
oc_obj:
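
The `with_first_found` loop above loads the first vars file that exists: a deployment-type-specific file when present, else the role's `default_images.yml`. A minimal sketch of the pattern, assuming a hypothetical role shipping `vars/origin.yml` and `vars/default_images.yml`:

    - name: Set default image variables based on deployment type
      include_vars: "{{ item }}"
      with_first_found:
      - "{{ openshift_deployment_type }}.yml"  # e.g. vars/origin.yml
      - "default_images.yml"                   # fallback when no match

Note that dropping the `default(deployment_type)` fallback assumes `openshift_deployment_type` is always defined by the time this role runs.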
diff --git a/roles/ansible_service_broker/vars/default_images.yml b/roles/ansible_service_broker/vars/default_images.yml
index 248e0363d..0ed1d9674 100644
--- a/roles/ansible_service_broker/vars/default_images.yml
+++ b/roles/ansible_service_broker/vars/default_images.yml
@@ -1,6 +1,6 @@
---
-__ansible_service_broker_image_prefix: ansibleplaybookbundle/
+__ansible_service_broker_image_prefix: ansibleplaybookbundle/origin-
__ansible_service_broker_image_tag: latest
__ansible_service_broker_etcd_image_prefix: quay.io/coreos/
diff --git a/roles/calico/meta/main.yml b/roles/calico/meta/main.yml
index 816c81369..e3997911b 100644
--- a/roles/calico/meta/main.yml
+++ b/roles/calico/meta/main.yml
@@ -13,5 +13,6 @@ galaxy_info:
- cloud
- system
dependencies:
+- role: lib_utils
- role: openshift_facts
- role: openshift_master_facts
diff --git a/roles/calico/tasks/main.yml b/roles/calico/tasks/main.yml
index bbc6edd48..556953a71 100644
--- a/roles/calico/tasks/main.yml
+++ b/roles/calico/tasks/main.yml
@@ -7,7 +7,7 @@
- not (calico_etcd_cert_dir is defined and calico_etcd_ca_cert_file is defined and calico_etcd_cert_file is defined and calico_etcd_key_file is defined and calico_etcd_endpoints is defined)
- name: Calico Node | Generate OpenShift-etcd certs
- include_role:
+ import_role:
name: etcd
tasks_from: client_certificates
when: calico_etcd_ca_cert_file is not defined or calico_etcd_cert_file is not defined or calico_etcd_key_file is not defined or calico_etcd_endpoints is not defined or calico_etcd_cert_dir is not defined
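
The switch from `include_role` to `import_role` here (and throughout this commit) trades dynamic inclusion for static import: imported tasks are resolved at playbook parse time, so tags and `when:` conditions propagate to the individual tasks. A minimal sketch of the two forms, reusing the etcd role from the task above:

    - import_role:          # static: expanded when the playbook is parsed
        name: etcd
        tasks_from: client_certificates

    - include_role:         # dynamic: expanded only when this task executes
        name: etcd
        tasks_from: client_certificates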
diff --git a/roles/calico_master/meta/main.yml b/roles/calico_master/meta/main.yml
index 4d70c79cf..73c94db4e 100644
--- a/roles/calico_master/meta/main.yml
+++ b/roles/calico_master/meta/main.yml
@@ -13,5 +13,6 @@ galaxy_info:
- cloud
- system
dependencies:
+- role: lib_utils
- role: calico
- role: openshift_facts
diff --git a/roles/calico_master/tasks/main.yml b/roles/calico_master/tasks/main.yml
index 16d960d8b..834ebba64 100644
--- a/roles/calico_master/tasks/main.yml
+++ b/roles/calico_master/tasks/main.yml
@@ -19,11 +19,11 @@
- name: Calico Master | Launch Calico Policy Controller
command: >
- {{ openshift.common.client_binary }} create
+ {{ openshift_client_binary }} create
-f {{ mktemp.stdout }}/calico-policy-controller.yml
--config={{ openshift.common.config_base }}/master/admin.kubeconfig
register: calico_create_output
- failed_when: ('already exists' not in calico_create_output.stderr) and ('created' not in calico_create_output.stdout)
+ failed_when: "('already exists' not in calico_create_output.stderr) and ('created' not in calico_create_output.stdout) and calico_create_output.rc != 0"
changed_when: ('created' in calico_create_output.stdout)
- name: Calico Master | Delete temp directory
diff --git a/roles/cockpit-ui/meta/main.yml b/roles/cockpit-ui/meta/main.yml
index 4d619fff6..372c29c28 100644
--- a/roles/cockpit-ui/meta/main.yml
+++ b/roles/cockpit-ui/meta/main.yml
@@ -12,4 +12,6 @@ galaxy_info:
categories:
- cloud
dependencies:
+- role: lib_utils
- role: lib_openshift
+- role: openshift_facts
diff --git a/roles/cockpit-ui/tasks/main.yml b/roles/cockpit-ui/tasks/main.yml
index f60912033..d4174d879 100644
--- a/roles/cockpit-ui/tasks/main.yml
+++ b/roles/cockpit-ui/tasks/main.yml
@@ -39,7 +39,7 @@
- name: Deploy registry-console
command: >
- {{ openshift.common.client_binary }} new-app --template=registry-console
+ {{ openshift_client_binary }} new-app --template=registry-console
{% if openshift_cockpit_deployer_prefix is defined %}-p IMAGE_PREFIX="{{ openshift_cockpit_deployer_prefix }}"{% endif %}
{% if openshift_cockpit_deployer_basename is defined %}-p IMAGE_BASENAME="{{ openshift_cockpit_deployer_basename }}"{% endif %}
{% if openshift_cockpit_deployer_version is defined %}-p IMAGE_VERSION="{{ openshift_cockpit_deployer_version }}"{% endif %}
diff --git a/roles/cockpit/meta/main.yml b/roles/cockpit/meta/main.yml
index 8c0ed3cb8..07e466f04 100644
--- a/roles/cockpit/meta/main.yml
+++ b/roles/cockpit/meta/main.yml
@@ -12,4 +12,4 @@ galaxy_info:
categories:
- cloud
dependencies:
-- role: lib_os_firewall
+- role: lib_utils
diff --git a/roles/cockpit/tasks/main.yml b/roles/cockpit/tasks/main.yml
index fc13afed3..577cd7daf 100644
--- a/roles/cockpit/tasks/main.yml
+++ b/roles/cockpit/tasks/main.yml
@@ -10,7 +10,7 @@
- cockpit-bridge
- cockpit-docker
- "{{ cockpit_plugins }}"
- when: not openshift.common.is_containerized | bool
+ when: not openshift_is_containerized | bool
register: result
until: result is succeeded
@@ -19,4 +19,4 @@
name: cockpit.socket
enabled: true
state: started
- when: not openshift.common.is_containerized | bool
+ when: not openshift_is_containerized | bool
diff --git a/roles/container_runtime/README.md b/roles/container_runtime/README.md
index 51f469aaf..665b1b012 100644
--- a/roles/container_runtime/README.md
+++ b/roles/container_runtime/README.md
@@ -5,7 +5,7 @@ Ensures docker package or system container is installed, and optionally raises t
container-daemon.json items may be found at https://docs.docker.com/engine/reference/commandline/dockerd/#daemon-configuration-file
-This role is designed to be used with include_role and tasks_from.
+This role is designed to be used with import_role and tasks_from.
Entry points
------------
@@ -30,7 +30,7 @@ Example Playbook
- hosts: servers
tasks:
- - include_role: container_runtime
+ - import_role: container_runtime
tasks_from: package_docker.yml
License
diff --git a/roles/container_runtime/defaults/main.yml b/roles/container_runtime/defaults/main.yml
index dd185cb38..d0e37e2f4 100644
--- a/roles/container_runtime/defaults/main.yml
+++ b/roles/container_runtime/defaults/main.yml
@@ -2,8 +2,6 @@
docker_cli_auth_config_path: '/root/.docker'
openshift_docker_signature_verification: False
-repoquery_cmd: "{{ 'dnf repoquery --latest-limit 1 -d 0' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins' }}"
-
openshift_docker_alternative_creds: False
# oreg_url is defined by user input.
@@ -13,7 +11,7 @@ oreg_auth_credentials_replace: False
openshift_docker_use_system_container: False
openshift_docker_disable_push_dockerhub: False # bool
openshift_docker_selinux_enabled: True
-openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False)) else 'docker' }}"
+openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False) | bool) else 'docker' }}"
openshift_docker_hosted_registry_insecure: False # bool
@@ -55,11 +53,25 @@ openshift_docker_is_node_or_master: "{{ True if inventory_hostname in (groups['o
docker_alt_storage_path: /var/lib/containers/docker
docker_default_storage_path: /var/lib/docker
+docker_storage_path: "{{ docker_default_storage_path }}"
+docker_storage_size: 40G
+docker_storage_setup_options:
+ vg: docker_vg
+ data_size: 99%VG
+ storage_driver: overlay2
+ root_lv_name: docker-root-lv
+ root_lv_size: 100%FREE
+ root_lv_mount_path: "{{ docker_storage_path }}"
+docker_storage_extra_options:
+- "--storage-opt overlay2.override_kernel_check=true"
+- "--storage-opt overlay2.size={{ docker_storage_size }}"
+- "--graph={{ docker_storage_path}}"
+
# Set local versions of facts that must be in json format for container-daemon.json
# NOTE: When jinja2.9+ is used the container-daemon.json file can move to using tojson
l_docker_log_options: "{{ l2_docker_log_options | to_json }}"
-l_docker_log_options_dict: "{{ l2_docker_log_options | oo_list_to_dict | to_json }}"
+l_docker_log_options_dict: "{{ l2_docker_log_options | lib_utils_oo_list_to_dict | to_json }}"
l_docker_additional_registries: "{{ l2_docker_additional_registries | to_json }}"
l_docker_blocked_registries: "{{ l2_docker_blocked_registries | to_json }}"
l_docker_insecure_registries: "{{ l2_docker_insecure_registries | to_json }}"
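
The `lib_utils_` prefix marks filters that now live in the consolidated lib_utils role. Assuming `lib_utils_oo_list_to_dict` behaves like the old `oo_list_to_dict` (turning a list of `key=value` strings into a mapping), a hypothetical input/output sketch:

    # given:
    l2_docker_log_options:
    - "max-size=50m"
    - "max-file=5"
    # then "{{ l2_docker_log_options | lib_utils_oo_list_to_dict | to_json }}"
    # would render: {"max-size": "50m", "max-file": "5"}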
diff --git a/roles/container_runtime/meta/main.yml b/roles/container_runtime/meta/main.yml
index 02fceb745..3bc2607fb 100644
--- a/roles/container_runtime/meta/main.yml
+++ b/roles/container_runtime/meta/main.yml
@@ -11,5 +11,5 @@ galaxy_info:
- 7
dependencies:
- role: lib_openshift
-- role: lib_os_firewall
- role: lib_utils
+- role: openshift_facts
diff --git a/roles/container_runtime/tasks/common/post.yml b/roles/container_runtime/tasks/common/post.yml
index d790eb2c0..23fd8528a 100644
--- a/roles/container_runtime/tasks/common/post.yml
+++ b/roles/container_runtime/tasks/common/post.yml
@@ -11,7 +11,7 @@
- meta: flush_handlers
# This needs to run after docker is restarted to account for proxy settings.
-# registry_auth is called directly with include_role in some places, so we
+# registry_auth is called directly with import_role in some places, so we
# have to put it in the root of the tasks/ directory.
- include_tasks: ../registry_auth.yml
@@ -22,5 +22,5 @@
- include_tasks: setup_docker_symlink.yml
when:
- - openshift_use_crio
+ - openshift_use_crio | bool
- dockerstat.stat.islnk is defined and not (dockerstat.stat.islnk | bool)
diff --git a/roles/container_runtime/tasks/common/syscontainer_packages.yml b/roles/container_runtime/tasks/common/syscontainer_packages.yml
index b41122880..d429047e6 100644
--- a/roles/container_runtime/tasks/common/syscontainer_packages.yml
+++ b/roles/container_runtime/tasks/common/syscontainer_packages.yml
@@ -4,7 +4,7 @@
package:
name: container-selinux
state: present
- when: not openshift.common.is_atomic | bool
+ when: not openshift_is_atomic | bool
register: result
until: result is succeeded
@@ -13,7 +13,7 @@
package:
name: atomic
state: present
- when: not openshift.common.is_atomic | bool
+ when: not openshift_is_atomic | bool
register: result
until: result is succeeded
@@ -23,6 +23,6 @@
package:
name: runc
state: present
- when: not openshift.common.is_atomic | bool
+ when: not openshift_is_atomic | bool
register: result
until: result is succeeded
diff --git a/roles/container_runtime/tasks/docker_storage_setup_overlay.yml b/roles/container_runtime/tasks/docker_storage_setup_overlay.yml
new file mode 100644
index 000000000..782c002e3
--- /dev/null
+++ b/roles/container_runtime/tasks/docker_storage_setup_overlay.yml
@@ -0,0 +1,10 @@
+---
+- name: Setup the docker-storage for overlay
+ template:
+ src: docker_storage_setup.j2
+ dest: /etc/sysconfig/docker-storage-setup
+ owner: root
+ group: root
+ mode: 0664
+ when:
+ - container_runtime_docker_storage_type == 'overlay2'
diff --git a/roles/container_runtime/tasks/docker_upgrade_check.yml b/roles/container_runtime/tasks/docker_upgrade_check.yml
index 6731963dd..8dd916e79 100644
--- a/roles/container_runtime/tasks/docker_upgrade_check.yml
+++ b/roles/container_runtime/tasks/docker_upgrade_check.yml
@@ -21,6 +21,7 @@
retries: 4
until: curr_docker_version is succeeded
changed_when: false
+ when: not openshift_is_atomic | bool
- name: Get latest available version of Docker
command: >
@@ -29,7 +30,9 @@
retries: 4
until: avail_docker_version is succeeded
# Don't expect docker rpm to be available on hosts that don't already have it installed:
- when: pkg_check.rc == 0
+ when:
+ - not openshift_is_atomic | bool
+ - pkg_check.rc == 0
failed_when: false
changed_when: false
@@ -37,9 +40,10 @@
msg: This playbook requires access to Docker 1.12 or later
# Disable the 1.12 requirement if the user set a specific Docker version
when:
- - docker_version is not defined
- - docker_upgrade is not defined or docker_upgrade | bool == True
- - (pkg_check.rc == 0 and (avail_docker_version.stdout == "" or avail_docker_version.stdout is version_compare('1.12','<')))
+ - not openshift_is_atomic | bool
+ - docker_version is not defined
+ - docker_upgrade is not defined or docker_upgrade | bool == True
+ - (pkg_check.rc == 0 and (avail_docker_version.stdout == "" or avail_docker_version.stdout is version_compare('1.12','<')))
# Default l_docker_upgrade to False, we'll set to True if an upgrade is required:
- set_fact:
@@ -48,27 +52,30 @@
# Make sure a docker_version is set if none was requested:
- set_fact:
docker_version: "{{ avail_docker_version.stdout }}"
- when: pkg_check.rc == 0 and docker_version is not defined
+ when:
+ - not openshift_is_atomic | bool
+ - pkg_check.rc == 0 and docker_version is not defined
- name: Flag for Docker upgrade if necessary
set_fact:
l_docker_upgrade: True
when:
- - pkg_check.rc == 0
- - curr_docker_version.stdout is version_compare(docker_version,'<')
+ - not openshift_is_atomic | bool
+ - pkg_check.rc == 0
+ - curr_docker_version.stdout is version_compare(docker_version,'<')
# Additional checks for Atomic hosts:
- name: Determine available Docker
shell: "rpm -q --queryformat '---\ncurr_version: %{VERSION}\navail_version: \n' docker"
register: g_atomic_docker_version_result
- when: openshift.common.is_atomic | bool
+ when: openshift_is_atomic | bool
- set_fact:
l_docker_version: "{{ g_atomic_docker_version_result.stdout | from_yaml }}"
- when: openshift.common.is_atomic | bool
+ when: openshift_is_atomic | bool
- fail:
msg: This playbook requires access to Docker 1.12 or later
when:
- - openshift.common.is_atomic | bool
- - l_docker_version.avail_version | default(l_docker_version.curr_version, true) is version_compare('1.12','<')
+ - openshift_is_atomic | bool
+ - l_docker_version.avail_version | default(l_docker_version.curr_version, true) is version_compare('1.12','<')
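
On Atomic hosts the installed and available Docker versions are recovered by having `rpm -q` print a small YAML document, which `from_yaml` then parses into the `l_docker_version` fact. A sketch of the parsed result, assuming Docker 1.12.6 is installed and no newer version is staged:

    l_docker_version:
      curr_version: "1.12.6"
      avail_version: null   # falls back to curr_version via default(..., true)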
diff --git a/roles/container_runtime/tasks/main.yml b/roles/container_runtime/tasks/main.yml
index 96d8606c6..07da831c4 100644
--- a/roles/container_runtime/tasks/main.yml
+++ b/roles/container_runtime/tasks/main.yml
@@ -1,2 +1,2 @@
---
-# This role is meant to be used with include_role and tasks_from.
+# This role is meant to be used with import_role and tasks_from.
diff --git a/roles/container_runtime/tasks/package_docker.yml b/roles/container_runtime/tasks/package_docker.yml
index d9d4037dd..d6e7e7fed 100644
--- a/roles/container_runtime/tasks/package_docker.yml
+++ b/roles/container_runtime/tasks/package_docker.yml
@@ -3,7 +3,7 @@
- name: Get current installed Docker version
command: "{{ repoquery_installed }} --qf '%{version}' docker"
- when: not openshift.common.is_atomic | bool
+ when: not openshift_is_atomic | bool
register: curr_docker_version
retries: 4
until: curr_docker_version is succeeded
@@ -20,7 +20,7 @@
name: "docker{{ '-' + docker_version if docker_version is defined else '' }}"
state: present
when:
- - not (openshift.common.is_atomic | bool)
+ - not (openshift_is_atomic | bool)
- not (curr_docker_version is skipped)
- not (curr_docker_version.stdout != '')
register: result
@@ -48,7 +48,7 @@
lineinfile:
dest: /etc/sysconfig/docker
regexp: '^{{ item.reg_conf_var }}=.*$'
- line: "{{ item.reg_conf_var }}='{{ item.reg_fact_val | oo_prepend_strings_in_list(item.reg_flag ~ ' ') | join(' ') }}'"
+ line: "{{ item.reg_conf_var }}='{{ item.reg_fact_val | lib_utils_oo_prepend_strings_in_list(item.reg_flag ~ ' ') | join(' ') }}'"
when:
- item.reg_fact_val != []
- docker_check.stat.isreg is defined
@@ -101,7 +101,7 @@
line: "OPTIONS='\
{% if ansible_selinux.status | default(None) == 'enabled' and openshift_docker_selinux_enabled | default(true) | bool %} --selinux-enabled {% endif %} \
{% if openshift_docker_log_driver | bool %} --log-driver {{ openshift_docker_log_driver }}{% endif %} \
- {% if l2_docker_log_options != [] %} {{ l2_docker_log_options | oo_split() | oo_prepend_strings_in_list('--log-opt ') | join(' ')}}{% endif %} \
+ {% if l2_docker_log_options != [] %} {{ l2_docker_log_options | lib_utils_oo_split() | lib_utils_oo_prepend_strings_in_list('--log-opt ') | join(' ')}}{% endif %} \
{% if openshift_docker_hosted_registry_insecure and (openshift_docker_hosted_registry_network | bool) %} --insecure-registry={{ openshift_docker_hosted_registry_network }} {% endif %} \
{% if docker_options is defined %} {{ docker_options }}{% endif %} \
{% if openshift_docker_options %} {{ openshift_docker_options }}{% endif %} \
diff --git a/roles/container_runtime/tasks/registry_auth.yml b/roles/container_runtime/tasks/registry_auth.yml
index 2c7bc5711..4f1abd59a 100644
--- a/roles/container_runtime/tasks/registry_auth.yml
+++ b/roles/container_runtime/tasks/registry_auth.yml
@@ -15,6 +15,7 @@
- not openshift_docker_alternative_creds | bool
- oreg_auth_user is defined
- (not docker_cli_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
+ no_log: True
# docker_creds is a custom module from lib_utils
# 'docker login' requires a docker.service running on the local host, this is an
@@ -30,3 +31,4 @@
- openshift_docker_alternative_creds | bool
- oreg_auth_user is defined
- (not docker_cli_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
+ no_log: True
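
`no_log: True` keeps the registry credentials out of task output and logs even when the task fails. The same guard applies to any credential-bearing task; a minimal hypothetical sketch:

    - name: Log in to a protected registry
      command: "docker login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} registry.example.com"
      no_log: True   # never echo the password into ansible output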
diff --git a/roles/container_runtime/tasks/systemcontainer_crio.yml b/roles/container_runtime/tasks/systemcontainer_crio.yml
index 61f122f3c..d588f2618 100644
--- a/roles/container_runtime/tasks/systemcontainer_crio.yml
+++ b/roles/container_runtime/tasks/systemcontainer_crio.yml
@@ -3,7 +3,7 @@
- name: Check we are not using node as a Docker container with CRI-O
fail: msg='Cannot use CRI-O with node configured as a Docker container'
when:
- - openshift.common.is_containerized | bool
+ - openshift_is_containerized | bool
- not l_is_node_system_container | bool
- include_tasks: common/pre.yml
@@ -81,6 +81,17 @@
dest: /etc/cni/net.d/openshift-sdn.conf
src: 80-openshift-sdn.conf.j2
+- name: Create /etc/sysconfig/crio-storage
+ copy:
+ content: ""
+ dest: /etc/sysconfig/crio-storage
+ force: no
+
+- name: Create /etc/sysconfig/crio-network
+ template:
+ dest: /etc/sysconfig/crio-network
+ src: crio-network.j2
+
- name: Start the CRI-O service
systemd:
name: "cri-o"
@@ -93,4 +104,4 @@
# 'docker login'
- include_tasks: common/post.yml
vars:
- openshift_docker_alternative_creds: "{{ openshift_use_crio_only }}"
+ openshift_docker_alternative_creds: "{{ openshift_use_crio_only | bool }}"
diff --git a/roles/container_runtime/tasks/systemcontainer_docker.yml b/roles/container_runtime/tasks/systemcontainer_docker.yml
index 639585367..5f715cd21 100644
--- a/roles/container_runtime/tasks/systemcontainer_docker.yml
+++ b/roles/container_runtime/tasks/systemcontainer_docker.yml
@@ -18,7 +18,7 @@
# Make sure Docker is installed so we are able to use the client
- name: Install Docker so we can use the client
package: name=docker{{ '-' + docker_version if docker_version is defined else '' }} state=present
- when: not openshift.common.is_atomic | bool
+ when: not openshift_is_atomic | bool
register: result
until: result is succeeded
@@ -42,6 +42,12 @@
- debug:
var: l_docker_image
+# Do the authentication before pulling the container engine system container
+# as the pull might be from an authenticated registry.
+- include_tasks: registry_auth.yml
+ vars:
+ openshift_docker_alternative_creds: True
+
# NOTE: no_proxy added as a workaround until https://github.com/projectatomic/atomic/pull/999 is released
- name: Pre-pull Container Engine System Container image
command: "atomic pull --storage ostree {{ l_docker_image }}"
diff --git a/roles/container_runtime/templates/crio-network.j2 b/roles/container_runtime/templates/crio-network.j2
new file mode 100644
index 000000000..763be97d7
--- /dev/null
+++ b/roles/container_runtime/templates/crio-network.j2
@@ -0,0 +1,9 @@
+{% if 'http_proxy' in openshift.common %}
+HTTP_PROXY={{ openshift.common.http_proxy }}
+{% endif %}
+{% if 'https_proxy' in openshift.common %}
+HTTPS_PROXY={{ openshift.common.https_proxy }}
+{% endif %}
+{% if 'no_proxy' in openshift.common %}
+NO_PROXY={{ openshift.common.no_proxy }}
+{% endif %}
diff --git a/roles/container_runtime/templates/docker_storage_setup.j2 b/roles/container_runtime/templates/docker_storage_setup.j2
new file mode 100644
index 000000000..b056087e0
--- /dev/null
+++ b/roles/container_runtime/templates/docker_storage_setup.j2
@@ -0,0 +1,12 @@
+# Edit this file to override any configuration options specified in
+# /usr/lib/docker-storage-setup/docker-storage-setup.
+#
+# For more details refer to "man docker-storage-setup"
+DEVS={{ container_runtime_docker_storage_setup_device }}
+VG={{ docker_storage_setup_options.vg }}
+DATA_SIZE={{ docker_storage_setup_options.data_size }}
+STORAGE_DRIVER="{{ docker_storage_setup_options.storage_driver }}"
+CONTAINER_ROOT_LV_NAME="{{ docker_storage_setup_options.root_lv_name }}"
+CONTAINER_ROOT_LV_SIZE="{{ docker_storage_setup_options.root_lv_size }}"
+CONTAINER_ROOT_LV_MOUNT_PATH="{{ docker_storage_setup_options.root_lv_mount_path }}"
+EXTRA_STORAGE_OPTIONS="{{ docker_storage_extra_options | join(' ') }}"
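
With the storage defaults added to defaults/main.yml above and a hypothetical device of /dev/vdb, this template would render /etc/sysconfig/docker-storage-setup roughly as:

    DEVS=/dev/vdb
    VG=docker_vg
    DATA_SIZE=99%VG
    STORAGE_DRIVER="overlay2"
    CONTAINER_ROOT_LV_NAME="docker-root-lv"
    CONTAINER_ROOT_LV_SIZE="100%FREE"
    CONTAINER_ROOT_LV_MOUNT_PATH="/var/lib/docker"
    EXTRA_STORAGE_OPTIONS="--storage-opt overlay2.override_kernel_check=true --storage-opt overlay2.size=40G --graph=/var/lib/docker"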
diff --git a/roles/contiv/README.md b/roles/contiv/README.md
index fa36039d9..ce414f9fb 100644
--- a/roles/contiv/README.md
+++ b/roles/contiv/README.md
@@ -19,8 +19,8 @@ Install Contiv components (netmaster, netplugin, contiv_etcd) on Master and Mini
* ``openshift_use_contiv=True``
* ``openshift_use_openshift_sdn=False``
* ``os_sdn_network_plugin_name='cni'``
-* ``netmaster_interface=eth0``
-* ``netplugin_interface=eth1``
+* ``contiv_netmaster_interface=eth0``
+* ``contiv_netplugin_interface=eth1``
* ref. Openshift docs Contiv section for more details
## Example bare metal deployment of Openshift + Contiv
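
Every Contiv variable gains a `contiv_` prefix in this commit, so inventories written against the old names must be updated. A hedged inventory sketch using the renamed knobs from the README:

    [OSEv3:vars]
    openshift_use_contiv=True
    openshift_use_openshift_sdn=False
    os_sdn_network_plugin_name='cni'
    contiv_netmaster_interface=eth0
    contiv_netplugin_interface=eth1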
diff --git a/roles/contiv/defaults/main.yml b/roles/contiv/defaults/main.yml
index aa976d921..4869abc61 100644
--- a/roles/contiv/defaults/main.yml
+++ b/roles/contiv/defaults/main.yml
@@ -1,51 +1,63 @@
---
# The version of Contiv binaries to use
-contiv_version: 1.1.1
+contiv_version: 1.2.0
# The version of cni binaries
-cni_version: v0.4.0
+contiv_cni_version: v0.4.0
+
+# If the node we are deploying to is to be a contiv master.
+contiv_master: false
contiv_default_subnet: "10.128.0.0/16"
contiv_default_gw: "10.128.254.254"
-# TCP port that Netmaster listens for network connections
-netmaster_port: 9999
-# Default for contiv_role
-contiv_role: netmaster
+# Ports netmaster listens on
+contiv_netmaster_port: 9999
+contiv_netmaster_port_proto: tcp
+contiv_ofnet_master_port: 9001
+contiv_ofnet_master_port_proto: tcp
+# Ports netplugin listens on
+contiv_netplugin_port: 6640
+contiv_netplugin_port_proto: tcp
+contiv_ofnet_vxlan_port: 9002
+contiv_ofnet_vxlan_port_proto: tcp
+contiv_ovs_port: 9003
+contiv_ovs_port_proto: tcp
-# TCP port that Netplugin listens for network connections
-netplugin_port: 6640
-contiv_rpc_port1: 9001
-contiv_rpc_port2: 9002
-contiv_rpc_port3: 9003
+contiv_vxlan_port: 4789
+contiv_vxlan_port_proto: udp
# Interface used by Netplugin for inter-host traffic when encap_mode is vlan.
# The interface must support 802.1Q trunking.
-netplugin_interface: "eno16780032"
+contiv_netplugin_interface: "eno16780032"
# IP address of the interface used for control communication within the cluster
# It needs to be reachable from all nodes in the cluster.
-netplugin_ctrl_ip: "{{ hostvars[inventory_hostname]['ansible_' + netplugin_interface].ipv4.address }}"
+contiv_netplugin_ctrl_ip: "{{ hostvars[inventory_hostname]['ansible_' + contiv_netplugin_interface].ipv4.address }}"
# IP used to terminate vxlan tunnels
-netplugin_vtep_ip: "{{ hostvars[inventory_hostname]['ansible_' + netplugin_interface].ipv4.address }}"
+contiv_netplugin_vtep_ip: "{{ hostvars[inventory_hostname]['ansible_' + contiv_netplugin_interface].ipv4.address }}"
# Interface used to bind Netmaster service
-netmaster_interface: "{{ netplugin_interface }}"
+contiv_netmaster_interface: "{{ contiv_netplugin_interface }}"
+
+# IP address of the interface used for control communication within the cluster
+# It needs to be reachable from all nodes in the cluster.
+contiv_netmaster_ctrl_ip: "{{ hostvars[inventory_hostname]['ansible_' + contiv_netmaster_interface].ipv4.address }}"
# Path to the contiv binaries
-bin_dir: /usr/bin
+contiv_bin_dir: /usr/bin
# Path to the contivk8s cni binary
-cni_bin_dir: /opt/cni/bin
+contiv_cni_bin_dir: /opt/cni/bin
# Path to cni archive download directory
-cni_download_dir: /tmp
+contiv_cni_download_dir: /tmp
# URL for cni binaries
-cni_bin_url_base: "https://github.com/containernetworking/cni/releases/download/"
-cni_bin_url: "{{ cni_bin_url_base }}/{{ cni_version }}/cni-{{ cni_version }}.tbz2"
+contiv_cni_bin_url_base: "https://github.com/containernetworking/cni/releases/download/"
+contiv_cni_bin_url: "{{ contiv_cni_bin_url_base }}/{{ contiv_cni_version }}/cni-{{ contiv_cni_version }}.tbz2"
# Contiv config directory
@@ -60,11 +72,11 @@ contiv_download_url_base: "https://github.com/contiv/netplugin/releases/download
contiv_download_url: "{{ contiv_download_url_base }}/{{ contiv_version }}/netplugin-{{ contiv_version }}.tar.bz2"
# This is where kubelet looks for plugin files
-kube_plugin_dir: /usr/libexec/kubernetes/kubelet-plugins/net/exec
+contiv_kube_plugin_dir: /usr/libexec/kubernetes/kubelet-plugins/net/exec
# Specifies routed mode vs bridged mode for networking (bridge | routing)
# if you are using an external router for all routing, you should select bridge here
-netplugin_fwd_mode: bridge
+contiv_netplugin_fwd_mode: routing
# Contiv fabric mode aci|default
contiv_fabric_mode: default
@@ -73,10 +85,10 @@ contiv_fabric_mode: default
contiv_vlan_range: "2900-3000"
# Encapsulation type vlan|vxlan to use for instantiating container networks
-contiv_encap_mode: vlan
+contiv_encap_mode: vxlan
# Backend used by Netplugin for instantiating container networks
-netplugin_driver: ovs
+contiv_netplugin_driver: ovs
# Create a default Contiv network for use by pods
contiv_default_network: true
@@ -85,39 +97,80 @@ contiv_default_network: true
contiv_default_network_tag: ""
#SRFIXME (use the openshift variables)
-https_proxy: ""
-http_proxy: ""
-no_proxy: ""
+contiv_https_proxy: ""
+contiv_http_proxy: ""
+contiv_no_proxy: ""
# The following are aci specific parameters when contiv_fabric_mode: aci is set.
# Otherwise, you can ignore these.
-apic_url: ""
-apic_username: ""
-apic_password: ""
-apic_leaf_nodes: ""
-apic_phys_dom: ""
-apic_contracts_unrestricted_mode: no
-apic_epg_bridge_domain: not_specified
+contiv_apic_url: ""
+contiv_apic_username: ""
+contiv_apic_password: ""
+contiv_apic_leaf_nodes: ""
+contiv_apic_phys_dom: ""
+contiv_apic_contracts_unrestricted_mode: no
+contiv_apic_epg_bridge_domain: not_specified
apic_configure_default_policy: false
-apic_default_external_contract: "uni/tn-common/brc-default"
-apic_default_app_profile: "contiv-infra-app-profile"
-is_atomic: False
-kube_cert_dir: "/data/src/github.com/openshift/origin/openshift.local.config/master"
-master_name: "{{ groups['masters'][0] }}"
-contiv_etcd_port: 22379
-etcd_url: "{{ hostvars[groups['masters'][0]]['ansible_' + netmaster_interface].ipv4.address }}:{{ contiv_etcd_port }}"
-kube_ca_cert: "{{ kube_cert_dir }}/ca.crt"
-kube_key: "{{ kube_cert_dir }}/admin.key"
-kube_cert: "{{ kube_cert_dir }}/admin.crt"
-kube_master_api_port: 8443
+contiv_apic_default_external_contract: "uni/tn-common/brc-default"
+contiv_apic_default_app_profile: "contiv-infra-app-profile"
+contiv_kube_cert_dir: "/data/src/github.com/openshift/origin/openshift.local.config/master"
+contiv_kube_ca_cert: "{{ contiv_kube_cert_dir }}/ca.crt"
+contiv_kube_key: "{{ contiv_kube_cert_dir }}/admin.key"
+contiv_kube_cert: "{{ contiv_kube_cert_dir }}/admin.crt"
+contiv_kube_master_api_port: 8443
+contiv_kube_master_api_port_proto: tcp
# contivh1 default subnet and gateway
-#contiv_h1_subnet_default: "132.1.1.0/24"
-#contiv_h1_gw_default: "132.1.1.1"
contiv_h1_subnet_default: "10.129.0.0/16"
contiv_h1_gw_default: "10.129.0.1"
# contiv default private subnet for ext access
contiv_private_ext_subnet: "10.130.0.0/16"
-openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False)) else 'docker' }}"
+contiv_openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False) | bool) else 'docker' }}"
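
The inline Jinja conditional above resolves at render time; as a sketch of the two outcomes:

    openshift_docker_use_system_container=true   ->  "container-engine"
    unset or false (the default)                 ->  "docker"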
+
+contiv_api_proxy_port: 10000
+contiv_api_proxy_port_proto: tcp
+contiv_api_proxy_image_repo: contiv/auth_proxy
+contiv_api_proxy_ip: "{{ hostvars[inventory_hostname]['ansible_' + contiv_netmaster_interface].ipv4.address }}"
+
+contiv_etcd_system_user: contivetcd
+contiv_etcd_system_uid: 823
+contiv_etcd_system_group: contivetcd
+contiv_etcd_system_gid: 823
+contiv_etcd_port: 22379
+contiv_etcd_port_proto: tcp
+contiv_etcd_peer_port: 22380
+contiv_etcd_peer_port_proto: tcp
+contiv_etcd_url: "http://127.0.0.1:{{ contiv_etcd_port }}"
+contiv_etcd_init_image_repo: ferest/etcd-initer
+contiv_etcd_init_image_tag: latest
+contiv_etcd_image_repo: quay.io/coreos/etcd
+contiv_etcd_image_tag: v3.2.4
+contiv_etcd_conf_dir: /etc/contiv-etcd
+contiv_etcd_data_dir: /var/lib/contiv-etcd
+contiv_etcd_peers: |-
+ {% for host in groups.oo_masters_to_config -%}
+ {{ host }}=http://{{ hostvars[host]['ip'] | default(hostvars[host].ansible_default_ipv4['address']) }}:{{ contiv_etcd_peer_port }}{% if not loop.last %},{% endif %}
+ {%- endfor %}
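
As a sketch of what this template renders: for two masters named master1 and master2 with control IPs 10.0.0.1 and 10.0.0.2 (hypothetical values), contiv_etcd_peers becomes a single comma-separated etcd initial-cluster string:

    master1=http://10.0.0.1:22380,master2=http://10.0.0.2:22380

Each entry pairs an inventory hostname with its peer URL on contiv_etcd_peer_port.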
+
+# List of port/protocol pairs to allow inbound access to on every host
+# netplugin runs on, from all host IPs in the cluster.
+contiv_netplugin_internal: [ "{{ contiv_ofnet_vxlan_port }}/{{ contiv_ofnet_vxlan_port_proto }}",
+ "{{ contiv_ovs_port }}/{{ contiv_ovs_port_proto }}",
+ "{{ contiv_vxlan_port }}/{{ contiv_vxlan_port_proto }}" ]
+# Allow all forwarded traffic in and out of these interfaces.
+contiv_netplugin_forward_interfaces: [ contivh0, contivh1 ]
+
+# List of port/protocol pairs to allow inbound access to on every host
+# netmaster runs on, from all host IPs in the cluster. Note that every host
+# that runs netmaster also runs netplugin, so the above netplugin rules will
+# apply as well.
+contiv_netmaster_internal: [ "{{ contiv_ofnet_master_port }}/{{ contiv_ofnet_master_port_proto }}",
+ "{{ contiv_netmaster_port }}/{{ contiv_netmaster_port_proto }}",
+ "{{ contiv_etcd_port }}/{{ contiv_etcd_port_proto }}",
+ "{{ contiv_etcd_peer_port }}/{{ contiv_etcd_peer_port_proto }}",
+ "{{ contiv_kube_master_api_port }}/{{ contiv_kube_master_api_port_proto }}" ]
+# List of port/protocol pairs to allow inbound access to on every host
+# netmaster runs on, from any host anywhere.
+contiv_netmaster_external: [ "{{ contiv_api_proxy_port }}/{{ contiv_api_proxy_port_proto }}" ]
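
A sketch of what these lists evaluate to with the defaults above:

    contiv_netplugin_internal:  ["9002/tcp", "9003/tcp", "4789/udp"]
    contiv_netmaster_external:  ["10000/tcp"]

The firewall tasks introduced below split each entry on "/" to recover the port and the protocol.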
diff --git a/roles/contiv/meta/main.yml b/roles/contiv/meta/main.yml
index 52b9d09dd..e8607cc90 100644
--- a/roles/contiv/meta/main.yml
+++ b/roles/contiv/meta/main.yml
@@ -13,18 +13,5 @@ galaxy_info:
- cloud
- system
dependencies:
+- role: lib_utils
- role: contiv_facts
-- role: etcd
- etcd_service: contiv-etcd
- etcd_is_thirdparty: True
- etcd_peer_port: 22380
- etcd_client_port: 22379
- etcd_conf_dir: /etc/contiv-etcd/
- etcd_data_dir: /var/lib/contiv-etcd/
- etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
- etcd_cert_config_dir: /etc/contiv-etcd/
- etcd_url_scheme: http
- etcd_peer_url_scheme: http
- when: contiv_role == "netmaster"
-- role: contiv_auth_proxy
- when: contiv_role == "netmaster"
diff --git a/roles/contiv/tasks/aci.yml b/roles/contiv/tasks/aci.yml
index 30d2eb339..8a56b3590 100644
--- a/roles/contiv/tasks/aci.yml
+++ b/roles/contiv/tasks/aci.yml
@@ -11,7 +11,7 @@
- name: ACI | Copy shell script used by aci-gw service
template:
src: aci_gw.j2
- dest: "{{ bin_dir }}/aci_gw.sh"
+ dest: "{{ contiv_bin_dir }}/aci_gw.sh"
mode: u=rwx,g=rx,o=rx
- name: ACI | Copy systemd units for aci-gw
diff --git a/roles/contiv/tasks/api_proxy.yml b/roles/contiv/tasks/api_proxy.yml
new file mode 100644
index 000000000..8b524dd6e
--- /dev/null
+++ b/roles/contiv/tasks/api_proxy.yml
@@ -0,0 +1,120 @@
+---
+- name: API proxy | Create contiv-api-proxy openshift user
+ oc_serviceaccount:
+ state: present
+ name: contiv-api-proxy
+ namespace: kube-system
+ run_once: true
+
+- name: API proxy | Set contiv-api-proxy openshift user permissions
+ oc_adm_policy_user:
+ user: system:serviceaccount:kube-system:contiv-api-proxy
+ resource_kind: scc
+ resource_name: hostnetwork
+ state: present
+ run_once: true
+
+- name: API proxy | Create temp directory for doing work
+ command: mktemp -d /tmp/openshift-contiv-XXXXXX
+ register: mktemp
+ changed_when: False
+ # For things that pass temp files between steps, we want to make sure they
+ # run on the same node.
+ delegate_to: "{{ groups.oo_masters_to_config.0 }}"
+ run_once: true
+
+- name: API proxy | Check for existing api proxy secret volume
+ oc_obj:
+ namespace: kube-system
+ kind: secret
+ state: list
+ selector: "name=contiv-api-proxy-secret"
+ register: existing_secret_volume
+ run_once: true
+
+- name: API proxy | Generate a self signed certificate for api proxy
+ command: openssl req -new -nodes -x509 -subj "/C=US/ST=/L=/O=/CN=localhost" -days 3650 -keyout "{{ mktemp.stdout }}/key.pem" -out "{{ mktemp.stdout }}/cert.pem" -extensions v3_ca
+ when: (contiv_api_proxy_cert is not defined or contiv_api_proxy_key is not defined)
+ and not existing_secret_volume.results.results[0]['items']
+ register: created_self_signed_cert
+ delegate_to: "{{ groups.oo_masters_to_config.0 }}"
+ run_once: true
+
+- name: API proxy | Read self signed certificate file
+ command: cat "{{ mktemp.stdout }}/cert.pem"
+ register: generated_cert
+ when: created_self_signed_cert.changed
+ delegate_to: "{{ groups.oo_masters_to_config.0 }}"
+ run_once: true
+
+- name: API proxy | Read self signed key file
+ command: cat "{{ mktemp.stdout }}/key.pem"
+ register: generated_key
+ when: created_self_signed_cert.changed
+ delegate_to: "{{ groups.oo_masters_to_config.0 }}"
+ run_once: true
+
+- name: API proxy | Create api-proxy-secrets.yml from template using generated cert
+ template:
+ src: api-proxy-secrets.yml.j2
+ dest: "{{ mktemp.stdout }}/api-proxy-secrets.yml"
+ vars:
+ key: "{{ generated_key.stdout }}"
+ cert: "{{ generated_cert.stdout }}"
+ when: created_self_signed_cert.changed
+ delegate_to: "{{ groups.oo_masters_to_config.0 }}"
+ run_once: true
+
+- name: API proxy | Create api-proxy-secrets.yml from template using user defined cert
+ template:
+ src: api-proxy-secrets.yml.j2
+ dest: "{{ mktemp.stdout }}/api-proxy-secrets.yml"
+ vars:
+ key: "{{ lookup('file', contiv_api_proxy_key) }}"
+ cert: "{{ lookup('file', contiv_api_proxy_cert) }}"
+ when: contiv_api_proxy_cert is defined and contiv_api_proxy_key is defined
+ delegate_to: "{{ groups.oo_masters_to_config.0 }}"
+ run_once: true
+
+- name: API proxy | Create secret certificate volume
+ oc_obj:
+ state: present
+ namespace: "kube-system"
+ kind: secret
+ name: contiv-api-proxy-secret
+ files:
+ - "{{ mktemp.stdout }}/api-proxy-secrets.yml"
+ when: (contiv_api_proxy_cert is defined and contiv_api_proxy_key is defined)
+ or created_self_signed_cert.changed
+ delegate_to: "{{ groups.oo_masters_to_config.0 }}"
+ run_once: true
+
+- name: API proxy | Create api-proxy-daemonset.yml from template
+ template:
+ src: api-proxy-daemonset.yml.j2
+ dest: "{{ mktemp.stdout }}/api-proxy-daemonset.yml"
+ vars:
+ etcd_host: "etcd://{{ groups.oo_etcd_to_config.0 }}:{{ contiv_etcd_port }}"
+ delegate_to: "{{ groups.oo_masters_to_config.0 }}"
+ run_once: true
+
+# Always "import" this file; k8s won't change anything if it matches exactly
+# what is already in the cluster.
+- name: API proxy | Add API proxy daemonset
+ oc_obj:
+ state: present
+ namespace: "kube-system"
+ kind: daemonset
+ name: contiv-api-proxy
+ files:
+ - "{{ mktemp.stdout }}/api-proxy-daemonset.yml"
+ delegate_to: "{{ groups.oo_masters_to_config.0 }}"
+ run_once: true
+
+- name: API proxy | Delete temp directory
+ file:
+ name: "{{ mktemp.stdout }}"
+ state: absent
+ changed_when: False
+ delegate_to: "{{ groups.oo_masters_to_config.0 }}"
+ run_once: true
diff --git a/roles/contiv/tasks/default_network.yml b/roles/contiv/tasks/default_network.yml
index 8a928ea54..e9763d34a 100644
--- a/roles/contiv/tasks/default_network.yml
+++ b/roles/contiv/tasks/default_network.yml
@@ -1,71 +1,71 @@
---
-- name: Contiv | Wait for netmaster
- command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" tenant ls'
+- name: Default network | Wait for netmaster
+ command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" tenant ls'
register: tenant_result
until: tenant_result.stdout.find("default") != -1
retries: 9
delay: 10
-- name: Contiv | Set globals
- command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" global set --fabric-mode {{ contiv_fabric_mode }} --vlan-range {{ contiv_vlan_range }} --fwd-mode {{ netplugin_fwd_mode }} --private-subnet {{ contiv_private_ext_subnet }}'
+- name: Default network | Set globals
+ command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" global set --fabric-mode {{ contiv_fabric_mode }} --vlan-range {{ contiv_vlan_range }} --fwd-mode {{ contiv_netplugin_fwd_mode }} --private-subnet {{ contiv_private_ext_subnet }}'
run_once: true
-- name: Contiv | Set arp mode to flood if ACI
- command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" global set --arp-mode flood'
+- name: Default network | Set arp mode to flood if ACI
+ command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" global set --arp-mode flood'
when: contiv_fabric_mode == "aci"
run_once: true
-- name: Contiv | Check if default-net exists
- command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" net ls'
+- name: Default network | Check if default-net exists
+ command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" net ls'
register: net_result
run_once: true
-- name: Contiv | Create default-net
- command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" net create --subnet={{ contiv_default_subnet }} -e {{ contiv_encap_mode }} -p {{ contiv_default_network_tag }} --gateway {{ contiv_default_gw }} default-net'
+- name: Default network | Create default-net
+ command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" net create --subnet={{ contiv_default_subnet }} -e {{ contiv_encap_mode }} -p {{ contiv_default_network_tag }} --gateway {{ contiv_default_gw }} default-net'
when: net_result.stdout.find("default-net") == -1
run_once: true
-- name: Contiv | Create host access infra network for VxLan routing case
- command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" net create --subnet={{ contiv_h1_subnet_default }} --gateway={{ contiv_h1_gw_default }} --nw-type="infra" contivh1'
- when: (contiv_encap_mode == "vxlan") and (netplugin_fwd_mode == "routing")
+- name: Default network | Create host access infra network for VxLan routing case
+ command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" net create --subnet={{ contiv_h1_subnet_default }} --gateway={{ contiv_h1_gw_default }} --nw-type="infra" contivh1'
+ when: (contiv_encap_mode == "vxlan") and (contiv_netplugin_fwd_mode == "routing")
run_once: true
-#- name: Contiv | Create an allow-all policy for the default-group
-# command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" policy create ose-allow-all-policy'
+#- name: Default network | Create an allow-all policy for the default-group
+# command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" policy create ose-allow-all-policy'
# when: contiv_fabric_mode == "aci"
# run_once: true
-- name: Contiv | Set up aci external contract to consume default external contract
- command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" external-contracts create -c -a {{ apic_default_external_contract }} oseExtToConsume'
+- name: Default network | Set up aci external contract to consume default external contract
+ command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" external-contracts create -c -a {{ contiv_apic_default_external_contract }} oseExtToConsume'
when: (contiv_fabric_mode == "aci") and (apic_configure_default_policy == true)
run_once: true
-- name: Contiv | Set up aci external contract to provide default external contract
- command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" external-contracts create -p -a {{ apic_default_external_contract }} oseExtToProvide'
+- name: Default network | Set up aci external contract to provide default external contract
+ command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" external-contracts create -p -a {{ contiv_apic_default_external_contract }} oseExtToProvide'
when: (contiv_fabric_mode == "aci") and (apic_configure_default_policy == true)
run_once: true
-- name: Contiv | Create aci default-group
- command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" group create default-net default-group'
+- name: Default network | Create aci default-group
+ command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" group create default-net default-group'
when: contiv_fabric_mode == "aci"
run_once: true
-- name: Contiv | Add external contracts to the default-group
- command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" group create -e oseExtToConsume -e oseExtToProvide default-net default-group'
+- name: Default network | Add external contracts to the default-group
+ command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" group create -e oseExtToConsume -e oseExtToProvide default-net default-group'
when: (contiv_fabric_mode == "aci") and (apic_configure_default_policy == true)
run_once: true
-#- name: Contiv | Add policy rule 1 for allow-all policy
-# command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" policy rule-add -d in --action allow ose-allow-all-policy 1'
+#- name: Default network | Add policy rule 1 for allow-all policy
+# command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" policy rule-add -d in --action allow ose-allow-all-policy 1'
# when: contiv_fabric_mode == "aci"
# run_once: true
-#- name: Contiv | Add policy rule 2 for allow-all policy
-# command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" policy rule-add -d out --action allow ose-allow-all-policy 2'
+#- name: Default network | Add policy rule 2 for allow-all policy
+# command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" policy rule-add -d out --action allow ose-allow-all-policy 2'
# when: contiv_fabric_mode == "aci"
# run_once: true
-- name: Contiv | Create default aci app profile
- command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" app-profile create -g default-group {{ apic_default_app_profile }}'
+- name: Default network | Create default aci app profile
+ command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ contiv_netmaster_port }}" app-profile create -g default-group {{ contiv_apic_default_app_profile }}'
when: contiv_fabric_mode == "aci"
run_once: true
diff --git a/roles/contiv/tasks/download_bins.yml b/roles/contiv/tasks/download_bins.yml
index 831fd360a..47d74da9c 100644
--- a/roles/contiv/tasks/download_bins.yml
+++ b/roles/contiv/tasks/download_bins.yml
@@ -4,7 +4,7 @@
path: "{{ contiv_current_release_directory }}"
state: directory
-- name: Install bzip2
+- name: Download Bins | Install bzip2
yum:
name: bzip2
state: installed
@@ -18,9 +18,9 @@
mode: 0755
validate_certs: False
environment:
- http_proxy: "{{ http_proxy|default('') }}"
- https_proxy: "{{ https_proxy|default('') }}"
- no_proxy: "{{ no_proxy|default('') }}"
+ http_proxy: "{{ contiv_http_proxy|default('') }}"
+ https_proxy: "{{ contiv_https_proxy|default('') }}"
+ no_proxy: "{{ contiv_no_proxy|default('') }}"
- name: Download Bins | Extract Contiv tar file
unarchive:
@@ -30,19 +30,19 @@
- name: Download Bins | Download cni tar file
get_url:
- url: "{{ cni_bin_url }}"
- dest: "{{ cni_download_dir }}"
+ url: "{{ contiv_cni_bin_url }}"
+ dest: "{{ contiv_cni_download_dir }}"
mode: 0755
validate_certs: False
environment:
- http_proxy: "{{ http_proxy|default('') }}"
- https_proxy: "{{ https_proxy|default('') }}"
- no_proxy: "{{ no_proxy|default('') }}"
+ http_proxy: "{{ contiv_http_proxy|default('') }}"
+ https_proxy: "{{ contiv_https_proxy|default('') }}"
+ no_proxy: "{{ contiv_no_proxy|default('') }}"
register: download_file
- name: Download Bins | Extract cni tar file
unarchive:
src: "{{ download_file.dest }}"
- dest: "{{ cni_download_dir }}"
+ dest: "{{ contiv_cni_download_dir }}"
copy: no
when: download_file.changed
diff --git a/roles/contiv/tasks/etcd.yml b/roles/contiv/tasks/etcd.yml
new file mode 100644
index 000000000..b08ead982
--- /dev/null
+++ b/roles/contiv/tasks/etcd.yml
@@ -0,0 +1,114 @@
+---
+# To run contiv-etcd in a container as non-root, we need to match the uid/gid
+# with the filesystem permissions on the host.
+- name: Contiv etcd | Create local unix group
+ group:
+ name: "{{ contiv_etcd_system_group }}"
+ gid: "{{ contiv_etcd_system_gid }}"
+ system: yes
+
+- name: Contiv etcd | Create local unix user
+ user:
+ name: "{{ contiv_etcd_system_user }}"
+ createhome: no
+ uid: "{{ contiv_etcd_system_uid }}"
+ group: "{{ contiv_etcd_system_group }}"
+ home: "{{ contiv_etcd_data_dir }}"
+ shell: /bin/false
+ system: yes
+
+- name: Contiv etcd | Create directories
+ file:
+ path: "{{ item }}"
+ state: directory
+ mode: g-rwx,o-rwx
+ owner: "{{ contiv_etcd_system_user }}"
+ group: "{{ contiv_etcd_system_group }}"
+ setype: svirt_sandbox_file_t
+ seuser: system_u
+ serole: object_r
+ selevel: s0
+ recurse: yes
+ with_items:
+ - "{{ contiv_etcd_data_dir }}"
+ - "{{ contiv_etcd_conf_dir }}"
+
+- name: Contiv etcd | Create contiv-etcd openshift user
+ oc_serviceaccount:
+ state: present
+ name: contiv-etcd
+ namespace: kube-system
+ run_once: true
+
+- name: Contiv etcd | Create temp directory for doing work
+ command: mktemp -d /tmp/openshift-contiv-XXXXXX
+ register: mktemp
+ changed_when: False
+ # For things that pass temp files between steps, we want to make sure they
+ # run on the same node.
+ delegate_to: "{{ groups.oo_masters_to_config.0 }}"
+ run_once: true
+
+- name: Contiv etcd | Create etcd-scc.yml from template
+ template:
+ src: etcd-scc.yml.j2
+ dest: "{{ mktemp.stdout }}/etcd-scc.yml"
+ delegate_to: "{{ groups.oo_masters_to_config.0 }}"
+ run_once: true
+
+- name: Contiv etcd | Create etcd.yml from template
+ template:
+ src: etcd-daemonset.yml.j2
+ dest: "{{ mktemp.stdout }}/etcd-daemonset.yml"
+ delegate_to: "{{ groups.oo_masters_to_config.0 }}"
+ run_once: true
+
+- name: Contiv etcd | Create etcd-proxy.yml from template
+ template:
+ src: etcd-proxy-daemonset.yml.j2
+ dest: "{{ mktemp.stdout }}/etcd-proxy-daemonset.yml"
+ delegate_to: "{{ groups.oo_masters_to_config.0 }}"
+ run_once: true
+
+- name: Contiv etcd | Add etcd scc
+ oc_obj:
+ state: present
+ namespace: "kube-system"
+ kind: SecurityContextConstraints
+ name: contiv-etcd
+ files:
+ - "{{ mktemp.stdout }}/etcd-scc.yml"
+ delegate_to: "{{ groups.oo_masters_to_config.0 }}"
+ run_once: true
+
+# Always "import" this file; k8s won't change anything if it matches exactly
+# what is already in the cluster.
+- name: Contiv etcd | Add etcd daemonset
+ oc_obj:
+ state: present
+ namespace: "kube-system"
+ kind: daemonset
+ name: contiv-etcd
+ files:
+ - "{{ mktemp.stdout }}/etcd-daemonset.yml"
+ delegate_to: "{{ groups.oo_masters_to_config.0 }}"
+ run_once: true
+
+- name: Contiv etcd | Add etcd-proxy daemonset
+ oc_obj:
+ state: present
+ namespace: "kube-system"
+ kind: daemonset
+ name: contiv-etcd-proxy
+ files:
+ - "{{ mktemp.stdout }}/etcd-proxy-daemonset.yml"
+ delegate_to: "{{ groups.oo_masters_to_config.0 }}"
+ run_once: true
+
+- name: Contiv etcd | Delete temp directory
+ file:
+ name: "{{ mktemp.stdout }}"
+ state: absent
+ changed_when: False
+ delegate_to: "{{ groups.oo_masters_to_config.0 }}"
+ run_once: true
diff --git a/roles/contiv/tasks/main.yml b/roles/contiv/tasks/main.yml
index cb9196a71..4d530ae90 100644
--- a/roles/contiv/tasks/main.yml
+++ b/roles/contiv/tasks/main.yml
@@ -1,14 +1,15 @@
---
-- name: Ensure bin_dir exists
+- include_tasks: old_version_cleanup.yml
+
+- name: Ensure contiv_bin_dir exists
file:
- path: "{{ bin_dir }}"
+ path: "{{ contiv_bin_dir }}"
recurse: yes
state: directory
- include_tasks: download_bins.yml
- include_tasks: netmaster.yml
- when: contiv_role == "netmaster"
+ when: contiv_master
- include_tasks: netplugin.yml
- when: contiv_role == "netplugin"
diff --git a/roles/contiv/tasks/netmaster.yml b/roles/contiv/tasks/netmaster.yml
index 6f15af8c2..bb22fb801 100644
--- a/roles/contiv/tasks/netmaster.yml
+++ b/roles/contiv/tasks/netmaster.yml
@@ -1,34 +1,16 @@
---
- include_tasks: netmaster_firewalld.yml
- when: has_firewalld
+ when: contiv_has_firewalld
- include_tasks: netmaster_iptables.yml
- when: not has_firewalld and has_iptables
+ when: not contiv_has_firewalld and contiv_has_iptables
-- name: Netmaster | Check is /etc/hosts file exists
- stat:
- path: /etc/hosts
- register: hosts
-
-- name: Netmaster | Create hosts file if it is not present
- file:
- path: /etc/hosts
- state: touch
- when: not hosts.stat.exists
-
-- name: Netmaster | Build hosts file
- lineinfile:
- dest: /etc/hosts
- regexp: .*netmaster$
- line: "{{ hostvars[item]['ansible_' + netmaster_interface].ipv4.address }} netmaster"
- state: present
- when: hostvars[item]['ansible_' + netmaster_interface].ipv4.address is defined
- with_items: "{{ groups['masters'] }}"
+- include_tasks: etcd.yml
- name: Netmaster | Create netmaster symlinks
file:
src: "{{ contiv_current_release_directory }}/{{ item }}"
- dest: "{{ bin_dir }}/{{ item }}"
+ dest: "{{ contiv_bin_dir }}/{{ item }}"
state: link
with_items:
- netmaster
@@ -36,7 +18,7 @@
- name: Netmaster | Copy environment file for netmaster
template:
- src: netmaster.env.j2
+ src: netmaster.j2
dest: /etc/default/netmaster
mode: 0644
notify: restart netmaster
@@ -75,3 +57,5 @@
- include_tasks: default_network.yml
when: contiv_default_network == true
+
+- include_tasks: api_proxy.yml
diff --git a/roles/contiv/tasks/netmaster_firewalld.yml b/roles/contiv/tasks/netmaster_firewalld.yml
index 2975351ac..0d52f821d 100644
--- a/roles/contiv/tasks/netmaster_firewalld.yml
+++ b/roles/contiv/tasks/netmaster_firewalld.yml
@@ -1,16 +1,17 @@
---
-- name: Netmaster Firewalld | Open Netmaster port
+- name: Netmaster Firewalld | Add internal rules
firewalld:
- port: "{{ netmaster_port }}/tcp"
- permanent: false
- state: enabled
- # in case this is also a node where firewalld turned off
- ignore_errors: yes
+ immediate: true
+ permanent: true
+ port: "{{ item[0] }}"
+ source: "{{ item[1] }}"
+ with_nested:
+ - "{{ contiv_netmaster_internal }}"
+ - "{{ groups.oo_nodes_to_config|difference(hostvars[inventory_hostname]['ansible_' + contiv_netmaster_interface].ipv4.address)|list }}"
-- name: Netmaster Firewalld | Save Netmaster port
+- name: Netmaster Firewalld | Add external rules
firewalld:
- port: "{{ netmaster_port }}/tcp"
+ immediate: true
permanent: true
- state: enabled
- # in case this is also a node where firewalld turned off
- ignore_errors: yes
+ port: "{{ item }}"
+ with_items: "{{ contiv_netmaster_external }}"
diff --git a/roles/contiv/tasks/netmaster_iptables.yml b/roles/contiv/tasks/netmaster_iptables.yml
index c98e7b6a5..3b68ea0c3 100644
--- a/roles/contiv/tasks/netmaster_iptables.yml
+++ b/roles/contiv/tasks/netmaster_iptables.yml
@@ -1,27 +1,32 @@
---
-- name: Netmaster IPtables | Get iptables rules
- command: iptables -L --wait
- register: iptablesrules
- check_mode: no
-
-- name: Netmaster IPtables | Enable iptables at boot
- service:
- name: iptables
- enabled: yes
- state: started
-
-- name: Netmaster IPtables | Open Netmaster with iptables
- command: /sbin/iptables -I INPUT 1 -p tcp --dport {{ item }} -j ACCEPT -m comment --comment "contiv"
- with_items:
- - "{{ contiv_rpc_port1 }}"
- - "{{ contiv_rpc_port2 }}"
- - "{{ contiv_rpc_port3 }}"
- when: iptablesrules.stdout.find("contiv") == -1
+- name: Netmaster IPtables | Add internal rules
+ iptables:
+ action: insert
+ chain: INPUT
+ # Parsed from the contiv_netmaster_internal list, this will be tcp or udp.
+ protocol: "{{ item[0].split('/')[1] }}"
+ match: "{{ item[0].split('/')[1] }}"
+ # Parsed from the contiv_netmaster_internal list, this will be a port number.
+ destination_port: "{{ item[0].split('/')[0] }}"
+ # This is an IP address from a node in the cluster.
+ source: "{{ item[1] }}"
+ jump: ACCEPT
+ comment: contiv
+ with_nested:
+ - "{{ contiv_netmaster_internal }}"
+ - "{{ groups.oo_nodes_to_config|difference(hostvars[inventory_hostname]['ansible_' + contiv_netmaster_interface].ipv4.address)|list }}"
notify: Save iptables rules
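
A sketch of the parsing used above: for a nested item of ["9999/tcp", "10.0.0.2"] (the address being a hypothetical cluster host), the Jinja expressions evaluate as

    item[0].split('/')[0]  ->  "9999"      (destination_port)
    item[0].split('/')[1]  ->  "tcp"       (protocol and match)
    item[1]                ->  "10.0.0.2"  (source)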
-- name: Netmaster IPtables | Open netmaster main port
- command: /sbin/iptables -I INPUT 1 -p tcp -s {{ item }} --dport {{ netmaster_port }} -j ACCEPT -m comment --comment "contiv"
- with_items:
- - "{{ groups.oo_nodes_to_config|difference(hostvars[inventory_hostname]['ansible_' + netmaster_interface].ipv4.address)|list }}"
- when: iptablesrules.stdout.find("contiv") == -1
+- name: Netmaster IPtables | Add external rules
+ iptables:
+ action: insert
+ chain: INPUT
+ # Parsed from the contiv_netmaster_external list, this will be tcp or udp.
+ protocol: "{{ item.split('/')[1] }}"
+ match: "{{ item.split('/')[1] }}"
+ # Parsed from the contiv_netmaster_external list, this will be a port number.
+ destination_port: "{{ item.split('/')[0] }}"
+ jump: ACCEPT
+ comment: contiv
+ with_items: "{{ contiv_netmaster_external }}"
notify: Save iptables rules
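
Assuming the default contiv_netmaster_external list of ["10000/tcp"], the external-rule task above amounts to roughly this manual command (a sketch, not output captured from the role):

    iptables -I INPUT -p tcp -m tcp --dport 10000 -m comment --comment contiv -j ACCEPT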
diff --git a/roles/contiv/tasks/netplugin.yml b/roles/contiv/tasks/netplugin.yml
index 540f6e4bc..60f432202 100644
--- a/roles/contiv/tasks/netplugin.yml
+++ b/roles/contiv/tasks/netplugin.yml
@@ -1,9 +1,9 @@
---
- include_tasks: netplugin_firewalld.yml
- when: has_firewalld
+ when: contiv_has_firewalld
- include_tasks: netplugin_iptables.yml
- when: has_iptables
+ when: not contiv_has_firewalld and contiv_has_iptables
- name: Netplugin | Ensure localhost entry correct in /etc/hosts
lineinfile:
@@ -20,41 +20,40 @@
state: absent
- include_tasks: ovs.yml
- when: netplugin_driver == "ovs"
+ when: contiv_netplugin_driver == "ovs"
- name: Netplugin | Create Netplugin bin symlink
file:
src: "{{ contiv_current_release_directory }}/netplugin"
- dest: "{{ bin_dir }}/netplugin"
+ dest: "{{ contiv_bin_dir }}/netplugin"
state: link
-
-- name: Netplugin | Ensure cni_bin_dir exists
+- name: Netplugin | Ensure contiv_cni_bin_dir exists
file:
- path: "{{ cni_bin_dir }}"
+ path: "{{ contiv_cni_bin_dir }}"
recurse: yes
state: directory
- name: Netplugin | Create CNI bin symlink
file:
src: "{{ contiv_current_release_directory }}/contivk8s"
- dest: "{{ cni_bin_dir }}/contivk8s"
+ dest: "{{ contiv_cni_bin_dir }}/contivk8s"
state: link
- name: Netplugin | Copy CNI loopback bin
copy:
- src: "{{ cni_download_dir }}/loopback"
- dest: "{{ cni_bin_dir }}/loopback"
+ src: "{{ contiv_cni_download_dir }}/loopback"
+ dest: "{{ contiv_cni_bin_dir }}/loopback"
remote_src: True
mode: 0755
-- name: Netplugin | Ensure kube_plugin_dir and cni/net.d directories exist
+- name: Netplugin | Ensure contiv_kube_plugin_dir and cni/net.d directories exist
file:
path: "{{ item }}"
recurse: yes
state: directory
with_items:
- - "{{ kube_plugin_dir }}"
+ - "{{ contiv_kube_plugin_dir }}"
- "/etc/cni/net.d"
- name: Netplugin | Ensure contiv_config_dir exists
@@ -68,7 +67,7 @@
src: contiv_cni.conf
dest: "{{ item }}"
with_items:
- - "{{ kube_plugin_dir }}/contiv_cni.conf"
+ - "{{ contiv_kube_plugin_dir }}/contiv_cni.conf"
- "/etc/cni/net.d"
# notify: restart kubelet
@@ -85,11 +84,11 @@
mode: 0644
notify: restart netplugin
-- name: Docker | Make sure proxy setting exists
+- name: Netplugin | Make sure docker proxy setting exists
lineinfile:
dest: /etc/sysconfig/docker-network
regexp: '^https_proxy.*'
- line: 'https_proxy={{ https_proxy }}'
+ line: 'https_proxy={{ contiv_https_proxy }}'
state: present
register: docker_updated
@@ -103,9 +102,9 @@
command: systemctl daemon-reload
when: docker_updated is changed
-- name: Docker | Restart docker
+- name: Netplugin | Restart docker
service:
- name: "{{ openshift_docker_service_name }}"
+ name: "{{ contiv_openshift_docker_service_name }}"
state: restarted
when: docker_updated is changed
register: l_docker_restart_docker_in_contiv_result
diff --git a/roles/contiv/tasks/netplugin_firewalld.yml b/roles/contiv/tasks/netplugin_firewalld.yml
index 3aeffae56..5ac531ec6 100644
--- a/roles/contiv/tasks/netplugin_firewalld.yml
+++ b/roles/contiv/tasks/netplugin_firewalld.yml
@@ -1,34 +1,17 @@
---
-- name: Netplugin Firewalld | Open Netplugin port
+- name: Netplugin Firewalld | Add internal rules
firewalld:
- port: "{{ netplugin_port }}/tcp"
- permanent: false
- state: enabled
- # in case this is also a node where firewalld turned off
- ignore_errors: yes
-
-- name: Netplugin Firewalld | Save Netplugin port
- firewalld:
- port: "{{ netplugin_port }}/tcp"
+ immediate: true
permanent: true
- state: enabled
- # in case this is also a node where firewalld turned off
- ignore_errors: yes
-
-- name: Netplugin Firewalld | Open vxlan port
- firewalld:
- port: "8472/udp"
- permanent: false
- state: enabled
- # in case this is also a node where firewalld turned off
- ignore_errors: yes
- when: contiv_encap_mode == "vxlan"
+ port: "{{ item[0] }}"
+ source: "{{ item[1] }}"
+ with_nested:
+ - "{{ contiv_netplugin_internal }}"
+ - "{{ groups.oo_nodes_to_config|difference(hostvars[inventory_hostname]['ansible_' + contiv_netmaster_interface].ipv4.address)|list }}"
-- name: Netplugin Firewalld | Save firewalld vxlan port for flanneld
+- name: Netplugin Firewalld | Add dns rule
firewalld:
- port: "8472/udp"
+ immediate: true
permanent: true
- state: enabled
- # in case this is also a node where firewalld turned off
- ignore_errors: yes
- when: contiv_encap_mode == "vxlan"
+ port: "53/udp"
+ interface: contivh0
diff --git a/roles/contiv/tasks/netplugin_iptables.yml b/roles/contiv/tasks/netplugin_iptables.yml
index 3ea34645d..9d376f4e5 100644
--- a/roles/contiv/tasks/netplugin_iptables.yml
+++ b/roles/contiv/tasks/netplugin_iptables.yml
@@ -1,58 +1,52 @@
---
-- name: Netplugin IPtables | Get iptables rules
- command: iptables -L --wait
- register: iptablesrules
- check_mode: no
+- name: Netplugin IPtables | Add internal rules
+ iptables:
+ action: insert
+ chain: INPUT
+ protocol: "{{ item[0].split('/')[1] }}"
+ match: "{{ item[0].split('/')[1] }}"
+ destination_port: "{{ item[0].split('/')[0] }}"
+ source: "{{ item[1] }}"
+ jump: ACCEPT
+ comment: contiv
+ with_nested:
+ - "{{ contiv_netplugin_internal }}"
+ - "{{ groups.oo_nodes_to_config|difference(hostvars[inventory_hostname]['ansible_' + contiv_netmaster_interface].ipv4.address)|list }}"
+ notify: Save iptables rules
+
+- name: Netplugin IPtables | Add [in] forward rules
+ iptables:
+ action: insert
+ chain: FORWARD
+ in_interface: "{{ item }}"
+ jump: ACCEPT
+ comment: contiv
+ with_items: "{{ contiv_netplugin_forward_interfaces }}"
+ notify: Save iptables rules
+
+- name: Netplugin IPtables | Add [out] forward rules
+ iptables:
+ action: insert
+ chain: FORWARD
+ out_interface: "{{ item }}"
+ jump: ACCEPT
+ comment: contiv
+ with_items: "{{ contiv_netplugin_forward_interfaces }}"
+ notify: Save iptables rules
+
+- name: Netplugin IPtables | Add dns rule
+ iptables:
+ action: insert
+ chain: INPUT
+ protocol: udp
+ match: udp
+ destination_port: 53
+ in_interface: contivh0
+ jump: ACCEPT
+ comment: contiv
+ notify: Save iptables rules
- name: Netplugin IPtables | Enable iptables at boot
service:
name: iptables
enabled: yes
- state: started
-
-- name: Netplugin IPtables | Open Netmaster with iptables
- command: /sbin/iptables -I INPUT 1 -p tcp --dport {{ item }} -j ACCEPT -m comment --comment "contiv"
- with_items:
- - "{{ netmaster_port }}"
- - "{{ contiv_rpc_port1 }}"
- - "{{ contiv_rpc_port2 }}"
- - "{{ contiv_rpc_port3 }}"
- - "{{ contiv_etcd_port }}"
- - "{{ kube_master_api_port }}"
- when: iptablesrules.stdout.find("contiv") == -1
- notify: Save iptables rules
-
-- name: Netplugin IPtables | Open vxlan port with iptables
- command: /sbin/iptables -I INPUT 1 -p udp --dport 8472 -j ACCEPT -m comment --comment "netplugin vxlan 8472"
- when: iptablesrules.stdout.find("netplugin vxlan 8472") == -1
- notify: Save iptables rules
-
-- name: Netplugin IPtables | Open vxlan port with iptables
- command: /sbin/iptables -I INPUT 1 -p udp --dport 4789 -j ACCEPT -m comment --comment "netplugin vxlan 4789"
- when: iptablesrules.stdout.find("netplugin vxlan 4789") == -1
- notify: Save iptables rules
-
-- name: Netplugin IPtables | Allow from contivh0
- command: /sbin/iptables -I FORWARD 1 -i contivh0 -j ACCEPT -m comment --comment "contivh0 FORWARD input"
- when: iptablesrules.stdout.find("contivh0 FORWARD input") == -1
- notify: Save iptables rules
-
-- name: Netplugin IPtables | Allow to contivh0
- command: /sbin/iptables -I FORWARD 1 -o contivh0 -j ACCEPT -m comment --comment "contivh0 FORWARD output"
- when: iptablesrules.stdout.find("contivh0 FORWARD output") == -1
- notify: Save iptables rules
-
-- name: Netplugin IPtables | Allow from contivh1
- command: /sbin/iptables -I FORWARD 1 -i contivh1 -j ACCEPT -m comment --comment "contivh1 FORWARD input"
- when: iptablesrules.stdout.find("contivh1 FORWARD input") == -1
- notify: Save iptables rules
-
-- name: Netplugin IPtables | Allow to contivh1
- command: /sbin/iptables -I FORWARD 1 -o contivh1 -j ACCEPT -m comment --comment "contivh1 FORWARD output"
- when: iptablesrules.stdout.find("contivh1 FORWARD output") == -1
- notify: Save iptables rules
-
-- name: Netplugin IPtables | Allow dns
- command: /sbin/iptables -I INPUT 1 -p udp --dport 53 -j ACCEPT -m comment --comment "contiv dns"
- when: iptablesrules.stdout.find("contiv dns") == -1
- notify: Save iptables rules
diff --git a/roles/contiv/tasks/old_version_cleanup.yml b/roles/contiv/tasks/old_version_cleanup.yml
new file mode 100644
index 000000000..8b3d88096
--- /dev/null
+++ b/roles/contiv/tasks/old_version_cleanup.yml
@@ -0,0 +1,43 @@
+---
+- name: Old version cleanup | Check if old auth proxy service exists
+ stat:
+ path: /etc/systemd/system/auth-proxy.service
+ register: auth_proxy_stat
+
+- name: Old version cleanup | Stop old auth proxy
+ service:
+ name: auth-proxy
+ enabled: no
+ state: stopped
+ when: auth_proxy_stat.stat.exists
+
+# Note: The new containerized contiv-etcd service uses the same data
+# directory on the host, so etcd data is not lost.
+- name: Old version cleanup | Check if old contiv-etcd service exists
+ stat:
+ path: /etc/systemd/system/contiv-etcd.service
+ register: contiv_etcd_stat
+
+- name: Old version cleanup | Stop old contiv-etcd
+ service:
+ name: contiv-etcd
+ enabled: no
+ state: stopped
+ when: contiv_etcd_stat.stat.exists
+
+- name: Old version cleanup | Delete old files
+ file:
+ state: absent
+ path: "{{ item }}"
+ with_items:
+ - /etc/systemd/system/auth-proxy.service
+ - /var/contiv/certs
+ - /usr/bin/auth_proxy.sh
+ - /etc/systemd/system/contiv-etcd.service
+ - /etc/systemd/system/contiv-etcd.service.d
+
+- include_tasks: old_version_cleanup_iptables.yml
+ when: not contiv_has_firewalld and contiv_has_iptables
+
+- include_tasks: old_version_cleanup_firewalld.yml
+ when: contiv_has_firewalld
diff --git a/roles/contiv/tasks/old_version_cleanup_firewalld.yml b/roles/contiv/tasks/old_version_cleanup_firewalld.yml
new file mode 100644
index 000000000..675a6358a
--- /dev/null
+++ b/roles/contiv/tasks/old_version_cleanup_firewalld.yml
@@ -0,0 +1,11 @@
+---
+- name: Old version cleanup | Delete old firewalld rules
+ firewalld:
+ state: absent
+ immediate: true
+ permanent: true
+ port: "{{ item }}"
+ with_items:
+ - "9999/tcp"
+ - "6640/tcp"
+ - "8472/udp"
diff --git a/roles/contiv/tasks/old_version_cleanup_iptables.yml b/roles/contiv/tasks/old_version_cleanup_iptables.yml
new file mode 100644
index 000000000..513357606
--- /dev/null
+++ b/roles/contiv/tasks/old_version_cleanup_iptables.yml
@@ -0,0 +1,44 @@
+---
+- name: Old version cleanup | Delete old forward [in] iptables rules
+ iptables:
+ state: absent
+ chain: FORWARD
+ in_interface: "{{ item }}"
+ jump: ACCEPT
+ comment: "{{ item }} FORWARD input"
+ with_items:
+ - contivh0
+ - contivh1
+ notify: Save iptables rules
+
+- name: Old version cleanup | Delete old forward [out] iptables rules
+ iptables:
+ state: absent
+ chain: FORWARD
+ out_interface: "{{ item }}"
+ jump: ACCEPT
+ comment: "{{ item }} FORWARD output"
+ with_items:
+ - contivh0
+ - contivh1
+ notify: Save iptables rules
+
+- name: Old version cleanup | Delete old input iptables rules
+ iptables:
+ state: absent
+ chain: INPUT
+ protocol: "{{ item.split('/')[1] }}"
+ match: "{{ item.split('/')[1] }}"
+ destination_port: "{{ item.split('/')[0] }}"
+ comment: "{{ item.split('/')[2] }}"
+ jump: ACCEPT
+ with_items:
+ - "53/udp/contiv dns"
+ - "4789/udp/netplugin vxlan 4789"
+ - "8472/udp/netplugin vxlan 8472"
+ - "9003/tcp/contiv"
+ - "9002/tcp/contiv"
+ - "9001/tcp/contiv"
+ - "9999/tcp/contiv"
+ - "10000/tcp/Contiv auth proxy service (10000)"
+ notify: Save iptables rules
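
Each cleanup item above packs port, protocol, and the old rule comment into one string. For example, "4789/udp/netplugin vxlan 4789" splits into

    item.split('/')[0]  ->  "4789"
    item.split('/')[1]  ->  "udp"
    item.split('/')[2]  ->  "netplugin vxlan 4789"

matching the comments the old command-based tasks attached, so the exact legacy rules can be located and removed.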
diff --git a/roles/contiv/tasks/ovs.yml b/roles/contiv/tasks/ovs.yml
index 5c92e90e9..21ba6ead4 100644
--- a/roles/contiv/tasks/ovs.yml
+++ b/roles/contiv/tasks/ovs.yml
@@ -1,6 +1,6 @@
---
- include_tasks: packageManagerInstall.yml
- when: source_type == "packageManager"
+ when: contiv_source_type == "packageManager"
tags:
- binary-update
diff --git a/roles/contiv/tasks/packageManagerInstall.yml b/roles/contiv/tasks/packageManagerInstall.yml
index d5726476c..8c8e7a7bd 100644
--- a/roles/contiv/tasks/packageManagerInstall.yml
+++ b/roles/contiv/tasks/packageManagerInstall.yml
@@ -4,10 +4,9 @@
did_install: false
- include_tasks: pkgMgrInstallers/centos-install.yml
- when: (ansible_os_family == "RedHat") and
- not is_atomic
+ when: ansible_os_family == "RedHat" and not openshift_is_atomic | bool
- name: Package Manager | Set fact saying we did CentOS package install
set_fact:
did_install: true
- when: (ansible_os_family == "RedHat")
+ when: ansible_os_family == "RedHat"
diff --git a/roles/contiv/tasks/pkgMgrInstallers/centos-install.yml b/roles/contiv/tasks/pkgMgrInstallers/centos-install.yml
index 53c5b4099..2c82973d6 100644
--- a/roles/contiv/tasks/pkgMgrInstallers/centos-install.yml
+++ b/roles/contiv/tasks/pkgMgrInstallers/centos-install.yml
@@ -12,9 +12,9 @@
dest: /tmp/rdo-release-ocata-2.noarch.rpm
validate_certs: False
environment:
- http_proxy: "{{ http_proxy|default('') }}"
- https_proxy: "{{ https_proxy|default('') }}"
- no_proxy: "{{ no_proxy|default('') }}"
+ http_proxy: "{{ contiv_http_proxy|default('') }}"
+ https_proxy: "{{ contiv_https_proxy|default('') }}"
+ no_proxy: "{{ contiv_no_proxy|default('') }}"
tags:
- ovs_install
@@ -30,9 +30,9 @@
pkg=openvswitch
state=present
environment:
- http_proxy: "{{ http_proxy|default('') }}"
- https_proxy: "{{ https_proxy|default('') }}"
- no_proxy: "{{ no_proxy|default('') }}"
+ http_proxy: "{{ contiv_http_proxy|default('') }}"
+ https_proxy: "{{ contiv_https_proxy|default('') }}"
+ no_proxy: "{{ contiv_no_proxy|default('') }}"
tags:
- ovs_install
register: result
diff --git a/roles/contiv/templates/aci-gw.service b/roles/contiv/templates/aci-gw.service
index 9b3f12567..e2813c99d 100644
--- a/roles/contiv/templates/aci-gw.service
+++ b/roles/contiv/templates/aci-gw.service
@@ -1,10 +1,10 @@
[Unit]
Description=Contiv ACI gw
-After=auditd.service systemd-user-sessions.service time-sync.target {{ openshift_docker_service_name }}.service
+After=auditd.service systemd-user-sessions.service time-sync.target {{ contiv_openshift_docker_service_name }}.service
[Service]
-ExecStart={{ bin_dir }}/aci_gw.sh start
-ExecStop={{ bin_dir }}/aci_gw.sh stop
+ExecStart={{ contiv_bin_dir }}/aci_gw.sh start
+ExecStop={{ contiv_bin_dir }}/aci_gw.sh stop
KillMode=control-group
Restart=always
RestartSec=10
diff --git a/roles/contiv/templates/aci_gw.j2 b/roles/contiv/templates/aci_gw.j2
index ab4ad46a6..5ff349945 100644
--- a/roles/contiv/templates/aci_gw.j2
+++ b/roles/contiv/templates/aci_gw.j2
@@ -11,13 +11,13 @@ start)
set -e
docker run --net=host \
- -e "APIC_URL={{ apic_url }}" \
- -e "APIC_USERNAME={{ apic_username }}" \
- -e "APIC_PASSWORD={{ apic_password }}" \
- -e "APIC_LEAF_NODE={{ apic_leaf_nodes }}" \
- -e "APIC_PHYS_DOMAIN={{ apic_phys_dom }}" \
- -e "APIC_EPG_BRIDGE_DOMAIN={{ apic_epg_bridge_domain }}" \
- -e "APIC_CONTRACTS_UNRESTRICTED_MODE={{ apic_contracts_unrestricted_mode }}" \
+ -e "APIC_URL={{ contiv_apic_url }}" \
+ -e "APIC_USERNAME={{ contiv_apic_username }}" \
+ -e "APIC_PASSWORD={{ contiv_apic_password }}" \
+ -e "APIC_LEAF_NODE={{ contiv_apic_leaf_nodes }}" \
+ -e "APIC_PHYS_DOMAIN={{ contiv_apic_phys_dom }}" \
+ -e "APIC_EPG_BRIDGE_DOMAIN={{ contiv_apic_epg_bridge_domain }}" \
+ -e "APIC_CONTRACTS_UNRESTRICTED_MODE={{ contiv_apic_contracts_unrestricted_mode }}" \
--name=contiv-aci-gw \
contiv/aci-gw
;;
diff --git a/roles/contiv/templates/api-proxy-daemonset.yml.j2 b/roles/contiv/templates/api-proxy-daemonset.yml.j2
new file mode 100644
index 000000000..a15073580
--- /dev/null
+++ b/roles/contiv/templates/api-proxy-daemonset.yml.j2
@@ -0,0 +1,57 @@
+---
+apiVersion: extensions/v1beta1
+kind: DaemonSet
+metadata:
+ name: contiv-api-proxy
+ namespace: kube-system
+spec:
+ updateStrategy:
+ type: RollingUpdate
+ selector:
+ matchLabels:
+ name: contiv-api-proxy
+ template:
+ metadata:
+ namespace: kube-system
+ labels:
+ name: contiv-api-proxy
+ annotations:
+ scheduler.alpha.kubernetes.io/critical-pod: ""
+ spec:
+ serviceAccountName: contiv-api-proxy
+ hostNetwork: true
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: kubernetes.io/hostname
+ operator: In
+ values:
+{% for node in groups.oo_masters_to_config %}
+ - "{{ node }}"
+{% endfor %}
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ containers:
+ - name: contiv-api-proxy
+ image: "{{ contiv_api_proxy_image_repo }}:{{ contiv_version }}"
+ args:
+ - "--listen-address=0.0.0.0:{{ contiv_api_proxy_port }}"
+ - --tls-key-file=/var/contiv/api_proxy_key.pem
+ - --tls-certificate=/var/contiv/api_proxy_cert.pem
+ - "--data-store-address={{ etcd_host }}"
+ - --data-store-driver=etcd
+ - "--netmaster-address=127.0.0.1:{{ contiv_netmaster_port }}"
+ ports:
+ - containerPort: "{{ contiv_api_proxy_port }}"
+ hostPort: "{{ contiv_api_proxy_port }}"
+ volumeMounts:
+ - name: secret-volume
+ mountPath: /var/contiv
+ readOnly: true
+ volumes:
+ - name: secret-volume
+ secret:
+ secretName: contiv-api-proxy-secret
diff --git a/roles/contiv/templates/api-proxy-secrets.yml.j2 b/roles/contiv/templates/api-proxy-secrets.yml.j2
new file mode 100644
index 000000000..cd800c97d
--- /dev/null
+++ b/roles/contiv/templates/api-proxy-secrets.yml.j2
@@ -0,0 +1,12 @@
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: contiv-api-proxy-secret
+ namespace: kube-system
+ labels:
+ name: contiv-api-proxy-secret
+# Use data+b64encode, because stringData doesn't preserve newlines.
+data:
+ api_proxy_key.pem: "{{ key | b64encode }}"
+ api_proxy_cert.pem: "{{ cert | b64encode }}"
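
As a minimal sketch of the rendered Secret, assuming a hypothetical PEM body of "-----BEGIN CERTIFICATE-----\n...":

    api_proxy_cert.pem: "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCi4uLg=="

b64encode encodes the exact bytes, newlines included, which is why data is used here instead of stringData.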
diff --git a/roles/contiv/templates/contiv.cfg.j2 b/roles/contiv/templates/contiv.cfg.j2
index f0e99c556..1dce9fcc2 100644
--- a/roles/contiv/templates/contiv.cfg.j2
+++ b/roles/contiv/templates/contiv.cfg.j2
@@ -1,5 +1,5 @@
{
- "K8S_API_SERVER": "https://{{ hostvars[groups['masters'][0]]['ansible_' + netmaster_interface].ipv4.address }}:{{ kube_master_api_port }}",
+ "K8S_API_SERVER": "https://{{ hostvars[groups['masters'][0]]['ansible_' + contiv_netmaster_interface].ipv4.address }}:{{ contiv_kube_master_api_port }}",
"K8S_CA": "{{ openshift.common.config_base }}/node/ca.crt",
"K8S_KEY": "{{ openshift.common.config_base }}/node/system:node:{{ openshift.common.hostname }}.key",
"K8S_CERT": "{{ openshift.common.config_base }}/node/system:node:{{ openshift.common.hostname }}.crt",
diff --git a/roles/contiv/templates/contiv.cfg.master.j2 b/roles/contiv/templates/contiv.cfg.master.j2
index fac8e3c4c..ca29b8001 100644
--- a/roles/contiv/templates/contiv.cfg.master.j2
+++ b/roles/contiv/templates/contiv.cfg.master.j2
@@ -1,5 +1,5 @@
{
- "K8S_API_SERVER": "https://{{ hostvars[groups['masters'][0]]['ansible_' + netmaster_interface].ipv4.address }}:{{ kube_master_api_port }}",
+ "K8S_API_SERVER": "https://{{ hostvars[groups['masters'][0]]['ansible_' + contiv_netmaster_interface].ipv4.address }}:{{ contiv_kube_master_api_port }}",
"K8S_CA": "{{ openshift.common.config_base }}/master/ca.crt",
"K8S_KEY": "{{ openshift.common.config_base }}/master/system:node:{{ openshift.common.hostname }}.key",
"K8S_CERT": "{{ openshift.common.config_base }}/master/system:node:{{ openshift.common.hostname }}.crt",
diff --git a/roles/contiv/templates/etcd-daemonset.yml.j2 b/roles/contiv/templates/etcd-daemonset.yml.j2
new file mode 100644
index 000000000..76937e670
--- /dev/null
+++ b/roles/contiv/templates/etcd-daemonset.yml.j2
@@ -0,0 +1,83 @@
+---
+apiVersion: extensions/v1beta1
+kind: DaemonSet
+metadata:
+ name: contiv-etcd
+ namespace: kube-system
+spec:
+ updateStrategy:
+ type: RollingUpdate
+ selector:
+ matchLabels:
+ name: contiv-etcd
+ template:
+ metadata:
+ namespace: kube-system
+ labels:
+ name: contiv-etcd
+ annotations:
+ scheduler.alpha.kubernetes.io/critical-pod: ""
+ spec:
+ serviceAccountName: contiv-etcd
+ hostNetwork: true
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: kubernetes.io/hostname
+ operator: In
+ values:
+{% for node in groups.oo_masters_to_config %}
+ - "{{ node }}"
+{% endfor %}
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ initContainers:
+ - name: contiv-etcd-init
+ image: "{{ contiv_etcd_init_image_repo }}:{{ contiv_etcd_init_image_tag }}"
+ env:
+ - name: ETCD_INIT_ARGSFILE
+ value: "{{ contiv_etcd_conf_dir }}/contiv-etcd-args"
+ - name: ETCD_INIT_LISTEN_PORT
+ value: "{{ contiv_etcd_port }}"
+ - name: ETCD_INIT_PEER_PORT
+ value: "{{ contiv_etcd_peer_port }}"
+ - name: ETCD_INIT_CLUSTER
+ value: "{{ contiv_etcd_peers }}"
+ - name: ETCD_INIT_DATA_DIR
+ value: "{{ contiv_etcd_data_dir }}"
+ volumeMounts:
+ - name: contiv-etcd-conf-dir
+ mountPath: "{{ contiv_etcd_conf_dir }}"
+ securityContext:
+ runAsUser: "{{ contiv_etcd_system_uid }}"
+ fsGroup: "{{ contiv_etcd_system_gid }}"
+ containers:
+ - name: contiv-etcd
+ image: "{{ contiv_etcd_image_repo }}:{{ contiv_etcd_image_tag }}"
+ command:
+ - sh
+ - -c
+ - 'exec etcd $(cat "$ETCD_INIT_ARGSFILE")'
+ env:
+ - name: ETCD_INIT_ARGSFILE
+ value: "{{ contiv_etcd_conf_dir }}/contiv-etcd-args"
+ volumeMounts:
+ - name: contiv-etcd-conf-dir
+ mountPath: "{{ contiv_etcd_conf_dir }}"
+ - name: contiv-etcd-data-dir
+ mountPath: "{{ contiv_etcd_data_dir }}"
+ securityContext:
+ runAsUser: "{{ contiv_etcd_system_uid }}"
+ fsGroup: "{{ contiv_etcd_system_gid }}"
+ volumes:
+ - name: contiv-etcd-data-dir
+ hostPath:
+ type: DirectoryOrCreate
+ path: "{{ contiv_etcd_data_dir }}"
+ - name: contiv-etcd-conf-dir
+ hostPath:
+ type: DirectoryOrCreate
+ path: "{{ contiv_etcd_conf_dir }}"
diff --git a/roles/contiv/templates/etcd-proxy-daemonset.yml.j2 b/roles/contiv/templates/etcd-proxy-daemonset.yml.j2
new file mode 100644
index 000000000..4ec6cfd76
--- /dev/null
+++ b/roles/contiv/templates/etcd-proxy-daemonset.yml.j2
@@ -0,0 +1,55 @@
+---
+apiVersion: extensions/v1beta1
+kind: DaemonSet
+metadata:
+ name: contiv-etcd-proxy
+ namespace: kube-system
+spec:
+ updateStrategy:
+ type: RollingUpdate
+ selector:
+ matchLabels:
+ name: contiv-etcd-proxy
+ template:
+ metadata:
+ namespace: kube-system
+ labels:
+ name: contiv-etcd-proxy
+ annotations:
+ scheduler.alpha.kubernetes.io/critical-pod: ""
+ spec:
+ serviceAccountName: contiv-etcd
+ hostNetwork: true
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: kubernetes.io/hostname
+ operator: NotIn
+ values:
+{% for node in groups.oo_masters_to_config %}
+ - "{{ node }}"
+{% endfor %}
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ containers:
+ - name: contiv-etcd-proxy
+ image: "{{ contiv_etcd_image_repo }}:{{ contiv_etcd_image_tag }}"
+ command:
+ - etcd
+ - "--proxy=on"
+ - "--listen-client-urls=http://127.0.0.1:{{ contiv_etcd_port }}"
+ - "--advertise-client-urls=http://127.0.0.1:{{ contiv_etcd_port }}"
+ - "--initial-cluster={{ contiv_etcd_peers }}"
+ - "--data-dir={{ contiv_etcd_data_dir }}"
+ volumeMounts:
+ - name: contiv-etcd-data-dir
+ mountPath: "{{ contiv_etcd_data_dir }}"
+ securityContext:
+ runAsUser: "{{ contiv_etcd_system_uid }}"
+ fsGroup: "{{ contiv_etcd_system_gid }}"
+ volumes:
+ - name: contiv-etcd-data-dir
+ emptyDir: {}
diff --git a/roles/contiv/templates/etcd-scc.yml.j2 b/roles/contiv/templates/etcd-scc.yml.j2
new file mode 100644
index 000000000..6c4bb1d1e
--- /dev/null
+++ b/roles/contiv/templates/etcd-scc.yml.j2
@@ -0,0 +1,42 @@
+allowHostDirVolumePlugin: true
+allowHostIPC: false
+allowHostNetwork: true
+allowHostPID: false
+allowHostPorts: false
+allowPrivilegedContainer: false
+allowedCapabilities: []
+allowedFlexVolumes: []
+apiVersion: v1
+defaultAddCapabilities: []
+fsGroup:
+ ranges:
+ - max: "{{ contiv_etcd_system_gid }}"
+ min: "{{ contiv_etcd_system_gid }}"
+ type: MustRunAs
+groups: []
+kind: SecurityContextConstraints
+metadata:
+ annotations:
+ kubernetes.io/description: 'For contiv-etcd only.'
+ creationTimestamp: null
+ name: contiv-etcd
+priority: null
+readOnlyRootFilesystem: true
+requiredDropCapabilities:
+- KILL
+- MKNOD
+- SETUID
+- SETGID
+runAsUser:
+ type: MustRunAs
+ uid: "{{ contiv_etcd_system_uid }}"
+seLinuxContext:
+ type: MustRunAs
+supplementalGroups:
+ type: MustRunAs
+users:
+- system:serviceaccount:kube-system:contiv-etcd
+volumes:
+- emptyDir
+- hostPath
+- secret
diff --git a/roles/contiv/templates/netmaster.env.j2 b/roles/contiv/templates/netmaster.env.j2
deleted file mode 100644
index 5b5c84a2e..000000000
--- a/roles/contiv/templates/netmaster.env.j2
+++ /dev/null
@@ -1,2 +0,0 @@
-NETMASTER_ARGS='--cluster-store etcd://{{ etcd_url }} --cluster-mode=kubernetes'
-
diff --git a/roles/contiv/templates/netmaster.j2 b/roles/contiv/templates/netmaster.j2
new file mode 100644
index 000000000..c9db122b5
--- /dev/null
+++ b/roles/contiv/templates/netmaster.j2
@@ -0,0 +1 @@
+NETMASTER_ARGS='--etcd={{ contiv_etcd_url }} --listen-url=127.0.0.1:{{ contiv_netmaster_port }} --fwdmode={{ contiv_netplugin_fwd_mode }} --infra={{ contiv_fabric_mode }} --control-url={{ contiv_netmaster_ctrl_ip }}:{{ contiv_netmaster_port }} --cluster-mode=kubernetes --netmode={{ contiv_encap_mode }}'
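
With the defaults in this change (local etcd proxy at http://127.0.0.1:22379, netmaster port 9999, fwd mode routing, fabric mode default, encap vxlan) and a hypothetical control IP of 10.0.0.1, this template renders roughly:

    NETMASTER_ARGS='--etcd=http://127.0.0.1:22379 --listen-url=127.0.0.1:9999 --fwdmode=routing --infra=default --control-url=10.0.0.1:9999 --cluster-mode=kubernetes --netmode=vxlan'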
diff --git a/roles/contiv/templates/netmaster.service b/roles/contiv/templates/netmaster.service
index ce7d0c75e..b7289bc38 100644
--- a/roles/contiv/templates/netmaster.service
+++ b/roles/contiv/templates/netmaster.service
@@ -4,7 +4,7 @@ After=auditd.service systemd-user-sessions.service contiv-etcd.service
[Service]
EnvironmentFile=/etc/default/netmaster
-ExecStart={{ bin_dir }}/netmaster $NETMASTER_ARGS
+ExecStart={{ contiv_bin_dir }}/netmaster $NETMASTER_ARGS
KillMode=control-group
Restart=always
RestartSec=10
diff --git a/roles/contiv/templates/netplugin.j2 b/roles/contiv/templates/netplugin.j2
index a4928cc3d..0fd727401 100644
--- a/roles/contiv/templates/netplugin.j2
+++ b/roles/contiv/templates/netplugin.j2
@@ -1,7 +1,6 @@
{% if contiv_encap_mode == "vlan" %}
-NETPLUGIN_ARGS='-vlan-if {{ netplugin_interface }} -ctrl-ip {{ netplugin_ctrl_ip }} -plugin-mode kubernetes -cluster-store etcd://{{ etcd_url }}'
+NETPLUGIN_ARGS='--vlan-if={{ contiv_netplugin_interface }} --ctrl-ip={{ contiv_netplugin_ctrl_ip }} --etcd={{ contiv_etcd_url }} --fwdmode={{ contiv_netplugin_fwd_mode }} --cluster-mode=kubernetes --netmode={{ contiv_encap_mode }}'
{% endif %}
{% if contiv_encap_mode == "vxlan" %}
-NETPLUGIN_ARGS='-vtep-ip {{ netplugin_ctrl_ip }} -ctrl-ip {{ netplugin_ctrl_ip }} -plugin-mode kubernetes -cluster-store etcd://{{ etcd_url }}'
+NETPLUGIN_ARGS='--vtep-ip={{ contiv_netplugin_ctrl_ip }} --vxlan-port={{ contiv_vxlan_port }} --ctrl-ip={{ contiv_netplugin_ctrl_ip }} --etcd={{ contiv_etcd_url }} --fwdmode={{ contiv_netplugin_fwd_mode }} --cluster-mode=kubernetes --netmode={{ contiv_encap_mode }}'
{% endif %}
-
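
For the default vxlan branch, with a hypothetical control IP of 10.0.0.2 and the defaults above, the rendered environment file would read approximately:

    NETPLUGIN_ARGS='--vtep-ip=10.0.0.2 --vxlan-port=4789 --ctrl-ip=10.0.0.2 --etcd=http://127.0.0.1:22379 --fwdmode=routing --cluster-mode=kubernetes --netmode=vxlan'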
diff --git a/roles/contiv/templates/netplugin.service b/roles/contiv/templates/netplugin.service
index 6358d89ec..2e1ca1bdf 100644
--- a/roles/contiv/templates/netplugin.service
+++ b/roles/contiv/templates/netplugin.service
@@ -4,7 +4,7 @@ After=auditd.service systemd-user-sessions.service contiv-etcd.service
[Service]
EnvironmentFile=/etc/default/netplugin
-ExecStart={{ bin_dir }}/netplugin $NETPLUGIN_ARGS
+ExecStart={{ contiv_bin_dir }}/netplugin $NETPLUGIN_ARGS
KillMode=control-group
Restart=always
RestartSec=10
diff --git a/roles/contiv_auth_proxy/README.md b/roles/contiv_auth_proxy/README.md
deleted file mode 100644
index 287b6c148..000000000
--- a/roles/contiv_auth_proxy/README.md
+++ /dev/null
@@ -1,29 +0,0 @@
-Role Name
-=========
-
-Role to install Contiv API Proxy and UI
-
-Requirements
-------------
-
-Docker needs to be installed to run the auth proxy container.
-
-Role Variables
---------------
-
-auth_proxy_image specifies the image with version tag to be used to spin up the auth proxy container.
-auth_proxy_cert, auth_proxy_key specify files to use for the proxy server certificates.
-auth_proxy_port is the host port and auth_proxy_datastore the cluster data store address.
-
-Dependencies
-------------
-
-docker
-
-Example Playbook
-----------------
-
-- hosts: netplugin-node
- become: true
- roles:
- - { role: auth_proxy, auth_proxy_port: 10000, auth_proxy_datastore: etcd://netmaster:22379 }
diff --git a/roles/contiv_auth_proxy/defaults/main.yml b/roles/contiv_auth_proxy/defaults/main.yml
deleted file mode 100644
index e1d904c6a..000000000
--- a/roles/contiv_auth_proxy/defaults/main.yml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-auth_proxy_image: "contiv/auth_proxy:1.1.1"
-auth_proxy_port: 10000
-contiv_certs: "/var/contiv/certs"
-cluster_store: "etcd://{{ hostvars[groups['masters'][0]]['ansible_' + netmaster_interface].ipv4.address }}:22379"
-auth_proxy_cert: "{{ contiv_certs }}/auth_proxy_cert.pem"
-auth_proxy_key: "{{ contiv_certs }}/auth_proxy_key.pem"
-auth_proxy_datastore: "{{ cluster_store }}"
-auth_proxy_binaries: "/var/contiv_cache"
-auth_proxy_local_install: False
-auth_proxy_rule_comment: "Contiv auth proxy service"
-service_vip: "{{ hostvars[groups['masters'][0]]['ansible_' + netmaster_interface].ipv4.address }}"
diff --git a/roles/contiv_auth_proxy/files/auth-proxy.service b/roles/contiv_auth_proxy/files/auth-proxy.service
deleted file mode 100644
index 7cd2edff1..000000000
--- a/roles/contiv_auth_proxy/files/auth-proxy.service
+++ /dev/null
@@ -1,13 +0,0 @@
-[Unit]
-Description=Contiv Proxy and UI
-After=auditd.service systemd-user-sessions.service time-sync.target docker.service
-
-[Service]
-ExecStart=/usr/bin/auth_proxy.sh start
-ExecStop=/usr/bin/auth_proxy.sh stop
-KillMode=control-group
-Restart=on-failure
-RestartSec=10
-
-[Install]
-WantedBy=multi-user.target
diff --git a/roles/contiv_auth_proxy/files/cert.pem b/roles/contiv_auth_proxy/files/cert.pem
deleted file mode 100644
index 63df4603f..000000000
--- a/roles/contiv_auth_proxy/files/cert.pem
+++ /dev/null
@@ -1,33 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIFuTCCA6GgAwIBAgIJAOFyylO2zW2EMA0GCSqGSIb3DQEBCwUAMHMxCzAJBgNV
-BAYTAlVTMQswCQYDVQQIDAJDQTERMA8GA1UEBwwIU2FuIEpvc2UxDTALBgNVBAoM
-BENQU0cxFjAUBgNVBAsMDUlUIERlcGFydG1lbnQxHTAbBgNVBAMMFGF1dGgtbG9j
-YWwuY2lzY28uY29tMB4XDTE3MDcxMzE5NDYwMVoXDTI3MDcxMTE5NDYwMVowczEL
-MAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMREwDwYDVQQHDAhTYW4gSm9zZTENMAsG
-A1UECgwEQ1BTRzEWMBQGA1UECwwNSVQgRGVwYXJ0bWVudDEdMBsGA1UEAwwUYXV0
-aC1sb2NhbC5jaXNjby5jb20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC
-AQDKCg26dvsD1u3f1lCaLlVptyTyGyanaJ73mlHiUnAMcu0A/p3kzluTeQLZJxtl
-MToM7rT/lun6fbhQC+7TQep9mufBzLhssyzRnT9rnGSeGwN66mO/rlYPZc5C1D7p
-7QZh1uLznzgOA2zMkgnI+n6LB2TZWg+XLhZZIr5SVYE18lj0tnwq3R1uznVv9t06
-grUYK2K7x0Y3Pt2e6yV0e1w2FOGH+7v3mm0c8r1+7U+4EZ2SM3fdG7nyTL/187gl
-yE8X4HOnAyYGbAnULJC02LR/DTQpv/RpLN/YJEpHZWApHZCKh+fbFdIhRRwEnT4L
-DLy3GJVFDEsmFaC91wf24+HAeUl9/hRIbxo9x/7kXmrhMlK38x2oo3cPh0XZxHje
-XmJUGG1OByAuIZaGFwS9lUuGTNvpN8P/v3HN/nORc0RE3fvoXIv4nuhaEfuo32q4
-dvO4aNjmxjz1JcUEx6DiMQe4ECaReYdvI+j9ZkUJj/e89iLsQ8gz5t3FTM+tmBi1
-hrRBAgWyRY5DKECVv2SNFiX55JQGA5vQDGw51qTTuhntfBhkHvhKL7V1FRZazx6N
-wqFyynig/jplb1ZNdKZ9ZxngZr6qHIx4RcGaJ9HdVhik7NyUCiHjWeGagzun2Omq
-FFXAD9Hmfctac5bGxx0FBi95kO8bd8b0GSIh2CWanETjawIDAQABo1AwTjAdBgNV
-HQ4EFgQU5P1g5gFZot//iwEV98MwW2YXzEMwHwYDVR0jBBgwFoAU5P1g5gFZot//
-iwEV98MwW2YXzEMwDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAgEAbWgN
-BkFzzG5sbG7vUb23Ggv/0TCCuMtuKBGOBR0EW5Ssw6Aml7j3AGiy/1+2sdrQMsx2
-nVpexyQW5XS/X+8JjH7H7ifvwl3bVJ8xiR/9ioIJovrQojxQO0cUB2Lljj3bPd/R
-/tddAhPj0uN9N7UAejA12kXGa0Rrzb2U1rIpO9jnTbQYJiTOSzFiiGRMZWx3hfsW
-SDTpPmsV2Mh+jcmuxvPITl0s+vtqsm7SYoUZHwJ80LvrPbmk/5hTZGRsI3W5jipB
-PpOxvBnAWnQH3miMhty2TDaQ9JjYUwnxjFFZvNIYtp8+eH4nlbSldbgZoUeAe8It
-X6SsP8gT/uQh3TPvzNIfYROA7qTwoOQ8ZW8ssai/EttHAztFxketgNEfjwUTz8EJ
-yKeyAJ7qk3zD5k7p33ZNLWjmN0Awx3fCE9OQmNUyNX7PpYb4i+tHWu3h6Clw0RUf
-0gb1I+iyB3PXmpiYtxdMxGSi9CQIyWHzC4bsTQZkrzzIHWFSwewhUWOQ2Wko0hrv
-DnkS5k0cMPn5aNxw56H6OI+6hb+y/GGkTxNY9Gbxypx6lgZson0EY80EPZOJAORM
-XggJtTjiMpzvKh18DZY/Phmdh0C2tt8KYFdG83qLEhya9WZujbLAm38vIziFHbdX
-jOitXBSPyVrV3JvsCVksp+YC8Lnv3FsM494R4kA=
------END CERTIFICATE-----
diff --git a/roles/contiv_auth_proxy/files/key.pem b/roles/contiv_auth_proxy/files/key.pem
deleted file mode 100644
index 7224e569c..000000000
--- a/roles/contiv_auth_proxy/files/key.pem
+++ /dev/null
@@ -1,51 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIIJKQIBAAKCAgEAygoNunb7A9bt39ZQmi5Vabck8hsmp2ie95pR4lJwDHLtAP6d
-5M5bk3kC2ScbZTE6DO60/5bp+n24UAvu00HqfZrnwcy4bLMs0Z0/a5xknhsDeupj
-v65WD2XOQtQ+6e0GYdbi8584DgNszJIJyPp+iwdk2VoPly4WWSK+UlWBNfJY9LZ8
-Kt0dbs51b/bdOoK1GCtiu8dGNz7dnusldHtcNhThh/u795ptHPK9fu1PuBGdkjN3
-3Ru58ky/9fO4JchPF+BzpwMmBmwJ1CyQtNi0fw00Kb/0aSzf2CRKR2VgKR2Qiofn
-2xXSIUUcBJ0+Cwy8txiVRQxLJhWgvdcH9uPhwHlJff4USG8aPcf+5F5q4TJSt/Md
-qKN3D4dF2cR43l5iVBhtTgcgLiGWhhcEvZVLhkzb6TfD/79xzf5zkXNERN376FyL
-+J7oWhH7qN9quHbzuGjY5sY89SXFBMeg4jEHuBAmkXmHbyPo/WZFCY/3vPYi7EPI
-M+bdxUzPrZgYtYa0QQIFskWOQyhAlb9kjRYl+eSUBgOb0AxsOdak07oZ7XwYZB74
-Si+1dRUWWs8ejcKhcsp4oP46ZW9WTXSmfWcZ4Ga+qhyMeEXBmifR3VYYpOzclAoh
-41nhmoM7p9jpqhRVwA/R5n3LWnOWxscdBQYveZDvG3fG9BkiIdglmpxE42sCAwEA
-AQKCAgANVU6EoLd+EGAQZo9ZLXebi2eXxqztXV0oT/nZasFUQP1dFHCNGgU3HURP
-2mHXcsE2+0XcnDQCwOs59R+kt3PnKCLlSkJdghGSH8OAsYh+WqAHK5K7oqCxUXGk
-PWeNfoPuTwUZOMe1PQqgEX8t0UIqoKlKIsRmoLb+2Okge94UFlNCiwx0s7TujBd5
-9Ruycc/LsYlJhSQgHzj29OO65S03sHcVx0onU/yhbW+OAdFB/3+bl2PwppTF5cTB
-UX00mRyHIdvgCLgoslaPtwUxuh9nRxLLMozJqBl5pSN1xL3s2LOiQMfPUIhWg74O
-m+XtSsDlgGzRardG4ySBgsBWzcEnGWi5/xyc/6dtERzR382+CLUfOEoucGJHk6kj
-RdbVx5FCawpAzjs9Wo49Vr+WQceSiBfb2+ndNUTiD0wu7xLEVPcYC6CMk71qZv5H
-0qGlLhtkHF0nSQytbwqwfMz2SGDfkwIHgQ0gTKMpEMWK79E24ewE1BnMiaKC1bgk
-evB6WM1YZFMKS5L7fshJcbeMe9dhSF3s+Y0MYVv5MCL1VMZyIzAcj8mkPYZyBRUk
-MC87GnaebeTvHNtimvqCuWDGVI1SOoc1xtopkxinTqtIYGuQacrSmfyf9D3Rg4+l
-kB0ibtJV+HLP94q266aef/PdpXszs7zo0h6skpLItW/jAuSNuQKCAQEA/VdXpMi8
-nfOtXwOZlGA2+jShYyHyCl2TKgbpfDGl1yKNkbBrIu2/PEl1DpmzSeG1tdNCzN68
-4vEjpF/jBsdSJj4BDiRY6HEcURXpw4yTZ7oCnUCbzadLIo3wX/gFDEVZz+0nQQ29
-5x0XGuQnJXC2fe/CyrkfltKhFSYoTSjtMbma4Pm3Q3HP3wGOvoUKtKNDO5rF26Qh
-YtqJgJSKBAms0wKiy9VVTa6DaXrtSnXTR+Ltud3xnWBrX1Z+idwxYt/Be5W2woHf
-M5zPIqMUgry5ujtRxhLmleFXDAYbaIQR9AZXlSS3w+9Gcl5EDRkFXqlaoCfppwTR
-wakj2lNjbAidPwKCAQEAzCjgko4/Yss/0dCs8ySKd2IaRF93OwC/E2SHVqe5bATh
-rVmDn/KIH4J2fI4FiaIHELT1CU5vmganYbK2k7CoJztjJltM1B7rkpHiVSL+qMqn
-yBZFg3LFq9eiBPZHyQEc+HMJUhFRexjdeqLH78HCoPz1QnKo2xRoGHhSQ/Rh6lXo
-20tldL9HrSxPRmwxnyLgWGcWopv/92JNxu6FgnZcnsVjkpO2mriLD7+Ty5qfvkwc
-RFDBYnq2JjBcvqngrzDIGDzC7hTA5BRuuQdNMZggJwO6nKdZDUrq5NIo9B07FLj1
-IRMVm7D1vJYzYI6HW7Wj4vNRXMY8jG1fwvNG0+xy1QKCAQEA7m14R9bAZWuDnGt3
-7APNWheUWAcHk6fTq/cLYV4cdWfIkvfVLO9STrvXliEjcoIhkPk94jAy1ucZo0a3
-FJccgm9ScOvWXRSvEMUt12ODC1ktwq+esqMi/GdXdgqnPZA7YYwRqJD1TAC90Qou
-qXb12Xp/+mjWCQ08mvnpbgz5hxXmZJvAVZJUj84YeMgfdjg9O2iDlB5ZaX7BcCjb
-58bvRzww2ONzQAPhG7Gch7pyWTKCh64RCgtHold2CesY87QglV4mvdKarSmEbFXN
-JOnXZiUT5fW93AtS8DcDLo81klMxtGT1KksUIukC5MzKl/eNGjPWG+FWRAwaeQyI
-ApHs4wKCAQAI10RSVGKeTprm5Rh4Nv7gCJmGmHO7VF7x4gqSUBURfmyfax7uEDyg
-0K982VGYEjIoIQ3zZzgh/WPGMU0CvEWr3UB/6rg6/1PINxUMBsXsXUpCueQsuw2g
-UWgsutWE+M1eXOzsZt+Waw88PkxWL5fUDOA6DmkNg6a2WI+Hbc/HrAy3Yl50Xcwm
-zaJpNEo5z/LTITOzuvmsps8jbDTP33xHS9jyAf+IV7F97xfhW0LLpNQciTq2nwXA
-RZvejdCzBXPEyOzQDooD1natAInxOds6lUjBe+W5U6M0YX1whMuILDJBSmhHI7Sg
-hAiZh9KIwCbmrw6468S3eA0LjillB/o5AoIBAQCg93syT50nYF2UWWP/rEa7qf6h
-+YpBPpJskIl3NDMJtie9OcdsoFpjblpFbsMqsSag9KhGl7wn4f8qXO0HERSb8oYd
-1Zu6BgUCuRXuAKNI4f508IooNpXx9y7xxl4giFBnDPa6W3KWqZ2LMDt92htMd/Zm
-qvoyYZhFhMSyKFzPDAFdsZijJgahqJRKhHeW9BsPqho5i7Ys+PhE8e/vUZs2zUeS
-QEHWhVisDTNKOoJIdz7JXFgEXCPTLAxXIIhYSkIfQxHxsWjt0vs79tzUkV8NlpKt
-d7s0iyHnD6kDvoxYOSI9YmSEnnFBFdgeiD+/VD+7enOdqb5MHsjuw+by09ft
------END RSA PRIVATE KEY-----
diff --git a/roles/contiv_auth_proxy/handlers/main.yml b/roles/contiv_auth_proxy/handlers/main.yml
deleted file mode 100644
index 9cb9bea49..000000000
--- a/roles/contiv_auth_proxy/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for auth_proxy
diff --git a/roles/contiv_auth_proxy/tasks/cleanup.yml b/roles/contiv_auth_proxy/tasks/cleanup.yml
deleted file mode 100644
index a29659cc9..000000000
--- a/roles/contiv_auth_proxy/tasks/cleanup.yml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-
-- name: stop auth-proxy container
- service: name=auth-proxy state=stopped
-
-- name: cleanup iptables for auth proxy
- shell: iptables -D INPUT -p tcp --dport {{ item }} -j ACCEPT -m comment --comment "{{ auth_proxy_rule_comment }} ({{ item }})"
- become: true
- with_items:
- - "{{ auth_proxy_port }}"
diff --git a/roles/contiv_auth_proxy/tasks/main.yml b/roles/contiv_auth_proxy/tasks/main.yml
deleted file mode 100644
index 74e7bf794..000000000
--- a/roles/contiv_auth_proxy/tasks/main.yml
+++ /dev/null
@@ -1,37 +0,0 @@
----
-# tasks file for auth_proxy
-- name: setup iptables for auth proxy
- shell: >
- ( iptables -L INPUT | grep "{{ auth_proxy_rule_comment }} ({{ item }})" ) || \
- iptables -I INPUT 1 -p tcp --dport {{ item }} -j ACCEPT -m comment --comment "{{ auth_proxy_rule_comment }} ({{ item }})"
- become: true
- with_items:
- - "{{ auth_proxy_port }}"
-
-# Load the auth-proxy-image from local tar. Ignore any errors to handle the
-# case where the image is not built in
-- name: copy auth-proxy image
- copy: src={{ auth_proxy_binaries }}/auth-proxy-image.tar dest=/tmp/auth-proxy-image.tar
- when: auth_proxy_local_install == True
-
-- name: load auth-proxy image
- shell: docker load -i /tmp/auth-proxy-image.tar
- when: auth_proxy_local_install == True
-
-- name: create cert folder for proxy
- file: path=/var/contiv/certs state=directory
-
-- name: copy shell script for starting auth-proxy
- template: src=auth_proxy.j2 dest=/usr/bin/auth_proxy.sh mode=u=rwx,g=rx,o=rx
-
-- name: copy cert for starting auth-proxy
- copy: src=cert.pem dest=/var/contiv/certs/auth_proxy_cert.pem mode=u=rw,g=r,o=r
-
-- name: copy key for starting auth-proxy
- copy: src=key.pem dest=/var/contiv/certs/auth_proxy_key.pem mode=u=rw,g=r,o=r
-
-- name: copy systemd units for auth-proxy
- copy: src=auth-proxy.service dest=/etc/systemd/system/auth-proxy.service
-
-- name: start auth-proxy container
- systemd: name=auth-proxy daemon_reload=yes state=started enabled=yes
diff --git a/roles/contiv_auth_proxy/templates/auth_proxy.j2 b/roles/contiv_auth_proxy/templates/auth_proxy.j2
deleted file mode 100644
index 0ab8c831b..000000000
--- a/roles/contiv_auth_proxy/templates/auth_proxy.j2
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/bin/bash
-
-usage="$0 start/stop"
-if [ $# -ne 1 ]; then
- echo USAGE: $usage
- exit 1
-fi
-
-case $1 in
-start)
- set -e
-
- /usr/bin/docker run --rm \
- -p 10000:{{ auth_proxy_port }} \
- --net=host --name=auth-proxy \
- -e NO_NETMASTER_STARTUP_CHECK=1 \
- -v /var/contiv:/var/contiv:z \
- {{ auth_proxy_image }} \
- --tls-key-file={{ auth_proxy_key }} \
- --tls-certificate={{ auth_proxy_cert }} \
- --data-store-address={{ auth_proxy_datastore }} \
- --netmaster-address={{ service_vip }}:9999 \
- --listen-address=:10000
- ;;
-
-stop)
- # don't stop on error
- /usr/bin/docker stop auth-proxy
- /usr/bin/docker rm -f -v auth-proxy
- ;;
-
-*)
- echo USAGE: $usage
- exit 1
- ;;
-esac
diff --git a/roles/contiv_auth_proxy/tests/inventory b/roles/contiv_auth_proxy/tests/inventory
deleted file mode 100644
index d18580b3c..000000000
--- a/roles/contiv_auth_proxy/tests/inventory
+++ /dev/null
@@ -1 +0,0 @@
-localhost \ No newline at end of file
diff --git a/roles/contiv_auth_proxy/tests/test.yml b/roles/contiv_auth_proxy/tests/test.yml
deleted file mode 100644
index 2af3250cd..000000000
--- a/roles/contiv_auth_proxy/tests/test.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: localhost
- remote_user: root
- roles:
- - auth_proxy
diff --git a/roles/contiv_auth_proxy/vars/main.yml b/roles/contiv_auth_proxy/vars/main.yml
deleted file mode 100644
index 9032766c4..000000000
--- a/roles/contiv_auth_proxy/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for auth_proxy
diff --git a/roles/contiv_facts/defaults/main.yaml b/roles/contiv_facts/defaults/main.yaml
index 7b8150954..c1622c56a 100644
--- a/roles/contiv_facts/defaults/main.yaml
+++ b/roles/contiv_facts/defaults/main.yaml
@@ -1,13 +1,10 @@
---
# The directory where binaries are stored on Ansible
# managed systems.
-bin_dir: /usr/bin
+contiv_bin_dir: /usr/bin
# The directory used by Ansible to temporarily store
# files on Ansible managed systems.
-ansible_temp_dir: /tmp/.ansible/files
+contiv_ansible_temp_dir: /tmp/.ansible/files
-source_type: packageManager
-
-# Whether or not to also install and enable the Contiv auth_proxy
-contiv_enable_auth_proxy: false
+contiv_source_type: packageManager
diff --git a/roles/contiv_facts/tasks/fedora-install.yml b/roles/contiv_facts/tasks/fedora-install.yml
index 932ff091a..b8239a636 100644
--- a/roles/contiv_facts/tasks/fedora-install.yml
+++ b/roles/contiv_facts/tasks/fedora-install.yml
@@ -11,9 +11,9 @@
retries: 5
delay: 10
environment:
- https_proxy: "{{ https_proxy }}"
- http_proxy: "{{ http_proxy }}"
- no_proxy: "{{ no_proxy }}"
+ https_proxy: "{{ contiv_https_proxy }}"
+ http_proxy: "{{ contiv_http_proxy }}"
+ no_proxy: "{{ contiv_no_proxy }}"
- name: Install libselinux-python
command: dnf install {{ item }} -y
@@ -21,6 +21,6 @@
- python-dnf
- libselinux-python
environment:
- https_proxy: "{{ https_proxy }}"
- http_proxy: "{{ http_proxy }}"
- no_proxy: "{{ no_proxy }}"
+ https_proxy: "{{ contiv_https_proxy }}"
+ http_proxy: "{{ contiv_http_proxy }}"
+ no_proxy: "{{ contiv_no_proxy }}"
diff --git a/roles/contiv_facts/tasks/main.yml b/roles/contiv_facts/tasks/main.yml
index 3267a4ab0..11f1e1369 100644
--- a/roles/contiv_facts/tasks/main.yml
+++ b/roles/contiv_facts/tasks/main.yml
@@ -1,60 +1,31 @@
---
-- name: Determine if Atomic
- stat: path=/run/ostree-booted
- register: s
- changed_when: false
- check_mode: no
-
-- name: Init the is_atomic fact
- set_fact:
- is_atomic: false
-
-- name: Set the is_atomic fact
- set_fact:
- is_atomic: true
- when: s.stat.exists
-
- name: Determine if CoreOS
raw: "grep '^NAME=' /etc/os-release | sed s'/NAME=//'"
register: distro
check_mode: no
-- name: Init the is_coreos fact
+- name: Init the contiv_is_coreos fact
set_fact:
- is_coreos: false
+ contiv_is_coreos: false
-- name: Set the is_coreos fact
+- name: Set the contiv_is_coreos fact
set_fact:
- is_coreos: true
+ contiv_is_coreos: true
when: "'CoreOS' in distro.stdout"
-- name: Set docker config file directory
- set_fact:
- docker_config_dir: "/etc/sysconfig"
-
-- name: Override docker config file directory for Debian
- set_fact:
- docker_config_dir: "/etc/default"
- when: ansible_distribution == "Debian" or ansible_distribution == "Ubuntu"
-
-- name: Create config file directory
- file:
- path: "{{ docker_config_dir }}"
- state: directory
-
- name: Set the bin directory path for CoreOS
set_fact:
- bin_dir: "/opt/bin"
- when: is_coreos
+ contiv_bin_dir: "/opt/bin"
+ when: contiv_is_coreos
- name: Create the directory used to store binaries
file:
- path: "{{ bin_dir }}"
+ path: "{{ contiv_bin_dir }}"
state: directory
- name: Create Ansible temp directory
file:
- path: "{{ ansible_temp_dir }}"
+ path: "{{ contiv_ansible_temp_dir }}"
state: directory
- name: Determine if has rpm
@@ -63,26 +34,26 @@
changed_when: false
check_mode: no
-- name: Init the has_rpm fact
+- name: Init the contiv_has_rpm fact
set_fact:
- has_rpm: false
+ contiv_has_rpm: false
-- name: Set the has_rpm fact
+- name: Set the contiv_has_rpm fact
set_fact:
- has_rpm: true
+ contiv_has_rpm: true
when: s.stat.exists
-- name: Init the has_firewalld fact
+- name: Init the contiv_has_firewalld fact
set_fact:
- has_firewalld: false
+ contiv_has_firewalld: false
-- name: Init the has_iptables fact
+- name: Init the contiv_has_iptables fact
set_fact:
- has_iptables: false
+ contiv_has_iptables: false
# collect information about what packages are installed
- include_tasks: rpm.yml
- when: has_rpm
+ when: contiv_has_rpm
- include_tasks: fedora-install.yml
- when: not is_atomic and ansible_distribution == "Fedora"
+ when: not openshift_is_atomic and ansible_distribution == "Fedora"
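The init-then-conditionally-set pattern kept above (init contiv_is_coreos to false, flip it to true when the grep matches) could equally be collapsed into a single task; a sketch, not how the role does it:

    - name: Set the contiv_is_coreos fact
      set_fact:
        contiv_is_coreos: "{{ 'CoreOS' in distro.stdout }}"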
diff --git a/roles/contiv_facts/tasks/rpm.yml b/roles/contiv_facts/tasks/rpm.yml
index d12436f96..dc6c5d3b7 100644
--- a/roles/contiv_facts/tasks/rpm.yml
+++ b/roles/contiv_facts/tasks/rpm.yml
@@ -13,9 +13,9 @@
failed_when: false
check_mode: no
-- name: Set the has_firewalld fact
+- name: Set the contiv_has_firewalld fact
set_fact:
- has_firewalld: true
+ contiv_has_firewalld: true
when: s.rc == 0 and ss.rc == 0
- name: Determine if iptables-services installed
@@ -25,7 +25,7 @@
failed_when: false
check_mode: no
-- name: Set the has_iptables fact
+- name: Set the contiv_has_iptables fact
set_fact:
- has_iptables: true
+ contiv_has_iptables: true
when: s.rc == 0
diff --git a/roles/etcd/defaults/main.yaml b/roles/etcd/defaults/main.yaml
index 86cea5c46..87e249642 100644
--- a/roles/etcd/defaults/main.yaml
+++ b/roles/etcd/defaults/main.yaml
@@ -5,7 +5,7 @@ r_etcd_common_backup_sufix_name: ''
l_is_etcd_system_container: "{{ (openshift_use_etcd_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
# runc, docker, host
-r_etcd_common_etcd_runtime: "{{ 'runc' if l_is_etcd_system_container else 'docker' if l_is_containerized else 'host' }}"
+r_etcd_common_etcd_runtime: "{{ 'runc' if l_is_etcd_system_container else 'docker' if openshift_is_containerized else 'host' }}"
r_etcd_common_embedded_etcd: false
osm_etcd_image: 'registry.access.redhat.com/rhel7/etcd'
@@ -98,4 +98,4 @@ r_etcd_os_firewall_allow:
# set the backend quota to 4GB by default
etcd_quota_backend_bytes: 4294967296
-openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False)) else 'docker' }}"
+openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False) | bool) else 'docker' }}"
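The added | bool cast matters because INI inventories deliver booleans as strings, and any non-empty string, including the string "False", is truthy in a Jinja2 conditional. A sketch demonstrating the difference, assuming the variable arrived as the string "False":

    - debug:
        msg: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False)) else 'docker' }}"
      # prints container-engine: the string "False" is truthy

    - debug:
        msg: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False) | bool) else 'docker' }}"
      # prints docker, as intended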
diff --git a/roles/etcd/meta/main.yml b/roles/etcd/meta/main.yml
index f2e1fc310..af58eff62 100644
--- a/roles/etcd/meta/main.yml
+++ b/roles/etcd/meta/main.yml
@@ -17,6 +17,5 @@ galaxy_info:
- system
dependencies:
- role: lib_openshift
-- role: lib_os_firewall
- role: lib_utils
- role: openshift_facts
diff --git a/roles/etcd/tasks/auxiliary/drop_etcdctl.yml b/roles/etcd/tasks/auxiliary/drop_etcdctl.yml
index ccfd9da14..881a8c270 100644
--- a/roles/etcd/tasks/auxiliary/drop_etcdctl.yml
+++ b/roles/etcd/tasks/auxiliary/drop_etcdctl.yml
@@ -1,7 +1,7 @@
---
- name: Install etcd for etcdctl
package: name=etcd{{ '-' + etcd_version if etcd_version is defined else '' }} state=present
- when: not openshift.common.is_atomic | bool
+ when: not openshift_is_atomic | bool
register: result
until: result is succeeded
diff --git a/roles/etcd/tasks/certificates/fetch_client_certificates_from_ca.yml b/roles/etcd/tasks/certificates/fetch_client_certificates_from_ca.yml
index 119071a72..ce295d2f5 100644
--- a/roles/etcd/tasks/certificates/fetch_client_certificates_from_ca.yml
+++ b/roles/etcd/tasks/certificates/fetch_client_certificates_from_ca.yml
@@ -28,7 +28,7 @@
etcd_client_certs_missing: "{{ true if etcd_certificates_redeploy | default(false) | bool
else (False in (g_external_etcd_cert_stat_result.results
| default({})
- | oo_collect(attribute='stat.exists')
+ | lib_utils_oo_collect(attribute='stat.exists')
| list)) }}"
- name: Ensure generated_certs directory present
@@ -57,6 +57,7 @@
# Certificates must be signed serially in order to avoid competing
# for the serial file.
+# delegated_serial_command is a custom module in lib_utils
- name: Sign and create the client crt
delegated_serial_command:
command: >
@@ -79,13 +80,6 @@
when: etcd_client_certs_missing | bool
delegate_to: "{{ etcd_ca_host }}"
-- name: Create local temp directory for syncing certs
- local_action: command mktemp -d /tmp/etcd_certificates-XXXXXXX
- register: g_etcd_client_mktemp
- changed_when: False
- when: etcd_client_certs_missing | bool
- become: no
-
- name: Create a tarball of the etcd certs
command: >
tar -czvf {{ etcd_generated_certs_dir }}/{{ etcd_cert_subdir }}.tgz
@@ -101,8 +95,7 @@
- name: Retrieve the etcd cert tarballs
fetch:
src: "{{ etcd_generated_certs_dir }}/{{ etcd_cert_subdir }}.tgz"
- dest: "{{ g_etcd_client_mktemp.stdout }}/"
- flat: yes
+ dest: "/tmp"
fail_on_missing: yes
validate_checksum: yes
when: etcd_client_certs_missing | bool
@@ -116,10 +109,15 @@
- name: Unarchive etcd cert tarballs
unarchive:
- src: "{{ g_etcd_client_mktemp.stdout }}/{{ etcd_cert_subdir }}.tgz"
+ src: "/tmp/{{ inventory_hostname }}/{{ etcd_generated_certs_dir }}/{{ etcd_cert_subdir }}.tgz"
dest: "{{ etcd_cert_config_dir }}"
when: etcd_client_certs_missing | bool
+- name: Delete temporary directory
+ local_action: file path="/tmp/{{ inventory_hostname }}" state=absent
+ changed_when: False
+ when: etcd_client_certs_missing | bool
+
- file:
path: "{{ etcd_cert_config_dir }}/{{ item }}"
owner: root
@@ -130,9 +128,3 @@
- "{{ etcd_cert_prefix }}client.key"
- "{{ etcd_cert_prefix }}ca.crt"
when: etcd_client_certs_missing | bool
-
-- name: Delete temporary directory
- local_action: file path="{{ g_etcd_client_mktemp.stdout }}" state=absent
- changed_when: False
- when: etcd_client_certs_missing | bool
- become: no
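Dropping flat: yes is what makes the fixed /tmp destination safe here: without it, fetch namespaces each copy under dest/<inventory_hostname>/<full remote path>, so concurrent hosts no longer clobber one another and the per-host mktemp becomes unnecessary. A minimal sketch of the resulting layout, with an illustrative path:

    - name: Retrieve a cert tarball
      fetch:
        src: /etc/etcd/generated_certs/etcd-node1/node1.tgz   # hypothetical path
        dest: /tmp
    # lands on the control host at:
    #   /tmp/<inventory_hostname>/etc/etcd/generated_certs/etcd-node1/node1.tgz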
diff --git a/roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml b/roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml
index deb2301d7..7c8b87d99 100644
--- a/roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml
+++ b/roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml
@@ -21,7 +21,7 @@
etcd_server_certs_missing: "{{ true if etcd_certificates_redeploy | default(false) | bool
else (False in (g_etcd_server_cert_stat_result.results
| default({})
- | oo_collect(attribute='stat.exists')
+ | lib_utils_oo_collect(attribute='stat.exists')
| list)) }}"
- name: Ensure generated_certs directory present
@@ -50,6 +50,7 @@
# Certificates must be signed serially in order to avoid competing
# for the serial file.
+# delegated_serial_command is a custom module in lib_utils
- name: Sign and create the server crt
delegated_serial_command:
command: >
@@ -83,6 +84,7 @@
# Certificates must be signed serially in order to avoid competing
# for the serial file.
+# delegated_serial_command is a custom module in lib_utils
- name: Sign and create the peer crt
delegated_serial_command:
command: >
@@ -105,13 +107,6 @@
when: etcd_server_certs_missing | bool
delegate_to: "{{ etcd_ca_host }}"
-- name: Create local temp directory for syncing certs
- local_action: command mktemp -d /tmp/etcd_certificates-XXXXXXX
- become: no
- register: g_etcd_server_mktemp
- changed_when: False
- when: etcd_server_certs_missing | bool
-
- name: Create a tarball of the etcd certs
command: >
tar -czvf {{ etcd_generated_certs_dir }}/{{ etcd_cert_subdir }}.tgz
@@ -127,8 +122,7 @@
- name: Retrieve etcd cert tarball
fetch:
src: "{{ etcd_generated_certs_dir }}/{{ etcd_cert_subdir }}.tgz"
- dest: "{{ g_etcd_server_mktemp.stdout }}/"
- flat: yes
+ dest: "/tmp"
fail_on_missing: yes
validate_checksum: yes
when: etcd_server_certs_missing | bool
@@ -144,7 +138,7 @@
- name: Unarchive cert tarball
unarchive:
- src: "{{ g_etcd_server_mktemp.stdout }}/{{ etcd_cert_subdir }}.tgz"
+ src: "/tmp/{{ inventory_hostname }}/{{ etcd_generated_certs_dir }}/{{ etcd_cert_subdir }}.tgz"
dest: "{{ etcd_cert_config_dir }}"
when: etcd_server_certs_missing | bool
@@ -161,8 +155,7 @@
- name: Retrieve etcd ca cert tarball
fetch:
src: "{{ etcd_generated_certs_dir }}/{{ etcd_ca_name }}.tgz"
- dest: "{{ g_etcd_server_mktemp.stdout }}/"
- flat: yes
+ dest: "/tmp"
fail_on_missing: yes
validate_checksum: yes
when: etcd_server_certs_missing | bool
@@ -177,8 +170,7 @@
when: etcd_server_certs_missing | bool
- name: Delete temporary directory
- local_action: file path="{{ g_etcd_server_mktemp.stdout }}" state=absent
- become: no
+ local_action: file path="/tmp/{{ inventory_hostname }}" state=absent
changed_when: False
when: etcd_server_certs_missing | bool
diff --git a/roles/etcd/tasks/migration/add_ttls.yml b/roles/etcd/tasks/migration/add_ttls.yml
index a4b0ff31d..3d945344c 100644
--- a/roles/etcd/tasks/migration/add_ttls.yml
+++ b/roles/etcd/tasks/migration/add_ttls.yml
@@ -11,7 +11,7 @@
- name: Re-introduce leases (as a replacement for key TTLs)
command: >
- {{ openshift.common.client_binary }} adm migrate etcd-ttl \
+ {{ openshift_client_binary }} adm migrate etcd-ttl \
--cert {{ r_etcd_common_master_peer_cert_file }} \
--key {{ r_etcd_common_master_peer_key_file }} \
--cacert {{ r_etcd_common_master_peer_ca_file }} \
diff --git a/roles/etcd/tasks/migration/migrate.yml b/roles/etcd/tasks/migration/migrate.yml
index 54a9c74ff..630640ab1 100644
--- a/roles/etcd/tasks/migration/migrate.yml
+++ b/roles/etcd/tasks/migration/migrate.yml
@@ -1,7 +1,7 @@
---
# Should this be run in a serial manner?
- set_fact:
- l_etcd_service: "{{ 'etcd_container' if openshift.common.is_containerized else 'etcd' }}"
+ l_etcd_service: "{{ 'etcd_container' if (openshift_is_containerized | bool) else 'etcd' }}"
- name: Migrate etcd data
command: >
diff --git a/roles/etcd/tasks/version_detect.yml b/roles/etcd/tasks/version_detect.yml
index fe1e418d8..ab3626cec 100644
--- a/roles/etcd/tasks/version_detect.yml
+++ b/roles/etcd/tasks/version_detect.yml
@@ -12,7 +12,7 @@
- debug:
msg: "Etcd rpm version {{ etcd_rpm_version.stdout }} detected"
when:
- - not openshift.common.is_containerized | bool
+ - not openshift_is_containerized | bool
- block:
- name: Record containerized etcd version (docker)
@@ -52,4 +52,4 @@
- debug:
msg: "Etcd containerized version {{ etcd_container_version }} detected"
when:
- - openshift.common.is_containerized | bool
+ - openshift_is_containerized | bool
diff --git a/roles/flannel/defaults/main.yaml b/roles/flannel/defaults/main.yaml
index 2e4a0dc39..d9e4d2354 100644
--- a/roles/flannel/defaults/main.yaml
+++ b/roles/flannel/defaults/main.yaml
@@ -6,4 +6,4 @@ etcd_peer_ca_file: "{{ openshift.common.config_base }}/node/flannel.etcd-ca.crt"
etcd_peer_cert_file: "{{ openshift.common.config_base }}/node/flannel.etcd-client.crt"
etcd_peer_key_file: "{{ openshift.common.config_base }}/node/flannel.etcd-client.key"
-openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False)) else 'docker' }}"
+openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False) | bool) else 'docker' }}"
diff --git a/roles/flannel/handlers/main.yml b/roles/flannel/handlers/main.yml
index 7d79bd3d4..f94399fab 100644
--- a/roles/flannel/handlers/main.yml
+++ b/roles/flannel/handlers/main.yml
@@ -21,3 +21,7 @@
until: not (l_restart_node_result is failed)
retries: 3
delay: 30
+
+- name: save iptables rules
+ become: yes
+ command: 'iptables-save'
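Note that iptables-save on its own only prints the ruleset to stdout; it persists nothing across reboots. A sketch of a persisting variant, assuming a RHEL/CentOS host using the iptables-services rules file:

    - name: save iptables rules
      become: yes
      shell: iptables-save > /etc/sysconfig/iptables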
diff --git a/roles/flannel/meta/main.yml b/roles/flannel/meta/main.yml
index 51128dba6..7634b8192 100644
--- a/roles/flannel/meta/main.yml
+++ b/roles/flannel/meta/main.yml
@@ -12,4 +12,5 @@ galaxy_info:
categories:
- cloud
- system
-dependencies: []
+dependencies:
+- role: lib_utils
diff --git a/roles/flannel/tasks/main.yml b/roles/flannel/tasks/main.yml
index 9b9250f31..11981fb80 100644
--- a/roles/flannel/tasks/main.yml
+++ b/roles/flannel/tasks/main.yml
@@ -2,7 +2,7 @@
- name: Install flannel
become: yes
package: name=flannel state=present
- when: not openshift.common.is_atomic | bool
+ when: not openshift_is_atomic | bool
register: result
until: result is succeeded
@@ -41,3 +41,13 @@
notify:
- restart docker
- restart node
+
+- name: Enable Pod to Pod communication
+ command: /sbin/iptables --wait -I FORWARD -d {{ hostvars[groups.oo_first_master.0].openshift.master.sdn_cluster_network_cidr }} -i {{ flannel_interface }} -j ACCEPT -m comment --comment "Pod to Pod communication"
+ notify:
+ - save iptables rules
+
+- name: Allow external network access
+ command: /sbin/iptables -t nat -A POSTROUTING -o {{ flannel_interface }} -j MASQUERADE -m comment --comment "Allow external network access"
+ notify:
+ - save iptables rules
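Both tasks above append or insert a fresh copy of their rule on every run. A sketch of an idempotent variant using iptables -C to probe for the rule first; cidr stands in for the long hostvars lookup above:

    - name: Enable Pod to Pod communication
      shell: >
        /sbin/iptables --wait -C FORWARD -d {{ cidr }} -i {{ flannel_interface }}
        -j ACCEPT -m comment --comment "Pod to Pod communication" ||
        /sbin/iptables --wait -I FORWARD -d {{ cidr }} -i {{ flannel_interface }}
        -j ACCEPT -m comment --comment "Pod to Pod communication"
      notify:
      - save iptables rules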
diff --git a/roles/flannel_register/meta/main.yml b/roles/flannel_register/meta/main.yml
index 73bddcca4..1e44ff5ba 100644
--- a/roles/flannel_register/meta/main.yml
+++ b/roles/flannel_register/meta/main.yml
@@ -13,4 +13,5 @@ galaxy_info:
- cloud
- system
dependencies:
-- { role: openshift_facts }
+- role: openshift_facts
+- role: lib_utils
diff --git a/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py b/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py
index 83ca83350..da7e7b1da 100644
--- a/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py
+++ b/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py
@@ -31,6 +31,7 @@ class CallbackModule(CallbackBase):
'installer_phase_node',
'installer_phase_glusterfs',
'installer_phase_hosted',
+ 'installer_phase_web_console',
'installer_phase_metrics',
'installer_phase_logging',
'installer_phase_prometheus',
@@ -80,6 +81,10 @@ class CallbackModule(CallbackBase):
'title': 'Hosted Install',
'playbook': 'playbooks/openshift-hosted/config.yml'
},
+ 'installer_phase_web_console': {
+ 'title': 'Web Console Install',
+ 'playbook': 'playbooks/openshift-web-console/config.yml'
+ },
'installer_phase_metrics': {
'title': 'Metrics Install',
'playbook': 'playbooks/openshift-metrics/config.yml'
diff --git a/roles/kuryr/meta/main.yml b/roles/kuryr/meta/main.yml
index 7fd5adf41..7eb8ed781 100644
--- a/roles/kuryr/meta/main.yml
+++ b/roles/kuryr/meta/main.yml
@@ -13,5 +13,6 @@ galaxy_info:
- cloud
- system
dependencies:
-- { role: lib_openshift }
-- { role: openshift_facts }
+- role: lib_utils
+- role: lib_openshift
+- role: openshift_facts
diff --git a/roles/kuryr/tasks/node.yaml b/roles/kuryr/tasks/node.yaml
index 08f2d5adc..41d0ead20 100644
--- a/roles/kuryr/tasks/node.yaml
+++ b/roles/kuryr/tasks/node.yaml
@@ -40,7 +40,7 @@
regexp: '^OPTIONS="?(.*?)"?$'
backrefs: yes
backup: yes
- line: 'OPTIONS="\1 --disable dns,proxy,plugins"'
+ line: 'OPTIONS="\1 --disable proxy"'
- name: force node restart to disable the proxy
service:
diff --git a/roles/kuryr/templates/cni-daemonset.yaml.j2 b/roles/kuryr/templates/cni-daemonset.yaml.j2
index 39348ae90..09f4c7dfe 100644
--- a/roles/kuryr/templates/cni-daemonset.yaml.j2
+++ b/roles/kuryr/templates/cni-daemonset.yaml.j2
@@ -26,6 +26,13 @@ spec:
image: kuryr/cni:latest
imagePullPolicy: IfNotPresent
command: [ "cni_ds_init" ]
+ env:
+ - name: CNI_DAEMON
+ value: "True"
+ - name: KUBERNETES_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
securityContext:
privileged: true
volumeMounts:
@@ -38,6 +45,10 @@ spec:
subPath: kuryr-cni.conf
- name: etc
mountPath: /etc
+ - name: proc
+ mountPath: /host_proc
+ - name: openvswitch
+ mountPath: /var/run/openvswitch
volumes:
- name: bin
hostPath:
@@ -50,4 +61,10 @@ spec:
name: kuryr-config
- name: etc
hostPath:
- path: /etc \ No newline at end of file
+ path: /etc
+ - name: proc
+ hostPath:
+ path: /proc
+ - name: openvswitch
+ hostPath:
+ path: /var/run/openvswitch
diff --git a/roles/kuryr/templates/configmap.yaml.j2 b/roles/kuryr/templates/configmap.yaml.j2
index 96c215f00..4bf1dbddf 100644
--- a/roles/kuryr/templates/configmap.yaml.j2
+++ b/roles/kuryr/templates/configmap.yaml.j2
@@ -16,17 +16,17 @@ data:
# Directory for Kuryr vif binding executables. (string value)
#bindir = /usr/libexec/kuryr
+ # Neutron subnetpool name will be prefixed by this. (string value)
+ #subnetpool_name_prefix = kuryrPool
+
+ # baremetal or nested-containers are the supported values. (string value)
+ #deployment_type = baremetal
+
# If set to true, the logging level will be set to DEBUG instead of the default
# INFO level. (boolean value)
# Note: This option can be changed without restarting.
#debug = false
- # DEPRECATED: If set to false, the logging level will be set to WARNING instead
- # of the default INFO level. (boolean value)
- # This option is deprecated for removal.
- # Its value may be silently ignored in the future.
- #verbose = true
-
# The name of a logging configuration file. This file is appended to any
# existing logging configuration files. For details about logging configuration
# files, see the Python logging module documentation. Note that when logging
@@ -46,7 +46,7 @@ data:
# logging will go to stderr as defined by use_stderr. This option is ignored if
# log_config_append is set. (string value)
# Deprecated group/name - [DEFAULT]/logfile
- #log_file = /var/log/kuryr/kuryr-controller.log
+ #log_file = <None>
# (Optional) The base directory used for relative log_file paths. This option
# is ignored if log_config_append is set. (string value)
@@ -65,13 +65,19 @@ data:
# is set. (boolean value)
#use_syslog = false
+ # Enable journald for logging. If running in a systemd environment you may wish
+ # to enable journal support. Doing so will use the journal native protocol
+ # which includes structured metadata in addition to log messages.This option is
+ # ignored if log_config_append is set. (boolean value)
+ #use_journal = false
+
# Syslog facility to receive log lines. This option is ignored if
# log_config_append is set. (string value)
#syslog_log_facility = LOG_USER
# Log output to standard error. This option is ignored if log_config_append is
# set. (boolean value)
- #use_stderr = true
+ #use_stderr = false
# Format string to use for log messages with context. (string value)
#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
@@ -93,7 +99,7 @@ data:
# List of package logging levels in logger=LEVEL pairs. This option is ignored
# if log_config_append is set. (list value)
- #default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
+ #default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,oslo_messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
# Enables or disables publication of error events. (boolean value)
#publish_errors = false
@@ -106,15 +112,86 @@ data:
# value)
#instance_uuid_format = "[instance: %(uuid)s] "
+ # Interval, number of seconds, of log rate limiting. (integer value)
+ #rate_limit_interval = 0
+
+ # Maximum number of logged messages per rate_limit_interval. (integer value)
+ #rate_limit_burst = 0
+
+ # Log level name used by rate limiting: CRITICAL, ERROR, INFO, WARNING, DEBUG
+ # or empty string. Logs with level greater or equal to rate_limit_except_level
+ # are not filtered. An empty string means that all levels are filtered. (string
+ # value)
+ #rate_limit_except_level = CRITICAL
+
# Enables or disables fatal status of deprecations. (boolean value)
#fatal_deprecations = false
[binding]
+ # Configuration options for container interface binding.
- driver = kuryr.lib.binding.drivers.vlan
+ #
+ # From kuryr_kubernetes
+ #
+
+ # The name prefix of the veth endpoint put inside the container. (string value)
+ #veth_dst_prefix = eth
+
+ # Driver to use for binding and unbinding ports. (string value)
+ # Deprecated group/name - [binding]/driver
+ #default_driver = kuryr.lib.binding.drivers.veth
+
+ # Drivers to use for binding and unbinding ports. (list value)
+ #enabled_drivers = kuryr.lib.binding.drivers.veth
+
+ # Specifies the name of the Nova instance interface to link the virtual devices
+ # to (only applicable to some binding drivers). (string value)
link_iface = eth0
+ driver = kuryr.lib.binding.drivers.vlan
+
+
+ [cni_daemon]
+
+ #
+ # From kuryr_kubernetes
+ #
+
+ # Enable CNI Daemon configuration. (boolean value)
+ daemon_enabled = true
+
+ # Bind address for CNI daemon HTTP server. It is recommended to allow only local
+ # connections. (string value)
+ bind_address = 127.0.0.1:50036
+
+ # Maximum number of processes that will be spawned to process requests from CNI
+ # driver. (integer value)
+ #worker_num = 30
+
+ # Time (in seconds) the CNI daemon will wait for VIF annotation to appear in
+ # pod metadata before failing the CNI request. (integer value)
+ #vif_annotation_timeout = 120
+
+ # Kuryr uses the pyroute2 library to manipulate networking interfaces. When
+ # processing a high number of Kuryr requests in parallel, it may take the
+ # kernel more time to process all networking stack changes. This option allows
+ # tuning the internal pyroute2 timeout. (integer value)
+ #pyroute2_timeout = 30
+
+ # Set to True when you are running kuryr-daemon inside a Docker container on
+ # Kubernetes host, e.g. as a DaemonSet on the Kubernetes cluster Kuryr is supposed to
+ # provide networking for. This mainly means that kuryr-daemon will look for
+ # network namespaces in $netns_proc_dir instead of /proc. (boolean value)
+ docker_mode = true
+
+ # When docker_mode is set to True, this config option should be set to where the
+ # host's /proc directory is mounted. Please note that mounting it is necessary
+ # to allow Kuryr-Kubernetes to move host interfaces between host network
+ # namespaces, which is essential for Kuryr to work. (string value)
+ netns_proc_dir = /host_proc
+
+
[kubernetes]
#
@@ -164,11 +241,6 @@ data:
# The driver that manages VIFs pools for Kubernetes Pods (string value)
vif_pool_driver = {{ kuryr_openstack_enable_pools | default(False) | ternary('nested', 'noop') }}
- [vif_pool]
- ports_pool_max = {{ kuryr_openstack_pool_max | default(0) }}
- ports_pool_min = {{ kuryr_openstack_pool_min | default(1) }}
- ports_pool_batch = {{ kuryr_openstack_pool_batch | default(5) }}
- ports_pool_update_frequency = {{ kuryr_openstack_pool_update_frequency | default(20) }}
[neutron]
# Configuration options for OpenStack Neutron
@@ -232,13 +304,55 @@ data:
external_svc_subnet = {{ kuryr_openstack_external_svc_subnet_id }}
[pod_vif_nested]
+
worker_nodes_subnet = {{ kuryr_openstack_worker_nodes_subnet_id }}
+
+
+ [pool_manager]
+
+ #
+ # From kuryr_kubernetes
+ #
+
+ # Absolute path to socket file that will be used for communication with the
+ # Pool Manager daemon (string value)
+ #sock_file = /run/kuryr/kuryr_manage.sock
+
+
+ [vif_pool]
+
+ #
+ # From kuryr_kubernetes
+ #
+
+ # Set a maximum number of ports per pool. 0 to disable (integer value)
+ ports_pool_max = {{ kuryr_openstack_pool_max | default(0) }}
+
+ # Set a target minimum size of the pool of ports (integer value)
+ ports_pool_min = {{ kuryr_openstack_pool_min | default(1) }}
+
+ # Number of ports to be created in a bulk request (integer value)
+ ports_pool_batch = {{ kuryr_openstack_pool_batch | default(5) }}
+
+ # Minimum interval (in seconds) between pool updates (integer value)
+ ports_pool_update_frequency = {{ kuryr_openstack_pool_update_frequency | default(20) }}
+
kuryr-cni.conf: |+
[DEFAULT]
#
# From kuryr_kubernetes
#
+
+ # Directory for Kuryr vif binding executables. (string value)
+ #bindir = /usr/libexec/kuryr
+
+ # Neutron subnetpool name will be prefixed by this. (string value)
+ #subnetpool_name_prefix = kuryrPool
+
+ # baremetal or nested-containers are the supported values. (string value)
+ #deployment_type = baremetal
+
# If set to true, the logging level will be set to DEBUG instead of the default
# INFO level. (boolean value)
# Note: This option can be changed without restarting.
@@ -263,7 +377,7 @@ data:
# logging will go to stderr as defined by use_stderr. This option is ignored if
# log_config_append is set. (string value)
# Deprecated group/name - [DEFAULT]/logfile
- #log_file = /var/log/kuryr/cni.log
+ #log_file = <None>
# (Optional) The base directory used for relative log_file paths. This option
# is ignored if log_config_append is set. (string value)
@@ -282,6 +396,12 @@ data:
# is set. (boolean value)
#use_syslog = false
+ # Enable journald for logging. If running in a systemd environment you may wish
+ # to enable journal support. Doing so will use the journal native protocol
+ # which includes structured metadata in addition to log messages.This option is
+ # ignored if log_config_append is set. (boolean value)
+ #use_journal = false
+
# Syslog facility to receive log lines. This option is ignored if
# log_config_append is set. (string value)
#syslog_log_facility = LOG_USER
@@ -310,7 +430,7 @@ data:
# List of package logging levels in logger=LEVEL pairs. This option is ignored
# if log_config_append is set. (list value)
- #default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
+ #default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,oslo_messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
# Enables or disables publication of error events. (boolean value)
#publish_errors = false
@@ -323,14 +443,85 @@ data:
# value)
#instance_uuid_format = "[instance: %(uuid)s] "
+ # Interval, number of seconds, of log rate limiting. (integer value)
+ #rate_limit_interval = 0
+
+ # Maximum number of logged messages per rate_limit_interval. (integer value)
+ #rate_limit_burst = 0
+
+ # Log level name used by rate limiting: CRITICAL, ERROR, INFO, WARNING, DEBUG
+ # or empty string. Logs with level greater or equal to rate_limit_except_level
+ # are not filtered. An empty string means that all levels are filtered. (string
+ # value)
+ #rate_limit_except_level = CRITICAL
+
# Enables or disables fatal status of deprecations. (boolean value)
#fatal_deprecations = false
[binding]
+ # Configuration options for container interface binding.
+
+ #
+ # From kuryr_kubernetes
+ #
+
+ # The name prefix of the veth endpoint put inside the container. (string value)
+ #veth_dst_prefix = eth
+
+ # Driver to use for binding and unbinding ports. (string value)
+ # Deprecated group/name - [binding]/driver
+ #default_driver = kuryr.lib.binding.drivers.veth
+
+ # Drivers to use for binding and unbinding ports. (list value)
+ #enabled_drivers = kuryr.lib.binding.drivers.veth
+
+ # Specifies the name of the Nova instance interface to link the virtual devices
+ # to (only applicable to some binding drivers). (string value)
+ link_iface = eth0
driver = kuryr.lib.binding.drivers.vlan
- link_iface = {{ kuryr_cni_link_interface }}
+
+
+ [cni_daemon]
+
+ #
+ # From kuryr_kubernetes
+ #
+
+ # Enable CNI Daemon configuration. (boolean value)
+ daemon_enabled = true
+
+ # Bind address for CNI daemon HTTP server. It is recommended to allow only local
+ # connections. (string value)
+ bind_address = 127.0.0.1:50036
+
+ # Maximum number of processes that will be spawned to process requests from CNI
+ # driver. (integer value)
+ #worker_num = 30
+
+ # Time (in seconds) the CNI daemon will wait for VIF annotation to appear in
+ # pod metadata before failing the CNI request. (integer value)
+ #vif_annotation_timeout = 120
+
+ # Kuryr uses the pyroute2 library to manipulate networking interfaces. When
+ # processing a high number of Kuryr requests in parallel, it may take the
+ # kernel more time to process all networking stack changes. This option allows
+ # tuning the internal pyroute2 timeout. (integer value)
+ #pyroute2_timeout = 30
+
+ # Set to True when you are running kuryr-daemon inside a Docker container on
+ # Kubernetes host, e.g. as a DaemonSet on the Kubernetes cluster Kuryr is supposed to
+ # provide networking for. This mainly means that kuryr-daemon will look for
+ # network namespaces in $netns_proc_dir instead of /proc. (boolean value)
+ docker_mode = true
+
+ # When docker_mode is set to True, this config option should be set to where the
+ # host's /proc directory is mounted. Please note that mounting it is necessary
+ # to allow Kuryr-Kubernetes to move host interfaces between host network
+ # namespaces, which is essential for Kuryr to work. (string value)
+ netns_proc_dir = /host_proc
+
[kubernetes]
@@ -341,12 +532,136 @@ data:
# The root URL of the Kubernetes API (string value)
api_root = {{ openshift.master.api_url }}
- # The token to talk to the k8s API
- token_file = /etc/kuryr/token
+ # Absolute path to client cert to connect to HTTPS K8S_API (string value)
+ # ssl_client_crt_file = /etc/kuryr/controller.crt
+
+ # Absolute path client key file to connect to HTTPS K8S_API (string value)
+ # ssl_client_key_file = /etc/kuryr/controller.key
# Absolute path to ca cert file to connect to HTTPS K8S_API (string value)
- ssl_ca_crt_file = /etc/kuryr/ca.crt
+ ssl_ca_crt_file = /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+
+ # The token to talk to the k8s API
+ token_file = /var/run/secrets/kubernetes.io/serviceaccount/token
# HTTPS K8S_API server identity verification (boolean value)
# TODO (apuimedo): Make configurable
ssl_verify_server_crt = True
+
+ # The driver to determine OpenStack project for pod ports (string value)
+ pod_project_driver = default
+
+ # The driver to determine OpenStack project for services (string value)
+ service_project_driver = default
+
+ # The driver to determine Neutron subnets for pod ports (string value)
+ pod_subnets_driver = default
+
+ # The driver to determine Neutron subnets for services (string value)
+ service_subnets_driver = default
+
+ # The driver to determine Neutron security groups for pods (string value)
+ pod_security_groups_driver = default
+
+ # The driver to determine Neutron security groups for services (string value)
+ service_security_groups_driver = default
+
+ # The driver that provides VIFs for Kubernetes Pods. (string value)
+ pod_vif_driver = nested-vlan
+
+ # The driver that manages VIFs pools for Kubernetes Pods (string value)
+ vif_pool_driver = {{ kuryr_openstack_enable_pools | default(False) | ternary('nested', 'noop') }}
+
+ [neutron]
+ # Configuration options for OpenStack Neutron
+
+ #
+ # From kuryr_kubernetes
+ #
+
+ # Authentication URL (string value)
+ auth_url = {{ kuryr_openstack_auth_url }}
+
+ # Authentication type to load (string value)
+ # Deprecated group/name - [neutron]/auth_plugin
+ auth_type = password
+
+ # Domain ID to scope to (string value)
+ user_domain_name = {{ kuryr_openstack_user_domain_name }}
+
+ # User's password (string value)
+ password = {{ kuryr_openstack_password }}
+
+ # Domain name containing project (string value)
+ project_domain_name = {{ kuryr_openstack_project_domain_name }}
+
+ # Project ID to scope to (string value)
+ # Deprecated group/name - [neutron]/tenant-id
+ project_id = {{ kuryr_openstack_project_id }}
+
+ # Token (string value)
+ #token = <None>
+
+ # Trust ID (string value)
+ #trust_id = <None>
+
+ # User's domain id (string value)
+ #user_domain_id = <None>
+
+ # User id (string value)
+ #user_id = <None>
+
+ # Username (string value)
+ # Deprecated group/name - [neutron]/user-name
+ username = {{ kuryr_openstack_username }}
+
+ # Whether a plugging operation is failed if the port to plug does not become
+ # active (boolean value)
+ #vif_plugging_is_fatal = false
+
+ # Seconds to wait for port to become active (integer value)
+ #vif_plugging_timeout = 0
+
+ [neutron_defaults]
+
+ pod_security_groups = {{ kuryr_openstack_pod_sg_id }}
+ pod_subnet = {{ kuryr_openstack_pod_subnet_id }}
+ service_subnet = {{ kuryr_openstack_service_subnet_id }}
+ project = {{ kuryr_openstack_pod_project_id }}
+ # TODO (apuimedo): Remove the duplicated line just after this one once the
+ # RDO packaging contains the upstream patch
+ worker_nodes_subnet = {{ kuryr_openstack_worker_nodes_subnet_id }}
+
+ [pod_vif_nested]
+
+ worker_nodes_subnet = {{ kuryr_openstack_worker_nodes_subnet_id }}
+
+
+ [pool_manager]
+
+ #
+ # From kuryr_kubernetes
+ #
+
+ # Absolute path to socket file that will be used for communication with the
+ # Pool Manager daemon (string value)
+ #sock_file = /run/kuryr/kuryr_manage.sock
+
+
+ [vif_pool]
+
+ #
+ # From kuryr_kubernetes
+ #
+
+ # Set a maximum number of ports per pool. 0 to disable (integer value)
+ ports_pool_max = {{ kuryr_openstack_pool_max | default(0) }}
+
+ # Set a target minimum size of the pool of ports (integer value)
+ ports_pool_min = {{ kuryr_openstack_pool_min | default(1) }}
+
+ # Number of ports to be created in a bulk request (integer value)
+ ports_pool_batch = {{ kuryr_openstack_pool_batch | default(5) }}
+
+ # Minimum interval (in seconds) between pool updates (integer value)
+ ports_pool_update_frequency = {{ kuryr_openstack_pool_update_frequency | default(20) }}
diff --git a/roles/kuryr/templates/controller-deployment.yaml.j2 b/roles/kuryr/templates/controller-deployment.yaml.j2
index d970270b5..155d1faab 100644
--- a/roles/kuryr/templates/controller-deployment.yaml.j2
+++ b/roles/kuryr/templates/controller-deployment.yaml.j2
@@ -22,6 +22,13 @@ spec:
- image: kuryr/controller:latest
imagePullPolicy: IfNotPresent
name: controller
+{% if kuryr_openstack_enable_pools | default(false) %}
+ readinessProbe:
+ exec:
+ command:
+ - cat
+ - /tmp/pools_loaded
+{% endif %}
terminationMessagePath: "/dev/termination-log"
# FIXME(dulek): This shouldn't be required, but without it selinux is
# complaining about access to kuryr.conf.
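An exec readiness probe passes once its command exits 0, so the pod is only marked Ready after the controller has written /tmp/pools_loaded. A sketch of the same probe with its timing knobs spelled out; the template leaves these at the kubelet defaults:

    readinessProbe:
      exec:
        command: ['cat', '/tmp/pools_loaded']
      initialDelaySeconds: 10   # illustrative values, not from this commit
      periodSeconds: 5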
diff --git a/roles/openshift_sanitize_inventory/library/conditional_set_fact.py b/roles/lib_openshift/library/conditional_set_fact.py
index f61801714..363399f33 100644
--- a/roles/openshift_sanitize_inventory/library/conditional_set_fact.py
+++ b/roles/lib_openshift/library/conditional_set_fact.py
@@ -29,6 +29,10 @@ EXAMPLES = '''
fact1: not_defined_variable
fact2: defined_variable
+- name: Conditionally set fact falling back on default
+ conditional_set_fact:
+ fact1: not_defined_var | defined_variable
+
'''
@@ -48,12 +52,14 @@ def run_module():
is_changed = False
for param in module.params['vars']:
- other_var = module.params['vars'][param]
-
- if other_var in module.params['facts']:
- local_facts[param] = module.params['facts'][other_var]
- if not is_changed:
- is_changed = True
+ other_vars = module.params['vars'][param].replace(" ", "")
+
+ for other_var in other_vars.split('|'):
+ if other_var in module.params['facts']:
+ local_facts[param] = module.params['facts'][other_var]
+ if not is_changed:
+ is_changed = True
+ break
return module.exit_json(changed=is_changed, # noqa: F405
ansible_facts=local_facts)
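With the change above, each mapped value may be a |-separated chain of names and the first one found in the supplied facts wins. A usage sketch; the fact names are illustrative:

    - name: Set a fact with a fallback chain
      conditional_set_fact:
        facts: "{{ hostvars[inventory_hostname] }}"
        vars:
          l_docker_service: openshift_docker_service_name | docker_service_name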
diff --git a/roles/lib_openshift/src/test/unit/test_oc_scale.py b/roles/lib_openshift/src/test/unit/test_oc_scale.py
index d810735f2..9d10c84f3 100755
--- a/roles/lib_openshift/src/test/unit/test_oc_scale.py
+++ b/roles/lib_openshift/src/test/unit/test_oc_scale.py
@@ -27,7 +27,7 @@ class OCScaleTest(unittest.TestCase):
@mock.patch('oc_scale.Utils.create_tmpfile_copy')
@mock.patch('oc_scale.OCScale.openshift_cmd')
def test_state_list(self, mock_openshift_cmd, mock_tmpfile_copy):
- ''' Testing a get '''
+ ''' Testing a list '''
params = {'name': 'router',
'namespace': 'default',
'replicas': 2,
@@ -71,8 +71,296 @@ class OCScaleTest(unittest.TestCase):
@mock.patch('oc_scale.Utils.create_tmpfile_copy')
@mock.patch('oc_scale.OCScale.openshift_cmd')
+ def test_state_present(self, mock_openshift_cmd, mock_tmpfile_copy):
+ ''' Testing a state present '''
+ params = {'name': 'router',
+ 'namespace': 'default',
+ 'replicas': 2,
+ 'state': 'present',
+ 'kind': 'dc',
+ 'kubeconfig': '/etc/origin/master/admin.kubeconfig',
+ 'debug': False}
+
+ dc = '''{"kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "router",
+ "namespace": "default",
+ "selfLink": "/oapi/v1/namespaces/default/deploymentconfigs/router",
+ "uid": "a441eedc-e1ae-11e6-a2d5-0e6967f34d42",
+ "resourceVersion": "6558",
+ "generation": 8,
+ "creationTimestamp": "2017-01-23T20:58:07Z",
+ "labels": {
+ "router": "router"
+ }
+ },
+ "spec": {
+ "replicas": 2,
+ }
+ }'''
+
+ mock_openshift_cmd.side_effect = [
+ {"cmd": '/usr/bin/oc get dc router -n default',
+ 'results': dc,
+ 'returncode': 0}]
+
+ mock_tmpfile_copy.side_effect = [
+ '/tmp/mocked_kubeconfig',
+ ]
+
+ results = OCScale.run_ansible(params, False)
+
+ self.assertFalse(results['changed'])
+ self.assertEqual(results['state'], 'present')
+ self.assertEqual(results['result'][0], 2)
+
+ @mock.patch('oc_scale.Utils.create_tmpfile_copy')
+ @mock.patch('oc_scale.OCScale.openshift_cmd')
+ def test_scale_up(self, mock_openshift_cmd, mock_tmpfile_copy):
+ ''' Testing a scale up '''
+ params = {'name': 'router',
+ 'namespace': 'default',
+ 'replicas': 3,
+ 'state': 'present',
+ 'kind': 'dc',
+ 'kubeconfig': '/etc/origin/master/admin.kubeconfig',
+ 'debug': False}
+
+ dc = '''{"kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "router",
+ "namespace": "default",
+ "selfLink": "/oapi/v1/namespaces/default/deploymentconfigs/router",
+ "uid": "a441eedc-e1ae-11e6-a2d5-0e6967f34d42",
+ "resourceVersion": "6558",
+ "generation": 8,
+ "creationTimestamp": "2017-01-23T20:58:07Z",
+ "labels": {
+ "router": "router"
+ }
+ },
+ "spec": {
+ "replicas": 2,
+ }
+ }'''
+ dc_updated = '''{"kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "router",
+ "namespace": "default",
+ "selfLink": "/oapi/v1/namespaces/default/deploymentconfigs/router",
+ "uid": "a441eedc-e1ae-11e6-a2d5-0e6967f34d42",
+ "resourceVersion": "6559",
+ "generation": 9,
+ "creationTimestamp": "2017-01-24T20:58:07Z",
+ "labels": {
+ "router": "router"
+ }
+ },
+ "spec": {
+ "replicas": 3,
+ }
+ }'''
+
+ mock_openshift_cmd.side_effect = [
+ {"cmd": '/usr/bin/oc get dc router -n default',
+ 'results': dc,
+ 'returncode': 0},
+ {"cmd": '/usr/bin/oc get dc router -n default',
+ 'results': dc,
+ 'returncode': 0},
+ {"cmd": '/usr/bin/oc replace',
+ 'results': dc,
+ 'returncode': 0},
+ {"cmd": '/usr/bin/oc get dc router -n default',
+ 'results': dc_updated,
+ 'returncode': 0}]
+
+ mock_tmpfile_copy.side_effect = [
+ '/tmp/mocked_kubeconfig',
+ ]
+
+ results = OCScale.run_ansible(params, False)
+
+ self.assertTrue(results['changed'])
+ self.assertEqual(results['state'], 'present')
+ self.assertEqual(results['result'][0], 3)
+
+ @mock.patch('oc_scale.Utils.create_tmpfile_copy')
+ @mock.patch('oc_scale.OCScale.openshift_cmd')
+ def test_scale_down(self, mock_openshift_cmd, mock_tmpfile_copy):
+ ''' Testing a scale down '''
+ params = {'name': 'router',
+ 'namespace': 'default',
+ 'replicas': 1,
+ 'state': 'present',
+ 'kind': 'dc',
+ 'kubeconfig': '/etc/origin/master/admin.kubeconfig',
+ 'debug': False}
+
+ dc = '''{"kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "router",
+ "namespace": "default",
+ "selfLink": "/oapi/v1/namespaces/default/deploymentconfigs/router",
+ "uid": "a441eedc-e1ae-11e6-a2d5-0e6967f34d42",
+ "resourceVersion": "6558",
+ "generation": 8,
+ "creationTimestamp": "2017-01-23T20:58:07Z",
+ "labels": {
+ "router": "router"
+ }
+ },
+ "spec": {
+ "replicas": 2,
+ }
+ }'''
+ dc_updated = '''{"kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "router",
+ "namespace": "default",
+ "selfLink": "/oapi/v1/namespaces/default/deploymentconfigs/router",
+ "uid": "a441eedc-e1ae-11e6-a2d5-0e6967f34d42",
+ "resourceVersion": "6560",
+ "generation": 9,
+ "creationTimestamp": "2017-01-24T20:58:07Z",
+ "labels": {
+ "router": "router"
+ }
+ },
+ "spec": {
+ "replicas": 1,
+ }
+ }'''
+
+ mock_openshift_cmd.side_effect = [
+ {"cmd": '/usr/bin/oc get dc router -n default',
+ 'results': dc,
+ 'returncode': 0},
+ {"cmd": '/usr/bin/oc get dc router -n default',
+ 'results': dc,
+ 'returncode': 0},
+ {"cmd": '/usr/bin/oc replace',
+ 'results': dc,
+ 'returncode': 0},
+ {"cmd": '/usr/bin/oc get dc router -n default',
+ 'results': dc_updated,
+ 'returncode': 0}]
+
+ mock_tmpfile_copy.side_effect = [
+ '/tmp/mocked_kubeconfig',
+ ]
+
+ results = OCScale.run_ansible(params, False)
+
+ self.assertTrue(results['changed'])
+ self.assertEqual(results['state'], 'present')
+ self.assertEqual(results['result'][0], 1)
+
+ @mock.patch('oc_scale.Utils.create_tmpfile_copy')
+ @mock.patch('oc_scale.OCScale.openshift_cmd')
+ def test_scale_failed(self, mock_openshift_cmd, mock_tmpfile_copy):
+ ''' Testing a scale failure '''
+ params = {'name': 'router',
+ 'namespace': 'default',
+ 'replicas': 1,
+ 'state': 'present',
+ 'kind': 'dc',
+ 'kubeconfig': '/etc/origin/master/admin.kubeconfig',
+ 'debug': False}
+
+ dc = '''{"kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "router",
+ "namespace": "default",
+ "selfLink": "/oapi/v1/namespaces/default/deploymentconfigs/router",
+ "uid": "a441eedc-e1ae-11e6-a2d5-0e6967f34d42",
+ "resourceVersion": "6558",
+ "generation": 8,
+ "creationTimestamp": "2017-01-23T20:58:07Z",
+ "labels": {
+ "router": "router"
+ }
+ },
+ "spec": {
+ "replicas": 2,
+ }
+ }'''
+ error_message = "foo"
+
+ mock_openshift_cmd.side_effect = [
+ {"cmd": '/usr/bin/oc get dc router -n default',
+ 'results': dc,
+ 'returncode': 0},
+ {"cmd": '/usr/bin/oc get dc router -n default',
+ 'results': dc,
+ 'returncode': 0},
+ {"cmd": '/usr/bin/oc replace',
+ 'results': error_message,
+ 'returncode': 1}]
+
+ mock_tmpfile_copy.side_effect = [
+ '/tmp/mocked_kubeconfig',
+ ]
+
+ results = OCScale.run_ansible(params, False)
+
+ self.assertTrue(results['failed'])
+
+ @mock.patch('oc_scale.Utils.create_tmpfile_copy')
+ @mock.patch('oc_scale.OCScale.openshift_cmd')
+ def test_state_unknown(self, mock_openshift_cmd, mock_tmpfile_copy):
+ ''' Testing an unknown state '''
+ params = {'name': 'router',
+ 'namespace': 'default',
+ 'replicas': 2,
+ 'state': 'unknown-state',
+ 'kind': 'dc',
+ 'kubeconfig': '/etc/origin/master/admin.kubeconfig',
+ 'debug': False}
+
+ dc = '''{"kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "router",
+ "namespace": "default",
+ "selfLink": "/oapi/v1/namespaces/default/deploymentconfigs/router",
+ "uid": "a441eedc-e1ae-11e6-a2d5-0e6967f34d42",
+ "resourceVersion": "6558",
+ "generation": 8,
+ "creationTimestamp": "2017-01-23T20:58:07Z",
+ "labels": {
+ "router": "router"
+ }
+ },
+ "spec": {
+ "replicas": 2,
+ }
+ }'''
+
+ mock_openshift_cmd.side_effect = [
+ {"cmd": '/usr/bin/oc get dc router -n default',
+ 'results': dc,
+ 'returncode': 0}]
+
+ mock_tmpfile_copy.side_effect = [
+ '/tmp/mocked_kubeconfig',
+ ]
+
+ results = OCScale.run_ansible(params, False)
+
+ self.assertFalse('changed' in results)
+ self.assertEqual(results['failed'], True)
+
+ @mock.patch('oc_scale.Utils.create_tmpfile_copy')
+ @mock.patch('oc_scale.OCScale.openshift_cmd')
def test_scale(self, mock_openshift_cmd, mock_tmpfile_copy):
- ''' Testing a get '''
+ ''' Testing scale '''
params = {'name': 'router',
'namespace': 'default',
'replicas': 3,
@@ -120,8 +408,57 @@ class OCScaleTest(unittest.TestCase):
@mock.patch('oc_scale.Utils.create_tmpfile_copy')
@mock.patch('oc_scale.OCScale.openshift_cmd')
+ def test_scale_rc(self, mock_openshift_cmd, mock_tmpfile_copy):
+ ''' Testing scale for replication controllers '''
+ params = {'name': 'router',
+ 'namespace': 'default',
+ 'replicas': 3,
+ 'state': 'list',
+ 'kind': 'rc',
+ 'kubeconfig': '/etc/origin/master/admin.kubeconfig',
+ 'debug': False}
+
+ rc = '''{"kind": "ReplicationController",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "router",
+ "namespace": "default",
+ "selfLink": "/oapi/v1/namespaces/default/deploymentconfigs/router",
+ "uid": "a441eedc-e1ae-11e6-a2d5-0e6967f34d42",
+ "resourceVersion": "6558",
+ "generation": 8,
+ "creationTimestamp": "2017-01-23T20:58:07Z",
+ "labels": {
+ "router": "router"
+ }
+ },
+ "spec": {
+ "replicas": 3,
+ }
+ }'''
+
+ mock_openshift_cmd.side_effect = [
+ {"cmd": '/usr/bin/oc get rc router -n default',
+ 'results': rc,
+ 'returncode': 0},
+ {"cmd": '/usr/bin/oc create -f /tmp/router -n default',
+ 'results': '',
+ 'returncode': 0}
+ ]
+
+ mock_tmpfile_copy.side_effect = [
+ '/tmp/mocked_kubeconfig',
+ ]
+
+ results = OCScale.run_ansible(params, False)
+
+ self.assertFalse(results['changed'])
+ self.assertEqual(results['result'][0], 3)
+
+ @mock.patch('oc_scale.Utils.create_tmpfile_copy')
+ @mock.patch('oc_scale.OCScale.openshift_cmd')
def test_no_dc_scale(self, mock_openshift_cmd, mock_tmpfile_copy):
- ''' Testing a get '''
+ ''' Testing scale for a nonexistent dc '''
params = {'name': 'not_there',
'namespace': 'default',
'replicas': 3,
@@ -205,7 +542,7 @@ class OCScaleTest(unittest.TestCase):
@mock.patch('shutil.which')
@mock.patch('os.environ.get')
def test_binary_lookup_fallback_py3(self, mock_env_get, mock_shutil_which):
- ''' Testing binary lookup fallback '''
+ ''' Testing binary lookup fallback in py3 '''
mock_env_get.side_effect = lambda _v, _d: ''
@@ -217,7 +554,7 @@ class OCScaleTest(unittest.TestCase):
@mock.patch('shutil.which')
@mock.patch('os.environ.get')
def test_binary_lookup_in_path_py3(self, mock_env_get, mock_shutil_which):
- ''' Testing binary lookup in path '''
+ ''' Testing binary lookup in path in py3 '''
oc_bin = '/usr/bin/oc'
@@ -231,7 +568,7 @@ class OCScaleTest(unittest.TestCase):
@mock.patch('shutil.which')
@mock.patch('os.environ.get')
def test_binary_lookup_in_usr_local_py3(self, mock_env_get, mock_shutil_which):
- ''' Testing binary lookup in /usr/local/bin '''
+ ''' Testing binary lookup in /usr/local/bin in py3 '''
oc_bin = '/usr/local/bin/oc'
@@ -245,7 +582,7 @@ class OCScaleTest(unittest.TestCase):
@mock.patch('shutil.which')
@mock.patch('os.environ.get')
def test_binary_lookup_in_home_py3(self, mock_env_get, mock_shutil_which):
- ''' Testing binary lookup in ~/bin '''
+ ''' Testing binary lookup in ~/bin in py3 '''
oc_bin = os.path.expanduser('~/bin/oc')
diff --git a/roles/lib_os_firewall/README.md b/roles/lib_os_firewall/README.md
deleted file mode 100644
index ba8c84865..000000000
--- a/roles/lib_os_firewall/README.md
+++ /dev/null
@@ -1,63 +0,0 @@
-lib_os_firewall
-===========
-
-lib_os_firewall manages iptables firewall settings for a minimal use
-case (Adding/Removing rules based on protocol and port number).
-
-Note: firewalld is not supported on Atomic Host
-https://bugzilla.redhat.com/show_bug.cgi?id=1403331
-
-Requirements
-------------
-
-Ansible 2.2
-
-Role Variables
---------------
-
-| Name | Default | |
-|---------------------------|---------|----------------------------------------|
-| os_firewall_allow | [] | List of service,port mappings to allow |
-| os_firewall_deny | [] | List of service, port mappings to deny |
-
-Dependencies
-------------
-
-None.
-
-Example Playbook
-----------------
-
-Use iptables and open tcp ports 80 and 443:
-```
----
-- hosts: servers
- vars:
- os_firewall_use_firewalld: false
- os_firewall_allow:
- - service: httpd
- port: 80/tcp
- - service: https
- port: 443/tcp
- tasks:
- - include_role:
- name: lib_os_firewall
-
- - name: set allow rules
- os_firewall_manage_iptables:
- name: "{{ item.service }}"
- action: add
- protocol: "{{ item.port.split('/')[1] }}"
- port: "{{ item.port.split('/')[0] }}"
- with_items: "{{ os_firewall_allow }}"
-```
-
-
-License
--------
-
-Apache License, Version 2.0
-
-Author Information
-------------------
-Jason DeTiberus - jdetiber@redhat.com
diff --git a/roles/openshift_persistent_volumes/action_plugins/generate_pv_pvcs_list.py b/roles/lib_utils/action_plugins/generate_pv_pvcs_list.py
index eb13a58ba..eb13a58ba 100644
--- a/roles/openshift_persistent_volumes/action_plugins/generate_pv_pvcs_list.py
+++ b/roles/lib_utils/action_plugins/generate_pv_pvcs_list.py
diff --git a/roles/lib_utils/action_plugins/sanity_checks.py b/roles/lib_utils/action_plugins/sanity_checks.py
new file mode 100644
index 000000000..09ce55e8f
--- /dev/null
+++ b/roles/lib_utils/action_plugins/sanity_checks.py
@@ -0,0 +1,181 @@
+"""
+Ansible action plugin to ensure inventory variables are set
+appropriately and no conflicting options have been provided.
+"""
+import re
+
+from ansible.plugins.action import ActionBase
+from ansible import errors
+
+# Valid values for openshift_deployment_type
+VALID_DEPLOYMENT_TYPES = ('origin', 'openshift-enterprise')
+
+# Tuple of variable names and default values if undefined.
+NET_PLUGIN_LIST = (('openshift_use_openshift_sdn', True),
+ ('openshift_use_flannel', False),
+ ('openshift_use_nuage', False),
+ ('openshift_use_contiv', False),
+ ('openshift_use_calico', False))
+
+ENTERPRISE_TAG_REGEX_ERROR = """openshift_image_tag must be in the format
+v#.#[.#[.#]]. Examples: v1.2, v3.4.1, v3.5.1.3,
+v3.5.1.3.4, v1.2-1, v1.2.3-4, v1.2.3-4.5, v1.2.3-4.5.6
+You specified openshift_image_tag={}"""
+
+ORIGIN_TAG_REGEX_ERROR = """openshift_image_tag must be in the format
+v#.#.#[-optional.#]. Examples: v1.2.3, v3.5.1-alpha.1
+You specified openshift_image_tag={}"""
+
+ORIGIN_TAG_REGEX = {'re': '(^v?\\d+\\.\\d+\\.\\d+(-[\\w\\-\\.]*)?$)',
+ 'error_msg': ORIGIN_TAG_REGEX_ERROR}
+ENTERPRISE_TAG_REGEX = {'re': '(^v\\d+\\.\\d+(\\.\\d+)*(-\\d+(\\.\\d+)*)?$)',
+ 'error_msg': ENTERPRISE_TAG_REGEX_ERROR}
+IMAGE_TAG_REGEX = {'origin': ORIGIN_TAG_REGEX,
+ 'openshift-enterprise': ENTERPRISE_TAG_REGEX}
+
+CONTAINERIZED_NO_TAG_ERROR_MSG = """To install a containerized Origin release,
+you must set openshift_release or openshift_image_tag in your inventory to
+specify which version of the OpenShift component images to use.
+(Suggestion: add openshift_release="x.y" to inventory.)"""
+
+
+def to_bool(var_to_check):
+ """Determine a boolean value given the multiple
+ ways bools can be specified in ansible."""
+ # http://yaml.org/type/bool.html
+ yes_list = (True, 1, "True", "1", "true", "TRUE",
+ "Yes", "yes", "Y", "y", "YES",
+ "on", "ON", "On")
+ return var_to_check in yes_list
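+
+# Illustrative behavior (values absent from yes_list fall through to False):
+# to_bool("yes") -> True, to_bool(1) -> True,
+# to_bool("no") -> False, to_bool(None) -> False.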
+
+
+class ActionModule(ActionBase):
+ """Action plugin to execute sanity checks."""
+ def template_var(self, hostvars, host, varname):
+ """Retrieve a variable from hostvars and template it.
+ If undefined, return None type."""
+ res = hostvars[host].get(varname)
+ if res is None:
+ return None
+ return self._templar.template(res)
+
+ def check_openshift_deployment_type(self, hostvars, host):
+ """Ensure a valid openshift_deployment_type is set"""
+ openshift_deployment_type = self.template_var(hostvars, host,
+ 'openshift_deployment_type')
+ if openshift_deployment_type not in VALID_DEPLOYMENT_TYPES:
+ type_strings = ", ".join(VALID_DEPLOYMENT_TYPES)
+ msg = "openshift_deployment_type must be defined and one of {}".format(type_strings)
+ raise errors.AnsibleModuleError(msg)
+ return openshift_deployment_type
+
+ def check_python_version(self, hostvars, host, distro):
+ """Ensure python version is 3 for Fedora and python 2 for others"""
+ ansible_python = self.template_var(hostvars, host, 'ansible_python')
+ if distro == "Fedora":
+ if ansible_python['version']['major'] != 3:
+ msg = "openshift-ansible requires Python 3 for {};".format(distro)
+ msg += " For information on enabling Python 3 with Ansible,"
+ msg += " see https://docs.ansible.com/ansible/python_3_support.html"
+ raise errors.AnsibleModuleError(msg)
+ else:
+ if ansible_python['version']['major'] != 2:
+ msg = "openshift-ansible requires Python 2 for {};".format(distro)
+
+ def check_image_tag_format(self, hostvars, host, openshift_deployment_type):
+ """Ensure openshift_image_tag is formatted correctly"""
+ openshift_image_tag = self.template_var(hostvars, host, 'openshift_image_tag')
+ if not openshift_image_tag or openshift_image_tag == 'latest':
+ return None
+ regex_to_match = IMAGE_TAG_REGEX[openshift_deployment_type]['re']
+ res = re.match(regex_to_match, str(openshift_image_tag))
+ if res is None:
+ msg = IMAGE_TAG_REGEX[openshift_deployment_type]['error_msg']
+ msg = msg.format(str(openshift_image_tag))
+ raise errors.AnsibleModuleError(msg)
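+
+ # For example (illustrative): with openshift_deployment_type='origin',
+ # tags such as 'v3.7.0' or 'v3.7.0-alpha.1' match ORIGIN_TAG_REGEX,
+ # while 'v3.7' lacks the third version component and raises the error.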
+
+ def no_origin_image_version(self, hostvars, host, openshift_deployment_type):
+ """Ensure we can determine what image version to use with origin
+ fail when:
+ - openshift_is_containerized
+ - openshift_deployment_type == 'origin'
+ - openshift_release is not defined
+ - openshift_image_tag is not defined"""
+ if not openshift_deployment_type == 'origin':
+ return None
+ oic = self.template_var(hostvars, host, 'openshift_is_containerized')
+ if not to_bool(oic):
+ return None
+ orelease = self.template_var(hostvars, host, 'openshift_release')
+ oitag = self.template_var(hostvars, host, 'openshift_image_tag')
+ if not orelease and not oitag:
+ raise errors.AnsibleModuleError(CONTAINERIZED_NO_TAG_ERROR_MSG)
+
+ def network_plugin_check(self, hostvars, host):
+ """Ensure only one type of network plugin is enabled"""
+ res = []
+ # Loop through each possible network plugin boolean, determine the
+ # actual boolean value, and append results into a list.
+ for plugin, default_val in NET_PLUGIN_LIST:
+ res_temp = self.template_var(hostvars, host, plugin)
+ if res_temp is None:
+ res_temp = default_val
+ res.append(to_bool(res_temp))
+
+ if sum(res) != 1:
+ plugin_str = list(zip([x[0] for x in NET_PLUGIN_LIST], res))
+
+ msg = "Host Checked: {} Only one of must be true. Found: {}".format(host, plugin_str)
+ raise errors.AnsibleModuleError(msg)
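+
+ # Worked example (illustrative): with openshift_use_calico=True and the
+ # other plugin variables unset, res is [True, False, False, False, True]
+ # because openshift_use_openshift_sdn defaults to True; sum(res) is 2 and
+ # the check raises, so openshift_use_openshift_sdn=False must be set.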
+
+ def check_hostname_vars(self, hostvars, host):
+ """Checks to ensure openshift_hostname
+ and openshift_public_hostname
+ conform to the proper length of 63 characters or less"""
+ for varname in ('openshift_public_hostname', 'openshift_hostname'):
+ var_value = self.template_var(hostvars, host, varname)
+ if var_value and len(var_value) > 63:
+ msg = '{} must be 63 characters or less'.format(varname)
+ raise errors.AnsibleModuleError(msg)
+
+ def run_checks(self, hostvars, host):
+ """Execute the hostvars validations against host"""
+ distro = self.template_var(hostvars, host, 'ansible_distribution')
+ odt = self.check_openshift_deployment_type(hostvars, host)
+ self.check_python_version(hostvars, host, distro)
+ self.check_image_tag_format(hostvars, host, odt)
+ self.no_origin_image_version(hostvars, host, odt)
+ self.network_plugin_check(hostvars, host)
+ self.check_hostname_vars(hostvars, host)
+
+ def run(self, tmp=None, task_vars=None):
+ result = super(ActionModule, self).run(tmp, task_vars)
+
+ # self.task_vars holds all in-scope variables.
+ # Ignore setting self.task_vars outside of init.
+ # pylint: disable=W0201
+ self.task_vars = task_vars or {}
+
+ # self._task.args holds task parameters.
+ # check_hosts is a parameter to this plugin, and should provide
+ # a list of hosts.
+ check_hosts = self._task.args.get('check_hosts')
+ if not check_hosts:
+ msg = "check_hosts is required"
+ raise errors.AnsibleModuleError(msg)
+
+ # We need to access each host's variables
+ hostvars = self.task_vars.get('hostvars')
+ if not hostvars:
+ msg = "hostvars not found in task_vars; cannot validate hosts"
+ raise errors.AnsibleModuleError(msg)
+
+ # We loop through each host in the provided list check_hosts
+ for host in check_hosts:
+ self.run_checks(hostvars, host)
+
+ result["changed"] = False
+ result["failed"] = False
+ result["msg"] = "Sanity Checks passed"
+
+ return result
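+
+# Illustrative playbook usage (hosts/group names are assumptions):
+#
+# - name: Run variable sanity checks
+#   hosts: localhost
+#   tasks:
+#   - name: Verify inventory variables
+#     sanity_checks:
+#       check_hosts: "{{ groups['all'] }}"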
diff --git a/roles/lib_utils/callback_plugins/aa_version_requirement.py b/roles/lib_utils/callback_plugins/aa_version_requirement.py
new file mode 100644
index 000000000..1093acdae
--- /dev/null
+++ b/roles/lib_utils/callback_plugins/aa_version_requirement.py
@@ -0,0 +1,60 @@
+#!/usr/bin/python
+
+"""
+This callback plugin verifies the required minimum version of Ansible
+is installed for proper operation of the OpenShift Ansible Installer.
+The plugin is named with leading `aa_` to ensure this plugin is loaded
+first (alphanumerically) by Ansible.
+"""
+import sys
+from ansible import __version__
+
+if __version__ < '2.0':
+ # pylint: disable=import-error,no-name-in-module
+ # Disabled because pylint warns when Ansible v2 is installed
+ from ansible.callbacks import display as pre2_display
+ CallbackBase = object
+
+ def display(*args, **kwargs):
+ """Set up display function for pre Ansible v2"""
+ pre2_display(*args, **kwargs)
+else:
+ from ansible.plugins.callback import CallbackBase
+ from ansible.utils.display import Display
+
+ def display(*args, **kwargs):
+ """Set up display function for Ansible v2"""
+ display_instance = Display()
+ display_instance.display(*args, **kwargs)
+
+
+# Set to minimum required Ansible version
+REQUIRED_VERSION = '2.4.1.0'
+DESCRIPTION = "Supported versions: %s or newer" % REQUIRED_VERSION
+
+
+def version_requirement(version):
+ """Test for minimum required version"""
+ return version >= REQUIRED_VERSION
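+
+# Note this is a plain string comparison, so it assumes version strings of
+# similar shape; e.g. version_requirement('2.4.2.0') -> True and
+# version_requirement('2.3.0.0') -> False.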
+
+
+class CallbackModule(CallbackBase):
+ """
+ Ansible callback plugin
+ """
+
+ CALLBACK_VERSION = 1.0
+ CALLBACK_NAME = 'version_requirement'
+
+ def __init__(self):
+ """
+ Version verification is performed in __init__ to catch the
+ requirement early in the execution of Ansible and fail gracefully
+ """
+ super(CallbackModule, self).__init__()
+
+ if not version_requirement(__version__):
+ display(
+ 'FATAL: Current Ansible version (%s) is not supported. %s'
+ % (__version__, DESCRIPTION), color='red')
+ sys.exit(1)
diff --git a/roles/lib_utils/callback_plugins/openshift_quick_installer.py b/roles/lib_utils/callback_plugins/openshift_quick_installer.py
new file mode 100644
index 000000000..365e2443d
--- /dev/null
+++ b/roles/lib_utils/callback_plugins/openshift_quick_installer.py
@@ -0,0 +1,360 @@
+# pylint: disable=invalid-name,protected-access,import-error,line-too-long,attribute-defined-outside-init
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+"""This file is a stdout callback plugin for the OpenShift Quick
+Installer. The purpose of this callback plugin is to reduce the amount
+of produced output for customers and enable simpler progress checking.
+
+What's different:
+
+* Playbook progress is expressed as: Play <current_play>/<total_plays> (Play Name)
+ Ex: Play 3/30 (Initialize Megafrobber)
+
+* The Tasks and Handlers in each play (and included roles) are printed
+ as a series of .'s following the play progress line.
+
+* Many of these methods include copy and paste code from the upstream
+ default.py callback. We do that to give us control over the stdout
+ output while allowing Ansible to handle the file logging
+ normally. The biggest changes here are that we are manually setting
+ `log_only` to True in the Display.display method and we redefine the
+ Display.banner method locally so we can set log_only on that call as
+ well.
+
+"""
+
+from __future__ import (absolute_import, print_function)
+import sys
+from ansible import constants as C
+from ansible.plugins.callback import CallbackBase
+from ansible.utils.color import colorize, hostcolor
+
+
+class CallbackModule(CallbackBase):
+
+ """
+ Ansible callback plugin
+ """
+ CALLBACK_VERSION = 2.2
+ CALLBACK_TYPE = 'stdout'
+ CALLBACK_NAME = 'openshift_quick_installer'
+ CALLBACK_NEEDS_WHITELIST = False
+ plays_count = 0
+ plays_total_ran = 0
+
+ def __init__(self):
+ """Constructor, ensure standard self.*s are set"""
+ self._play = None
+ self._last_task_banner = None
+ super(CallbackModule, self).__init__()
+
+ def banner(self, msg, color=None):
+ '''Prints a header-looking line with stars taking up to 80 columns
+ of width (3 columns, minimum)
+
+ Overrides the upstream banner method so that display is called
+ with log_only=True
+ '''
+ msg = msg.strip()
+ star_len = (79 - len(msg))
+ if star_len <= 3:
+ star_len = 3
+ stars = "*" * star_len
+ self._display.display("\n%s %s" % (msg, stars), color=color, log_only=True)
+
+ def _print_task_banner(self, task):
+ """Imported from the upstream 'default' callback"""
+ # args can be specified as no_log in several places: in the task or in
+ # the argument spec. We can check whether the task is no_log but the
+ # argument spec can't be because that is only run on the target
+ # machine and we haven't run it there yet.
+ #
+ # So we give people a config option to affect display of the args so
+ # that they can secure this if they feel that their stdout is insecure
+ # (shoulder surfing, logging stdout straight to a file, etc).
+ args = ''
+ if not task.no_log and C.DISPLAY_ARGS_TO_STDOUT:
+ args = ', '.join('%s=%s' % a for a in task.args.items())
+ args = ' %s' % args
+
+ self.banner(u"TASK [%s%s]" % (task.get_name().strip(), args))
+ if self._display.verbosity >= 2:
+ path = task.get_path()
+ if path:
+ self._display.display(u"task path: %s" % path, color=C.COLOR_DEBUG, log_only=True)
+
+ self._last_task_banner = task._uuid
+
+ def v2_playbook_on_start(self, playbook):
+ """This is basically the start of it all"""
+ self.plays_count = len(playbook.get_plays())
+ self.plays_total_ran = 0
+
+ if self._display.verbosity > 1:
+ from os.path import basename
+ self.banner("PLAYBOOK: %s" % basename(playbook._file_name))
+
+ def v2_playbook_on_play_start(self, play):
+ """Each play calls this once before running any tasks
+
+We could print the number of tasks here as well by using
+`play.get_tasks()` but that is not accurate when a play includes a
+role. Only the tasks directly assigned to a play are exposed in the
+`play` object.
+ """
+ self.plays_total_ran += 1
+ print("")
+ print("Play %s/%s (%s)" % (self.plays_total_ran, self.plays_count, play.get_name()))
+
+ name = play.get_name().strip()
+ if not name:
+ msg = "PLAY"
+ else:
+ msg = "PLAY [%s]" % name
+
+ self._play = play
+
+ self.banner(msg)
+
+ # pylint: disable=unused-argument,no-self-use
+ def v2_playbook_on_task_start(self, task, is_conditional):
+ """This prints out the task header. For example:
+
+TASK [openshift_facts : Ensure PyYaml is installed] ***...
+
+Rather than print out all that for every task, we print a dot
+character to indicate a task has been started.
+ """
+ sys.stdout.write('.')
+
+ args = ''
+ # args can be specified as no_log in several places: in the task or in
+ # the argument spec. We can check whether the task is no_log but the
+ # argument spec can't be because that is only run on the target
+ # machine and we haven't run it there yet.
+ #
+ # So we give people a config option to affect display of the args so
+ # that they can secure this if they feel that their stdout is insecure
+ # (shoulder surfing, logging stdout straight to a file, etc).
+ if not task.no_log and C.DISPLAY_ARGS_TO_STDOUT:
+ args = ', '.join(('%s=%s' % a for a in task.args.items()))
+ args = ' %s' % args
+ self.banner("TASK [%s%s]" % (task.get_name().strip(), args))
+ if self._display.verbosity >= 2:
+ path = task.get_path()
+ if path:
+ self._display.display("task path: %s" % path, color=C.COLOR_DEBUG, log_only=True)
+
+ # pylint: disable=unused-argument,no-self-use
+ def v2_playbook_on_handler_task_start(self, task):
+ """Print out task header for handlers
+
+Rather than print out a header for every handler, we print a dot
+character to indicate a handler task has been started.
+"""
+ sys.stdout.write('.')
+
+ self.banner("RUNNING HANDLER [%s]" % task.get_name().strip())
+
+ # pylint: disable=unused-argument,no-self-use
+ def v2_playbook_on_cleanup_task_start(self, task):
+ """Print out a task header for cleanup tasks
+
+Rather than print out a header for every cleanup task, we print a dot
+character to indicate a cleanup task has been started.
+"""
+ sys.stdout.write('.')
+
+ self.banner("CLEANUP TASK [%s]" % task.get_name().strip())
+
+ def v2_playbook_on_include(self, included_file):
+ """Print out paths to statically included files"""
+ msg = 'included: %s for %s' % (included_file._filename, ", ".join([h.name for h in included_file._hosts]))
+ self._display.display(msg, color=C.COLOR_SKIP, log_only=True)
+
+ def v2_runner_on_ok(self, result):
+ """This prints out task results in a fancy format
+
+The only thing we change here is adding `log_only=True` to the
+.display() call
+ """
+ delegated_vars = result._result.get('_ansible_delegated_vars', None)
+ self._clean_results(result._result, result._task.action)
+ if result._task.action in ('include', 'import_role'):
+ return
+ elif result._result.get('changed', False):
+ if delegated_vars:
+ msg = "changed: [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host'])
+ else:
+ msg = "changed: [%s]" % result._host.get_name()
+ color = C.COLOR_CHANGED
+ else:
+ if delegated_vars:
+ msg = "ok: [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host'])
+ else:
+ msg = "ok: [%s]" % result._host.get_name()
+ color = C.COLOR_OK
+
+ if result._task.loop and 'results' in result._result:
+ self._process_items(result)
+ else:
+
+ if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and '_ansible_verbose_override' not in result._result:
+ msg += " => %s" % (self._dump_results(result._result),)
+ self._display.display(msg, color=color, log_only=True)
+
+ self._handle_warnings(result._result)
+
+ def v2_runner_item_on_ok(self, result):
+ """Print out task results for items you're iterating over"""
+ delegated_vars = result._result.get('_ansible_delegated_vars', None)
+ if result._task.action in ('include', 'import_role'):
+ return
+ elif result._result.get('changed', False):
+ msg = 'changed'
+ color = C.COLOR_CHANGED
+ else:
+ msg = 'ok'
+ color = C.COLOR_OK
+
+ if delegated_vars:
+ msg += ": [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host'])
+ else:
+ msg += ": [%s]" % result._host.get_name()
+
+ msg += " => (item=%s)" % (self._get_item(result._result),)
+
+ if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and '_ansible_verbose_override' not in result._result:
+ msg += " => %s" % self._dump_results(result._result)
+ self._display.display(msg, color=color, log_only=True)
+
+ def v2_runner_item_on_skipped(self, result):
+ """Print out task results when an item is skipped"""
+ if C.DISPLAY_SKIPPED_HOSTS:
+ msg = "skipping: [%s] => (item=%s) " % (result._host.get_name(), self._get_item(result._result))
+ if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and '_ansible_verbose_override' not in result._result:
+ msg += " => %s" % self._dump_results(result._result)
+ self._display.display(msg, color=C.COLOR_SKIP, log_only=True)
+
+ def v2_runner_on_skipped(self, result):
+ """Print out task results when a task (or something else?) is skipped"""
+ if C.DISPLAY_SKIPPED_HOSTS:
+ if result._task.loop and 'results' in result._result:
+ self._process_items(result)
+ else:
+ msg = "skipping: [%s]" % result._host.get_name()
+ if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and '_ansible_verbose_override' not in result._result:
+ msg += " => %s" % self._dump_results(result._result)
+ self._display.display(msg, color=C.COLOR_SKIP, log_only=True)
+
+ def v2_playbook_on_no_hosts_matched(self):
+ """Called when a play's host pattern matches no hosts"""
+ self._display.display("skipping: no hosts matched", color=C.COLOR_SKIP, log_only=True)
+
+ ######################################################################
+ # So we can bubble up errors to the top
+ def v2_runner_on_failed(self, result, ignore_errors=False):
+ """I guess this is when an entire task has failed?"""
+
+ if self._play.strategy == 'free' and self._last_task_banner != result._task._uuid:
+ self._print_task_banner(result._task)
+
+ delegated_vars = result._result.get('_ansible_delegated_vars', None)
+ if 'exception' in result._result:
+ if self._display.verbosity < 3:
+ # extract just the actual error message from the exception text
+ error = result._result['exception'].strip().split('\n')[-1]
+ msg = "An exception occurred during task execution. To see the full traceback, use -vvv. The error was: %s" % error
+ else:
+ msg = "An exception occurred during task execution. The full traceback is:\n" + result._result['exception']
+
+ self._display.display(msg, color=C.COLOR_ERROR)
+
+ if result._task.loop and 'results' in result._result:
+ self._process_items(result)
+
+ else:
+ if delegated_vars:
+ self._display.display("fatal: [%s -> %s]: FAILED! => %s" % (result._host.get_name(), delegated_vars['ansible_host'], self._dump_results(result._result)), color=C.COLOR_ERROR)
+ else:
+ self._display.display("fatal: [%s]: FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result)), color=C.COLOR_ERROR)
+
+ if ignore_errors:
+ self._display.display("...ignoring", color=C.COLOR_SKIP)
+
+ def v2_runner_item_on_failed(self, result):
+ """When an item in a task fails."""
+ delegated_vars = result._result.get('_ansible_delegated_vars', None)
+ if 'exception' in result._result:
+ if self._display.verbosity < 3:
+ # extract just the actual error message from the exception text
+ error = result._result['exception'].strip().split('\n')[-1]
+ msg = "An exception occurred during task execution. To see the full traceback, use -vvv. The error was: %s" % error
+ else:
+ msg = "An exception occurred during task execution. The full traceback is:\n" + result._result['exception']
+
+ self._display.display(msg, color=C.COLOR_ERROR)
+
+ msg = "failed: "
+ if delegated_vars:
+ msg += "[%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host'])
+ else:
+ msg += "[%s]" % (result._host.get_name())
+
+ self._display.display(msg + " (item=%s) => %s" % (self._get_item(result._result), self._dump_results(result._result)), color=C.COLOR_ERROR)
+ self._handle_warnings(result._result)
+
+ ######################################################################
+ def v2_playbook_on_stats(self, stats):
+ """Print the final playbook run stats"""
+ self._display.display("", screen_only=True)
+ self.banner("PLAY RECAP")
+
+ hosts = sorted(stats.processed.keys())
+ for h in hosts:
+ t = stats.summarize(h)
+
+ self._display.display(
+ u"%s : %s %s %s %s" % (
+ hostcolor(h, t),
+ colorize(u'ok', t['ok'], C.COLOR_OK),
+ colorize(u'changed', t['changed'], C.COLOR_CHANGED),
+ colorize(u'unreachable', t['unreachable'], C.COLOR_UNREACHABLE),
+ colorize(u'failed', t['failures'], C.COLOR_ERROR)),
+ screen_only=True
+ )
+
+ self._display.display(
+ u"%s : %s %s %s %s" % (
+ hostcolor(h, t, False),
+ colorize(u'ok', t['ok'], None),
+ colorize(u'changed', t['changed'], None),
+ colorize(u'unreachable', t['unreachable'], None),
+ colorize(u'failed', t['failures'], None)),
+ log_only=True
+ )
+
+ self._display.display("", screen_only=True)
+ self._display.display("", screen_only=True)
+
+ # Some plays are conditional and won't run (such as load
+ # balancers) if they aren't required. Sometimes plays are
+ # conditionally included later in the run. Let the user know
+ # about this to avoid potential confusion.
+ if self.plays_total_ran != self.plays_count:
+ print("Installation Complete: Note: Play count is only an estimate, some plays may have been skipped or dynamically added")
+ self._display.display("", screen_only=True)
diff --git a/roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py b/roles/lib_utils/filter_plugins/oo_cert_expiry.py
index a2bc9ecdb..58b228fee 100644
--- a/roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py
+++ b/roles/lib_utils/filter_plugins/oo_cert_expiry.py
@@ -31,7 +31,6 @@ certificates
Example playbook usage:
- name: Generate expiration results JSON
- become: no
run_once: yes
delegate_to: localhost
when: openshift_certificate_expiry_save_json_results|bool
diff --git a/roles/lib_utils/filter_plugins/oo_filters.py b/roles/lib_utils/filter_plugins/oo_filters.py
new file mode 100644
index 000000000..9f73510c4
--- /dev/null
+++ b/roles/lib_utils/filter_plugins/oo_filters.py
@@ -0,0 +1,627 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# pylint: disable=too-many-lines
+"""
+Custom filters for use in openshift-ansible
+"""
+import os
+import pdb
+import random
+import re
+
+from base64 import b64encode
+from collections import Mapping
+# pylint no-name-in-module and import-error disabled here because pylint
+# fails to properly detect the packages when installed in a virtualenv
+from distutils.util import strtobool # pylint:disable=no-name-in-module,import-error
+from operator import itemgetter
+
+import yaml
+
+from ansible import errors
+from ansible.parsing.yaml.dumper import AnsibleDumper
+
+# pylint: disable=import-error,no-name-in-module
+from ansible.module_utils.six import string_types, u
+# pylint: disable=import-error,no-name-in-module
+from ansible.module_utils.six.moves.urllib.parse import urlparse
+
+HAS_OPENSSL = False
+try:
+ import OpenSSL.crypto
+ HAS_OPENSSL = True
+except ImportError:
+ pass
+
+
+# pylint: disable=C0103
+
+def lib_utils_oo_pdb(arg):
+ """ This pops you into a pdb instance where arg is the data passed in
+ from the filter.
+ Ex: "{{ hostvars | lib_utils_oo_pdb }}"
+ """
+ pdb.set_trace()
+ return arg
+
+
+def get_attr(data, attribute=None):
+ """ This looks up dictionary attributes of the form a.b.c and returns
+ the value.
+
+ If the key isn't present, None is returned.
+ Ex: data = {'a': {'b': {'c': 5}}}
+ attribute = "a.b.c"
+ returns 5
+ """
+ if not attribute:
+ raise errors.AnsibleFilterError("|failed expects attribute to be set")
+
+ ptr = data
+ for attr in attribute.split('.'):
+ if attr in ptr:
+ ptr = ptr[attr]
+ else:
+ ptr = None
+ break
+
+ return ptr
+
+
+def oo_flatten(data):
+ """ This filter plugin will flatten a list of lists
+ """
+ if not isinstance(data, list):
+ raise errors.AnsibleFilterError("|failed expects to flatten a List")
+
+ return [item for sublist in data for item in sublist]
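+
+# Example: oo_flatten([[1, 2], [3]]) returns [1, 2, 3].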
+
+
+def lib_utils_oo_collect(data_list, attribute=None, filters=None):
+ """ This takes a list of dict and collects all attributes specified into a
+ list. If filter is specified then we will include all items that
+ match _ALL_ of filters. If a dict entry is missing the key in a
+ filter it will be excluded from the match.
+ Ex: data_list = [ {'a':1, 'b':5, 'z': 'z'}, # True, return
+ {'a':2, 'z': 'z'}, # True, return
+ {'a':3, 'z': 'z'}, # True, return
+ {'a':4, 'z': 'b'}, # FAILED, obj['z'] != filters['z']
+ ]
+ attribute = 'a'
+ filters = {'z': 'z'}
+ returns [1, 2, 3]
+
+ This also deals with lists of lists with dict as elements.
+ Ex: data_list = [
+ [ {'a':1, 'b':5, 'z': 'z'}, # True, return
+ {'a':2, 'b':6, 'z': 'z'} # True, return
+ ],
+ [ {'a':3, 'z': 'z'}, # True, return
+ {'a':4, 'z': 'b'} # FAILED, obj['z'] != filters['z']
+ ],
+ {'a':5, 'z': 'z'}, # True, return
+ ]
+ attribute = 'a'
+ filters = {'z': 'z'}
+ returns [1, 2, 3, 5]
+ """
+ if not isinstance(data_list, list):
+ raise errors.AnsibleFilterError("lib_utils_oo_collect expects to filter on a List")
+
+ if not attribute:
+ raise errors.AnsibleFilterError("lib_utils_oo_collect expects attribute to be set")
+
+ data = []
+ retval = []
+
+ for item in data_list:
+ if isinstance(item, list):
+ retval.extend(lib_utils_oo_collect(item, attribute, filters))
+ else:
+ data.append(item)
+
+ if filters is not None:
+ if not isinstance(filters, dict):
+ raise errors.AnsibleFilterError(
+ "lib_utils_oo_collect expects filter to be a dict")
+ retval.extend([get_attr(d, attribute) for d in data if (
+ all([d.get(key, None) == filters[key] for key in filters]))])
+ else:
+ retval.extend([get_attr(d, attribute) for d in data])
+
+ retval = [val for val in retval if val is not None]
+
+ return retval
+
+
+def lib_utils_oo_select_keys_from_list(data, keys):
+ """ This returns a list, which contains the value portions for the keys
+ Ex: data = { 'a':1, 'b':2, 'c':3 }
+ keys = ['a', 'c']
+ returns [1, 3]
+ """
+
+ if not isinstance(data, list):
+ raise errors.AnsibleFilterError("|lib_utils_oo_select_keys_from_list failed expects to filter on a list")
+
+ if not isinstance(keys, list):
+ raise errors.AnsibleFilterError("|lib_utils_oo_select_keys_from_list failed expects first param is a list")
+
+ # Gather up the values for the list of keys passed in
+ retval = [lib_utils_oo_select_keys(item, keys) for item in data]
+
+ return oo_flatten(retval)
+
+
+def lib_utils_oo_select_keys(data, keys):
+ """ This returns a list, which contains the value portions for the keys
+ Ex: data = { 'a':1, 'b':2, 'c':3 }
+ keys = ['a', 'c']
+ returns [1, 3]
+ """
+
+ if not isinstance(data, Mapping):
+ raise errors.AnsibleFilterError("|lib_utils_oo_select_keys failed expects to filter on a dict or object")
+
+ if not isinstance(keys, list):
+ raise errors.AnsibleFilterError("|lib_utils_oo_select_keys failed expects first param is a list")
+
+ # Gather up the values for the list of keys passed in
+ retval = [data[key] for key in keys if key in data]
+
+ return retval
+
+
+def lib_utils_oo_prepend_strings_in_list(data, prepend):
+ """ This takes a list of strings and prepends a string to each item in the
+ list
+ Ex: data = ['cart', 'tree']
+ prepend = 'apple-'
+ returns ['apple-cart', 'apple-tree']
+ """
+ if not isinstance(data, list):
+ raise errors.AnsibleFilterError("|failed expects first param is a list")
+ if not all(isinstance(x, string_types) for x in data):
+ raise errors.AnsibleFilterError("|failed expects first param is a list"
+ " of strings")
+ retval = [prepend + s for s in data]
+ return retval
+
+
+def lib_utils_oo_dict_to_list_of_dict(data, key_title='key', value_title='value'):
+ """Take a dict and arrange them as a list of dicts
+
+ Input data:
+ {'region': 'infra', 'test_k': 'test_v'}
+
+ Return data:
+ [{'key': 'region', 'value': 'infra'}, {'key': 'test_k', 'value': 'test_v'}]
+
+ Written for use of the oc_label module
+ """
+ if not isinstance(data, dict):
+ # pylint: disable=line-too-long
+ raise errors.AnsibleFilterError("|failed expects first param is a dict. Got %s. Type: %s" % (str(data), str(type(data))))
+
+ rval = []
+ for label in data.items():
+ rval.append({key_title: label[0], value_title: label[1]})
+
+ return rval
+
+
+def oo_ami_selector(data, image_name):
+ """ This takes a list of amis and an image name and attempts to return
+ the latest ami.
+ """
+ if not isinstance(data, list):
+ raise errors.AnsibleFilterError("|failed expects first param is a list")
+
+ if not data:
+ return None
+ else:
+ if image_name is None or not image_name.endswith('_*'):
+ ami = sorted(data, key=itemgetter('name'), reverse=True)[0]
+ return ami['ami_id']
+ else:
+ ami_info = [(ami, ami['name'].split('_')[-1]) for ami in data]
+ ami = sorted(ami_info, key=itemgetter(1), reverse=True)[0][0]
+ return ami['ami_id']
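+
+# Illustrative example: given data = [{'name': 'img_2', 'ami_id': 'ami-b'},
+# {'name': 'img_1', 'ami_id': 'ami-a'}] and image_name = 'img_*', the list
+# is sorted by the name's trailing '_' component and 'ami-b' is returned.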
+
+
+def lib_utils_oo_split(string, separator=','):
+ """ This splits the input string into a list. If the input string is
+ already a list we will return it as is.
+ """
+ if isinstance(string, list):
+ return string
+ return string.split(separator)
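+
+# Example: lib_utils_oo_split('a,b,c') returns ['a', 'b', 'c'], while a list
+# input such as ['a', 'b'] is returned unchanged.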
+
+
+def lib_utils_oo_dict_to_keqv_list(data):
+ """Take a dict and return a list of k=v pairs
+
+ Input data:
+ {'a': 1, 'b': 2}
+
+ Return data:
+ ['a=1', 'b=2']
+ """
+ return ['='.join(str(e) for e in x) for x in data.items()]
+
+
+def lib_utils_oo_list_to_dict(lst, separator='='):
+ """ This converts a list of ["k=v"] to a dictionary {k: v}.
+ """
+ kvs = [i.split(separator) for i in lst]
+ return {k: v for k, v in kvs}
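+
+# Example: lib_utils_oo_list_to_dict(['a=1', 'b=2']) returns
+# {'a': '1', 'b': '2'} (values remain strings).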
+
+
+def haproxy_backend_masters(hosts, port):
+ """ This takes an array of dicts and returns an array of dicts
+ to be used as a backend for the haproxy role
+ """
+ servers = []
+ for idx, host_info in enumerate(hosts):
+ server = dict(name="master%s" % idx)
+ server_ip = host_info['openshift']['common']['ip']
+ server['address'] = "%s:%s" % (server_ip, port)
+ server['opts'] = 'check'
+ servers.append(server)
+ return servers
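+
+# Illustrative example: haproxy_backend_masters(
+#     [{'openshift': {'common': {'ip': '10.0.0.1'}}}], 8443)
+# returns [{'name': 'master0', 'address': '10.0.0.1:8443', 'opts': 'check'}].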
+
+
+# pylint: disable=too-many-branches
+def lib_utils_oo_parse_named_certificates(certificates, named_certs_dir, internal_hostnames):
+ """ Parses names from list of certificate hashes.
+
+ Ex: certificates = [{ "certfile": "/root/custom1.crt",
+ "keyfile": "/root/custom1.key",
+ "cafile": "/root/custom-ca1.crt" },
+ { "certfile": "custom2.crt",
+ "keyfile": "custom2.key",
+ "cafile": "custom-ca2.crt" }]
+
+ returns [{ "certfile": "/etc/origin/master/named_certificates/custom1.crt",
+ "keyfile": "/etc/origin/master/named_certificates/custom1.key",
+ "cafile": "/etc/origin/master/named_certificates/custom-ca1.crt",
+ "names": [ "public-master-host.com",
+ "other-master-host.com" ] },
+ { "certfile": "/etc/origin/master/named_certificates/custom2.crt",
+ "keyfile": "/etc/origin/master/named_certificates/custom2.key",
+ "cafile": "/etc/origin/master/named_certificates/custom-ca-2.crt",
+ "names": [ "some-hostname.com" ] }]
+ """
+ if not isinstance(named_certs_dir, string_types):
+ raise errors.AnsibleFilterError("|failed expects named_certs_dir is str or unicode")
+
+ if not isinstance(internal_hostnames, list):
+ raise errors.AnsibleFilterError("|failed expects internal_hostnames is list")
+
+ if not HAS_OPENSSL:
+ raise errors.AnsibleFilterError("|missing OpenSSL python bindings")
+
+ for certificate in certificates:
+ if 'names' in certificate.keys():
+ continue
+ else:
+ certificate['names'] = []
+
+ if not os.path.isfile(certificate['certfile']) or not os.path.isfile(certificate['keyfile']):
+ raise errors.AnsibleFilterError("|certificate and/or key does not exist '%s', '%s'" %
+ (certificate['certfile'], certificate['keyfile']))
+
+ try:
+ st_cert = open(certificate['certfile'], 'rt').read()
+ cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, st_cert)
+ certificate['names'].append(str(cert.get_subject().commonName.decode()))
+ for i in range(cert.get_extension_count()):
+ if cert.get_extension(i).get_short_name() == 'subjectAltName':
+ for name in str(cert.get_extension(i)).replace('DNS:', '').split(', '):
+ certificate['names'].append(name)
+ except Exception:
+ raise errors.AnsibleFilterError(("|failed to parse certificate '%s', " % certificate['certfile'] +
+ "please specify certificate names in host inventory"))
+
+ certificate['names'] = list(set(certificate['names']))
+ if 'cafile' not in certificate:
+ certificate['names'] = [name for name in certificate['names'] if name not in internal_hostnames]
+ if not certificate['names']:
+ raise errors.AnsibleFilterError(("|failed to parse certificate '%s' or " % certificate['certfile'] +
+ "detected a collision with internal hostname, please specify " +
+ "certificate names in host inventory"))
+
+ for certificate in certificates:
+ # Update paths for configuration
+ certificate['certfile'] = os.path.join(named_certs_dir, os.path.basename(certificate['certfile']))
+ certificate['keyfile'] = os.path.join(named_certs_dir, os.path.basename(certificate['keyfile']))
+ if 'cafile' in certificate:
+ certificate['cafile'] = os.path.join(named_certs_dir, os.path.basename(certificate['cafile']))
+ return certificates
+
+
+def lib_utils_oo_generate_secret(num_bytes):
+ """ generate a session secret """
+
+ if not isinstance(num_bytes, int):
+ raise errors.AnsibleFilterError("|failed expects num_bytes is int")
+
+ return b64encode(os.urandom(num_bytes)).decode('utf-8')
+
+
+def lib_utils_to_padded_yaml(data, level=0, indent=2, **kw):
+ """ returns a yaml snippet padded to match the indent level you specify """
+ if data in [None, ""]:
+ return ""
+
+ try:
+ transformed = u(yaml.dump(data, indent=indent, allow_unicode=True,
+ default_flow_style=False,
+ Dumper=AnsibleDumper, **kw))
+ padded = "\n".join([" " * level * indent + line for line in transformed.splitlines()])
+ return "\n{0}".format(padded)
+ except Exception as my_e:
+ raise errors.AnsibleFilterError('Failed to convert: %s' % my_e)
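+
+# Example: lib_utils_to_padded_yaml({'a': 1}, level=1) returns "\n  a: 1"
+# (indent * level spaces of padding before each line).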
+
+
+def lib_utils_oo_pods_match_component(pods, deployment_type, component):
+ """ Filters a list of Pods and returns the ones matching the deployment_type and component
+ """
+ if not isinstance(pods, list):
+ raise errors.AnsibleFilterError("failed expects to filter on a list")
+ if not isinstance(deployment_type, string_types):
+ raise errors.AnsibleFilterError("failed expects deployment_type to be a string")
+ if not isinstance(component, string_types):
+ raise errors.AnsibleFilterError("failed expects component to be a string")
+
+ image_prefix = 'openshift/origin-'
+ if deployment_type == 'openshift-enterprise':
+ image_prefix = 'openshift3/ose-'
+
+ matching_pods = []
+ image_regex = image_prefix + component + r'.*'
+ for pod in pods:
+ for container in pod['spec']['containers']:
+ if re.search(image_regex, container['image']):
+ matching_pods.append(pod)
+ break # stop here, don't add a pod more than once
+
+ return matching_pods
+
+
+def lib_utils_oo_image_tag_to_rpm_version(version, include_dash=False):
+ """ Convert an image tag string to an RPM version if necessary
+ Empty strings and strings that are already in rpm version format
+ are ignored. Also remove non semantic version components.
+
+ Ex. v3.2.0.10 -> -3.2.0.10
+ v1.2.0-rc1 -> -1.2.0
+ """
+ if not isinstance(version, string_types):
+ raise errors.AnsibleFilterError("|failed expects a string or unicode")
+ if version.startswith("v"):
+ version = version[1:]
+ # Strip release from requested version, we no longer support this.
+ version = version.split('-')[0]
+
+ if include_dash and version and not version.startswith("-"):
+ version = "-" + version
+
+ return version
+
+
+def lib_utils_oo_hostname_from_url(url):
+ """ Returns the hostname contained in a URL
+
+ Ex: https://ose3-master.example.com/v1/api -> ose3-master.example.com
+ """
+ if not isinstance(url, string_types):
+ raise errors.AnsibleFilterError("|failed expects a string or unicode")
+ parse_result = urlparse(url)
+ if parse_result.netloc != '':
+ return parse_result.netloc
+ else:
+ # netloc wasn't parsed, assume url was missing scheme and path
+ return parse_result.path
+
+
+# pylint: disable=invalid-name, unused-argument
+def lib_utils_oo_loadbalancer_frontends(
+ api_port, servers_hostvars, use_nuage=False, nuage_rest_port=None):
+ """TODO: Document me."""
+ loadbalancer_frontends = [{'name': 'atomic-openshift-api',
+ 'mode': 'tcp',
+ 'options': ['tcplog'],
+ 'binds': ["*:{0}".format(api_port)],
+ 'default_backend': 'atomic-openshift-api'}]
+ if bool(strtobool(str(use_nuage))) and nuage_rest_port is not None:
+ loadbalancer_frontends.append({'name': 'nuage-monitor',
+ 'mode': 'tcp',
+ 'options': ['tcplog'],
+ 'binds': ["*:{0}".format(nuage_rest_port)],
+ 'default_backend': 'nuage-monitor'})
+ return loadbalancer_frontends
+
+
+# pylint: disable=invalid-name
+def lib_utils_oo_loadbalancer_backends(
+ api_port, servers_hostvars, use_nuage=False, nuage_rest_port=None):
+ """TODO: Document me."""
+ loadbalancer_backends = [{'name': 'atomic-openshift-api',
+ 'mode': 'tcp',
+ 'option': 'tcplog',
+ 'balance': 'source',
+ 'servers': haproxy_backend_masters(servers_hostvars, api_port)}]
+ if bool(strtobool(str(use_nuage))) and nuage_rest_port is not None:
+ # pylint: disable=line-too-long
+ loadbalancer_backends.append({'name': 'nuage-monitor',
+ 'mode': 'tcp',
+ 'option': 'tcplog',
+ 'balance': 'source',
+ 'servers': haproxy_backend_masters(servers_hostvars, nuage_rest_port)})
+ return loadbalancer_backends
+
+
+def lib_utils_oo_chomp_commit_offset(version):
+ """Chomp any "+git.foo" commit offset string from the given `version`
+ and return the modified version string.
+
+Ex:
+- chomp_commit_offset(None) => None
+- chomp_commit_offset(1337) => "1337"
+- chomp_commit_offset("v3.4.0.15+git.derp") => "v3.4.0.15"
+- chomp_commit_offset("v3.4.0.15") => "v3.4.0.15"
+- chomp_commit_offset("v1.3.0+52492b4") => "v1.3.0"
+ """
+ if version is None:
+ return version
+ else:
+ # Stringify, just in case it's a Number type. Split by '+' and
+ # return the first split. No concerns about strings without a
+ # '+', .split() returns an array of the original string.
+ return str(version).split('+')[0]
+
+
+def lib_utils_oo_random_word(length, source='abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'):
+ """Generates a random string of given length from a set of alphanumeric characters.
+ The default source uses [a-z][A-Z][0-9]
+ Ex:
+ - lib_utils_oo_random_word(3) => aB9
+ - lib_utils_oo_random_word(4, source='012') => 0120
+ """
+ return ''.join(random.choice(source) for i in range(length))
+
+
+def lib_utils_oo_contains_rule(source, apiGroups, resources, verbs):
+ '''Return true if the specified rule is contained within the provided source'''
+
+ rules = source['rules']
+
+ if rules:
+ for rule in rules:
+ if set(rule['apiGroups']) == set(apiGroups):
+ if set(rule['resources']) == set(resources):
+ if set(rule['verbs']) == set(verbs):
+ return True
+
+ return False
+
+
+def lib_utils_oo_selector_to_string_list(user_dict):
+ """Convert a dict of selectors to a key=value list of strings
+
+Given input of {'region': 'infra', 'zone': 'primary'} returns a list
+of items as ['region=infra', 'zone=primary']
+ """
+ selectors = []
+ for key in user_dict:
+ selectors.append("{}={}".format(key, user_dict[key]))
+ return selectors
+
+
+def lib_utils_oo_filter_sa_secrets(sa_secrets, secret_hint='-token-'):
+ """Parse the Service Account Secrets list, `sa_secrets`, (as from
+oc_serviceaccount_secret:state=list) and return the name of the secret
+containing the `secret_hint` string. For example, by default this will
+return the name of the secret holding the SA bearer token.
+
+Only provide the 'results' object to this filter. This filter expects
+to receive a list like this:
+
+ [
+ {
+ "name": "management-admin-dockercfg-p31s2"
+ },
+ {
+ "name": "management-admin-token-bnqsh"
+ }
+ ]
+
+
+Returns:
+
+* `secret_name` [string] - The name of the secret matching the
+ `secret_hint` parameter. By default this is the secret holding the
+ SA's bearer token.
+
+Example playbook usage:
+
+Register the return value from oc_serviceaccount_secret and pass
+that result to this filter plugin.
+
+ - name: Get all SA Secrets
+ oc_serviceaccount_secret:
+ state: list
+ service_account: management-admin
+ namespace: management-infra
+ register: sa
+
+ - name: Save the SA bearer token secret name
+ set_fact:
+ management_token: "{{ sa.results | lib_utils_oo_filter_sa_secrets }}"
+
+ - name: Get the SA bearer token value
+ oc_secret:
+ state: list
+ name: "{{ management_token }}"
+ namespace: management-infra
+ decode: true
+ register: sa_secret
+
+ - name: Print the bearer token value
+ debug:
+ var: sa_secret.results.decoded.token
+
+ """
+ secret_name = None
+
+ for secret in sa_secrets:
+ # each secret is a hash
+ if secret['name'].find(secret_hint) == -1:
+ continue
+ else:
+ secret_name = secret['name']
+ break
+
+ return secret_name
+
+
+def map_from_pairs(source, delim="="):
+ ''' Returns a dict parsed from a delim-delimited string of key/value pairs '''
+ if source == '':
+ return dict()
+
+ return dict(item.split(delim) for item in source.split(","))
+
+
+class FilterModule(object):
+ """ Custom ansible filter mapping """
+
+ # pylint: disable=no-self-use, too-few-public-methods
+ def filters(self):
+ """ returns a mapping of filters to methods """
+ return {
+ "lib_utils_oo_select_keys": lib_utils_oo_select_keys,
+ "lib_utils_oo_select_keys_from_list": lib_utils_oo_select_keys_from_list,
+ "lib_utils_oo_chomp_commit_offset": lib_utils_oo_chomp_commit_offset,
+ "lib_utils_oo_collect": lib_utils_oo_collect,
+ "lib_utils_oo_pdb": lib_utils_oo_pdb,
+ "lib_utils_oo_prepend_strings_in_list": lib_utils_oo_prepend_strings_in_list,
+ "lib_utils_oo_dict_to_list_of_dict": lib_utils_oo_dict_to_list_of_dict,
+ "lib_utils_oo_split": lib_utils_oo_split,
+ "lib_utils_oo_dict_to_keqv_list": lib_utils_oo_dict_to_keqv_list,
+ "lib_utils_oo_list_to_dict": lib_utils_oo_list_to_dict,
+ "lib_utils_oo_parse_named_certificates": lib_utils_oo_parse_named_certificates,
+ "lib_utils_oo_generate_secret": lib_utils_oo_generate_secret,
+ "lib_utils_oo_pods_match_component": lib_utils_oo_pods_match_component,
+ "lib_utils_oo_image_tag_to_rpm_version": lib_utils_oo_image_tag_to_rpm_version,
+ "lib_utils_oo_hostname_from_url": lib_utils_oo_hostname_from_url,
+ "lib_utils_oo_loadbalancer_frontends": lib_utils_oo_loadbalancer_frontends,
+ "lib_utils_oo_loadbalancer_backends": lib_utils_oo_loadbalancer_backends,
+ "lib_utils_to_padded_yaml": lib_utils_to_padded_yaml,
+ "lib_utils_oo_random_word": lib_utils_oo_random_word,
+ "lib_utils_oo_contains_rule": lib_utils_oo_contains_rule,
+ "lib_utils_oo_selector_to_string_list": lib_utils_oo_selector_to_string_list,
+ "lib_utils_oo_filter_sa_secrets": lib_utils_oo_filter_sa_secrets,
+ "map_from_pairs": map_from_pairs
+ }
diff --git a/roles/openshift_aws/filter_plugins/openshift_aws_filters.py b/roles/lib_utils/filter_plugins/openshift_aws_filters.py
index dfcb11da3..dfcb11da3 100644
--- a/roles/openshift_aws/filter_plugins/openshift_aws_filters.py
+++ b/roles/lib_utils/filter_plugins/openshift_aws_filters.py
diff --git a/roles/openshift_hosted/filter_plugins/openshift_hosted_filters.py b/roles/lib_utils/filter_plugins/openshift_hosted_filters.py
index 003ce5f9e..003ce5f9e 100644
--- a/roles/openshift_hosted/filter_plugins/openshift_hosted_filters.py
+++ b/roles/lib_utils/filter_plugins/openshift_hosted_filters.py
diff --git a/roles/openshift_master_facts/filter_plugins/openshift_master.py b/roles/lib_utils/filter_plugins/openshift_master.py
index ff15f693b..e67b19c28 100644
--- a/roles/openshift_master_facts/filter_plugins/openshift_master.py
+++ b/roles/lib_utils/filter_plugins/openshift_master.py
@@ -10,11 +10,7 @@ from ansible import errors
from ansible.parsing.yaml.dumper import AnsibleDumper
from ansible.plugins.filter.core import to_bool as ansible_bool
-# ansible.compat.six goes away with Ansible 2.4
-try:
- from ansible.compat.six import string_types, u
-except ImportError:
- from ansible.module_utils.six import string_types, u
+from ansible.module_utils.six import string_types, u
import yaml
diff --git a/roles/etcd/library/delegated_serial_command.py b/roles/lib_utils/library/delegated_serial_command.py
index 0cab1ca88..0cab1ca88 100755
--- a/roles/etcd/library/delegated_serial_command.py
+++ b/roles/lib_utils/library/delegated_serial_command.py
diff --git a/roles/lib_utils/library/kubeclient_ca.py b/roles/lib_utils/library/kubeclient_ca.py
new file mode 100644
index 000000000..a89a5574f
--- /dev/null
+++ b/roles/lib_utils/library/kubeclient_ca.py
@@ -0,0 +1,88 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+''' kubeclient_ca ansible module '''
+
+import base64
+import yaml
+from ansible.module_utils.basic import AnsibleModule
+
+
+DOCUMENTATION = '''
+---
+module: kubeclient_ca
+short_description: Modify kubeclient certificate-authority-data
+author: Andrew Butcher
+requirements: [ ]
+'''
+EXAMPLES = '''
+- kubeclient_ca:
+ client_path: /etc/origin/master/admin.kubeconfig
+ ca_path: /etc/origin/master/ca-bundle.crt
+
+- slurp:
+ src: /etc/origin/master/ca-bundle.crt
+ register: ca_data
+- kubeclient_ca:
+ client_path: /etc/origin/master/admin.kubeconfig
+ ca_data: "{{ ca_data.content }}"
+'''
+
+
+def main():
+ ''' Modify kubeconfig located at `client_path`, setting the
+ certificate authority data to specified `ca_data` or contents of
+ `ca_path`.
+ '''
+
+ module = AnsibleModule( # noqa: F405
+ argument_spec=dict(
+ client_path=dict(required=True),
+ ca_data=dict(required=False, default=None),
+ ca_path=dict(required=False, default=None),
+ backup=dict(required=False, default=True, type='bool'),
+ ),
+ supports_check_mode=True,
+ mutually_exclusive=[['ca_data', 'ca_path']],
+ required_one_of=[['ca_data', 'ca_path']]
+ )
+
+ client_path = module.params['client_path']
+ ca_data = module.params['ca_data']
+ ca_path = module.params['ca_path']
+ backup = module.params['backup']
+
+ try:
+ with open(client_path) as client_config_file:
+ client_config_data = yaml.safe_load(client_config_file.read())
+
+ if ca_data is None:
+ with open(ca_path) as ca_file:
+ ca_data = base64.standard_b64encode(ca_file.read())
+
+ changes = []
+ # Naively update the CA information for each cluster in the
+ # kubeconfig.
+ for cluster in client_config_data['clusters']:
+ if cluster['cluster']['certificate-authority-data'] != ca_data:
+ cluster['cluster']['certificate-authority-data'] = ca_data
+ changes.append(cluster['name'])
+
+ if not module.check_mode:
+ if len(changes) > 0 and backup:
+ module.backup_local(client_path)
+
+ with open(client_path, 'w') as client_config_file:
+ client_config_string = yaml.dump(client_config_data, default_flow_style=False)
+ client_config_string = client_config_string.replace('\'\'', '""')
+ client_config_file.write(client_config_string)
+
+ return module.exit_json(changed=(len(changes) > 0))
+
+ # ignore broad-except error to avoid stack trace to ansible user
+ # pylint: disable=broad-except
+ except Exception as error:
+ return module.fail_json(msg=str(error))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/roles/lib_utils/library/modify_yaml.py b/roles/lib_utils/library/modify_yaml.py
new file mode 100644
index 000000000..9b8f9ba33
--- /dev/null
+++ b/roles/lib_utils/library/modify_yaml.py
@@ -0,0 +1,117 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+''' modify_yaml ansible module '''
+
+import yaml
+
+# ignore pylint errors related to the module_utils import
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
+from ansible.module_utils.basic import * # noqa: F402,F403
+
+
+DOCUMENTATION = '''
+---
+module: modify_yaml
+short_description: Modify yaml key value pairs
+author: Andrew Butcher
+requirements: [ ]
+'''
+EXAMPLES = '''
+- modify_yaml:
+ dest: /etc/origin/master/master-config.yaml
+ yaml_key: 'kubernetesMasterConfig.masterCount'
+ yaml_value: 2
+'''
+
+
+def set_key(yaml_data, yaml_key, yaml_value):
+ ''' Updates a parsed yaml structure setting a key to a value.
+
+ :param yaml_data: yaml structure to modify.
+ :type yaml_data: dict
+ :param yaml_key: Key to modify.
+ :type yaml_key: mixed
+ :param yaml_value: Value use for yaml_key.
+ :type yaml_value: mixed
+ :returns: Changes to the yaml_data structure
+ :rtype: dict(tuple())
+ '''
+ changes = []
+ ptr = yaml_data
+ final_key = yaml_key.split('.')[-1]
+ for key in yaml_key.split('.'):
+ # Key isn't present and we're not on the final key. Set to empty dictionary.
+ if key not in ptr and key != final_key:
+ ptr[key] = {}
+ ptr = ptr[key]
+ # Current key is the final key. Update value.
+ elif key == final_key:
+ if (key in ptr and module.safe_eval(ptr[key]) != yaml_value) or (key not in ptr): # noqa: F405
+ ptr[key] = yaml_value
+ changes.append((yaml_key, yaml_value))
+ else:
+ # Next value is None and we're not on the final key.
+ # Turn value into an empty dictionary.
+ if ptr[key] is None and key != final_key:
+ ptr[key] = {}
+ ptr = ptr[key]
+ return changes
+
+
+def main():
+ ''' Modify key (supplied in jinja2 dot notation) in yaml file, setting
+ the key to the desired value.
+ '''
+
+ # disabling pylint errors for global-variable-undefined and invalid-name
+ # for 'global module' usage, since it is required to use ansible_facts
+ # pylint: disable=global-variable-undefined, invalid-name,
+ # redefined-outer-name
+ global module
+
+ module = AnsibleModule( # noqa: F405
+ argument_spec=dict(
+ dest=dict(required=True),
+ yaml_key=dict(required=True),
+ yaml_value=dict(required=True),
+ backup=dict(required=False, default=True, type='bool'),
+ ),
+ supports_check_mode=True,
+ )
+
+ dest = module.params['dest']
+ yaml_key = module.params['yaml_key']
+ yaml_value = module.safe_eval(module.params['yaml_value'])
+ backup = module.params['backup']
+
+ # Represent null values as an empty string.
+ # pylint: disable=missing-docstring, unused-argument
+ def none_representer(dumper, data):
+ return yaml.ScalarNode(tag=u'tag:yaml.org,2002:null', value=u'')
+
+ yaml.add_representer(type(None), none_representer)
+
+ try:
+ with open(dest) as yaml_file:
+ yaml_data = yaml.safe_load(yaml_file.read())
+
+ changes = set_key(yaml_data, yaml_key, yaml_value)
+
+ if len(changes) > 0:
+ if backup:
+ module.backup_local(dest)
+ with open(dest, 'w') as yaml_file:
+ yaml_string = yaml.dump(yaml_data, default_flow_style=False)
+ yaml_string = yaml_string.replace('\'\'', '""')
+ yaml_file.write(yaml_string)
+
+ return module.exit_json(changed=(len(changes) > 0), changes=changes)
+
+ # ignore broad-except error to avoid stack trace to ansible user
+ # pylint: disable=broad-except
+ except Exception as error:
+ return module.fail_json(msg=str(error))
+
+
+if __name__ == '__main__':
+ main()
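
The dot-notation traversal in set_key is the interesting part; a simplified sketch of the behavior on an in-memory dict (data is made up, and this skips the change tracking and module.safe_eval comparison the real function does):

```python
def set_key_demo(data, dotted_key, value):
    # Walk all but the last key, creating empty dicts as needed;
    # assumes intermediate values are dicts (a simplification).
    keys = dotted_key.split('.')
    ptr = data
    for key in keys[:-1]:
        ptr = ptr.setdefault(key, {})
    ptr[keys[-1]] = value

yaml_data = {'kubernetesMasterConfig': {'masterCount': 1}}
set_key_demo(yaml_data, 'kubernetesMasterConfig.masterCount', 2)
print(yaml_data)  # {'kubernetesMasterConfig': {'masterCount': 2}}
```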
diff --git a/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py b/roles/lib_utils/library/openshift_cert_expiry.py
index e355266b0..e355266b0 100644
--- a/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py
+++ b/roles/lib_utils/library/openshift_cert_expiry.py
diff --git a/roles/openshift_cli/library/openshift_container_binary_sync.py b/roles/lib_utils/library/openshift_container_binary_sync.py
index 440b8ec28..440b8ec28 100644
--- a/roles/openshift_cli/library/openshift_container_binary_sync.py
+++ b/roles/lib_utils/library/openshift_container_binary_sync.py
diff --git a/roles/lib_os_firewall/library/os_firewall_manage_iptables.py b/roles/lib_utils/library/os_firewall_manage_iptables.py
index aeee3ede8..aeee3ede8 100755..100644
--- a/roles/lib_os_firewall/library/os_firewall_manage_iptables.py
+++ b/roles/lib_utils/library/os_firewall_manage_iptables.py
diff --git a/roles/lib_utils/library/rpm_q.py b/roles/lib_utils/library/rpm_q.py
new file mode 100644
index 000000000..3dec50fc2
--- /dev/null
+++ b/roles/lib_utils/library/rpm_q.py
@@ -0,0 +1,72 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Tobias Florek <tob@butter.sh>
+# Licensed under the terms of the MIT License
+"""
+An ansible module to query the RPM database. For use when yum/dnf are
+not available.
+"""
+
+# pylint: disable=redefined-builtin,wildcard-import,unused-wildcard-import
+from ansible.module_utils.basic import * # noqa: F403
+
+DOCUMENTATION = """
+---
+module: rpm_q
+short_description: Query the RPM database
+author: Tobias Florek
+options:
+ name:
+ description:
+ - The name of the package to query
+ required: true
+ state:
+ description:
+ - Whether the package is supposed to be installed or not
+ choices: [present, absent]
+ default: present
+"""
+
+EXAMPLES = """
+- rpm_q: name=ansible state=present
+- rpm_q: name=ansible state=absent
+"""
+
+RPM_BINARY = '/bin/rpm'
+
+
+def main():
+ """
+    Checks rpm -q for the named package and reports the installed
+    versions, failing when the requested state does not match reality.
+ """
+ module = AnsibleModule( # noqa: F405
+ argument_spec=dict(
+ name=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent'])
+ ),
+ supports_check_mode=True
+ )
+
+ name = module.params['name']
+ state = module.params['state']
+
+ # pylint: disable=invalid-name
+ rc, out, err = module.run_command([RPM_BINARY, '-q', name])
+
+ installed = out.rstrip('\n').split('\n')
+
+ if rc != 0:
+ if state == 'present':
+ module.fail_json(msg="%s is not installed" % name, stdout=out, stderr=err, rc=rc)
+ else:
+ module.exit_json(changed=False)
+ elif state == 'present':
+ module.exit_json(changed=False, installed_versions=installed)
+ else:
+ module.fail_json(msg="%s is installed", installed_versions=installed)
+
+
+if __name__ == '__main__':
+ main()
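
The present/absent decision reduces to the exit code of `rpm -q`; a standalone sketch of the same logic with subprocess (Python 3.7+; the package name is just an example):

```python
import subprocess

def rpm_q(name, state='present'):
    # rpm -q exits non-zero when the package is not installed.
    proc = subprocess.run(['/bin/rpm', '-q', name],
                          capture_output=True, text=True)
    if proc.returncode != 0:
        if state == 'present':
            raise RuntimeError('%s is not installed' % name)
        return []  # absent, as requested
    installed = proc.stdout.rstrip('\n').split('\n')
    if state == 'present':
        return installed  # e.g. ['bash-4.2.46-30.el7.x86_64']
    raise RuntimeError('%s is installed' % name)

print(rpm_q('bash'))
```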
diff --git a/roles/lib_utils/lookup_plugins/openshift_master_facts_default_predicates.py b/roles/lib_utils/lookup_plugins/openshift_master_facts_default_predicates.py
new file mode 100644
index 000000000..4858c5ec6
--- /dev/null
+++ b/roles/lib_utils/lookup_plugins/openshift_master_facts_default_predicates.py
@@ -0,0 +1,143 @@
+# pylint: disable=missing-docstring
+
+import re
+from ansible.errors import AnsibleError
+from ansible.plugins.lookup import LookupBase
+
+
+class LookupModule(LookupBase):
+ # pylint: disable=too-many-branches,too-many-statements,too-many-arguments
+
+ def run(self, terms, variables=None, regions_enabled=True, short_version=None,
+ deployment_type=None, **kwargs):
+
+ predicates = []
+
+ if short_version is None or deployment_type is None:
+ if 'openshift' not in variables:
+ raise AnsibleError("This lookup module requires openshift_facts to be run prior to use")
+
+ if deployment_type is None:
+ if 'common' not in variables['openshift'] or 'deployment_type' not in variables['openshift']['common']:
+ raise AnsibleError("This lookup module requires that the deployment_type be set")
+
+ deployment_type = variables['openshift']['common']['deployment_type']
+
+ if short_version is None:
+ if 'short_version' in variables['openshift']['common']:
+ short_version = variables['openshift']['common']['short_version']
+ elif 'openshift_release' in variables:
+ release = variables['openshift_release']
+ if release.startswith('v'):
+ short_version = release[1:]
+ else:
+ short_version = release
+ short_version = '.'.join(short_version.split('.')[0:2])
+ elif 'openshift_version' in variables:
+ version = variables['openshift_version']
+ short_version = '.'.join(version.split('.')[0:2])
+ else:
+ # pylint: disable=line-too-long
+ raise AnsibleError("Either OpenShift needs to be installed or openshift_release needs to be specified")
+ if deployment_type == 'origin':
+ if short_version not in ['1.1', '1.2', '1.3', '1.4', '1.5', '3.6', '3.7', '3.8', '3.9', 'latest']:
+ raise AnsibleError("Unknown short_version %s" % short_version)
+ elif deployment_type == 'openshift-enterprise':
+ if short_version not in ['3.1', '3.2', '3.3', '3.4', '3.5', '3.6', '3.7', '3.8', '3.9', 'latest']:
+ raise AnsibleError("Unknown short_version %s" % short_version)
+ else:
+ raise AnsibleError("Unknown deployment_type %s" % deployment_type)
+
+ if deployment_type == 'origin':
+ # convert short_version to enterprise short_version
+            short_version = re.sub(r'^1\.', '3.', short_version)
+
+ if short_version == 'latest':
+ short_version = '3.9'
+
+ # Predicates ordered according to OpenShift Origin source:
+ # origin/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go
+
+ if short_version == '3.1':
+ predicates.extend([
+ {'name': 'PodFitsHostPorts'},
+ {'name': 'PodFitsResources'},
+ {'name': 'NoDiskConflict'},
+ {'name': 'MatchNodeSelector'},
+ ])
+
+ if short_version == '3.2':
+ predicates.extend([
+ {'name': 'PodFitsHostPorts'},
+ {'name': 'PodFitsResources'},
+ {'name': 'NoDiskConflict'},
+ {'name': 'NoVolumeZoneConflict'},
+ {'name': 'MatchNodeSelector'},
+ {'name': 'MaxEBSVolumeCount'},
+ {'name': 'MaxGCEPDVolumeCount'}
+ ])
+
+ if short_version == '3.3':
+ predicates.extend([
+ {'name': 'NoDiskConflict'},
+ {'name': 'NoVolumeZoneConflict'},
+ {'name': 'MaxEBSVolumeCount'},
+ {'name': 'MaxGCEPDVolumeCount'},
+ {'name': 'GeneralPredicates'},
+ {'name': 'PodToleratesNodeTaints'},
+ {'name': 'CheckNodeMemoryPressure'}
+ ])
+
+ if short_version == '3.4':
+ predicates.extend([
+ {'name': 'NoDiskConflict'},
+ {'name': 'NoVolumeZoneConflict'},
+ {'name': 'MaxEBSVolumeCount'},
+ {'name': 'MaxGCEPDVolumeCount'},
+ {'name': 'GeneralPredicates'},
+ {'name': 'PodToleratesNodeTaints'},
+ {'name': 'CheckNodeMemoryPressure'},
+ {'name': 'CheckNodeDiskPressure'},
+ {'name': 'MatchInterPodAffinity'}
+ ])
+
+ if short_version in ['3.5', '3.6']:
+ predicates.extend([
+ {'name': 'NoVolumeZoneConflict'},
+ {'name': 'MaxEBSVolumeCount'},
+ {'name': 'MaxGCEPDVolumeCount'},
+ {'name': 'MatchInterPodAffinity'},
+ {'name': 'NoDiskConflict'},
+ {'name': 'GeneralPredicates'},
+ {'name': 'PodToleratesNodeTaints'},
+ {'name': 'CheckNodeMemoryPressure'},
+ {'name': 'CheckNodeDiskPressure'},
+ ])
+
+ if short_version in ['3.7', '3.8', '3.9']:
+ predicates.extend([
+ {'name': 'NoVolumeZoneConflict'},
+ {'name': 'MaxEBSVolumeCount'},
+ {'name': 'MaxGCEPDVolumeCount'},
+ {'name': 'MaxAzureDiskVolumeCount'},
+ {'name': 'MatchInterPodAffinity'},
+ {'name': 'NoDiskConflict'},
+ {'name': 'GeneralPredicates'},
+ {'name': 'PodToleratesNodeTaints'},
+ {'name': 'CheckNodeMemoryPressure'},
+ {'name': 'CheckNodeDiskPressure'},
+ {'name': 'NoVolumeNodeConflict'},
+ ])
+
+ if regions_enabled:
+ region_predicate = {
+ 'name': 'Region',
+ 'argument': {
+ 'serviceAffinity': {
+ 'labels': ['region']
+ }
+ }
+ }
+ predicates.append(region_predicate)
+
+ return predicates
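
Passing short_version and deployment_type explicitly skips the openshift_facts requirement entirely, which also makes the plugin easy to exercise by hand. A quick sketch (assumes ansible is installed and the lookup_plugins directory is on sys.path):

```python
from openshift_master_facts_default_predicates import LookupModule

lookup = LookupModule()
predicates = lookup.run([], variables={}, regions_enabled=False,
                        short_version='3.9',
                        deployment_type='openshift-enterprise')
for predicate in predicates:
    print(predicate['name'])  # NoVolumeZoneConflict, MaxEBSVolumeCount, ...
```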
diff --git a/roles/lib_utils/lookup_plugins/openshift_master_facts_default_priorities.py b/roles/lib_utils/lookup_plugins/openshift_master_facts_default_priorities.py
new file mode 100644
index 000000000..18e1b2e0c
--- /dev/null
+++ b/roles/lib_utils/lookup_plugins/openshift_master_facts_default_priorities.py
@@ -0,0 +1,117 @@
+# pylint: disable=missing-docstring
+
+import re
+from ansible.errors import AnsibleError
+from ansible.plugins.lookup import LookupBase
+
+
+class LookupModule(LookupBase):
+ # pylint: disable=too-many-branches,too-many-statements,too-many-arguments
+
+ def run(self, terms, variables=None, zones_enabled=True, short_version=None,
+ deployment_type=None, **kwargs):
+
+ priorities = []
+
+ if short_version is None or deployment_type is None:
+ if 'openshift' not in variables:
+ raise AnsibleError("This lookup module requires openshift_facts to be run prior to use")
+
+ if deployment_type is None:
+ if 'common' not in variables['openshift'] or 'deployment_type' not in variables['openshift']['common']:
+ raise AnsibleError("This lookup module requires that the deployment_type be set")
+
+ deployment_type = variables['openshift']['common']['deployment_type']
+
+ if short_version is None:
+ if 'short_version' in variables['openshift']['common']:
+ short_version = variables['openshift']['common']['short_version']
+ elif 'openshift_release' in variables:
+ release = variables['openshift_release']
+ if release.startswith('v'):
+ short_version = release[1:]
+ else:
+ short_version = release
+ short_version = '.'.join(short_version.split('.')[0:2])
+ elif 'openshift_version' in variables:
+ version = variables['openshift_version']
+ short_version = '.'.join(version.split('.')[0:2])
+ else:
+ # pylint: disable=line-too-long
+ raise AnsibleError("Either OpenShift needs to be installed or openshift_release needs to be specified")
+
+ if deployment_type == 'origin':
+ if short_version not in ['1.1', '1.2', '1.3', '1.4', '1.5', '3.6', '3.7', '3.8', '3.9', 'latest']:
+ raise AnsibleError("Unknown short_version %s" % short_version)
+ elif deployment_type == 'openshift-enterprise':
+ if short_version not in ['3.1', '3.2', '3.3', '3.4', '3.5', '3.6', '3.7', '3.8', '3.9', 'latest']:
+ raise AnsibleError("Unknown short_version %s" % short_version)
+ else:
+ raise AnsibleError("Unknown deployment_type %s" % deployment_type)
+
+ if deployment_type == 'origin':
+            # convert short_version to enterprise short_version
+            short_version = re.sub(r'^1\.', '3.', short_version)
+
+ if short_version == 'latest':
+ short_version = '3.9'
+
+ if short_version == '3.1':
+ priorities.extend([
+ {'name': 'LeastRequestedPriority', 'weight': 1},
+ {'name': 'BalancedResourceAllocation', 'weight': 1},
+ {'name': 'SelectorSpreadPriority', 'weight': 1}
+ ])
+
+ if short_version == '3.2':
+ priorities.extend([
+ {'name': 'LeastRequestedPriority', 'weight': 1},
+ {'name': 'BalancedResourceAllocation', 'weight': 1},
+ {'name': 'SelectorSpreadPriority', 'weight': 1},
+ {'name': 'NodeAffinityPriority', 'weight': 1}
+ ])
+
+ if short_version == '3.3':
+ priorities.extend([
+ {'name': 'LeastRequestedPriority', 'weight': 1},
+ {'name': 'BalancedResourceAllocation', 'weight': 1},
+ {'name': 'SelectorSpreadPriority', 'weight': 1},
+ {'name': 'NodeAffinityPriority', 'weight': 1},
+ {'name': 'TaintTolerationPriority', 'weight': 1}
+ ])
+
+ if short_version == '3.4':
+ priorities.extend([
+ {'name': 'LeastRequestedPriority', 'weight': 1},
+ {'name': 'BalancedResourceAllocation', 'weight': 1},
+ {'name': 'SelectorSpreadPriority', 'weight': 1},
+ {'name': 'NodePreferAvoidPodsPriority', 'weight': 10000},
+ {'name': 'NodeAffinityPriority', 'weight': 1},
+ {'name': 'TaintTolerationPriority', 'weight': 1},
+ {'name': 'InterPodAffinityPriority', 'weight': 1}
+ ])
+
+ if short_version in ['3.5', '3.6', '3.7', '3.8', '3.9']:
+ priorities.extend([
+ {'name': 'SelectorSpreadPriority', 'weight': 1},
+ {'name': 'InterPodAffinityPriority', 'weight': 1},
+ {'name': 'LeastRequestedPriority', 'weight': 1},
+ {'name': 'BalancedResourceAllocation', 'weight': 1},
+ {'name': 'NodePreferAvoidPodsPriority', 'weight': 10000},
+ {'name': 'NodeAffinityPriority', 'weight': 1},
+ {'name': 'TaintTolerationPriority', 'weight': 1}
+ ])
+
+ if zones_enabled:
+ zone_priority = {
+ 'name': 'Zone',
+ 'argument': {
+ 'serviceAntiAffinity': {
+ 'label': 'zone'
+ }
+ },
+ 'weight': 2
+ }
+ priorities.append(zone_priority)
+
+ return priorities
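
The priorities lookup behaves the same way; the only flag-driven difference is the Zone anti-affinity entry appended when zones_enabled is true. A sketch under the same assumptions as above:

```python
from openshift_master_facts_default_priorities import LookupModule

lookup = LookupModule()
base = lookup.run([], variables={}, zones_enabled=False,
                  short_version='3.9', deployment_type='openshift-enterprise')
zoned = lookup.run([], variables={}, zones_enabled=True,
                   short_version='3.9', deployment_type='openshift-enterprise')
assert len(zoned) == len(base) + 1
assert zoned[-1] == {'name': 'Zone',
                     'argument': {'serviceAntiAffinity': {'label': 'zone'}},
                     'weight': 2}
```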
diff --git a/roles/openshift_certificate_expiry/test/conftest.py b/roles/lib_utils/test/conftest.py
index df948fff0..aabdd4fa1 100644
--- a/roles/openshift_certificate_expiry/test/conftest.py
+++ b/roles/lib_utils/test/conftest.py
@@ -1,7 +1,15 @@
# pylint: disable=missing-docstring,invalid-name,redefined-outer-name
+import os
import pytest
+import sys
+
from OpenSSL import crypto
+sys.path.insert(1, os.path.join(os.path.dirname(__file__), os.pardir, "lookup_plugins"))
+
+from openshift_master_facts_default_predicates import LookupModule as PredicatesLookupModule # noqa: E402
+from openshift_master_facts_default_priorities import LookupModule as PrioritiesLookupModule # noqa: E402
+
# Parameter list for valid_cert fixture
VALID_CERTIFICATE_PARAMS = [
{
@@ -117,3 +125,48 @@ def valid_cert(request, ca):
'cert_file': cert_file,
'cert': cert
}
+
+
+@pytest.fixture()
+def predicates_lookup():
+ return PredicatesLookupModule()
+
+
+@pytest.fixture()
+def priorities_lookup():
+ return PrioritiesLookupModule()
+
+
+@pytest.fixture()
+def facts():
+ return {
+ 'openshift': {
+ 'common': {}
+ }
+ }
+
+
+@pytest.fixture(params=[True, False])
+def regions_enabled(request):
+ return request.param
+
+
+@pytest.fixture(params=[True, False])
+def zones_enabled(request):
+ return request.param
+
+
+def v_prefix(release):
+ """Prefix a release number with 'v'."""
+ return "v" + release
+
+
+def minor(release):
+ """Add a suffix to release, making 'X.Y' become 'X.Y.Z'."""
+ return release + ".1"
+
+
+@pytest.fixture(params=[str, v_prefix, minor])
+def release_mod(request):
+ """Modifies a release string to alternative valid values."""
+ return request.param
diff --git a/roles/openshift_master_facts/test/openshift_master_facts_bad_input_tests.py b/roles/lib_utils/test/openshift_master_facts_bad_input_tests.py
index e8da1e04a..e8da1e04a 100644
--- a/roles/openshift_master_facts/test/openshift_master_facts_bad_input_tests.py
+++ b/roles/lib_utils/test/openshift_master_facts_bad_input_tests.py
diff --git a/roles/openshift_master_facts/test/conftest.py b/roles/lib_utils/test/openshift_master_facts_conftest.py
index 140cced73..140cced73 100644
--- a/roles/openshift_master_facts/test/conftest.py
+++ b/roles/lib_utils/test/openshift_master_facts_conftest.py
diff --git a/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py b/roles/lib_utils/test/openshift_master_facts_default_predicates_tests.py
index 11aad9f03..11aad9f03 100644
--- a/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py
+++ b/roles/lib_utils/test/openshift_master_facts_default_predicates_tests.py
diff --git a/roles/openshift_master_facts/test/openshift_master_facts_default_priorities_tests.py b/roles/lib_utils/test/openshift_master_facts_default_priorities_tests.py
index 527fc9ff4..527fc9ff4 100644
--- a/roles/openshift_master_facts/test/openshift_master_facts_default_priorities_tests.py
+++ b/roles/lib_utils/test/openshift_master_facts_default_priorities_tests.py
diff --git a/roles/openshift_certificate_expiry/test/test_fakeopensslclasses.py b/roles/lib_utils/test/test_fakeopensslclasses.py
index 8a521a765..8a521a765 100644
--- a/roles/openshift_certificate_expiry/test/test_fakeopensslclasses.py
+++ b/roles/lib_utils/test/test_fakeopensslclasses.py
diff --git a/roles/openshift_certificate_expiry/test/test_load_and_handle_cert.py b/roles/lib_utils/test/test_load_and_handle_cert.py
index 98792e2ee..98792e2ee 100644
--- a/roles/openshift_certificate_expiry/test/test_load_and_handle_cert.py
+++ b/roles/lib_utils/test/test_load_and_handle_cert.py
diff --git a/roles/nickhammond.logrotate/tasks/main.yml b/roles/nickhammond.logrotate/tasks/main.yml
index 677f206ea..50ad7e373 100644
--- a/roles/nickhammond.logrotate/tasks/main.yml
+++ b/roles/nickhammond.logrotate/tasks/main.yml
@@ -1,7 +1,7 @@
---
- name: nickhammond.logrotate | Install logrotate
package: name=logrotate state=present
- when: not openshift.common.is_atomic | bool
+ when: not openshift_is_atomic | bool
register: result
until: result is succeeded
diff --git a/roles/nuage_ca/meta/main.yml b/roles/nuage_ca/meta/main.yml
index 36838debc..0d0b8d1a5 100644
--- a/roles/nuage_ca/meta/main.yml
+++ b/roles/nuage_ca/meta/main.yml
@@ -13,4 +13,4 @@ galaxy_info:
- cloud
- system
dependencies:
-- { role: nuage_common }
+- role: nuage_common
diff --git a/roles/nuage_ca/tasks/main.yaml b/roles/nuage_ca/tasks/main.yaml
index d96d0d802..cb7844bc5 100644
--- a/roles/nuage_ca/tasks/main.yaml
+++ b/roles/nuage_ca/tasks/main.yaml
@@ -1,7 +1,7 @@
---
- name: Install openssl
package: name=openssl state=present
- when: not openshift.common.is_atomic | bool
+ when: not openshift_is_atomic | bool
register: result
until: result is succeeded
diff --git a/roles/nuage_common/tasks/main.yml b/roles/nuage_common/tasks/main.yml
index 6c8c9f8d2..ec42518ff 100644
--- a/roles/nuage_common/tasks/main.yml
+++ b/roles/nuage_common/tasks/main.yml
@@ -2,17 +2,17 @@
- name: Set the Nuage plugin openshift directory fact to handle Atomic host install
set_fact:
nuage_node_plugin_dir: /var/usr/share/vsp-openshift
- when: openshift.common.is_atomic | bool
+ when: openshift_is_atomic | bool
- name: Set the Nuage CNI network config directory fact to handle Atomic host install
set_fact:
nuage_node_cni_netconf_dir: /var/etc/cni/net.d/
- when: openshift.common.is_atomic | bool
+ when: openshift_is_atomic | bool
- name: Set the Nuage CNI binary directory fact to handle Atomic host install
set_fact:
nuage_node_cni_bin_dir: /var/opt/cni/bin/
- when: openshift.common.is_atomic | bool
+ when: openshift_is_atomic | bool
- name: Assure CNI plugin config dir exists before daemon set install
become: yes
diff --git a/roles/nuage_master/meta/main.yml b/roles/nuage_master/meta/main.yml
index e2f7af5ad..643800680 100644
--- a/roles/nuage_master/meta/main.yml
+++ b/roles/nuage_master/meta/main.yml
@@ -14,4 +14,4 @@ galaxy_info:
- system
dependencies:
- role: lib_openshift
-- role: lib_os_firewall
+- role: lib_utils
diff --git a/roles/nuage_master/tasks/main.yaml b/roles/nuage_master/tasks/main.yaml
index c264427de..29e16b6f8 100644
--- a/roles/nuage_master/tasks/main.yaml
+++ b/roles/nuage_master/tasks/main.yaml
@@ -5,22 +5,22 @@
- name: Set the Nuage certificate directory fact for Atomic hosts
set_fact:
cert_output_dir: /var/usr/share/nuage-openshift-monitor
- when: openshift.common.is_atomic | bool
+ when: openshift_is_atomic | bool
- name: Set the Nuage kubeconfig file path fact for Atomic hosts
set_fact:
kube_config: /var/usr/share/nuage-openshift-monitor/nuage.kubeconfig
- when: openshift.common.is_atomic | bool
+ when: openshift_is_atomic | bool
- name: Set the Nuage monitor yaml location fact for Atomic hosts
set_fact:
kubemon_yaml: /var/usr/share/nuage-openshift-monitor/nuage-openshift-monitor.yaml
- when: openshift.common.is_atomic | bool
+ when: openshift_is_atomic | bool
- name: Set the Nuage monitor certs location fact for Atomic hosts
set_fact:
nuage_master_crt_dir: /var/usr/share/nuage-openshift-monitor/
- when: openshift.common.is_atomic | bool
+ when: openshift_is_atomic | bool
- name: Set the Nuage master config directory for daemon sets install
set_fact:
@@ -35,27 +35,27 @@
- name: Set the Nuage CNI plugin binary directory for daemon sets install
set_fact:
nuage_cni_bin_dsets_mount_dir: /var/opt/cni/bin
- when: openshift.common.is_atomic | bool
+ when: openshift_is_atomic | bool
- name: Create directory /usr/share/nuage-openshift-monitor
become: yes
file: path=/usr/share/nuage-openshift-monitor state=directory
- when: not openshift.common.is_atomic | bool
+ when: not openshift_is_atomic | bool
- name: Create directory /var/usr/share/nuage-openshift-monitor
become: yes
file: path=/var/usr/share/nuage-openshift-monitor state=directory
- when: openshift.common.is_atomic | bool
+ when: openshift_is_atomic | bool
- name: Create directory /var/usr/bin for monitor binary on atomic
become: yes
file: path=/var/usr/bin state=directory
- when: openshift.common.is_atomic | bool
+ when: openshift_is_atomic | bool
- name: Create CNI bin directory /var/opt/cni/bin
become: yes
file: path=/var/opt/cni/bin state=directory
- when: openshift.common.is_atomic | bool
+ when: openshift_is_atomic | bool
- name: Create the log directory
become: yes
diff --git a/roles/nuage_master/tasks/serviceaccount.yml b/roles/nuage_master/tasks/serviceaccount.yml
index fbf2c4f8d..9127b33d6 100644
--- a/roles/nuage_master/tasks/serviceaccount.yml
+++ b/roles/nuage_master/tasks/serviceaccount.yml
@@ -19,7 +19,7 @@
- name: Generate the node client config
command: >
- {{ openshift.common.client_binary }} adm create-api-client-config
+ {{ openshift_client_binary }} adm create-api-client-config
--certificate-authority={{ openshift_master_ca_cert }}
--client-dir={{ cert_output_dir }}
--master={{ openshift.master.api_url }}
diff --git a/roles/nuage_node/meta/main.yml b/roles/nuage_node/meta/main.yml
index 9b0315054..0480502b7 100644
--- a/roles/nuage_node/meta/main.yml
+++ b/roles/nuage_node/meta/main.yml
@@ -15,4 +15,4 @@ galaxy_info:
dependencies:
- role: nuage_common
- role: nuage_ca
-- role: lib_os_firewall
+- role: lib_utils
diff --git a/roles/nuage_node/tasks/main.yaml b/roles/nuage_node/tasks/main.yaml
index c6b7a9b10..1f1bd1653 100644
--- a/roles/nuage_node/tasks/main.yaml
+++ b/roles/nuage_node/tasks/main.yaml
@@ -2,17 +2,17 @@
- name: Set the Nuage plugin openshift directory fact for Atomic hosts
set_fact:
vsp_openshift_dir: /var/usr/share/vsp-openshift
- when: openshift.common.is_atomic | bool
+ when: openshift_is_atomic | bool
- name: Set the Nuage CNI binary directory fact for Atomic hosts
set_fact:
cni_bin_dir: /var/opt/cni/bin/
- when: openshift.common.is_atomic | bool
+ when: openshift_is_atomic | bool
- name: Set the Nuage plugin certs directory fact for Atomic hosts
set_fact:
nuage_plugin_crt_dir: /var/usr/share/vsp-openshift
- when: openshift.common.is_atomic | bool
+ when: openshift_is_atomic | bool
- name: Assure CNI conf dir exists
become: yes
@@ -36,7 +36,7 @@
- name: Add additional Docker mounts for Nuage for atomic hosts
become: yes
lineinfile: dest="{{ openshift_atomic_node_config_file }}" line="{{ nuage_atomic_docker_additional_mounts }}"
- when: openshift.common.is_atomic | bool
+ when: openshift_is_atomic | bool
- name: Restart node services
command: /bin/true
diff --git a/roles/openshift_aws/README.md b/roles/openshift_aws/README.md
index 4aca5c7a8..de73ab01d 100644
--- a/roles/openshift_aws/README.md
+++ b/roles/openshift_aws/README.md
@@ -7,9 +7,9 @@ This role contains many task-areas to provision resources and perform actions
against an AWS account for the purposes of dynamically building an openshift
cluster.
-This role is primarily intended to be used with "include_role" and "tasks_from".
+This role is primarily intended to be used with "import_role" and "tasks_from".
-include_role can be called from the tasks section in a play. See example
+import_role can be called from the tasks section in a play. See example
playbook below for reference.
These task-areas are:
@@ -40,7 +40,7 @@ Example Playbook
----------------
```yaml
-- include_role:
+- import_role:
name: openshift_aws
tasks_from: vpc.yml
vars:
diff --git a/roles/openshift_aws/defaults/main.yml b/roles/openshift_aws/defaults/main.yml
index 74e5d1dde..efd2468b2 100644
--- a/roles/openshift_aws/defaults/main.yml
+++ b/roles/openshift_aws/defaults/main.yml
@@ -98,17 +98,26 @@ openshift_aws_elb_dict:
proxy_protocol: True
openshift_aws_node_group_config_master_volumes:
+- device_name: /dev/sda1
+ volume_size: 100
+ device_type: gp2
+ delete_on_termination: False
- device_name: /dev/sdb
volume_size: 100
device_type: gp2
delete_on_termination: False
openshift_aws_node_group_config_node_volumes:
+- device_name: /dev/sda1
+ volume_size: 100
+ device_type: gp2
+ delete_on_termination: True
- device_name: /dev/sdb
volume_size: 100
device_type: gp2
delete_on_termination: True
+# build_instance_tags is a custom filter in role lib_utils
openshift_aws_node_group_config_tags: "{{ openshift_aws_clusterid | build_instance_tags }}"
openshift_aws_node_group_termination_policy: Default
openshift_aws_node_group_replace_instances: []
@@ -122,12 +131,25 @@ openshift_aws_ami_map:
openshift_aws_master_group:
- name: "{{ openshift_aws_clusterid }} master group"
group: master
+ tags:
+ host-type: master
+ sub-host-type: default
+ runtime: docker
openshift_aws_node_groups:
- name: "{{ openshift_aws_clusterid }} compute group"
group: compute
+ tags:
+ host-type: node
+ sub-host-type: compute
+ runtime: docker
+
- name: "{{ openshift_aws_clusterid }} infra group"
group: infra
+ tags:
+ host-type: node
+ sub-host-type: infra
+ runtime: docker
openshift_aws_created_asgs: []
openshift_aws_current_asgs: []
@@ -144,10 +166,6 @@ openshift_aws_master_group_config:
min_size: 3
max_size: 3
desired_size: 3
- tags:
- host-type: master
- sub-host-type: default
- runtime: docker
wait_for_instances: True
termination_policy: "{{ openshift_aws_node_group_termination_policy }}"
replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}"
@@ -167,10 +185,6 @@ openshift_aws_node_group_config:
min_size: 3
max_size: 100
desired_size: 3
- tags:
- host-type: node
- sub-host-type: compute
- runtime: docker
termination_policy: "{{ openshift_aws_node_group_termination_policy }}"
replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}"
iam_role: "{{ openshift_aws_iam_role_name }}"
@@ -186,10 +200,6 @@ openshift_aws_node_group_config:
min_size: 2
max_size: 20
desired_size: 2
- tags:
- host-type: node
- sub-host-type: infra
- runtime: docker
termination_policy: "{{ openshift_aws_node_group_termination_policy }}"
replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}"
iam_role: "{{ openshift_aws_iam_role_name }}"
@@ -200,6 +210,7 @@ openshift_aws_node_group_config:
openshift_aws_elb_tags: "{{ openshift_aws_kube_tags }}"
openshift_aws_elb_az_load_balancing: False
+# build_instance_tags is a custom filter in role lib_utils
openshift_aws_kube_tags: "{{ openshift_aws_clusterid | build_instance_tags }}"
openshift_aws_elb_security_groups: "{{ openshift_aws_launch_config_security_groups }}"
diff --git a/roles/openshift_aws/tasks/build_node_group.yml b/roles/openshift_aws/tasks/build_node_group.yml
index 7fb617dd5..a9f9cc3c4 100644
--- a/roles/openshift_aws/tasks/build_node_group.yml
+++ b/roles/openshift_aws/tasks/build_node_group.yml
@@ -30,7 +30,7 @@
- name: query all asg's for this cluster
ec2_asg_facts:
region: "{{ openshift_aws_region }}"
- tags: "{{ {'kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid} | combine(l_node_group_config[openshift_aws_node_group.group].tags) }}"
+ tags: "{{ {'kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid} | combine(openshift_aws_node_group.tags) }}"
register: asgs
- fail:
@@ -43,6 +43,7 @@
- name: set the value for the deployment_serial and the current asgs
set_fact:
+ # scale_groups_serial is a custom filter in role lib_utils
l_deployment_serial: "{{ openshift_aws_node_group_deployment_serial if openshift_aws_node_group_deployment_serial is defined else asgs.results | scale_groups_serial(openshift_aws_node_group_upgrade) }}"
openshift_aws_current_asgs: "{{ asgs.results | map(attribute='auto_scaling_group_name') | list | union(openshift_aws_current_asgs) }}"
diff --git a/roles/openshift_aws/tasks/provision.yml b/roles/openshift_aws/tasks/provision.yml
index 786a2e4cf..2b5f317d8 100644
--- a/roles/openshift_aws/tasks/provision.yml
+++ b/roles/openshift_aws/tasks/provision.yml
@@ -1,23 +1,6 @@
---
-- when: openshift_aws_create_iam_cert | bool
- name: create the iam_cert for elb certificate
- include_tasks: iam_cert.yml
-
-- when: openshift_aws_create_s3 | bool
- name: create s3 bucket for registry
- include_tasks: s3.yml
-
- include_tasks: vpc_and_subnet_id.yml
-- name: create elbs
- include_tasks: elb.yml
- with_dict: "{{ openshift_aws_elb_dict }}"
- vars:
- l_elb_security_groups: "{{ openshift_aws_elb_security_groups }}"
- l_openshift_aws_elb_name_dict: "{{ openshift_aws_elb_name_dict }}"
- loop_control:
- loop_var: l_elb_dict_item
-
- name: include scale group creation for master
include_tasks: build_node_group.yml
with_items: "{{ openshift_aws_master_group }}"
diff --git a/roles/openshift_aws/tasks/provision_elb.yml b/roles/openshift_aws/tasks/provision_elb.yml
new file mode 100644
index 000000000..a52f63bd5
--- /dev/null
+++ b/roles/openshift_aws/tasks/provision_elb.yml
@@ -0,0 +1,15 @@
+---
+- when: openshift_aws_create_iam_cert | bool
+ name: create the iam_cert for elb certificate
+ include_tasks: iam_cert.yml
+
+- include_tasks: vpc_and_subnet_id.yml
+
+- name: create elbs
+ include_tasks: elb.yml
+ with_dict: "{{ openshift_aws_elb_dict }}"
+ vars:
+ l_elb_security_groups: "{{ openshift_aws_elb_security_groups }}"
+ l_openshift_aws_elb_name_dict: "{{ openshift_aws_elb_name_dict }}"
+ loop_control:
+ loop_var: l_elb_dict_item
diff --git a/roles/openshift_aws/tasks/provision_instance.yml b/roles/openshift_aws/tasks/provision_instance.yml
index 696b323c0..786db1570 100644
--- a/roles/openshift_aws/tasks/provision_instance.yml
+++ b/roles/openshift_aws/tasks/provision_instance.yml
@@ -14,11 +14,7 @@
instance_type: m4.xlarge
vpc_subnet_id: "{{ openshift_aws_subnet_id | default(subnetout.subnets[0].id) }}"
image: "{{ openshift_aws_base_ami }}"
- volumes:
- - device_name: /dev/sdb
- volume_type: gp2
- volume_size: 100
- delete_on_termination: true
+ volumes: "{{ openshift_aws_node_group_config_node_volumes }}"
wait: yes
exact_count: 1
count_tag:
@@ -46,5 +42,5 @@
- name: add host to nodes
add_host:
- groups: nodes
+ groups: nodes,g_new_node_hosts
name: "{{ instancesout.instances[0].public_dns_name }}"
diff --git a/roles/openshift_aws/tasks/provision_nodes.yml b/roles/openshift_aws/tasks/provision_nodes.yml
index d82f18574..9105b5b4c 100644
--- a/roles/openshift_aws/tasks/provision_nodes.yml
+++ b/roles/openshift_aws/tasks/provision_nodes.yml
@@ -2,25 +2,12 @@
# Get bootstrap config token
# bootstrap should be created on first master
# need to fetch it and shove it into cloud data
-- name: fetch master instances
- ec2_instance_facts:
- region: "{{ openshift_aws_region }}"
- filters:
- "tag:clusterid": "{{ openshift_aws_clusterid }}"
- "tag:host-type": master
- instance-state-name: running
- register: instancesout
- retries: 20
- delay: 3
- until:
- - "'instances' in instancesout"
- - instancesout.instances|length > 0
+- include_tasks: setup_master_group.yml
- name: slurp down the bootstrap.kubeconfig
slurp:
src: /etc/origin/master/bootstrap.kubeconfig
- delegate_to: "{{ instancesout.instances[0].public_ip_address }}"
- remote_user: root
+ delegate_to: "{{ groups.masters.0 }}"
register: bootstrap
- name: set_fact for kubeconfig token
diff --git a/roles/openshift_aws/tasks/scale_group.yml b/roles/openshift_aws/tasks/scale_group.yml
index 3632f7ce9..6ce8c58ba 100644
--- a/roles/openshift_aws/tasks/scale_group.yml
+++ b/roles/openshift_aws/tasks/scale_group.yml
@@ -22,7 +22,7 @@
else (l_node_group_config[openshift_aws_node_group.group].replace_all_instances | default(omit)) }}"
tags:
- "{{ openshift_aws_node_group_config_tags
- | combine(l_node_group_config[openshift_aws_node_group.group].tags)
+ | combine(openshift_aws_node_group.tags)
| combine({'deployment_serial': l_deployment_serial, 'ami': openshift_aws_ami_map[openshift_aws_node_group.group] | default(openshift_aws_ami)}) }}"
- name: append the asg name to the openshift_aws_created_asgs fact
diff --git a/roles/openshift_aws/tasks/wait_for_groups.yml b/roles/openshift_aws/tasks/wait_for_groups.yml
index 1f4ef3e1c..3ad876e37 100644
--- a/roles/openshift_aws/tasks/wait_for_groups.yml
+++ b/roles/openshift_aws/tasks/wait_for_groups.yml
@@ -8,6 +8,7 @@
tags:
"{{ {'kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid } }}"
register: qasg
+ # scale_groups_match_capacity is a custom filter in role lib_utils
until: qasg | json_query('results[*]') | scale_groups_match_capacity | bool
delay: 10
retries: 60
diff --git a/roles/openshift_builddefaults/meta/main.yml b/roles/openshift_builddefaults/meta/main.yml
index 422d08400..60ac189a8 100644
--- a/roles/openshift_builddefaults/meta/main.yml
+++ b/roles/openshift_builddefaults/meta/main.yml
@@ -13,3 +13,4 @@ galaxy_info:
- cloud
dependencies:
- role: openshift_facts
+- role: lib_utils
diff --git a/roles/openshift_buildoverrides/meta/main.yml b/roles/openshift_buildoverrides/meta/main.yml
index e9d2e8712..edca92e6f 100644
--- a/roles/openshift_buildoverrides/meta/main.yml
+++ b/roles/openshift_buildoverrides/meta/main.yml
@@ -13,3 +13,4 @@ galaxy_info:
- cloud
dependencies:
- role: openshift_facts
+- role: lib_utils
diff --git a/roles/openshift_buildoverrides/vars/main.yml b/roles/openshift_buildoverrides/vars/main.yml
index cf49a6ebf..df53280c8 100644
--- a/roles/openshift_buildoverrides/vars/main.yml
+++ b/roles/openshift_buildoverrides/vars/main.yml
@@ -9,3 +9,4 @@ buildoverrides_yaml:
imageLabels: "{{ openshift_buildoverrides_image_labels | default(None) }}"
nodeSelector: "{{ openshift_buildoverrides_nodeselectors | default(None) }}"
annotations: "{{ openshift_buildoverrides_annotations | default(None) }}"
+ tolerations: "{{ openshift_buildoverrides_tolerations | default(None) }}"
diff --git a/roles/openshift_ca/meta/main.yml b/roles/openshift_ca/meta/main.yml
index 81b49ce60..b2081efc6 100644
--- a/roles/openshift_ca/meta/main.yml
+++ b/roles/openshift_ca/meta/main.yml
@@ -15,3 +15,4 @@ galaxy_info:
dependencies:
- role: openshift_cli
- role: openshift_facts
+- role: lib_utils
diff --git a/roles/openshift_ca/tasks/main.yml b/roles/openshift_ca/tasks/main.yml
index ea4702248..9c8534c74 100644
--- a/roles/openshift_ca/tasks/main.yml
+++ b/roles/openshift_ca/tasks/main.yml
@@ -9,9 +9,9 @@
- name: Install the base package for admin tooling
package:
- name: "{{ openshift_service_type }}{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}"
+ name: "{{ openshift_service_type }}{{ openshift_pkg_version | default('') | lib_utils_oo_image_tag_to_rpm_version(include_dash=True) }}"
state: present
- when: not openshift.common.is_containerized | bool
+ when: not openshift_is_containerized | bool
register: install_result
until: install_result is succeeded
delegate_to: "{{ openshift_ca_host }}"
@@ -19,7 +19,8 @@
- name: Reload generated facts
openshift_facts:
- when: hostvars[openshift_ca_host].install_result is changed
+ when:
+ - hostvars[openshift_ca_host].install_result | default({'changed':false}) is changed
- name: Create openshift_ca_config_dir if it does not exist
file:
@@ -41,7 +42,7 @@
- set_fact:
master_ca_missing: "{{ False in (g_master_ca_stat_result.results
- | oo_collect(attribute='stat.exists')
+ | lib_utils_oo_collect(attribute='stat.exists')
| list) }}"
run_once: true
@@ -87,11 +88,11 @@
# This should NOT replace the CA due to --overwrite=false when a CA already exists.
- name: Create the master certificates if they do not already exist
command: >
- {{ hostvars[openshift_ca_host].openshift.common.client_binary }} adm ca create-master-certs
- {% for named_ca_certificate in openshift.master.named_certificates | default([]) | oo_collect('cafile') %}
+ {{ hostvars[openshift_ca_host]['first_master_client_binary'] }} adm ca create-master-certs
+ {% for named_ca_certificate in openshift.master.named_certificates | default([]) | lib_utils_oo_collect('cafile') %}
--certificate-authority {{ named_ca_certificate }}
{% endfor %}
- {% for legacy_ca_certificate in g_master_legacy_ca_result.files | default([]) | oo_collect('path') %}
+ {% for legacy_ca_certificate in g_master_legacy_ca_result.files | default([]) | lib_utils_oo_collect('path') %}
--certificate-authority {{ legacy_ca_certificate }}
{% endfor %}
--hostnames={{ hostvars[openshift_ca_host].openshift.common.all_hostnames | join(',') }}
@@ -117,7 +118,7 @@
src: "{{ item }}"
dest: "{{ openshift_ca_clientconfig_tmpdir.stdout }}/"
remote_src: true
- with_items: "{{ g_master_legacy_ca_result.files | default([]) | oo_collect('path') }}"
+ with_items: "{{ g_master_legacy_ca_result.files | default([]) | lib_utils_oo_collect('path') }}"
delegate_to: "{{ openshift_ca_host }}"
run_once: true
- copy:
@@ -137,7 +138,7 @@
- name: Test local loopback context
command: >
- {{ hostvars[openshift_ca_host].openshift.common.client_binary }} config view
+ {{ hostvars[openshift_ca_host]['first_master_client_binary'] }} config view
--config={{ openshift_master_loopback_config }}
changed_when: false
register: loopback_config
@@ -154,9 +155,9 @@
register: openshift_ca_loopback_tmpdir
- name: Generate the loopback master client config
command: >
- {{ hostvars[openshift_ca_host].openshift.common.client_binary }} adm create-api-client-config
+ {{ hostvars[openshift_ca_host]['first_master_client_binary'] }} adm create-api-client-config
--certificate-authority={{ openshift_ca_cert }}
- {% for named_ca_certificate in openshift.master.named_certificates | default([]) | oo_collect('cafile') %}
+ {% for named_ca_certificate in openshift.master.named_certificates | default([]) | lib_utils_oo_collect('cafile') %}
--certificate-authority {{ named_ca_certificate }}
{% endfor %}
--client-dir={{ openshift_ca_loopback_tmpdir.stdout }}
diff --git a/roles/openshift_certificate_expiry/meta/main.yml b/roles/openshift_certificate_expiry/meta/main.yml
index c13b29ba5..6758f5b36 100644
--- a/roles/openshift_certificate_expiry/meta/main.yml
+++ b/roles/openshift_certificate_expiry/meta/main.yml
@@ -13,4 +13,5 @@ galaxy_info:
categories:
- cloud
- system
-dependencies: []
+dependencies:
+- role: lib_utils
diff --git a/roles/openshift_certificate_expiry/tasks/main.yml b/roles/openshift_certificate_expiry/tasks/main.yml
index b5234bd1e..7062b5060 100644
--- a/roles/openshift_certificate_expiry/tasks/main.yml
+++ b/roles/openshift_certificate_expiry/tasks/main.yml
@@ -7,7 +7,6 @@
register: check_results
- name: Generate expiration report HTML
- become: no
run_once: yes
template:
src: cert-expiry-table.html.j2
@@ -17,11 +16,12 @@
- name: Generate the result JSON string
run_once: yes
- set_fact: json_result_string="{{ hostvars|oo_cert_expiry_results_to_json(play_hosts) }}"
+ set_fact:
+ # oo_cert_expiry_results_to_json is a custom filter in role lib_utils
+ json_result_string: "{{ hostvars|oo_cert_expiry_results_to_json(play_hosts) }}"
when: openshift_certificate_expiry_save_json_results|bool
- name: Generate results JSON file
- become: no
run_once: yes
template:
src: save_json_results.j2
diff --git a/roles/openshift_cli/defaults/main.yml b/roles/openshift_cli/defaults/main.yml
index 631a0455e..9faec639f 100644
--- a/roles/openshift_cli/defaults/main.yml
+++ b/roles/openshift_cli/defaults/main.yml
@@ -8,4 +8,4 @@ system_images_registry: "{{ system_images_registry_dict[openshift_deployment_typ
openshift_use_crio_only: False
l_is_system_container_image: "{{ openshift_use_master_system_container | default(openshift_use_system_containers | default(False)) | bool }}"
-l_use_cli_atomic_image: "{{ openshift_use_crio_only or l_is_system_container_image }}"
+l_use_cli_atomic_image: "{{ (openshift_use_crio_only | bool) or (l_is_system_container_image | bool) }}"
diff --git a/roles/openshift_cli/meta/main.yml b/roles/openshift_cli/meta/main.yml
index 5d2b6abed..e531543b9 100644
--- a/roles/openshift_cli/meta/main.yml
+++ b/roles/openshift_cli/meta/main.yml
@@ -13,3 +13,4 @@ galaxy_info:
- cloud
dependencies:
- role: openshift_facts
+- role: lib_utils
diff --git a/roles/openshift_cli/tasks/main.yml b/roles/openshift_cli/tasks/main.yml
index 68d82e436..ae8d1ace0 100644
--- a/roles/openshift_cli/tasks/main.yml
+++ b/roles/openshift_cli/tasks/main.yml
@@ -1,7 +1,7 @@
---
- name: Install clients
package: name={{ openshift_service_type }}-clients{{ openshift_pkg_version | default('') }} state=present
- when: not openshift.common.is_containerized | bool
+ when: not openshift_is_containerized | bool
register: result
until: result is succeeded
@@ -12,13 +12,14 @@
register: pull_result
changed_when: "'Downloaded newer image' in pull_result.stdout"
+ # openshift_container_binary_sync is a custom module in lib_utils
- name: Copy client binaries/symlinks out of CLI image for use on the host
openshift_container_binary_sync:
image: "{{ openshift_cli_image }}"
tag: "{{ openshift_image_tag }}"
backend: "docker"
when:
- - openshift.common.is_containerized | bool
+ - openshift_is_containerized | bool
- not l_use_cli_atomic_image | bool
- block:
@@ -28,13 +29,14 @@
register: pull_result
changed_when: "'Pulling layer' in pull_result.stdout"
+ # openshift_container_binary_sync is a custom module in lib_utils
- name: Copy client binaries/symlinks out of CLI image for use on the host
openshift_container_binary_sync:
image: "{{ '' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift_cli_image }}"
tag: "{{ openshift_image_tag }}"
backend: "atomic"
when:
- - openshift.common.is_containerized | bool
+ - openshift_is_containerized | bool
- l_use_cli_atomic_image | bool
- name: Reload facts to pick up installed OpenShift version
@@ -42,6 +44,6 @@
- name: Install bash completion for oc tools
package: name=bash-completion state=present
- when: not openshift.common.is_containerized | bool
+ when: not openshift_is_containerized | bool
register: result
until: result is succeeded
diff --git a/roles/openshift_cloud_provider/meta/main.yml b/roles/openshift_cloud_provider/meta/main.yml
index 8ab95bf5a..e49cc4430 100644
--- a/roles/openshift_cloud_provider/meta/main.yml
+++ b/roles/openshift_cloud_provider/meta/main.yml
@@ -13,3 +13,4 @@ galaxy_info:
- cloud
dependencies:
- role: openshift_facts
+- role: lib_utils
diff --git a/roles/openshift_cloud_provider/tasks/main.yml b/roles/openshift_cloud_provider/tasks/main.yml
index dff492a69..3513577fa 100644
--- a/roles/openshift_cloud_provider/tasks/main.yml
+++ b/roles/openshift_cloud_provider/tasks/main.yml
@@ -19,3 +19,6 @@
- include_tasks: gce.yml
when: cloudprovider_is_gce | bool
+
+- include_tasks: vsphere.yml
+ when: cloudprovider_is_vsphere | bool
diff --git a/roles/openshift_cloud_provider/tasks/vsphere.yml b/roles/openshift_cloud_provider/tasks/vsphere.yml
new file mode 100644
index 000000000..3a33df241
--- /dev/null
+++ b/roles/openshift_cloud_provider/tasks/vsphere.yml
@@ -0,0 +1,11 @@
+---
+- name: Create cloud config
+ template:
+ dest: "{{ openshift.common.config_base }}/cloudprovider/vsphere.conf"
+ src: vsphere.conf.j2
+  when:
+  - openshift_cloudprovider_vsphere_username is defined
+  - openshift_cloudprovider_vsphere_password is defined
+  - openshift_cloudprovider_vsphere_host is defined
+  - openshift_cloudprovider_vsphere_datacenter is defined
+  - openshift_cloudprovider_vsphere_datastore is defined
diff --git a/roles/openshift_cloud_provider/templates/openstack.conf.j2 b/roles/openshift_cloud_provider/templates/openstack.conf.j2
index 313ee02b4..30f18ffa9 100644
--- a/roles/openshift_cloud_provider/templates/openstack.conf.j2
+++ b/roles/openshift_cloud_provider/templates/openstack.conf.j2
@@ -19,3 +19,7 @@ region = {{ openshift_cloudprovider_openstack_region }}
[LoadBalancer]
subnet-id = {{ openshift_cloudprovider_openstack_lb_subnet_id }}
{% endif %}
+{% if openshift_cloudprovider_openstack_blockstorage_version is defined %}
+[BlockStorage]
+bs-version={{ openshift_cloudprovider_openstack_blockstorage_version }}
+{% endif %}
\ No newline at end of file
diff --git a/roles/openshift_cloud_provider/templates/vsphere.conf.j2 b/roles/openshift_cloud_provider/templates/vsphere.conf.j2
new file mode 100644
index 000000000..84e5e371c
--- /dev/null
+++ b/roles/openshift_cloud_provider/templates/vsphere.conf.j2
@@ -0,0 +1,15 @@
+[Global]
+user = "{{ openshift_cloudprovider_vsphere_username }}"
+password = "{{ openshift_cloudprovider_vsphere_password }}"
+server = "{{ openshift_cloudprovider_vsphere_host }}"
+port = 443
+insecure-flag = 1
+datacenter = {{ openshift_cloudprovider_vsphere_datacenter }}
+datastore = {{ openshift_cloudprovider_vsphere_datastore }}
+{% if openshift_cloudprovider_vsphere_folder is defined %}
+working-dir = /{{ openshift_cloudprovider_vsphere_datacenter }}/vm/{{ openshift_cloudprovider_vsphere_folder }}/
+{% else %}
+working-dir = /{{ openshift_cloudprovider_vsphere_datacenter }}/vm/
+{% endif %}
+[Disk]
+scsicontrollertype = pvscsi
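
The working-dir branch is the only conditional in this template, so it can be checked by rendering the file with jinja2 directly; a sketch with made-up values (omitting the folder variable exercises the else branch):

```python
import jinja2

with open('vsphere.conf.j2') as f:
    template = jinja2.Template(f.read())

print(template.render(
    openshift_cloudprovider_vsphere_username='admin',
    openshift_cloudprovider_vsphere_password='secret',
    openshift_cloudprovider_vsphere_host='vcenter.example.com',
    openshift_cloudprovider_vsphere_datacenter='dc1',
    openshift_cloudprovider_vsphere_datastore='ds1',
))  # renders working-dir = /dc1/vm/
```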
diff --git a/roles/openshift_cloud_provider/vars/main.yml b/roles/openshift_cloud_provider/vars/main.yml
index c9d953f58..e71db80b9 100644
--- a/roles/openshift_cloud_provider/vars/main.yml
+++ b/roles/openshift_cloud_provider/vars/main.yml
@@ -3,3 +3,4 @@ has_cloudprovider: "{{ openshift_cloudprovider_kind | default(None) != None }}"
cloudprovider_is_aws: "{{ has_cloudprovider | bool and openshift_cloudprovider_kind == 'aws' }}"
cloudprovider_is_openstack: "{{ has_cloudprovider | bool and openshift_cloudprovider_kind == 'openstack' }}"
cloudprovider_is_gce: "{{ has_cloudprovider | bool and openshift_cloudprovider_kind == 'gce' }}"
+cloudprovider_is_vsphere: "{{ has_cloudprovider | bool and openshift_cloudprovider_kind == 'vsphere' }}"
diff --git a/roles/openshift_cluster_autoscaler/README.md b/roles/openshift_cluster_autoscaler/README.md
index d775a8a71..137ae0cef 100644
--- a/roles/openshift_cluster_autoscaler/README.md
+++ b/roles/openshift_cluster_autoscaler/README.md
@@ -28,7 +28,7 @@ Example Playbook
remote_user: root
tasks:
- name: include role autoscaler
- include_role:
+ import_role:
name: openshift_cluster_autoscaler
vars:
openshift_clusterid: opstest
diff --git a/roles/openshift_cluster_autoscaler/meta/main.yml b/roles/openshift_cluster_autoscaler/meta/main.yml
index d2bbd2576..543eb6fed 100644
--- a/roles/openshift_cluster_autoscaler/meta/main.yml
+++ b/roles/openshift_cluster_autoscaler/meta/main.yml
@@ -1,3 +1,4 @@
---
dependencies:
- lib_openshift
+- role: lib_utils
diff --git a/roles/openshift_daemonset_config/defaults/main.yml b/roles/openshift_daemonset_config/defaults/main.yml
new file mode 100644
index 000000000..ebe5671d2
--- /dev/null
+++ b/roles/openshift_daemonset_config/defaults/main.yml
@@ -0,0 +1,19 @@
+---
+openshift_daemonset_config_namespace: openshift-node
+openshift_daemonset_config_daemonset_name: ops-node-config
+openshift_daemonset_config_configmap_name: "{{ openshift_daemonset_config_daemonset_name }}"
+openshift_daemonset_config_node_selector:
+ config: config
+openshift_daemonset_config_sa_name: ops
+openshift_daemonset_config_configmap_files: {}
+openshift_daemonset_config_configmap_literals: {}
+openshift_daemonset_config_monitoring: False
+openshift_daemonset_config_interval: 300
+openshift_daemonset_config_script: config.sh
+openshift_daemonset_config_secret_name: operations-config-secret
+openshift_daemonset_config_secrets: {}
+openshift_daemonset_config_runasuser: 0
+openshift_daemonset_config_privileged: True
+openshift_daemonset_config_resources:
+ cpu: 10m
+ memory: 10Mi
diff --git a/roles/openshift_daemonset_config/meta/main.yml b/roles/openshift_daemonset_config/meta/main.yml
new file mode 100644
index 000000000..d2bbd2576
--- /dev/null
+++ b/roles/openshift_daemonset_config/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+- lib_openshift
diff --git a/roles/openshift_daemonset_config/tasks/main.yml b/roles/openshift_daemonset_config/tasks/main.yml
new file mode 100644
index 000000000..450cc9dca
--- /dev/null
+++ b/roles/openshift_daemonset_config/tasks/main.yml
@@ -0,0 +1,58 @@
+---
+- name: add a sa
+ oc_serviceaccount:
+ name: "{{ openshift_daemonset_config_sa_name }}"
+ namespace: "{{ openshift_daemonset_config_namespace }}"
+
+- name: add sa to privileged scc
+ oc_adm_policy_user:
+ namespace: "{{ openshift_daemonset_config_namespace }}"
+ resource_kind: scc
+ resource_name: privileged
+ state: present
+ user: "system:serviceaccount:{{ openshift_daemonset_config_namespace }}:{{ openshift_daemonset_config_sa_name }}"
+
+- name: copy template to disk
+ template:
+ dest: "/tmp/{{ item.name }}"
+ src: "{{ item.name }}.j2"
+ with_items:
+ - name: daemonset.yml
+
+- name: copy files to disk
+ copy:
+ src: "{{ item.key }}"
+ dest: "{{ item.value }}"
+ with_dict: "{{ openshift_daemonset_config_configmap_files }}"
+
+- name: create the namespace
+ oc_project:
+ state: present
+ name: "{{ openshift_daemonset_config_namespace }}"
+
+- name: lay down secrets
+ oc_secret:
+ state: present
+ name: "{{ openshift_daemonset_config_secret_name }}"
+ namespace: "{{ openshift_daemonset_config_namespace }}"
+ delete_after: true
+ contents: "{{ openshift_daemonset_config_secrets }}"
+ when:
+ - openshift_daemonset_config_secrets != {}
+
+- name: create the configmap
+ oc_configmap:
+ state: present
+ name: "{{ openshift_daemonset_config_configmap_name }}"
+ namespace: "{{ openshift_daemonset_config_namespace }}"
+ from_literal: "{{ openshift_daemonset_config_configmap_literals }}"
+ from_file: "{{ openshift_daemonset_config_configmap_files }}"
+
+- name: deploy daemonset
+ oc_obj:
+ state: present
+ namespace: "{{ openshift_daemonset_config_namespace }}" # openshift-node??
+ name: "{{ openshift_daemonset_config_daemonset_name }}"
+ kind: daemonset
+ files:
+ - /tmp/daemonset.yml
diff --git a/roles/openshift_daemonset_config/templates/daemonset.yml.j2 b/roles/openshift_daemonset_config/templates/daemonset.yml.j2
new file mode 100644
index 000000000..9792f6d16
--- /dev/null
+++ b/roles/openshift_daemonset_config/templates/daemonset.yml.j2
@@ -0,0 +1,140 @@
+---
+kind: DaemonSet
+apiVersion: extensions/v1beta1
+metadata:
+ name: {{ openshift_daemonset_config_daemonset_name }}
+ annotations:
+ kubernetes.io/description: |
+ This daemon set manages the operational configuration for a cluster and ensures all nodes have
+ a concrete set of config in place. It could also use a local ansible run against the /host directory.
+spec:
+ selector:
+ matchLabels:
+ app: {{ openshift_daemonset_config_daemonset_name }}
+ confighosts: ops
+ ops.openshift.io/role: operations
+ updateStrategy:
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ app: {{ openshift_daemonset_config_daemonset_name }}
+ confighosts: ops
+ ops.openshift.io/role: operations
+ annotations:
+ scheduler.alpha.kubernetes.io/critical-pod: ''
+ spec:
+{% if openshift_daemonset_config_node_selector is defined and openshift_daemonset_config_node_selector != {} %}
+ nodeSelector: {{ openshift_daemonset_config_node_selector | to_json }}
+{% endif %}
+ serviceAccountName: {{ openshift_daemonset_config_sa_name }}
+ hostNetwork: true
+ hostPID: true
+ hostIPC: true
+ containers:
+ - name: config
+ image: centos:7
+ env:
+ - name: RESYNC_INTERVAL
+ value: "{{ openshift_daemonset_config_interval }}"
+ command:
+ - /bin/bash
+ - -c
+ - |
+ #!/bin/sh
+ set -o errexit
+
+ while true; do
+
+ # execute user defined script
+ sh /opt/config/{{ openshift_daemonset_config_script }}
+
+              # sleep for ${RESYNC_INTERVAL} seconds. exec replaces the
+              # shell, so the container exits after the sleep and the
+              # kubelet restarts it, re-running the config
+              echo "Success, sleeping for ${RESYNC_INTERVAL}s"
+              exec sleep ${RESYNC_INTERVAL}
+ done
+ securityContext:
+ # Must be root to modify host system
+ runAsUser: {{ openshift_daemonset_config_runasuser }}
+ # Permission could be reduced by selecting an appropriate SELinux policy that allows
+ # us to update the named directories
+ privileged: {{ openshift_daemonset_config_privileged }}
+ volumeMounts:
+ # Directory which contains the host volume.
+ - mountPath: /host
+ name: host
+ # Our node configuration
+ - mountPath: /opt/config
+ name: config
+{% if openshift_daemonset_config_secrets != {} %}
+ # Our delivered secrets
+ - mountPath: /opt/secrets
+ name: secrets
+{% endif %}
+ resources:
+ requests:
+ cpu: {{ openshift_daemonset_config_resources.cpu }}
+ memory: {{ openshift_daemonset_config_resources.memory }}
+{% if openshift_daemonset_config_monitoring %}
+ - name: monitoring
+ image: openshifttools/oso-centos7-host-monitoring:latest
+ securityContext:
+ # Must be root to read content
+ runAsUser: 0
+ privileged: true
+
+ volumeMounts:
+ - mountPath: /host
+ name: host
+ readOnly: true
+ - mountPath: /etc/localtime
+ subPath: etc/localtime
+ name: host
+ readOnly: true
+ - mountPath: /sys
+ subPath: sys
+ name: host
+ readOnly: true
+ - mountPath: /var/run/docker.sock
+ subPath: var/run/docker.sock
+ name: host
+ readOnly: true
+ - mountPath: /var/run/openvswitch
+ subPath: var/run/openvswitch
+ name: host
+ readOnly: true
+ - mountPath: /etc/origin
+ subPath: etc/origin
+ name: host
+ readOnly: true
+          - mountPath: /usr/bin/oc
+            subPath: usr/bin/oc
+            name: host
+            readOnly: true
+ - mountPath: /host/var/cache/yum
+ subPath: var/cache/yum
+ name: host
+ - mountPath: /container_setup/monitoring-config.yml
+ subPath: monitoring-config.yaml
+ name: config
+ - mountPath: /opt/config
+ name: config
+ resources:
+ requests:
+ cpu: 10m
+ memory: 10Mi
+{% endif %}
+ volumes:
+ - name: config
+ configMap:
+ name: {{ openshift_daemonset_config_configmap_name }}
+{% if openshift_daemonset_config_secrets != {} %}
+ - name: secrets
+ secret:
+ secretName: {{ openshift_daemonset_config_secret_name }}
+{% endif %}
+ - name: host
+ hostPath:
+ path: /
diff --git a/roles/openshift_default_storage_class/meta/main.yml b/roles/openshift_default_storage_class/meta/main.yml
index d7d57fe39..30671a59a 100644
--- a/roles/openshift_default_storage_class/meta/main.yml
+++ b/roles/openshift_default_storage_class/meta/main.yml
@@ -13,3 +13,4 @@ galaxy_info:
- cloud
dependencies:
- role: lib_openshift
+- role: lib_utils
diff --git a/roles/openshift_docker_gc/meta/main.yml b/roles/openshift_docker_gc/meta/main.yml
index f88a7c533..c8472d8bc 100644
--- a/roles/openshift_docker_gc/meta/main.yml
+++ b/roles/openshift_docker_gc/meta/main.yml
@@ -11,3 +11,4 @@ galaxy_info:
- 7
dependencies:
- role: lib_openshift
+- role: lib_utils
diff --git a/roles/openshift_etcd/meta/main.yml b/roles/openshift_etcd/meta/main.yml
index 0e28fec03..25ae6a936 100644
--- a/roles/openshift_etcd/meta/main.yml
+++ b/roles/openshift_etcd/meta/main.yml
@@ -14,3 +14,4 @@ galaxy_info:
dependencies:
- role: openshift_etcd_facts
- role: etcd
+- role: lib_utils
diff --git a/roles/openshift_etcd_client_certificates/meta/main.yml b/roles/openshift_etcd_client_certificates/meta/main.yml
index fbc72c8a3..6c79d345c 100644
--- a/roles/openshift_etcd_client_certificates/meta/main.yml
+++ b/roles/openshift_etcd_client_certificates/meta/main.yml
@@ -11,4 +11,5 @@ galaxy_info:
- 7
categories:
- cloud
-dependencies: []
+dependencies:
+- role: lib_utils
diff --git a/roles/openshift_etcd_client_certificates/tasks/main.yml b/roles/openshift_etcd_client_certificates/tasks/main.yml
index 7f8b667f0..18d07fc2f 100644
--- a/roles/openshift_etcd_client_certificates/tasks/main.yml
+++ b/roles/openshift_etcd_client_certificates/tasks/main.yml
@@ -1,4 +1,4 @@
---
-- include_role:
+- import_role:
name: etcd
tasks_from: client_certificates
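
Note on the change above: import_role is the static counterpart of include_role. The role's tasks are inlined at playbook parse time, so tags and when: conditions on the importing task propagate into every imported task and show up in --list-tasks. A generic side-by-side sketch (illustrative, not from this repo):

    # static: resolved at parse time; tags/conditionals propagate into the role
    - import_role:
        name: etcd
        tasks_from: client_certificates

    # dynamic: resolved at runtime; supports loops and runtime-computed names
    - include_role:
        name: etcd
        tasks_from: client_certificates
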
diff --git a/roles/openshift_etcd_facts/meta/main.yml b/roles/openshift_etcd_facts/meta/main.yml
index 925aa9f92..5e64a8596 100644
--- a/roles/openshift_etcd_facts/meta/main.yml
+++ b/roles/openshift_etcd_facts/meta/main.yml
@@ -13,3 +13,4 @@ galaxy_info:
- cloud
dependencies:
- role: openshift_facts
+- role: lib_utils
diff --git a/roles/openshift_etcd_facts/vars/main.yml b/roles/openshift_etcd_facts/vars/main.yml
index 0c072b64a..d716c9505 100644
--- a/roles/openshift_etcd_facts/vars/main.yml
+++ b/roles/openshift_etcd_facts/vars/main.yml
@@ -1,6 +1,6 @@
---
-etcd_is_containerized: "{{ openshift.common.is_containerized }}"
-etcd_is_atomic: "{{ openshift.common.is_atomic }}"
+etcd_is_containerized: "{{ openshift_is_containerized | bool }}"
+etcd_is_atomic: "{{ openshift_is_atomic }}"
etcd_hostname: "{{ openshift.common.hostname }}"
etcd_ip: "{{ openshift.common.ip }}"
etcd_cert_subdir: "etcd-{{ openshift.common.hostname }}"
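
These vars now read the flat openshift_is_* variables rather than the openshift.common fact dict. A quick way to inspect what the new variables resolve to on a host (illustrative debug task, not part of the role):

    - debug:
        msg: "containerized={{ openshift_is_containerized | bool }} atomic={{ openshift_is_atomic | bool }}"
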
diff --git a/roles/openshift_examples/defaults/main.yml b/roles/openshift_examples/defaults/main.yml
index e623b33f3..0a6e8f20c 100644
--- a/roles/openshift_examples/defaults/main.yml
+++ b/roles/openshift_examples/defaults/main.yml
@@ -8,7 +8,7 @@ openshift_examples_load_quickstarts: true
content_version: "{{ openshift.common.examples_content_version }}"
-examples_base: "{{ openshift.common.config_base if openshift.common.is_containerized | bool else '/usr/share/openshift' }}/examples"
+examples_base: "{{ openshift.common.config_base if openshift_is_containerized | bool else '/usr/share/openshift' }}/examples"
image_streams_base: "{{ examples_base }}/image-streams"
centos_image_streams:
- "{{ image_streams_base }}/image-streams-centos7.json"
diff --git a/roles/openshift_examples/examples-sync.sh b/roles/openshift_examples/examples-sync.sh
index 68a0e8857..648bf7293 100755
--- a/roles/openshift_examples/examples-sync.sh
+++ b/roles/openshift_examples/examples-sync.sh
@@ -6,7 +6,7 @@
# This script should be run from openshift-ansible/roles/openshift_examples
XPAAS_VERSION=ose-v1.4.7
-ORIGIN_VERSION=${1:-v3.7}
+ORIGIN_VERSION=${1:-v3.9}
RHAMP_TAG=2.0.0.GA
EXAMPLES_BASE=$(pwd)/files/examples/${ORIGIN_VERSION}
find ${EXAMPLES_BASE} -name '*.json' -delete
diff --git a/roles/openshift_examples/files/examples/v3.9/db-templates/mariadb-persistent-template.json b/roles/openshift_examples/files/examples/v3.9/db-templates/mariadb-persistent-template.json
index 217ef11dd..92be8f42e 100644
--- a/roles/openshift_examples/files/examples/v3.9/db-templates/mariadb-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.9/db-templates/mariadb-persistent-template.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "mariadb-persistent",
"annotations": {
- "openshift.io/display-name": "MariaDB (Persistent)",
+ "openshift.io/display-name": "MariaDB",
"description": "MariaDB database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/blob/master/10.1/README.md.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-mariadb",
"tags": "database,mariadb",
diff --git a/roles/openshift_examples/files/examples/v3.9/db-templates/mongodb-persistent-template.json b/roles/openshift_examples/files/examples/v3.9/db-templates/mongodb-persistent-template.json
index 97e4128a4..4e3e64d48 100644
--- a/roles/openshift_examples/files/examples/v3.9/db-templates/mongodb-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.9/db-templates/mongodb-persistent-template.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "mongodb-persistent",
"annotations": {
- "openshift.io/display-name": "MongoDB (Persistent)",
+ "openshift.io/display-name": "MongoDB",
"description": "MongoDB database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/blob/master/3.2/README.md.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-mongodb",
"tags": "database,mongodb",
diff --git a/roles/openshift_examples/files/examples/v3.9/db-templates/mysql-persistent-template.json b/roles/openshift_examples/files/examples/v3.9/db-templates/mysql-persistent-template.json
index 48ac114fd..6ac80f3a0 100644
--- a/roles/openshift_examples/files/examples/v3.9/db-templates/mysql-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.9/db-templates/mysql-persistent-template.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "mysql-persistent",
"annotations": {
- "openshift.io/display-name": "MySQL (Persistent)",
+ "openshift.io/display-name": "MySQL",
"description": "MySQL database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mysql-container/blob/master/5.7/README.md.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-mysql-database",
"tags": "database,mysql",
diff --git a/roles/openshift_examples/files/examples/v3.9/db-templates/postgresql-persistent-template.json b/roles/openshift_examples/files/examples/v3.9/db-templates/postgresql-persistent-template.json
index 8a2d23907..190509112 100644
--- a/roles/openshift_examples/files/examples/v3.9/db-templates/postgresql-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.9/db-templates/postgresql-persistent-template.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "postgresql-persistent",
"annotations": {
- "openshift.io/display-name": "PostgreSQL (Persistent)",
+ "openshift.io/display-name": "PostgreSQL",
"description": "PostgreSQL database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/blob/master/9.5.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-postgresql",
"tags": "database,postgresql",
diff --git a/roles/openshift_examples/files/examples/v3.9/db-templates/redis-persistent-template.json b/roles/openshift_examples/files/examples/v3.9/db-templates/redis-persistent-template.json
index e0e0a88d5..d1103d3af 100644
--- a/roles/openshift_examples/files/examples/v3.9/db-templates/redis-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.9/db-templates/redis-persistent-template.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "redis-persistent",
"annotations": {
- "openshift.io/display-name": "Redis (Persistent)",
+ "openshift.io/display-name": "Redis",
"description": "Redis in-memory data structure store, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/redis-container/blob/master/3.2.\n\nNOTE: You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-redis",
"tags": "database,redis",
diff --git a/roles/openshift_examples/files/examples/v3.9/image-streams/image-streams-centos7.json b/roles/openshift_examples/files/examples/v3.9/image-streams/image-streams-centos7.json
index e7af160d9..ad17b709e 100644
--- a/roles/openshift_examples/files/examples/v3.9/image-streams/image-streams-centos7.json
+++ b/roles/openshift_examples/files/examples/v3.9/image-streams/image-streams-centos7.json
@@ -407,7 +407,7 @@
"annotations": {
"openshift.io/display-name": "Python (Latest)",
"openshift.io/provider-display-name": "Red Hat, Inc.",
- "description": "Build and run Python applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.5/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Python available on OpenShift, including major versions updates.",
+ "description": "Build and run Python applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.6/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Python available on OpenShift, including major versions updates.",
"iconClass": "icon-python",
"tags": "builder,python",
"supports":"python",
@@ -415,7 +415,7 @@
},
"from": {
"kind": "ImageStreamTag",
- "name": "3.5"
+ "name": "3.6"
}
},
{
@@ -485,6 +485,23 @@
"kind": "DockerImage",
"name": "centos/python-35-centos7:latest"
}
+ },
+ {
+ "name": "3.6",
+ "annotations": {
+ "openshift.io/display-name": "Python 3.6",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
+ "description": "Build and run Python 3.6 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.6/README.md.",
+ "iconClass": "icon-python",
+ "tags": "builder,python",
+ "supports":"python:3.6,python",
+ "version": "3.6",
+ "sampleRepo": "https://github.com/openshift/django-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "centos/python-36-centos7:latest"
+ }
}
]
}
@@ -944,7 +961,7 @@
},
"from": {
"kind": "DockerImage",
- "name": "openshift/jenkins-2-centos7:latest"
+ "name": "openshift/jenkins-2-centos7:v3.9"
}
}
]
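
With the 3.6 tag imported, builds can pin to it instead of tracking the floating latest tag. A hypothetical BuildConfig fragment consuming the new stream tag (the object name and output stream are illustrative):

    apiVersion: v1
    kind: BuildConfig
    metadata:
      name: django-ex
    spec:
      source:
        git:
          uri: https://github.com/openshift/django-ex.git
      strategy:
        sourceStrategy:
          from:
            kind: ImageStreamTag
            name: python:3.6
            namespace: openshift
      output:
        to:
          kind: ImageStreamTag
          name: django-ex:latest
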
diff --git a/roles/openshift_examples/files/examples/v3.9/image-streams/image-streams-rhel7.json b/roles/openshift_examples/files/examples/v3.9/image-streams/image-streams-rhel7.json
index 2b082fc75..efc8705f4 100644
--- a/roles/openshift_examples/files/examples/v3.9/image-streams/image-streams-rhel7.json
+++ b/roles/openshift_examples/files/examples/v3.9/image-streams/image-streams-rhel7.json
@@ -407,7 +407,7 @@
"annotations": {
"openshift.io/display-name": "Python (Latest)",
"openshift.io/provider-display-name": "Red Hat, Inc.",
- "description": "Build and run Python applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.5/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Python available on OpenShift, including major versions updates.",
+ "description": "Build and run Python applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.6/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Python available on OpenShift, including major versions updates.",
"iconClass": "icon-python",
"tags": "builder,python",
"supports":"python",
@@ -415,7 +415,7 @@
},
"from": {
"kind": "ImageStreamTag",
- "name": "3.5"
+ "name": "3.6"
}
},
{
@@ -485,6 +485,23 @@
"kind": "DockerImage",
"name": "registry.access.redhat.com/rhscl/python-35-rhel7:latest"
}
+ },
+ {
+ "name": "3.6",
+ "annotations": {
+ "openshift.io/display-name": "Python 3.6",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
+ "description": "Build and run Python 3.6 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.6/README.md.",
+ "iconClass": "icon-python",
+ "tags": "builder,python",
+ "supports":"python:3.6,python",
+ "version": "3.6",
+ "sampleRepo": "https://github.com/openshift/django-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/rhscl/python-36-rhel7:latest"
+ }
}
]
}
@@ -846,7 +863,7 @@
},
"from": {
"kind": "DockerImage",
- "name": "registry.access.redhat.com/openshift3/jenkins-2-rhel7:latest"
+ "name": "registry.access.redhat.com/openshift3/jenkins-2-rhel7:v3.9"
}
}
]
diff --git a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/cakephp-mysql-persistent.json b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/cakephp-mysql-persistent.json
index 86ddc184a..40b4eaa81 100644
--- a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/cakephp-mysql-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/cakephp-mysql-persistent.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "cakephp-mysql-persistent",
"annotations": {
- "openshift.io/display-name": "CakePHP + MySQL (Persistent)",
+ "openshift.io/display-name": "CakePHP + MySQL",
"description": "An example CakePHP application with a MySQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/cakephp-ex/blob/master/README.md.",
"tags": "quickstart,php,cakephp",
"iconClass": "icon-php",
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/cake-ex/blob/master/README.md.",
"labels": {
- "template": "cakephp-mysql-persistent"
+ "template": "cakephp-mysql-persistent",
+ "app": "cakephp-mysql-persistent"
},
"objects": [
{
diff --git a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/cakephp-mysql.json b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/cakephp-mysql.json
index 3c964bd6a..ecd90e495 100644
--- a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/cakephp-mysql.json
+++ b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/cakephp-mysql.json
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/cake-ex/blob/master/README.md.",
"labels": {
- "template": "cakephp-mysql-example"
+ "template": "cakephp-mysql-example",
+ "app": "cakephp-mysql-example"
},
"objects": [
{
diff --git a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/dancer-mysql-persistent.json b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/dancer-mysql-persistent.json
index 0a10c5fbc..17a155600 100644
--- a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/dancer-mysql-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/dancer-mysql-persistent.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "dancer-mysql-persistent",
"annotations": {
- "openshift.io/display-name": "Dancer + MySQL (Persistent)",
+ "openshift.io/display-name": "Dancer + MySQL",
"description": "An example Dancer application with a MySQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/dancer-ex/blob/master/README.md.",
"tags": "quickstart,perl,dancer",
"iconClass": "icon-perl",
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/dancer-ex/blob/master/README.md.",
"labels": {
- "template": "dancer-mysql-persistent"
+ "template": "dancer-mysql-persistent",
+ "app": "dancer-mysql-persistent"
},
"objects": [
{
diff --git a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/dancer-mysql.json b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/dancer-mysql.json
index 6122d5436..abf711535 100644
--- a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/dancer-mysql.json
+++ b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/dancer-mysql.json
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/dancer-ex/blob/master/README.md.",
"labels": {
- "template": "dancer-mysql-example"
+ "template": "dancer-mysql-example",
+ "app": "dancer-mysql-example"
},
"objects": [
{
diff --git a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/django-postgresql-persistent.json b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/django-postgresql-persistent.json
index f3b5838fa..c8dab0b53 100644
--- a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/django-postgresql-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/django-postgresql-persistent.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "django-psql-persistent",
"annotations": {
- "openshift.io/display-name": "Django + PostgreSQL (Persistent)",
+ "openshift.io/display-name": "Django + PostgreSQL",
"description": "An example Django application with a PostgreSQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/django-ex/blob/master/README.md.",
"tags": "quickstart,python,django",
"iconClass": "icon-python",
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/django-ex/blob/master/README.md.",
"labels": {
- "template": "django-psql-persistent"
+ "template": "django-psql-persistent",
+ "app": "django-psql-persistent"
},
"objects": [
{
diff --git a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/django-postgresql.json b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/django-postgresql.json
index b21295df2..6395defda 100644
--- a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/django-postgresql.json
+++ b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/django-postgresql.json
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/django-ex/blob/master/README.md.",
"labels": {
- "template": "django-psql-example"
+ "template": "django-psql-example",
+ "app": "django-psql-example"
},
"objects": [
{
diff --git a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/httpd.json b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/httpd.json
index 3771280bf..e944f21a5 100644
--- a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/httpd.json
+++ b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/httpd.json
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/httpd-ex/blob/master/README.md.",
"labels": {
- "template": "httpd-example"
+ "template": "httpd-example",
+ "app": "httpd-example"
},
"objects": [
{
@@ -198,12 +199,7 @@
}
},
"env": [
- ],
- "resources": {
- "limits": {
- "memory": "${MEMORY_LIMIT}"
- }
- }
+ ]
}
]
}
diff --git a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/jenkins-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/jenkins-ephemeral-template.json
index 28b4b9d81..87ae6ed14 100644
--- a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/jenkins-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/jenkins-ephemeral-template.json
@@ -15,6 +15,10 @@
}
},
"message": "A Jenkins service has been created in your project. Log into Jenkins with your OpenShift account. The tutorial at https://github.com/openshift/origin/blob/master/examples/jenkins/README.md contains more information about using this template.",
+ "labels": {
+ "app": "jenkins-ephemeral",
+ "template": "jenkins-ephemeral-template"
+ },
"objects": [
{
"kind": "Route",
@@ -275,10 +279,7 @@
"name": "JENKINS_IMAGE_STREAM_TAG",
"displayName": "Jenkins ImageStreamTag",
"description": "Name of the ImageStreamTag to be used for the Jenkins image.",
- "value": "jenkins:latest"
+ "value": "jenkins:2"
}
- ],
- "labels": {
- "template": "jenkins-ephemeral-template"
- }
+ ]
}
diff --git a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/jenkins-persistent-template.json b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/jenkins-persistent-template.json
index 4915bb12c..95d15b55f 100644
--- a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/jenkins-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/jenkins-persistent-template.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "jenkins-persistent",
"annotations": {
- "openshift.io/display-name": "Jenkins (Persistent)",
+ "openshift.io/display-name": "Jenkins",
"description": "Jenkins service, with persistent storage.\n\nNOTE: You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-jenkins",
"tags": "instant-app,jenkins",
@@ -15,6 +15,10 @@
}
},
"message": "A Jenkins service has been created in your project. Log into Jenkins with your OpenShift account. The tutorial at https://github.com/openshift/origin/blob/master/examples/jenkins/README.md contains more information about using this template.",
+ "labels": {
+ "app": "jenkins-persistent",
+ "template": "jenkins-persistent-template"
+ },
"objects": [
{
"kind": "Route",
@@ -299,10 +303,7 @@
"name": "JENKINS_IMAGE_STREAM_TAG",
"displayName": "Jenkins ImageStreamTag",
"description": "Name of the ImageStreamTag to be used for the Jenkins image.",
- "value": "jenkins:latest"
+ "value": "jenkins:2"
}
- ],
- "labels": {
- "template": "jenkins-persistent-template"
- }
+ ]
}
diff --git a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/nodejs-mongodb-persistent.json b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/nodejs-mongodb-persistent.json
index 7f2a5d804..f04adaa67 100644
--- a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/nodejs-mongodb-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/nodejs-mongodb-persistent.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "nodejs-mongo-persistent",
"annotations": {
- "openshift.io/display-name": "Node.js + MongoDB (Persistent)",
+ "openshift.io/display-name": "Node.js + MongoDB",
"description": "An example Node.js application with a MongoDB database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/nodejs-ex/blob/master/README.md.",
"tags": "quickstart,nodejs",
"iconClass": "icon-nodejs",
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/nodejs-ex/blob/master/README.md.",
"labels": {
- "template": "nodejs-mongo-persistent"
+ "template": "nodejs-mongo-persistent",
+ "app": "nodejs-mongo-persistent"
},
"objects": [
{
diff --git a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/nodejs-mongodb.json b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/nodejs-mongodb.json
index b3afae46e..0ce36dba5 100644
--- a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/nodejs-mongodb.json
+++ b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/nodejs-mongodb.json
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/nodejs-ex/blob/master/README.md.",
"labels": {
- "template": "nodejs-mongodb-example"
+ "template": "nodejs-mongodb-example",
+ "app": "nodejs-mongodb-example"
},
"objects": [
{
diff --git a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/rails-postgresql-persistent.json b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/rails-postgresql-persistent.json
index 1c03be28a..10e9382cc 100644
--- a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/rails-postgresql-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/rails-postgresql-persistent.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "rails-pgsql-persistent",
"annotations": {
- "openshift.io/display-name": "Rails + PostgreSQL (Persistent)",
+ "openshift.io/display-name": "Rails + PostgreSQL",
"description": "An example Rails application with a PostgreSQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/rails-ex/blob/master/README.md.",
"tags": "quickstart,ruby,rails",
"iconClass": "icon-ruby",
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/rails-ex/blob/master/README.md.",
"labels": {
- "template": "rails-pgsql-persistent"
+ "template": "rails-pgsql-persistent",
+ "app": "rails-pgsql-persistent"
},
"objects": [
{
diff --git a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/rails-postgresql.json b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/rails-postgresql.json
index 240289d33..8ec2c8ea6 100644
--- a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/rails-postgresql.json
+++ b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/rails-postgresql.json
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/rails-ex/blob/master/README.md.",
"labels": {
- "template": "rails-postgresql-example"
+ "template": "rails-postgresql-example",
+ "app": "rails-postgresql-example"
},
"objects": [
{
diff --git a/roles/openshift_examples/meta/main.yml b/roles/openshift_examples/meta/main.yml
index f3fe2dcbe..9f46a4683 100644
--- a/roles/openshift_examples/meta/main.yml
+++ b/roles/openshift_examples/meta/main.yml
@@ -11,4 +11,6 @@ galaxy_info:
- 7
categories:
- cloud
-dependencies: []
+dependencies:
+- role: lib_utils
+- role: openshift_facts
diff --git a/roles/openshift_examples/tasks/main.yml b/roles/openshift_examples/tasks/main.yml
index 356317431..7787da4f0 100644
--- a/roles/openshift_examples/tasks/main.yml
+++ b/roles/openshift_examples/tasks/main.yml
@@ -13,18 +13,23 @@
# use it either due to changes introduced in Ansible 2.x.
- name: Create local temp dir for OpenShift examples copy
local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX
- become: False
register: copy_examples_mktemp
run_once: True
+- name: Chmod local temp dir for OpenShift examples copy
+ local_action: command chmod 777 "{{ copy_examples_mktemp.stdout }}"
+ run_once: True
+
- name: Create tar of OpenShift examples
local_action: command tar -C "{{ role_path }}/files/examples/{{ content_version }}/" -cvf "{{ copy_examples_mktemp.stdout }}/openshift-examples.tar" .
args:
# Disables the following warning:
# Consider using unarchive module rather than running tar
warn: no
- become: False
- register: copy_examples_tar
+
+- name: Chmod local temp tar file for OpenShift examples copy
+ local_action: command chmod 744 "{{ copy_examples_mktemp.stdout }}/openshift-examples.tar"
+ run_once: True
- name: Create the remote OpenShift examples directory
file:
@@ -38,7 +43,6 @@
dest: "{{ examples_base }}/"
- name: Cleanup the OpenShift Examples temp dir
- become: False
local_action: file dest="{{ copy_examples_mktemp.stdout }}" state=absent
# Done copying examples
@@ -53,7 +57,7 @@
# RHEL and Centos image streams are mutually exclusive
- name: Import RHEL streams
command: >
- {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ item }}
+ {{ openshift_client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ item }}
when: openshift_examples_load_rhel | bool
with_items:
- "{{ rhel_image_streams }}"
@@ -63,7 +67,7 @@
- name: Import Centos Image streams
command: >
- {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ item }}
+ {{ openshift_client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ item }}
when: openshift_examples_load_centos | bool
with_items:
- "{{ centos_image_streams }}"
@@ -73,7 +77,7 @@
- name: Import db templates
command: >
- {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ db_templates_base }}
+ {{ openshift_client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ db_templates_base }}
when: openshift_examples_load_db_templates | bool
register: oex_import_db_templates
failed_when: "'already exists' not in oex_import_db_templates.stderr and oex_import_db_templates.rc != 0"
@@ -90,7 +94,7 @@
- "{{ quickstarts_base }}/django.json"
- name: Remove defunct quickstart templates from openshift namespace
- command: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift delete templates/{{ item }}"
+ command: "{{ openshift_client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift delete templates/{{ item }}"
with_items:
- nodejs-example
- cakephp-example
@@ -102,7 +106,7 @@
- name: Import quickstart-templates
command: >
- {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ quickstarts_base }}
+ {{ openshift_client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ quickstarts_base }}
when: openshift_examples_load_quickstarts | bool
register: oex_import_quickstarts
failed_when: "'already exists' not in oex_import_quickstarts.stderr and oex_import_quickstarts.rc != 0"
@@ -116,7 +120,7 @@
- "{{ xpaas_templates_base }}/sso70-basic.json"
- name: Remove old xPaas templates from openshift namespace
- command: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift delete templates/{{ item }}"
+ command: "{{ openshift_client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift delete templates/{{ item }}"
with_items:
- sso70-basic
register: oex_delete_old_xpaas_templates
@@ -125,7 +129,7 @@
- name: Import xPaas image streams
command: >
- {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ xpaas_image_streams }}
+ {{ openshift_client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ xpaas_image_streams }}
when: openshift_examples_load_xpaas | bool
register: oex_import_xpaas_streams
failed_when: "'already exists' not in oex_import_xpaas_streams.stderr and oex_import_xpaas_streams.rc != 0"
@@ -133,7 +137,7 @@
- name: Import xPaas templates
command: >
- {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ xpaas_templates_base }}
+ {{ openshift_client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ xpaas_templates_base }}
when: openshift_examples_load_xpaas | bool
register: oex_import_xpaas_templates
failed_when: "'already exists' not in oex_import_xpaas_templates.stderr and oex_import_xpaas_templates.rc != 0"
diff --git a/roles/openshift_excluder/tasks/install.yml b/roles/openshift_excluder/tasks/install.yml
index ad7c00d14..6532d7fe2 100644
--- a/roles/openshift_excluder/tasks/install.yml
+++ b/roles/openshift_excluder/tasks/install.yml
@@ -1,14 +1,14 @@
---
- when:
- - not openshift.common.is_atomic | bool
+ - not openshift_is_atomic | bool
- r_openshift_excluder_install_ran is not defined
block:
- name: Install docker excluder - yum
package:
- name: "{{ r_openshift_excluder_service_type }}-docker-excluder{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) + '*' }}"
+ name: "{{ r_openshift_excluder_service_type }}-docker-excluder{{ openshift_pkg_version | default('') | lib_utils_oo_image_tag_to_rpm_version(include_dash=True) + '*' }}"
state: "{{ r_openshift_excluder_docker_package_state }}"
when:
- r_openshift_excluder_enable_docker_excluder | bool
@@ -23,7 +23,7 @@
# https://bugzilla.redhat.com/show_bug.cgi?id=1199432
- name: Install docker excluder - dnf
package:
- name: "{{ r_openshift_excluder_service_type }}-docker-excluder{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}"
+ name: "{{ r_openshift_excluder_service_type }}-docker-excluder{{ openshift_pkg_version | default('') | lib_utils_oo_image_tag_to_rpm_version(include_dash=True) }}"
state: "{{ r_openshift_excluder_docker_package_state }}"
when:
- r_openshift_excluder_enable_docker_excluder | bool
@@ -33,7 +33,7 @@
- name: Install openshift excluder - yum
package:
- name: "{{ r_openshift_excluder_service_type }}-excluder{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) + '*' }}"
+ name: "{{ r_openshift_excluder_service_type }}-excluder{{ openshift_pkg_version | default('') | lib_utils_oo_image_tag_to_rpm_version(include_dash=True) + '*' }}"
state: "{{ r_openshift_excluder_package_state }}"
when:
- r_openshift_excluder_enable_openshift_excluder | bool
@@ -47,7 +47,7 @@
# https://bugzilla.redhat.com/show_bug.cgi?id=1199432
- name: Install openshift excluder - dnf
package:
- name: "{{ r_openshift_excluder_service_type }}-excluder{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}"
+ name: "{{ r_openshift_excluder_service_type }}-excluder{{ openshift_pkg_version | default('') | lib_utils_oo_image_tag_to_rpm_version(include_dash=True) }}"
state: "{{ r_openshift_excluder_package_state }}"
when:
- r_openshift_excluder_enable_openshift_excluder | bool
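
The lib_utils_ prefix only namespaces the filter; the behavior is unchanged: it strips a leading "v" from an image tag and, with include_dash=True, prepends a dash so the result can be appended to a package name. A worked example under an assumed openshift_pkg_version of 'v3.9.0' (illustrative debug task):

    - debug:
        msg: "origin-docker-excluder{{ 'v3.9.0' | lib_utils_oo_image_tag_to_rpm_version(include_dash=True) + '*' }}"
      # renders: origin-docker-excluder-3.9.0*
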
diff --git a/roles/openshift_excluder/tasks/verify_excluder.yml b/roles/openshift_excluder/tasks/verify_excluder.yml
index 4f5277fa2..22a3fcd3b 100644
--- a/roles/openshift_excluder/tasks/verify_excluder.yml
+++ b/roles/openshift_excluder/tasks/verify_excluder.yml
@@ -3,7 +3,7 @@
# - excluder
- name: Get available excluder version
repoquery:
- name: "{{ excluder }}"
+ name: "{{ excluder }}{{ '-' ~ r_openshift_excluder_upgrade_target.split('.')[0:2] | join('.') ~ '*' if r_openshift_excluder_upgrade_target is defined else '' }}"
ignore_excluders: true
register: repoquery_out
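
The new name expression pins repoquery to the upgrade target's major.minor series. A worked example under assumed values (not from the role): with excluder set to 'atomic-openshift-excluder' and r_openshift_excluder_upgrade_target set to '3.9.14',

    - debug:
        msg: "{{ 'atomic-openshift-excluder' }}{{ '-' ~ '3.9.14'.split('.')[0:2] | join('.') ~ '*' }}"
      # renders: atomic-openshift-excluder-3.9*

When the target is undefined, the expression falls back to the bare excluder name as before.
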
diff --git a/roles/openshift_expand_partition/README.md b/roles/openshift_expand_partition/README.md
index c9c7b378c..402c3dc3e 100644
--- a/roles/openshift_expand_partition/README.md
+++ b/roles/openshift_expand_partition/README.md
@@ -45,7 +45,6 @@ space on /dev/xvda, and the file system will be expanded to fill the new
partition space.
- hosts: mynodes
- become: no
remote_user: root
gather_facts: no
roles:
@@ -68,7 +67,6 @@ partition space.
* Create an ansible playbook, say `expandvar.yaml`:
```
- hosts: mynodes
- become: no
remote_user: root
gather_facts: no
roles:
diff --git a/roles/openshift_expand_partition/tasks/main.yml b/roles/openshift_expand_partition/tasks/main.yml
index c7e21ba99..b38ebdfb4 100644
--- a/roles/openshift_expand_partition/tasks/main.yml
+++ b/roles/openshift_expand_partition/tasks/main.yml
@@ -1,16 +1,16 @@
---
- name: Ensure growpart is installed
package: name=cloud-utils-growpart state=present
- when: not openshift.common.is_containerized | bool
+ when: not openshift_is_containerized | bool
register: result
until: result is succeeded
- name: Determine if growpart is installed
command: "rpm -q cloud-utils-growpart"
register: has_growpart
- failed_when: has_growpart.cr != 0 and 'package cloud-utils-growpart is not installed' not in has_growpart.stdout
+ failed_when: has_growpart.rc != 0 and 'package cloud-utils-growpart is not installed' not in has_growpart.stdout
changed_when: false
- when: openshift.common.is_containerized | bool
+ when: openshift_is_containerized | bool
- name: Grow the partitions
command: "growpart {{oep_drive}} {{oep_partition}}"
diff --git a/roles/openshift_facts/defaults/main.yml b/roles/openshift_facts/defaults/main.yml
index 53a3bc87e..a223ffba6 100644
--- a/roles/openshift_facts/defaults/main.yml
+++ b/roles/openshift_facts/defaults/main.yml
@@ -1,8 +1,13 @@
---
+openshift_client_binary: "{{ (openshift_is_containerized | bool) | ternary('/usr/local/bin/oc', 'oc') }}"
+
openshift_cli_image_dict:
origin: 'openshift/origin'
openshift-enterprise: 'openshift3/ose'
+repoquery_cmd: "{{ (ansible_pkg_mgr == 'dnf') | ternary('dnf repoquery --latest-limit 1 -d 0', 'repoquery --plugins') }}"
+repoquery_installed: "{{ (ansible_pkg_mgr == 'dnf') | ternary('dnf repoquery --latest-limit 1 -d 0 --disableexcludes=all --installed', 'repoquery --plugins --installed') }}"
+
openshift_hosted_images_dict:
origin: 'openshift/origin-${component}:${version}'
openshift-enterprise: 'openshift3/ose-${component}:${version}'
@@ -94,11 +99,6 @@ openshift_prometheus_alertbuffer_storage_access_modes:
openshift_prometheus_alertbuffer_storage_create_pv: True
openshift_prometheus_alertbuffer_storage_create_pvc: False
-
-openshift_router_selector: "region=infra"
-openshift_hosted_router_selector: "{{ openshift_router_selector }}"
-openshift_hosted_registry_selector: "{{ openshift_router_selector }}"
-
openshift_service_type_dict:
origin: origin
openshift-enterprise: atomic-openshift
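
The new defaults lean on Jinja's ternary filter, which returns its first argument when the boolean is true and the second otherwise. For instance (illustrative):

    - debug:
        msg: "{{ (openshift_is_containerized | bool) | ternary('/usr/local/bin/oc', 'oc') }}"
      # containerized hosts resolve to /usr/local/bin/oc, RPM installs to plain 'oc'
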
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index a10ba9310..d7c358a2f 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -69,22 +69,6 @@ def migrate_common_facts(facts):
return facts
-def migrate_node_facts(facts):
- """ Migrate facts from various roles into node """
- params = {
- 'common': ('dns_ip'),
- }
- if 'node' not in facts:
- facts['node'] = {}
- # pylint: disable=consider-iterating-dictionary
- for role in params.keys():
- if role in facts:
- for param in params[role]:
- if param in facts[role]:
- facts['node'][param] = facts[role].pop(param)
- return facts
-
-
def migrate_admission_plugin_facts(facts):
""" Apply migrations for admission plugin facts """
if 'master' in facts:
@@ -104,7 +88,6 @@ def migrate_local_facts(facts):
""" Apply migrations of local facts """
migrated_facts = copy.deepcopy(facts)
migrated_facts = migrate_common_facts(migrated_facts)
- migrated_facts = migrate_node_facts(migrated_facts)
migrated_facts = migrate_admission_plugin_facts(migrated_facts)
return migrated_facts
@@ -536,8 +519,7 @@ def set_aggregate_facts(facts):
def set_deployment_facts_if_unset(facts):
""" Set Facts that vary based on deployment_type. This currently
- includes master.registry_url, node.registry_url,
- node.storage_plugin_deps
+ includes master.registry_url
Args:
facts (dict): existing facts
@@ -545,29 +527,17 @@ def set_deployment_facts_if_unset(facts):
dict: the facts dict updated with the generated deployment_type
facts
"""
- # disabled to avoid breaking up facts related to deployment type into
- # multiple methods for now.
- # pylint: disable=too-many-statements, too-many-branches
- for role in ('master', 'node'):
- if role in facts:
- deployment_type = facts['common']['deployment_type']
- if 'registry_url' not in facts[role]:
- registry_url = 'openshift/origin-${component}:${version}'
- if deployment_type == 'openshift-enterprise':
- registry_url = 'openshift3/ose-${component}:${version}'
- facts[role]['registry_url'] = registry_url
-
if 'master' in facts:
deployment_type = facts['common']['deployment_type']
openshift_features = ['Builder', 'S2IBuilder', 'WebConsole']
if 'disabled_features' not in facts['master']:
if facts['common']['deployment_subtype'] == 'registry':
facts['master']['disabled_features'] = openshift_features
-
- if 'node' in facts:
- deployment_type = facts['common']['deployment_type']
- if 'storage_plugin_deps' not in facts['node']:
- facts['node']['storage_plugin_deps'] = ['ceph', 'glusterfs', 'iscsi']
+ if 'registry_url' not in facts['master']:
+ registry_url = 'openshift/origin-${component}:${version}'
+ if deployment_type == 'openshift-enterprise':
+ registry_url = 'openshift3/ose-${component}:${version}'
+ facts['master']['registry_url'] = registry_url
return facts
@@ -686,26 +656,6 @@ def set_nodename(facts):
return facts
-def migrate_oauth_template_facts(facts):
- """
- Migrate an old oauth template fact to a newer format if it's present.
-
- The legacy 'oauth_template' fact was just a filename, and assumed you were
- setting the 'login' template.
-
- The new pluralized 'oauth_templates' fact is a dict mapping the template
- name to a filename.
-
- Simplify the code after this by merging the old fact into the new.
- """
- if 'master' in facts and 'oauth_template' in facts['master']:
- if 'oauth_templates' not in facts['master']:
- facts['master']['oauth_templates'] = {"login": facts['master']['oauth_template']}
- elif 'login' not in facts['master']['oauth_templates']:
- facts['master']['oauth_templates']['login'] = facts['master']['oauth_template']
- return facts
-
-
def format_url(use_ssl, hostname, port, path=''):
""" Format url based on ssl flag, hostname, port and path
@@ -792,62 +742,6 @@ def get_current_config(facts):
return current_config
-def build_kubelet_args(facts):
- """Build node kubelet_args
-
-In the node-config.yaml file, kubeletArgument sub-keys have their
-values provided as a list. Hence the gratuitous use of ['foo'] below.
- """
- cloud_cfg_path = os.path.join(
- facts['common']['config_base'],
- 'cloudprovider')
-
- # We only have to do this stuff on hosts that are nodes
- if 'node' in facts:
- # Any changes to the kubeletArguments parameter are stored
- # here first.
- kubelet_args = {}
-
- if 'cloudprovider' in facts:
- # EVERY cloud is special <3
- if 'kind' in facts['cloudprovider']:
- if facts['cloudprovider']['kind'] == 'aws':
- kubelet_args['cloud-provider'] = ['aws']
- kubelet_args['cloud-config'] = [cloud_cfg_path + '/aws.conf']
- if facts['cloudprovider']['kind'] == 'openstack':
- kubelet_args['cloud-provider'] = ['openstack']
- kubelet_args['cloud-config'] = [cloud_cfg_path + '/openstack.conf']
- if facts['cloudprovider']['kind'] == 'gce':
- kubelet_args['cloud-provider'] = ['gce']
- kubelet_args['cloud-config'] = [cloud_cfg_path + '/gce.conf']
-
- # Automatically add node-labels to the kubeletArguments
- # parameter. See BZ1359848 for additional details.
- #
- # Ref: https://bugzilla.redhat.com/show_bug.cgi?id=1359848
- if 'labels' in facts['node'] and isinstance(facts['node']['labels'], dict):
- # tl;dr: os_node_labels="{'foo': 'bar', 'a': 'b'}" turns
- # into ['foo=bar', 'a=b']
- #
- # On the openshift_node_labels inventory variable we loop
- # over each key-value tuple (from .items()) and join the
- # key to the value with an '=' character, this produces a
- # list.
- #
- # map() seems to be returning an itertools.imap object
- # instead of a list. We cast it to a list ourselves.
- # pylint: disable=unnecessary-lambda
- labels_str = list(map(lambda x: '='.join(x), facts['node']['labels'].items()))
- if labels_str != '':
- kubelet_args['node-labels'] = labels_str
-
- # If we've added items to the kubelet_args dict then we need
- # to merge the new items back into the main facts object.
- if kubelet_args != {}:
- facts = merge_facts({'node': {'kubelet_args': kubelet_args}}, facts, [])
- return facts
-
-
def build_controller_args(facts):
""" Build master controller_args """
cloud_cfg_path = os.path.join(facts['common']['config_base'],
@@ -973,7 +867,7 @@ def get_openshift_version(facts):
if os.path.isfile('/usr/bin/openshift'):
_, output, _ = module.run_command(['/usr/bin/openshift', 'version']) # noqa: F405
version = parse_openshift_version(output)
- elif 'common' in facts and 'is_containerized' in facts['common']:
+ else:
version = get_container_openshift_version(facts)
# Handle containerized masters that have not yet been configured as a node.
@@ -1364,75 +1258,10 @@ def set_container_facts_if_unset(facts):
dict: the facts dict updated with the generated containerization
facts
"""
- deployment_type = facts['common']['deployment_type']
- if deployment_type == 'openshift-enterprise':
- master_image = 'openshift3/ose'
- node_image = 'openshift3/node'
- ovs_image = 'openshift3/openvswitch'
- pod_image = 'openshift3/ose-pod'
- router_image = 'openshift3/ose-haproxy-router'
- registry_image = 'openshift3/ose-docker-registry'
- deployer_image = 'openshift3/ose-deployer'
- else:
- master_image = 'openshift/origin'
- node_image = 'openshift/node'
- ovs_image = 'openshift/openvswitch'
- pod_image = 'openshift/origin-pod'
- router_image = 'openshift/origin-haproxy-router'
- registry_image = 'openshift/origin-docker-registry'
- deployer_image = 'openshift/origin-deployer'
-
- facts['common']['is_atomic'] = os.path.isfile('/run/ostree-booted')
-
- if 'is_containerized' not in facts['common']:
- facts['common']['is_containerized'] = facts['common']['is_atomic']
- if 'pod_image' not in facts['common']:
- facts['common']['pod_image'] = pod_image
- if 'router_image' not in facts['common']:
- facts['common']['router_image'] = router_image
- if 'registry_image' not in facts['common']:
- facts['common']['registry_image'] = registry_image
- if 'deployer_image' not in facts['common']:
- facts['common']['deployer_image'] = deployer_image
- if 'master' in facts and 'master_image' not in facts['master']:
- facts['master']['master_image'] = master_image
- facts['master']['master_system_image'] = master_image
- if 'node' in facts:
- if 'node_image' not in facts['node']:
- facts['node']['node_image'] = node_image
- facts['node']['node_system_image'] = node_image
- if 'ovs_image' not in facts['node']:
- facts['node']['ovs_image'] = ovs_image
- facts['node']['ovs_system_image'] = ovs_image
-
- if safe_get_bool(facts['common']['is_containerized']):
- facts['common']['client_binary'] = '/usr/local/bin/oc'
return facts
-def set_installed_variant_rpm_facts(facts):
- """ Set RPM facts of installed variant
- Args:
- facts (dict): existing facts
- Returns:
- dict: the facts dict updated with installed_variant_rpms
- """
- installed_rpms = []
- for base_rpm in ['openshift', 'atomic-openshift', 'origin']:
- optional_rpms = ['master', 'node', 'clients', 'sdn-ovs']
- variant_rpms = [base_rpm] + \
- ['{0}-{1}'.format(base_rpm, r) for r in optional_rpms] + \
- ['tuned-profiles-%s-node' % base_rpm]
- for rpm in variant_rpms:
- exit_code, _, _ = module.run_command(['rpm', '-q', rpm]) # noqa: F405
- if exit_code == 0:
- installed_rpms.append(rpm)
-
- facts['common']['installed_variant_rpms'] = installed_rpms
- return facts
-
-
class OpenShiftFactsInternalError(Exception):
"""Origin Facts Error"""
pass
@@ -1538,14 +1367,12 @@ class OpenShiftFacts(object):
facts = merge_facts(facts,
local_facts,
additive_facts_to_overwrite)
- facts = migrate_oauth_template_facts(facts)
facts['current_config'] = get_current_config(facts)
facts = set_url_facts_if_unset(facts)
facts = set_identity_providers_if_unset(facts)
facts = set_deployment_facts_if_unset(facts)
facts = set_sdn_facts_if_unset(facts, self.system_facts)
facts = set_container_facts_if_unset(facts)
- facts = build_kubelet_args(facts)
facts = build_controller_args(facts)
facts = build_api_server_args(facts)
facts = set_version_facts_if_unset(facts)
@@ -1553,8 +1380,6 @@ class OpenShiftFacts(object):
facts = set_proxy_facts(facts)
facts = set_builddefaults_facts(facts)
facts = set_buildoverrides_facts(facts)
- if not safe_get_bool(facts['common']['is_containerized']):
- facts = set_installed_variant_rpm_facts(facts)
facts = set_nodename(facts)
return dict(openshift=facts)
@@ -1582,7 +1407,6 @@ class OpenShiftFacts(object):
hostname=hostname,
public_hostname=hostname,
portal_net='172.30.0.0/16',
- client_binary='oc',
dns_domain='cluster.local',
config_base='/etc/origin')
@@ -1607,10 +1431,7 @@ class OpenShiftFacts(object):
max_requests_inflight=500)
if 'node' in roles:
- defaults['node'] = dict(labels={}, annotations={},
- iptables_sync_period='30s',
- local_quota_per_fsgroup="",
- set_node_ip=False)
+ defaults['node'] = dict(labels={})
if 'cloudprovider' in roles:
defaults['cloudprovider'] = dict(kind=None)
diff --git a/roles/openshift_grafana/defaults/main.yml b/roles/openshift_grafana/defaults/main.yml
new file mode 100644
index 000000000..7fd7a085d
--- /dev/null
+++ b/roles/openshift_grafana/defaults/main.yml
@@ -0,0 +1,12 @@
+---
+gf_body_tmp:
+ name: grafana_name
+ type: prometheus
+ typeLogoUrl: ''
+ access: proxy
+ url: prometheus_url
+ basicAuth: false
+ withCredentials: false
+ jsonData:
+ tlsSkipVerify: true
+ token: satoken
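
gf_body_tmp is only the skeleton of a Grafana datasource definition; name, url, and token are placeholders a task is expected to overwrite before POSTing the body to Grafana's /api/datasources endpoint. A hedged sketch of such a task (the route and variable plumbing below are assumptions, not the role's actual tasks):

    - name: Register Prometheus datasource with Grafana (illustrative)
      uri:
        url: "https://{{ grafana_route }}/api/datasources"
        method: POST
        body_format: json
        body: "{{ gf_body_tmp | combine({'name': 'prometheus-ocp', 'url': grafana_prometheus_url}) }}"
        headers:
          Authorization: "Bearer {{ grafana_sa_token }}"
        status_code: 200
      # grafana_route, grafana_prometheus_url, grafana_sa_token are hypothetical vars
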
diff --git a/roles/openshift_grafana/files/grafana-ocp-oauth.yml b/roles/openshift_grafana/files/grafana-ocp-oauth.yml
new file mode 100644
index 000000000..82fa89004
--- /dev/null
+++ b/roles/openshift_grafana/files/grafana-ocp-oauth.yml
@@ -0,0 +1,661 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: grafana-ocp
+ annotations:
+ "openshift.io/display-name": Grafana ocp
+ description: |
+ Grafana server with patched Prometheus datasource.
+ iconClass: icon-cogs
+ tags: "metrics,monitoring,grafana,prometheus"
+parameters:
+- description: The location of the grafana image
+ name: IMAGE_GF
+ value: mrsiano/grafana-ocp:latest
+- description: The location of the proxy image
+ name: IMAGE_PROXY
+ value: openshift/oauth-proxy:v1.0.0
+- description: External URL for the grafana route
+ name: ROUTE_URL
+ value: ""
+- description: The namespace to instantiate grafana under. Defaults to 'grafana'.
+ name: NAMESPACE
+ value: grafana
+- description: The session secret for the proxy
+ name: SESSION_SECRET
+ generate: expression
+ from: "[a-zA-Z0-9]{43}"
+objects:
+- apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: grafana-ocp
+ namespace: "${NAMESPACE}"
+ annotations:
+ serviceaccounts.openshift.io/oauth-redirectreference.primary: '{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"grafana-ocp"}}'
+- apiVersion: authorization.openshift.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ name: gf-cluster-reader
+ roleRef:
+ name: cluster-reader
+ subjects:
+ - kind: ServiceAccount
+ name: grafana-ocp
+ namespace: "${NAMESPACE}"
+- apiVersion: route.openshift.io/v1
+ kind: Route
+ metadata:
+ name: grafana-ocp
+ namespace: "${NAMESPACE}"
+ spec:
+ host: "${ROUTE_URL}"
+ to:
+ name: grafana-ocp
+ tls:
+ termination: Reencrypt
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: grafana-ocp
+ annotations:
+ prometheus.io/scrape: "true"
+ prometheus.io/scheme: https
+ service.alpha.openshift.io/serving-cert-secret-name: gf-tls
+ namespace: "${NAMESPACE}"
+ labels:
+ metrics-infra: grafana-ocp
+ name: grafana-ocp
+ spec:
+ ports:
+ - name: grafana-ocp
+ port: 443
+ protocol: TCP
+ targetPort: 8443
+ selector:
+ app: grafana-ocp
+- apiVersion: v1
+ kind: Secret
+ metadata:
+ name: gf-proxy
+ namespace: "${NAMESPACE}"
+ stringData:
+ session_secret: "${SESSION_SECRET}="
+# Deploy Grafana behind an oauth proxy
+- apiVersion: extensions/v1beta1
+ kind: Deployment
+ metadata:
+ labels:
+ app: grafana-ocp
+ name: grafana-ocp
+ namespace: "${NAMESPACE}"
+ spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: grafana-ocp
+ template:
+ metadata:
+ labels:
+ app: grafana-ocp
+ name: grafana-ocp-app
+ spec:
+ serviceAccountName: grafana-ocp
+ containers:
+ - name: oauth-proxy
+ image: ${IMAGE_PROXY}
+ imagePullPolicy: IfNotPresent
+ ports:
+ - containerPort: 8443
+ name: web
+ args:
+ - -https-address=:8443
+ - -http-address=
+ - -email-domain=*
+ - -client-id=system:serviceaccount:${NAMESPACE}:grafana-ocp
+ - -upstream=http://localhost:3000
+ - -provider=openshift
+# - '-openshift-delegate-urls={"/api/datasources": {"resource": "namespace", "verb": "get", "resourceName": "grafana-ocp", "namespace": "${NAMESPACE}"}}'
+ - '-openshift-sar={"namespace": "${NAMESPACE}", "verb": "list", "resource": "services"}'
+ - -tls-cert=/etc/tls/private/tls.crt
+ - -tls-key=/etc/tls/private/tls.key
+ - -client-secret-file=/var/run/secrets/kubernetes.io/serviceaccount/token
+ - -cookie-secret-file=/etc/proxy/secrets/session_secret
+ - -skip-auth-regex=^/metrics,/api/datasources,/api/dashboards
+ volumeMounts:
+ - mountPath: /etc/tls/private
+ name: gf-tls
+ - mountPath: /etc/proxy/secrets
+ name: secrets
+
+ - name: grafana-ocp
+ image: ${IMAGE_GF}
+ ports:
+ - name: grafana-http
+ containerPort: 3000
+ volumeMounts:
+ - mountPath: "/root/go/src/github.com/grafana/grafana/data"
+ name: gf-data
+ - mountPath: "/root/go/src/github.com/grafana/grafana/conf"
+ name: gfconfig
+ - mountPath: /etc/tls/private
+ name: gf-tls
+ - mountPath: /etc/proxy/secrets
+ name: secrets
+ command:
+ - "./bin/grafana-server"
+
+ volumes:
+ - name: gfconfig
+ configMap:
+ name: gf-config
+ - name: secrets
+ secret:
+ secretName: gf-proxy
+ - name: gf-tls
+ secret:
+ secretName: gf-tls
+ - emptyDir: {}
+ name: gf-data
+- apiVersion: v1
+ kind: ConfigMap
+ metadata:
+ name: gf-config
+ namespace: "${NAMESPACE}"
+ data:
+ defaults.ini: |-
+ ##################### Grafana Configuration Defaults #####################
+ #
+ # Do not modify this file in grafana installs
+ #
+
+ # possible values : production, development
+ app_mode = production
+
+ # instance name, defaults to HOSTNAME environment variable value or hostname if HOSTNAME var is empty
+ instance_name = ${HOSTNAME}
+
+ #################################### Paths ###############################
+ [paths]
+ # Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used)
+ #
+ data = data
+ #
+ # Directory where grafana can store logs
+ #
+ logs = data/log
+ #
+ # Directory where grafana will automatically scan and look for plugins
+ #
+ plugins = data/plugins
+
+ #################################### Server ##############################
+ [server]
+ # Protocol (http, https, socket)
+ protocol = http
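+ # TLS is terminated by the oauth-proxy sidecar in front of grafana
+ # (upstream=http://localhost:3000), so grafana itself serves plain http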
+
+ # The ip address to bind to, empty will bind to all interfaces
+ http_addr =
+
+ # The http port to use
+ http_port = 3000
+
+ # The public facing domain name used to access grafana from a browser
+ domain = localhost
+
+ # Redirect to correct domain if host header does not match domain
+ # Prevents DNS rebinding attacks
+ enforce_domain = false
+
+ # The full public facing url
+ root_url = %(protocol)s://%(domain)s:%(http_port)s/
+
+ # Log web requests
+ router_logging = false
+
+ # the path relative working path
+ static_root_path = public
+
+ # enable gzip
+ enable_gzip = false
+
+ # https certs & key file
+ cert_file = /etc/tls/private/tls.crt
+ cert_key = /etc/tls/private/tls.key
+
+ # Unix socket path
+ socket = /tmp/grafana.sock
+
+ #################################### Database ############################
+ [database]
+ # You can configure the database connection by specifying type, host, name, user and password
+ # as separate properties or as one string using the url property.
+
+ # Either "mysql", "postgres" or "sqlite3", it's your choice
+ type = sqlite3
+ host = 127.0.0.1:3306
+ name = grafana
+ user = root
+ # If the password contains # or ; you have to wrap it with triple quotes. Ex """#password;"""
+ password =
+ # Use either URL or the previous fields to configure the database
+ # Example: mysql://user:secret@host:port/database
+ url =
+
+ # Max idle conn setting default is 2
+ max_idle_conn = 2
+
+ # Max conn setting default is 0 (means not set)
+ max_open_conn =
+
+ # For "postgres", use either "disable", "require" or "verify-full"
+ # For "mysql", use either "true", "false", or "skip-verify".
+ ssl_mode = disable
+
+ ca_cert_path =
+ client_key_path =
+ client_cert_path =
+ server_cert_name =
+
+ # For "sqlite3" only, path relative to data_path setting
+ path = grafana.db
+
+ #################################### Session #############################
+ [session]
+ # Either "memory", "file", "redis", "mysql", "postgres", "memcache", default is "file"
+ provider = file
+
+ # Provider config options
+ # memory: does not have any config yet
+ # file: session dir path, is relative to grafana data_path
+ # redis: config like redis server e.g. `addr=127.0.0.1:6379,pool_size=100,db=grafana`
+ # postgres: user=a password=b host=localhost port=5432 dbname=c sslmode=disable
+ # mysql: go-sql-driver/mysql dsn config string, examples:
+ # `user:password@tcp(127.0.0.1:3306)/database_name`
+ # `user:password@unix(/var/run/mysqld/mysqld.sock)/database_name`
+ # memcache: 127.0.0.1:11211
+
+
+ provider_config = sessions
+
+ # Session cookie name
+ cookie_name = grafana_sess
+
+ # Set to true if you only use sessions over https, default is false
+ cookie_secure = false
+
+ # Session life time, default is 86400
+ session_life_time = 86400
+ gc_interval_time = 86400
+
+ #################################### Data proxy ###########################
+ [dataproxy]
+
+ # This enables data proxy logging, default is false
+ logging = false
+
+ #################################### Analytics ###########################
+ [analytics]
+ # Server reporting, sends usage counters to stats.grafana.org every 24 hours.
+ # No ip addresses are being tracked, only simple counters to track
+ # running instances, dashboard and error counts. It is very helpful to us.
+ # Change this option to false to disable reporting.
+ reporting_enabled = true
+
+ # Set to false to disable all checks to https://grafana.com
+ # for new versions (grafana itself and plugins), check is used
+ # in some UI views to notify that grafana or plugin update exists
+ # This option does not cause any auto updates, nor send any information;
+ # it only makes a GET request to https://grafana.com to get the latest versions
+ check_for_updates = true
+
+ # Google Analytics universal tracking code, only enabled if you specify an id here
+ google_analytics_ua_id =
+
+ # Google Tag Manager ID, only enabled if you specify an id here
+ google_tag_manager_id =
+
+ #################################### Security ############################
+ [security]
+ # default admin user, created on startup
+ admin_user = admin
+
+ # default admin password, can be changed before first start of grafana, or in profile settings
+ admin_password = admin
+
+ # used for signing
+ secret_key = SW2YcwTIb9zpOOhoPsMm
+
+ # Auto-login remember days
+ login_remember_days = 7
+ cookie_username = grafana_user
+ cookie_remember_name = grafana_remember
+
+ # disable gravatar profile images
+ disable_gravatar = false
+
+ # data source proxy whitelist (ip_or_domain:port separated by spaces)
+ data_source_proxy_whitelist =
+
+ [snapshots]
+ # snapshot sharing options
+ external_enabled = true
+ external_snapshot_url = https://snapshots-origin.raintank.io
+ external_snapshot_name = Publish to snapshot.raintank.io
+
+ # remove expired snapshot
+ snapshot_remove_expired = true
+
+ # remove snapshots after 90 days
+ snapshot_TTL_days = 90
+
+ #################################### Users ####################################
+ [users]
+ # disable user signup / registration
+ allow_sign_up = true
+
+ # Allow non admin users to create organizations
+ allow_org_create = true
+
+ # Set to true to automatically assign new users to the default organization (id 1)
+ auto_assign_org = true
+
+ # Default role new users will be automatically assigned (if auto_assign_org above is set to true)
+ auto_assign_org_role = Admin
+
+ # Require email validation before sign up completes
+ verify_email_enabled = false
+
+ # Background text for the user field on the login page
+ login_hint = email or username
+
+ # Default UI theme ("dark" or "light")
+ default_theme = dark
+
+ # External user management
+ external_manage_link_url =
+ external_manage_link_name =
+ external_manage_info =
+
+ [auth]
+ # Set to true to disable (hide) the login form, useful if you use OAuth
+ disable_login_form = true
+
+ # Set to true to disable the signout link in the side menu. Useful if you use auth.proxy
+ disable_signout_menu = true
+
+ #################################### Anonymous Auth ######################
+ [auth.anonymous]
+ # enable anonymous access
+ enabled = true
+
+ # specify organization name that should be used for unauthenticated users
+ org_name = Main Org.
+
+ # specify role for unauthenticated users
+ org_role = Admin
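+ # anonymous users get Admin only because the oauth-proxy in front of
+ # grafana performs authentication/authorization before requests arrive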
+
+ #################################### Github Auth #########################
+ [auth.github]
+ enabled = false
+ allow_sign_up = true
+ client_id = some_id
+ client_secret = some_secret
+ scopes = user:email
+ auth_url = https://github.com/login/oauth/authorize
+ token_url = https://github.com/login/oauth/access_token
+ api_url = https://api.github.com/user
+ team_ids =
+ allowed_organizations =
+
+ #################################### Google Auth #########################
+ [auth.google]
+ enabled = false
+ allow_sign_up = true
+ client_id = some_client_id
+ client_secret = some_client_secret
+ scopes = https://www.googleapis.com/auth/userinfo.profile https://www.googleapis.com/auth/userinfo.email
+ auth_url = https://accounts.google.com/o/oauth2/auth
+ token_url = https://accounts.google.com/o/oauth2/token
+ api_url = https://www.googleapis.com/oauth2/v1/userinfo
+ allowed_domains =
+ hosted_domain =
+
+ #################################### Grafana.com Auth ####################
+ # legacy key names (so they work in env variables)
+ [auth.grafananet]
+ enabled = false
+ allow_sign_up = true
+ client_id = some_id
+ client_secret = some_secret
+ scopes = user:email
+ allowed_organizations =
+
+ [auth.grafana_com]
+ enabled = false
+ allow_sign_up = true
+ client_id = some_id
+ client_secret = some_secret
+ scopes = user:email
+ allowed_organizations =
+
+ #################################### Generic OAuth #######################
+ [auth.generic_oauth]
+ name = OAuth
+ enabled = false
+ allow_sign_up = true
+ client_id = some_id
+ client_secret = some_secret
+ scopes = user:email
+ auth_url =
+ token_url =
+ api_url =
+ team_ids =
+ allowed_organizations =
+
+ #################################### Basic Auth ##########################
+ [auth.basic]
+ enabled = false
+
+ #################################### Auth Proxy ##########################
+ [auth.proxy]
+ enabled = true
+ header_name = X-WEBAUTH-USER
+ header_property = username
+ auto_sign_up = true
+ ldap_sync_ttl = 60
+ whitelist =
+
+ #################################### Auth LDAP ###########################
+ [auth.ldap]
+ enabled = false
+ config_file = /etc/grafana/ldap.toml
+ allow_sign_up = true
+
+ #################################### SMTP / Emailing #####################
+ [smtp]
+ enabled = false
+ host = localhost:25
+ user =
+ # If the password contains # or ; you have to wrap it with triple quotes. Ex """#password;"""
+ password =
+ cert_file =
+ key_file =
+ skip_verify = false
+ from_address = admin@grafana.localhost
+ from_name = Grafana
+ ehlo_identity =
+
+ [emails]
+ welcome_email_on_sign_up = false
+ templates_pattern = emails/*.html
+
+ #################################### Logging ##########################
+ [log]
+ # Either "console", "file", "syslog". Default is console and file
+ # Use space to separate multiple modes, e.g. "console file"
+ mode = console file
+
+ # Either "debug", "info", "warn", "error", "critical", default is "info"
+ level = error
+
+ # optional settings to set different levels for specific loggers. Ex filters = sqlstore:debug
+ filters =
+
+ # For "console" mode only
+ [log.console]
+ level =
+
+ # log line format, valid options are text, console and json
+ format = console
+
+ # For "file" mode only
+ [log.file]
+ level =
+
+ # log line format, valid options are text, console and json
+ format = text
+
+ # This enables automated log rotation (switches the following options), default is true
+ log_rotate = true
+
+ # Max line number of single file, default is 1000000
+ max_lines = 1000000
+
+ # Max size shift of single file, default is 28, which means 1 << 28 = 256MB
+ max_size_shift = 28
+
+ # Segment log daily, default is true
+ daily_rotate = true
+
+ # Expired days of log file (delete after max days), default is 7
+ max_days = 7
+
+ [log.syslog]
+ level =
+
+ # log line format, valid options are text, console and json
+ format = text
+
+ # Syslog network type and address. This can be udp, tcp, or unix. If left blank, the default unix endpoints will be used.
+ network =
+ address =
+
+ # Syslog facility. user, daemon and local0 through local7 are valid.
+ facility =
+
+ # Syslog tag. By default, the process' argv[0] is used.
+ tag =
+
+
+ #################################### AMQP Event Publisher ################
+ [event_publisher]
+ enabled = false
+ rabbitmq_url = amqp://localhost/
+ exchange = grafana_events
+
+ #################################### Dashboard JSON files ################
+ [dashboards.json]
+ enabled = false
+ path = /var/lib/grafana/dashboards
+
+ #################################### Usage Quotas ########################
+ [quota]
+ enabled = false
+
+ #### set quotas to -1 to make unlimited. ####
+ # limit number of users per Org.
+ org_user = 10
+
+ # limit number of dashboards per Org.
+ org_dashboard = 100
+
+ # limit number of data_sources per Org.
+ org_data_source = 10
+
+ # limit number of api_keys per Org.
+ org_api_key = 10
+
+ # limit number of orgs a user can create.
+ user_org = 10
+
+ # Global limit of users.
+ global_user = -1
+
+ # global limit of orgs.
+ global_org = -1
+
+ # global limit of dashboards
+ global_dashboard = -1
+
+ # global limit of api_keys
+ global_api_key = -1
+
+ # global limit on number of logged in users.
+ global_session = -1
+
+ #################################### Alerting ############################
+ [alerting]
+ # Disable alerting engine & UI features
+ enabled = true
+ # Makes it possible to turn off alert rule execution but alerting UI is visible
+ execute_alerts = true
+
+ #################################### Internal Grafana Metrics ############
+ # Metrics available at HTTP API URL /api/metrics
+ [metrics]
+ enabled = true
+ interval_seconds = 10
+
+ # Send internal Grafana metrics to graphite
+ [metrics.graphite]
+ # Enable by setting the address setting (ex localhost:2003)
+ address =
+ prefix = prod.grafana.%(instance_name)s.
+
+ [grafana_net]
+ url = https://grafana.com
+
+ [grafana_com]
+ url = https://grafana.com
+
+ #################################### Distributed tracing ############
+ [tracing.jaeger]
+ # jaeger destination (ex localhost:6831)
+ address =
+ # tag that will always be included in when creating new spans. ex (tag1:value1,tag2:value2)
+ always_included_tag =
+ # Type specifies the type of the sampler: const, probabilistic, rateLimiting, or remote
+ sampler_type = const
+ # jaeger samplerconfig param
+ # for "const" sampler, 0 or 1 for always false/true respectively
+ # for "probabilistic" sampler, a probability between 0 and 1
+ # for "rateLimiting" sampler, the number of spans per second
+ # for "remote" sampler, param is the same as for "probabilistic"
+ # and indicates the initial sampling rate before the actual one
+ # is received from the mothership
+ sampler_param = 1
+
+ #################################### External Image Storage ##############
+ [external_image_storage]
+ # You can choose between (s3, webdav, gcs)
+ provider =
+
+ [external_image_storage.s3]
+ bucket_url =
+ bucket =
+ region =
+ path =
+ access_key =
+ secret_key =
+
+ [external_image_storage.webdav]
+ url =
+ username =
+ password =
+ public_url =
+
+ [external_image_storage.gcs]
+ key_file =
+ bucket =
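
Note: the deployment above mounts the gf-config map over grafana's conf
directory, so this defaults.ini shadows the one shipped in the image and is
the configuration grafana actually reads.
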
diff --git a/roles/openshift_grafana/files/grafana-ocp.yml b/roles/openshift_grafana/files/grafana-ocp.yml
new file mode 100644
index 000000000..bc7b4b286
--- /dev/null
+++ b/roles/openshift_grafana/files/grafana-ocp.yml
@@ -0,0 +1,76 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: grafana-ocp
+ annotations:
+ "openshift.io/display-name": Grafana ocp
+ description: |
+ Grafana server with patched Prometheus datasource.
+ iconClass: icon-cogs
+ tags: "metrics,monitoring,grafana,prometheus"
+parameters:
+- description: External URL for the grafana route
+ name: ROUTE_URL
+ value: ""
+- description: The namespace to instantiate grafana under. Defaults to 'grafana'.
+ name: NAMESPACE
+ value: grafana
+objects:
+- apiVersion: route.openshift.io/v1
+ kind: Route
+ metadata:
+ name: grafana-ocp
+ namespace: "${NAMESPACE}"
+ spec:
+ host: "${ROUTE_URL}"
+ to:
+ name: grafana-ocp
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: grafana-ocp
+ namespace: "${NAMESPACE}"
+ labels:
+ metrics-infra: grafana-ocp
+ name: grafana-ocp
+ spec:
+ selector:
+ name: grafana-ocp
+ ports:
+ - port: 8082
+ protocol: TCP
+ targetPort: grafana-http
+- apiVersion: v1
+ kind: ReplicationController
+ metadata:
+ name: grafana-ocp
+ namespace: "${NAMESPACE}"
+ labels:
+ metrics-infra: grafana-ocp
+ name: grafana-ocp
+ spec:
+ selector:
+ name: grafana-ocp
+ replicas: 1
+ template:
+ version: v1
+ metadata:
+ labels:
+ metrics-infra: grafana-ocp
+ name: grafana-ocp
+ spec:
+ volumes:
+ - name: data
+ emptyDir: {}
+ containers:
+ - image: "mrsiano/grafana-ocp:latest"
+ name: grafana-ocp
+ ports:
+ - name: grafana-http
+ containerPort: 3000
+ volumeMounts:
+ - name: data
+ mountPath: "/root/go/src/github.com/grafana/grafana/data"
+ command:
+ - "./bin/grafana-server"
diff --git a/roles/openshift_grafana/files/openshift-cluster-monitoring.json b/roles/openshift_grafana/files/openshift-cluster-monitoring.json
new file mode 100644
index 000000000..f59ca997f
--- /dev/null
+++ b/roles/openshift_grafana/files/openshift-cluster-monitoring.json
@@ -0,0 +1,5138 @@
+{
+ "dashboard": {
+ "description": "Monitors Openshift cluster using Prometheus. Shows overall cluster CPU / Memory / Filesystem usage as well as individual pod, containers, systemd services statistics. Uses cAdvisor metrics only.",
+ "editable": true,
+ "gnetId": 315,
+ "graphTooltip": 0,
+ "hideControls": false,
+ "id": null,
+ "links": [],
+ "rows": [
+ {
+ "collapse": false,
+ "height": "200px",
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {},
+ "height": "200px",
+ "id": 32,
+ "legend": {
+ "alignAsTable": false,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": false,
+ "show": false,
+ "sideWidth": 200,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum (irate (container_network_receive_bytes_total{kubernetes_io_hostname=~\"^$Node$\"}[2m]))",
+ "format": "time_series",
+ "instant": false,
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "Received",
+ "metric": "network",
+ "refId": "A",
+ "step": 1
+ },
+ {
+ "expr": "- sum (irate (container_network_transmit_bytes_total{kubernetes_io_hostname=~\"^$Node$\"}[2m]))",
+ "format": "time_series",
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "Sent",
+ "metric": "network",
+ "refId": "B",
+ "step": 1
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Network I/O pressure",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "transparent": false,
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "Bps",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "Bps",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "Network I/O pressure",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": false,
+ "height": "250px",
+ "panels": [
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": true,
+ "colors": [
+ "rgba(50, 172, 45, 0.97)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(245, 54, 54, 0.9)"
+ ],
+ "datasource": "${DS_PR}",
+ "editable": true,
+ "error": false,
+ "format": "percent",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": true,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "height": "180px",
+ "id": 4,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "span": 4,
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "sum (container_memory_working_set_bytes{id=\"/\",kubernetes_io_hostname=~\"^$Node$\"}) / sum (machine_memory_bytes{kubernetes_io_hostname=~\"^$Node$\"}) * 100",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "refId": "A",
+ "step": 20
+ }
+ ],
+ "thresholds": "",
+ "title": "Cluster memory usage",
+ "transparent": false,
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": true,
+ "colors": [
+ "rgba(50, 172, 45, 0.97)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(245, 54, 54, 0.9)"
+ ],
+ "datasource": "${DS_PR}",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "format": "percent",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": true,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "height": "180px",
+ "id": 6,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "span": 4,
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "sum (irate (container_cpu_usage_seconds_total{id=\"/\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) / sum (machine_cpu_cores{kubernetes_io_hostname=~\"^$Node$\"}) * 100",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "refId": "A",
+ "step": 20
+ }
+ ],
+ "thresholds": "",
+ "title": "Cluster CPU usage ",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": true,
+ "colors": [
+ "rgba(50, 172, 45, 0.97)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(245, 54, 54, 0.9)"
+ ],
+ "datasource": "${DS_PR}",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "format": "percent",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": true,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "height": "180px",
+ "id": 7,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "span": 4,
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "sum (container_fs_usage_bytes{device=~\"^/dev/mapper/docker_.*\",id=\"/\",kubernetes_io_hostname=~\"^$Node$\"}) / sum (container_fs_limit_bytes{device=~\"^/dev/mapper/docker_.*\",id=\"/\",kubernetes_io_hostname=~\"^$Node$\"}) * 100",
+ "format": "time_series",
+ "hide": false,
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "",
+ "metric": "",
+ "refId": "A",
+ "step": 20
+ }
+ ],
+ "thresholds": "",
+ "title": "Cluster filesystem usage",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "rgba(50, 172, 45, 0.97)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(245, 54, 54, 0.9)"
+ ],
+ "datasource": "${DS_PR}",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "format": "bytes",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "height": "1px",
+ "id": 9,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "20%",
+ "prefix": "",
+ "prefixFontSize": "20%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "span": 2,
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "sum (container_memory_working_set_bytes{id=\"/\",kubernetes_io_hostname=~\"^$Node$\"})",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "refId": "A",
+ "step": 20
+ }
+ ],
+ "thresholds": "",
+ "title": "Used",
+ "type": "singlestat",
+ "valueFontSize": "50%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "rgba(50, 172, 45, 0.97)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(245, 54, 54, 0.9)"
+ ],
+ "datasource": "${DS_PR}",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "format": "bytes",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "height": "1px",
+ "id": 10,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "span": 2,
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "sum (machine_memory_bytes{kubernetes_io_hostname=~\"^$Node$\"})",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "refId": "A",
+ "step": 20
+ }
+ ],
+ "thresholds": "",
+ "title": "Total",
+ "type": "singlestat",
+ "valueFontSize": "50%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "rgba(50, 172, 45, 0.97)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(245, 54, 54, 0.9)"
+ ],
+ "datasource": "${DS_PR}",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "format": "none",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "height": "1px",
+ "id": 11,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": " cores",
+ "postfixFontSize": "30%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "span": 2,
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "sum (irate (container_cpu_usage_seconds_total{id=\"/\",kubernetes_io_hostname=~\"^$Node$\"}[2m]))",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "refId": "A",
+ "step": 20
+ }
+ ],
+ "thresholds": "",
+ "title": "Used",
+ "type": "singlestat",
+ "valueFontSize": "50%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "rgba(50, 172, 45, 0.97)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(245, 54, 54, 0.9)"
+ ],
+ "datasource": "${DS_PR}",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "format": "none",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "height": "1px",
+ "id": 12,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": " cores",
+ "postfixFontSize": "30%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "span": 2,
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "sum (machine_cpu_cores{kubernetes_io_hostname=~\"^$Node$\"})",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "refId": "A",
+ "step": 20
+ }
+ ],
+ "thresholds": "",
+ "title": "Total",
+ "type": "singlestat",
+ "valueFontSize": "50%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "rgba(50, 172, 45, 0.97)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(245, 54, 54, 0.9)"
+ ],
+ "datasource": "${DS_PR}",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "format": "bytes",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "height": "1px",
+ "id": 13,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "span": 2,
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "sum (container_fs_usage_bytes{device=~\"^/dev/mapper/docker_.*$\",id=\"/\",kubernetes_io_hostname=~\"^$Node$\"})",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "refId": "A",
+ "step": 20
+ }
+ ],
+ "thresholds": "",
+ "title": "Used",
+ "type": "singlestat",
+ "valueFontSize": "50%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "rgba(50, 172, 45, 0.97)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(245, 54, 54, 0.9)"
+ ],
+ "datasource": "${DS_PR}",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "format": "bytes",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "height": "1px",
+ "id": 14,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "span": 2,
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "sum (container_fs_limit_bytes{device=~\"^/dev/mapper/docker_.*$\",id=\"/\",kubernetes_io_hostname=~\"^$Node$\"})",
+ "format": "time_series",
+ "hide": false,
+ "interval": "",
+ "intervalFactor": 1,
+ "refId": "A",
+ "step": 20
+ }
+ ],
+ "thresholds": "",
+ "title": "Total",
+ "type": "singlestat",
+ "valueFontSize": "50%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "Total usage",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": 250,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 33,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum (irate (container_cpu_usage_seconds_total{id=\"/\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) / sum (machine_cpu_cores{kubernetes_io_hostname=~\"^$Node$\"}) ",
+ "format": "time_series",
+ "hide": false,
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "overall cpu usage",
+ "refId": "A",
+ "step": 1
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Cluster CPU Usage",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "percent",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "Dashboard Row",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": "250px",
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "decimals": 3,
+ "editable": true,
+ "error": false,
+ "fill": 0,
+ "grid": {},
+ "height": "",
+ "id": 17,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": true,
+ "targets": [
+ {
+ "expr": "sum (irate (container_cpu_usage_seconds_total{image!=\"\",name=~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (pod_name) * 100",
+ "format": "time_series",
+ "hide": false,
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "{{ pod_name }}",
+ "metric": "container_cpu",
+ "refId": "A",
+ "step": 2
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Pods CPU usage ",
+ "tooltip": {
+ "msResolution": true,
+ "shared": true,
+ "sort": 2,
+ "value_type": "cumulative"
+ },
+ "transparent": false,
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "percent",
+ "label": "% Usage",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "Pods CPU usage",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": "250px",
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "decimals": 3,
+ "editable": true,
+ "error": false,
+ "fill": 0,
+ "grid": {},
+ "height": "",
+ "id": 24,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "hideEmpty": false,
+ "hideZero": false,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sideWidth": null,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": true,
+ "targets": [
+ {
+ "expr": "sum (irate (container_cpu_usage_seconds_total{image!=\"\",name=~\"^k8s_.*\",container_name!=\"POD\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (container_name, pod_name)",
+ "format": "time_series",
+ "hide": false,
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "pod: {{ pod_name }} | {{ container_name }}",
+ "metric": "container_cpu",
+ "refId": "A",
+ "step": 2
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Containers Cores Usage",
+ "tooltip": {
+ "msResolution": true,
+ "shared": true,
+ "sort": 2,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": "cores",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "Containers CPU usage",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": "250px",
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "decimals": 3,
+ "editable": true,
+ "error": false,
+ "fill": 0,
+ "grid": {},
+ "height": "",
+ "id": 23,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": true,
+ "targets": [
+ {
+ "expr": "sum (irate (container_cpu_usage_seconds_total{id!=\"/\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (id)",
+ "format": "time_series",
+ "hide": false,
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "{{ id }}",
+ "metric": "container_cpu",
+ "refId": "A",
+ "step": 2
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "System services CPU usage ",
+ "tooltip": {
+ "msResolution": true,
+ "shared": true,
+ "sort": 2,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": "cores",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "System services CPU usage",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": 411,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "decimals": 3,
+ "editable": true,
+ "error": false,
+ "fill": 0,
+ "grid": {},
+ "id": 34,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": false,
+ "show": true,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": true,
+ "targets": [
+ {
+ "expr": "sum (irate (container_memory_usage_bytes{id!=\"/\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (id)",
+ "format": "time_series",
+ "hide": false,
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "{{ id }}",
+ "metric": "container_cpu",
+ "refId": "A",
+ "step": 2
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "All processes Memory usage ",
+ "tooltip": {
+ "msResolution": true,
+ "shared": true,
+ "sort": 2,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "bytes",
+ "label": "cores",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "All processes CPU usage",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": "250px",
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "fill": 0,
+ "grid": {},
+ "id": 25,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sideWidth": 200,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": true,
+ "targets": [
+ {
+ "expr": "sum (container_memory_working_set_bytes{image!=\"\",name=~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\"}) by (pod_name)",
+ "format": "time_series",
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "{{ pod_name }}",
+ "metric": "container_memory_usage:sort_desc",
+ "refId": "A",
+ "step": 10
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Pods memory usage",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 2,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "bytes",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "Pods memory usage",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": "250px",
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "fill": 0,
+ "grid": {},
+ "id": 26,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sideWidth": 200,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": true,
+ "targets": [
+ {
+ "expr": "sum (container_memory_rss{systemd_service_name=\"\",kubernetes_io_hostname=~\"^$Node$\"}) by (systemd_service_name)",
+ "format": "time_series",
+ "hide": false,
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "{{ systemd_service_name }}",
+ "metric": "container_memory_usage:sort_desc",
+ "refId": "B",
+ "step": 2
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "System services memory usage",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 2,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "bytes",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "System services memory usage",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": "250px",
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "fill": 0,
+ "grid": {},
+ "id": 27,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sideWidth": 200,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": true,
+ "targets": [
+ {
+ "expr": "sum (container_memory_working_set_bytes{image!=\"\",name=~\"^k8s_.*\",container_name!=\"POD\",kubernetes_io_hostname=~\"^$Node$\"}) by (container_name, pod_name)",
+ "format": "time_series",
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "pod: {{ pod_name }} | {{ container_name }}",
+ "metric": "container_memory_usage:sort_desc",
+ "refId": "A",
+ "step": 10
+ },
+ {
+ "expr": "sum (container_memory_working_set_bytes{image!=\"\",name!~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\"}) by (kubernetes_io_hostname, name, image)",
+ "format": "time_series",
+ "hide": false,
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "docker: {{ kubernetes_io_hostname }} | {{ image }} ({{ name }})",
+ "metric": "container_memory_usage:sort_desc",
+ "refId": "B",
+ "step": 10
+ },
+ {
+ "expr": "sum (container_memory_working_set_bytes{rkt_container_name!=\"\",kubernetes_io_hostname=~\"^$Node$\"}) by (kubernetes_io_hostname, rkt_container_name)",
+ "format": "time_series",
+ "hide": false,
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "rkt: {{ kubernetes_io_hostname }} | {{ rkt_container_name }}",
+ "metric": "container_memory_usage:sort_desc",
+ "refId": "C",
+ "step": 10
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Containers memory usage",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 2,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "bytes",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "Containers memory usage",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": "500px",
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "fill": 0,
+ "grid": {},
+ "id": 28,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": false,
+ "show": true,
+ "sideWidth": 200,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": true,
+ "targets": [
+ {
+ "expr": "sum (container_memory_working_set_bytes{id!=\"/\",kubernetes_io_hostname=~\"^$Node$\"}) by (id)",
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "{{ id }}",
+ "metric": "container_memory_usage:sort_desc",
+ "refId": "A",
+ "step": 1
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "All processes memory usage",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 2,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "bytes",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "All processes memory usage",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": "250px",
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {},
+ "id": 30,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sideWidth": 200,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum (irate (container_network_receive_bytes_total{image!=\"\",name=~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (container_name, pod_name)",
+ "format": "time_series",
+ "hide": false,
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "-> pod: {{ pod_name }} | {{ container_name }}",
+ "metric": "network",
+ "refId": "B",
+ "step": 1
+ },
+ {
+ "expr": "- sum (irate (container_network_transmit_bytes_total{image!=\"\",name=~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (container_name, pod_name)",
+ "format": "time_series",
+ "hide": false,
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "<- pod: {{ pod_name }} | {{ container_name }}",
+ "metric": "network",
+ "refId": "D",
+ "step": 1
+ },
+ {
+ "expr": "sum (irate (container_network_receive_bytes_total{image!=\"\",name!~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (kubernetes_io_hostname, name, image)",
+ "format": "time_series",
+ "hide": false,
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "-> docker: {{ kubernetes_io_hostname }} | {{ image }} ({{ name }})",
+ "metric": "network",
+ "refId": "A",
+ "step": 1
+ },
+ {
+ "expr": "- sum (irate (container_network_transmit_bytes_total{image!=\"\",name!~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (kubernetes_io_hostname, name, image)",
+ "format": "time_series",
+ "hide": false,
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "<- docker: {{ kubernetes_io_hostname }} | {{ image }} ({{ name }})",
+ "metric": "network",
+ "refId": "C",
+ "step": 1
+ },
+ {
+ "expr": "sum (irate (container_network_transmit_bytes_total{rkt_container_name!=\"\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (kubernetes_io_hostname, rkt_container_name)",
+ "format": "time_series",
+ "hide": false,
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "-> rkt: {{ kubernetes_io_hostname }} | {{ rkt_container_name }}",
+ "metric": "network",
+ "refId": "E",
+ "step": 1
+ },
+ {
+ "expr": "- sum (irate (container_network_transmit_bytes_total{rkt_container_name!=\"\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (kubernetes_io_hostname, rkt_container_name)",
+ "format": "time_series",
+ "hide": false,
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "<- rkt: {{ kubernetes_io_hostname }} | {{ rkt_container_name }}",
+ "metric": "network",
+ "refId": "F",
+ "step": 1
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Containers network I/O ",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 2,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "Bps",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "Containers network I/O",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": 277,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {},
+ "id": 16,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sideWidth": 200,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum (irate (container_network_receive_bytes_total{image!=\"\",name=~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (pod_name)",
+ "format": "time_series",
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "-> {{ pod_name }}",
+ "metric": "network",
+ "refId": "A",
+ "step": 1
+ },
+ {
+ "expr": "- sum (irate (container_network_transmit_bytes_total{image!=\"\",name=~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (pod_name)",
+ "format": "time_series",
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "<- {{ pod_name }}",
+ "metric": "network",
+ "refId": "B",
+ "step": 1
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Pods network I/O ",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 2,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "Bps",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "Pods network I/O",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": "500px",
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {},
+ "id": 29,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": false,
+ "show": true,
+ "sideWidth": 200,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum (irate (container_network_receive_bytes_total{id!=\"/\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (id)",
+ "format": "time_series",
+ "instant": true,
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "-> {{ id }}",
+ "metric": "network",
+ "refId": "A",
+ "step": 1
+ },
+ {
+ "expr": "- sum (irate (container_network_transmit_bytes_total{id!=\"/\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (id)",
+ "format": "time_series",
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "<- {{ id }}",
+ "metric": "network",
+ "refId": "B",
+ "step": 1
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "All processes network I/O ",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 2,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "Bps",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "All processes network I/O",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": 250,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 35,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sort": "avg",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(openshift_build_total) by (phase,reason)",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{ phase }} | {{ reason }}",
+ "refId": "A",
+ "step": 1
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "openshift_build_total",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 54,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "count(openshift_build_active_time_seconds{phase=\"running\"} offset 10m)",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "A",
+ "step": 2
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Returns the number of builds that have been running for more than 10 minutes (600 seconds).",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 55,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "count(openshift_build_active_time_seconds{phase=\"pending\"} offset 10m)",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "A",
+ "step": 2
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Returns the number of build that have been waiting at least 10 minutes (600 seconds) to start.",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 56,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(openshift_build_total{phase=\"Failed\"})",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "A",
+ "step": 2
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Returns the number of failed builds, regardless of the failure reason.",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 57,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "openshift_build_total{phase=\"Failed\",reason=\"FetchSourceFailed\"}",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "{{ instance }}",
+ "refId": "A",
+ "step": 1
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Returns the number of failed builds because of problems retrieving source from the associated Git repository.",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": true,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 58,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": false,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(openshift_build_total{phase=\"Complete\"})",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "A",
+ "step": 2
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Returns the number of successfully completed builds.",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 0,
+ "id": 59,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sort": "avg",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 1,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "openshift_build_total{phase=\"Failed\"} offset 5m",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{ reason }}",
+ "refId": "A",
+ "step": 2
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Returns the failed builds totals, per failure reason, from 5 minutes ago.",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "OpenShift Builds",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": 250,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 36,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(openshift_sdn_pod_setup_latency_sum)",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "A",
+ "step": 1
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "openshift_sdn_pod_setup_latency_sum",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 41,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(openshift_sdn_pod_teardown_latency{quantile=\"0.9\"}) by (instance)",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "A",
+ "step": 1
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "openshift_sdn_pod_teardown_latency",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 50,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sort": "avg",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "topk(10, (sum by (pod_name) (irate(container_network_receive_bytes_total{pod_name!=\"\"}[5m]))))",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "{{ pod_name }}",
+ "refId": "A",
+ "step": 1
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Top 10 pods doing the most receive network traffic",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "decbytes",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 37,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sort": "avg",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "openshift_sdn_pod_ips",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{ instance }} | {{ role }}",
+ "refId": "A",
+ "step": 1
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "openshift_sdn_pod_ips",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 39,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "garbage_collector_monitoring_route:openshift:io_v1_rate_limiter_use",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "A",
+ "step": 1
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "garbage_collector_monitoring_route:openshift:io_v1_rate_limiter_use",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 42,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sort": "avg",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "openshift_sdn_arp_cache_entries",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{ role }} | {{ instance }}",
+ "refId": "A",
+ "step": 1
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "openshift_sdn_arp_cache_entries",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 40,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "openshift_sdn_arp_cache_entries",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "A",
+ "step": 1
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "openshift_sdn_arp_cache_entries",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "OpenShift SDN",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": 250,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 44,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sort": "avg",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "irate(kubelet_pleg_relist_latency_microseconds{kubernetes_io_hostname=~\"$Node\",quantile=\"0.9\"}[2m])",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{ role }} | {{ instance }}",
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "kubelet_pleg_relist",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "µs",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 51,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sort": "avg",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "irate(kubelet_docker_operations_latency_microseconds{quantile=\"0.9\"}[2m])",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{ operation_type }}",
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "kubelet_docker_operations_latency_microseconds{quantile=\"0.9\"}",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "µs",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 52,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sort": "avg",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "kubelet_docker_operations_timeout",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{ operation_type }}",
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Returns a running count (not a rate) of docker operations that have timed out since the kubelet was started.",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 53,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sort": "avg",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "kubelet_docker_operations_errors",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "{{ operation_type }}",
+ "refId": "A",
+ "step": 2
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Returns a running count (not a rate) of docker operations that have failed since the kubelet was started.",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "Kubelet",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": 250,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 46,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sort": "avg",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "irate(scrape_samples_scraped[2m])",
+ "format": "time_series",
+ "hide": false,
+ "intervalFactor": 2,
+ "legendFormat": "{{ kubernetes_name }} | {{ instance }} ",
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "scrape_samples_scraped",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 68,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sort_desc(sum without (cpu) (irate(container_cpu_usage_seconds_total{container_name=\"prometheus\"}[5m])))",
+ "format": "time_series",
+ "interval": "1s",
+ "intervalFactor": 1,
+ "refId": "A",
+ "step": 2
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "CPU per instance of Prometheus container.",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "Prometheus",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": 250,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 48,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sort": "avg",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sort_desc(sum without (instance,type,client,contentType) (irate(apiserver_request_count{verb!~\"GET|LIST|WATCH\"}[2m]))) > 0",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{ resource }} || {{ verb }}",
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Number of mutating API requests being made to the control plane.",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 49,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sort": "avg",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sort_desc(sum without (instance,type,client,contentType) (irate(apiserver_request_count{verb=~\"GET|LIST|WATCH\"}[2m]))) > 0",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{ resource }} || {{ pod }}",
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Number of non-mutating API requests being made to the control plane.",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 74,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "endpoint_queue_latency",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": " quantile {{ quantile }}",
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "endpoint_queue_latency",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "ms",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "API Server",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": 250,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 61,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "etcd_disk_wal_fsync_duration_seconds_count",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "A",
+ "step": 10
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "etcd_disk_wal_fsync_duration_seconds_count",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "etcd",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": 250,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 62,
+ "legend": {
+ "alignAsTable": false,
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "rightSide": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(changes(container_start_time_seconds[10m]))",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "A",
+ "step": 2
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "The number of containers that start or restart over the last ten minutes.",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "Changes in your cluster",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": 250,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 63,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(machine_cpu_cores)",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Total number of cores in the cluster.",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 64,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(sort_desc(irate(container_cpu_usage_seconds_total{id=\"/\"}[5m])))",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Total number of consumed cores.",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 65,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sort_desc(sum by (kubernetes_io_hostname,type) (irate(container_cpu_usage_seconds_total{id=\"/\"}[5m])))",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "CPU consumed per node in the cluster.",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 66,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sort_desc(sum by (cpu,id,pod_name,container_name) (irate(container_cpu_usage_seconds_total{role=\"infra\"}[5m])))",
+ "format": "time_series",
+ "hide": false,
+ "intervalFactor": 2,
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "CPU consumption per system service or container on the infrastructure nodes.",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 67,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sort_desc(sum by (namespace) (irate(container_cpu_usage_seconds_total[5m])))",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "CPU consumed per namespace on the cluster.",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 47,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(irate(container_cpu_usage_seconds_total{id=\"/\"}[3m])) / sum(machine_cpu_cores)",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Percentage of total cluster CPU in use",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "percent",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 69,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(container_memory_rss) / sum(machine_memory_bytes)",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Percentage of total cluster memory in use",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "percent",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 70,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum by (kubernetes_io_hostname) (irate(container_cpu_usage_seconds_total{id=~\"/system.slice/(docker|etcd).service\"}[5m]))",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Aggregate CPU usage (seconds total) of etcd+docker",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "System and container CPU",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": 250,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 71,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [
+ {
+ "title": "Kubernetes Storage Metrics via Prometheus",
+ "type": "absolute",
+ "url": "https://docs.google.com/document/d/1Fh0T60T_y888LsRwC51CQHO75b2IZ3A34ZQS71s_F0g"
+ }
+ ],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "volumes_queue_latency",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "volumes_queue_latency",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 72,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sort": "avg",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [
+ {
+ "title": "Kubernetes Storage Metrics via Prometheus",
+ "type": "absolute",
+ "url": "https://docs.google.com/document/d/1Fh0T60T_y888LsRwC51CQHO75b2IZ3A34ZQS71s_F0g"
+ }
+ ],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "irate(cloudprovider_gce_api_request_duration_seconds_count[2m])",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{ request }}",
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "cloudprovider_aws_api_request_duration_seconds_count",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 73,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sort": "avg",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [
+ {
+ "title": "Kubernetes Storage Metrics via Prometheus",
+ "type": "absolute",
+ "url": "https://docs.google.com/document/d/1Fh0T60T_y888LsRwC51CQHO75b2IZ3A34ZQS71s_F0g"
+ }
+ ],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum (irate(storage_operation_duration_seconds_sum{kubernetes_io_hostname=~\"$Node\"}[2m])) by (operation_name,kubernetes_io_hostname)",
+ "format": "time_series",
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "{{ operation_name }} || {{ kubernetes_io_hostname }}",
+ "refId": "A",
+ "step": 2
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "storage_operation_duration_seconds_sum",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "s",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "OpenShift Volumes",
+ "titleSize": "h6"
+ }
+ ],
+ "schemaVersion": 14,
+ "style": "dark",
+ "tags": [
+ "kubernetes",
+ "openshift"
+ ],
+ "templating": {
+ "list": [
+ {
+ "allValue": ".*",
+ "current": {},
+ "datasource": "${DS_PR}",
+ "hide": 0,
+ "includeAll": true,
+ "label": null,
+ "multi": false,
+ "name": "Node",
+ "options": [],
+ "query": "label_values(kubernetes_io_hostname)",
+ "refresh": 1,
+ "regex": "",
+ "sort": 0,
+ "tagValuesQuery": "",
+ "tags": [],
+ "tagsQuery": "",
+ "type": "query",
+ "useTags": false
+ }
+ ]
+ },
+ "time": {
+ "from": "now-30m",
+ "to": "now"
+ },
+ "timepicker": {
+ "refresh_intervals": [
+ "5s",
+ "1s",
+ "2m",
+ "20s",
+ "5m",
+ "15m",
+ "30m",
+ "1h",
+ "2h",
+ "1d"
+ ],
+ "time_options": [
+ "5m",
+ "15m",
+ "1h",
+ "6h",
+ "12h",
+ "24h",
+ "2d",
+ "7d",
+ "30d"
+ ]
+ },
+ "timezone": "browser",
+ "title": "openshift cluster monitoring",
+ "version": 6
+ }
+}
diff --git a/roles/openshift_grafana/meta/main.yml b/roles/openshift_grafana/meta/main.yml
new file mode 100644
index 000000000..8dea6f197
--- /dev/null
+++ b/roles/openshift_grafana/meta/main.yml
@@ -0,0 +1,13 @@
+---
+galaxy_info:
+ author: Eldad Marciano
+  description: Set up the grafana pod
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 2.3
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - metrics
diff --git a/roles/openshift_grafana/tasks/gf-permissions.yml b/roles/openshift_grafana/tasks/gf-permissions.yml
new file mode 100644
index 000000000..9d3c741ee
--- /dev/null
+++ b/roles/openshift_grafana/tasks/gf-permissions.yml
@@ -0,0 +1,12 @@
+---
+# NOTE: -b supplies the password non-interactively (a bare `htpasswd -c` would
+# prompt and hang under ansible); gf_admin_password is an assumed role variable.
+- name: Create gf user on htpasswd
+  command: "htpasswd -b -c /etc/origin/master/htpasswd gfadmin {{ gf_admin_password | default('gfadmin') }}"
+
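+# NOTE: the in-place sed below assumes master-config.yaml still uses
+# AllowAllPasswordIdentityProvider; review the file after the run.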
+- name: Make sure master config use HTPasswdPasswordIdentityProvider
+ command: "sed -ie 's|AllowAllPasswordIdentityProvider|HTPasswdPasswordIdentityProvider\n file: /etc/origin/master/htpasswd|' /etc/origin/master/master-config.yaml"
+
+- name: Grant permission for gfuser
+ command: oc adm policy add-cluster-role-to-user cluster-reader gfadmin
+
+- name: Restart master api
+ command: systemctl restart atomic-openshift-master-api.service
diff --git a/roles/openshift_grafana/tasks/main.yml b/roles/openshift_grafana/tasks/main.yml
new file mode 100644
index 000000000..6a06d40a9
--- /dev/null
+++ b/roles/openshift_grafana/tasks/main.yml
@@ -0,0 +1,122 @@
+---
+- name: Create grafana namespace
+ oc_project:
+ state: present
+ name: grafana
+
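+# gf_oauth switches between the plain and the oauth-proxied grafana
+# deployment throughout this file.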
+- name: Configure Grafana Permissions
+  include_tasks: gf-permissions.yml
+  when: gf_oauth | default(false) | bool
+
+# TODO: we should grab this yaml file from openshift/origin
+- name: Templatize grafana yaml
+  template: src=grafana-ocp.yaml dest=/tmp/grafana-ocp.yaml
+  when: not (gf_oauth | default(false) | bool)
+
+# TODO: we should grab this yaml file from openshift/origin
+- name: Templatize grafana yaml (oauth)
+  template: src=grafana-ocp-oauth.yaml dest=/tmp/grafana-ocp-oauth.yaml
+  when: gf_oauth | default(false) | bool
+
+# 'register' cannot assign arbitrary keys; record the rendered path explicitly.
+- name: Record the rendered template path
+  set_fact:
+    cl_file: "{{ '/tmp/grafana-ocp-oauth.yaml' if gf_oauth | default(false) | bool else '/tmp/grafana-ocp.yaml' }}"
+
+- name: Process the grafana file
+ oc_process:
+ namespace: grafana
+ template_name: "{{ cl_file }}"
+ create: True
+  when: gf_oauth | default(false) | bool
+
+- name: Wait for grafana to be running
+  command: oc rollout status deployment/grafana-ocp -n grafana
+
+- name: oc adm policy add-role-to-user view -z grafana-ocp -n {{ gf_prometheus_namespace }}
+ oc_adm_policy_user:
+ user: grafana-ocp
+ resource_kind: cluster-role
+ resource_name: view
+ state: present
+ role_namespace: "{{ gf_prometheus_namespace }}"
+
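+# NOTE: the registered results below are used as if they were bare route
+# hostnames; in practice the host may need to be extracted from the oc_obj
+# results (e.g. results.results[0].spec.host).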
+- name: Get grafana route
+ oc_obj:
+ kind: route
+ name: grafana
+ namespace: grafana
+ register: route
+
+- name: Get prometheus route
+ oc_obj:
+ kind: route
+ name: prometheus
+ namespace: "{{ gf_prometheus_namespace }}"
+  register: prometheus
+
+- name: Get the prometheus SA
+ oc_serviceaccount_secret:
+ state: list
+ service_account: prometheus
+ namespace: "{{ gf_prometheus_namespace }}"
+ register: sa
+
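+# The prometheus SA token is looked up in three steps: list the SA's secrets,
+# filter out the token secret, then read it to extract the bearer token that
+# grafana will send with datasource queries.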
+- name: Get the management SA bearer token
+ set_fact:
+ management_token: "{{ sa.results | oo_filter_sa_secrets }}"
+
+- name: Ensure the SA bearer token value is read
+ oc_secret:
+ state: list
+ name: "{{ management_token }}"
+ namespace: "{{ gf_prometheus_namespace }}"
+ no_log: True
+ register: sa_secret
+
+- name: Get the SA bearer token for prometheus
+ set_fact:
+ token: "{{ sa_secret.results.encoded.token }}"
+
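+# gf_body_tmp is assumed to be the datasource payload skeleton (from the role
+# defaults) carrying grafana_name/prometheus_url/satoken placeholders that are
+# substituted below.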
+- name: Convert the datasource body to json
+  set_fact:
+    ds_json: "{{ gf_body_tmp | to_json }}"
+
+- name: Set protocol type
+  set_fact:
+    protocol: "{{ 'https' if gf_oauth | default(false) | bool else 'http' }}"
+
+- name: Add grafana datasource
+  uri:
+    url: "{{ protocol }}://{{ route }}/api/datasources"
+    user: admin
+    password: admin
+    method: POST
+    body: "{{ ds_json | regex_replace('grafana_name', gf_datasource_name) | regex_replace('prometheus_url', 'https://' ~ prometheus) | regex_replace('satoken', token) }}"
+    headers:
+      Content-Type: "application/json"
+ register: add_ds
+
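+# The dashboard json uses ${DS_PR} as its datasource placeholder; it is swapped
+# for the real datasource name before the upload and restored afterwards.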
+- name: Regex setup ds name
+  replace:
+    # NOTE: assumes the dashboard json ships in this role's files/ directory.
+    path: "{{ role_path }}/files/openshift-cluster-monitoring.json"
+    regexp: '\$\{DS_PR\}'
+    replace: '{{ gf_datasource_name }}'
+    backup: yes
+
+- name: Add new dashboard
+  uri:
+    url: "{{ protocol }}://{{ route }}/api/dashboards/db"
+    user: admin
+    password: admin
+    method: POST
+    body: "{{ lookup('file', 'openshift-cluster-monitoring.json') }}"
+    headers:
+      Content-Type: "application/json"
+  register: add_dashboard
+
+- name: Regex json tear down
+  replace:
+    path: "{{ role_path }}/files/openshift-cluster-monitoring.json"
+    regexp: '{{ gf_datasource_name }}'
+    replace: '${DS_PR}'
+    backup: yes
diff --git a/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py b/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py
index dcaf87eca..c83adb26d 100644
--- a/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py
+++ b/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py
@@ -175,6 +175,8 @@ def format_failure(failure):
play = failure['play']
task = failure['task']
msg = failure['msg']
+ if not isinstance(msg, string_types):
+ msg = str(msg)
checks = failure['checks']
fields = (
(u'Hosts', host),
diff --git a/roles/openshift_health_checker/meta/main.yml b/roles/openshift_health_checker/meta/main.yml
index bc8e7bdcf..b8a59ee14 100644
--- a/roles/openshift_health_checker/meta/main.yml
+++ b/roles/openshift_health_checker/meta/main.yml
@@ -1,3 +1,4 @@
---
dependencies:
- role: openshift_facts
+- role: lib_utils
diff --git a/roles/openshift_health_checker/openshift_checks/__init__.py b/roles/openshift_health_checker/openshift_checks/__init__.py
index b7b16e0ea..b9c41d1b4 100644
--- a/roles/openshift_health_checker/openshift_checks/__init__.py
+++ b/roles/openshift_health_checker/openshift_checks/__init__.py
@@ -5,6 +5,7 @@ Health checks for OpenShift clusters.
import json
import operator
import os
+import re
import time
import collections
@@ -95,6 +96,13 @@ class OpenShiftCheck(object):
# These are intended to be a sequential record of what the check observed and determined.
self.logs = []
+ def template_var(self, var_to_template):
+ """Return a templated variable if self._templar is not None, else
+ just return the variable as-is"""
+ if self._templar is not None:
+ return self._templar.template(var_to_template)
+ return var_to_template
+
@abstractproperty
def name(self):
"""The name of this check, usually derived from the class name."""
@@ -302,28 +310,38 @@ class OpenShiftCheck(object):
name_list = name_list.split(',')
return [name.strip() for name in name_list if name.strip()]
- @staticmethod
- def get_major_minor_version(openshift_image_tag):
+ def get_major_minor_version(self, openshift_image_tag=None):
"""Parse and return the deployed version of OpenShift as a tuple."""
- if openshift_image_tag and openshift_image_tag[0] == 'v':
- openshift_image_tag = openshift_image_tag[1:]
- # map major release versions across releases
- # to a common major version
- openshift_major_release_version = {
- "1": "3",
- }
+ version = openshift_image_tag or self.get_var("openshift_image_tag")
+ components = [int(component) for component in re.findall(r'\d+', version)]
- components = openshift_image_tag.split(".")
- if not components or len(components) < 2:
+ if len(components) < 2:
msg = "An invalid version of OpenShift was found for this host: {}"
- raise OpenShiftCheckException(msg.format(openshift_image_tag))
+ raise OpenShiftCheckException(msg.format(version))
+
+ # map major release version across releases to OCP major version
+ components[0] = {1: 3}.get(components[0], components[0])
+
+ return tuple(int(x) for x in components[:2])
+
+ def get_required_version(self, name, version_map):
+ """Return the correct required version(s) for the current (or nearest) OpenShift version."""
+ openshift_version = self.get_major_minor_version()
+
+ earliest = min(version_map)
+ latest = max(version_map)
+ if openshift_version < earliest:
+ return version_map[earliest]
+ if openshift_version > latest:
+ return version_map[latest]
- if components[0] in openshift_major_release_version:
- components[0] = openshift_major_release_version[components[0]]
+ required_version = version_map.get(openshift_version)
+ if not required_version:
+ msg = "There is no recommended version of {} for the current version of OpenShift ({})"
+ raise OpenShiftCheckException(msg.format(name, ".".join(str(comp) for comp in openshift_version)))
- components = tuple(int(x) for x in components[:2])
- return components
+ return required_version
def find_ansible_mount(self, path):
"""Return the mount point for path from ansible_mounts."""
diff --git a/roles/openshift_health_checker/openshift_checks/disk_availability.py b/roles/openshift_health_checker/openshift_checks/disk_availability.py
index 87e6146d4..6e30a8610 100644
--- a/roles/openshift_health_checker/openshift_checks/disk_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/disk_availability.py
@@ -21,7 +21,7 @@ class DiskAvailability(OpenShiftCheck):
'oo_etcd_to_config': 20 * 10**9,
},
# Used to copy client binaries into,
- # see roles/openshift_cli/library/openshift_container_binary_sync.py.
+ # see roles/lib_utils/library/openshift_container_binary_sync.py.
'/usr/local/bin': {
'oo_masters_to_config': 1 * 10**9,
'oo_nodes_to_config': 1 * 10**9,
diff --git a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
index 4f91f6bb3..ac6ffbbad 100644
--- a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
@@ -56,7 +56,7 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
# ordered list of registries (according to inventory vars) that docker will try for unscoped images
regs = self.ensure_list("openshift_docker_additional_registries")
# currently one of these registries is added whether the user wants it or not.
- deployment_type = self.get_var("openshift_deployment_type")
+ deployment_type = self.get_var("openshift_deployment_type", default="")
if deployment_type == "origin" and "docker.io" not in regs:
regs.append("docker.io")
elif deployment_type == 'openshift-enterprise' and "registry.access.redhat.com" not in regs:
@@ -64,7 +64,9 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
self.registries["configured"] = regs
# for the oreg_url registry there may be credentials specified
- components = self.get_var("oreg_url", default="").split('/')
+ oreg_url = self.get_var("oreg_url", default="")
+ oreg_url = self.template_var(oreg_url)
+ components = oreg_url.split('/')
self.registries["oreg"] = "" if len(components) < 3 else components[0]
# Retrieve and template registry credentials, if provided
@@ -72,9 +74,8 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
oreg_auth_user = self.get_var('oreg_auth_user', default='')
oreg_auth_password = self.get_var('oreg_auth_password', default='')
if oreg_auth_user != '' and oreg_auth_password != '':
- if self._templar is not None:
- oreg_auth_user = self._templar.template(oreg_auth_user)
- oreg_auth_password = self._templar.template(oreg_auth_password)
+ oreg_auth_user = self.template_var(oreg_auth_user)
+ oreg_auth_password = self.template_var(oreg_auth_password)
self.skopeo_command_creds = "--creds={}:{}".format(quote(oreg_auth_user), quote(oreg_auth_password))
# record whether we could reach a registry or not (and remember results)
@@ -153,6 +154,7 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
# template for images that run on top of OpenShift
image_url = "{}/{}-{}:{}".format(image_info["namespace"], image_info["name"], "${component}", "${version}")
image_url = self.get_var("oreg_url", default="") or image_url
+ image_url = self.template_var(image_url)
if 'oo_nodes_to_config' in host_groups:
for suffix in NODE_IMAGE_SUFFIXES:
required.add(image_url.replace("${component}", suffix).replace("${version}", image_tag))
@@ -160,7 +162,7 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
required.add(self._registry_console_image(image_tag, image_info))
# images for containerized components
- if self.get_var("openshift", "common", "is_containerized"):
+ if self.get_var("openshift_is_containerized"):
components = set()
if 'oo_nodes_to_config' in host_groups:
components.update(["node", "openvswitch"])
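To make the image templating concrete, the ${component}/${version} substitution works like this (image name illustrative):

    image_url = "openshift/origin-${component}:${version}"
    image_url.replace("${component}", "node").replace("${version}", "v3.9")
    # -> 'openshift/origin-node:v3.9'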
diff --git a/roles/openshift_health_checker/openshift_checks/etcd_traffic.py b/roles/openshift_health_checker/openshift_checks/etcd_traffic.py
index 8b20ccb49..b56d2092b 100644
--- a/roles/openshift_health_checker/openshift_checks/etcd_traffic.py
+++ b/roles/openshift_health_checker/openshift_checks/etcd_traffic.py
@@ -20,8 +20,8 @@ class EtcdTraffic(OpenShiftCheck):
return super(EtcdTraffic, self).is_active() and valid_group_names and valid_version
def run(self):
- is_containerized = self.get_var("openshift", "common", "is_containerized")
- unit = "etcd_container" if is_containerized else "etcd"
+ openshift_is_containerized = self.get_var("openshift_is_containerized")
+ unit = "etcd_container" if openshift_is_containerized else "etcd"
log_matchers = [{
"start_regexp": r"Starting Etcd Server",
diff --git a/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py b/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py
index 986a01f38..7f8c6ebdc 100644
--- a/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py
+++ b/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py
@@ -170,7 +170,7 @@ class Elasticsearch(LoggingCheck):
"""
errors = []
for pod_name in pods_by_name.keys():
- df_cmd = 'exec {} -- df --output=ipcent,pcent /elasticsearch/persistent'.format(pod_name)
+ df_cmd = '-c elasticsearch exec {} -- df --output=ipcent,pcent /elasticsearch/persistent'.format(pod_name)
disk_output = self.exec_oc(df_cmd, [], save_as_name='get_pv_diskspace.json')
lines = disk_output.splitlines()
# expecting one header looking like 'IUse% Use%' and one body line
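Assuming exec_oc prepends the oc client and kubeconfig, the pinned-container invocation now resembles oc exec with -c elasticsearch, and the parsing expects exactly one header line plus one body line (values illustrative):

    disk_output = "IUse% Use%\n  5%  12%\n"
    header, body = disk_output.splitlines()[:2]
    # body holds the inode and disk usage percentages for /elasticsearch/persistent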
diff --git a/roles/openshift_health_checker/openshift_checks/logging/kibana.py b/roles/openshift_health_checker/openshift_checks/logging/kibana.py
index 3b1cf8baa..16ec3a7f6 100644
--- a/roles/openshift_health_checker/openshift_checks/logging/kibana.py
+++ b/roles/openshift_health_checker/openshift_checks/logging/kibana.py
@@ -5,12 +5,11 @@ Module for performing checks on a Kibana logging deployment
import json
import ssl
-try:
- from urllib2 import HTTPError, URLError
- import urllib2
-except ImportError:
- from urllib.error import HTTPError, URLError
- import urllib.request as urllib2
+# pylint can't find the package when it's installed in a virtualenv
+# pylint: disable=import-error,no-name-in-module
+from ansible.module_utils.six.moves.urllib import request
+# pylint: disable=import-error,no-name-in-module
+from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
from openshift_checks.logging.logging import LoggingCheck, OpenShiftCheckException
@@ -65,7 +64,7 @@ class Kibana(LoggingCheck):
# Verify that the url is returning a valid response
try:
# We only care if the url connects and responds
- return_code = urllib2.urlopen(url, context=ctx).getcode()
+ return_code = request.urlopen(url, context=ctx).getcode()
except HTTPError as httperr:
return httperr.reason
except URLError as urlerr:
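The six.moves indirection resolves to urllib2 on Python 2 and urllib.request/urllib.error on Python 3, which removes the need for the old try/except import dance; the pattern in isolation (URL illustrative):

    from ansible.module_utils.six.moves.urllib import request
    from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError

    try:
        return_code = request.urlopen("https://kibana.example.com").getcode()
    except (HTTPError, URLError):
        return_code = None  # the check reports the error reason instead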
diff --git a/roles/openshift_health_checker/openshift_checks/mixins.py b/roles/openshift_health_checker/openshift_checks/mixins.py
index cfbdea303..567162be1 100644
--- a/roles/openshift_health_checker/openshift_checks/mixins.py
+++ b/roles/openshift_health_checker/openshift_checks/mixins.py
@@ -10,8 +10,8 @@ class NotContainerizedMixin(object):
def is_active(self):
"""Only run on non-containerized hosts."""
- is_containerized = self.get_var("openshift", "common", "is_containerized")
- return super(NotContainerizedMixin, self).is_active() and not is_containerized
+ openshift_is_containerized = self.get_var("openshift_is_containerized")
+ return super(NotContainerizedMixin, self).is_active() and not openshift_is_containerized
class DockerHostMixin(object):
@@ -23,7 +23,7 @@ class DockerHostMixin(object):
"""Only run on hosts that depend on Docker."""
group_names = set(self.get_var("group_names", default=[]))
needs_docker = set(["oo_nodes_to_config"])
- if self.get_var("openshift.common.is_containerized"):
+ if self.get_var("openshift_is_containerized"):
needs_docker.update(["oo_masters_to_config", "oo_etcd_to_config"])
return super(DockerHostMixin, self).is_active() and bool(group_names.intersection(needs_docker))
@@ -33,7 +33,7 @@ class DockerHostMixin(object):
(which would not be able to install but should already have them).
Returns: msg, failed
"""
- if self.get_var("openshift", "common", "is_atomic"):
+ if self.get_var("openshift_is_atomic"):
return "", False
# NOTE: we would use the "package" module but it's actually an action plugin
diff --git a/roles/openshift_health_checker/openshift_checks/ovs_version.py b/roles/openshift_health_checker/openshift_checks/ovs_version.py
index 0cad19842..58a2692bd 100644
--- a/roles/openshift_health_checker/openshift_checks/ovs_version.py
+++ b/roles/openshift_health_checker/openshift_checks/ovs_version.py
@@ -3,7 +3,7 @@ Ansible module for determining if an installed version of Open vSwitch is incomp
currently installed version of OpenShift.
"""
-from openshift_checks import OpenShiftCheck, OpenShiftCheckException
+from openshift_checks import OpenShiftCheck
from openshift_checks.mixins import NotContainerizedMixin
@@ -16,10 +16,12 @@ class OvsVersion(NotContainerizedMixin, OpenShiftCheck):
tags = ["health"]
openshift_to_ovs_version = {
- "3.7": ["2.6", "2.7", "2.8"],
- "3.6": ["2.6", "2.7", "2.8"],
- "3.5": ["2.6", "2.7"],
- "3.4": "2.4",
+ (3, 4): "2.4",
+ (3, 5): ["2.6", "2.7"],
+ (3, 6): ["2.6", "2.7", "2.8"],
+ (3, 7): ["2.6", "2.7", "2.8"],
+ (3, 8): ["2.6", "2.7", "2.8"],
+ (3, 9): ["2.6", "2.7", "2.8"],
}
def is_active(self):
@@ -40,16 +42,5 @@ class OvsVersion(NotContainerizedMixin, OpenShiftCheck):
return self.execute_module("rpm_version", args)
def get_required_ovs_version(self):
- """Return the correct Open vSwitch version for the current OpenShift version"""
- openshift_version_tuple = self.get_major_minor_version(self.get_var("openshift_image_tag"))
-
- if openshift_version_tuple < (3, 5):
- return self.openshift_to_ovs_version["3.4"]
-
- openshift_version = ".".join(str(x) for x in openshift_version_tuple)
- ovs_version = self.openshift_to_ovs_version.get(openshift_version)
- if ovs_version:
- return self.openshift_to_ovs_version[openshift_version]
-
- msg = "There is no recommended version of Open vSwitch for the current version of OpenShift: {}"
- raise OpenShiftCheckException(msg.format(openshift_version))
+ """Return the correct Open vSwitch version(s) for the current OpenShift version."""
+ return self.get_required_version("Open vSwitch", self.openshift_to_ovs_version)
diff --git a/roles/openshift_health_checker/openshift_checks/package_version.py b/roles/openshift_health_checker/openshift_checks/package_version.py
index f3a628e28..28aee8b35 100644
--- a/roles/openshift_health_checker/openshift_checks/package_version.py
+++ b/roles/openshift_health_checker/openshift_checks/package_version.py
@@ -1,8 +1,6 @@
"""Check that available RPM packages match the required versions."""
-import re
-
-from openshift_checks import OpenShiftCheck, OpenShiftCheckException
+from openshift_checks import OpenShiftCheck
from openshift_checks.mixins import NotContainerizedMixin
@@ -18,6 +16,8 @@ class PackageVersion(NotContainerizedMixin, OpenShiftCheck):
(3, 5): ["2.6", "2.7"],
(3, 6): ["2.6", "2.7", "2.8"],
(3, 7): ["2.6", "2.7", "2.8"],
+ (3, 8): ["2.6", "2.7", "2.8"],
+ (3, 9): ["2.6", "2.7", "2.8"],
}
openshift_to_docker_version = {
@@ -27,11 +27,9 @@ class PackageVersion(NotContainerizedMixin, OpenShiftCheck):
(3, 4): "1.12",
(3, 5): "1.12",
(3, 6): "1.12",
- }
-
- # map major OpenShift release versions across releases to a common major version
- map_major_release_version = {
- 1: 3,
+ (3, 7): "1.12",
+ (3, 8): "1.12",
+ (3, 9): ["1.12", "1.13"],
}
def is_active(self):
@@ -83,48 +81,8 @@ class PackageVersion(NotContainerizedMixin, OpenShiftCheck):
def get_required_ovs_version(self):
"""Return the correct Open vSwitch version(s) for the current OpenShift version."""
- openshift_version = self.get_openshift_version_tuple()
-
- earliest = min(self.openshift_to_ovs_version)
- latest = max(self.openshift_to_ovs_version)
- if openshift_version < earliest:
- return self.openshift_to_ovs_version[earliest]
- if openshift_version > latest:
- return self.openshift_to_ovs_version[latest]
-
- ovs_version = self.openshift_to_ovs_version.get(openshift_version)
- if not ovs_version:
- msg = "There is no recommended version of Open vSwitch for the current version of OpenShift: {}"
- raise OpenShiftCheckException(msg.format(".".join(str(comp) for comp in openshift_version)))
-
- return ovs_version
+ return self.get_required_version("Open vSwitch", self.openshift_to_ovs_version)
def get_required_docker_version(self):
"""Return the correct Docker version(s) for the current OpenShift version."""
- openshift_version = self.get_openshift_version_tuple()
-
- earliest = min(self.openshift_to_docker_version)
- latest = max(self.openshift_to_docker_version)
- if openshift_version < earliest:
- return self.openshift_to_docker_version[earliest]
- if openshift_version > latest:
- return self.openshift_to_docker_version[latest]
-
- docker_version = self.openshift_to_docker_version.get(openshift_version)
- if not docker_version:
- msg = "There is no recommended version of Docker for the current version of OpenShift: {}"
- raise OpenShiftCheckException(msg.format(".".join(str(comp) for comp in openshift_version)))
-
- return docker_version
-
- def get_openshift_version_tuple(self):
- """Return received image tag as a normalized (X, Y) minor version tuple."""
- version = self.get_var("openshift_image_tag")
- comps = [int(component) for component in re.findall(r'\d+', version)]
-
- if len(comps) < 2:
- msg = "An invalid version of OpenShift was found for this host: {}"
- raise OpenShiftCheckException(msg.format(version))
-
- comps[0] = self.map_major_release_version.get(comps[0], comps[0])
- return tuple(comps[0:2])
+ return self.get_required_version("Docker", self.openshift_to_docker_version)
diff --git a/roles/openshift_health_checker/test/docker_image_availability_test.py b/roles/openshift_health_checker/test/docker_image_availability_test.py
index fc333dfd4..9fd6e049d 100644
--- a/roles/openshift_health_checker/test/docker_image_availability_test.py
+++ b/roles/openshift_health_checker/test/docker_image_availability_test.py
@@ -6,13 +6,8 @@ from openshift_checks.docker_image_availability import DockerImageAvailability,
@pytest.fixture()
def task_vars():
return dict(
- openshift=dict(
- common=dict(
- is_containerized=False,
- is_atomic=False,
- ),
- docker=dict(),
- ),
+ openshift_is_atomic=False,
+ openshift_is_containerized=False,
openshift_service_type='origin',
openshift_deployment_type='origin',
openshift_image_tag='',
@@ -20,7 +15,7 @@ def task_vars():
)
-@pytest.mark.parametrize('deployment_type, is_containerized, group_names, expect_active', [
+@pytest.mark.parametrize('deployment_type, openshift_is_containerized, group_names, expect_active', [
("invalid", True, [], False),
("", True, [], False),
("origin", False, [], False),
@@ -30,20 +25,20 @@ def task_vars():
("origin", True, ["nfs"], False),
("openshift-enterprise", True, ["lb"], False),
])
-def test_is_active(task_vars, deployment_type, is_containerized, group_names, expect_active):
+def test_is_active(task_vars, deployment_type, openshift_is_containerized, group_names, expect_active):
task_vars['openshift_deployment_type'] = deployment_type
- task_vars['openshift']['common']['is_containerized'] = is_containerized
+ task_vars['openshift_is_containerized'] = openshift_is_containerized
task_vars['group_names'] = group_names
assert DockerImageAvailability(None, task_vars).is_active() == expect_active
-@pytest.mark.parametrize("is_containerized,is_atomic", [
+@pytest.mark.parametrize("openshift_is_containerized,openshift_is_atomic", [
(True, True),
(False, False),
(True, False),
(False, True),
])
-def test_all_images_available_locally(task_vars, is_containerized, is_atomic):
+def test_all_images_available_locally(task_vars, openshift_is_containerized, openshift_is_atomic):
def execute_module(module_name, module_args, *_):
if module_name == "yum":
return {}
@@ -55,8 +50,8 @@ def test_all_images_available_locally(task_vars, is_containerized, is_atomic):
'images': [module_args['name']],
}
- task_vars['openshift']['common']['is_containerized'] = is_containerized
- task_vars['openshift']['common']['is_atomic'] = is_atomic
+ task_vars['openshift_is_containerized'] = openshift_is_containerized
+ task_vars['openshift_is_atomic'] = openshift_is_atomic
result = DockerImageAvailability(execute_module, task_vars).run()
assert not result.get('failed', False)
@@ -172,7 +167,7 @@ def test_registry_availability(image, registries, connection_test_failed, skopeo
assert expect_registries_reached == check.reachable_registries
-@pytest.mark.parametrize("deployment_type, is_containerized, groups, oreg_url, expected", [
+@pytest.mark.parametrize("deployment_type, openshift_is_containerized, groups, oreg_url, expected", [
( # standard set of stuff required on nodes
"origin", False, ['oo_nodes_to_config'], "",
set([
@@ -232,14 +227,10 @@ def test_registry_availability(image, registries, connection_test_failed, skopeo
),
])
-def test_required_images(deployment_type, is_containerized, groups, oreg_url, expected):
+def test_required_images(deployment_type, openshift_is_containerized, groups, oreg_url, expected):
task_vars = dict(
- openshift=dict(
- common=dict(
- is_containerized=is_containerized,
- is_atomic=False,
- ),
- ),
+ openshift_is_containerized=openshift_is_containerized,
+ openshift_is_atomic=False,
openshift_deployment_type=deployment_type,
group_names=groups,
oreg_url=oreg_url,
@@ -287,11 +278,7 @@ def test_registry_console_image(task_vars, expected):
def test_containerized_etcd():
task_vars = dict(
- openshift=dict(
- common=dict(
- is_containerized=True,
- ),
- ),
+ openshift_is_containerized=True,
openshift_deployment_type="origin",
group_names=['oo_etcd_to_config'],
)
diff --git a/roles/openshift_health_checker/test/docker_storage_test.py b/roles/openshift_health_checker/test/docker_storage_test.py
index 8fa68c378..33a5dd90a 100644
--- a/roles/openshift_health_checker/test/docker_storage_test.py
+++ b/roles/openshift_health_checker/test/docker_storage_test.py
@@ -4,21 +4,21 @@ from openshift_checks import OpenShiftCheckException
from openshift_checks.docker_storage import DockerStorage
-@pytest.mark.parametrize('is_containerized, group_names, is_active', [
+@pytest.mark.parametrize('openshift_is_containerized, group_names, is_active', [
(False, ["oo_masters_to_config", "oo_etcd_to_config"], False),
(False, ["oo_masters_to_config", "oo_nodes_to_config"], True),
(True, ["oo_etcd_to_config"], True),
])
-def test_is_active(is_containerized, group_names, is_active):
+def test_is_active(openshift_is_containerized, group_names, is_active):
task_vars = dict(
- openshift=dict(common=dict(is_containerized=is_containerized)),
+ openshift_is_containerized=openshift_is_containerized,
group_names=group_names,
)
assert DockerStorage(None, task_vars).is_active() == is_active
def non_atomic_task_vars():
- return {"openshift": {"common": {"is_atomic": False}}}
+ return {"openshift_is_atomic": False}
@pytest.mark.parametrize('docker_info, failed, expect_msg', [
diff --git a/roles/openshift_health_checker/test/etcd_traffic_test.py b/roles/openshift_health_checker/test/etcd_traffic_test.py
index a29dc166b..583c4c8dd 100644
--- a/roles/openshift_health_checker/test/etcd_traffic_test.py
+++ b/roles/openshift_health_checker/test/etcd_traffic_test.py
@@ -36,9 +36,7 @@ def test_log_matches_high_traffic_msg(group_names, matched, failed, extra_words)
task_vars = dict(
group_names=group_names,
- openshift=dict(
- common=dict(is_containerized=False),
- ),
+ openshift_is_containerized=False,
openshift_service_type="origin"
)
@@ -50,15 +48,13 @@ def test_log_matches_high_traffic_msg(group_names, matched, failed, extra_words)
assert result.get("failed", False) == failed
-@pytest.mark.parametrize('is_containerized,expected_unit_value', [
+@pytest.mark.parametrize('openshift_is_containerized,expected_unit_value', [
(False, "etcd"),
(True, "etcd_container"),
])
-def test_systemd_unit_matches_deployment_type(is_containerized, expected_unit_value):
+def test_systemd_unit_matches_deployment_type(openshift_is_containerized, expected_unit_value):
task_vars = dict(
- openshift=dict(
- common=dict(is_containerized=is_containerized),
- )
+ openshift_is_containerized=openshift_is_containerized
)
def execute_module(module_name, args, *_):
diff --git a/roles/openshift_health_checker/test/kibana_test.py b/roles/openshift_health_checker/test/kibana_test.py
index 04a5e89c4..750d4b9e9 100644
--- a/roles/openshift_health_checker/test/kibana_test.py
+++ b/roles/openshift_health_checker/test/kibana_test.py
@@ -1,12 +1,10 @@
import pytest
import json
-try:
- import urllib2
- from urllib2 import HTTPError, URLError
-except ImportError:
- from urllib.error import HTTPError, URLError
- import urllib.request as urllib2
+# pylint can't find the package when it's installed in a virtualenv
+from ansible.module_utils.six.moves.urllib import request # pylint: disable=import-error
+# pylint: disable=import-error
+from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
from openshift_checks.logging.kibana import Kibana, OpenShiftCheckException
@@ -202,7 +200,7 @@ def test_verify_url_external_failure(lib_result, expect, monkeypatch):
if type(lib_result) is int:
return _http_return(lib_result)
raise lib_result
- monkeypatch.setattr(urllib2, 'urlopen', urlopen)
+ monkeypatch.setattr(request, 'urlopen', urlopen)
check = Kibana()
check._get_kibana_url = lambda: 'url'
diff --git a/roles/openshift_health_checker/test/mixins_test.py b/roles/openshift_health_checker/test/mixins_test.py
index b1a41ca3c..b5d6f2e95 100644
--- a/roles/openshift_health_checker/test/mixins_test.py
+++ b/roles/openshift_health_checker/test/mixins_test.py
@@ -10,8 +10,8 @@ class NotContainerizedCheck(NotContainerizedMixin, OpenShiftCheck):
@pytest.mark.parametrize('task_vars,expected', [
- (dict(openshift=dict(common=dict(is_containerized=False))), True),
- (dict(openshift=dict(common=dict(is_containerized=True))), False),
+ (dict(openshift_is_containerized=False), True),
+ (dict(openshift_is_containerized=True), False),
])
def test_is_active(task_vars, expected):
assert NotContainerizedCheck(None, task_vars).is_active() == expected
@@ -20,4 +20,4 @@ def test_is_active(task_vars, expected):
def test_is_active_missing_task_vars():
with pytest.raises(OpenShiftCheckException) as excinfo:
NotContainerizedCheck().is_active()
- assert 'is_containerized' in str(excinfo.value)
+ assert 'openshift_is_containerized' in str(excinfo.value)
diff --git a/roles/openshift_health_checker/test/ovs_version_test.py b/roles/openshift_health_checker/test/ovs_version_test.py
index dd98ff4d8..80c7a0541 100644
--- a/roles/openshift_health_checker/test/ovs_version_test.py
+++ b/roles/openshift_health_checker/test/ovs_version_test.py
@@ -1,26 +1,7 @@
import pytest
-from openshift_checks.ovs_version import OvsVersion, OpenShiftCheckException
-
-
-def test_openshift_version_not_supported():
- def execute_module(*_):
- return {}
-
- openshift_release = '111.7.0'
-
- task_vars = dict(
- openshift=dict(common=dict()),
- openshift_release=openshift_release,
- openshift_image_tag='v' + openshift_release,
- openshift_deployment_type='origin',
- openshift_service_type='origin'
- )
-
- with pytest.raises(OpenShiftCheckException) as excinfo:
- OvsVersion(execute_module, task_vars).run()
-
- assert "no recommended version of Open vSwitch" in str(excinfo.value)
+from openshift_checks.ovs_version import OvsVersion
+from openshift_checks import OpenShiftCheckException
def test_invalid_openshift_release_format():
@@ -70,7 +51,7 @@ def test_ovs_package_version(openshift_release, expected_ovs_version):
assert result is return_value
-@pytest.mark.parametrize('group_names,is_containerized,is_active', [
+@pytest.mark.parametrize('group_names,openshift_is_containerized,is_active', [
(['oo_masters_to_config'], False, True),
# ensure check is skipped on containerized installs
(['oo_masters_to_config'], True, False),
@@ -82,9 +63,9 @@ def test_ovs_package_version(openshift_release, expected_ovs_version):
(['lb'], False, False),
(['nfs'], False, False),
])
-def test_ovs_version_skip_when_not_master_nor_node(group_names, is_containerized, is_active):
+def test_ovs_version_skip_when_not_master_nor_node(group_names, openshift_is_containerized, is_active):
task_vars = dict(
group_names=group_names,
- openshift=dict(common=dict(is_containerized=is_containerized)),
+ openshift_is_containerized=openshift_is_containerized,
)
assert OvsVersion(None, task_vars).is_active() == is_active
diff --git a/roles/openshift_health_checker/test/package_availability_test.py b/roles/openshift_health_checker/test/package_availability_test.py
index a1e6e0879..52740093d 100644
--- a/roles/openshift_health_checker/test/package_availability_test.py
+++ b/roles/openshift_health_checker/test/package_availability_test.py
@@ -3,16 +3,16 @@ import pytest
from openshift_checks.package_availability import PackageAvailability
-@pytest.mark.parametrize('pkg_mgr,is_containerized,is_active', [
+@pytest.mark.parametrize('pkg_mgr,openshift_is_containerized,is_active', [
('yum', False, True),
('yum', True, False),
('dnf', True, False),
('dnf', False, False),
])
-def test_is_active(pkg_mgr, is_containerized, is_active):
+def test_is_active(pkg_mgr, openshift_is_containerized, is_active):
task_vars = dict(
ansible_pkg_mgr=pkg_mgr,
- openshift=dict(common=dict(is_containerized=is_containerized)),
+ openshift_is_containerized=openshift_is_containerized,
)
assert PackageAvailability(None, task_vars).is_active() == is_active
diff --git a/roles/openshift_health_checker/test/package_version_test.py b/roles/openshift_health_checker/test/package_version_test.py
index ea8e02b97..868b4bd12 100644
--- a/roles/openshift_health_checker/test/package_version_test.py
+++ b/roles/openshift_health_checker/test/package_version_test.py
@@ -1,6 +1,7 @@
import pytest
-from openshift_checks.package_version import PackageVersion, OpenShiftCheckException
+from openshift_checks.package_version import PackageVersion
+from openshift_checks import OpenShiftCheckException
def task_vars_for(openshift_release, deployment_type):
@@ -18,7 +19,7 @@ def task_vars_for(openshift_release, deployment_type):
def test_openshift_version_not_supported():
check = PackageVersion(None, task_vars_for("1.2.3", 'origin'))
- check.get_openshift_version_tuple = lambda: (3, 4, 1) # won't be in the dict
+ check.get_major_minor_version = lambda: (3, 4, 1) # won't be in the dict
with pytest.raises(OpenShiftCheckException) as excinfo:
check.get_required_ovs_version()
@@ -99,7 +100,7 @@ def test_docker_package_version(deployment_type, openshift_release, expected_doc
assert result == return_value
-@pytest.mark.parametrize('group_names,is_containerized,is_active', [
+@pytest.mark.parametrize('group_names,openshift_is_containerized,is_active', [
(['oo_masters_to_config'], False, True),
# ensure check is skipped on containerized installs
(['oo_masters_to_config'], True, False),
@@ -111,9 +112,9 @@ def test_docker_package_version(deployment_type, openshift_release, expected_doc
(['lb'], False, False),
(['nfs'], False, False),
])
-def test_package_version_skip_when_not_master_nor_node(group_names, is_containerized, is_active):
+def test_package_version_skip_when_not_master_nor_node(group_names, openshift_is_containerized, is_active):
task_vars = dict(
group_names=group_names,
- openshift=dict(common=dict(is_containerized=is_containerized)),
+ openshift_is_containerized=openshift_is_containerized,
)
assert PackageVersion(None, task_vars).is_active() == is_active
diff --git a/roles/openshift_hosted/defaults/main.yml b/roles/openshift_hosted/defaults/main.yml
index b6501d288..f40085976 100644
--- a/roles/openshift_hosted/defaults/main.yml
+++ b/roles/openshift_hosted/defaults/main.yml
@@ -69,7 +69,7 @@ r_openshift_hosted_router_os_firewall_allow: []
############
openshift_hosted_registry_selector: "{{ openshift_registry_selector | default(openshift_hosted_infra_selector) }}"
-penshift_hosted_registry_registryurl: "{{ openshift_hosted_images_dict[openshift_deployment_type] }}"
+openshift_hosted_registry_registryurl: "{{ openshift_hosted_images_dict[openshift_deployment_type] }}"
openshift_hosted_registry_routecertificates: {}
openshift_hosted_registry_routetermination: "passthrough"
diff --git a/roles/openshift_hosted/meta/main.yml b/roles/openshift_hosted/meta/main.yml
index ac9e241a5..ace2d15b0 100644
--- a/roles/openshift_hosted/meta/main.yml
+++ b/roles/openshift_hosted/meta/main.yml
@@ -14,4 +14,4 @@ galaxy_info:
dependencies:
- role: openshift_facts
- role: lib_openshift
-- role: lib_os_firewall
+- role: lib_utils
diff --git a/roles/openshift_hosted/tasks/main.yml b/roles/openshift_hosted/tasks/main.yml
index d306adf42..57f59f872 100644
--- a/roles/openshift_hosted/tasks/main.yml
+++ b/roles/openshift_hosted/tasks/main.yml
@@ -1,6 +1,6 @@
---
-# This role is intended to be used with include_role.
-# include_role:
+# This role is intended to be used with import_role.
+# import_role:
# name: openshift_hosted
# tasks_from: "{{ item }}"
# with_items:
diff --git a/roles/openshift_hosted/tasks/registry.yml b/roles/openshift_hosted/tasks/registry.yml
index 429f0c514..22294e3d4 100644
--- a/roles/openshift_hosted/tasks/registry.yml
+++ b/roles/openshift_hosted/tasks/registry.yml
@@ -1,10 +1,4 @@
---
-- name: Create temp directory for doing work in
- command: mktemp -d /tmp/openshift-hosted-ansible-XXXXXX
- register: mktempHosted
- changed_when: False
- check_mode: no
-
- name: setup firewall
import_tasks: firewall.yml
vars:
@@ -132,25 +126,10 @@
edits: "{{ openshift_hosted_registry_edits }}"
force: "{{ True|bool in openshift_hosted_registry_force }}"
+# TODO(michaelgugino) remove this set_fact. It is currently necessary because
+# the custom module does not properly template variables.
- name: setup registry list
set_fact:
r_openshift_hosted_registry_list:
- name: "{{ openshift_hosted_registry_name }}"
namespace: "{{ openshift_hosted_registry_namespace }}"
-
-- name: Wait for pod (Registry)
- include_tasks: wait_for_pod.yml
- vars:
- l_openshift_hosted_wait_for_pod: "{{ openshift_hosted_registry_wait }}"
- l_openshift_hosted_wfp_items: "{{ r_openshift_hosted_registry_list }}"
-
-- include_tasks: storage/glusterfs.yml
- when:
- - openshift_hosted_registry_storage_kind | default(none) == 'glusterfs' or openshift_hosted_registry_storage_glusterfs_swap
-
-- name: Delete temp directory
- file:
- name: "{{ mktempHosted.stdout }}"
- state: absent
- changed_when: False
- check_mode: no
diff --git a/roles/openshift_hosted/tasks/registry_storage.yml b/roles/openshift_hosted/tasks/registry_storage.yml
new file mode 100644
index 000000000..aa66a7867
--- /dev/null
+++ b/roles/openshift_hosted/tasks/registry_storage.yml
@@ -0,0 +1,4 @@
+---
+- include_tasks: storage/glusterfs.yml
+ when:
+ - openshift_hosted_registry_storage_kind | default(none) == 'glusterfs' or openshift_hosted_registry_storage_glusterfs_swap
diff --git a/roles/openshift_hosted/tasks/router.yml b/roles/openshift_hosted/tasks/router.yml
index 4e9219477..c2be00d19 100644
--- a/roles/openshift_hosted/tasks/router.yml
+++ b/roles/openshift_hosted/tasks/router.yml
@@ -18,6 +18,7 @@
- name: set_fact replicas
set_fact:
+ # get_router_replicas is a custom filter in role lib_utils
replicas: "{{ openshift_hosted_router_replicas | default(None) | get_router_replicas(router_nodes) }}"
- name: Get the certificate contents for router
@@ -25,10 +26,10 @@
backup: True
dest: "/etc/origin/master/{{ item | basename }}"
src: "{{ item }}"
- with_items: "{{ openshift_hosted_routers | oo_collect(attribute='certificate') |
- oo_select_keys_from_list(['keyfile', 'certfile', 'cafile']) }}"
+ with_items: "{{ openshift_hosted_routers | lib_utils_oo_collect(attribute='certificate') |
+ lib_utils_oo_select_keys_from_list(['keyfile', 'certfile', 'cafile']) }}"
when: ( not openshift_hosted_router_create_certificate | bool ) or openshift_hosted_router_certificate != {} or
- ( openshift_hosted_routers | oo_collect(attribute='certificate') | oo_select_keys_from_list(['keyfile', 'certfile', 'cafile'])|length > 0 )
+ ( openshift_hosted_routers | lib_utils_oo_collect(attribute='certificate') | lib_utils_oo_select_keys_from_list(['keyfile', 'certfile', 'cafile'])|length > 0 )
# This is for when we desire a cluster signed cert
@@ -55,7 +56,7 @@
when:
- openshift_hosted_router_create_certificate | bool
- openshift_hosted_router_certificate == {}
- - openshift_hosted_routers | oo_collect(attribute='certificate') | oo_select_keys_from_list(['keyfile', 'certfile', 'cafile'])|length == 0
+ - openshift_hosted_routers | lib_utils_oo_collect(attribute='certificate') | lib_utils_oo_select_keys_from_list(['keyfile', 'certfile', 'cafile'])|length == 0
- name: Create the router service account(s)
oc_serviceaccount:
@@ -98,9 +99,3 @@
ports: "{{ item.ports }}"
stats_port: "{{ item.stats_port }}"
with_items: "{{ openshift_hosted_routers }}"
-
-- name: Wait for pod (Routers)
- include_tasks: wait_for_pod.yml
- vars:
- l_openshift_hosted_wait_for_pod: "{{ openshift_hosted_router_wait }}"
- l_openshift_hosted_wfp_items: "{{ openshift_hosted_routers }}"
diff --git a/roles/openshift_hosted/tasks/storage/glusterfs.yml b/roles/openshift_hosted/tasks/storage/glusterfs.yml
index 18b2edcc6..b39c44b01 100644
--- a/roles/openshift_hosted/tasks/storage/glusterfs.yml
+++ b/roles/openshift_hosted/tasks/storage/glusterfs.yml
@@ -17,7 +17,7 @@
until:
- "registry_pods.results.results[0]['items'] | count > 0"
# There must be as many matching pods with 'Ready' status True as there are expected replicas
- - "registry_pods.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == openshift_hosted_registry_replicas | default(l_default_replicas) | int"
+ - "registry_pods.results.results[0]['items'] | lib_utils_oo_collect(attribute='status.conditions') | lib_utils_oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == openshift_hosted_registry_replicas | default(l_default_replicas) | int"
delay: 10
retries: "{{ (600 / 10) | int }}"
diff --git a/roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml b/roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml
index bd7181c17..fef945d51 100644
--- a/roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml
+++ b/roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml
@@ -1,4 +1,10 @@
---
+- name: Create temp directory for doing work in
+ command: mktemp -d /tmp/openshift-hosted-ansible-XXXXXX
+ register: mktempHosted
+ changed_when: False
+ check_mode: no
+
- name: Generate GlusterFS registry endpoints
template:
src: "{{ openshift.common.examples_content_version }}/glusterfs-registry-endpoints.yml.j2"
@@ -10,7 +16,14 @@
dest: "{{ mktempHosted.stdout }}/glusterfs-registry-service.yml"
- name: Create GlusterFS registry service and endpoint
- command: "{{ openshift.common.client_binary }} apply -f {{ item }} -n {{ openshift_hosted_registry_namespace | default('default') }}"
+ command: "{{ openshift_client_binary }} apply -f {{ item }} -n {{ openshift_hosted_registry_namespace | default('default') }}"
with_items:
- "{{ mktempHosted.stdout }}/glusterfs-registry-service.yml"
- "{{ mktempHosted.stdout }}/glusterfs-registry-endpoints.yml"
+
+- name: Delete temp directory
+ file:
+ name: "{{ mktempHosted.stdout }}"
+ state: absent
+ changed_when: False
+ check_mode: no
diff --git a/roles/openshift_hosted/tasks/wait_for_pod.yml b/roles/openshift_hosted/tasks/wait_for_pod.yml
index 056c79334..a14b0febc 100644
--- a/roles/openshift_hosted/tasks/wait_for_pod.yml
+++ b/roles/openshift_hosted/tasks/wait_for_pod.yml
@@ -3,17 +3,17 @@
block:
- name: Ensure OpenShift pod correctly rolls out (best-effort today)
command: |
- {{ openshift.common.client_binary }} rollout status deploymentconfig {{ item.name }} \
+ {{ openshift_client_binary }} rollout status deploymentconfig {{ item.name }} \
--namespace {{ item.namespace | default('default') }} \
--config {{ openshift_master_config_dir }}/admin.kubeconfig
async: 600
- poll: 15
+ poll: 5
with_items: "{{ l_openshift_hosted_wfp_items }}"
failed_when: false
- name: Determine the latest version of the OpenShift pod deployment
command: |
- {{ openshift.common.client_binary }} get deploymentconfig {{ item.name }} \
+ {{ openshift_client_binary }} get deploymentconfig {{ item.name }} \
--namespace {{ item.namespace }} \
--config {{ openshift_master_config_dir }}/admin.kubeconfig \
-o jsonpath='{ .status.latestVersion }'
@@ -22,14 +22,14 @@
- name: Poll for OpenShift pod deployment success
command: |
- {{ openshift.common.client_binary }} get replicationcontroller {{ item.0.name }}-{{ item.1.stdout }} \
+ {{ openshift_client_binary }} get replicationcontroller {{ item.0.name }}-{{ item.1.stdout }} \
--namespace {{ item.0.namespace }} \
--config {{ openshift_master_config_dir }}/admin.kubeconfig \
-o jsonpath='{ .metadata.annotations.openshift\.io/deployment\.phase }'
register: openshift_hosted_wfp_rc_phase
until: "'Running' not in openshift_hosted_wfp_rc_phase.stdout"
- delay: 15
- retries: 40
+ delay: 5
+ retries: 60
failed_when: "'Failed' in openshift_hosted_wfp_rc_phase.stdout"
with_together:
- "{{ l_openshift_hosted_wfp_items }}"
diff --git a/roles/openshift_hosted_templates/defaults/main.yml b/roles/openshift_hosted_templates/defaults/main.yml
index f4fd15089..48d62c8df 100644
--- a/roles/openshift_hosted_templates/defaults/main.yml
+++ b/roles/openshift_hosted_templates/defaults/main.yml
@@ -1,5 +1,5 @@
---
-hosted_base: "{{ openshift.common.config_base if openshift.common.is_containerized | bool else '/usr/share/openshift' }}/hosted"
+hosted_base: "{{ openshift.common.config_base if openshift_is_containerized | bool else '/usr/share/openshift' }}/hosted"
hosted_deployment_type: "{{ 'origin' if openshift_deployment_type == 'origin' else 'enterprise' }}"
content_version: "{{ openshift.common.examples_content_version }}"
diff --git a/roles/openshift_hosted_templates/meta/main.yml b/roles/openshift_hosted_templates/meta/main.yml
index 4027f524b..d7cc1e288 100644
--- a/roles/openshift_hosted_templates/meta/main.yml
+++ b/roles/openshift_hosted_templates/meta/main.yml
@@ -11,4 +11,6 @@ galaxy_info:
- 7
categories:
- cloud
-dependencies: []
+dependencies:
+- role: lib_utils
+- role: openshift_facts
diff --git a/roles/openshift_hosted_templates/tasks/main.yml b/roles/openshift_hosted_templates/tasks/main.yml
index 89b92dfcc..34d39f3a5 100644
--- a/roles/openshift_hosted_templates/tasks/main.yml
+++ b/roles/openshift_hosted_templates/tasks/main.yml
@@ -1,20 +1,25 @@
---
- name: Create local temp dir for OpenShift hosted templates copy
local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX
- become: False
register: copy_hosted_templates_mktemp
run_once: True
# AUDIT:changed_when: not set here because this task actually
# creates something
+- name: Chmod local temp dir for OpenShift examples copy
+ local_action: command chmod 777 "{{ copy_hosted_templates_mktemp.stdout }}"
+ run_once: True
+
- name: Create tar of OpenShift examples
local_action: command tar -C "{{ role_path }}/files/{{ content_version }}/{{ hosted_deployment_type }}" -cvf "{{ copy_hosted_templates_mktemp.stdout }}/openshift-hosted-templates.tar" .
args:
# Disables the following warning:
# Consider using unarchive module rather than running tar
warn: no
- become: False
- register: copy_hosted_templates_tar
+
+- name: Chmod local tar of OpenShift examples
+ local_action: command chmod 744 "{{ copy_hosted_templates_mktemp.stdout }}/openshift-hosted-templates.tar"
+ run_once: True
- name: Create remote OpenShift hosted templates directory
file:
@@ -28,7 +33,6 @@
dest: "{{ hosted_base }}/"
- name: Cleanup the OpenShift hosted templates temp dir
- become: False
local_action: file dest="{{ copy_hosted_templates_mktemp.stdout }}" state=absent
- name: Modify registry paths if registry_url is not registry.access.redhat.com
@@ -52,7 +56,7 @@
- name: Create or update hosted templates
command: >
- {{ openshift.common.client_binary }} {{ openshift_hosted_templates_import_command }}
+ {{ openshift_client_binary }} {{ openshift_hosted_templates_import_command }}
-f {{ hosted_base }}
--config={{ openshift_hosted_templates_kubeconfig }}
-n openshift
diff --git a/roles/openshift_loadbalancer/defaults/main.yml b/roles/openshift_loadbalancer/defaults/main.yml
index f9c16ba40..d8c45fb33 100644
--- a/roles/openshift_loadbalancer/defaults/main.yml
+++ b/roles/openshift_loadbalancer/defaults/main.yml
@@ -2,6 +2,12 @@
r_openshift_loadbalancer_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
r_openshift_loadbalancer_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
+openshift_router_image_default_dict:
+ origin: 'openshift/origin-haproxy-router'
+ openshift-enterprise: 'openshift3/ose-haproxy-router'
+openshift_router_image_default: "{{ openshift_router_image_default_dict[openshift_deployment_type] }}"
+openshift_router_image: "{{ openshift_router_image_default }}"
+
haproxy_frontends:
- name: main
binds:
@@ -26,7 +32,7 @@ r_openshift_loadbalancer_os_firewall_allow:
port: "{{ nuage_mon_rest_server_port | default(9443) }}/tcp"
cond: "{{ r_openshift_lb_use_nuage | bool }}"
-openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False)) else 'docker' }}"
+openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False) | bool) else 'docker' }}"
# NOTE
# r_openshift_lb_use_nuage_default may be defined external to this role.
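A minimal Python sketch of the lookup the new defaults perform (the deployment type selects the default image, and openshift_router_image can still be overridden in inventory):

    openshift_router_image_default_dict = {
        "origin": "openshift/origin-haproxy-router",
        "openshift-enterprise": "openshift3/ose-haproxy-router",
    }
    image = openshift_router_image_default_dict["origin"]
    # -> 'openshift/origin-haproxy-router'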
diff --git a/roles/openshift_loadbalancer/meta/main.yml b/roles/openshift_loadbalancer/meta/main.yml
index 72298b599..3b5b45c5f 100644
--- a/roles/openshift_loadbalancer/meta/main.yml
+++ b/roles/openshift_loadbalancer/meta/main.yml
@@ -10,5 +10,5 @@ galaxy_info:
versions:
- 7
dependencies:
-- role: lib_os_firewall
+- role: lib_utils
- role: openshift_facts
diff --git a/roles/openshift_loadbalancer/tasks/main.yml b/roles/openshift_loadbalancer/tasks/main.yml
index 7d23ea6c8..4a11029ab 100644
--- a/roles/openshift_loadbalancer/tasks/main.yml
+++ b/roles/openshift_loadbalancer/tasks/main.yml
@@ -4,33 +4,33 @@
- name: Install haproxy
package: name=haproxy state=present
- when: not openshift.common.is_containerized | bool
+ when: not openshift_is_containerized | bool
register: result
until: result is succeeded
- name: Pull haproxy image
command: >
- docker pull {{ openshift.common.router_image }}:{{ openshift_image_tag }}
- when: openshift.common.is_containerized | bool
+ docker pull {{ openshift_router_image }}:{{ openshift_image_tag }}
+ when: openshift_is_containerized | bool
- name: Create config directory for haproxy
file:
path: /etc/haproxy
state: directory
- when: openshift.common.is_containerized | bool
+ when: openshift_is_containerized | bool
- name: Create the systemd unit files
template:
src: "haproxy.docker.service.j2"
dest: "/etc/systemd/system/haproxy.service"
- when: openshift.common.is_containerized | bool
+ when: openshift_is_containerized | bool
notify: restart haproxy
- name: Configure systemd service directory for haproxy
file:
path: /etc/systemd/system/haproxy.service.d
state: directory
- when: not openshift.common.is_containerized | bool
+ when: not openshift_is_containerized | bool
# Work around ini_file create option in 2.2 which defaults to no
- name: Create limits.conf file
@@ -41,7 +41,7 @@
owner: root
group: root
changed_when: false
- when: not openshift.common.is_containerized | bool
+ when: not openshift_is_containerized | bool
- name: Configure the nofile limits for haproxy
ini_file:
@@ -50,7 +50,7 @@
option: LimitNOFILE
value: "{{ openshift_loadbalancer_limit_nofile | default(100000) }}"
notify: restart haproxy
- when: not openshift.common.is_containerized | bool
+ when: not openshift_is_containerized | bool
- name: Configure haproxy
template:
diff --git a/roles/openshift_loadbalancer/templates/haproxy.cfg.j2 b/roles/openshift_loadbalancer/templates/haproxy.cfg.j2
index 24fd635ec..de5a8d7c2 100644
--- a/roles/openshift_loadbalancer/templates/haproxy.cfg.j2
+++ b/roles/openshift_loadbalancer/templates/haproxy.cfg.j2
@@ -3,7 +3,7 @@
global
maxconn {{ openshift_loadbalancer_global_maxconn | default(20000) }}
log /dev/log local0 info
-{% if openshift.common.is_containerized | bool %}
+{% if openshift_is_containerized | bool %}
stats socket /var/lib/haproxy/run/haproxy.sock mode 600 level admin
{% else %}
chroot /var/lib/haproxy
diff --git a/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2 b/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2
index 0343a7eb0..90111449c 100644
--- a/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2
+++ b/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2
@@ -5,7 +5,7 @@ PartOf={{ openshift_docker_service_name }}.service
[Service]
ExecStartPre=-/usr/bin/docker rm -f openshift_loadbalancer
-ExecStart=/usr/bin/docker run --rm --name openshift_loadbalancer {% for frontend in openshift_loadbalancer_frontends %} {% for bind in frontend.binds %} -p {{ bind |regex_replace('^[^:]*:(\d+).*$', '\\1') }}:{{ bind |regex_replace('^[^:]*:(\d+).*$', '\\1') }} {% endfor %} {% endfor %} -v /etc/haproxy/haproxy.cfg:/etc/haproxy/haproxy.cfg:ro --entrypoint=haproxy {{ openshift.common.router_image }}:{{ openshift_image_tag }} -f /etc/haproxy/haproxy.cfg
+ExecStart=/usr/bin/docker run --rm --name openshift_loadbalancer {% for frontend in openshift_loadbalancer_frontends %} {% for bind in frontend.binds %} -p {{ bind |regex_replace('^[^:]*:(\d+).*$', '\\1') }}:{{ bind |regex_replace('^[^:]*:(\d+).*$', '\\1') }} {% endfor %} {% endfor %} -v /etc/haproxy/haproxy.cfg:/etc/haproxy/haproxy.cfg:ro --entrypoint=haproxy {{ openshift_router_image }}:{{ openshift_image_tag }} -f /etc/haproxy/haproxy.cfg
ExecStartPost=/usr/bin/sleep 10
ExecStop=/usr/bin/docker stop openshift_loadbalancer
LimitNOFILE={{ openshift_loadbalancer_limit_nofile | default(100000) }}
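The bind-to-port extraction used for the -p flags can be verified in isolation; the same regex in Python (bind strings illustrative):

    import re
    re.sub(r'^[^:]*:(\d+).*$', r'\1', '*:8443')           # -> '8443'
    re.sub(r'^[^:]*:(\d+).*$', r'\1', '0.0.0.0:443 ssl')  # -> '443'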
diff --git a/roles/openshift_logging/README.md b/roles/openshift_logging/README.md
index 27cfc17d6..a192bd67e 100644
--- a/roles/openshift_logging/README.md
+++ b/roles/openshift_logging/README.md
@@ -177,6 +177,9 @@ Elasticsearch OPS too, if using an OPS cluster:
clients will use to connect to mux, and will be used in the TLS server cert
subject.
- `openshift_logging_mux_port`: 24284
+- `openshift_logging_mux_external_address`: The IP address that mux will listen
+ on for connections from *external* clients. Defaults to the address of the
+ default ipv4 interface as reported by the `ansible_default_ipv4` fact.
- `openshift_logging_mux_cpu_request`: 100m
- `openshift_logging_mux_memory_limit`: 512Mi
- `openshift_logging_mux_default_namespaces`: Default `["mux-undefined"]` - the
diff --git a/roles/openshift_logging/filter_plugins/openshift_logging.py b/roles/openshift_logging/filter_plugins/openshift_logging.py
index e1a5ea726..247c7e4df 100644
--- a/roles/openshift_logging/filter_plugins/openshift_logging.py
+++ b/roles/openshift_logging/filter_plugins/openshift_logging.py
@@ -79,14 +79,6 @@ def entry_from_named_pair(register_pairs, key):
raise RuntimeError("There was no entry found in the dict that had an item with a name that matched {}".format(key))
-def map_from_pairs(source, delim="="):
- ''' Returns a dict given the source and delim delimited '''
- if source == '':
- return dict()
-
- return dict(item.split(delim) for item in source.split(","))
-
-
def serviceaccount_name(qualified_sa):
''' Returns the simple name from a fully qualified name '''
return qualified_sa.split(":")[-1]
@@ -102,6 +94,28 @@ def serviceaccount_namespace(qualified_sa, default=None):
return seg[-1]
+def flatten_dict(data, parent_key=None):
+ """ This filter plugin will flatten a dict and its sublists into a single dict
+ """
+ if not isinstance(data, dict):
+ raise RuntimeError("flatten_dict failed, expects to flatten a dict")
+
+ merged = dict()
+
+ for key in data:
+ if parent_key is not None:
+ insert_key = '.'.join((parent_key, key))
+ else:
+ insert_key = key
+
+ if isinstance(data[key], dict):
+ merged.update(flatten_dict(data[key], insert_key))
+ else:
+ merged[insert_key] = data[key]
+
+ return merged
+
+
# pylint: disable=too-few-public-methods
class FilterModule(object):
''' OpenShift Logging Filters '''
@@ -112,10 +126,10 @@ class FilterModule(object):
return {
'random_word': random_word,
'entry_from_named_pair': entry_from_named_pair,
- 'map_from_pairs': map_from_pairs,
'min_cpu': min_cpu,
'es_storage': es_storage,
'serviceaccount_name': serviceaccount_name,
'serviceaccount_namespace': serviceaccount_namespace,
- 'walk': walk
+ 'walk': walk,
+ "flatten_dict": flatten_dict
}
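For example, the new filter turns nested configmap data into dot-joined keys:

    flatten_dict({"index": {"number_of_shards": 1}, "translog": {"flush_threshold_size": "256mb"}})
    # -> {'index.number_of_shards': 1, 'translog.flush_threshold_size': '256mb'}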
diff --git a/roles/openshift_logging/library/logging_patch.py b/roles/openshift_logging/library/logging_patch.py
new file mode 100644
index 000000000..d2c0bc456
--- /dev/null
+++ b/roles/openshift_logging/library/logging_patch.py
@@ -0,0 +1,112 @@
+#!/usr/bin/python
+
+""" Ansible module to help with creating context patch file with whitelisting for logging """
+
+import difflib
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+DOCUMENTATION = '''
+---
+module: logging_patch
+
+short_description: This will create a unified diff patch while giving the
+ ability to whitelist some lines (excluding them from comparison)
+
+description:
+ - "To create configmap patches for logging"
+
+author:
+ - Eric Wolinetz ewolinet@redhat.com
+'''
+
+
+EXAMPLES = '''
+- logging_patch:
+ original_file: "{{ tempdir }}/current.yml"
+ new_file: "{{ configmap_new_file }}"
+ whitelist: "{{ configmap_protected_lines | default([]) }}"
+
+'''
+
+
+def account_for_whitelist(file_contents, white_list=None):
+ """ This method will remove lines that contain whitelist values from the content
+ of the file so that we aren't build a patch based on that line
+
+ Usage:
+
+ for file_contents:
+
+ index:
+ number_of_shards: {{ es_number_of_shards | default ('1') }}
+ number_of_replicas: {{ es_number_of_replicas | default ('0') }}
+ unassigned.node_left.delayed_timeout: 2m
+ translog:
+ flush_threshold_size: 256mb
+ flush_threshold_period: 5m
+
+
+ and white_list:
+
+ ['number_of_shards', 'number_of_replicas']
+
+
+ We would end up with:
+
+ index:
+ unassigned.node_left.delayed_timeout: 2m
+ translog:
+ flush_threshold_size: 256mb
+ flush_threshold_period: 5m
+
+ """
+
+ for line in white_list or []:  # tolerate the default of None
+ file_contents = re.sub(r".*%s:.*\n" % line, "", file_contents)
+
+ return file_contents
+
+
+def run_module():
+ """ The body of the module, we check if the variable name specified as the value
+ for the key is defined. If it is then we use that value as for the original key """
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ original_file=dict(type='str', required=True),
+ new_file=dict(type='str', required=True),
+ whitelist=dict(required=False, type='list', default=[])
+ ),
+ supports_check_mode=True
+ )
+
+ original_fh = open(module.params['original_file'], "r")
+ original_contents = original_fh.read()
+ original_fh.close()
+
+ original_contents = account_for_whitelist(original_contents, module.params['whitelist'])
+
+ new_fh = open(module.params['new_file'], "r")
+ new_contents = new_fh.read()
+ new_fh.close()
+
+ new_contents = account_for_whitelist(new_contents, module.params['whitelist'])
+
+ uni_diff = difflib.unified_diff(new_contents.splitlines(),
+ original_contents.splitlines(),
+ lineterm='')
+
+ return module.exit_json(changed=False, # noqa: F405
+ raw_patch="\n".join(uni_diff))
+
+
+def main():
+ """ main """
+ run_module()
+
+
+if __name__ == '__main__':
+ main()
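A quick illustration of account_for_whitelist as documented above (note the match is regex-based, so a whitelist entry also removes lines where it appears as a substring):

    contents = "index:\n  number_of_shards: 1\n  unassigned.node_left.delayed_timeout: 2m\n"
    account_for_whitelist(contents, ["number_of_shards"])
    # -> 'index:\n  unassigned.node_left.delayed_timeout: 2m\n'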
diff --git a/roles/openshift_logging/library/openshift_logging_facts.py b/roles/openshift_logging/library/openshift_logging_facts.py
index 98d0d1c4f..37ffb0204 100644
--- a/roles/openshift_logging/library/openshift_logging_facts.py
+++ b/roles/openshift_logging/library/openshift_logging_facts.py
@@ -204,6 +204,14 @@ class OpenshiftLoggingFacts(OCBaseCommand):
if comp is not None:
self.add_facts_for(comp, "services", name, dict())
+ # pylint: disable=too-many-arguments
+ def facts_from_configmap(self, comp, kind, name, config_key, yaml_file=None):
+ '''Extracts facts in logging namespace from configmap'''
+ if yaml_file is not None:
+ config_facts = yaml.safe_load(yaml_file)
+ self.facts[comp][kind][name][config_key] = config_facts
+ self.facts[comp][kind][name]["raw"] = yaml_file
+
def facts_for_configmaps(self, namespace):
''' Gathers facts for configmaps in logging namespace '''
self.default_keys_for("configmaps")
@@ -214,7 +222,10 @@ class OpenshiftLoggingFacts(OCBaseCommand):
name = item["metadata"]["name"]
comp = self.comp(name)
if comp is not None:
- self.add_facts_for(comp, "configmaps", name, item["data"])
+ self.add_facts_for(comp, "configmaps", name, dict(item["data"]))
+ if comp in ["elasticsearch", "elasticsearch_ops"]:
+ for config_key in item["data"]:
+ self.facts_from_configmap(comp, "configmaps", name, config_key, item["data"][config_key])
def facts_for_oauthclients(self, namespace):
''' Gathers facts for oauthclients used with logging '''
@@ -265,7 +276,7 @@ class OpenshiftLoggingFacts(OCBaseCommand):
return
for item in role["subjects"]:
comp = self.comp(item["name"])
- if comp is not None and namespace == item["namespace"]:
+ if comp is not None and namespace == item.get("namespace"):
self.add_facts_for(comp, "clusterrolebindings", "cluster-readers", dict())
# this needs to end up nested under the service account...
@@ -277,7 +288,7 @@ class OpenshiftLoggingFacts(OCBaseCommand):
return
for item in role["subjects"]:
comp = self.comp(item["name"])
- if comp is not None and namespace == item["namespace"]:
+ if comp is not None and namespace == item.get("namespace"):
self.add_facts_for(comp, "rolebindings", "logging-elasticsearch-view-role", dict())
# pylint: disable=no-self-use, too-many-return-statements
diff --git a/roles/openshift_logging/meta/main.yaml b/roles/openshift_logging/meta/main.yaml
index 9c480f73a..01ed4918f 100644
--- a/roles/openshift_logging/meta/main.yaml
+++ b/roles/openshift_logging/meta/main.yaml
@@ -14,3 +14,4 @@ galaxy_info:
dependencies:
- role: lib_openshift
- role: openshift_facts
+- role: lib_utils
diff --git a/roles/openshift_logging/tasks/annotate_ops_projects.yaml b/roles/openshift_logging/tasks/annotate_ops_projects.yaml
index 59d6098d4..4a2ee64f0 100644
--- a/roles/openshift_logging/tasks/annotate_ops_projects.yaml
+++ b/roles/openshift_logging/tasks/annotate_ops_projects.yaml
@@ -1,6 +1,6 @@
---
- command: >
- {{ openshift.common.client_binary }}
+ {{ openshift_client_binary }}
--config={{ openshift.common.config_base }}/master/admin.kubeconfig
get namespaces -o jsonpath={.items[*].metadata.name} {{ __default_logging_ops_projects | join(' ') }}
register: __logging_ops_projects
diff --git a/roles/openshift_logging/tasks/delete_logging.yaml b/roles/openshift_logging/tasks/delete_logging.yaml
index af36d67c6..fbc3e3fd1 100644
--- a/roles/openshift_logging/tasks/delete_logging.yaml
+++ b/roles/openshift_logging/tasks/delete_logging.yaml
@@ -109,14 +109,14 @@
# remove annotations added by logging
- command: >
- {{ openshift.common.client_binary }}
+ {{ openshift_client_binary }}
--config={{ openshift.common.config_base }}/master/admin.kubeconfig
get namespaces -o name {{ __default_logging_ops_projects | join(' ') }}
register: __logging_ops_projects
- name: Remove Annotation of Operations Projects
command: >
- {{ openshift.common.client_binary }}
+ {{ openshift_client_binary }}
--config={{ openshift.common.config_base }}/master/admin.kubeconfig
annotate {{ project }} openshift.io/logging.ui.hostname-
with_items: "{{ __logging_ops_projects.stdout_lines }}"
@@ -126,7 +126,18 @@
- __logging_ops_projects.stderr | length == 0
## EventRouter
-- include_role:
+- import_role:
name: openshift_logging_eventrouter
when:
not openshift_logging_install_eventrouter | default(false) | bool
+
+# Update asset config in openshift-web-console namespace
+- name: Remove Kibana route information from web console asset config
+ include_role:
+ name: openshift_web_console
+ tasks_from: update_asset_config.yml
+ vars:
+ asset_config_edits:
+ - key: loggingPublicURL
+ value: ""
+ when: openshift_web_console_install | default(true) | bool
diff --git a/roles/openshift_logging/tasks/generate_certs.yaml b/roles/openshift_logging/tasks/generate_certs.yaml
index 082c0128f..0d7f8c056 100644
--- a/roles/openshift_logging/tasks/generate_certs.yaml
+++ b/roles/openshift_logging/tasks/generate_certs.yaml
@@ -17,7 +17,7 @@
- name: Generate certificates
command: >
- {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig ca create-signer-cert
+ {{ openshift_client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig ca create-signer-cert
--key={{generated_certs_dir}}/ca.key --cert={{generated_certs_dir}}/ca.crt
--serial={{generated_certs_dir}}/ca.serial.txt --name=logging-signer-test
check_mode: no
@@ -139,10 +139,10 @@
# TODO: make idempotent
- name: Generate proxy session
- set_fact: session_secret={{ 200 | oo_random_word}}
+ set_fact: session_secret={{ 200 | lib_utils_oo_random_word}}
check_mode: no
# TODO: make idempotent
- name: Generate oauth client secret
- set_fact: oauth_secret={{ 64 | oo_random_word}}
+ set_fact: oauth_secret={{ 64 | lib_utils_oo_random_word}}
check_mode: no
diff --git a/roles/openshift_logging/tasks/generate_jks.yaml b/roles/openshift_logging/tasks/generate_jks.yaml
index d6ac88dcc..6e3204589 100644
--- a/roles/openshift_logging/tasks/generate_jks.yaml
+++ b/roles/openshift_logging/tasks/generate_jks.yaml
@@ -24,25 +24,21 @@
local_action: file path="{{local_tmp.stdout}}/elasticsearch.jks" state=touch mode="u=rw,g=r,o=r"
when: elasticsearch_jks.stat.exists
changed_when: False
- become: no
- name: Create placeholder for previously created JKS certs to prevent recreating...
local_action: file path="{{local_tmp.stdout}}/logging-es.jks" state=touch mode="u=rw,g=r,o=r"
when: logging_es_jks.stat.exists
changed_when: False
- become: no
- name: Create placeholder for previously created JKS certs to prevent recreating...
local_action: file path="{{local_tmp.stdout}}/system.admin.jks" state=touch mode="u=rw,g=r,o=r"
when: system_admin_jks.stat.exists
changed_when: False
- become: no
- name: Create placeholder for previously created JKS certs to prevent recreating...
local_action: file path="{{local_tmp.stdout}}/truststore.jks" state=touch mode="u=rw,g=r,o=r"
when: truststore_jks.stat.exists
changed_when: False
- become: no
- name: pulling down signing items from host
fetch:
@@ -61,12 +57,10 @@
vars:
- top_dir: "{{local_tmp.stdout}}"
when: not elasticsearch_jks.stat.exists or not logging_es_jks.stat.exists or not system_admin_jks.stat.exists or not truststore_jks.stat.exists
- become: no
- name: Run JKS generation script
local_action: script generate-jks.sh {{local_tmp.stdout}} {{openshift_logging_namespace}}
check_mode: no
- become: no
when: not elasticsearch_jks.stat.exists or not logging_es_jks.stat.exists or not system_admin_jks.stat.exists or not truststore_jks.stat.exists
- name: Pushing locally generated JKS certs to remote host...
diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml
index bb8ebec6b..f82e55b98 100644
--- a/roles/openshift_logging/tasks/install_logging.yaml
+++ b/roles/openshift_logging/tasks/install_logging.yaml
@@ -1,9 +1,12 @@
---
- name: Gather OpenShift Logging Facts
openshift_logging_facts:
- oc_bin: "{{openshift.common.client_binary}}"
+ oc_bin: "{{openshift_client_binary}}"
openshift_logging_namespace: "{{openshift_logging_namespace}}"
+## This is include vs import because we need access to group/inventory variables
+- include_tasks: set_defaults_from_current.yml
+
- name: Set logging project
oc_project:
state: present
@@ -84,14 +87,14 @@
openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type }}"
openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_pv_selector }}"
- openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_pvc_storage_class_name }}"
+ openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_pvc_storage_class_name | default() }}"
openshift_logging_elasticsearch_nodeselector: "{{ openshift_logging_es_nodeselector if outer_item.0.nodeSelector | default(None) is none else outer_item.0.nodeSelector }}"
openshift_logging_elasticsearch_storage_group: "{{ [openshift_logging_es_storage_group] if outer_item.0.storageGroups | default([]) | length == 0 else outer_item.0.storageGroups }}"
_es_containers: "{{ outer_item.0.containers}}"
_es_configmap: "{{ openshift_logging_facts | walk('elasticsearch#configmaps#logging-elasticsearch#elasticsearch.yml', '{}', delimiter='#') | from_yaml }}"
with_together:
- - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.values() }}"
+ - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.values() | list }}"
- "{{ openshift_logging_facts.elasticsearch.pvcs }}"
- "{{ es_indices }}"
loop_control:
@@ -111,7 +114,7 @@
openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type }}"
openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_pv_selector }}"
- openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_pvc_storage_class_name }}"
+ openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_pvc_storage_class_name | default() }}"
with_sequence: count={{ openshift_logging_es_cluster_size | int - openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | count }}
loop_control:
@@ -148,7 +151,7 @@
openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_ops_pvc_size }}"
openshift_logging_elasticsearch_pvc_dynamic: "{{ openshift_logging_es_ops_pvc_dynamic }}"
openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}"
- openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_ops_pvc_storage_class_name }}"
+ openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_ops_pvc_storage_class_name | default() }}"
openshift_logging_elasticsearch_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}"
openshift_logging_elasticsearch_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}"
openshift_logging_elasticsearch_cpu_request: "{{ openshift_logging_es_ops_cpu_request }}"
@@ -166,7 +169,7 @@
_es_configmap: "{{ openshift_logging_facts | walk('elasticsearch_ops#configmaps#logging-elasticsearch-ops#elasticsearch.yml', '{}', delimiter='#') | from_yaml }}"
with_together:
- - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.values() }}"
+ - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.values() | list }}"
- "{{ openshift_logging_facts.elasticsearch_ops.pvcs }}"
- "{{ es_ops_indices }}"
loop_control:
@@ -190,7 +193,7 @@
openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_ops_pvc_size }}"
openshift_logging_elasticsearch_pvc_dynamic: "{{ openshift_logging_es_ops_pvc_dynamic }}"
openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}"
- openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_ops_pvc_storage_class_name }}"
+ openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_ops_pvc_storage_class_name | default() }}"
openshift_logging_elasticsearch_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}"
openshift_logging_elasticsearch_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}"
openshift_logging_elasticsearch_cpu_request: "{{ openshift_logging_es_ops_cpu_request }}"
@@ -210,7 +213,7 @@
## Kibana
-- include_role:
+- import_role:
name: openshift_logging_kibana
vars:
generated_certs_dir: "{{openshift.common.config_base}}/logging"
@@ -223,7 +226,7 @@
openshift_logging_kibana_image_pull_secret: "{{ openshift_logging_image_pull_secret }}"
-- include_role:
+- import_role:
name: openshift_logging_kibana
vars:
generated_certs_dir: "{{openshift.common.config_base}}/logging"
@@ -253,7 +256,7 @@
- include_tasks: annotate_ops_projects.yaml
## Curator
-- include_role:
+- import_role:
name: openshift_logging_curator
vars:
generated_certs_dir: "{{openshift.common.config_base}}/logging"
@@ -263,7 +266,7 @@
openshift_logging_curator_master_url: "{{ openshift_logging_master_url }}"
openshift_logging_curator_image_pull_secret: "{{ openshift_logging_image_pull_secret }}"
-- include_role:
+- import_role:
name: openshift_logging_curator
vars:
generated_certs_dir: "{{openshift.common.config_base}}/logging"
@@ -281,7 +284,7 @@
- openshift_logging_use_ops | bool
## Mux
-- include_role:
+- import_role:
name: openshift_logging_mux
vars:
generated_certs_dir: "{{openshift.common.config_base}}/logging"
@@ -294,7 +297,7 @@
## Fluentd
-- include_role:
+- import_role:
name: openshift_logging_fluentd
vars:
generated_certs_dir: "{{openshift.common.config_base}}/logging"
@@ -305,10 +308,27 @@
## EventRouter
-- include_role:
+- import_role:
name: openshift_logging_eventrouter
when:
openshift_logging_install_eventrouter | default(false) | bool
+# TODO: Remove when asset config is removed from master-config.yaml
- include_tasks: update_master_config.yaml
+
+# Update asset config in openshift-web-console namespace
+- name: Add Kibana route information to web console asset config
+ include_role:
+ name: openshift_web_console
+ tasks_from: update_console_config.yml
+ vars:
+ console_config_edits:
+ - key: clusterInfo#loggingPublicURL
+ value: "https://{{ openshift_logging_kibana_hostname }}"
+ # Continue to set the old deprecated property until the
+ # origin-web-console image is updated for the new name.
+ # This will be removed in a future pull.
+ - key: loggingPublicURL
+ value: "https://{{ openshift_logging_kibana_hostname }}"
+ when: openshift_web_console_install | default(true) | bool
diff --git a/roles/openshift_logging/tasks/main.yaml b/roles/openshift_logging/tasks/main.yaml
index 9949bb95d..60cc399fa 100644
--- a/roles/openshift_logging/tasks/main.yaml
+++ b/roles/openshift_logging/tasks/main.yaml
@@ -17,7 +17,11 @@
register: local_tmp
changed_when: False
check_mode: no
- become: no
+
+- name: Chmod local temp directory for doing work in
+ local_action: command chmod 777 "{{ local_tmp.stdout }}"
+ changed_when: False
+ check_mode: no
- include_tasks: install_logging.yaml
when:
@@ -31,4 +35,3 @@
local_action: file path="{{local_tmp.stdout}}" state=absent
tags: logging_cleanup
changed_when: False
- become: no
diff --git a/roles/openshift_logging/tasks/patch_configmap_file.yaml b/roles/openshift_logging/tasks/patch_configmap_file.yaml
new file mode 100644
index 000000000..30087fe6a
--- /dev/null
+++ b/roles/openshift_logging/tasks/patch_configmap_file.yaml
@@ -0,0 +1,35 @@
+---
+## The purpose of this task file is to get a patch based on the diff
+## between configmap_current_file and configmap_new_file. The module
+## logging_patch takes the paths of two files to compare and also a list of
+## variable names whose lines are excluded from the diff.
+## We then patch the new configmap file so that we can build a configmap
+## from that file later, and use oc apply to idempotently modify any
+## existing configmap.
+
+## The following variables are expected to be provided when including this task:
+# __configmap_output -- This is provided to us from patch_configmap_files.yaml
+# it is a dict of the configmap where configmap_current_file exists
+# configmap_current_file -- The name of the data file in the __configmap_output
+# configmap_new_file -- The path to the file that we intend to oc apply later
+# we apply our generated patch to this file.
+# configmap_protected_lines -- The list of variables to exclude from the diff
+
+- copy:
+ content: "{{ __configmap_output.results.results[0]['data'][configmap_current_file] }}"
+ dest: "{{ tempdir }}/current.yml"
+
+- logging_patch:
+ original_file: "{{ tempdir }}/current.yml"
+ new_file: "{{ configmap_new_file }}"
+ whitelist: "{{ configmap_protected_lines | default([]) }}"
+ register: patch_output
+
+- copy:
+ content: "{{ patch_output.raw_patch }}\n"
+ dest: "{{ tempdir }}/patch.patch"
+ when: patch_output.raw_patch | length > 0
+
+- command: >
+ patch --force --quiet -u "{{ configmap_new_file }}" "{{ tempdir }}/patch.patch"
+ when: patch_output.raw_patch | length > 0
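
The copy/patch pair above is the whole write-back path: the module's raw_patch is written to disk only when non-empty, then applied in place to the role-generated file. A rough Python equivalent of the same sequence (paths are placeholders, and GNU patch is assumed to be on PATH):

    import difflib
    import os
    import subprocess
    import tempfile

    tempdir = tempfile.mkdtemp()
    new_path = os.path.join(tempdir, "new.yml")
    with open(new_path, "w") as f:
        f.write("setting: default\n")

    # Stand-in for the module output: a diff from the new file to the current one.
    raw_patch = "\n".join(difflib.unified_diff(
        ["setting: default"], ["setting: customized"], lineterm=""))

    if raw_patch:
        patch_path = os.path.join(tempdir, "patch.patch")
        with open(patch_path, "w") as f:
            f.write(raw_patch + "\n")
        # Mirrors: patch --force --quiet -u <configmap_new_file> <patch file>
        subprocess.check_call(
            ["patch", "--force", "--quiet", "-u", new_path, patch_path])
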
diff --git a/roles/openshift_logging/tasks/patch_configmap_files.yaml b/roles/openshift_logging/tasks/patch_configmap_files.yaml
new file mode 100644
index 000000000..74a9cc287
--- /dev/null
+++ b/roles/openshift_logging/tasks/patch_configmap_files.yaml
@@ -0,0 +1,31 @@
+---
+## The purpose of this task file is to take in a list of configmap files provided
+## in the variable configmap_file_names, which correspond to the data sections
+## within a configmap. We iterate over each of these files and create a patch
+## from the diff between current_file and new_file, preserving any custom
+## changes that a user may have made to the currently deployed configmap while
+## idempotently updating it with the role-provided files.
+
+## The following variables are expected to be provided when including this task:
+# configmap_name -- This is the name of the configmap that the files exist in
+# configmap_namespace -- The namespace that the configmap lives in
+# configmap_file_names -- This is expected to be passed in as a list of dicts, each with:
+# current_file -- The name of the data entry within the configmap
+# new_file -- The file path to the file we are comparing to current_file
+# protected_lines -- Optional list of variable names whose lines are excluded when creating the diff
+
+- oc_configmap:
+ name: "{{ configmap_name }}"
+ state: list
+ namespace: "{{ configmap_namespace }}"
+ register: __configmap_output
+
+- when: __configmap_output.results.stderr is undefined
+ include_tasks: patch_configmap_file.yaml
+ vars:
+ configmap_current_file: "{{ configmap_files.current_file }}"
+ configmap_new_file: "{{ configmap_files.new_file }}"
+ configmap_protected_lines: "{{ configmap_files.protected_lines | default([]) }}"
+ with_items: "{{ configmap_file_names }}"
+ loop_control:
+ loop_var: configmap_files
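
Expressed as a Python literal, the structure the loop above expects in configmap_file_names looks like the following (file names and paths here are placeholders):

    configmap_file_names = [
        {
            "current_file": "fluent.conf",            # data key in the live configmap
            "new_file": "/tmp/workdir/fluent.conf",   # freshly templated file
            "protected_lines": ["number_of_shards"],  # optional, defaults to []
        },
    ]
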
diff --git a/roles/openshift_logging/tasks/procure_server_certs.yaml b/roles/openshift_logging/tasks/procure_server_certs.yaml
index 00de0ca06..bc817075d 100644
--- a/roles/openshift_logging/tasks/procure_server_certs.yaml
+++ b/roles/openshift_logging/tasks/procure_server_certs.yaml
@@ -27,7 +27,7 @@
- name: Creating signed server cert and key for {{ cert_info.procure_component }}
command: >
- {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig ca create-server-cert
+ {{ openshift_client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig ca create-server-cert
--key={{generated_certs_dir}}/{{cert_info.procure_component}}.key --cert={{generated_certs_dir}}/{{cert_info.procure_component}}.crt
--hostnames={{cert_info.hostnames|quote}} --signer-cert={{generated_certs_dir}}/ca.crt --signer-key={{generated_certs_dir}}/ca.key
--signer-serial={{generated_certs_dir}}/ca.serial.txt
diff --git a/roles/openshift_logging/tasks/set_defaults_from_current.yml b/roles/openshift_logging/tasks/set_defaults_from_current.yml
new file mode 100644
index 000000000..dde362abe
--- /dev/null
+++ b/roles/openshift_logging/tasks/set_defaults_from_current.yml
@@ -0,0 +1,34 @@
+---
+
+## We pull default values from the configmaps if they already exist.
+## Using conditional_set_fact allows us to set the value of a variable based on
+## the value of another one, if that one is defined. Otherwise we don't set the
+## left hand side (it stays undefined as well).
+
+## conditional_set_fact also lets us specify a fact source, so first we try to
+## set variables from the logging-elasticsearch & logging-elasticsearch-ops configmaps;
+## afterwards we set each variable from the inventory value, falling back to the
+## configmap value as a default. If neither is set, the variable remains
+## undefined and the role default will be used.
+
+- conditional_set_fact:
+ facts: "{{ openshift_logging_facts['elasticsearch']['configmaps']['logging-elasticsearch']['elasticsearch.yml'] | flatten_dict }}"
+ vars:
+ __openshift_logging_es_number_of_shards: index.number_of_shards
+ __openshift_logging_es_number_of_replicas: index.number_of_replicas
+ when: openshift_logging_facts['elasticsearch']['configmaps']['logging-elasticsearch'] is defined
+
+- conditional_set_fact:
+ facts: "{{ openshift_logging_facts['elasticsearch_ops']['configmaps']['logging-elasticsearch-ops']['elasticsearch.yml'] | flatten_dict }}"
+ vars:
+ __openshift_logging_es_ops_number_of_shards: index.number_of_shards
+ __openshift_logging_es_ops_number_of_replicas: index.number_of_replicas
+ when: openshift_logging_facts['elasticsearch_ops']['configmaps']['logging-elasticsearch-ops'] is defined
+
+- conditional_set_fact:
+ facts: "{{ hostvars[inventory_hostname] }}"
+ vars:
+ openshift_logging_es_number_of_shards: openshift_logging_es_number_of_shards | __openshift_logging_es_number_of_shards
+ openshift_logging_es_number_of_replicas: openshift_logging_es_number_of_replicas | __openshift_logging_es_number_of_replicas
+ openshift_logging_es_ops_number_of_shards: openshift_logging_es_ops_number_of_shards | __openshift_logging_es_ops_number_of_shards
+ openshift_logging_es_ops_number_of_replicas: openshift_logging_es_ops_number_of_replicas | __openshift_logging_es_ops_number_of_replicas
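
flatten_dict is a lib_utils filter; the sketch below is an assumed Python equivalent, shown only to illustrate why the lookup keys above are dotted paths rather than nested maps:

    # Assumed behavior of the flatten_dict filter, not its actual source.
    def flatten_dict(data, parent_key=""):
        items = {}
        for key, value in data.items():
            new_key = parent_key + "." + key if parent_key else key
            if isinstance(value, dict):
                items.update(flatten_dict(value, new_key))
            else:
                items[new_key] = value
        return items

    config = {"index": {"number_of_shards": 3, "number_of_replicas": 1}}
    print(flatten_dict(config))
    # {'index.number_of_shards': 3, 'index.number_of_replicas': 1}
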
diff --git a/roles/openshift_logging/tasks/update_master_config.yaml b/roles/openshift_logging/tasks/update_master_config.yaml
index b96b8e29d..c0f42ba97 100644
--- a/roles/openshift_logging/tasks/update_master_config.yaml
+++ b/roles/openshift_logging/tasks/update_master_config.yaml
@@ -1,4 +1,5 @@
---
+# TODO: Remove when asset config is removed from master-config.yaml
- name: Adding Kibana route information to loggingPublicURL
modify_yaml:
dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
diff --git a/roles/openshift_logging_curator/meta/main.yaml b/roles/openshift_logging_curator/meta/main.yaml
index d4635aab0..9f7c6341c 100644
--- a/roles/openshift_logging_curator/meta/main.yaml
+++ b/roles/openshift_logging_curator/meta/main.yaml
@@ -14,3 +14,4 @@ galaxy_info:
dependencies:
- role: lib_openshift
- role: openshift_facts
+- role: lib_utils
diff --git a/roles/openshift_logging_curator/tasks/main.yaml b/roles/openshift_logging_curator/tasks/main.yaml
index e7ef5ff22..cc68998f5 100644
--- a/roles/openshift_logging_curator/tasks/main.yaml
+++ b/roles/openshift_logging_curator/tasks/main.yaml
@@ -2,7 +2,7 @@
- name: Set default image variables based on deployment_type
include_vars: "{{ var_file_name }}"
with_first_found:
- - "{{ openshift_deployment_type | default(deployment_type) }}.yml"
+ - "{{ openshift_deployment_type }}.yml"
- "default_images.yml"
loop_control:
loop_var: var_file_name
@@ -54,14 +54,17 @@
- copy:
src: curator.yml
dest: "{{ tempdir }}/curator.yml"
- when: curator_config_contents is undefined
changed_when: no
-- copy:
- content: "{{ curator_config_contents }}"
- dest: "{{ tempdir }}/curator.yml"
- when: curator_config_contents is defined
- changed_when: no
+- import_role:
+ name: openshift_logging
+ tasks_from: patch_configmap_files.yaml
+ vars:
+ configmap_name: "logging-curator"
+ configmap_namespace: "logging"
+ configmap_file_names:
+ - current_file: "config.yaml"
+ new_file: "{{ tempdir }}/curator.yml"
- name: Set Curator configmap
oc_configmap:
diff --git a/roles/openshift_logging_curator/vars/main.yml b/roles/openshift_logging_curator/vars/main.yml
index 95bf462d1..df5299a83 100644
--- a/roles/openshift_logging_curator/vars/main.yml
+++ b/roles/openshift_logging_curator/vars/main.yml
@@ -1,3 +1,3 @@
---
-__latest_curator_version: "3_6"
-__allowed_curator_versions: ["3_5", "3_6", "3_7"]
+__latest_curator_version: "3_9"
+__allowed_curator_versions: ["3_5", "3_6", "3_7", "3_8", "3_9"]
diff --git a/roles/openshift_logging_elasticsearch/meta/main.yaml b/roles/openshift_logging_elasticsearch/meta/main.yaml
index 6a9a6539c..e93d6b73e 100644
--- a/roles/openshift_logging_elasticsearch/meta/main.yaml
+++ b/roles/openshift_logging_elasticsearch/meta/main.yaml
@@ -14,3 +14,4 @@ galaxy_info:
dependencies:
- role: lib_openshift
- role: openshift_facts
+- role: lib_utils
diff --git a/roles/openshift_logging_elasticsearch/tasks/determine_version.yaml b/roles/openshift_logging_elasticsearch/tasks/determine_version.yaml
index c53a06019..c55e7c5ea 100644
--- a/roles/openshift_logging_elasticsearch/tasks/determine_version.yaml
+++ b/roles/openshift_logging_elasticsearch/tasks/determine_version.yaml
@@ -15,3 +15,5 @@
- fail:
msg: Invalid version specified for Elasticsearch
when: es_version not in __allowed_es_versions
+
+- include_tasks: get_es_version.yml
diff --git a/roles/openshift_logging_elasticsearch/tasks/get_es_version.yml b/roles/openshift_logging_elasticsearch/tasks/get_es_version.yml
new file mode 100644
index 000000000..16de6f252
--- /dev/null
+++ b/roles/openshift_logging_elasticsearch/tasks/get_es_version.yml
@@ -0,0 +1,42 @@
+---
+- command: >
+ oc get pod -l component=es,provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[?(@.status.phase==\"Running\")].metadata.name}
+ register: _cluster_pods
+
+- name: "Getting ES version for logging-es cluster"
+ command: >
+ oc exec {{ _cluster_pods.stdout.split(' ')[0] }} -c elasticsearch -n {{ openshift_logging_elasticsearch_namespace }} -- {{ __es_local_curl }} -XGET 'https://localhost:9200/'
+ register: _curl_output
+ when: _cluster_pods.stdout_lines | count > 0
+
+- command: >
+ oc get pod -l component=es-ops,provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[?(@.status.phase==\"Running\")].metadata.name}
+ register: _ops_cluster_pods
+
+- name: "Getting ES version for logging-es-ops cluster"
+ command: >
+ oc exec {{ _ops_cluster_pods.stdout.split(' ')[0] }} -c elasticsearch -n {{ openshift_logging_elasticsearch_namespace }} -- {{ __es_local_curl }} -XGET 'https://localhost:9200/'
+ register: _ops_curl_output
+ when: _ops_cluster_pods.stdout_lines | count > 0
+
+- set_fact:
+ _es_output: "{{ _curl_output.stdout | from_json }}"
+ when: _curl_output.stdout is defined
+
+- set_fact:
+ _es_ops_output: "{{ _ops_curl_output.stdout | from_json }}"
+ when: _ops_curl_output.stdout is defined
+
+- set_fact:
+ _es_installed_version: "{{ _es_output.version.number }}"
+ when:
+ - _es_output is defined
+ - _es_output.version is defined
+ - _es_output.version.number is defined
+
+- set_fact:
+ _es_ops_installed_version: "{{ _es_ops_output.version.number }}"
+ when:
+ - _es_ops_output is defined
+ - _es_ops_output.version is defined
+ - _es_ops_output.version.number is defined
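
The JSON that GET / returns from an Elasticsearch node carries the running version under version.number; the set_fact chain above extracts it so that main.yaml (next hunk) can compare major versions against __es_version. A minimal sketch of that comparison, with an abbreviated sample response:

    import json

    curl_stdout = '{"name": "logging-es", "version": {"number": "2.4.4"}}'
    installed = json.loads(curl_stdout)["version"]["number"]
    target = "2.4.4"  # __es_version from vars/main.yml

    # A lower installed major version triggers full_restart_cluster in main.yaml.
    full_restart_cluster = int(installed.split(".")[0]) < int(target.split(".")[0])
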
diff --git a/roles/openshift_logging_elasticsearch/tasks/main.yaml b/roles/openshift_logging_elasticsearch/tasks/main.yaml
index 5fe683ae5..ff5ad1045 100644
--- a/roles/openshift_logging_elasticsearch/tasks/main.yaml
+++ b/roles/openshift_logging_elasticsearch/tasks/main.yaml
@@ -15,10 +15,10 @@
elasticsearch_name: "{{ 'logging-elasticsearch' ~ ( (openshift_logging_elasticsearch_ops_deployment | default(false) | bool) | ternary('-ops', '')) }}"
es_component: "{{ 'es' ~ ( (openshift_logging_elasticsearch_ops_deployment | default(false) | bool) | ternary('-ops', '') ) }}"
-- name: Set default image variables based on deployment_type
+- name: Set default image variables based on openshift_deployment_type
include_vars: "{{ var_file_name }}"
with_first_found:
- - "{{ openshift_deployment_type | default(deployment_type) }}.yml"
+ - "{{ openshift_deployment_type }}.yml"
- "default_images.yml"
loop_control:
loop_var: var_file_name
@@ -32,6 +32,18 @@
- include_tasks: determine_version.yaml
+- set_fact:
+ full_restart_cluster: True
+ when:
+ - _es_installed_version is defined
+ - _es_installed_version.split('.')[0] | int < __es_version.split('.')[0] | int
+
+- set_fact:
+ full_restart_cluster: True
+ when:
+ - _es_ops_installed_version is defined
+ - _es_ops_installed_version.split('.')[0] | int < __es_version.split('.')[0] | int
+
# allow passing in a tempdir
- name: Create temp directory for doing work in
command: mktemp -d /tmp/openshift-logging-ansible-XXXXXX
@@ -111,7 +123,7 @@
- name: Create logging-metrics-reader-role
command: >
- {{ openshift.common.client_binary }}
+ {{ openshift_client_binary }}
--config={{ openshift.common.config_base }}/master/admin.kubeconfig
-n "{{ openshift_logging_elasticsearch_namespace }}"
create -f "{{mktemp.stdout}}/templates/logging-metrics-role.yml"
@@ -168,33 +180,33 @@
when: es_logging_contents is undefined
changed_when: no
-- set_fact:
- __es_num_of_shards: "{{ _es_configmap | default({}) | walk('index.number_of_shards', '1') }}"
- __es_num_of_replicas: "{{ _es_configmap | default({}) | walk('index.number_of_replicas', '0') }}"
-
- template:
src: elasticsearch.yml.j2
dest: "{{ tempdir }}/elasticsearch.yml"
vars:
allow_cluster_reader: "{{ openshift_logging_elasticsearch_ops_allow_cluster_reader | lower | default('false') }}"
- es_number_of_shards: "{{ openshift_logging_es_number_of_shards | default(None) or __es_num_of_shards }}"
- es_number_of_replicas: "{{ openshift_logging_es_number_of_replicas | default(None) or __es_num_of_replicas }}"
+ es_number_of_shards: "{{ openshift_logging_es_number_of_shards | default(1) }}"
+ es_number_of_replicas: "{{ openshift_logging_es_number_of_replicas | default(0) }}"
es_kibana_index_mode: "{{ openshift_logging_elasticsearch_kibana_index_mode | default('unique') }}"
when: es_config_contents is undefined
changed_when: no
-- copy:
- content: "{{ es_logging_contents }}"
- dest: "{{ tempdir }}/elasticsearch-logging.yml"
- when: es_logging_contents is defined
- changed_when: no
-
-- copy:
- content: "{{ es_config_contents }}"
- dest: "{{ tempdir }}/elasticsearch.yml"
- when: es_config_contents is defined
- changed_when: no
+# Create a diff between the currently deployed configmap files and the files we just generated
+# NOTE: include_role must be used instead of import_role because
+# this task file is looped over from another role.
+- include_role:
+ name: openshift_logging
+ tasks_from: patch_configmap_files.yaml
+ vars:
+ configmap_name: "logging-elasticsearch"
+ configmap_namespace: "logging"
+ configmap_file_names:
+ - current_file: "elasticsearch.yml"
+ new_file: "{{ tempdir }}/elasticsearch.yml"
+ protected_lines: ["number_of_shards", "number_of_replicas"]
+ - current_file: "logging.yml"
+ new_file: "{{ tempdir }}/elasticsearch-logging.yml"
- name: Set ES configmap
oc_configmap:
@@ -352,7 +364,7 @@
delete_after: true
- set_fact:
- es_deploy_name: "logging-{{ es_component }}-{{ openshift_logging_elasticsearch_deployment_type }}-{{ 8 | oo_random_word('abcdefghijklmnopqrstuvwxyz0123456789') }}"
+ es_deploy_name: "logging-{{ es_component }}-{{ openshift_logging_elasticsearch_deployment_type }}-{{ 8 | lib_utils_oo_random_word('abcdefghijklmnopqrstuvwxyz0123456789') }}"
when: openshift_logging_elasticsearch_deployment_name == ""
- set_fact:
diff --git a/roles/openshift_logging_elasticsearch/tasks/restart_cluster.yml b/roles/openshift_logging_elasticsearch/tasks/restart_cluster.yml
index 4a32453e3..6bce13d1d 100644
--- a/roles/openshift_logging_elasticsearch/tasks/restart_cluster.yml
+++ b/roles/openshift_logging_elasticsearch/tasks/restart_cluster.yml
@@ -1,7 +1,25 @@
---
+# Disable external communication for {{ _cluster_component }} by giving the
+# service a 'connection: blocked' selector key that no pod carries, which
+# removes all endpoints from the service.
+- name: Disable external communication for logging-{{ _cluster_component }}
+ oc_service:
+ state: present
+ name: "logging-{{ _cluster_component }}"
+ namespace: "{{ openshift_logging_elasticsearch_namespace }}"
+ selector:
+ component: "{{ _cluster_component }}"
+ provider: openshift
+ connection: blocked
+ labels:
+ logging-infra: 'support'
+ ports:
+ - port: 9200
+ targetPort: "restapi"
+ when:
+ - full_restart_cluster | bool
+
## get all pods for the cluster
- command: >
- oc get pod -l component={{ _cluster_component }},provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[*].metadata.name}
+ oc get pod -l component={{ _cluster_component }},provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[?(@.status.phase==\"Running\")].metadata.name}
register: _cluster_pods
- name: "Disable shard balancing for logging-{{ _cluster_component }} cluster"
@@ -11,21 +29,42 @@
changed_when: "'\"acknowledged\":true' in _disable_output.stdout"
when: _cluster_pods.stdout_lines | count > 0
+# Flush ES
+- name: "Flushing for logging-{{ _cluster_component }} cluster"
+ command: >
+ oc exec {{ _cluster_pods.stdout.split(' ')[0] }} -c elasticsearch -n {{ openshift_logging_elasticsearch_namespace }} -- {{ __es_local_curl }} -XPUT 'https://localhost:9200/_flush/synced'
+ register: _flush_output
+ changed_when: "'\"acknowledged\":true' in _flush_output.stdout"
+ when:
+ - _cluster_pods.stdout_lines | count > 0
+ - full_restart_cluster | bool
+
- command: >
oc get dc -l component={{ _cluster_component }},provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[*].metadata.name}
register: _cluster_dcs
+## restart all dcs for full restart
+- name: "Restart ES node {{ _es_node }}"
+ include_tasks: restart_es_node.yml
+ with_items: "{{ _cluster_dcs }}"
+ loop_control:
+ loop_var: _es_node
+ when:
+ - full_restart_cluster | bool
+
## restart the node only if its dc is in the list of nodes to restart
- name: "Restart ES node {{ _es_node }}"
include_tasks: restart_es_node.yml
with_items: "{{ _restart_logging_nodes }}"
loop_control:
loop_var: _es_node
- when: _es_node in _cluster_dcs.stdout
+ when:
+ - not full_restart_cluster | bool
+ - _es_node in _cluster_dcs.stdout
## we may need a new first pod to run against -- fetch them all again
- command: >
- oc get pod -l component={{ _cluster_component }},provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[*].metadata.name}
+ oc get pod -l component={{ _cluster_component }},provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[?(@.status.phase==\"Running\")].metadata.name}
register: _cluster_pods
- name: "Enable shard balancing for logging-{{ _cluster_component }} cluster"
@@ -33,3 +72,20 @@
oc exec {{ _cluster_pods.stdout.split(' ')[0] }} -c elasticsearch -n {{ openshift_logging_elasticsearch_namespace }} -- {{ __es_local_curl }} -XPUT 'https://localhost:9200/_cluster/settings' -d '{ "transient": { "cluster.routing.allocation.enable" : "all" } }'
register: _enable_output
changed_when: "'\"acknowledged\":true' in _enable_output.stdout"
+
+# Reenable external communication for {{ _cluster_component }}
+- name: Reenable external communication for logging-{{ _cluster_component }}
+ oc_service:
+ state: present
+ name: "logging-{{ _cluster_component }}"
+ namespace: "{{ openshift_logging_elasticsearch_namespace }}"
+ selector:
+ component: "{{ _cluster_component }}"
+ provider: openshift
+ labels:
+ logging-infra: 'support'
+ ports:
+ - port: 9200
+ targetPort: "restapi"
+ when:
+ - full_restart_cluster | bool
diff --git a/roles/openshift_logging_elasticsearch/tasks/restart_es_node.yml b/roles/openshift_logging_elasticsearch/tasks/restart_es_node.yml
index b07b232ce..6d0df40c8 100644
--- a/roles/openshift_logging_elasticsearch/tasks/restart_es_node.yml
+++ b/roles/openshift_logging_elasticsearch/tasks/restart_es_node.yml
@@ -14,6 +14,8 @@
- _dc_output.results.results[0].status is defined
- _dc_output.results.results[0].status.readyReplicas is defined
- _dc_output.results.results[0].status.readyReplicas > 0
+ - _dc_output.results.results[0].status.updatedReplicas is defined
+ - _dc_output.results.results[0].status.updatedReplicas > 0
retries: 60
delay: 30
diff --git a/roles/openshift_logging_elasticsearch/templates/es.j2 b/roles/openshift_logging_elasticsearch/templates/es.j2
index cf6ee36bb..4b189f255 100644
--- a/roles/openshift_logging_elasticsearch/templates/es.j2
+++ b/roles/openshift_logging_elasticsearch/templates/es.j2
@@ -50,7 +50,7 @@ spec:
- -provider=openshift
- -client-id={{openshift_logging_elasticsearch_prometheus_sa}}
- -client-secret-file=/var/run/secrets/kubernetes.io/serviceaccount/token
- - -cookie-secret={{ 16 | oo_random_word | b64encode }}
+ - -cookie-secret={{ 16 | lib_utils_oo_random_word | b64encode }}
- -upstream=https://localhost:9200
- '-openshift-sar={"namespace": "{{ openshift_logging_elasticsearch_namespace}}", "verb": "view", "resource": "prometheus", "group": "metrics.openshift.io"}'
- '-openshift-delegate-urls={"/": {"resource": "prometheus", "verb": "view", "group": "metrics.openshift.io", "namespace": "{{ openshift_logging_elasticsearch_namespace}}"}}'
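
lib_utils_oo_random_word takes a length and, optionally, a source alphabet (as in the es_deploy_name task earlier in this patch). A plausible Python equivalent, offered as an assumption rather than the filter's actual source:

    import random
    import string

    # Assumed shape of lib_utils_oo_random_word.
    def oo_random_word(length, source=string.ascii_letters + string.digits):
        return "".join(random.choice(source) for _ in range(length))

    cookie_secret = oo_random_word(16)  # as in: 16 | lib_utils_oo_random_word
    deploy_suffix = oo_random_word(8, "abcdefghijklmnopqrstuvwxyz0123456789")
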
diff --git a/roles/openshift_logging_elasticsearch/vars/main.yml b/roles/openshift_logging_elasticsearch/vars/main.yml
index c8e995146..122231031 100644
--- a/roles/openshift_logging_elasticsearch/vars/main.yml
+++ b/roles/openshift_logging_elasticsearch/vars/main.yml
@@ -1,9 +1,10 @@
---
-__latest_es_version: "3_6"
-__allowed_es_versions: ["3_5", "3_6", "3_7"]
+__latest_es_version: "3_9"
+__allowed_es_versions: ["3_5", "3_6", "3_7", "3_8", "3_9"]
__allowed_es_types: ["data-master", "data-client", "master", "client"]
__es_log_appenders: ['file', 'console']
__kibana_index_modes: ["unique", "shared_ops"]
+__es_version: "2.4.4"
__es_local_curl: "curl -s --cacert /etc/elasticsearch/secret/admin-ca --cert /etc/elasticsearch/secret/admin-cert --key /etc/elasticsearch/secret/admin-key"
@@ -14,3 +15,4 @@ es_min_masters_default: "{{ (openshift_logging_elasticsearch_replica_count | int
es_min_masters: "{{ (openshift_logging_elasticsearch_replica_count == 1) | ternary(1, es_min_masters_default) }}"
es_recover_after_nodes: "{{ openshift_logging_elasticsearch_replica_count | int }}"
es_recover_expected_nodes: "{{ openshift_logging_elasticsearch_replica_count | int }}"
+full_restart_cluster: False
diff --git a/roles/openshift_node_facts/meta/main.yml b/roles/openshift_logging_eventrouter/meta/main.yaml
index 59bf680ce..711bb8f22 100644
--- a/roles/openshift_node_facts/meta/main.yml
+++ b/roles/openshift_logging_eventrouter/meta/main.yaml
@@ -1,10 +1,10 @@
---
galaxy_info:
- author: Andrew Butcher
- description: OpenShift Node Facts
+ author: OpenShift Red Hat
+ description: OpenShift Aggregated Logging Eventrouter
company: Red Hat, Inc.
license: Apache License, Version 2.0
- min_ansible_version: 1.9
+ min_ansible_version: 2.2
platforms:
- name: EL
versions:
@@ -12,4 +12,6 @@ galaxy_info:
categories:
- cloud
dependencies:
+- role: lib_openshift
- role: openshift_facts
+- role: lib_utils
diff --git a/roles/openshift_logging_eventrouter/tasks/main.yaml b/roles/openshift_logging_eventrouter/tasks/main.yaml
index 96b181d61..31780a343 100644
--- a/roles/openshift_logging_eventrouter/tasks/main.yaml
+++ b/roles/openshift_logging_eventrouter/tasks/main.yaml
@@ -1,8 +1,8 @@
---
-- name: Set default image variables based on deployment_type
+- name: Set default image variables based on openshift_deployment_type
include_vars: "{{ var_file_name }}"
with_first_found:
- - "{{ openshift_deployment_type | default(deployment_type) }}.yml"
+ - "{{ openshift_deployment_type }}.yml"
- "default_images.yml"
loop_control:
loop_var: var_file_name
diff --git a/roles/openshift_logging_fluentd/defaults/main.yml b/roles/openshift_logging_fluentd/defaults/main.yml
index 9b58e4456..87b4204b5 100644
--- a/roles/openshift_logging_fluentd/defaults/main.yml
+++ b/roles/openshift_logging_fluentd/defaults/main.yml
@@ -5,6 +5,7 @@ openshift_logging_fluentd_master_url: "https://kubernetes.default.svc.{{ openshi
openshift_logging_fluentd_namespace: logging
### Common settings
+# map_from_pairs is a custom filter plugin in role lib_utils
openshift_logging_fluentd_nodeselector: "{{ openshift_hosted_logging_fluentd_nodeselector_label | default('logging-infra-fluentd=true') | map_from_pairs }}"
openshift_logging_fluentd_cpu_limit: null
openshift_logging_fluentd_cpu_request: 100m
diff --git a/roles/openshift_logging_fluentd/meta/main.yaml b/roles/openshift_logging_fluentd/meta/main.yaml
index 89c98204f..62f076780 100644
--- a/roles/openshift_logging_fluentd/meta/main.yaml
+++ b/roles/openshift_logging_fluentd/meta/main.yaml
@@ -14,3 +14,4 @@ galaxy_info:
dependencies:
- role: lib_openshift
- role: openshift_facts
+- role: lib_utils
diff --git a/roles/openshift_logging_fluentd/tasks/label_and_wait.yaml b/roles/openshift_logging_fluentd/tasks/label_and_wait.yaml
index 12b4f5bfd..2721438f0 100644
--- a/roles/openshift_logging_fluentd/tasks/label_and_wait.yaml
+++ b/roles/openshift_logging_fluentd/tasks/label_and_wait.yaml
@@ -4,8 +4,7 @@
name: "{{ node }}"
kind: node
state: add
- labels: "{{ openshift_logging_fluentd_nodeselector | oo_dict_to_list_of_dict }}"
+ labels: "{{ openshift_logging_fluentd_nodeselector | lib_utils_oo_dict_to_list_of_dict }}"
# wait half a second between labels
- local_action: command sleep {{ openshift_logging_fluentd_label_delay | default('.5') }}
- become: no
diff --git a/roles/openshift_logging_fluentd/tasks/main.yaml b/roles/openshift_logging_fluentd/tasks/main.yaml
index 87eedfb4b..79ebbca08 100644
--- a/roles/openshift_logging_fluentd/tasks/main.yaml
+++ b/roles/openshift_logging_fluentd/tasks/main.yaml
@@ -34,10 +34,10 @@
msg: WARNING Use of openshift_logging_mux_client_mode=minimal is not recommended due to current scaling issues
when: openshift_logging_mux_client_mode is defined and openshift_logging_mux_client_mode == 'minimal'
-- name: Set default image variables based on deployment_type
+- name: Set default image variables based on openshift_deployment_type
include_vars: "{{ var_file_name }}"
with_first_found:
- - "{{ openshift_deployment_type | default(deployment_type) }}.yml"
+ - "{{ openshift_deployment_type }}.yml"
- "default_images.yml"
loop_control:
loop_var: var_file_name
@@ -108,38 +108,28 @@
dest: "{{ tempdir }}/fluent.conf"
vars:
deploy_type: "{{ openshift_logging_fluentd_deployment_type }}"
- when: fluentd_config_contents is undefined
- changed_when: no
- copy:
src: fluentd-throttle-config.yaml
dest: "{{ tempdir }}/fluentd-throttle-config.yaml"
- when: fluentd_throttle_contents is undefined
- changed_when: no
- copy:
src: secure-forward.conf
dest: "{{ tempdir }}/secure-forward.conf"
- when: fluentd_secureforward_contents is undefined
- changed_when: no
-
-- copy:
- content: "{{ fluentd_config_contents }}"
- dest: "{{ tempdir }}/fluent.conf"
- when: fluentd_config_contents is defined
- changed_when: no
-- copy:
- content: "{{ fluentd_throttle_contents }}"
- dest: "{{ tempdir }}/fluentd-throttle-config.yaml"
- when: fluentd_throttle_contents is defined
- changed_when: no
-
-- copy:
- content: "{{ fluentd_secureforward_contents }}"
- dest: "{{ tempdir }}/secure-forward.conf"
- when: fluentd_secureforward_contents is defined
- changed_when: no
+- import_role:
+ name: openshift_logging
+ tasks_from: patch_configmap_files.yaml
+ vars:
+ configmap_name: "logging-fluentd"
+ configmap_namespace: "logging"
+ configmap_file_names:
+ - current_file: "fluent.conf"
+ new_file: "{{ tempdir }}/fluent.conf"
+ - current_file: "throttle-config.yaml"
+ new_file: "{{ tempdir }}/fluentd-throttle-config.yaml"
+ - current_file: "secure-forward.conf"
+ new_file: "{{ tempdir }}/secure-forward.conf"
- name: Set Fluentd configmap
oc_configmap:
@@ -182,8 +172,8 @@
app_port: "{{ openshift_logging_fluentd_app_port }}"
ops_host: "{{ openshift_logging_fluentd_ops_host }}"
ops_port: "{{ openshift_logging_fluentd_ops_port }}"
- fluentd_nodeselector_key: "{{ openshift_logging_fluentd_nodeselector.keys()[0] }}"
- fluentd_nodeselector_value: "{{ openshift_logging_fluentd_nodeselector.values()[0] }}"
+ fluentd_nodeselector_key: "{{ openshift_logging_fluentd_nodeselector.keys() | first }}"
+ fluentd_nodeselector_value: "{{ openshift_logging_fluentd_nodeselector.values() | first }}"
fluentd_cpu_limit: "{{ openshift_logging_fluentd_cpu_limit }}"
fluentd_cpu_request: "{{ openshift_logging_fluentd_cpu_request | min_cpu(openshift_logging_fluentd_cpu_limit | default(none)) }}"
fluentd_memory_limit: "{{ openshift_logging_fluentd_memory_limit }}"
diff --git a/roles/openshift_logging_fluentd/vars/main.yml b/roles/openshift_logging_fluentd/vars/main.yml
index 92a426952..b60da814f 100644
--- a/roles/openshift_logging_fluentd/vars/main.yml
+++ b/roles/openshift_logging_fluentd/vars/main.yml
@@ -1,5 +1,5 @@
---
-__latest_fluentd_version: "3_6"
-__allowed_fluentd_versions: ["3_5", "3_6", "3_7"]
+__latest_fluentd_version: "3_9"
+__allowed_fluentd_versions: ["3_5", "3_6", "3_7", "3_8", "3_9"]
__allowed_fluentd_types: ["hosted", "secure-aggregator", "secure-host"]
__allowed_mux_client_modes: ["minimal", "maximal"]
diff --git a/roles/openshift_logging_kibana/meta/main.yaml b/roles/openshift_logging_kibana/meta/main.yaml
index d97586a37..d9d76dfe0 100644
--- a/roles/openshift_logging_kibana/meta/main.yaml
+++ b/roles/openshift_logging_kibana/meta/main.yaml
@@ -14,3 +14,4 @@ galaxy_info:
dependencies:
- role: lib_openshift
- role: openshift_facts
+- role: lib_utils
diff --git a/roles/openshift_logging_kibana/tasks/main.yaml b/roles/openshift_logging_kibana/tasks/main.yaml
index 77bf8042a..3c3bd902e 100644
--- a/roles/openshift_logging_kibana/tasks/main.yaml
+++ b/roles/openshift_logging_kibana/tasks/main.yaml
@@ -1,9 +1,9 @@
---
# fail if we don't have an endpoint for ES to connect to?
-- name: Set default image variables based on deployment_type
+- name: Set default image variables based on openshift_deployment_type
include_vars: "{{ var_file_name }}"
with_first_found:
- - "{{ openshift_deployment_type | default(deployment_type) }}.yml"
+ - "{{ openshift_deployment_type }}.yml"
- "default_images.yml"
loop_control:
loop_var: var_file_name
@@ -69,7 +69,7 @@
# gen session_secret if necessary
- name: Generate session secret
copy:
- content: "{{ 200 | oo_random_word }}"
+ content: "{{ 200 | lib_utils_oo_random_word }}"
dest: "{{ generated_certs_dir }}/session_secret"
when:
- not session_secret_file.stat.exists
@@ -77,7 +77,7 @@
# gen oauth_secret if necessary
- name: Generate oauth secret
copy:
- content: "{{ 64 | oo_random_word }}"
+ content: "{{ 64 | lib_utils_oo_random_word }}"
dest: "{{ generated_certs_dir }}/oauth_secret"
when:
- not oauth_secret_file.stat.exists
diff --git a/roles/openshift_logging_kibana/vars/main.yml b/roles/openshift_logging_kibana/vars/main.yml
index 241877a02..fed926a3b 100644
--- a/roles/openshift_logging_kibana/vars/main.yml
+++ b/roles/openshift_logging_kibana/vars/main.yml
@@ -1,3 +1,3 @@
---
-__latest_kibana_version: "3_6"
-__allowed_kibana_versions: ["3_5", "3_6", "3_7"]
+__latest_kibana_version: "3_9"
+__allowed_kibana_versions: ["3_5", "3_6", "3_7", "3_8", "3_9"]
diff --git a/roles/openshift_logging_mux/defaults/main.yml b/roles/openshift_logging_mux/defaults/main.yml
index db6f23126..e87c8d33e 100644
--- a/roles/openshift_logging_mux/defaults/main.yml
+++ b/roles/openshift_logging_mux/defaults/main.yml
@@ -6,6 +6,7 @@ openshift_logging_mux_master_public_url: "{{ openshift_hosted_logging_master_pub
openshift_logging_mux_namespace: logging
### Common settings
+# map_from_pairs is a custom filter plugin in role lib_utils
openshift_logging_mux_nodeselector: "{{ openshift_hosted_logging_mux_nodeselector_label | default('') | map_from_pairs }}"
openshift_logging_mux_cpu_limit: null
openshift_logging_mux_cpu_request: 100m
@@ -30,6 +31,7 @@ openshift_logging_mux_allow_external: False
openshift_logging_use_mux: "{{ openshift_logging_mux_allow_external | default(False) }}"
openshift_logging_mux_hostname: "{{ 'mux.' ~ openshift_master_default_subdomain }}"
openshift_logging_mux_port: 24284
+openshift_logging_mux_external_address: "{{ ansible_default_ipv4.address }}"
# the namespace to use for undefined projects should come first, followed by any
# additional namespaces to create by default - users will typically not need to set this
openshift_logging_mux_default_namespaces: ["mux-undefined"]
diff --git a/roles/openshift_logging_mux/meta/main.yaml b/roles/openshift_logging_mux/meta/main.yaml
index f271d8d7d..969752f15 100644
--- a/roles/openshift_logging_mux/meta/main.yaml
+++ b/roles/openshift_logging_mux/meta/main.yaml
@@ -14,3 +14,4 @@ galaxy_info:
dependencies:
- role: lib_openshift
- role: openshift_facts
+- role: lib_utils
diff --git a/roles/openshift_logging_mux/tasks/main.yaml b/roles/openshift_logging_mux/tasks/main.yaml
index 68948bce2..7eba3cda4 100644
--- a/roles/openshift_logging_mux/tasks/main.yaml
+++ b/roles/openshift_logging_mux/tasks/main.yaml
@@ -7,10 +7,10 @@
msg: Operations logs destination is required
when: not openshift_logging_mux_ops_host or openshift_logging_mux_ops_host == ''
-- name: Set default image variables based on deployment_type
+- name: Set default image variables based on openshift_deployment_type
include_vars: "{{ var_file_name }}"
with_first_found:
- - "{{ openshift_deployment_type | default(deployment_type) }}.yml"
+ - "{{ openshift_deployment_type }}.yml"
- "default_images.yml"
loop_control:
loop_var: var_file_name
@@ -88,26 +88,24 @@
- copy:
src: fluent.conf
dest: "{{mktemp.stdout}}/fluent-mux.conf"
- when: fluentd_mux_config_contents is undefined
changed_when: no
- copy:
src: secure-forward.conf
dest: "{{mktemp.stdout}}/secure-forward-mux.conf"
- when: fluentd_mux_securefoward_contents is undefined
changed_when: no
-- copy:
- content: "{{fluentd_mux_config_contents}}"
- dest: "{{mktemp.stdout}}/fluent-mux.conf"
- when: fluentd_mux_config_contents is defined
- changed_when: no
-
-- copy:
- content: "{{fluentd_mux_secureforward_contents}}"
- dest: "{{mktemp.stdout}}/secure-forward-mux.conf"
- when: fluentd_mux_secureforward_contents is defined
- changed_when: no
+- import_role:
+ name: openshift_logging
+ tasks_from: patch_configmap_files.yaml
+ vars:
+ configmap_name: "logging-mux"
+ configmap_namespace: "{{ openshift_logging_mux_namespace }}"
+ configmap_file_names:
+ - current_file: "fluent.conf"
+ new_file: "{{ tempdir }}/fluent-mux.conf"
+ - current_file: "secure-forward.conf"
+ new_file: "{{ tempdir }}/secure-forward-mux.conf"
- name: Set Mux configmap
oc_configmap:
@@ -150,7 +148,7 @@
port: "{{ openshift_logging_mux_port }}"
targetPort: "mux-forward"
external_ips:
- - "{{ ansible_eth0.ipv4.address }}"
+ - "{{ openshift_logging_mux_external_address }}"
when: openshift_logging_mux_allow_external | bool
- name: Set logging-mux service for internal communication
diff --git a/roles/openshift_logging_mux/vars/main.yml b/roles/openshift_logging_mux/vars/main.yml
index e7b57f4b5..e87205bad 100644
--- a/roles/openshift_logging_mux/vars/main.yml
+++ b/roles/openshift_logging_mux/vars/main.yml
@@ -1,3 +1,3 @@
---
-__latest_mux_version: "3_6"
-__allowed_mux_versions: ["3_5", "3_6", "3_7"]
+__latest_mux_version: "3_9"
+__allowed_mux_versions: ["3_5", "3_6", "3_7", "3_8", "3_9"]
diff --git a/roles/openshift_manage_node/meta/main.yml b/roles/openshift_manage_node/meta/main.yml
index d90cd28cf..a09808a39 100644
--- a/roles/openshift_manage_node/meta/main.yml
+++ b/roles/openshift_manage_node/meta/main.yml
@@ -13,3 +13,4 @@ galaxy_info:
- cloud
dependencies:
- role: lib_openshift
+- role: lib_utils
diff --git a/roles/openshift_manage_node/tasks/main.yml b/roles/openshift_manage_node/tasks/main.yml
index a15f336e4..9251d380b 100644
--- a/roles/openshift_manage_node/tasks/main.yml
+++ b/roles/openshift_manage_node/tasks/main.yml
@@ -18,7 +18,7 @@
retries: 120
delay: 1
changed_when: false
- when: openshift.common.is_containerized | bool
+ when: openshift_is_containerized | bool
delegate_to: "{{ openshift_master_host }}"
run_once: true
@@ -50,10 +50,9 @@
name: "{{ openshift.node.nodename }}"
kind: node
state: add
- labels: "{{ openshift.node.labels | oo_dict_to_list_of_dict }}"
+ labels: "{{ openshift_node_labels | lib_utils_oo_dict_to_list_of_dict }}"
namespace: default
when:
- "'nodename' in openshift.node"
- - "'labels' in openshift.node"
- - openshift.node.labels != {}
+ - openshift_node_labels | default({}) != {}
delegate_to: "{{ openshift_master_host }}"
diff --git a/roles/openshift_manageiq/meta/main.yml b/roles/openshift_manageiq/meta/main.yml
index 6c96a91bf..5c9481430 100644
--- a/roles/openshift_manageiq/meta/main.yml
+++ b/roles/openshift_manageiq/meta/main.yml
@@ -13,3 +13,4 @@ galaxy_info:
- cloud
dependencies:
- role: lib_openshift
+- role: lib_utils
diff --git a/roles/openshift_management/tasks/add_container_provider.yml b/roles/openshift_management/tasks/add_container_provider.yml
index 24b2ce6ac..357e6a710 100644
--- a/roles/openshift_management/tasks/add_container_provider.yml
+++ b/roles/openshift_management/tasks/add_container_provider.yml
@@ -1,6 +1,6 @@
---
- name: Ensure OpenShift facts module is available
- include_role:
+ import_role:
role: openshift_facts
- name: Ensure OpenShift facts are loaded
@@ -27,7 +27,7 @@
- name: Ensure the management SA bearer token is identified
set_fact:
- management_token: "{{ sa.results | oo_filter_sa_secrets }}"
+ management_token: "{{ sa.results | lib_utils_oo_filter_sa_secrets }}"
- name: Ensure the SA bearer token value is read
oc_secret:
diff --git a/roles/openshift_management/tasks/main.yml b/roles/openshift_management/tasks/main.yml
index f212dba7c..c4b204b98 100644
--- a/roles/openshift_management/tasks/main.yml
+++ b/roles/openshift_management/tasks/main.yml
@@ -8,7 +8,7 @@
# This creates a service account allowing Container Provider
# integration (managing OCP/Origin via MIQ/Management)
- name: Enable Container Provider Integration
- include_role:
+ import_role:
role: openshift_manageiq
- name: "Ensure the Management '{{ openshift_management_project }}' namespace exists"
diff --git a/roles/openshift_management/tasks/storage/nfs.yml b/roles/openshift_management/tasks/storage/nfs.yml
index 94e11137c..9e3a4d43a 100644
--- a/roles/openshift_management/tasks/storage/nfs.yml
+++ b/roles/openshift_management/tasks/storage/nfs.yml
@@ -5,14 +5,14 @@
- name: Setting up NFS storage
block:
- name: Include the NFS Setup role tasks
- include_role:
+ import_role:
role: openshift_nfs
tasks_from: setup
vars:
l_nfs_base_dir: "{{ openshift_management_storage_nfs_base_dir }}"
- name: Create the App export
- include_role:
+ import_role:
role: openshift_nfs
tasks_from: create_export
vars:
@@ -22,7 +22,7 @@
l_nfs_options: "*(rw,no_root_squash,no_wdelay)"
- name: Create the DB export
- include_role:
+ import_role:
role: openshift_nfs
tasks_from: create_export
vars:
diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml
index efd119299..7d96a467e 100644
--- a/roles/openshift_master/defaults/main.yml
+++ b/roles/openshift_master/defaults/main.yml
@@ -7,6 +7,12 @@ openshift_master_debug_level: "{{ debug_level | default(2) }}"
r_openshift_master_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
r_openshift_master_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
+osm_image_default_dict:
+ origin: 'openshift/origin'
+ openshift-enterprise: 'openshift3/ose'
+osm_image_default: "{{ osm_image_default_dict[openshift_deployment_type] }}"
+osm_image: "{{ osm_image_default }}"
+
system_images_registry_dict:
openshift-enterprise: "registry.access.redhat.com"
origin: "docker.io"
@@ -47,12 +53,12 @@ oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' in oreg_ur
oreg_auth_credentials_path: "{{ r_openshift_master_data_dir }}/.docker"
oreg_auth_credentials_replace: False
l_bind_docker_reg_auth: False
-openshift_docker_alternative_creds: "{{ (openshift_docker_use_system_container | default(False)) or (openshift_use_crio_only | default(False)) }}"
+openshift_docker_alternative_creds: "{{ (openshift_docker_use_system_container | default(False) | bool) or (openshift_use_crio_only | default(False)) }}"
containerized_svc_dir: "/usr/lib/systemd/system"
ha_svc_template_path: "native-cluster"
-openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False)) else 'docker' }}"
+openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False) | bool) else 'docker' }}"
openshift_master_loopback_config: "{{ openshift_master_config_dir }}/openshift-master.kubeconfig"
loopback_context_string: "current-context: {{ openshift.master.loopback_context_name }}"
@@ -76,6 +82,15 @@ openshift_master_valid_grant_methods:
openshift_master_is_scaleup_host: False
+# openshift_master_oauth_template is deprecated. Should be added to deprecations
+# and removed.
+openshift_master_oauth_template: False
+openshift_master_oauth_templates_default:
+ login: "{{ openshift_master_oauth_template }}"
+openshift_master_oauth_templates: "{{ openshift_master_oauth_template | ternary(openshift_master_oauth_templates_default, False) }}"
+# Here we combine openshift_master_oauth_template into the 'login' key of openshift_master_oauth_templates, if not already set.
+l_openshift_master_oauth_templates: "{{ openshift_master_oauth_templates | default(openshift_master_oauth_templates_default) }}"
+
# These defaults assume forcing journald persistence, fsync to disk once
# a second, rate-limiting to 10,000 logs a second, no forwarding to
# syslog or wall, using 8GB of disk space maximum, using 10MB journal
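
The oauth template defaults above form a fallback chain: the deprecated single-template knob is wrapped under a 'login' key only when it is set, an explicitly provided openshift_master_oauth_templates wins, and l_openshift_master_oauth_templates falls back to the wrapped default only when the templates variable is undefined. The ternary step, rendered in Python (Jinja's undefined-variable semantics for default() are glossed over here):

    # Sketch of the ternary in defaults/main.yml; False disables the template.
    openshift_master_oauth_template = False  # deprecated single-template knob

    openshift_master_oauth_templates_default = {
        "login": openshift_master_oauth_template,
    }

    # ternary(): wrap the deprecated knob under 'login' only when it is set.
    openshift_master_oauth_templates = (
        openshift_master_oauth_templates_default
        if openshift_master_oauth_template
        else False
    )
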
diff --git a/roles/openshift_master/meta/main.yml b/roles/openshift_master/meta/main.yml
index bf0cbbf18..3460efec9 100644
--- a/roles/openshift_master/meta/main.yml
+++ b/roles/openshift_master/meta/main.yml
@@ -14,5 +14,4 @@ galaxy_info:
dependencies:
- role: lib_openshift
- role: lib_utils
-- role: lib_os_firewall
- role: openshift_facts
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index 7bfc870d5..b12a6b346 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -16,10 +16,10 @@
- name: Install Master package
package:
- name: "{{ openshift_service_type }}-master{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}"
+ name: "{{ openshift_service_type }}-master{{ openshift_pkg_version | default('') | lib_utils_oo_image_tag_to_rpm_version(include_dash=True) }}"
state: present
when:
- - not openshift.common.is_containerized | bool
+ - not openshift_is_containerized | bool
register: result
until: result is succeeded
@@ -31,12 +31,12 @@
owner: root
group: root
when:
- - openshift.common.is_containerized | bool
+ - openshift_is_containerized | bool
- name: Reload systemd units
command: systemctl daemon-reload
when:
- - openshift.common.is_containerized | bool
+ - openshift_is_containerized | bool
- name: Re-gather package dependent master facts
openshift_facts:
@@ -48,7 +48,7 @@
- name: Create the policy file if it does not already exist
command: >
- {{ openshift.common.client_binary }} adm create-bootstrap-policy-file
+ {{ openshift_client_binary }} adm create-bootstrap-policy-file
--filename={{ openshift_master_policy }}
args:
creates: "{{ openshift_master_policy }}"
@@ -69,7 +69,7 @@
package: name=httpd-tools state=present
when:
- item.kind == 'HTPasswdPasswordIdentityProvider'
- - not openshift.common.is_atomic | bool
+ - not openshift_is_atomic | bool
with_items: "{{ openshift.master.identity_providers }}"
register: result
until: result is succeeded
@@ -164,7 +164,7 @@
- name: Install Master system container
include_tasks: system_container.yml
when:
- - openshift.common.is_containerized | bool
+ - openshift_is_containerized | bool
- l_is_master_system_container | bool
- name: Create session secrets file
@@ -181,6 +181,7 @@
- restart master api
- set_fact:
+ # translate_idps is a custom filter in role lib_utils
translated_identity_providers: "{{ openshift.master.identity_providers | translate_idps('v1') }}"
# TODO: add the validate parameter when there is a validation command to run
diff --git a/roles/openshift_master/tasks/registry_auth.yml b/roles/openshift_master/tasks/registry_auth.yml
index 8b342a5b4..911a9bd3d 100644
--- a/roles/openshift_master/tasks/registry_auth.yml
+++ b/roles/openshift_master/tasks/registry_auth.yml
@@ -43,7 +43,7 @@
set_fact:
l_bind_docker_reg_auth: True
when:
- - openshift.common.is_containerized | bool
+ - openshift_is_containerized | bool
- oreg_auth_user is defined
- >
(master_oreg_auth_credentials_stat.stat.exists
diff --git a/roles/openshift_master/tasks/set_loopback_context.yml b/roles/openshift_master/tasks/set_loopback_context.yml
index 487fefb63..7e013a699 100644
--- a/roles/openshift_master/tasks/set_loopback_context.yml
+++ b/roles/openshift_master/tasks/set_loopback_context.yml
@@ -1,13 +1,13 @@
---
- name: Test local loopback context
command: >
- {{ openshift.common.client_binary }} config view
+ {{ openshift_client_binary }} config view
--config={{ openshift_master_loopback_config }}
changed_when: false
register: l_loopback_config
- command: >
- {{ openshift.common.client_binary }} config set-cluster
+ {{ openshift_client_binary }} config set-cluster
--certificate-authority={{ openshift_master_config_dir }}/ca.crt
--embed-certs=true --server={{ openshift.master.loopback_api_url }}
{{ openshift.master.loopback_cluster_name }}
@@ -17,7 +17,7 @@
register: set_loopback_cluster
- command: >
- {{ openshift.common.client_binary }} config set-context
+ {{ openshift_client_binary }} config set-context
--cluster={{ openshift.master.loopback_cluster_name }}
--namespace=default --user={{ openshift.master.loopback_user }}
{{ openshift.master.loopback_context_name }}
@@ -27,7 +27,7 @@
register: l_set_loopback_context
- command: >
- {{ openshift.common.client_binary }} config use-context {{ openshift.master.loopback_context_name }}
+ {{ openshift_client_binary }} config use-context {{ openshift.master.loopback_context_name }}
--config={{ openshift_master_loopback_config }}
when:
- l_set_loopback_context is changed
diff --git a/roles/openshift_master/tasks/system_container.yml b/roles/openshift_master/tasks/system_container.yml
index f6c5ce0dd..dcbf7fd9f 100644
--- a/roles/openshift_master/tasks/system_container.yml
+++ b/roles/openshift_master/tasks/system_container.yml
@@ -2,7 +2,7 @@
- name: Pre-pull master system container image
command: >
- atomic pull --storage=ostree {{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.master.master_system_image }}:{{ openshift_image_tag }}
+ atomic pull --storage=ostree {{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ osm_image }}:{{ openshift_image_tag }}
register: l_pull_result
changed_when: "'Pulling layer' in l_pull_result.stdout"
@@ -14,7 +14,7 @@
- name: Install or Update HA api master system container
oc_atomic_container:
name: "{{ openshift_service_type }}-master-api"
- image: "{{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.master.master_system_image }}:{{ openshift_image_tag }}"
+ image: "{{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ osm_image }}:{{ openshift_image_tag }}"
state: latest
values:
- COMMAND=api
@@ -22,7 +22,7 @@
- name: Install or Update HA controller master system container
oc_atomic_container:
name: "{{ openshift_service_type }}-master-controllers"
- image: "{{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.master.master_system_image }}:{{ openshift_image_tag }}"
+ image: "{{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ osm_image }}:{{ openshift_image_tag }}"
state: latest
values:
- COMMAND=controllers
diff --git a/roles/openshift_master/tasks/systemd_units.yml b/roles/openshift_master/tasks/systemd_units.yml
index 1c9ecafaa..870ab7c57 100644
--- a/roles/openshift_master/tasks/systemd_units.yml
+++ b/roles/openshift_master/tasks/systemd_units.yml
@@ -7,7 +7,7 @@
containerized_svc_dir: "/etc/systemd/system"
ha_svc_template_path: "docker-cluster"
when:
- - openshift.common.is_containerized | bool
+ - openshift_is_containerized | bool
- include_tasks: registry_auth.yml
@@ -30,11 +30,11 @@
# This is the image used for both HA and non-HA clusters:
- name: Pre-pull master image
command: >
- docker pull {{ openshift.master.master_image }}:{{ openshift_image_tag }}
+ docker pull {{ osm_image }}:{{ openshift_image_tag }}
register: l_pull_result
changed_when: "'Downloaded newer image' in l_pull_result.stdout"
when:
- - openshift.common.is_containerized | bool
+ - openshift_is_containerized | bool
- not l_is_master_system_container | bool
- name: Create the ha systemd unit files
diff --git a/roles/openshift_master/tasks/upgrade.yml b/roles/openshift_master/tasks/upgrade.yml
index f84cf2f6e..f143673cf 100644
--- a/roles/openshift_master/tasks/upgrade.yml
+++ b/roles/openshift_master/tasks/upgrade.yml
@@ -1,6 +1,6 @@
---
- include_tasks: upgrade/rpm_upgrade.yml
- when: not openshift.common.is_containerized | bool
+ when: not openshift_is_containerized | bool
- include_tasks: upgrade/upgrade_scheduler.yml
diff --git a/roles/openshift_master/tasks/upgrade/rpm_upgrade.yml b/roles/openshift_master/tasks/upgrade/rpm_upgrade.yml
index f72710832..4564f33dd 100644
--- a/roles/openshift_master/tasks/upgrade/rpm_upgrade.yml
+++ b/roles/openshift_master/tasks/upgrade/rpm_upgrade.yml
@@ -8,8 +8,25 @@
# TODO: If the sdn package isn't already installed this will install it, we
# should fix that
-- name: Upgrade master packages
- package: name={{ master_pkgs | join(',') }} state=present
+- name: Upgrade master packages - yum
+ command:
+ yum install -y {{ master_pkgs | join(' ') }} \
+ {{ ' --exclude *' ~ openshift_service_type ~ '*3.9*' if openshift_release | version_compare('3.9','<') else '' }}
+ vars:
+ master_pkgs:
+ - "{{ openshift_service_type }}{{ openshift_pkg_version | default('') }}"
+ - "{{ openshift_service_type }}-master{{ openshift_pkg_version | default('') }}"
+ - "{{ openshift_service_type }}-node{{ openshift_pkg_version | default('') }}"
+ - "{{ openshift_service_type }}-sdn-ovs{{ openshift_pkg_version | default('') }}"
+ - "{{ openshift_service_type }}-clients{{ openshift_pkg_version | default('') }}"
+ register: result
+ until: result is succeeded
+ when: ansible_pkg_mgr == 'yum'
+
+- name: Upgrade master packages - dnf
+ dnf:
+ name: "{{ master_pkgs | join(',') }}"
+ state: present
vars:
master_pkgs:
- "{{ openshift_service_type }}{{ openshift_pkg_version }}"
@@ -17,6 +34,6 @@
- "{{ openshift_service_type }}-node{{ openshift_pkg_version }}"
- "{{ openshift_service_type }}-sdn-ovs{{ openshift_pkg_version }}"
- "{{ openshift_service_type }}-clients{{ openshift_pkg_version }}"
- - "tuned-profiles-{{ openshift_service_type }}-node{{ openshift_pkg_version }}"
register: result
until: result is succeeded
+ when: ansible_pkg_mgr == 'dnf'
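For clarity, a sketch of what the new yum branch renders to; the package versions and release below are illustrative, not taken from this patch:

    # With openshift_service_type=origin, openshift_pkg_version='-3.7.23' and
    # openshift_release='3.7' (< 3.9), the command expands to roughly:
    #   yum install -y origin-3.7.23 origin-master-3.7.23 origin-node-3.7.23 \
    #     origin-sdn-ovs-3.7.23 origin-clients-3.7.23 --exclude *origin*3.9*
    # The --exclude keeps a pre-3.9 upgrade from pulling 3.9 packages out of
    # an enabled repo; the dnf branch omits the exclude.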
diff --git a/roles/openshift_master/tasks/upgrade/upgrade_scheduler.yml b/roles/openshift_master/tasks/upgrade/upgrade_scheduler.yml
index 8558bf3e9..995a5ab70 100644
--- a/roles/openshift_master/tasks/upgrade/upgrade_scheduler.yml
+++ b/roles/openshift_master/tasks/upgrade/upgrade_scheduler.yml
@@ -1,6 +1,8 @@
---
# Upgrade predicates
- vars:
+ # openshift_master_facts_default_predicates is a custom lookup plugin in
+ # role lib_utils
prev_predicates: "{{ lookup('openshift_master_facts_default_predicates', short_version=openshift_upgrade_min, deployment_type=openshift_deployment_type) }}"
prev_predicates_no_region: "{{ lookup('openshift_master_facts_default_predicates', short_version=openshift_upgrade_min, deployment_type=openshift_deployment_type, regions_enabled=False) }}"
default_predicates_no_region: "{{ lookup('openshift_master_facts_default_predicates', regions_enabled=False) }}"
diff --git a/roles/openshift_master/templates/atomic-openshift-master.j2 b/roles/openshift_master/templates/atomic-openshift-master.j2
index 3f7a528a9..4c68155ea 100644
--- a/roles/openshift_master/templates/atomic-openshift-master.j2
+++ b/roles/openshift_master/templates/atomic-openshift-master.j2
@@ -6,7 +6,7 @@ CONFIG_FILE={{ openshift_master_config_file }}
{% elif openshift_push_via_dns | default(false) %}
OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000
{% endif %}
-{% if openshift.common.is_containerized | bool %}
+{% if openshift_is_containerized | bool %}
IMAGE_VERSION={{ openshift_image_tag }}
{% endif %}
diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
index 5e46d9121..a56c0340c 100644
--- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
+++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
@@ -21,7 +21,7 @@ ExecStart=/usr/bin/docker run --rm --privileged --net=host \
{% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} \
-v /etc/pki:/etc/pki:ro \
{% if l_bind_docker_reg_auth | default(False) %} -v {{ oreg_auth_credentials_path }}:/root/.docker:ro{% endif %}\
- {{ openshift.master.master_image }}:${IMAGE_VERSION} start master api \
+ {{ osm_image }}:${IMAGE_VERSION} start master api \
--config=${CONFIG_FILE} $OPTIONS
ExecStartPost=/usr/bin/sleep 10
ExecStop=/usr/bin/docker stop {{ openshift_service_type }}-master-api
diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
index 899575f1a..79171d511 100644
--- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
+++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
@@ -20,7 +20,7 @@ ExecStart=/usr/bin/docker run --rm --privileged --net=host \
{% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} \
-v /etc/pki:/etc/pki:ro \
{% if l_bind_docker_reg_auth | default(False) %} -v {{ oreg_auth_credentials_path }}:/root/.docker:ro{% endif %}\
- {{ openshift.master.master_image }}:${IMAGE_VERSION} start master controllers \
+ {{ osm_image }}:${IMAGE_VERSION} start master controllers \
--config=${CONFIG_FILE} $OPTIONS
ExecStartPost=/usr/bin/sleep 10
ExecStop=/usr/bin/docker stop {{ openshift_service_type }}-master-controllers
diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2
index f1a76e5f5..14023ea73 100644
--- a/roles/openshift_master/templates/master.yaml.v1.j2
+++ b/roles/openshift_master/templates/master.yaml.v1.j2
@@ -1,6 +1,6 @@
admissionConfig:
{% if 'admission_plugin_config' in openshift.master %}
- pluginConfig:{{ openshift.master.admission_plugin_config | to_padded_yaml(level=2) }}
+ pluginConfig:{{ openshift.master.admission_plugin_config | lib_utils_to_padded_yaml(level=2) }}
{% endif %}
apiLevels:
- v1
@@ -16,13 +16,13 @@ assetConfig:
metricsPublicURL: {{ openshift_hosted_metrics_deploy_url }}
{% endif %}
{% if 'extension_scripts' in openshift.master %}
- extensionScripts: {{ openshift.master.extension_scripts | to_padded_yaml(1, 2) }}
+ extensionScripts: {{ openshift.master.extension_scripts | lib_utils_to_padded_yaml(1, 2) }}
{% endif %}
{% if 'extension_stylesheets' in openshift.master %}
- extensionStylesheets: {{ openshift.master.extension_stylesheets | to_padded_yaml(1, 2) }}
+ extensionStylesheets: {{ openshift.master.extension_stylesheets | lib_utils_to_padded_yaml(1, 2) }}
{% endif %}
{% if 'extensions' in openshift.master %}
- extensions: {{ openshift.master.extensions | to_padded_yaml(1, 2) }}
+ extensions: {{ openshift.master.extensions | lib_utils_to_padded_yaml(1, 2) }}
{% endif %}
servingInfo:
bindAddress: {{ openshift.master.bind_addr }}:{{ openshift.master.console_port }}
@@ -42,7 +42,7 @@ assetConfig:
{% endfor %}
{% endif %}
{% if openshift.master.audit_config | default(none) is not none %}
-auditConfig:{{ openshift.master.audit_config | to_padded_yaml(level=1) }}
+auditConfig:{{ openshift.master.audit_config | lib_utils_to_padded_yaml(level=1) }}
{% endif %}
controllerConfig:
election:
@@ -85,7 +85,7 @@ imageConfig:
format: {{ openshift.master.registry_url }}
latest: {{ openshift_master_image_config_latest }}
{% if 'image_policy_config' in openshift.master %}
-imagePolicyConfig:{{ openshift.master.image_policy_config | to_padded_yaml(level=1) }}
+imagePolicyConfig:{{ openshift.master.image_policy_config | lib_utils_to_padded_yaml(level=1) }}
{% endif %}
kind: MasterConfig
kubeletClientInfo:
@@ -96,21 +96,21 @@ kubeletClientInfo:
port: 10250
{% if openshift.master.embedded_kube | bool %}
kubernetesMasterConfig:
- apiServerArguments: {{ openshift.master.api_server_args | default(None) | to_padded_yaml( level=2 ) }}
+ apiServerArguments: {{ openshift.master.api_server_args | default(None) | lib_utils_to_padded_yaml( level=2 ) }}
{% if r_openshift_master_etcd3_storage or ( r_openshift_master_clean_install and openshift.common.version_gte_3_6 ) %}
storage-backend:
- etcd3
storage-media-type:
- application/vnd.kubernetes.protobuf
{% endif %}
- controllerArguments: {{ openshift.master.controller_args | default(None) | to_padded_yaml( level=2 ) }}
+ controllerArguments: {{ openshift.master.controller_args | default(None) | lib_utils_to_padded_yaml( level=2 ) }}
masterCount: {{ openshift.master.master_count }}
masterIP: {{ openshift.common.ip }}
podEvictionTimeout: {{ openshift.master.pod_eviction_timeout | default("") }}
proxyClientInfo:
certFile: master.proxy-client.crt
keyFile: master.proxy-client.key
- schedulerArguments: {{ openshift_master_scheduler_args | default(None) | to_padded_yaml( level=3 ) }}
+ schedulerArguments: {{ openshift_master_scheduler_args | default(None) | lib_utils_to_padded_yaml( level=3 ) }}
schedulerConfigFile: {{ openshift_master_scheduler_conf }}
servicesNodePortRange: "{{ openshift_node_port_range | default("") }}"
servicesSubnet: {{ openshift.common.portal_net }}
@@ -144,7 +144,7 @@ networkConfig:
{% endif %}
# serviceNetworkCIDR must match kubernetesMasterConfig.servicesSubnet
serviceNetworkCIDR: {{ openshift.common.portal_net }}
- externalIPNetworkCIDRs: {{ openshift_master_external_ip_network_cidrs | default(["0.0.0.0/0"]) | to_padded_yaml(1,2) }}
+ externalIPNetworkCIDRs: {{ openshift_master_external_ip_network_cidrs | default(["0.0.0.0/0"]) | lib_utils_to_padded_yaml(1,2) }}
{% if openshift_master_ingress_ip_network_cidr is defined %}
ingressIPNetworkCIDR: {{ openshift_master_ingress_ip_network_cidr }}
{% endif %}
@@ -152,8 +152,8 @@ oauthConfig:
{% if 'oauth_always_show_provider_selection' in openshift.master %}
alwaysShowProviderSelection: {{ openshift.master.oauth_always_show_provider_selection }}
{% endif %}
-{% if 'oauth_templates' in openshift.master %}
- templates:{{ openshift.master.oauth_templates | to_padded_yaml(level=2) }}
+{% if l_openshift_master_oauth_templates %}
+ templates:{{ l_openshift_master_oauth_templates | lib_utils_to_padded_yaml(level=2) }}
{% endif %}
assetPublicURL: {{ openshift.master.public_console_url }}/
grantConfig:
diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2
index cc21b37af..bff32b2e3 100644
--- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2
+++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2
@@ -6,7 +6,7 @@ CONFIG_FILE={{ openshift_master_config_file }}
{% elif openshift_push_via_dns | default(false) %}
OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000
{% endif %}
-{% if openshift.common.is_containerized | bool %}
+{% if openshift_is_containerized | bool %}
IMAGE_VERSION={{ openshift_image_tag }}
{% endif %}
diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2
index 493fc510e..b8a519baa 100644
--- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2
+++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2
@@ -6,7 +6,7 @@ CONFIG_FILE={{ openshift_master_config_file }}
{% elif openshift_push_via_dns | default(false) %}
OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000
{% endif %}
-{% if openshift.common.is_containerized | bool %}
+{% if openshift_is_containerized | bool %}
IMAGE_VERSION={{ openshift_image_tag }}
{% endif %}
diff --git a/roles/openshift_master_certificates/meta/main.yml b/roles/openshift_master_certificates/meta/main.yml
index 300b2cbff..e7d9f5bba 100644
--- a/roles/openshift_master_certificates/meta/main.yml
+++ b/roles/openshift_master_certificates/meta/main.yml
@@ -12,4 +12,5 @@ galaxy_info:
categories:
- cloud
- system
-dependencies: []
+dependencies:
+- role: lib_utils
diff --git a/roles/openshift_master_certificates/tasks/main.yml b/roles/openshift_master_certificates/tasks/main.yml
index ec1fbb1ee..ce27e238f 100644
--- a/roles/openshift_master_certificates/tasks/main.yml
+++ b/roles/openshift_master_certificates/tasks/main.yml
@@ -27,7 +27,7 @@
master_certs_missing: "{{ true if openshift_certificates_redeploy | default(false) | bool
else (False in (g_master_cert_stat_result.results
| default({})
- | oo_collect(attribute='stat.exists')
+ | lib_utils_oo_collect(attribute='stat.exists')
| list)) }}"
- name: Ensure the generated_configs directory present
@@ -47,11 +47,11 @@
- name: Create the master server certificate
command: >
- {{ hostvars[openshift_ca_host].openshift.common.client_binary }} adm ca create-server-cert
- {% for named_ca_certificate in openshift.master.named_certificates | default([]) | oo_collect('cafile') %}
+ {{ hostvars[openshift_ca_host]['first_master_client_binary'] }} adm ca create-server-cert
+ {% for named_ca_certificate in openshift.master.named_certificates | default([]) | lib_utils_oo_collect('cafile') %}
--certificate-authority {{ named_ca_certificate }}
{% endfor %}
- {% for legacy_ca_certificate in g_master_legacy_ca_result.files | default([]) | oo_collect('path') %}
+ {% for legacy_ca_certificate in g_master_legacy_ca_result.files | default([]) | lib_utils_oo_collect('path') %}
--certificate-authority {{ legacy_ca_certificate }}
{% endfor %}
--hostnames={{ hostvars[item].openshift.common.all_hostnames | join(',') }}
@@ -64,16 +64,16 @@
--overwrite=false
when: item != openshift_ca_host
with_items: "{{ hostvars
- | oo_select_keys(groups['oo_masters_to_config'])
- | oo_collect(attribute='inventory_hostname', filters={'master_certs_missing':True}) }}"
+ | lib_utils_oo_select_keys(groups['oo_masters_to_config'])
+ | lib_utils_oo_collect(attribute='inventory_hostname', filters={'master_certs_missing':True}) }}"
delegate_to: "{{ openshift_ca_host }}"
run_once: true
- name: Generate the loopback master client config
command: >
- {{ hostvars[openshift_ca_host].openshift.common.client_binary }} adm create-api-client-config
+ {{ hostvars[openshift_ca_host]['first_master_client_binary'] }} adm create-api-client-config
--certificate-authority={{ openshift_ca_cert }}
- {% for named_ca_certificate in openshift.master.named_certificates | default([]) | oo_collect('cafile') %}
+ {% for named_ca_certificate in openshift.master.named_certificates | default([]) | lib_utils_oo_collect('cafile') %}
--certificate-authority {{ named_ca_certificate }}
{% endfor %}
--client-dir={{ openshift_generated_configs_dir }}/master-{{ hostvars[item].openshift.common.hostname }}
@@ -89,8 +89,8 @@
args:
creates: "{{ openshift_generated_configs_dir }}/master-{{ hostvars[item].openshift.common.hostname }}/openshift-master.kubeconfig"
with_items: "{{ hostvars
- | oo_select_keys(groups['oo_masters_to_config'])
- | oo_collect(attribute='inventory_hostname', filters={'master_certs_missing':True}) }}"
+ | lib_utils_oo_select_keys(groups['oo_masters_to_config'])
+ | lib_utils_oo_collect(attribute='inventory_hostname', filters={'master_certs_missing':True}) }}"
when: item != openshift_ca_host
delegate_to: "{{ openshift_ca_host }}"
run_once: true
@@ -101,6 +101,7 @@
state: hard
force: true
with_items:
+ # certificates_to_synchronize is a custom filter in lib_utils
- "{{ hostvars[inventory_hostname] | certificates_to_synchronize }}"
when: master_certs_missing | bool and inventory_hostname != openshift_ca_host
delegate_to: "{{ openshift_ca_host }}"
@@ -120,7 +121,11 @@
register: g_master_certs_mktemp
changed_when: False
when: master_certs_missing | bool
- become: no
+
+- name: Chmod local temp directory for syncing certs
+ local_action: command chmod 777 "{{ g_master_certs_mktemp.stdout }}"
+ changed_when: False
+ when: master_certs_missing | bool
- name: Create a tarball of the master certs
command: >
@@ -157,7 +162,6 @@
local_action: file path="{{ g_master_certs_mktemp.stdout }}" state=absent
changed_when: False
when: master_certs_missing | bool
- become: no
- name: Lookup default group for ansible_ssh_user
command: "/usr/bin/id -g {{ ansible_ssh_user | quote }}"
diff --git a/roles/openshift_master_facts/filter_plugins/oo_filters.py b/roles/openshift_master_facts/filter_plugins/oo_filters.py
deleted file mode 120000
index 6f9bc47c1..000000000
--- a/roles/openshift_master_facts/filter_plugins/oo_filters.py
+++ /dev/null
@@ -1 +0,0 @@
-../../../filter_plugins/oo_filters.py
\ No newline at end of file
diff --git a/roles/openshift_master_facts/meta/main.yml b/roles/openshift_master_facts/meta/main.yml
index 9dbf719f8..0ab2311d3 100644
--- a/roles/openshift_master_facts/meta/main.yml
+++ b/roles/openshift_master_facts/meta/main.yml
@@ -13,3 +13,4 @@ galaxy_info:
- cloud
dependencies:
- role: openshift_facts
+- role: lib_utils
diff --git a/roles/openshift_master_facts/tasks/main.yml b/roles/openshift_master_facts/tasks/main.yml
index 418dcba67..f450c916a 100644
--- a/roles/openshift_master_facts/tasks/main.yml
+++ b/roles/openshift_master_facts/tasks/main.yml
@@ -15,7 +15,7 @@
set_fact:
g_metrics_hostname: "{{ openshift_hosted_metrics_public_url
| default('hawkular-metrics.' ~ openshift_master_default_subdomain)
- | oo_hostname_from_url }}"
+ | lib_utils_oo_hostname_from_url }}"
- set_fact:
openshift_hosted_metrics_deploy_url: "https://{{ g_metrics_hostname }}/hawkular/metrics"
@@ -57,6 +57,7 @@
access_token_max_seconds: "{{ openshift_master_access_token_max_seconds | default(None) }}"
auth_token_max_seconds: "{{ openshift_master_auth_token_max_seconds | default(None) }}"
identity_providers: "{{ openshift_master_identity_providers | default(None) }}"
+ # oo_htpasswd_users_from_file is a custom filter in role lib_utils
htpasswd_users: "{{ openshift_master_htpasswd_users | default(lookup('file', openshift_master_htpasswd_file) | oo_htpasswd_users_from_file if openshift_master_htpasswd_file is defined else None) }}"
manage_htpasswd: "{{ openshift_master_manage_htpasswd | default(true) }}"
ldap_ca: "{{ openshift_master_ldap_ca | default(lookup('file', openshift_master_ldap_ca_file) if openshift_master_ldap_ca_file is defined else None) }}"
@@ -72,11 +73,8 @@
controller_args: "{{ osm_controller_args | default(None) }}"
disabled_features: "{{ osm_disabled_features | default(None) }}"
master_count: "{{ openshift_master_count | default(None) }}"
- master_image: "{{ osm_image | default(None) }}"
admission_plugin_config: "{{openshift_master_admission_plugin_config }}"
kube_admission_plugin_config: "{{openshift_master_kube_admission_plugin_config | default(None) }}" # deprecated, merged with admission_plugin_config
- oauth_template: "{{ openshift_master_oauth_template | default(None) }}" # deprecated in origin 1.2 / OSE 3.2
- oauth_templates: "{{ openshift_master_oauth_templates | default(None) }}"
oauth_always_show_provider_selection: "{{ openshift_master_oauth_always_show_provider_selection | default(None) }}"
image_policy_config: "{{ openshift_master_image_policy_config | default(None) }}"
dynamic_provisioning_enabled: "{{ openshift_master_dynamic_provisioning_enabled | default(None) }}"
@@ -93,6 +91,8 @@
- name: Set Default scheduler predicates and priorities
set_fact:
+ # openshift_master_facts_default_predicates is a custom lookup plugin in
+ # role lib_utils
openshift_master_scheduler_default_predicates: "{{ lookup('openshift_master_facts_default_predicates') }}"
openshift_master_scheduler_default_priorities: "{{ lookup('openshift_master_facts_default_priorities') }}"
diff --git a/roles/openshift_metrics/meta/main.yaml b/roles/openshift_metrics/meta/main.yaml
index 50214135c..675ec112f 100644
--- a/roles/openshift_metrics/meta/main.yaml
+++ b/roles/openshift_metrics/meta/main.yaml
@@ -15,5 +15,6 @@ galaxy_info:
categories:
- openshift
dependencies:
-- { role: lib_openshift }
-- { role: openshift_facts }
+- role: lib_openshift
+- role: lib_utils
+- role: openshift_facts
diff --git a/roles/openshift_metrics/tasks/generate_certificates.yaml b/roles/openshift_metrics/tasks/generate_certificates.yaml
index bb842d710..b71e35263 100644
--- a/roles/openshift_metrics/tasks/generate_certificates.yaml
+++ b/roles/openshift_metrics/tasks/generate_certificates.yaml
@@ -1,7 +1,7 @@
---
- name: generate ca certificate chain
command: >
- {{ openshift.common.client_binary }} adm ca create-signer-cert
+ {{ openshift_client_binary }} adm ca create-signer-cert
--config={{ mktemp.stdout }}/admin.kubeconfig
--key='{{ mktemp.stdout }}/ca.key'
--cert='{{ mktemp.stdout }}/ca.crt'
diff --git a/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml b/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml
index 0fd19c9f8..9395fceca 100644
--- a/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml
+++ b/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml
@@ -14,7 +14,7 @@
changed_when: no
- name: generate password for hawkular metrics
- local_action: copy dest="{{ local_tmp.stdout }}/{{ item }}.pwd" content="{{ 15 | oo_random_word }}"
+ local_action: copy dest="{{ local_tmp.stdout }}/{{ item }}.pwd" content="{{ 15 | lib_utils_oo_random_word }}"
with_items:
- hawkular-metrics
become: false
diff --git a/roles/openshift_metrics/tasks/install_cassandra.yaml b/roles/openshift_metrics/tasks/install_cassandra.yaml
index 48584bd64..9026cc897 100644
--- a/roles/openshift_metrics/tasks/install_cassandra.yaml
+++ b/roles/openshift_metrics/tasks/install_cassandra.yaml
@@ -1,6 +1,6 @@
---
- shell: >
- {{ openshift.common.client_binary }} -n {{ openshift_metrics_project | quote }}
+ {{ openshift_client_binary }} -n {{ openshift_metrics_project | quote }}
--config={{ mktemp.stdout }}/admin.kubeconfig
get rc hawkular-cassandra-{{node}} -o jsonpath='{.spec.replicas}' || echo 0
vars:
diff --git a/roles/openshift_metrics/tasks/install_hawkular.yaml b/roles/openshift_metrics/tasks/install_hawkular.yaml
index a4ffa1890..f45e7a042 100644
--- a/roles/openshift_metrics/tasks/install_hawkular.yaml
+++ b/roles/openshift_metrics/tasks/install_hawkular.yaml
@@ -1,6 +1,6 @@
---
- command: >
- {{ openshift.common.client_binary }} -n {{ openshift_metrics_project | quote }}
+ {{ openshift_client_binary }} -n {{ openshift_metrics_project | quote }}
--config={{ mktemp.stdout }}/admin.kubeconfig
get rc hawkular-metrics -o jsonpath='{.spec.replicas}'
register: hawkular_metrics_replica_count
diff --git a/roles/openshift_metrics/tasks/install_heapster.yaml b/roles/openshift_metrics/tasks/install_heapster.yaml
index a33b28ba7..73e7454f0 100644
--- a/roles/openshift_metrics/tasks/install_heapster.yaml
+++ b/roles/openshift_metrics/tasks/install_heapster.yaml
@@ -1,6 +1,6 @@
---
- command: >
- {{ openshift.common.client_binary }} -n {{ openshift_metrics_project | quote }}
+ {{ openshift_client_binary }} -n {{ openshift_metrics_project | quote }}
--config={{ mktemp.stdout }}/admin.kubeconfig
get rc heapster -o jsonpath='{.spec.replicas}'
register: heapster_replica_count
diff --git a/roles/openshift_metrics/tasks/install_metrics.yaml b/roles/openshift_metrics/tasks/install_metrics.yaml
index 49d1d8cf1..4a63d081e 100644
--- a/roles/openshift_metrics/tasks/install_metrics.yaml
+++ b/roles/openshift_metrics/tasks/install_metrics.yaml
@@ -67,10 +67,27 @@
with_items: "{{ hawkular_agent_object_defs.results }}"
when: openshift_metrics_install_hawkular_agent | bool
+# TODO: Remove when asset config is removed from master-config.yaml
- include_tasks: update_master_config.yaml
+# Update asset config in openshift-web-console namespace
+- name: Add metrics route information to web console asset config
+ include_role:
+ name: openshift_web_console
+ tasks_from: update_console_config.yml
+ vars:
+ console_config_edits:
+ - key: clusterInfo#metricsPublicURL
+ value: "https://{{ openshift_metrics_hawkular_hostname}}/hawkular/metrics"
+ # Continue to set the old deprecated property until the
+ # origin-web-console image is updated for the new name.
+ # This will be removed in a future pull.
+ - key: metricsPublicURL
+ value: "https://{{ openshift_metrics_hawkular_hostname}}/hawkular/metrics"
+ when: openshift_web_console_install | default(true) | bool
+
- command: >
- {{openshift.common.client_binary}}
+ {{openshift_client_binary}}
--config={{mktemp.stdout}}/admin.kubeconfig
get rc
-l metrics-infra
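A note on the console_config_edits keys above: the '#' in clusterInfo#metricsPublicURL looks like a nesting separator for the console config document (an assumption from the key shape, not stated in this patch). Under that reading, the edit leaves the config looking roughly like:

    # clusterInfo:
    #   metricsPublicURL: https://hawkular-metrics.example.com/hawkular/metrics
    # metricsPublicURL: https://hawkular-metrics.example.com/hawkular/metrics  # deprecated flat key
    # (hostname illustrative; openshift_metrics_hawkular_hostname supplies it)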
diff --git a/roles/openshift_metrics/tasks/main.yaml b/roles/openshift_metrics/tasks/main.yaml
index 9dfe360bb..b67077bca 100644
--- a/roles/openshift_metrics/tasks/main.yaml
+++ b/roles/openshift_metrics/tasks/main.yaml
@@ -9,10 +9,10 @@
- "'not installed' not in passlib_result.stdout"
msg: "python-passlib rpm must be installed on control host"
-- name: Set default image variables based on deployment_type
+- name: Set default image variables based on openshift_deployment_type
include_vars: "{{ item }}"
with_first_found:
- - "{{ openshift_deployment_type | default(deployment_type) }}.yml"
+ - "{{ openshift_deployment_type }}.yml"
- "default_images.yml"
- name: Set metrics image facts
diff --git a/roles/openshift_metrics/tasks/oc_apply.yaml b/roles/openshift_metrics/tasks/oc_apply.yaml
index 1e1af40e8..057963c1a 100644
--- a/roles/openshift_metrics/tasks/oc_apply.yaml
+++ b/roles/openshift_metrics/tasks/oc_apply.yaml
@@ -1,7 +1,7 @@
---
- name: Checking generation of {{file_content.kind}} {{file_content.metadata.name}}
command: >
- {{ openshift.common.client_binary }}
+ {{ openshift_client_binary }}
--config={{ kubeconfig }}
get {{file_content.kind}} {{file_content.metadata.name}}
-o jsonpath='{.metadata.resourceVersion}'
@@ -12,21 +12,25 @@
- name: Applying {{file_name}}
command: >
- {{ openshift.common.client_binary }} --config={{ kubeconfig }}
+ {{ openshift_client_binary }} --config={{ kubeconfig }}
apply -f {{ file_name }}
-n {{namespace}}
register: generation_apply
- failed_when: "'error' in generation_apply.stderr"
+ failed_when:
+ - "'error' in generation_apply.stderr"
+ - "generation_apply.rc != 0"
changed_when: no
- name: Determine change status of {{file_content.kind}} {{file_content.metadata.name}}
command: >
- {{ openshift.common.client_binary }} --config={{ kubeconfig }}
+ {{ openshift_client_binary }} --config={{ kubeconfig }}
get {{file_content.kind}} {{file_content.metadata.name}}
-o jsonpath='{.metadata.resourceVersion}'
-n {{namespace}}
register: version_changed
vars:
init_version: "{{ (generation_init is defined) | ternary(generation_init.stdout, '0') }}"
- failed_when: "'error' in version_changed.stderr"
+ failed_when:
+ - "'error' in version_changed.stderr"
+ - "version_changed.rc != 0"
changed_when: version_changed.stdout | int > init_version | int
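One behavioural detail worth spelling out: Ansible ANDs the entries of a list-form failed_when, so after this change the apply and get steps fail only when both conditions hold. An equivalent single-expression form of the new check:

    failed_when: "'error' in generation_apply.stderr and generation_apply.rc != 0"
    # i.e. warnings printed to stderr no longer fail the task when oc exits 0.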
diff --git a/roles/openshift_metrics/tasks/pre_install.yaml b/roles/openshift_metrics/tasks/pre_install.yaml
index d6756f9b9..976763236 100644
--- a/roles/openshift_metrics/tasks/pre_install.yaml
+++ b/roles/openshift_metrics/tasks/pre_install.yaml
@@ -14,7 +14,7 @@
- name: list existing secrets
command: >
- {{ openshift.common.client_binary }} -n {{ openshift_metrics_project }}
+ {{ openshift_client_binary }} -n {{ openshift_metrics_project }}
--config={{ mktemp.stdout }}/admin.kubeconfig
get secrets -o name
register: metrics_secrets
diff --git a/roles/openshift_metrics/tasks/setup_certificate.yaml b/roles/openshift_metrics/tasks/setup_certificate.yaml
index 2d880f4d6..223bd975e 100644
--- a/roles/openshift_metrics/tasks/setup_certificate.yaml
+++ b/roles/openshift_metrics/tasks/setup_certificate.yaml
@@ -1,7 +1,7 @@
---
- name: generate {{ component }} keys
command: >
- {{ openshift.common.client_binary }} adm ca create-server-cert
+ {{ openshift_client_binary }} adm ca create-server-cert
--config={{ mktemp.stdout }}/admin.kubeconfig
--key='{{ mktemp.stdout }}/{{ component }}.key'
--cert='{{ mktemp.stdout }}/{{ component }}.crt'
@@ -23,7 +23,7 @@
- name: generate random password for the {{ component }} keystore
copy:
- content: "{{ 15 | oo_random_word }}"
+ content: "{{ 15 | lib_utils_oo_random_word }}"
dest: '{{ mktemp.stdout }}/{{ component }}-keystore.pwd'
- slurp: src={{ mktemp.stdout | quote }}/{{ component|quote }}-keystore.pwd
@@ -39,5 +39,5 @@
- name: generate random password for the {{ component }} truststore
copy:
- content: "{{ 15 | oo_random_word }}"
+ content: "{{ 15 | lib_utils_oo_random_word }}"
dest: '{{ mktemp.stdout | quote }}/{{ component|quote }}-truststore.pwd'
diff --git a/roles/openshift_metrics/tasks/start_metrics.yaml b/roles/openshift_metrics/tasks/start_metrics.yaml
index 2037e8dc3..899251727 100644
--- a/roles/openshift_metrics/tasks/start_metrics.yaml
+++ b/roles/openshift_metrics/tasks/start_metrics.yaml
@@ -1,6 +1,6 @@
---
- command: >
- {{openshift.common.client_binary}}
+ {{openshift_client_binary}}
--config={{mktemp.stdout}}/admin.kubeconfig
get rc
-l metrics-infra=hawkular-cassandra
@@ -23,7 +23,7 @@
changed_when: metrics_cassandra_rc | length > 0
- command: >
- {{openshift.common.client_binary}}
+ {{openshift_client_binary}}
--config={{mktemp.stdout}}/admin.kubeconfig
get rc
-l metrics-infra=hawkular-metrics
@@ -45,7 +45,7 @@
changed_when: metrics_metrics_rc | length > 0
- command: >
- {{openshift.common.client_binary}}
+ {{openshift_client_binary}}
--config={{mktemp.stdout}}/admin.kubeconfig
get rc
-l metrics-infra=heapster
diff --git a/roles/openshift_metrics/tasks/stop_metrics.yaml b/roles/openshift_metrics/tasks/stop_metrics.yaml
index 9a2ce9267..4b1d7119d 100644
--- a/roles/openshift_metrics/tasks/stop_metrics.yaml
+++ b/roles/openshift_metrics/tasks/stop_metrics.yaml
@@ -1,6 +1,6 @@
---
- command: >
- {{openshift.common.client_binary}}
+ {{openshift_client_binary}}
--config={{mktemp.stdout}}/admin.kubeconfig
get rc
-l metrics-infra=heapster
@@ -22,7 +22,7 @@
loop_var: object
- command: >
- {{openshift.common.client_binary}}
+ {{openshift_client_binary}}
--config={{mktemp.stdout}}/admin.kubeconfig
get rc
-l metrics-infra=hawkular-metrics
@@ -44,7 +44,7 @@
changed_when: metrics_hawkular_rc | length > 0
- command: >
- {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig
+ {{openshift_client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig
get rc
-o name
-l metrics-infra=hawkular-cassandra
diff --git a/roles/openshift_metrics/tasks/uninstall_hosa.yaml b/roles/openshift_metrics/tasks/uninstall_hosa.yaml
index 42ed02460..ae3306496 100644
--- a/roles/openshift_metrics/tasks/uninstall_hosa.yaml
+++ b/roles/openshift_metrics/tasks/uninstall_hosa.yaml
@@ -1,7 +1,7 @@
---
- name: remove Hawkular Agent (HOSA) components
command: >
- {{ openshift.common.client_binary }} -n {{ openshift_metrics_hawkular_agent_namespace }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ {{ openshift_client_binary }} -n {{ openshift_metrics_hawkular_agent_namespace }} --config={{ mktemp.stdout }}/admin.kubeconfig
delete --ignore-not-found --selector=metrics-infra=agent
all,sa,secrets,templates,routes,pvc,rolebindings,clusterrolebindings
register: delete_metrics
@@ -9,7 +9,7 @@
- name: remove rolebindings
command: >
- {{ openshift.common.client_binary }} -n {{ openshift_metrics_hawkular_agent_namespace }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ {{ openshift_client_binary }} -n {{ openshift_metrics_hawkular_agent_namespace }} --config={{ mktemp.stdout }}/admin.kubeconfig
delete --ignore-not-found
clusterrolebinding/hawkular-openshift-agent-rb
changed_when: delete_metrics.stdout != 'No resources found'
diff --git a/roles/openshift_metrics/tasks/uninstall_metrics.yaml b/roles/openshift_metrics/tasks/uninstall_metrics.yaml
index 1265c7bfd..610c7b4e5 100644
--- a/roles/openshift_metrics/tasks/uninstall_metrics.yaml
+++ b/roles/openshift_metrics/tasks/uninstall_metrics.yaml
@@ -4,7 +4,7 @@
- name: remove metrics components
command: >
- {{ openshift.common.client_binary }} -n {{ openshift_metrics_project }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ {{ openshift_client_binary }} -n {{ openshift_metrics_project }} --config={{ mktemp.stdout }}/admin.kubeconfig
delete --ignore-not-found --selector=metrics-infra
all,sa,secrets,templates,routes,pvc,rolebindings,clusterrolebindings,clusterrole
register: delete_metrics
@@ -12,9 +12,20 @@
- name: remove rolebindings
command: >
- {{ openshift.common.client_binary }} -n {{ openshift_metrics_project }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ {{ openshift_client_binary }} -n {{ openshift_metrics_project }} --config={{ mktemp.stdout }}/admin.kubeconfig
delete --ignore-not-found
rolebinding/hawkular-view
clusterrolebinding/heapster-cluster-reader
clusterrolebinding/hawkular-metrics
changed_when: delete_metrics.stdout != 'No resources found'
+
+# Update asset config in openshift-web-console namespace
+- name: Remove metrics route information from web console asset config
+ include_role:
+ name: openshift_web_console
+ tasks_from: update_asset_config.yml
+ vars:
+ asset_config_edits:
+ - key: metricsPublicURL
+ value: ""
+ when: openshift_web_console_install | default(true) | bool
diff --git a/roles/openshift_metrics/tasks/update_master_config.yaml b/roles/openshift_metrics/tasks/update_master_config.yaml
index 5059d8d94..6567fcb4f 100644
--- a/roles/openshift_metrics/tasks/update_master_config.yaml
+++ b/roles/openshift_metrics/tasks/update_master_config.yaml
@@ -1,4 +1,5 @@
---
+# TODO: Remove when asset config is removed from master-config.yaml
- name: Adding metrics route information to metricsPublicURL
modify_yaml:
dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
diff --git a/roles/openshift_metrics/templates/hawkular_metrics_rc.j2 b/roles/openshift_metrics/templates/hawkular_metrics_rc.j2
index e976bc222..7c75b2f97 100644
--- a/roles/openshift_metrics/templates/hawkular_metrics_rc.j2
+++ b/roles/openshift_metrics/templates/hawkular_metrics_rc.j2
@@ -64,7 +64,7 @@ spec:
- name: MASTER_URL
value: "{{ openshift_metrics_master_url }}"
- name: JGROUPS_PASSWORD
- value: "{{ 17 | oo_random_word }}"
+ value: "{{ 17 | lib_utils_oo_random_word }}"
- name: TRUSTSTORE_AUTHORITIES
value: "/hawkular-metrics-certs/tls.truststore.crt"
- name: ENABLE_PROMETHEUS_ENDPOINT
diff --git a/roles/openshift_named_certificates/filter_plugins/openshift_named_certificates.py b/roles/openshift_named_certificates/filter_plugins/openshift_named_certificates.py
deleted file mode 100644
index 6ed6d404c..000000000
--- a/roles/openshift_named_certificates/filter_plugins/openshift_named_certificates.py
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-'''
-Custom filters for use with openshift named certificates
-'''
-
-
-class FilterModule(object):
- ''' Custom ansible filters for use with openshift named certificates'''
-
- @staticmethod
- def oo_named_certificates_list(named_certificates):
- ''' Returns named certificates list with correct fields for the master
- config file.'''
- return [{'certFile': named_certificate['certfile'],
- 'keyFile': named_certificate['keyfile'],
- 'names': named_certificate['names']} for named_certificate in named_certificates]
-
- def filters(self):
- ''' returns a mapping of filters to methods '''
- return {"oo_named_certificates_list": self.oo_named_certificates_list}
diff --git a/roles/openshift_named_certificates/meta/main.yml b/roles/openshift_named_certificates/meta/main.yml
index 2c6e12494..e7d81df53 100644
--- a/roles/openshift_named_certificates/meta/main.yml
+++ b/roles/openshift_named_certificates/meta/main.yml
@@ -14,3 +14,4 @@ galaxy_info:
- system
dependencies:
- role: openshift_facts
+- role: lib_utils
diff --git a/roles/openshift_named_certificates/tasks/main.yml b/roles/openshift_named_certificates/tasks/main.yml
index 1bcf9ef67..021fa8385 100644
--- a/roles/openshift_named_certificates/tasks/main.yml
+++ b/roles/openshift_named_certificates/tasks/main.yml
@@ -1,9 +1,8 @@
---
- set_fact:
- parsed_named_certificates: "{{ named_certificates | oo_parse_named_certificates(named_certs_dir, internal_hostnames) }}"
+ parsed_named_certificates: "{{ named_certificates | lib_utils_oo_parse_named_certificates(named_certs_dir, internal_hostnames) }}"
when: named_certificates | length > 0
delegate_to: localhost
- become: no
run_once: true
- openshift_facts:
@@ -43,4 +42,4 @@
src: "{{ item }}"
dest: "{{ named_certs_dir }}/{{ item | basename }}"
mode: 0600
- with_items: "{{ named_certificates | oo_collect('cafile') }}"
+ with_items: "{{ named_certificates | lib_utils_oo_collect('cafile') }}"
diff --git a/roles/openshift_nfs/meta/main.yml b/roles/openshift_nfs/meta/main.yml
index d7b5910f2..17c0cf33f 100644
--- a/roles/openshift_nfs/meta/main.yml
+++ b/roles/openshift_nfs/meta/main.yml
@@ -13,4 +13,4 @@ galaxy_info:
- cloud
dependencies:
- role: lib_utils
-- role: lib_os_firewall
+- role: lib_utils
diff --git a/roles/openshift_nfs/tasks/create_export.yml b/roles/openshift_nfs/tasks/create_export.yml
index 5fcdbf76e..331685289 100644
--- a/roles/openshift_nfs/tasks/create_export.yml
+++ b/roles/openshift_nfs/tasks/create_export.yml
@@ -3,7 +3,7 @@
#
# Include signature
#
-# include_role:
+# import_role:
# role: openshift_nfs
# tasks_from: create_export
# vars:
diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml
index fff927944..0b10413c5 100644
--- a/roles/openshift_node/defaults/main.yml
+++ b/roles/openshift_node/defaults/main.yml
@@ -1,6 +1,70 @@
---
openshift_node_debug_level: "{{ debug_level | default(2) }}"
-
+openshift_node_iptables_sync_period: '30s'
+osn_storage_plugin_deps:
+- ceph
+- glusterfs
+- iscsi
+openshift_node_local_quota_per_fsgroup: ""
+openshift_node_proxy_mode: iptables
+openshift_set_node_ip: False
+openshift_config_base: '/etc/origin'
+
+openshift_oreg_url_default_dict:
+ origin: "openshift/origin-${component}:${version}"
+ openshift-enterprise: "openshift3/ose-${component}:${version}"
+openshift_oreg_url_default: "{{ openshift_oreg_url_default_dict[openshift_deployment_type] }}"
+oreg_url_node: "{{ oreg_url | default(openshift_oreg_url_default) }}"
+
+osn_ovs_image_default_dict:
+ origin: "openshift/openvswitch"
+ openshift-enterprise: "openshift3/openvswitch"
+osn_ovs_image_default: "{{ osn_ovs_image_default_dict[openshift_deployment_type] }}"
+osn_ovs_image: "{{ osn_ovs_image_default }}"
+
+openshift_dns_ip: "{{ ansible_default_ipv4['address'] }}"
+
+openshift_node_env_vars: {}
+
+# Build a list of 'key=value' strings from openshift_node_labels.
+l_node_kubelet_node_labels: "{{ openshift_node_labels | default({}) | lib_utils_oo_dict_to_keqv_list }}"
+
+openshift_node_kubelet_args_dict:
+ aws:
+ cloud-provider:
+ - aws
+ cloud-config:
+ - "{{ openshift_config_base ~ '/cloudprovider/aws.conf' }}"
+ node-labels: "{{ l_node_kubelet_node_labels }}"
+ openstack:
+ cloud-provider:
+ - openstack
+ cloud-config:
+ - "{{ openshift_config_base ~ '/cloudprovider/openstack.conf' }}"
+ node-labels: "{{ l_node_kubelet_node_labels }}"
+ gce:
+ cloud-provider:
+ - gce
+ cloud-config:
+ - "{{ openshift_config_base ~ '/cloudprovider/gce.conf' }}"
+ node-labels: "{{ l_node_kubelet_node_labels }}"
+ azure:
+ cloud-provider:
+ - azure
+ cloud-config:
+ - "{{ openshift_config_base ~ '/cloudprovider/azure.conf' }}"
+ node-labels: "{{ l_node_kubelet_node_labels }}"
+ undefined:
+ node-labels: "{{ l_node_kubelet_node_labels }}"
+
+l_node_kubelet_args_default: "{{ openshift_node_kubelet_args_dict[openshift_cloudprovider_kind | default('undefined')] }}"
+
+l_openshift_node_kubelet_args: "{{ openshift_node_kubelet_args | default({}) }}"
+# Combine the default kubelet_args dictionary (based on cloud provider, if provided)
+# with user-supplied openshift_node_kubelet_args.
+# openshift_node_kubelet_args will override the defaults, if keys and/or subkeys
+# are present in both.
+l2_openshift_node_kubelet_args: "{{ l_node_kubelet_args_default | combine(l_openshift_node_kubelet_args, recursive=True) }}"
openshift_node_dnsmasq_install_network_manager_hook: true
# lo must always be present in this list or dnsmasq will conflict with
@@ -14,10 +78,15 @@ r_openshift_node_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }
l_is_node_system_container: "{{ (openshift_use_node_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
openshift_deployment_type: "{{ openshift_deployment_type | default('origin') }}"
+
+openshift_node_image_dict:
+ origin: 'openshift/node'
+ openshift-enterprise: 'openshift3/node'
+osn_image: "{{ openshift_node_image_dict[openshift_deployment_type] }}"
+
openshift_service_type_dict:
origin: origin
openshift-enterprise: atomic-openshift
-
openshift_service_type: "{{ openshift_service_type_dict[openshift_deployment_type] }}"
system_images_registry_dict:
@@ -106,9 +175,9 @@ oreg_auth_credentials_path: "{{ openshift_node_data_dir }}/.docker"
oreg_auth_credentials_replace: False
l_bind_docker_reg_auth: False
openshift_use_crio: False
-openshift_docker_alternative_creds: "{{ (openshift_docker_use_system_container | default(False)) or (openshift_use_crio_only | default(False)) }}"
+openshift_docker_alternative_creds: "{{ (openshift_docker_use_system_container | default(False) | bool) or (openshift_use_crio_only | default(False) | bool) }}"
-openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False)) else 'docker' }}"
+openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False) | bool) else 'docker' }}"
# NOTE
# r_openshift_node_*_default may be defined external to this role.
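A minimal sketch of how the kubelet-args merge introduced above behaves; the override values are hypothetical:

    # Inventory:
    #   openshift_cloudprovider_kind: aws
    #   openshift_node_kubelet_args: {max-pods: ['250'], cloud-provider: ['external']}
    # The defaults contribute (aws key of openshift_node_kubelet_args_dict):
    #   {cloud-provider: ['aws'],
    #    cloud-config: ['/etc/origin/cloudprovider/aws.conf'],
    #    node-labels: <l_node_kubelet_node_labels>}
    # combine(..., recursive=True) merges key by key, user values winning:
    #   l2_openshift_node_kubelet_args:
    #     cloud-provider: ['external']   # user override replaces the default
    #     cloud-config: ['/etc/origin/cloudprovider/aws.conf']
    #     node-labels: [...]
    #     max-pods: ['250']              # key added by the user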
diff --git a/roles/openshift_node/handlers/main.yml b/roles/openshift_node/handlers/main.yml
index 62e0e1341..779916335 100644
--- a/roles/openshift_node/handlers/main.yml
+++ b/roles/openshift_node/handlers/main.yml
@@ -34,7 +34,7 @@
pause: seconds=15
when:
- (not skip_node_svc_handlers | default(False) | bool)
- - openshift.common.is_containerized | bool
+ - openshift_is_containerized | bool
- name: restart node
systemd:
diff --git a/roles/openshift_node/meta/main.yml b/roles/openshift_node/meta/main.yml
index 70057c7f3..59e743dce 100644
--- a/roles/openshift_node/meta/main.yml
+++ b/roles/openshift_node/meta/main.yml
@@ -12,12 +12,5 @@ galaxy_info:
categories:
- cloud
dependencies:
-- role: openshift_node_facts
- when: not (openshift_node_upgrade_in_progress | default(False))
- role: lib_openshift
-- role: lib_os_firewall
- when: not (openshift_node_upgrade_in_progress | default(False))
-- role: openshift_cloud_provider
- when: not (openshift_node_upgrade_in_progress | default(False))
- role: lib_utils
- when: openshift_node_upgrade_in_progress | default(False)
diff --git a/roles/openshift_node/tasks/config.yml b/roles/openshift_node/tasks/config.yml
index 8a55cd428..1103fe4c9 100644
--- a/roles/openshift_node/tasks/config.yml
+++ b/roles/openshift_node/tasks/config.yml
@@ -4,7 +4,7 @@
- name: Pull container images
include_tasks: container_images.yml
- when: openshift.common.is_containerized | bool
+ when: openshift_is_containerized | bool
- name: Start and enable openvswitch service
systemd:
@@ -13,7 +13,7 @@
state: started
daemon_reload: yes
when:
- - openshift.common.is_containerized | bool
+ - openshift_is_containerized | bool
- openshift_node_use_openshift_sdn | default(true) | bool
register: ovs_start_result
until: not (ovs_start_result is failed)
@@ -24,9 +24,9 @@
ovs_service_status_changed: "{{ ovs_start_result is changed }}"
- file:
- dest: "{{ (openshift_node_kubelet_args|default({'config':None})).config}}"
+ dest: "{{ l2_openshift_node_kubelet_args['config'] }}"
state: directory
- when: openshift_node_kubelet_args is defined and 'config' in openshift_node_kubelet_args
+ when: ('config' in l2_openshift_node_kubelet_args) | bool
# TODO: add the validate parameter when there is a validation command to run
- name: Create the Node config
@@ -46,7 +46,7 @@
regexp: "^{{ item.key }}="
line: "{{ item.key }}={{ item.value }}"
create: true
- with_dict: "{{ openshift.node.env_vars | default({}) }}"
+ with_dict: "{{ openshift_node_env_vars }}"
notify:
- restart node
@@ -58,7 +58,7 @@
# restarted after the node restarts docker and it will take up to 60 seconds for
# systemd to start the master again
- when:
- - openshift.common.is_containerized | bool
+ - openshift_is_containerized | bool
- not openshift_node_bootstrap
block:
- name: Wait for master API to become available before proceeding
diff --git a/roles/openshift_node/tasks/container_images.yml b/roles/openshift_node/tasks/container_images.yml
index 0b8c806ae..bb788e2f1 100644
--- a/roles/openshift_node/tasks/container_images.yml
+++ b/roles/openshift_node/tasks/container_images.yml
@@ -12,7 +12,7 @@
- name: Pre-pull openvswitch image
command: >
- docker pull {{ openshift.node.ovs_image }}:{{ openshift_image_tag }}
+ docker pull {{ osn_ovs_image }}:{{ openshift_image_tag }}
register: pull_result
changed_when: "'Downloaded newer image' in pull_result.stdout"
when:
diff --git a/roles/openshift_node/tasks/dnsmasq_install.yml b/roles/openshift_node/tasks/dnsmasq_install.yml
index 0c8857b11..5e06ba032 100644
--- a/roles/openshift_node/tasks/dnsmasq_install.yml
+++ b/roles/openshift_node/tasks/dnsmasq_install.yml
@@ -12,7 +12,7 @@
- name: Install dnsmasq
package: name=dnsmasq state=installed
- when: not openshift.common.is_atomic | bool
+ when: not openshift_is_atomic | bool
register: result
until: result is succeeded
diff --git a/roles/openshift_node/tasks/install.yml b/roles/openshift_node/tasks/install.yml
index b1fcf4068..a4a9c1237 100644
--- a/roles/openshift_node/tasks/install.yml
+++ b/roles/openshift_node/tasks/install.yml
@@ -1,35 +1,25 @@
---
-- when: not openshift.common.is_containerized | bool
- block:
- - name: Install Node package
- package:
- name: "{{ openshift_service_type }}-node{{ (openshift_pkg_version | default('')) | oo_image_tag_to_rpm_version(include_dash=True) }}"
- state: present
- register: result
- until: result is succeeded
-
- - name: Install sdn-ovs package
- package:
- name: "{{ openshift_service_type }}-sdn-ovs{{ (openshift_pkg_version | default('')) | oo_image_tag_to_rpm_version(include_dash=True) }}"
- state: present
- when:
- - openshift_node_use_openshift_sdn | bool
- register: result
- until: result is succeeded
-
- - name: Install conntrack-tools package
- package:
- name: "conntrack-tools"
- state: present
- register: result
- until: result is succeeded
+- name: Install Node package, sdn-ovs, conntrack packages
+ package:
+ name: "{{ item.name }}"
+ state: present
+ register: result
+ until: result is succeeded
+ with_items:
+ - name: "{{ openshift_service_type }}-node{{ (openshift_pkg_version | default('')) | lib_utils_oo_image_tag_to_rpm_version(include_dash=True) }}"
+ - name: "{{ openshift_service_type }}-sdn-ovs{{ (openshift_pkg_version | default('')) | lib_utils_oo_image_tag_to_rpm_version(include_dash=True) }}"
+ install: "{{ openshift_node_use_openshift_sdn | bool }}"
+ - name: "conntrack-tools"
+ when:
+ - not openshift_is_containerized | bool
+ - item['install'] | default(True) | bool
- when:
- - openshift.common.is_containerized | bool
+ - openshift_is_containerized | bool
- not l_is_node_system_container | bool
block:
- name: Pre-pull node image when containerized
command: >
- docker pull {{ openshift.node.node_image }}:{{ openshift_image_tag }}
+ docker pull {{ osn_image }}:{{ openshift_image_tag }}
register: pull_result
changed_when: "'Downloaded newer image' in pull_result.stdout"
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index 8bd8f2536..754ecacaf 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -3,8 +3,8 @@
msg: "SELinux is disabled, This deployment type requires that SELinux is enabled."
when:
- (not ansible_selinux or ansible_selinux.status != 'enabled')
- - deployment_type == 'openshift-enterprise'
- - not openshift_use_crio
+ - openshift_deployment_type == 'openshift-enterprise'
+ - not openshift_use_crio | bool
- include_tasks: dnsmasq_install.yml
- include_tasks: dnsmasq.yml
@@ -50,7 +50,7 @@
name: cri-o
enabled: yes
state: restarted
- when: openshift_use_crio
+ when: openshift_use_crio | bool
register: task_result
failed_when:
- task_result is failed
@@ -85,21 +85,17 @@
- name: GlusterFS storage plugin configuration
include_tasks: storage_plugins/glusterfs.yml
- when: "'glusterfs' in openshift.node.storage_plugin_deps"
+ when: "'glusterfs' in osn_storage_plugin_deps"
- name: Ceph storage plugin configuration
include_tasks: storage_plugins/ceph.yml
- when: "'ceph' in openshift.node.storage_plugin_deps"
+ when: "'ceph' in osn_storage_plugin_deps"
- name: iSCSI storage plugin configuration
include_tasks: storage_plugins/iscsi.yml
- when: "'iscsi' in openshift.node.storage_plugin_deps"
+ when: "'iscsi' in osn_storage_plugin_deps"
##### END Storage #####
- include_tasks: config/workaround-bz1331590-ovs-oom-fix.yml
when: openshift_node_use_openshift_sdn | default(true) | bool
-
-- name: include bootstrap node config
- include_tasks: bootstrap.yml
- when: openshift_node_bootstrap
diff --git a/roles/openshift_node/tasks/node_system_container.yml b/roles/openshift_node/tasks/node_system_container.yml
index 98978ec6f..06b879050 100644
--- a/roles/openshift_node/tasks/node_system_container.yml
+++ b/roles/openshift_node/tasks/node_system_container.yml
@@ -2,14 +2,14 @@
- name: Pre-pull node system container image
command: >
- atomic pull --storage=ostree {{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.node.node_system_image }}:{{ openshift_image_tag }}
+ atomic pull --storage=ostree {{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ osn_image }}:{{ openshift_image_tag }}
register: pull_result
changed_when: "'Pulling layer' in pull_result.stdout"
- name: Install or Update node system container
oc_atomic_container:
name: "{{ openshift_service_type }}-node"
- image: "{{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.node.node_system_image }}:{{ openshift_image_tag }}"
+ image: "{{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ osn_image }}:{{ openshift_image_tag }}"
values:
- "DNS_DOMAIN={{ openshift.common.dns_domain }}"
- "DOCKER_SERVICE={{ openshift_docker_service_name }}.service"
diff --git a/roles/openshift_node/tasks/openvswitch_system_container.yml b/roles/openshift_node/tasks/openvswitch_system_container.yml
index b61bc84c1..d7dce6969 100644
--- a/roles/openshift_node/tasks/openvswitch_system_container.yml
+++ b/roles/openshift_node/tasks/openvswitch_system_container.yml
@@ -1,22 +1,22 @@
---
- set_fact:
l_service_name: "cri-o"
- when: openshift_use_crio
+ when: openshift_use_crio | bool
- set_fact:
l_service_name: "{{ openshift_docker_service_name }}"
- when: not openshift_use_crio
+ when: not openshift_use_crio | bool
- name: Pre-pull OpenVSwitch system container image
command: >
- atomic pull --storage=ostree {{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.node.ovs_system_image }}:{{ openshift_image_tag }}
+ atomic pull --storage=ostree {{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ osn_ovs_image }}:{{ openshift_image_tag }}
register: pull_result
changed_when: "'Pulling layer' in pull_result.stdout"
- name: Install or Update OpenVSwitch system container
oc_atomic_container:
name: openvswitch
- image: "{{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.node.ovs_system_image }}:{{ openshift_image_tag }}"
+ image: "{{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ osn_ovs_image }}:{{ openshift_image_tag }}"
state: latest
values:
- "DOCKER_SERVICE={{ l_service_name }}"
diff --git a/roles/openshift_node/tasks/registry_auth.yml b/roles/openshift_node/tasks/registry_auth.yml
index ab43ec049..92650e6b7 100644
--- a/roles/openshift_node/tasks/registry_auth.yml
+++ b/roles/openshift_node/tasks/registry_auth.yml
@@ -41,7 +41,7 @@
set_fact:
l_bind_docker_reg_auth: True
when:
- - openshift.common.is_containerized | bool
+ - openshift_is_containerized | bool
- oreg_auth_user is defined
- >
(node_oreg_auth_credentials_stat.stat.exists
diff --git a/roles/openshift_node/tasks/storage_plugins/ceph.yml b/roles/openshift_node/tasks/storage_plugins/ceph.yml
index 52d80357e..e30f58a9a 100644
--- a/roles/openshift_node/tasks/storage_plugins/ceph.yml
+++ b/roles/openshift_node/tasks/storage_plugins/ceph.yml
@@ -1,6 +1,6 @@
---
- name: Install Ceph storage plugin dependencies
package: name=ceph-common state=present
- when: not openshift.common.is_atomic | bool
+ when: not openshift_is_atomic | bool
register: result
until: result is succeeded
diff --git a/roles/openshift_node/tasks/storage_plugins/glusterfs.yml b/roles/openshift_node/tasks/storage_plugins/glusterfs.yml
index e60f57ae7..c04a6922a 100644
--- a/roles/openshift_node/tasks/storage_plugins/glusterfs.yml
+++ b/roles/openshift_node/tasks/storage_plugins/glusterfs.yml
@@ -1,7 +1,7 @@
---
- name: Install GlusterFS storage plugin dependencies
package: name=glusterfs-fuse state=present
- when: not openshift.common.is_atomic | bool
+ when: not openshift_is_atomic | bool
register: result
until: result is succeeded
diff --git a/roles/openshift_node/tasks/storage_plugins/iscsi.yml b/roles/openshift_node/tasks/storage_plugins/iscsi.yml
index d3a3668d5..a8048c42f 100644
--- a/roles/openshift_node/tasks/storage_plugins/iscsi.yml
+++ b/roles/openshift_node/tasks/storage_plugins/iscsi.yml
@@ -1,6 +1,6 @@
---
- name: Install iSCSI storage plugin dependencies
package: name=iscsi-initiator-utils state=present
- when: not openshift.common.is_atomic | bool
+ when: not openshift_is_atomic | bool
register: result
until: result is succeeded
diff --git a/roles/openshift_node/tasks/storage_plugins/nfs.yml b/roles/openshift_node/tasks/storage_plugins/nfs.yml
index 1484aa076..c2922644f 100644
--- a/roles/openshift_node/tasks/storage_plugins/nfs.yml
+++ b/roles/openshift_node/tasks/storage_plugins/nfs.yml
@@ -1,7 +1,7 @@
---
- name: Install NFS storage plugin dependencies
package: name=nfs-utils state=present
- when: not openshift.common.is_atomic | bool
+ when: not openshift_is_atomic | bool
register: result
until: result is succeeded
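
All four storage-plugin installs share the same retry idiom: register the package result and loop with until: result is succeeded, so a transient repository hiccup does not abort the play. When retries and delay are omitted, Ansible falls back to its defaults (3 attempts, 5 seconds); a sketch with the values spelled out:

    - name: Install a storage plugin dependency (sketch)
      package:
        name: nfs-utils
        state: present
      register: result
      until: result is succeeded
      retries: 3    # explicit here; this is Ansible's default when omitted
      delay: 5
      when: not openshift_is_atomic | bool
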
diff --git a/roles/openshift_node/tasks/systemd_units.yml b/roles/openshift_node/tasks/systemd_units.yml
index 262ee698b..e33a4999f 100644
--- a/roles/openshift_node/tasks/systemd_units.yml
+++ b/roles/openshift_node/tasks/systemd_units.yml
@@ -2,13 +2,13 @@
- name: Install Node service file
template:
dest: "/etc/systemd/system/{{ openshift_service_type }}-node.service"
- src: "{{ openshift.common.is_containerized | bool | ternary('openshift.docker.node.service', 'node.service.j2') }}"
+ src: "{{ openshift_is_containerized | bool | ternary('openshift.docker.node.service', 'node.service.j2') }}"
when: not l_is_node_system_container | bool
notify:
- reload systemd units
- restart node
-- when: openshift.common.is_containerized | bool
+- when: openshift_is_containerized | bool
block:
- name: include node deps docker service file
include_tasks: config/install-node-deps-docker-service-file.yml
diff --git a/roles/openshift_node/tasks/upgrade.yml b/roles/openshift_node/tasks/upgrade.yml
index f0a013e45..02e417937 100644
--- a/roles/openshift_node/tasks/upgrade.yml
+++ b/roles/openshift_node/tasks/upgrade.yml
@@ -1,11 +1,10 @@
---
# input variables:
# - l_docker_upgrade
-# - openshift.common.is_atomic
+# - openshift_is_atomic
# - node_config_hook
# - openshift_pkg_version
-# - openshift.common.is_containerized
-# - deployment_type
+# - openshift_is_containerized
# - openshift_release
# tasks file for openshift_node_upgrade
@@ -26,7 +25,7 @@
include_tasks: upgrade/rpm_upgrade_install.yml
vars:
openshift_version: "{{ openshift_pkg_version | default('') }}"
- when: not openshift.common.is_containerized | bool
+ when: not openshift_is_containerized | bool
- include_tasks: "{{ node_config_hook }}"
diff --git a/roles/openshift_node/tasks/upgrade/config_changes.yml b/roles/openshift_node/tasks/upgrade/config_changes.yml
index 439700df6..dd9183382 100644
--- a/roles/openshift_node/tasks/upgrade/config_changes.yml
+++ b/roles/openshift_node/tasks/upgrade/config_changes.yml
@@ -1,7 +1,7 @@
---
- name: Update systemd units
include_tasks: ../systemd_units.yml
- when: openshift.common.is_containerized
+ when: openshift_is_containerized | bool
- name: Update oreg value
yedit:
@@ -21,6 +21,12 @@
path: "/var/lib/dockershim/sandbox/"
state: absent
+# https://bugzilla.redhat.com/show_bug.cgi?id=1518912
+- name: Clean up IPAM data
+ file:
+ path: "/var/lib/cni/networks/openshift-sdn/"
+ state: absent
+
# Disable Swap Block (pre)
- block:
- name: Remove swap entries from /etc/fstab
@@ -60,6 +66,7 @@
dest: "/etc/systemd/system/{{ openshift_service_type }}-node.service"
src: "node.service.j2"
register: l_node_unit
+ when: not openshift_is_containerized | bool
- name: Reset selinux context
command: restorecon -RF {{ openshift_node_data_dir }}/openshift.local.volumes
@@ -74,4 +81,3 @@
# require a service to be part of the call.
- name: Reload systemd units
command: systemctl daemon-reload
- when: l_node_unit is changed
diff --git a/roles/openshift_node/tasks/upgrade/containerized_upgrade_pull.yml b/roles/openshift_node/tasks/upgrade/containerized_upgrade_pull.yml
index 71f00dcd2..e5477f389 100644
--- a/roles/openshift_node/tasks/upgrade/containerized_upgrade_pull.yml
+++ b/roles/openshift_node/tasks/upgrade/containerized_upgrade_pull.yml
@@ -1,15 +1,15 @@
---
- name: Pre-pull node image
command: >
- docker pull {{ openshift.node.node_image }}:{{ openshift_image_tag }}
+ docker pull {{ osn_image }}:{{ openshift_image_tag }}
register: pull_result
changed_when: "'Downloaded newer image' in pull_result.stdout"
- name: Pre-pull openvswitch image
command: >
- docker pull {{ openshift.node.ovs_image }}:{{ openshift_image_tag }}
+ docker pull {{ osn_ovs_image }}:{{ openshift_image_tag }}
register: pull_result
changed_when: "'Downloaded newer image' in pull_result.stdout"
- when: openshift_use_openshift_sdn | bool
+ when: openshift_node_use_openshift_sdn | bool
- include_tasks: ../container_images.yml
diff --git a/roles/openshift_node/tasks/upgrade/restart.yml b/roles/openshift_node/tasks/upgrade/restart.yml
index 45b0be0a0..bd6f42182 100644
--- a/roles/openshift_node/tasks/upgrade/restart.yml
+++ b/roles/openshift_node/tasks/upgrade/restart.yml
@@ -1,7 +1,7 @@
---
# input variables:
# - openshift_service_type
-# - openshift.common.is_containerized
+# - openshift_is_containerized
# - openshift.common.hostname
# - openshift.master.api_port
diff --git a/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml b/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml
index cc9a8f2d9..d4b47bb9e 100644
--- a/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml
+++ b/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml
@@ -3,7 +3,7 @@
# - openshift_service_type
# - component
# - openshift_pkg_version
-# - openshift.common.is_atomic
+# - openshift_is_atomic
# Pre-pull new node rpm, but don't install
- name: download new node packages
@@ -12,7 +12,7 @@
until: result is succeeded
vars:
openshift_node_upgrade_rpm_list:
- - "{{ openshift_service_type }}-node{{ openshift_pkg_version }}"
+ - "{{ openshift_service_type }}-node{{ openshift_pkg_version | default('') }}"
- "PyYAML"
- "dnsmasq"
diff --git a/roles/openshift_node/tasks/upgrade/rpm_upgrade_install.yml b/roles/openshift_node/tasks/upgrade/rpm_upgrade_install.yml
index 32eeb76c6..ef5d8d662 100644
--- a/roles/openshift_node/tasks/upgrade/rpm_upgrade_install.yml
+++ b/roles/openshift_node/tasks/upgrade/rpm_upgrade_install.yml
@@ -3,7 +3,7 @@
# - openshift_service_type
# - component
# - openshift_pkg_version
-# - openshift.common.is_atomic
+# - openshift_is_atomic
# Install the pre-pulled RPM
# Note: dnsmasq is covered in its own play. openvswitch is included here
@@ -14,6 +14,6 @@
until: result is succeeded
vars:
openshift_node_upgrade_rpm_list:
- - "{{ openshift_service_type }}-node{{ openshift_pkg_version }}"
+ - "{{ openshift_service_type }}-node{{ openshift_pkg_version | default('') }}"
- "PyYAML"
- "openvswitch"
diff --git a/roles/openshift_node/tasks/upgrade/stop_services.yml b/roles/openshift_node/tasks/upgrade/stop_services.yml
index 2fff556e5..6d92516c3 100644
--- a/roles/openshift_node/tasks/upgrade/stop_services.yml
+++ b/roles/openshift_node/tasks/upgrade/stop_services.yml
@@ -19,7 +19,7 @@
- "{{ openshift_service_type }}-master-controllers"
- "{{ openshift_service_type }}-node"
failed_when: false
- when: openshift.common.is_containerized | bool
+ when: openshift_is_containerized | bool
- service:
name: docker
@@ -40,4 +40,4 @@
- "{{ openshift_service_type }}-node"
- openvswitch
failed_when: false
- when: not openshift.common.is_containerized | bool
+ when: not openshift_is_containerized | bool
diff --git a/roles/openshift_node/tasks/upgrade_pre.yml b/roles/openshift_node/tasks/upgrade_pre.yml
index 7f591996c..3ae7dc6b6 100644
--- a/roles/openshift_node/tasks/upgrade_pre.yml
+++ b/roles/openshift_node/tasks/upgrade_pre.yml
@@ -11,7 +11,7 @@
command: "{{ ansible_pkg_mgr }} makecache"
register: result
until: result is succeeded
- when: not openshift.common.is_containerized | bool
+ when: not openshift_is_containerized | bool
- name: Check Docker image count
shell: "docker images -aq | wc -l"
@@ -26,7 +26,7 @@
- l_docker_upgrade | bool
- include_tasks: upgrade/containerized_upgrade_pull.yml
- when: openshift.common.is_containerized | bool
+ when: openshift_is_containerized | bool
# Prepull the rpms for docker upgrade, but don't install
- name: download docker upgrade rpm
@@ -40,7 +40,7 @@
- include_tasks: upgrade/rpm_upgrade.yml
vars:
openshift_version: "{{ openshift_pkg_version | default('') }}"
- when: not openshift.common.is_containerized | bool
+ when: not openshift_is_containerized | bool
# https://docs.openshift.com/container-platform/3.4/admin_guide/overcommit.html#disabling-swap-memory
- name: Check for swap usage
diff --git a/roles/openshift_node/templates/node.service.j2 b/roles/openshift_node/templates/node.service.j2
index da751bd65..777f4a449 100644
--- a/roles/openshift_node/templates/node.service.j2
+++ b/roles/openshift_node/templates/node.service.j2
@@ -8,7 +8,7 @@ Wants={{ openshift_docker_service_name }}.service
Documentation=https://github.com/openshift/origin
Requires=dnsmasq.service
After=dnsmasq.service
-{% if openshift_use_crio %}Wants=cri-o.service{% endif %}
+{% if openshift_use_crio | bool %}Wants=cri-o.service{% endif %}
[Service]
Type=notify
diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2
index 261cac6f1..5f2a94ea2 100644
--- a/roles/openshift_node/templates/node.yaml.v1.j2
+++ b/roles/openshift_node/templates/node.yaml.v1.j2
@@ -5,18 +5,16 @@ dnsBindAddress: 127.0.0.1:53
dnsRecursiveResolvConf: /etc/origin/node/resolv.conf
{% endif %}
dnsDomain: {{ openshift.common.dns_domain }}
-{% if 'dns_ip' in openshift.node %}
-dnsIP: {{ openshift.node.dns_ip }}
-{% endif %}
+dnsIP: {{ openshift_dns_ip }}
dockerConfig:
execHandlerName: ""
-iptablesSyncPeriod: "{{ openshift.node.iptables_sync_period }}"
+iptablesSyncPeriod: "{{ openshift_node_iptables_sync_period }}"
imageConfig:
- format: {{ openshift.node.registry_url }}
+ format: {{ oreg_url_node }}
latest: {{ openshift_node_image_config_latest }}
kind: NodeConfig
-kubeletArguments: {{ openshift.node.kubelet_args | default(None) | to_padded_yaml(level=1) }}
-{% if openshift_use_crio %}
+kubeletArguments: {{ l2_openshift_node_kubelet_args | default(None) | lib_utils_to_padded_yaml(level=1) }}
+{% if openshift_use_crio | bool %}
container-runtime:
- remote
container-runtime-endpoint:
@@ -45,7 +43,7 @@ networkConfig:
{% if openshift_node_use_openshift_sdn | bool or openshift_node_use_nuage | bool or openshift_node_use_contiv | bool or openshift_node_use_kuryr | bool or openshift_node_sdn_network_plugin_name == 'cni' %}
networkPluginName: {{ openshift_node_sdn_network_plugin_name }}
{% endif %}
-{% if openshift.node.set_node_ip | bool %}
+{% if openshift_set_node_ip | bool %}
nodeIP: {{ openshift.common.ip }}
{% endif %}
nodeName: {{ openshift.node.nodename }}
@@ -68,8 +66,8 @@ volumeDirectory: {{ openshift_node_data_dir }}/openshift.local.volumes
{% if not (openshift_node_use_kuryr | default(False)) | bool %}
proxyArguments:
proxy-mode:
- - {{ openshift.node.proxy_mode }}
+ - {{ openshift_node_proxy_mode }}
{% endif %}
volumeConfig:
localQuota:
- perFSGroup: {{ openshift.node.local_quota_per_fsgroup }}
+ perFSGroup: {{ openshift_node_local_quota_per_fsgroup }}
diff --git a/roles/openshift_node/templates/openshift.docker.node.dep.service b/roles/openshift_node/templates/openshift.docker.node.dep.service
index 8b43beb07..9fe779057 100644
--- a/roles/openshift_node/templates/openshift.docker.node.dep.service
+++ b/roles/openshift_node/templates/openshift.docker.node.dep.service
@@ -3,9 +3,15 @@ Requires={{ openshift_docker_service_name }}.service
After={{ openshift_docker_service_name }}.service
PartOf={{ openshift_service_type }}-node.service
Before={{ openshift_service_type }}-node.service
-{% if openshift_use_crio %}Wants=cri-o.service{% endif %}
+{% if openshift_use_crio | bool %}Wants=cri-o.service{% endif %}
[Service]
-ExecStart=/bin/bash -c "if [[ -f /usr/bin/docker-current ]]; then echo \"DOCKER_ADDTL_BIND_MOUNTS=--volume=/usr/bin/docker-current:/usr/bin/docker-current:ro --volume=/etc/sysconfig/docker:/etc/sysconfig/docker:ro --volume=/etc/containers/registries:/etc/containers/registries:ro\" > /etc/sysconfig/{{ openshift_service_type }}-node-dep; else echo \"#DOCKER_ADDTL_BIND_MOUNTS=\" > /etc/sysconfig/{{ openshift_service_type }}-node-dep; fi"
+ExecStart=/bin/bash -c 'if [[ -f /usr/bin/docker-current ]]; \
+ then echo DOCKER_ADDTL_BIND_MOUNTS=\"--volume=/usr/bin/docker-current:/usr/bin/docker-current:ro \
+ --volume=/etc/sysconfig/docker:/etc/sysconfig/docker:ro \
+ --volume=/etc/containers/registries:/etc/containers/registries:ro \
+ {% if l_bind_docker_reg_auth %} --volume={{ oreg_auth_credentials_path }}:/root/.docker:ro{% endif %}\" > \
+ /etc/sysconfig/{{ openshift_service_type }}-node-dep; \
+ else echo "#DOCKER_ADDTL_BIND_MOUNTS=" > /etc/sysconfig/{{ openshift_service_type }}-node-dep; fi'
ExecStop=
SyslogIdentifier={{ openshift_service_type }}-node-dep
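
The rewritten ExecStart wraps the whole script in single quotes, so the inner double quotes no longer need backslash escaping, and it now injects the registry-auth bind mount when l_bind_docker_reg_auth is set. On a host that ships /usr/bin/docker-current the unit would write roughly (service type assumed to be origin):

    # /etc/sysconfig/origin-node-dep (sketch)
    DOCKER_ADDTL_BIND_MOUNTS="--volume=/usr/bin/docker-current:/usr/bin/docker-current:ro --volume=/etc/sysconfig/docker:/etc/sysconfig/docker:ro --volume=/etc/containers/registries:/etc/containers/registries:ro"
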
diff --git a/roles/openshift_node/templates/openshift.docker.node.service b/roles/openshift_node/templates/openshift.docker.node.service
index b174c7023..ae7b147a6 100644
--- a/roles/openshift_node/templates/openshift.docker.node.service
+++ b/roles/openshift_node/templates/openshift.docker.node.service
@@ -38,7 +38,7 @@ ExecStart=/usr/bin/docker run --name {{ openshift_service_type }}-node \
{% if openshift_use_nuage | default(false) -%} $NUAGE_ADDTL_BIND_MOUNTS {% endif -%} \
-v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS -v /etc/pki:/etc/pki:ro \
{% if l_bind_docker_reg_auth %} -v {{ oreg_auth_credentials_path }}:/root/.docker:ro{% endif %}\
- {{ openshift.node.node_image }}:${IMAGE_VERSION}
+ {{ osn_image }}:${IMAGE_VERSION}
ExecStartPost=/usr/bin/sleep 10
ExecStop=/usr/bin/docker stop {{ openshift_service_type }}-node
ExecStopPost=/usr/bin/rm /etc/dnsmasq.d/node-dnsmasq.conf
diff --git a/roles/openshift_node/templates/openvswitch.docker.service b/roles/openshift_node/templates/openvswitch.docker.service
index 37f091c76..1fc9b6e72 100644
--- a/roles/openshift_node/templates/openvswitch.docker.service
+++ b/roles/openshift_node/templates/openvswitch.docker.service
@@ -6,7 +6,7 @@ PartOf={{ openshift_docker_service_name }}.service
[Service]
EnvironmentFile=/etc/sysconfig/openvswitch
ExecStartPre=-/usr/bin/docker rm -f openvswitch
-ExecStart=/usr/bin/docker run --name openvswitch --rm --privileged --net=host --pid=host -v /lib/modules:/lib/modules -v /run:/run -v /sys:/sys:ro -v /etc/origin/openvswitch:/etc/openvswitch {{ openshift.node.ovs_image }}:${IMAGE_VERSION}
+ExecStart=/usr/bin/docker run --name openvswitch --rm --privileged --net=host --pid=host -v /lib/modules:/lib/modules -v /run:/run -v /sys:/sys:ro -v /etc/origin/openvswitch:/etc/openvswitch {{ osn_ovs_image }}:${IMAGE_VERSION}
ExecStartPost=/usr/bin/sleep 5
ExecStop=/usr/bin/docker stop openvswitch
SyslogIdentifier=openvswitch
diff --git a/roles/openshift_node_certificates/defaults/main.yml b/roles/openshift_node_certificates/defaults/main.yml
index b42b75be9..da1570528 100644
--- a/roles/openshift_node_certificates/defaults/main.yml
+++ b/roles/openshift_node_certificates/defaults/main.yml
@@ -2,4 +2,4 @@
openshift_node_cert_expire_days: 730
openshift_ca_host: ''
-openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False)) else 'docker' }}"
+openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False) | bool) else 'docker' }}"
diff --git a/roles/openshift_node_certificates/meta/main.yml b/roles/openshift_node_certificates/meta/main.yml
index 0440bf11a..4362c644a 100644
--- a/roles/openshift_node_certificates/meta/main.yml
+++ b/roles/openshift_node_certificates/meta/main.yml
@@ -12,4 +12,5 @@ galaxy_info:
categories:
- cloud
- system
-dependencies: []
+dependencies:
+- role: lib_utils
diff --git a/roles/openshift_node_certificates/tasks/main.yml b/roles/openshift_node_certificates/tasks/main.yml
index 97f1fbbdd..5f73f3bdc 100644
--- a/roles/openshift_node_certificates/tasks/main.yml
+++ b/roles/openshift_node_certificates/tasks/main.yml
@@ -31,7 +31,7 @@
node_certs_missing: "{{ true if openshift_certificates_redeploy | default(false) | bool
else (False in (g_node_cert_stat_result.results
| default({})
- | oo_collect(attribute='stat.exists')
+ | lib_utils_oo_collect(attribute='stat.exists')
| list)) }}"
- name: Create openshift_generated_configs_dir if it does not exist
@@ -51,11 +51,11 @@
- name: Generate the node client config
command: >
- {{ hostvars[openshift_ca_host].openshift.common.client_binary }} adm create-api-client-config
- {% for named_ca_certificate in hostvars[openshift_ca_host].openshift.master.named_certificates | default([]) | oo_collect('cafile') %}
+ {{ hostvars[openshift_ca_host]['first_master_client_binary'] }} adm create-api-client-config
+ {% for named_ca_certificate in hostvars[openshift_ca_host].openshift.master.named_certificates | default([]) | lib_utils_oo_collect('cafile') %}
--certificate-authority {{ named_ca_certificate }}
{% endfor %}
- {% for legacy_ca_certificate in g_master_legacy_ca_result.files | default([]) | oo_collect('path') %}
+ {% for legacy_ca_certificate in g_master_legacy_ca_result.files | default([]) | lib_utils_oo_collect('path') %}
--certificate-authority {{ legacy_ca_certificate }}
{% endfor %}
--certificate-authority={{ openshift_ca_cert }}
@@ -70,14 +70,14 @@
args:
creates: "{{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname }}"
with_items: "{{ hostvars
- | oo_select_keys(groups['oo_nodes_to_config'])
- | oo_collect(attribute='inventory_hostname', filters={'node_certs_missing':True}) }}"
+ | lib_utils_oo_select_keys(groups['oo_nodes_to_config'])
+ | lib_utils_oo_collect(attribute='inventory_hostname', filters={'node_certs_missing':True}) }}"
delegate_to: "{{ openshift_ca_host }}"
run_once: true
- name: Generate the node server certificate
command: >
- {{ hostvars[openshift_ca_host].openshift.common.client_binary }} adm ca create-server-cert
+ {{ hostvars[openshift_ca_host]['first_master_client_binary'] }} adm ca create-server-cert
--cert={{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname }}/server.crt
--key={{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname }}/server.key
--expire-days={{ openshift_node_cert_expire_days }}
@@ -89,18 +89,11 @@
args:
creates: "{{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname }}/server.crt"
with_items: "{{ hostvars
- | oo_select_keys(groups['oo_nodes_to_config'])
- | oo_collect(attribute='inventory_hostname', filters={'node_certs_missing':True}) }}"
+ | lib_utils_oo_select_keys(groups['oo_nodes_to_config'])
+ | lib_utils_oo_collect(attribute='inventory_hostname', filters={'node_certs_missing':True}) }}"
delegate_to: "{{ openshift_ca_host }}"
run_once: true
-- name: Create local temp directory for syncing certs
- local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX
- register: node_cert_mktemp
- changed_when: False
- when: node_certs_missing | bool
- become: no
-
- name: Create a tarball of the node config directories
command: >
tar -czvf {{ openshift_node_generated_config_dir }}.tgz
@@ -117,8 +110,7 @@
- name: Retrieve the node config tarballs from the master
fetch:
src: "{{ openshift_node_generated_config_dir }}.tgz"
- dest: "{{ node_cert_mktemp.stdout }}/"
- flat: yes
+ dest: "/tmp"
fail_on_missing: yes
validate_checksum: yes
when: node_certs_missing | bool
@@ -132,15 +124,14 @@
- name: Unarchive the tarball on the node
unarchive:
- src: "{{ node_cert_mktemp.stdout }}/{{ openshift_node_cert_subdir }}.tgz"
+ src: "/tmp/{{ inventory_hostname }}/{{ openshift_node_generated_config_dir }}.tgz"
dest: "{{ openshift_node_cert_dir }}"
when: node_certs_missing | bool
- name: Delete local temp directory
- local_action: file path="{{ node_cert_mktemp.stdout }}" state=absent
+ local_action: file path="/tmp/{{ inventory_hostname }}" state=absent
changed_when: False
when: node_certs_missing | bool
- become: no
- name: Copy OpenShift CA to system CA trust
copy:
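
The mktemp round-trip is dropped because fetch without flat: yes already namespaces its output on the controller as <dest>/<inventory_hostname>/<source path>, which is exactly the layout the reworked unarchive and cleanup tasks reconstruct. For an assumed host node1.example.com and an assumed generated-config path, the tarball would land at:

    /tmp/node1.example.com/etc/origin/generated-configs/node-node1.example.com.tgz
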
diff --git a/roles/openshift_node_facts/filter_plugins/openshift_node_facts_filters.py b/roles/openshift_node_facts/filter_plugins/openshift_node_facts_filters.py
deleted file mode 100644
index 69069f2dc..000000000
--- a/roles/openshift_node_facts/filter_plugins/openshift_node_facts_filters.py
+++ /dev/null
@@ -1,32 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-'''
-Custom filters for use in openshift-node
-'''
-from ansible import errors
-
-
-class FilterModule(object):
- ''' Custom ansible filters for use by openshift_node_facts role'''
-
- @staticmethod
- def node_get_dns_ip(openshift_dns_ip, hostvars):
- ''' Navigates the complicated logic of when to set dnsIP
-
- In all situations if they've set openshift_dns_ip use that
- For 1.0/3.0 installs we use the openshift_master_cluster_vip, openshift_node_first_master_ip, else None
- For 1.1/3.1 installs we use openshift_master_cluster_vip, else None (product will use kube svc ip)
- For 1.2/3.2+ installs we set to the node's default interface ip
- '''
-
- if not issubclass(type(hostvars), dict):
- raise errors.AnsibleFilterError("|failed expects hostvars is a dict")
-
- # We always use what they've specified if they've specified a value
- if openshift_dns_ip is not None:
- return openshift_dns_ip
- return hostvars['ansible_default_ipv4']['address']
-
- def filters(self):
- ''' returns a mapping of filters to methods '''
- return {'node_get_dns_ip': self.node_get_dns_ip}
diff --git a/roles/openshift_node_facts/tasks/main.yml b/roles/openshift_node_facts/tasks/main.yml
deleted file mode 100644
index c234a3000..000000000
--- a/roles/openshift_node_facts/tasks/main.yml
+++ /dev/null
@@ -1,25 +0,0 @@
----
-- name: Set node facts
- openshift_facts:
- role: "{{ item.role }}"
- local_facts: "{{ item.local_facts }}"
- with_items:
- # Reset node labels to an empty dictionary.
- - role: node
- local_facts:
- labels: {}
- - role: node
- local_facts:
- annotations: "{{ openshift_node_annotations | default(none) }}"
- iptables_sync_period: "{{ openshift_node_iptables_sync_period | default(None) }}"
- kubelet_args: "{{ openshift_node_kubelet_args | default(None) }}"
- labels: "{{ openshift_node_labels | default(None) }}"
- registry_url: "{{ oreg_url_node | default(oreg_url) | default(None) }}"
- storage_plugin_deps: "{{ osn_storage_plugin_deps | default(None) }}"
- set_node_ip: "{{ openshift_set_node_ip | default(None) }}"
- node_image: "{{ osn_image | default(None) }}"
- ovs_image: "{{ osn_ovs_image | default(None) }}"
- proxy_mode: "{{ openshift_node_proxy_mode | default('iptables') }}"
- local_quota_per_fsgroup: "{{ openshift_node_local_quota_per_fsgroup | default(None) }}"
- dns_ip: "{{ openshift_dns_ip | default(none) | node_get_dns_ip(hostvars[inventory_hostname])}}"
- env_vars: "{{ openshift_node_env_vars | default(None) }}"
diff --git a/roles/openshift_node_group/defaults/main.yml b/roles/openshift_node_group/defaults/main.yml
index 7c81409a5..cccdea66f 100644
--- a/roles/openshift_node_group/defaults/main.yml
+++ b/roles/openshift_node_group/defaults/main.yml
@@ -17,7 +17,13 @@ openshift_node_group_edits: []
openshift_node_group_namespace: openshift-node
openshift_node_group_labels: []
-openshift_imageconfig_format: "{{ oreg_url if oreg_url is defined else openshift.node.registry_url }}"
+openshift_oreg_url_default_dict:
+ origin: "openshift/origin-${component}:${version}"
+ openshift-enterprise: openshift3/ose-${component}:${version}
+openshift_oreg_url_default: "{{ openshift_oreg_url_default_dict[openshift_deployment_type] }}"
+oreg_url_node: "{{ oreg_url | default(openshift_oreg_url_default) }}"
+
+openshift_imageconfig_format: "{{ oreg_url_node }}"
openshift_node_group_cloud_provider: "{{ openshift_cloudprovider_kind | default('aws') }}"
openshift_node_group_network_plugin_default: "{{ os_sdn_network_plugin_name | default('redhat/openshift-ovs-subnet') }}"
openshift_node_group_network_plugin: "{{ openshift_node_group_network_plugin_default }}"
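
oreg_url_node now defaults per deployment type; the ${component} and ${version} placeholders in the format string are filled in by OpenShift itself when component images are resolved. With assumed values the origin default expands as:

    # openshift_deployment_type: origin, image version v3.9.0 (assumed)
    # format: openshift/origin-${component}:${version}
    #   node component -> openshift/origin-node:v3.9.0
    #   pod component  -> openshift/origin-pod:v3.9.0
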
diff --git a/roles/openshift_openstack/defaults/main.yml b/roles/openshift_openstack/defaults/main.yml
index 65a647b8f..77be1f2b1 100644
--- a/roles/openshift_openstack/defaults/main.yml
+++ b/roles/openshift_openstack/defaults/main.yml
@@ -8,6 +8,7 @@ openshift_openstack_num_etcd: 0
openshift_openstack_num_masters: 1
openshift_openstack_num_nodes: 1
openshift_openstack_num_infra: 1
+openshift_openstack_num_cns: 0
openshift_openstack_dns_nameservers: []
openshift_openstack_nodes_to_remove: []
@@ -57,6 +58,7 @@ openshift_openstack_stack_name: "{{ openshift_openstack_clusterid }}.{{ openshif
openshift_openstack_subnet_prefix: "192.168.99"
openshift_openstack_master_hostname: master
openshift_openstack_infra_hostname: infra-node
+openshift_openstack_cns_hostname: cns
openshift_openstack_node_hostname: app-node
openshift_openstack_lb_hostname: lb
openshift_openstack_etcd_hostname: etcd
@@ -66,8 +68,10 @@ openshift_openstack_etcd_flavor: "{{ openshift_openstack_default_flavor }}"
openshift_openstack_master_flavor: "{{ openshift_openstack_default_flavor }}"
openshift_openstack_node_flavor: "{{ openshift_openstack_default_flavor }}"
openshift_openstack_infra_flavor: "{{ openshift_openstack_default_flavor }}"
+openshift_openstack_cns_flavor: "{{ openshift_openstack_default_flavor }}"
openshift_openstack_master_image: "{{ openshift_openstack_default_image_name }}"
openshift_openstack_infra_image: "{{ openshift_openstack_default_image_name }}"
+openshift_openstack_cns_image: "{{ openshift_openstack_default_image_name }}"
openshift_openstack_node_image: "{{ openshift_openstack_default_image_name }}"
openshift_openstack_lb_image: "{{ openshift_openstack_default_image_name }}"
openshift_openstack_etcd_image: "{{ openshift_openstack_default_image_name }}"
@@ -84,6 +88,7 @@ openshift_openstack_infra_server_group_policies: []
openshift_openstack_docker_volume_size: 15
openshift_openstack_master_volume_size: "{{ openshift_openstack_docker_volume_size }}"
openshift_openstack_infra_volume_size: "{{ openshift_openstack_docker_volume_size }}"
+openshift_openstack_cns_volume_size: "{{ openshift_openstack_docker_volume_size }}"
openshift_openstack_node_volume_size: "{{ openshift_openstack_docker_volume_size }}"
openshift_openstack_etcd_volume_size: 2
openshift_openstack_lb_volume_size: 5
diff --git a/roles/openshift_openstack/tasks/check-prerequisites.yml b/roles/openshift_openstack/tasks/check-prerequisites.yml
index 30996cc47..1e487d434 100644
--- a/roles/openshift_openstack/tasks/check-prerequisites.yml
+++ b/roles/openshift_openstack/tasks/check-prerequisites.yml
@@ -91,6 +91,7 @@
with_items:
- "{{ openshift_openstack_master_image }}"
- "{{ openshift_openstack_infra_image }}"
+ - "{{ openshift_openstack_cns_image }}"
- "{{ openshift_openstack_node_image }}"
- "{{ openshift_openstack_lb_image }}"
- "{{ openshift_openstack_etcd_image }}"
@@ -100,6 +101,7 @@
with_items:
- "{{ openshift_openstack_master_flavor }}"
- "{{ openshift_openstack_infra_flavor }}"
+ - "{{ openshift_openstack_cns_flavor }}"
- "{{ openshift_openstack_node_flavor }}"
- "{{ openshift_openstack_lb_flavor }}"
- "{{ openshift_openstack_etcd_flavor }}"
diff --git a/roles/openshift_openstack/templates/heat_stack.yaml.j2 b/roles/openshift_openstack/templates/heat_stack.yaml.j2
index 8d13eb81e..1be5d3a62 100644
--- a/roles/openshift_openstack/templates/heat_stack.yaml.j2
+++ b/roles/openshift_openstack/templates/heat_stack.yaml.j2
@@ -419,6 +419,46 @@ resources:
port_range_min: 443
port_range_max: 443
+ cns-secgrp:
+ type: OS::Neutron::SecurityGroup
+ properties:
+ name:
+ str_replace:
+ template: openshift-ansible-cluster_id-cns-secgrp
+ params:
+ cluster_id: {{ openshift_openstack_stack_name }}
+ description:
+ str_replace:
+ template: Security group for cluster_id OpenShift cns cluster nodes
+ params:
+ cluster_id: {{ openshift_openstack_stack_name }}
+ rules:
+ # glusterfs_sshd
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 2222
+ port_range_max: 2222
+ # heketi dialing backends
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 10250
+ port_range_max: 10250
+ # glusterfs_management
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 24007
+ port_range_max: 24007
+ # glusterfs_rdma
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 24008
+ port_range_max: 24008
+ # glusterfs_bricks
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 49152
+ port_range_max: 49251
+
{% if openshift_openstack_num_masters|int > 1 %}
lb-secgrp:
type: OS::Neutron::SecurityGroup
@@ -764,3 +804,58 @@ resources:
depends_on:
- interface
{% endif %}
+
+ cns:
+ type: OS::Heat::ResourceGroup
+ properties:
+ count: {{ openshift_openstack_num_cns }}
+ resource_def:
+ type: server.yaml
+ properties:
+ name:
+ str_replace:
+ template: sub_type_k8s_type-%index%.cluster_id
+ params:
+ cluster_id: {{ openshift_openstack_stack_name }}
+ sub_type_k8s_type: {{ openshift_openstack_cns_hostname }}
+ cluster_env: {{ openshift_openstack_public_dns_domain }}
+ cluster_id: {{ openshift_openstack_stack_name }}
+ group:
+ str_replace:
+ template: k8s_type.cluster_id
+ params:
+ k8s_type: cns
+ cluster_id: {{ openshift_openstack_stack_name }}
+ type: cns
+ image: {{ openshift_openstack_cns_image }}
+ flavor: {{ openshift_openstack_cns_flavor }}
+ key_name: {{ openshift_openstack_keypair_name }}
+{% if openshift_openstack_provider_network_name %}
+ net: {{ openshift_openstack_provider_network_name }}
+ net_name: {{ openshift_openstack_provider_network_name }}
+{% else %}
+ net: { get_resource: net }
+ subnet: { get_resource: subnet }
+ net_name:
+ str_replace:
+ template: openshift-ansible-cluster_id-net
+ params:
+ cluster_id: {{ openshift_openstack_stack_name }}
+{% if openshift_use_flannel|default(False)|bool %}
+ attach_data_net: true
+ data_net: { get_resource: data_net }
+ data_subnet: { get_resource: data_subnet }
+{% endif %}
+{% endif %}
+ secgrp:
+{% if openshift_openstack_flat_secgrp|default(False)|bool %}
+ - { get_resource: flat-secgrp }
+{% else %}
+ - { get_resource: node-secgrp }
+{% endif %}
+ - { get_resource: cns-secgrp }
+ - { get_resource: common-secgrp }
+{% if not openshift_openstack_provider_network_name %}
+ floating_network: {{ openshift_openstack_external_network_name }}
+{% endif %}
+ volume_size: {{ openshift_openstack_cns_volume_size }}
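
The new cns resource group is sized by openshift_openstack_num_cns, which defaults to 0 above, so existing stacks come out unchanged. A minimal inventory sketch that would provision dedicated CNS hosts (flavor, image, and size values are placeholders):

    openshift_openstack_num_cns: 3
    openshift_openstack_cns_flavor: m1.large
    openshift_openstack_cns_image: centos7-base
    openshift_openstack_cns_volume_size: 100
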
diff --git a/roles/openshift_openstack/templates/heat_stack_server.yaml.j2 b/roles/openshift_openstack/templates/heat_stack_server.yaml.j2
index a829da34f..1e73c9e1c 100644
--- a/roles/openshift_openstack/templates/heat_stack_server.yaml.j2
+++ b/roles/openshift_openstack/templates/heat_stack_server.yaml.j2
@@ -212,6 +212,9 @@ resources:
host-type: { get_param: type }
sub-host-type: { get_param: subtype }
node_labels: { get_param: node_labels }
+{% if openshift_openstack_dns_nameservers %}
+ openshift_hostname: { get_param: name }
+{% endif %}
scheduler_hints: { get_param: scheduler_hints }
{% if use_trunk_ports|default(false)|bool %}
diff --git a/roles/openshift_persistent_volumes/meta/main.yml b/roles/openshift_persistent_volumes/meta/main.yml
index 48b0699ab..aea7616bf 100644
--- a/roles/openshift_persistent_volumes/meta/main.yml
+++ b/roles/openshift_persistent_volumes/meta/main.yml
@@ -11,3 +11,4 @@ galaxy_info:
- 7
dependencies:
- role: openshift_facts
+- role: lib_utils
diff --git a/roles/openshift_persistent_volumes/tasks/main.yml b/roles/openshift_persistent_volumes/tasks/main.yml
index 0b4dd7d1f..b1d9c8cca 100644
--- a/roles/openshift_persistent_volumes/tasks/main.yml
+++ b/roles/openshift_persistent_volumes/tasks/main.yml
@@ -26,7 +26,8 @@
when: openshift_hosted_registry_storage_glusterfs_swap | default(False)
- name: create standard pv and pvc lists
- # generate_pv_pvcs_list is a custom action module defined in ../action_plugins
+ # generate_pv_pvcs_list is a custom action module defined in
+ # roles/lib_utils/action_plugins/generate_pv_pvcs_list.py
generate_pv_pvcs_list: {}
register: l_pv_pvcs_list
diff --git a/roles/openshift_persistent_volumes/tasks/pv.yml b/roles/openshift_persistent_volumes/tasks/pv.yml
index 346605ff7..865269b7a 100644
--- a/roles/openshift_persistent_volumes/tasks/pv.yml
+++ b/roles/openshift_persistent_volumes/tasks/pv.yml
@@ -8,10 +8,10 @@
- name: Create PersistentVolumes
command: >
- {{ openshift.common.client_binary }} create
+ {{ openshift_client_binary }} create
-f {{ mktemp.stdout }}/persistent-volumes.yml
--config={{ mktemp.stdout }}/admin.kubeconfig
register: pv_create_output
when: persistent_volumes | length > 0
- failed_when: ('already exists' not in pv_create_output.stderr) and ('created' not in pv_create_output.stdout)
+ failed_when: "('already exists' not in pv_create_output.stderr) and ('created' not in pv_create_output.stdout) and pv_create_output.rc != 0"
changed_when: ('created' in pv_create_output.stdout)
diff --git a/roles/openshift_persistent_volumes/tasks/pvc.yml b/roles/openshift_persistent_volumes/tasks/pvc.yml
index e44f9b18f..6c12d128c 100644
--- a/roles/openshift_persistent_volumes/tasks/pvc.yml
+++ b/roles/openshift_persistent_volumes/tasks/pvc.yml
@@ -8,10 +8,10 @@
- name: Create PersistentVolumeClaims
command: >
- {{ openshift.common.client_binary }} create
+ {{ openshift_client_binary }} create
-f {{ mktemp.stdout }}/persistent-volume-claims.yml
--config={{ mktemp.stdout }}/admin.kubeconfig
register: pvc_create_output
when: persistent_volume_claims | length > 0
- failed_when: ('already exists' not in pvc_create_output.stderr) and ('created' not in pvc_create_output.stdout)
+ failed_when: "('already exists' not in pvc_create_output.stderr) and ('created' not in pvc_create_output.stdout) and pvc_create_output.rc != 0"
changed_when: ('created' in pvc_create_output.stdout)
diff --git a/roles/openshift_persistent_volumes/templates/persistent-volume-claim.yml.j2 b/roles/openshift_persistent_volumes/templates/persistent-volume-claim.yml.j2
index d40417a9a..fac589a92 100644
--- a/roles/openshift_persistent_volumes/templates/persistent-volume-claim.yml.j2
+++ b/roles/openshift_persistent_volumes/templates/persistent-volume-claim.yml.j2
@@ -8,7 +8,7 @@ items:
metadata:
name: "{{ claim.name }}"
spec:
- accessModes: {{ claim.access_modes | to_padded_yaml(2, 2) }}
+ accessModes: {{ claim.access_modes | lib_utils_to_padded_yaml(2, 2) }}
resources:
requests:
storage: "{{ claim.capacity }}"
diff --git a/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2 b/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2
index 9ec14208b..354561432 100644
--- a/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2
+++ b/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2
@@ -16,6 +16,6 @@ items:
spec:
capacity:
storage: "{{ volume.capacity }}"
- accessModes: {{ volume.access_modes | to_padded_yaml(2, 2) }}
- {{ (volume.storage.keys() | list)[0] }}: {{ volume.storage[(volume.storage.keys() | list)[0]] | to_padded_yaml(3, 2) }}
+ accessModes: {{ volume.access_modes | lib_utils_to_padded_yaml(2, 2) }}
+ {{ (volume.storage.keys() | list)[0] }}: {{ volume.storage[(volume.storage.keys() | list)[0]] | lib_utils_to_padded_yaml(3, 2) }}
{% endfor %}
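
lib_utils_to_padded_yaml serializes a value to YAML and pads each line so it nests under the surrounding key; the (2, 2) arguments above request two levels of two-space indentation. For an assumed volume with access_modes ['ReadWriteOnce'] and an AWS EBS backing store, the loop body would render roughly:

    spec:
      capacity:
        storage: "1Gi"
      accessModes:
        - ReadWriteOnce
      awsElasticBlockStore:                    # storage type and fields assumed
        volumeID: vol-0123456789abcdef0        # placeholder
        fsType: ext4
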
diff --git a/roles/openshift_project_request_template/tasks/main.yml b/roles/openshift_project_request_template/tasks/main.yml
index c31ee5795..3403840fb 100644
--- a/roles/openshift_project_request_template/tasks/main.yml
+++ b/roles/openshift_project_request_template/tasks/main.yml
@@ -6,7 +6,7 @@
- name: Generate default project template
command: |
- {{ openshift.common.client_binary | quote }} \
+ {{ openshift_client_binary | quote }} \
--config {{ openshift.common.config_base | quote }}/master/admin.kubeconfig \
--output yaml \
adm create-bootstrap-project-template \
@@ -28,7 +28,7 @@
- name: Create or update project request template
command: |
- {{ openshift.common.client_binary }} \
+ {{ openshift_client_binary }} \
--config {{ openshift.common.config_base }}/master/admin.kubeconfig \
--namespace {{ openshift_project_request_template_namespace | quote }} \
apply --filename {{ mktemp.stdout }}
diff --git a/roles/openshift_prometheus/meta/main.yaml b/roles/openshift_prometheus/meta/main.yaml
index 33188bb7e..69c5e0ee2 100644
--- a/roles/openshift_prometheus/meta/main.yaml
+++ b/roles/openshift_prometheus/meta/main.yaml
@@ -15,5 +15,6 @@ galaxy_info:
categories:
- openshift
dependencies:
-- { role: lib_openshift }
-- { role: openshift_facts }
+- role: lib_openshift
+- role: openshift_facts
+- role: lib_utils
diff --git a/roles/openshift_prometheus/tasks/install_prometheus.yaml b/roles/openshift_prometheus/tasks/install_prometheus.yaml
index abc5dd476..749df5152 100644
--- a/roles/openshift_prometheus/tasks/install_prometheus.yaml
+++ b/roles/openshift_prometheus/tasks/install_prometheus.yaml
@@ -5,7 +5,7 @@
oc_project:
state: present
name: "{{ openshift_prometheus_namespace }}"
- node_selector: "{{ openshift_prometheus_node_selector | oo_selector_to_string_list() }}"
+ node_selector: "{{ openshift_prometheus_node_selector | lib_utils_oo_selector_to_string_list() }}"
description: Prometheus
# secrets
@@ -16,7 +16,7 @@
namespace: "{{ openshift_prometheus_namespace }}"
contents:
- path: session_secret
- data: "{{ 43 | oo_random_word }}="
+ data: "{{ 43 | lib_utils_oo_random_word }}="
with_items:
- prometheus
- alerts
@@ -39,7 +39,7 @@
# TODO remove this when annotations are supported by oc_serviceaccount
- name: annotate serviceaccount
command: >
- {{ openshift.common.client_binary }} annotate --overwrite -n {{ openshift_prometheus_namespace }}
+ {{ openshift_client_binary }} annotate --overwrite -n {{ openshift_prometheus_namespace }}
serviceaccount prometheus
serviceaccounts.openshift.io/oauth-redirectreference.prom='{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"prometheus"}}'
serviceaccounts.openshift.io/oauth-redirectreference.alerts='{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"alerts"}}'
@@ -97,7 +97,7 @@
# TODO remove this when annotations are supported by oc_service
- name: annotate prometheus service
command: >
- {{ openshift.common.client_binary }} annotate --overwrite -n {{ openshift_prometheus_namespace }}
+ {{ openshift_client_binary }} annotate --overwrite -n {{ openshift_prometheus_namespace }}
service prometheus
prometheus.io/scrape='true'
prometheus.io/scheme=https
@@ -105,7 +105,7 @@
- name: annotate alerts service
command: >
- {{ openshift.common.client_binary }} annotate --overwrite -n {{ openshift_prometheus_namespace }}
+ {{ openshift_client_binary }} annotate --overwrite -n {{ openshift_prometheus_namespace }}
service alerts 'service.alpha.openshift.io/serving-cert-secret-name=prometheus-alerts-tls'
# create prometheus and alerts routes
diff --git a/roles/openshift_prometheus/tasks/main.yaml b/roles/openshift_prometheus/tasks/main.yaml
index 38798e1f5..b859eb111 100644
--- a/roles/openshift_prometheus/tasks/main.yaml
+++ b/roles/openshift_prometheus/tasks/main.yaml
@@ -1,5 +1,5 @@
---
-- name: Set default image variables based on deployment_type
+- name: Set default image variables based on openshift_deployment_type
include_vars: "{{ item }}"
with_first_found:
- "{{ openshift_deployment_type }}.yml"
diff --git a/roles/openshift_provisioners/meta/main.yaml b/roles/openshift_provisioners/meta/main.yaml
index cb9278eb7..5ef352bcd 100644
--- a/roles/openshift_provisioners/meta/main.yaml
+++ b/roles/openshift_provisioners/meta/main.yaml
@@ -14,3 +14,4 @@ galaxy_info:
dependencies:
- role: lib_openshift
- role: openshift_facts
+- role: lib_utils
diff --git a/roles/openshift_provisioners/tasks/install_efs.yaml b/roles/openshift_provisioners/tasks/install_efs.yaml
index e543d753c..de763f6cf 100644
--- a/roles/openshift_provisioners/tasks/install_efs.yaml
+++ b/roles/openshift_provisioners/tasks/install_efs.yaml
@@ -1,7 +1,7 @@
---
- name: Check efs current replica count
command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get dc provisioners-efs
+ {{ openshift_client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get dc provisioners-efs
-o jsonpath='{.spec.replicas}' -n {{openshift_provisioners_project}}
register: efs_replica_count
when: not ansible_check_mode
@@ -58,7 +58,7 @@
# anyuid in order to run as root & chgrp shares with allocated gids
- name: "Check efs anyuid permissions"
command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ {{ openshift_client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
get scc/anyuid -o jsonpath='{.users}'
register: efs_anyuid
check_mode: no
@@ -66,7 +66,7 @@
- name: "Set anyuid permissions for efs"
command: >
- {{ openshift.common.client_binary}} adm --config={{ mktemp.stdout }}/admin.kubeconfig policy
+ {{ openshift_client_binary}} adm --config={{ mktemp.stdout }}/admin.kubeconfig policy
add-scc-to-user anyuid system:serviceaccount:{{openshift_provisioners_project}}:provisioners-efs
register: efs_output
failed_when: efs_output.rc == 1 and 'exists' not in efs_output.stderr
diff --git a/roles/openshift_provisioners/tasks/oc_apply.yaml b/roles/openshift_provisioners/tasks/oc_apply.yaml
index 49d03f203..239e1f1cc 100644
--- a/roles/openshift_provisioners/tasks/oc_apply.yaml
+++ b/roles/openshift_provisioners/tasks/oc_apply.yaml
@@ -1,7 +1,7 @@
---
- name: Checking generation of {{file_content.kind}} {{file_content.metadata.name}}
command: >
- {{ openshift.common.client_binary }} --config={{ kubeconfig }}
+ {{ openshift_client_binary }} --config={{ kubeconfig }}
get {{file_content.kind}} {{file_content.metadata.name}}
-o jsonpath='{.metadata.resourceVersion}'
-n {{namespace}}
@@ -11,16 +11,18 @@
- name: Applying {{file_name}}
command: >
- {{ openshift.common.client_binary }} --config={{ kubeconfig }}
+ {{ openshift_client_binary }} --config={{ kubeconfig }}
apply -f {{ file_name }}
-n {{ namespace }}
register: generation_apply
- failed_when: "'error' in generation_apply.stderr"
+ failed_when:
+ - "'error' in generation_apply.stderr"
+ - "generation_apply.rc != 0"
changed_when: no
- name: Determine change status of {{file_content.kind}} {{file_content.metadata.name}}
command: >
- {{ openshift.common.client_binary }} --config={{ kubeconfig }}
+ {{ openshift_client_binary }} --config={{ kubeconfig }}
get {{file_content.kind}} {{file_content.metadata.name}}
-o jsonpath='{.metadata.resourceVersion}'
-n {{namespace}}
@@ -32,20 +34,24 @@
- name: Removing previous {{file_name}}
command: >
- {{ openshift.common.client_binary }} --config={{ kubeconfig }}
+ {{ openshift_client_binary }} --config={{ kubeconfig }}
delete -f {{ file_name }}
-n {{ namespace }}
register: generation_delete
- failed_when: "'error' in generation_delete.stderr"
+ failed_when:
+ - "'error' in generation_delete.stderr"
+ - "generation_delete.rc != 0"
changed_when: generation_delete.rc == 0
when: generation_apply.rc != 0
- name: Recreating {{file_name}}
command: >
- {{ openshift.common.client_binary }} --config={{ kubeconfig }}
+ {{ openshift_client_binary }} --config={{ kubeconfig }}
apply -f {{ file_name }}
-n {{ namespace }}
register: generation_apply
- failed_when: "'error' in generation_apply.stderr"
+ failed_when:
+ - "'error' in generation_apply.stderr"
+ - "generation_apply.rc != 0"
changed_when: generation_apply.rc == 0
when: generation_apply.rc != 0
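
Splitting failed_when into a list tightens the check, since list entries are ANDed: the task now fails only when stderr mentions error and the return code is non-zero, rather than on any stray error string in a warning. The equivalent single expression, for comparison:

    failed_when: "'error' in generation_apply.stderr and generation_apply.rc != 0"
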
diff --git a/roles/openshift_provisioners/tasks/uninstall_provisioners.yaml b/roles/openshift_provisioners/tasks/uninstall_provisioners.yaml
index 602dee773..ac12087ec 100644
--- a/roles/openshift_provisioners/tasks/uninstall_provisioners.yaml
+++ b/roles/openshift_provisioners/tasks/uninstall_provisioners.yaml
@@ -5,7 +5,7 @@
# delete the deployment objects that we had created
- name: delete provisioner api objects
command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ {{ openshift_client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
delete {{ item }} --selector provisioners-infra -n {{ openshift_provisioners_project }} --ignore-not-found=true
with_items:
- dc
@@ -15,7 +15,7 @@
# delete our old secrets
- name: delete provisioner secrets
command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ {{ openshift_client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
delete secret {{ item }} -n {{ openshift_provisioners_project }} --ignore-not-found=true
with_items:
- provisioners-efs
@@ -26,7 +26,7 @@
# delete cluster role bindings
- name: delete cluster role bindings
command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ {{ openshift_client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
delete clusterrolebindings {{ item }} -n {{ openshift_provisioners_project }} --ignore-not-found=true
with_items:
- run-provisioners-efs
diff --git a/roles/openshift_repos/tasks/main.yaml b/roles/openshift_repos/tasks/main.yaml
index 2ada20767..911005bb6 100644
--- a/roles/openshift_repos/tasks/main.yaml
+++ b/roles/openshift_repos/tasks/main.yaml
@@ -40,9 +40,9 @@
- include_tasks: rhel_repos.yml
when:
- ansible_distribution == 'RedHat'
- - deployment_type == 'openshift-enterprise'
+ - openshift_deployment_type == 'openshift-enterprise'
- rhsub_user is defined
- - rhsub_password is defined
+ - rhsub_pass is defined
- include_tasks: centos_repos.yml
when:
diff --git a/roles/openshift_repos/tasks/rhel_repos.yml b/roles/openshift_repos/tasks/rhel_repos.yml
index c384cbe9a..8d16629cc 100644
--- a/roles/openshift_repos/tasks/rhel_repos.yml
+++ b/roles/openshift_repos/tasks/rhel_repos.yml
@@ -6,11 +6,10 @@
failed_when: repo_rhui.rc == 11
- name: Disable RHEL rhui repositories
- command: bash -c "yum-config-manager \
+ command: yum-config-manager \
--disable 'rhui-REGION-client-config-server-7' \
--disable 'rhui-REGION-rhel-server-rh-common' \
- --disable 'rhui-REGION-rhel-server-releases' \
- --disable 'rhui-REGION-client-config-server-7'"
+ --disable 'rhui-REGION-rhel-server-releases'
when: repo_rhui.changed
- name: Ensure RHEL repositories are enabled
diff --git a/roles/openshift_repos/templates/CentOS-OpenShift-Origin37.repo.j2 b/roles/openshift_repos/templates/CentOS-OpenShift-Origin37.repo.j2
new file mode 100644
index 000000000..db214af2c
--- /dev/null
+++ b/roles/openshift_repos/templates/CentOS-OpenShift-Origin37.repo.j2
@@ -0,0 +1,27 @@
+[centos-openshift-origin37]
+name=CentOS OpenShift Origin
+baseurl=http://mirror.centos.org/centos/7/paas/x86_64/openshift-origin37/
+enabled=1
+gpgcheck=1
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS
+
+[centos-openshift-origin37-testing]
+name=CentOS OpenShift Origin Testing
+baseurl=http://buildlogs.centos.org/centos/7/paas/x86_64/openshift-origin37/
+enabled={{ 1 if openshift_repos_enable_testing else 0 }}
+gpgcheck=0
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS
+
+[centos-openshift-origin37-debuginfo]
+name=CentOS OpenShift Origin DebugInfo
+baseurl=http://debuginfo.centos.org/centos/7/paas/x86_64/
+enabled=0
+gpgcheck=1
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS
+
+[centos-openshift-origin37-source]
+name=CentOS OpenShift Origin Source
+baseurl=http://vault.centos.org/centos/7/paas/Source/openshift-origin37/
+enabled=0
+gpgcheck=1
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS
diff --git a/roles/openshift_sanitize_inventory/filter_plugins/openshift_sanitize_inventory.py b/roles/openshift_sanitize_inventory/filter_plugins/openshift_sanitize_inventory.py
index 72c47b8ee..14f1f72c2 100644
--- a/roles/openshift_sanitize_inventory/filter_plugins/openshift_sanitize_inventory.py
+++ b/roles/openshift_sanitize_inventory/filter_plugins/openshift_sanitize_inventory.py
@@ -6,15 +6,6 @@
import re
-# This should be removed after map_from_pairs is no longer used in __deprecations_logging.yml
-def map_from_pairs(source, delim="="):
- ''' Returns a dict given the source and delim delimited '''
- if source == '':
- return dict()
-
- return dict(item.split(delim) for item in source.split(","))
-
-
def vars_with_pattern(source, pattern=""):
''' Returns a list of variables whose name matches the given pattern '''
if source == '':
@@ -39,6 +30,5 @@ class FilterModule(object):
def filters(self):
''' Returns the names of the filters provided by this class '''
return {
- 'map_from_pairs': map_from_pairs,
'vars_with_pattern': vars_with_pattern
}
diff --git a/roles/openshift_sanitize_inventory/meta/main.yml b/roles/openshift_sanitize_inventory/meta/main.yml
index f5b37186e..cde3eccb6 100644
--- a/roles/openshift_sanitize_inventory/meta/main.yml
+++ b/roles/openshift_sanitize_inventory/meta/main.yml
@@ -12,4 +12,6 @@ galaxy_info:
categories:
- cloud
- system
-dependencies: []
+dependencies:
+- role: lib_utils
+- role: lib_openshift
diff --git a/roles/openshift_sanitize_inventory/tasks/main.yml b/roles/openshift_sanitize_inventory/tasks/main.yml
index 651d896cf..62d460272 100644
--- a/roles/openshift_sanitize_inventory/tasks/main.yml
+++ b/roles/openshift_sanitize_inventory/tasks/main.yml
@@ -3,37 +3,11 @@
# the user would also be aware of any deprecated variables they should note to adjust
- include_tasks: deprecations.yml
-- name: Abort when conflicting deployment type variables are set
- when:
- - deployment_type is defined
- - openshift_deployment_type is defined
- - openshift_deployment_type != deployment_type
- fail:
- msg: |-
- openshift_deployment_type is set to "{{ openshift_deployment_type }}".
- deployment_type is set to "{{ deployment_type }}".
- To avoid unexpected results, this conflict is not allowed.
- deployment_type is deprecated in favor of openshift_deployment_type.
- Please specify only openshift_deployment_type, or make both the same.
-
- name: Standardize on latest variable names
set_fact:
- # goal is to deprecate deployment_type in favor of openshift_deployment_type.
- # both will be accepted for now, but code should refer to the new name.
- # TODO: once this is well-documented, add deprecation notice if using old name.
- deployment_type: "{{ openshift_deployment_type | default(deployment_type) | default | string }}"
- openshift_deployment_type: "{{ openshift_deployment_type | default(deployment_type) | default | string }}"
deployment_subtype: "{{ openshift_deployment_subtype | default(deployment_subtype) | default('basic') | string }}"
openshift_deployment_subtype: "{{ openshift_deployment_subtype | default(deployment_subtype) | default('basic') | string }}"
-- name: Abort when deployment type is invalid
- # this variable is required; complain early and clearly if it is invalid.
- when: openshift_deployment_type not in known_openshift_deployment_types
- fail:
- msg: |-
- Please set openshift_deployment_type to one of:
- {{ known_openshift_deployment_types | join(', ') }}
-
- name: Normalize openshift_release
set_fact:
# Normalize release if provided, e.g. "v3.5" => "3.5"
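
The normalization referenced here strips a leading v so that openshift_release compares equally whether given as v3.5 or 3.5. The exact expression sits outside this hunk; a plausible sketch of such a set_fact:

    - name: Normalize openshift_release (sketch; actual expression not shown in this hunk)
      set_fact:
        openshift_release: "{{ (openshift_release | string) | regex_replace('^v', '') }}"
      when: openshift_release is defined
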
diff --git a/roles/openshift_sanitize_inventory/vars/main.yml b/roles/openshift_sanitize_inventory/vars/main.yml
index 0fc2372d2..df15948d2 100644
--- a/roles/openshift_sanitize_inventory/vars/main.yml
+++ b/roles/openshift_sanitize_inventory/vars/main.yml
@@ -1,7 +1,4 @@
---
-# origin uses community packages named 'origin'
-# openshift-enterprise uses Red Hat packages named 'atomic-openshift'
-known_openshift_deployment_types: ['origin', 'openshift-enterprise']
__deprecation_header: "[DEPRECATION WARNING]:"
diff --git a/roles/openshift_service_catalog/defaults/main.yml b/roles/openshift_service_catalog/defaults/main.yml
index 7c848cb12..15ca9838c 100644
--- a/roles/openshift_service_catalog/defaults/main.yml
+++ b/roles/openshift_service_catalog/defaults/main.yml
@@ -1,6 +1,7 @@
---
openshift_service_catalog_remove: false
openshift_service_catalog_nodeselector: {"openshift-infra": "apiserver"}
+openshift_service_catalog_async_bindings_enabled: false
openshift_use_openshift_sdn: True
# os_sdn_network_plugin_name: "{% if openshift_use_openshift_sdn %}redhat/openshift-ovs-subnet{% else %}{% endif %}"
diff --git a/roles/openshift_service_catalog/tasks/generate_certs.yml b/roles/openshift_service_catalog/tasks/generate_certs.yml
index cd7bda2c6..72110b18c 100644
--- a/roles/openshift_service_catalog/tasks/generate_certs.yml
+++ b/roles/openshift_service_catalog/tasks/generate_certs.yml
@@ -12,7 +12,7 @@
- name: Generate signing cert
command: >
- {{ openshift.common.client_binary }} adm --config=/etc/origin/master/admin.kubeconfig ca create-signer-cert
+ {{ openshift_client_binary }} adm --config=/etc/origin/master/admin.kubeconfig ca create-signer-cert
--key={{ generated_certs_dir }}/ca.key --cert={{ generated_certs_dir }}/ca.crt
--serial={{ generated_certs_dir }}/apiserver.serial.txt --name=service-catalog-signer
@@ -59,11 +59,6 @@
src: "{{ generated_certs_dir }}/ca.crt"
register: apiserver_ca
-- shell: >
- {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig get apiservices.apiregistration.k8s.io/v1beta1.servicecatalog.k8s.io -n kube-service-catalog || echo "not found"
- register: get_apiservices
- changed_when: no
-
- name: Create api service
oc_obj:
state: present
@@ -86,4 +81,3 @@
caBundle: "{{ apiserver_ca.content }}"
groupPriorityMinimum: 20
versionPriority: 10
- when: "'not found' in get_apiservices.stdout"
diff --git a/roles/openshift_service_catalog/tasks/install.yml b/roles/openshift_service_catalog/tasks/install.yml
index 41a6691c9..9b38a85c4 100644
--- a/roles/openshift_service_catalog/tasks/install.yml
+++ b/roles/openshift_service_catalog/tasks/install.yml
@@ -6,10 +6,10 @@
register: mktemp
changed_when: False
-- name: Set default image variables based on deployment_type
+- name: Set default image variables based on openshift_deployment_type
include_vars: "{{ item }}"
with_first_found:
- - "{{ openshift_deployment_type | default(deployment_type) }}.yml"
+ - "{{ openshift_deployment_type }}.yml"
- "default_images.yml"
- name: Set service_catalog image facts
@@ -38,7 +38,7 @@
- name: Make kube-service-catalog project network global
command: >
- {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig adm pod-network make-projects-global kube-service-catalog
+ {{ openshift_client_binary }} --config=/etc/origin/master/admin.kubeconfig adm pod-network make-projects-global kube-service-catalog
- include_tasks: generate_certs.yml
@@ -88,14 +88,14 @@
vars:
original_content: "{{ edit_yaml.results.results[0] | to_yaml }}"
when:
- - not edit_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch', 'patch']) or not edit_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
+ - not edit_yaml.results.results[0] | lib_utils_oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch', 'patch']) or not edit_yaml.results.results[0] | lib_utils_oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
# only do this if we don't already have the updated role info
- name: update edit role for service catalog and pod preset access
command: >
- {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig replace -f {{ mktemp.stdout }}/edit_sc_patch.yml
+ {{ openshift_client_binary }} --config=/etc/origin/master/admin.kubeconfig replace -f {{ mktemp.stdout }}/edit_sc_patch.yml
when:
- - not edit_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch', 'patch']) or not edit_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
+ - not edit_yaml.results.results[0] | lib_utils_oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch', 'patch']) or not edit_yaml.results.results[0] | lib_utils_oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
- oc_obj:
name: admin
@@ -111,14 +111,14 @@
vars:
original_content: "{{ admin_yaml.results.results[0] | to_yaml }}"
when:
- - not admin_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch', 'patch']) or not admin_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
+ - not admin_yaml.results.results[0] | lib_utils_oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch', 'patch']) or not admin_yaml.results.results[0] | lib_utils_oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
# only do this if we don't already have the updated role info
- name: update admin role for service catalog and pod preset access
command: >
- {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig replace -f {{ mktemp.stdout }}/admin_sc_patch.yml
+ {{ openshift_client_binary }} --config=/etc/origin/master/admin.kubeconfig replace -f {{ mktemp.stdout }}/admin_sc_patch.yml
when:
- - not admin_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch', 'patch']) or not admin_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
+ - not admin_yaml.results.results[0] | lib_utils_oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch', 'patch']) or not admin_yaml.results.results[0] | lib_utils_oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
- oc_obj:
name: view
@@ -134,14 +134,14 @@
vars:
original_content: "{{ view_yaml.results.results[0] | to_yaml }}"
when:
- - not view_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['get', 'list', 'watch'])
+ - not view_yaml.results.results[0] | lib_utils_oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['get', 'list', 'watch'])
# only do this if we don't already have the updated role info
- name: update view role for service catalog access
command: >
- {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig replace -f {{ mktemp.stdout }}/view_sc_patch.yml
+ {{ openshift_client_binary }} --config=/etc/origin/master/admin.kubeconfig replace -f {{ mktemp.stdout }}/view_sc_patch.yml
when:
- - not view_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['get', 'list', 'watch'])
+ - not view_yaml.results.results[0] | lib_utils_oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['get', 'list', 'watch'])
- oc_adm_policy_user:
namespace: kube-service-catalog
@@ -179,6 +179,8 @@
etcd_servers: "{{ openshift.master.etcd_urls | join(',') }}"
etcd_cafile: "{{ '/etc/origin/master/master.etcd-ca.crt' if etcd_ca_crt.stat.exists else '/etc/origin/master/ca-bundle.crt' }}"
node_selector: "{{ openshift_service_catalog_nodeselector | default ({'openshift-infra': 'apiserver'}) }}"
+ # apiserver_ca is defined in generate_certs.yml
+ ca_hash: "{{ apiserver_ca.content|hash('sha1') }}"
- name: Set Service Catalog API Server daemonset
oc_obj:
diff --git a/roles/openshift_service_catalog/tasks/remove.yml b/roles/openshift_service_catalog/tasks/remove.yml
index a832e1f85..aa32d0513 100644
--- a/roles/openshift_service_catalog/tasks/remove.yml
+++ b/roles/openshift_service_catalog/tasks/remove.yml
@@ -1,7 +1,7 @@
---
- name: Remove Service Catalog APIServer
command: >
- {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig delete apiservices.apiregistration.k8s.io/v1beta1.servicecatalog.k8s.io --ignore-not-found -n kube-service-catalog
+ {{ openshift_client_binary }} --config=/etc/origin/master/admin.kubeconfig delete apiservices.apiregistration.k8s.io/v1beta1.servicecatalog.k8s.io --ignore-not-found -n kube-service-catalog
# TODO: this module doesn't currently remove this
#- name: Remove service catalog api service
@@ -48,7 +48,7 @@
- name: Remove Service Catalog kube-system Role Bindings
shell: >
- {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig process kube-system-service-catalog-role-bindings -n kube-system | {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig delete --ignore-not-found -f -
+ {{ openshift_client_binary }} --config=/etc/origin/master/admin.kubeconfig process kube-system-service-catalog-role-bindings -n kube-system | {{ openshift_client_binary }} --config=/etc/origin/master/admin.kubeconfig delete --ignore-not-found -f -
- oc_obj:
kind: template
@@ -58,7 +58,7 @@
- name: Remove Service Catalog kube-service-catalog Role Bindings
shell: >
- {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig process service-catalog-role-bindings -n kube-service-catalog | {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig delete --ignore-not-found -f -
+ {{ openshift_client_binary }} --config=/etc/origin/master/admin.kubeconfig process service-catalog-role-bindings -n kube-service-catalog | {{ openshift_client_binary }} --config=/etc/origin/master/admin.kubeconfig delete --ignore-not-found -f -
- oc_obj:
kind: template
diff --git a/roles/openshift_service_catalog/tasks/start_api_server.yml b/roles/openshift_service_catalog/tasks/start_api_server.yml
index b143292b6..84e542eaf 100644
--- a/roles/openshift_service_catalog/tasks/start_api_server.yml
+++ b/roles/openshift_service_catalog/tasks/start_api_server.yml
@@ -5,7 +5,7 @@
name: "{{ openshift.node.nodename }}"
kind: node
state: add
- labels: "{{ openshift_service_catalog_nodeselector | default ({'openshift-infra': 'apiserver'}) | oo_dict_to_list_of_dict }}"
+ labels: "{{ openshift_service_catalog_nodeselector | default ({'openshift-infra': 'apiserver'}) | lib_utils_oo_dict_to_list_of_dict }}"
# wait to see that the apiserver is available
- name: wait for api server to be ready
diff --git a/roles/openshift_service_catalog/templates/api_server.j2 b/roles/openshift_service_catalog/templates/api_server.j2
index 4f51b8c3c..e345df32c 100644
--- a/roles/openshift_service_catalog/templates/api_server.j2
+++ b/roles/openshift_service_catalog/templates/api_server.j2
@@ -14,6 +14,8 @@ spec:
type: RollingUpdate
template:
metadata:
+ annotations:
+ ca_hash: {{ ca_hash }}
labels:
app: apiserver
spec:
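The new ca_hash annotation exists so that a regenerated service-catalog CA changes the pod template and forces a rollout. A sketch of what Ansible's hash('sha1') filter computes over the fetched CA content (the input below is a truncated placeholder, not a real bundle):

    import hashlib

    def ca_hash(content: str) -> str:
        # Ansible's hash('sha1') filter is a plain SHA-1 hex digest of the string.
        return hashlib.sha1(content.encode("utf-8")).hexdigest()

    print(ca_hash("LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0t..."))  # placeholder base64 CA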
diff --git a/roles/openshift_service_catalog/templates/controller_manager.j2 b/roles/openshift_service_catalog/templates/controller_manager.j2
index 137222f04..c61e05f73 100644
--- a/roles/openshift_service_catalog/templates/controller_manager.j2
+++ b/roles/openshift_service_catalog/templates/controller_manager.j2
@@ -8,7 +8,7 @@ spec:
selector:
matchLabels:
app: controller-manager
- strategy:
+ updateStrategy:
rollingUpdate:
maxUnavailable: 1
type: RollingUpdate
@@ -38,6 +38,10 @@ spec:
- "5m"
- --feature-gates
- OriginatingIdentity=true
+{% if openshift_service_catalog_async_bindings_enabled | bool %}
+ - --feature-gates
+ - AsyncBindingOperations=true
+{% endif %}
image: {{ openshift_service_catalog_image_prefix }}service-catalog:{{ openshift_service_catalog_image_version }}
command: ["/usr/bin/service-catalog"]
imagePullPolicy: Always
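A sketch of how the new toggle renders. Note that bool is an Ansible filter rather than core Jinja, so this standalone example registers a rough stand-in:

    from jinja2 import Environment

    env = Environment(trim_blocks=True)
    # Rough stand-in for Ansible's bool filter (an approximation, not the real code).
    env.filters["bool"] = lambda v: str(v).lower() in ("1", "true", "yes", "on")

    snippet = (
        "{% if openshift_service_catalog_async_bindings_enabled | bool %}"
        "- --feature-gates\n"
        "- AsyncBindingOperations=true\n"
        "{% endif %}"
    )
    print(env.from_string(snippet).render(
        openshift_service_catalog_async_bindings_enabled=True))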
diff --git a/roles/openshift_storage_glusterfs/defaults/main.yml b/roles/openshift_storage_glusterfs/defaults/main.yml
index da34fab2a..4cbe262d2 100644
--- a/roles/openshift_storage_glusterfs/defaults/main.yml
+++ b/roles/openshift_storage_glusterfs/defaults/main.yml
@@ -6,16 +6,16 @@ openshift_storage_glusterfs_nodeselector: "glusterfs={{ openshift_storage_gluste
openshift_storage_glusterfs_use_default_selector: False
openshift_storage_glusterfs_storageclass: True
openshift_storage_glusterfs_storageclass_default: False
-openshift_storage_glusterfs_image: "{{ 'rhgs3/rhgs-server-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'gluster/gluster-centos' | quote }}"
+openshift_storage_glusterfs_image: "{{ 'rhgs3/rhgs-server-rhel7' | quote if openshift_deployment_type == 'openshift-enterprise' else 'gluster/gluster-centos' | quote }}"
openshift_storage_glusterfs_version: 'latest'
openshift_storage_glusterfs_block_deploy: True
-openshift_storage_glusterfs_block_image: "{{ 'rhgs3/rhgs-gluster-block-prov-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'gluster/glusterblock-provisioner' | quote }}"
+openshift_storage_glusterfs_block_image: "{{ 'rhgs3/rhgs-gluster-block-prov-rhel7' | quote if openshift_deployment_type == 'openshift-enterprise' else 'gluster/glusterblock-provisioner' | quote }}"
openshift_storage_glusterfs_block_version: 'latest'
openshift_storage_glusterfs_block_host_vol_create: True
openshift_storage_glusterfs_block_host_vol_size: 100
openshift_storage_glusterfs_block_host_vol_max: 15
openshift_storage_glusterfs_s3_deploy: True
-openshift_storage_glusterfs_s3_image: "{{ 'rhgs3/rhgs-gluster-s3-server-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'gluster/gluster-object' | quote }}"
+openshift_storage_glusterfs_s3_image: "{{ 'rhgs3/rhgs-gluster-s3-server-rhel7' | quote if openshift_deployment_type == 'openshift-enterprise' else 'gluster/gluster-object' | quote }}"
openshift_storage_glusterfs_s3_version: 'latest'
openshift_storage_glusterfs_s3_account: "{{ omit }}"
openshift_storage_glusterfs_s3_user: "{{ omit }}"
@@ -29,7 +29,7 @@ openshift_storage_glusterfs_heketi_is_native: "{{ openshift_storage_glusterfs_is
openshift_storage_glusterfs_heketi_is_missing: True
openshift_storage_glusterfs_heketi_deploy_is_missing: True
openshift_storage_glusterfs_heketi_cli: 'heketi-cli'
-openshift_storage_glusterfs_heketi_image: "{{ 'rhgs3/rhgs-volmanager-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'heketi/heketi' | quote }}"
+openshift_storage_glusterfs_heketi_image: "{{ 'rhgs3/rhgs-volmanager-rhel7' | quote if openshift_deployment_type == 'openshift-enterprise' else 'heketi/heketi' | quote }}"
openshift_storage_glusterfs_heketi_version: 'latest'
openshift_storage_glusterfs_heketi_admin_key: "{{ omit }}"
openshift_storage_glusterfs_heketi_user_key: "{{ omit }}"
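Every image default above follows the same inline-if pattern; here is one of them rendered outside Ansible (Ansible's quote filter is roughly shlex.quote, registered by hand below):

    import shlex
    from jinja2 import Environment

    env = Environment()
    env.filters["quote"] = shlex.quote  # approximation of Ansible's quote filter

    image = env.from_string(
        "{{ 'rhgs3/rhgs-server-rhel7' | quote "
        "if openshift_deployment_type == 'openshift-enterprise' "
        "else 'gluster/gluster-centos' | quote }}"
    )
    print(image.render(openshift_deployment_type="origin"))                # gluster/gluster-centos
    print(image.render(openshift_deployment_type="openshift-enterprise"))  # rhgs3/rhgs-server-rhel7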
diff --git a/roles/openshift_storage_glusterfs/files/v3.9/deploy-heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.9/deploy-heketi-template.yml
new file mode 100644
index 000000000..34af652c2
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.9/deploy-heketi-template.yml
@@ -0,0 +1,133 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: deploy-heketi
+ labels:
+ glusterfs: heketi-template
+ deploy-heketi: support
+ annotations:
+ description: Bootstrap Heketi installation
+ tags: glusterfs,heketi,installation
+objects:
+- kind: Service
+ apiVersion: v1
+ metadata:
+ name: deploy-heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-service
+ deploy-heketi: support
+ annotations:
+ description: Exposes Heketi service
+ spec:
+ ports:
+ - name: deploy-heketi-${CLUSTER_NAME}
+ port: 8080
+ targetPort: 8080
+ selector:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-pod
+- kind: Route
+ apiVersion: v1
+ metadata:
+ name: ${HEKETI_ROUTE}
+ labels:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-route
+ deploy-heketi: support
+ spec:
+ to:
+ kind: Service
+ name: deploy-heketi-${CLUSTER_NAME}
+- kind: DeploymentConfig
+ apiVersion: v1
+ metadata:
+ name: deploy-heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-dc
+ deploy-heketi: support
+ annotations:
+ description: Defines how to deploy Heketi
+ spec:
+ replicas: 1
+ selector:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-pod
+ triggers:
+ - type: ConfigChange
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: deploy-heketi
+ labels:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-pod
+ deploy-heketi: support
+ spec:
+ serviceAccountName: heketi-${CLUSTER_NAME}-service-account
+ containers:
+ - name: heketi
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ env:
+ - name: HEKETI_USER_KEY
+ value: ${HEKETI_USER_KEY}
+ - name: HEKETI_ADMIN_KEY
+ value: ${HEKETI_ADMIN_KEY}
+ - name: HEKETI_EXECUTOR
+ value: ${HEKETI_EXECUTOR}
+ - name: HEKETI_FSTAB
+ value: ${HEKETI_FSTAB}
+ - name: HEKETI_SNAPSHOT_LIMIT
+ value: '14'
+ - name: HEKETI_KUBE_GLUSTER_DAEMONSET
+ value: '1'
+ ports:
+ - containerPort: 8080
+ volumeMounts:
+ - name: db
+ mountPath: /var/lib/heketi
+ - name: config
+ mountPath: /etc/heketi
+ readinessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 3
+ httpGet:
+ path: /hello
+ port: 8080
+ livenessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 30
+ httpGet:
+ path: /hello
+ port: 8080
+ volumes:
+ - name: db
+ - name: config
+ secret:
+ secretName: heketi-${CLUSTER_NAME}-config-secret
+parameters:
+- name: HEKETI_USER_KEY
+ displayName: Heketi User Secret
+ description: Set secret for those creating volumes as type _user_
+- name: HEKETI_ADMIN_KEY
+ displayName: Heketi Administrator Secret
+ description: Set secret for administration of the Heketi service as user _admin_
+- name: HEKETI_EXECUTOR
+ displayName: heketi executor type
+ description: Set the executor type, kubernetes or ssh
+ value: kubernetes
+- name: HEKETI_FSTAB
+ displayName: heketi fstab path
+ description: Set the fstab path, file that is populated with bricks that heketi creates
+ value: /var/lib/heketi/fstab
+- name: HEKETI_ROUTE
+ displayName: heketi route name
+ description: Set the hostname for the route URL
+ value: "heketi-glusterfs"
+- name: IMAGE_NAME
+ displayName: heketi container image name
+ required: True
+- name: IMAGE_VERSION
+ displayName: heketi container image version
+ required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify this heketi service, useful for running multiple heketi instances
+ value: glusterfs
diff --git a/roles/openshift_storage_glusterfs/files/v3.9/gluster-s3-pvcs-template.yml b/roles/openshift_storage_glusterfs/files/v3.9/gluster-s3-pvcs-template.yml
new file mode 100644
index 000000000..064b51473
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.9/gluster-s3-pvcs-template.yml
@@ -0,0 +1,67 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: gluster-s3-pvcs
+ labels:
+ glusterfs: s3-pvcs-template
+ gluster-s3: pvcs-template
+ annotations:
+ description: Gluster S3 service template
+ tags: glusterfs,heketi,gluster-s3
+objects:
+- kind: PersistentVolumeClaim
+ apiVersion: v1
+ metadata:
+ name: "${PVC}"
+ labels:
+ glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-storage
+ gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-pvc
+ annotations:
+ volume.beta.kubernetes.io/storage-class: "glusterfs-${CLUSTER_NAME}"
+ spec:
+ accessModes:
+ - ReadWriteMany
+ resources:
+ requests:
+ storage: "${PVC_SIZE}"
+- kind: PersistentVolumeClaim
+ apiVersion: v1
+ metadata:
+ name: "${META_PVC}"
+ labels:
+ glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-storage
+ gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-meta-pvc
+ annotations:
+ volume.beta.kubernetes.io/storage-class: "glusterfs-${CLUSTER_NAME}"
+ spec:
+ accessModes:
+ - ReadWriteMany
+ resources:
+ requests:
+ storage: "${META_PVC_SIZE}"
+parameters:
+- name: S3_ACCOUNT
+ displayName: S3 Account Name
+ description: S3 storage account which will provide storage on GlusterFS volumes
+ required: true
+- name: PVC
+ displayName: Primary GlusterFS-backed PVC
+ description: GlusterFS-backed PVC for object storage
+ required: true
+- name: PVC_SIZE
+ displayName: Primary GlusterFS-backed PVC capacity
+ description: Capacity for GlusterFS-backed PVC for object storage
+ value: 2Gi
+- name: META_PVC
+ displayName: Metadata GlusterFS-backed PVC
+ description: GlusterFS-backed PVC for object storage metadata
+ required: true
+- name: META_PVC_SIZE
+ displayName: Metadata GlusterFS-backed PVC capacity
+ description: Capacity for GlusterFS-backed PVC for object storage metadata
+ value: 1Gi
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances
+ value: storage
diff --git a/roles/openshift_storage_glusterfs/files/v3.9/gluster-s3-template.yml b/roles/openshift_storage_glusterfs/files/v3.9/gluster-s3-template.yml
new file mode 100644
index 000000000..896a1b226
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.9/gluster-s3-template.yml
@@ -0,0 +1,140 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: gluster-s3
+ labels:
+ glusterfs: s3-template
+ gluster-s3: template
+ annotations:
+ description: Gluster S3 service template
+ tags: glusterfs,heketi,gluster-s3
+objects:
+- kind: Service
+ apiVersion: v1
+ metadata:
+ name: gluster-s3-${CLUSTER_NAME}-${S3_ACCOUNT}-service
+ labels:
+ glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-service
+ gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-service
+ spec:
+ ports:
+ - protocol: TCP
+ port: 8080
+ targetPort: 8080
+ selector:
+ glusterfs: s3-pod
+ type: ClusterIP
+ sessionAffinity: None
+ status:
+ loadBalancer: {}
+- kind: Route
+ apiVersion: v1
+ metadata:
+ name: gluster-s3-${CLUSTER_NAME}-${S3_ACCOUNT}-route
+ labels:
+ glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-route
+ gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-route
+ spec:
+ to:
+ kind: Service
+ name: gluster-s3-${CLUSTER_NAME}-${S3_ACCOUNT}-service
+- kind: DeploymentConfig
+ apiVersion: v1
+ metadata:
+ name: gluster-s3-${CLUSTER_NAME}-${S3_ACCOUNT}-dc
+ labels:
+ glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-dc
+ gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-dc
+ annotations:
+ openshift.io/scc: privileged
+ description: Defines how to deploy gluster s3 object storage
+ spec:
+ replicas: 1
+ selector:
+ glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-pod
+ template:
+ metadata:
+ name: gluster-${CLUSTER_NAME}-${S3_ACCOUNT}-s3
+ labels:
+ glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-pod
+ gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-pod
+ spec:
+ containers:
+ - name: gluster-s3
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ imagePullPolicy: IfNotPresent
+ ports:
+ - name: gluster
+ containerPort: 8080
+ protocol: TCP
+ env:
+ - name: S3_ACCOUNT
+ value: "${S3_ACCOUNT}"
+ - name: S3_USER
+ value: "${S3_USER}"
+ - name: S3_PASSWORD
+ value: "${S3_PASSWORD}"
+ resources: {}
+ volumeMounts:
+ - name: gluster-vol1
+ mountPath: "/mnt/gluster-object/${S3_ACCOUNT}"
+ - name: gluster-vol2
+ mountPath: "/mnt/gluster-object/gsmetadata"
+ - name: glusterfs-cgroup
+ readOnly: true
+ mountPath: "/sys/fs/cgroup"
+ terminationMessagePath: "/dev/termination-log"
+ securityContext:
+ privileged: true
+ volumes:
+ - name: glusterfs-cgroup
+ hostPath:
+ path: "/sys/fs/cgroup"
+ - name: gluster-vol1
+ persistentVolumeClaim:
+ claimName: ${PVC}
+ - name: gluster-vol2
+ persistentVolumeClaim:
+ claimName: ${META_PVC}
+ restartPolicy: Always
+ terminationGracePeriodSeconds: 30
+ dnsPolicy: ClusterFirst
+ serviceAccountName: default
+ serviceAccount: default
+ securityContext: {}
+parameters:
+- name: IMAGE_NAME
+  displayName: gluster-s3 container image name
+ required: True
+- name: IMAGE_VERSION
+  displayName: gluster-s3 container image version
+ required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances
+ value: storage
+- name: S3_ACCOUNT
+ displayName: S3 Account Name
+ description: S3 storage account which will provide storage on GlusterFS volumes
+ required: true
+- name: S3_USER
+ displayName: S3 User
+ description: S3 user who can access the S3 storage account
+ required: true
+- name: S3_PASSWORD
+ displayName: S3 User Password
+ description: Password for the S3 user
+ required: true
+- name: PVC
+ displayName: Primary GlusterFS-backed PVC
+ description: GlusterFS-backed PVC for object storage
+ value: gluster-s3-claim
+- name: META_PVC
+ displayName: Metadata GlusterFS-backed PVC
+ description: GlusterFS-backed PVC for object storage metadata
+ value: gluster-s3-meta-claim
diff --git a/roles/openshift_storage_glusterfs/files/v3.9/glusterblock-provisioner.yml b/roles/openshift_storage_glusterfs/files/v3.9/glusterblock-provisioner.yml
new file mode 100644
index 000000000..63dd5cce6
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.9/glusterblock-provisioner.yml
@@ -0,0 +1,104 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: glusterblock-provisioner
+ labels:
+ glusterfs: block-template
+ glusterblock: template
+ annotations:
+ description: glusterblock provisioner template
+ tags: glusterfs
+objects:
+- kind: ClusterRole
+ apiVersion: v1
+ metadata:
+ name: glusterblock-provisioner-runner
+ labels:
+ glusterfs: block-provisioner-runner-clusterrole
+ glusterblock: provisioner-runner-clusterrole
+ rules:
+ - apiGroups: [""]
+ resources: ["persistentvolumes"]
+ verbs: ["get", "list", "watch", "create", "delete"]
+ - apiGroups: [""]
+ resources: ["persistentvolumeclaims"]
+ verbs: ["get", "list", "watch", "update"]
+ - apiGroups: ["storage.k8s.io"]
+ resources: ["storageclasses"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: [""]
+ resources: ["events"]
+ verbs: ["list", "watch", "create", "update", "patch"]
+ - apiGroups: [""]
+ resources: ["services"]
+ verbs: ["get"]
+ - apiGroups: [""]
+ resources: ["secrets"]
+ verbs: ["get", "create", "delete"]
+ - apiGroups: [""]
+ resources: ["routes"]
+ verbs: ["get", "list"]
+- apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: glusterblock-${CLUSTER_NAME}-provisioner
+ labels:
+ glusterfs: block-${CLUSTER_NAME}-provisioner-sa
+ glusterblock: ${CLUSTER_NAME}-provisioner-sa
+- apiVersion: v1
+ kind: ClusterRoleBinding
+ metadata:
+ name: glusterblock-${CLUSTER_NAME}-provisioner
+ roleRef:
+ name: glusterblock-provisioner-runner
+ subjects:
+ - kind: ServiceAccount
+ name: glusterblock-${CLUSTER_NAME}-provisioner
+ namespace: ${NAMESPACE}
+- kind: DeploymentConfig
+ apiVersion: v1
+ metadata:
+ name: glusterblock-${CLUSTER_NAME}-provisioner-dc
+ labels:
+ glusterfs: block-${CLUSTER_NAME}-provisioner-dc
+ glusterblock: ${CLUSTER_NAME}-provisioner-dc
+ annotations:
+ description: Defines how to deploy the glusterblock provisioner pod.
+ spec:
+ replicas: 1
+ selector:
+ glusterfs: block-${CLUSTER_NAME}-provisioner-pod
+ triggers:
+ - type: ConfigChange
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: glusterblock-provisioner
+ labels:
+ glusterfs: block-${CLUSTER_NAME}-provisioner-pod
+ spec:
+ serviceAccountName: glusterblock-${CLUSTER_NAME}-provisioner
+ containers:
+ - name: glusterblock-provisioner
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: PROVISIONER_NAME
+ value: gluster.org/glusterblock
+parameters:
+- name: IMAGE_NAME
+ displayName: glusterblock provisioner container image name
+ required: True
+- name: IMAGE_VERSION
+ displayName: glusterblock provisioner container image version
+ required: True
+- name: NAMESPACE
+ displayName: glusterblock provisioner namespace
+ description: The namespace in which these resources are being created
+ required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances
+ value: storage
diff --git a/roles/openshift_storage_glusterfs/files/v3.9/glusterfs-template.yml b/roles/openshift_storage_glusterfs/files/v3.9/glusterfs-template.yml
new file mode 100644
index 000000000..09850a2c2
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.9/glusterfs-template.yml
@@ -0,0 +1,154 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: glusterfs
+ labels:
+ glusterfs: template
+ annotations:
+ description: GlusterFS DaemonSet template
+ tags: glusterfs
+objects:
+- kind: DaemonSet
+ apiVersion: extensions/v1beta1
+ metadata:
+ name: glusterfs-${CLUSTER_NAME}
+ labels:
+ glusterfs: ${CLUSTER_NAME}-daemonset
+ annotations:
+ description: GlusterFS DaemonSet
+ tags: glusterfs
+ spec:
+ selector:
+ matchLabels:
+ glusterfs: ${CLUSTER_NAME}-pod
+ template:
+ metadata:
+ name: glusterfs-${CLUSTER_NAME}
+ labels:
+ glusterfs: ${CLUSTER_NAME}-pod
+ glusterfs-node: pod
+ spec:
+ nodeSelector: "${{NODE_LABELS}}"
+ hostNetwork: true
+ containers:
+ - name: glusterfs
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: GB_GLFS_LRU_COUNT
+ value: "${GB_GLFS_LRU_COUNT}"
+ - name: TCMU_LOGDIR
+ value: "${TCMU_LOGDIR}"
+ resources:
+ requests:
+ memory: 100Mi
+ cpu: 100m
+ volumeMounts:
+ - name: glusterfs-heketi
+ mountPath: "/var/lib/heketi"
+ - name: glusterfs-run
+ mountPath: "/run"
+ - name: glusterfs-lvm
+ mountPath: "/run/lvm"
+ - name: glusterfs-etc
+ mountPath: "/etc/glusterfs"
+ - name: glusterfs-logs
+ mountPath: "/var/log/glusterfs"
+ - name: glusterfs-config
+ mountPath: "/var/lib/glusterd"
+ - name: glusterfs-dev
+ mountPath: "/dev"
+ - name: glusterfs-misc
+ mountPath: "/var/lib/misc/glusterfsd"
+ - name: glusterfs-cgroup
+ mountPath: "/sys/fs/cgroup"
+ readOnly: true
+ - name: glusterfs-ssl
+ mountPath: "/etc/ssl"
+ readOnly: true
+ securityContext:
+ capabilities: {}
+ privileged: true
+ readinessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 40
+ exec:
+ command:
+ - "/bin/bash"
+ - "-c"
+ - systemctl status glusterd.service
+ periodSeconds: 25
+ successThreshold: 1
+ failureThreshold: 15
+ livenessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 40
+ exec:
+ command:
+ - "/bin/bash"
+ - "-c"
+ - systemctl status glusterd.service
+ periodSeconds: 25
+ successThreshold: 1
+ failureThreshold: 15
+ terminationMessagePath: "/dev/termination-log"
+ volumes:
+ - name: glusterfs-heketi
+ hostPath:
+ path: "/var/lib/heketi"
+ - name: glusterfs-run
+ emptyDir: {}
+ - name: glusterfs-lvm
+ hostPath:
+ path: "/run/lvm"
+ - name: glusterfs-etc
+ hostPath:
+ path: "/etc/glusterfs"
+ - name: glusterfs-logs
+ hostPath:
+ path: "/var/log/glusterfs"
+ - name: glusterfs-config
+ hostPath:
+ path: "/var/lib/glusterd"
+ - name: glusterfs-dev
+ hostPath:
+ path: "/dev"
+ - name: glusterfs-misc
+ hostPath:
+ path: "/var/lib/misc/glusterfsd"
+ - name: glusterfs-cgroup
+ hostPath:
+ path: "/sys/fs/cgroup"
+ - name: glusterfs-ssl
+ hostPath:
+ path: "/etc/ssl"
+ restartPolicy: Always
+ terminationGracePeriodSeconds: 30
+ dnsPolicy: ClusterFirst
+ securityContext: {}
+parameters:
+- name: NODE_LABELS
+ displayName: Daemonset Node Labels
+ description: Labels which define the daemonset node selector. Must contain at least one label of the format \'glusterfs=<CLUSTER_NAME>-host\'
+ value: '{ "glusterfs": "storage-host" }'
+- name: IMAGE_NAME
+ displayName: GlusterFS container image name
+ required: True
+- name: IMAGE_VERSION
+ displayName: GlusterFS container image version
+ required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances
+ value: storage
+- name: GB_GLFS_LRU_COUNT
+ displayName: Maximum number of block hosting volumes
+  description: Sets the maximum number of block hosting volumes.
+ value: "15"
+ required: true
+- name: TCMU_LOGDIR
+ displayName: Tcmu runner log directory
+  description: Sets the tcmu-runner log directory.
+ value: "/var/log/glusterfs/gluster-block"
+ required: true
diff --git a/roles/openshift_storage_glusterfs/files/v3.9/heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.9/heketi-template.yml
new file mode 100644
index 000000000..28cdb2982
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.9/heketi-template.yml
@@ -0,0 +1,136 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: heketi
+ labels:
+ glusterfs: heketi-template
+ annotations:
+ description: Heketi service deployment template
+ tags: glusterfs,heketi
+objects:
+- kind: Service
+ apiVersion: v1
+ metadata:
+ name: heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: heketi-${CLUSTER_NAME}-service
+ heketi: ${CLUSTER_NAME}-service
+ annotations:
+ description: Exposes Heketi service
+ spec:
+ ports:
+ - name: heketi
+ port: 8080
+ targetPort: 8080
+ selector:
+ glusterfs: heketi-${CLUSTER_NAME}-pod
+- kind: Route
+ apiVersion: v1
+ metadata:
+ name: ${HEKETI_ROUTE}
+ labels:
+ glusterfs: heketi-${CLUSTER_NAME}-route
+ heketi: ${CLUSTER_NAME}-route
+ spec:
+ to:
+ kind: Service
+ name: heketi-${CLUSTER_NAME}
+- kind: DeploymentConfig
+ apiVersion: v1
+ metadata:
+ name: heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: heketi-${CLUSTER_NAME}-dc
+ heketi: ${CLUSTER_NAME}-dc
+ annotations:
+ description: Defines how to deploy Heketi
+ spec:
+ replicas: 1
+ selector:
+ glusterfs: heketi-${CLUSTER_NAME}-pod
+ triggers:
+ - type: ConfigChange
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: heketi-${CLUSTER_NAME}-pod
+ heketi: ${CLUSTER_NAME}-pod
+ spec:
+ serviceAccountName: heketi-${CLUSTER_NAME}-service-account
+ containers:
+ - name: heketi
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: HEKETI_USER_KEY
+ value: ${HEKETI_USER_KEY}
+ - name: HEKETI_ADMIN_KEY
+ value: ${HEKETI_ADMIN_KEY}
+ - name: HEKETI_EXECUTOR
+ value: ${HEKETI_EXECUTOR}
+ - name: HEKETI_FSTAB
+ value: ${HEKETI_FSTAB}
+ - name: HEKETI_SNAPSHOT_LIMIT
+ value: '14'
+ - name: HEKETI_KUBE_GLUSTER_DAEMONSET
+ value: '1'
+ ports:
+ - containerPort: 8080
+ volumeMounts:
+ - name: db
+ mountPath: /var/lib/heketi
+ - name: config
+ mountPath: /etc/heketi
+ readinessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 3
+ httpGet:
+ path: /hello
+ port: 8080
+ livenessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 30
+ httpGet:
+ path: /hello
+ port: 8080
+ volumes:
+ - name: db
+ glusterfs:
+ endpoints: heketi-db-${CLUSTER_NAME}-endpoints
+ path: heketidbstorage
+ - name: config
+ secret:
+ secretName: heketi-${CLUSTER_NAME}-config-secret
+parameters:
+- name: HEKETI_USER_KEY
+ displayName: Heketi User Secret
+ description: Set secret for those creating volumes as type _user_
+- name: HEKETI_ADMIN_KEY
+ displayName: Heketi Administrator Secret
+ description: Set secret for administration of the Heketi service as user _admin_
+- name: HEKETI_EXECUTOR
+ displayName: heketi executor type
+ description: Set the executor type, kubernetes or ssh
+ value: kubernetes
+- name: HEKETI_FSTAB
+ displayName: heketi fstab path
+  description: Set the fstab path, the file that is populated with the bricks heketi creates
+ value: /var/lib/heketi/fstab
+- name: HEKETI_ROUTE
+ displayName: heketi route name
+ description: Set the hostname for the route URL
+ value: "heketi-glusterfs"
+- name: IMAGE_NAME
+ displayName: heketi container image name
+ required: True
+- name: IMAGE_VERSION
+ displayName: heketi container image version
+ required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify this heketi service, useful for running multiple heketi instances
+ value: glusterfs
diff --git a/roles/openshift_storage_glusterfs/filter_plugins/openshift_storage_glusterfs.py b/roles/openshift_storage_glusterfs/filter_plugins/openshift_storage_glusterfs.py
deleted file mode 100644
index a86c96df7..000000000
--- a/roles/openshift_storage_glusterfs/filter_plugins/openshift_storage_glusterfs.py
+++ /dev/null
@@ -1,23 +0,0 @@
-'''
- Openshift Storage GlusterFS class that provides useful filters used in GlusterFS
-'''
-
-
-def map_from_pairs(source, delim="="):
- ''' Returns a dict given the source and delim delimited '''
- if source == '':
- return dict()
-
- return dict(item.split(delim) for item in source.split(","))
-
-
-# pylint: disable=too-few-public-methods
-class FilterModule(object):
- ''' OpenShift Storage GlusterFS Filters '''
-
- # pylint: disable=no-self-use, too-few-public-methods
- def filters(self):
- ''' Returns the names of the filters provided by this class '''
- return {
- 'map_from_pairs': map_from_pairs
- }
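The filter itself now lives in the lib_utils role; its behavior is unchanged from the deleted code above, turning a delimited pair string into a dict (this is what the glusterfs_nodeselector defaults rely on):

    def map_from_pairs(source, delim="="):
        """Turn 'k1=v1,k2=v2' into {'k1': 'v1', 'k2': 'v2'}."""
        if source == '':
            return dict()
        return dict(item.split(delim) for item in source.split(","))

    print(map_from_pairs("storagenode=glusterfs"))      # {'storagenode': 'glusterfs'}
    print(map_from_pairs("region=infra,zone=default"))  # {'region': 'infra', 'zone': 'default'}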
diff --git a/roles/openshift_storage_glusterfs/meta/main.yml b/roles/openshift_storage_glusterfs/meta/main.yml
index 6a4ef942b..aa20245d5 100644
--- a/roles/openshift_storage_glusterfs/meta/main.yml
+++ b/roles/openshift_storage_glusterfs/meta/main.yml
@@ -12,4 +12,4 @@ galaxy_info:
dependencies:
- role: openshift_facts
- role: lib_openshift
-- role: lib_os_firewall
+- role: lib_utils
diff --git a/roles/openshift_storage_glusterfs/tasks/gluster_s3_deploy.yml b/roles/openshift_storage_glusterfs/tasks/gluster_s3_deploy.yml
index 1664ecc1e..5b4c16740 100644
--- a/roles/openshift_storage_glusterfs/tasks/gluster_s3_deploy.yml
+++ b/roles/openshift_storage_glusterfs/tasks/gluster_s3_deploy.yml
@@ -63,7 +63,7 @@
until:
- "gluster_s3_pvcs.results.results[0]['items'] | count > 0"
# Both PVCs' 'Bound' status must be True
- - "gluster_s3_pvcs.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Bound'}) | map('bool') | select | list | count == 2"
+ - "gluster_s3_pvcs.results.results[0]['items'] | lib_utils_oo_collect(attribute='status.conditions') | lib_utils_oo_collect(attribute='status', filters={'type': 'Bound'}) | map('bool') | select | list | count == 2"
delay: 10
retries: "{{ (glusterfs_timeout | int / 10) | int }}"
@@ -108,6 +108,6 @@
until:
- "gluster_s3_pod.results.results[0]['items'] | count > 0"
# Pod's 'Ready' status must be True
- - "gluster_s3_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"
+ - "gluster_s3_pod.results.results[0]['items'] | lib_utils_oo_collect(attribute='status.conditions') | lib_utils_oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"
delay: 10
retries: "{{ (glusterfs_timeout | int / 10) | int }}"
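All of the lib_utils_oo_collect readiness checks renamed in this commit compute the same quantity. A plain-Python sketch (not the actual filter plugin code) of the chain — collect each pod's conditions, keep the status of the matching condition type, coerce to bool, and count the truthy ones:

    def ready_count(pods, cond_type="Ready"):
        count = 0
        for pod in pods:
            for cond in pod.get("status", {}).get("conditions", []):
                is_true = str(cond.get("status")).lower() == "true"
                if cond.get("type") == cond_type and is_true:
                    count += 1
        return count

    pods = [{"status": {"conditions": [{"type": "Ready", "status": "True"}]}}]
    assert ready_count(pods) == 1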
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterblock_deploy.yml b/roles/openshift_storage_glusterfs/tasks/glusterblock_deploy.yml
index d6be8c726..e5dcdcab7 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterblock_deploy.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterblock_deploy.yml
@@ -61,6 +61,6 @@
until:
- "glusterblock_pod.results.results[0]['items'] | count > 0"
# Pod's 'Ready' status must be True
- - "glusterblock_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"
+ - "glusterblock_pod.results.results[0]['items'] | lib_utils_oo_collect(attribute='status.conditions') | lib_utils_oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"
delay: 10
retries: "{{ (glusterfs_timeout | int / 10) | int }}"
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
index d11023a39..001578406 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
@@ -2,7 +2,7 @@
- name: Make sure heketi-client is installed
package: name=heketi-client state=present
when:
- - not openshift.common.is_atomic | bool
+ - not openshift_is_atomic | bool
- not glusterfs_heketi_is_native | bool
register: result
until: result is succeeded
@@ -126,7 +126,7 @@
- "glusterfs_heketi_is_native"
- "deploy_heketi_pod.results.results[0]['items'] | count > 0"
# deploy-heketi is not missing when there are one or more pods with matching labels whose 'Ready' status is True
- - "deploy_heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0"
+ - "deploy_heketi_pod.results.results[0]['items'] | lib_utils_oo_collect(attribute='status.conditions') | lib_utils_oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0"
- name: Check for existing heketi pod
oc_obj:
@@ -144,7 +144,7 @@
- "glusterfs_heketi_is_native"
- "heketi_pod.results.results[0]['items'] | count > 0"
# heketi is not missing when there are one or more pods with matching labels whose 'Ready' status is True
- - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0"
+ - "heketi_pod.results.results[0]['items'] | lib_utils_oo_collect(attribute='status.conditions') | lib_utils_oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0"
- name: Generate topology file
template:
@@ -177,14 +177,14 @@
- name: Generate heketi admin key
set_fact:
- glusterfs_heketi_admin_key: "{{ 32 | oo_generate_secret }}"
+ glusterfs_heketi_admin_key: "{{ 32 | lib_utils_oo_generate_secret }}"
when:
- glusterfs_heketi_is_native
- glusterfs_heketi_admin_key is undefined
- name: Generate heketi user key
set_fact:
- glusterfs_heketi_user_key: "{{ 32 | oo_generate_secret }}"
+ glusterfs_heketi_user_key: "{{ 32 | lib_utils_oo_generate_secret }}"
until: "glusterfs_heketi_user_key != glusterfs_heketi_admin_key"
delay: 1
retries: 10
@@ -228,7 +228,7 @@
until:
- "deploy_heketi_pod.results.results[0]['items'] | count > 0"
# Pod's 'Ready' status must be True
- - "deploy_heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"
+ - "deploy_heketi_pod.results.results[0]['items'] | lib_utils_oo_collect(attribute='status.conditions') | lib_utils_oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"
delay: 10
retries: "{{ (glusterfs_timeout | int / 10) | int }}"
when:
@@ -238,14 +238,14 @@
- name: Set heketi-cli command
set_fact:
- glusterfs_heketi_client: "{% if glusterfs_heketi_is_native %}{{ openshift.common.client_binary }} rsh --namespace={{ glusterfs_namespace }} {%if not glusterfs_heketi_is_missing %}{{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }}{% else %}{{ deploy_heketi_pod.results.results[0]['items'][0]['metadata']['name'] }}{% endif %} {% endif %}{{ glusterfs_heketi_cli }} -s http://{% if glusterfs_heketi_is_native %}localhost:8080{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %} --user admin {% if glusterfs_heketi_admin_key is defined %}--secret '{{ glusterfs_heketi_admin_key }}'{% endif %}"
+ glusterfs_heketi_client: "{% if glusterfs_heketi_is_native %}{{ openshift_client_binary }} rsh --namespace={{ glusterfs_namespace }} {%if not glusterfs_heketi_is_missing %}{{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }}{% else %}{{ deploy_heketi_pod.results.results[0]['items'][0]['metadata']['name'] }}{% endif %} {% endif %}{{ glusterfs_heketi_cli }} -s http://{% if glusterfs_heketi_is_native %}localhost:8080{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %} --user admin {% if glusterfs_heketi_admin_key is defined %}--secret '{{ glusterfs_heketi_admin_key }}'{% endif %}"
- name: Verify heketi service
command: "{{ glusterfs_heketi_client }} cluster list"
changed_when: False
- name: Place heketi topology on heketi Pod
- shell: "{{ openshift.common.client_binary }} exec --namespace={{ glusterfs_namespace }} -i {%if not glusterfs_heketi_is_missing %}{{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }}{% else %}{{ deploy_heketi_pod.results.results[0]['items'][0]['metadata']['name'] }}{% endif %} -- bash -c 'mkdir -p {{ mktemp.stdout }} && cat > {{ mktemp.stdout }}/topology.json' < {{ mktemp.stdout }}/topology.json"
+ shell: "{{ openshift_client_binary }} exec --namespace={{ glusterfs_namespace }} -i {%if not glusterfs_heketi_is_missing %}{{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }}{% else %}{{ deploy_heketi_pod.results.results[0]['items'][0]['metadata']['name'] }}{% endif %} -- bash -c 'mkdir -p {{ mktemp.stdout }} && cat > {{ mktemp.stdout }}/topology.json' < {{ mktemp.stdout }}/topology.json"
when:
- glusterfs_heketi_is_native
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
index 2ea7286f3..a374df0ce 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
@@ -4,6 +4,7 @@
glusterfs_namespace: "{{ openshift_storage_glusterfs_namespace }}"
glusterfs_is_native: "{{ openshift_storage_glusterfs_is_native | bool }}"
glusterfs_name: "{{ openshift_storage_glusterfs_name }}"
+ # map_from_pairs is a custom filter plugin in role lib_utils
glusterfs_nodeselector: "{{ openshift_storage_glusterfs_nodeselector | default(['storagenode', openshift_storage_glusterfs_name] | join('=')) | map_from_pairs }}"
glusterfs_use_default_selector: "{{ openshift_storage_glusterfs_use_default_selector }}"
glusterfs_storageclass: "{{ openshift_storage_glusterfs_storageclass }}"
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
index 0c2fcb2c5..4cc82f1ad 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
@@ -21,7 +21,7 @@
name: "{{ hostvars[item].openshift.node.nodename }}"
kind: node
state: absent
- labels: "{{ glusterfs_nodeselector | oo_dict_to_list_of_dict }}"
+ labels: "{{ glusterfs_nodeselector | lib_utils_oo_dict_to_list_of_dict }}"
with_items: "{{ groups.all }}"
when: "'openshift' in hostvars[item] and glusterfs_wipe"
@@ -60,7 +60,7 @@
name: "{{ hostvars[item].openshift.node.nodename }}"
kind: node
state: add
- labels: "{{ glusterfs_nodeselector | oo_dict_to_list_of_dict }}"
+ labels: "{{ glusterfs_nodeselector | lib_utils_oo_dict_to_list_of_dict }}"
with_items: "{{ glusterfs_nodes | default([]) }}"
- name: Copy GlusterFS DaemonSet template
@@ -109,6 +109,6 @@
until:
- "glusterfs_pods.results.results[0]['items'] | count > 0"
# There must be as many pods with 'Ready' status True as there are nodes expecting those pods
- - "glusterfs_pods.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == glusterfs_nodes | count"
+ - "glusterfs_pods.results.results[0]['items'] | lib_utils_oo_collect(attribute='status.conditions') | lib_utils_oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == glusterfs_nodes | count"
delay: 10
retries: "{{ (glusterfs_timeout | int / 10) | int }}"
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
index b7cff6514..544a6f491 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
@@ -4,6 +4,7 @@
glusterfs_namespace: "{{ openshift_storage_glusterfs_registry_namespace }}"
glusterfs_is_native: "{{ openshift_storage_glusterfs_registry_is_native | bool }}"
glusterfs_name: "{{ openshift_storage_glusterfs_registry_name }}"
+ # map_from_pairs is a custom filter plugin in role lib_utils
glusterfs_nodeselector: "{{ openshift_storage_glusterfs_registry_nodeselector | default(['storagenode', openshift_storage_glusterfs_registry_name] | join('=')) | map_from_pairs }}"
glusterfs_use_default_selector: "{{ openshift_storage_glusterfs_registry_use_default_selector }}"
glusterfs_storageclass: "{{ openshift_storage_glusterfs_registry_storageclass }}"
diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
index d23bd42b9..c0a8c53de 100644
--- a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
+++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
@@ -4,7 +4,7 @@
register: setup_storage
- name: Copy heketi-storage list
- shell: "{{ openshift.common.client_binary }} rsh --namespace={{ glusterfs_namespace }} {{ deploy_heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} cat /tmp/heketi-storage.json > {{ mktemp.stdout }}/heketi-storage.json"
+ shell: "{{ openshift_client_binary }} rsh --namespace={{ glusterfs_namespace }} {{ deploy_heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} cat /tmp/heketi-storage.json > {{ mktemp.stdout }}/heketi-storage.json"
# This is used in the subsequent task
- name: Copy the admin client config
@@ -15,7 +15,7 @@
# Need `command` here because heketi-storage.json contains multiple objects.
- name: Copy heketi DB to GlusterFS volume
- command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f {{ mktemp.stdout }}/heketi-storage.json -n {{ glusterfs_namespace }}"
+ command: "{{ openshift_client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f {{ mktemp.stdout }}/heketi-storage.json -n {{ glusterfs_namespace }}"
when: setup_storage.rc == 0
- name: Wait for copy job to finish
@@ -28,14 +28,14 @@
until:
- "'results' in heketi_job.results and heketi_job.results.results | count > 0"
# Pod's 'Complete' status must be True
- - "heketi_job.results.results | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Complete'}) | map('bool') | select | list | count == 1"
+ - "heketi_job.results.results | lib_utils_oo_collect(attribute='status.conditions') | lib_utils_oo_collect(attribute='status', filters={'type': 'Complete'}) | map('bool') | select | list | count == 1"
delay: 10
retries: "{{ (glusterfs_timeout | int / 10) | int }}"
failed_when:
- "'results' in heketi_job.results"
- "heketi_job.results.results | count > 0"
# Fail when pod's 'Failed' status is True
- - "heketi_job.results.results | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Failed'}) | map('bool') | select | list | count == 1"
+ - "heketi_job.results.results | lib_utils_oo_collect(attribute='status.conditions') | lib_utils_oo_collect(attribute='status', filters={'type': 'Failed'}) | map('bool') | select | list | count == 1"
when: setup_storage.rc == 0
- name: Delete deploy resources
@@ -120,13 +120,13 @@
until:
- "heketi_pod.results.results[0]['items'] | count > 0"
# Pod's 'Ready' status must be True
- - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"
+ - "heketi_pod.results.results[0]['items'] | lib_utils_oo_collect(attribute='status.conditions') | lib_utils_oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"
delay: 10
retries: "{{ (glusterfs_timeout | int / 10) | int }}"
- name: Set heketi-cli command
set_fact:
- glusterfs_heketi_client: "{{ openshift.common.client_binary }} rsh --namespace={{ glusterfs_namespace }} {{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} {{ glusterfs_heketi_cli }} -s http://localhost:8080 --user admin --secret '{{ glusterfs_heketi_admin_key }}'"
+ glusterfs_heketi_client: "{{ openshift_client_binary }} rsh --namespace={{ glusterfs_namespace }} {{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} {{ glusterfs_heketi_cli }} -s http://localhost:8080 --user admin --secret '{{ glusterfs_heketi_admin_key }}'"
- name: Verify heketi service
command: "{{ glusterfs_heketi_client }} cluster list"
diff --git a/roles/openshift_storage_glusterfs/templates/v3.9/glusterfs-registry-endpoints.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.9/glusterfs-registry-endpoints.yml.j2
new file mode 100644
index 000000000..11c9195bb
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.9/glusterfs-registry-endpoints.yml.j2
@@ -0,0 +1,12 @@
+---
+apiVersion: v1
+kind: Endpoints
+metadata:
+ name: glusterfs-{{ glusterfs_name }}-endpoints
+subsets:
+- addresses:
+{% for node in glusterfs_nodes %}
+ - ip: {{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }}
+{% endfor %}
+ ports:
+ - port: 1
diff --git a/roles/openshift_storage_glusterfs/templates/v3.9/glusterfs-registry-service.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.9/glusterfs-registry-service.yml.j2
new file mode 100644
index 000000000..3f869d2b7
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.9/glusterfs-registry-service.yml.j2
@@ -0,0 +1,10 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: glusterfs-{{ glusterfs_name }}-endpoints
+spec:
+ ports:
+ - port: 1
+status:
+ loadBalancer: {}
diff --git a/roles/openshift_storage_glusterfs/templates/v3.9/glusterfs-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.9/glusterfs-storageclass.yml.j2
new file mode 100644
index 000000000..ca87807fe
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.9/glusterfs-storageclass.yml.j2
@@ -0,0 +1,17 @@
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+ name: glusterfs-{{ glusterfs_name }}
+{% if glusterfs_storageclass_default is defined and glusterfs_storageclass_default %}
+ annotations:
+ storageclass.kubernetes.io/is-default-class: "true"
+{% endif %}
+provisioner: kubernetes.io/glusterfs
+parameters:
+ resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}"
+ restuser: "admin"
+{% if glusterfs_heketi_admin_key is defined %}
+ secretNamespace: "{{ glusterfs_namespace }}"
+ secretName: "heketi-{{ glusterfs_name }}-admin-secret"
+{%- endif -%}
diff --git a/roles/openshift_storage_glusterfs/templates/v3.9/heketi-endpoints.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.9/heketi-endpoints.yml.j2
new file mode 100644
index 000000000..99cbdf748
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.9/heketi-endpoints.yml.j2
@@ -0,0 +1,12 @@
+---
+apiVersion: v1
+kind: Endpoints
+metadata:
+ name: heketi-db-{{ glusterfs_name }}-endpoints
+subsets:
+- addresses:
+{% for node in glusterfs_nodes %}
+ - ip: {{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }}
+{% endfor %}
+ ports:
+ - port: 1
diff --git a/roles/openshift_storage_glusterfs/templates/v3.9/heketi-service.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.9/heketi-service.yml.j2
new file mode 100644
index 000000000..dcb896441
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.9/heketi-service.yml.j2
@@ -0,0 +1,10 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: heketi-db-{{ glusterfs_name }}-endpoints
+spec:
+ ports:
+ - port: 1
+status:
+ loadBalancer: {}
diff --git a/roles/openshift_storage_glusterfs/templates/v3.9/heketi.json.j2 b/roles/openshift_storage_glusterfs/templates/v3.9/heketi.json.j2
new file mode 100644
index 000000000..565e9be98
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.9/heketi.json.j2
@@ -0,0 +1,42 @@
+{
+ "_port_comment": "Heketi Server Port Number",
+ "port" : "8080",
+
+ "_use_auth": "Enable JWT authorization. Please enable for deployment",
+ "use_auth" : false,
+
+ "_jwt" : "Private keys for access",
+ "jwt" : {
+ "_admin" : "Admin has access to all APIs",
+ "admin" : {
+ "key" : "My Secret"
+ },
+ "_user" : "User only has access to /volumes endpoint",
+ "user" : {
+ "key" : "My Secret"
+ }
+ },
+
+ "_glusterfs_comment": "GlusterFS Configuration",
+ "glusterfs" : {
+
+ "_executor_comment": "Execute plugin. Possible choices: mock, kubernetes, ssh",
+ "executor" : "{{ glusterfs_heketi_executor }}",
+
+ "_db_comment": "Database file name",
+ "db" : "/var/lib/heketi/heketi.db",
+
+ "sshexec" : {
+ "keyfile" : "/etc/heketi/private_key",
+ "port" : "{{ glusterfs_heketi_ssh_port }}",
+ "user" : "{{ glusterfs_heketi_ssh_user }}",
+ "sudo" : {{ glusterfs_heketi_ssh_sudo | lower }}
+ },
+
+ "_auto_create_block_hosting_volume": "Creates Block Hosting volumes automatically if not found or exsisting volume exhausted",
+ "auto_create_block_hosting_volume": {{ glusterfs_block_host_vol_create | lower }},
+
+ "_block_hosting_volume_size": "New block hosting volume will be created in size mentioned, This is considered only if auto-create is enabled.",
+ "block_hosting_volume_size": {{ glusterfs_block_host_vol_size }}
+ }
+}
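The booleans in this template are piped through the lower filter because Jinja renders Python's True/False capitalized, which is invalid JSON; a quick standalone check:

    import json
    from jinja2 import Environment

    t = Environment().from_string('{"sudo": {{ glusterfs_heketi_ssh_sudo | lower }}}')
    rendered = t.render(glusterfs_heketi_ssh_sudo=True)
    print(rendered)              # {"sudo": true}
    print(json.loads(rendered))  # {'sudo': True}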
diff --git a/roles/openshift_storage_glusterfs/templates/v3.9/topology.json.j2 b/roles/openshift_storage_glusterfs/templates/v3.9/topology.json.j2
new file mode 100644
index 000000000..d6c28f6dd
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.9/topology.json.j2
@@ -0,0 +1,49 @@
+{
+ "clusters": [
+{%- set clusters = {} -%}
+{%- for node in glusterfs_nodes -%}
+  {%- set cluster = hostvars[node].glusterfs_cluster if 'glusterfs_cluster' in hostvars[node] else '1' -%}
+  {%- if cluster in clusters -%}
+    {%- set _dummy = clusters[cluster].append(node) -%}
+  {%- else -%}
+    {%- set _dummy = clusters.update({cluster: [ node, ]}) -%}
+  {%- endif -%}
+{%- endfor -%}
+{%- for cluster in clusters -%}
+ {
+ "nodes": [
+{%- for node in clusters[cluster] -%}
+ {
+ "node": {
+ "hostnames": {
+ "manage": [
+{%- if 'glusterfs_hostname' in hostvars[node] -%}
+ "{{ hostvars[node].glusterfs_hostname }}"
+{%- elif 'openshift' in hostvars[node] -%}
+ "{{ hostvars[node].openshift.node.nodename }}"
+{%- else -%}
+ "{{ node }}"
+{%- endif -%}
+ ],
+ "storage": [
+{%- if 'glusterfs_ip' in hostvars[node] -%}
+ "{{ hostvars[node].glusterfs_ip }}"
+{%- else -%}
+ "{{ hostvars[node].openshift.common.ip }}"
+{%- endif -%}
+ ]
+ },
+ "zone": {{ hostvars[node].glusterfs_zone | default(1) }}
+ },
+ "devices": [
+{%- for device in hostvars[node].glusterfs_devices -%}
+ "{{ device }}"{% if not loop.last %},{% endif %}
+{%- endfor -%}
+ ]
+ }{% if not loop.last %},{% endif %}
+{%- endfor -%}
+ ]
+ }{% if not loop.last %},{% endif %}
+{%- endfor -%}
+ ]
+}
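Because of the aggressive `{%- ... -%}` whitespace trimming, the rendered JSON is hard to picture from the template alone. With a single hypothetical node (zone and cluster left to their defaults), the output, reindented for readability, would be roughly:

```
{
  "clusters": [
    {
      "nodes": [
        {
          "node": {
            "hostnames": {
              "manage": ["node1.example.com"],
              "storage": ["192.0.2.10"]
            },
            "zone": 1
          },
          "devices": ["/dev/sdb", "/dev/sdc"]
        }
      ]
    }
  ]
}
```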
diff --git a/roles/openshift_storage_nfs/meta/main.yml b/roles/openshift_storage_nfs/meta/main.yml
index d61e6873a..3ae04e59f 100644
--- a/roles/openshift_storage_nfs/meta/main.yml
+++ b/roles/openshift_storage_nfs/meta/main.yml
@@ -10,5 +10,5 @@ galaxy_info:
versions:
- 7
dependencies:
-- role: lib_os_firewall
+- role: lib_utils
- role: openshift_facts
diff --git a/roles/openshift_storage_nfs_lvm/README.md b/roles/openshift_storage_nfs_lvm/README.md
index cc674d3fd..a11219f6d 100644
--- a/roles/openshift_storage_nfs_lvm/README.md
+++ b/roles/openshift_storage_nfs_lvm/README.md
@@ -1,7 +1,7 @@
# openshift_storage_nfs_lvm
This role is useful to create and export nfs disks for openshift persistent volumes.
-It does so by creating lvm partitions on an already setup pv/vg, creating xfs
+It does so by creating lvm partitions on an already setup pv/vg, creating xfs
filesystem on each partition, mounting the partitions, exporting the mounts via NFS
and creating a json file for each mount that an openshift master can use to
create persistent volumes.
@@ -20,7 +20,7 @@ create persistent volumes.
osnl_nfs_export_options: "*(rw,sync,all_squash)"
# Directory, where the created partitions should be mounted. They will be
-# mounted as <osnl_mount_dir>/<lvm volume name>
+# mounted as <osnl_mount_dir>/<lvm volume name>
osnl_mount_dir: /exports/openshift
# Volume Group to use.
@@ -64,11 +64,10 @@ None
## Example Playbook
With this playbook, 2 5Gig lvm partitions are created, named stg5g0003 and stg5g0004
-Both of them are mounted into `/exports/openshift` directory. Both directories are
+Both of them are mounted into `/exports/openshift` directory. Both directories are
exported via NFS. json files are created in /root.
- hosts: nfsservers
- become: no
remote_user: root
gather_facts: no
roles:
@@ -94,7 +93,6 @@ exported via NFS. json files are created in /root.
* Create an ansible playbook, say `setupnfs.yaml`:
```
- hosts: nfsservers
- become: no
remote_user: root
gather_facts: no
roles:
diff --git a/roles/openshift_storage_nfs_lvm/meta/main.yml b/roles/openshift_storage_nfs_lvm/meta/main.yml
index 50d94f6a3..de47708a5 100644
--- a/roles/openshift_storage_nfs_lvm/meta/main.yml
+++ b/roles/openshift_storage_nfs_lvm/meta/main.yml
@@ -16,3 +16,4 @@ galaxy_info:
- openshift
dependencies:
- role: openshift_facts
+- role: lib_utils
diff --git a/roles/openshift_storage_nfs_lvm/tasks/main.yml b/roles/openshift_storage_nfs_lvm/tasks/main.yml
index c8e7b6d7c..ff92e59e5 100644
--- a/roles/openshift_storage_nfs_lvm/tasks/main.yml
+++ b/roles/openshift_storage_nfs_lvm/tasks/main.yml
@@ -2,7 +2,7 @@
# TODO -- this may actually work on atomic hosts
- fail:
msg: "openshift_storage_nfs_lvm is not compatible with atomic host"
- when: openshift.common.is_atomic | bool
+ when: openshift_is_atomic | bool
- name: Create lvm volumes
lvol: vg={{osnl_volume_group}} lv={{ item }} size={{osnl_volume_size}}G
diff --git a/roles/openshift_storage_nfs_lvm/tasks/nfs.yml b/roles/openshift_storage_nfs_lvm/tasks/nfs.yml
index 94dc63bd2..9a72adbdc 100644
--- a/roles/openshift_storage_nfs_lvm/tasks/nfs.yml
+++ b/roles/openshift_storage_nfs_lvm/tasks/nfs.yml
@@ -1,7 +1,7 @@
---
- name: Install NFS server
package: name=nfs-utils state=present
- when: not openshift.common.is_containerized | bool
+ when: not openshift_is_containerized | bool
register: result
until: result is succeeded
diff --git a/roles/openshift_version/defaults/main.yml b/roles/openshift_version/defaults/main.yml
index 354699637..e2e6538c9 100644
--- a/roles/openshift_version/defaults/main.yml
+++ b/roles/openshift_version/defaults/main.yml
@@ -8,3 +8,5 @@ openshift_service_type_dict:
openshift_service_type: "{{ openshift_service_type_dict[openshift_deployment_type] }}"
openshift_use_crio_only: False
+
+l_first_master_version_task_file: "{{ openshift_is_containerized | ternary('first_master_containerized_version.yml', 'first_master_rpm_version.yml') }}"
diff --git a/roles/openshift_version/tasks/check_available_rpms.yml b/roles/openshift_version/tasks/check_available_rpms.yml
new file mode 100644
index 000000000..fea0daf77
--- /dev/null
+++ b/roles/openshift_version/tasks/check_available_rpms.yml
@@ -0,0 +1,10 @@
+---
+- name: Get available {{ openshift_service_type }} version
+ repoquery:
+ name: "{{ openshift_service_type}}{{ '-' ~ openshift_release ~ '*' if openshift_release is defined else '' }}"
+ ignore_excluders: true
+ register: rpm_results
+
+- fail:
+ msg: "Package {{ openshift_service_type}} not found"
+ when: not rpm_results.results.package_found
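The `name` expression appends a wildcarded release to the service type only when openshift_release is defined; two hypothetical expansions:

```
# openshift_service_type: origin, openshift_release: "3.9"
#   name => "origin-3.9*"
# openshift_service_type: atomic-openshift, openshift_release undefined
#   name => "atomic-openshift"
```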
diff --git a/roles/openshift_version/tasks/first_master.yml b/roles/openshift_version/tasks/first_master.yml
new file mode 100644
index 000000000..374725086
--- /dev/null
+++ b/roles/openshift_version/tasks/first_master.yml
@@ -0,0 +1,30 @@
+---
+# Determine the openshift_version to configure if none has been specified or set previously.
+
+# Protect the installed version by default unless explicitly told not to, or given an
+# openshift_version already.
+- name: Use openshift.common.version fact as version to configure if already installed
+ set_fact:
+ openshift_version: "{{ openshift.common.version }}"
+ when:
+ - openshift.common.version is defined
+ - openshift_version is not defined or openshift_version == ""
+ - openshift_protect_installed_version | bool
+
+- include_tasks: "{{ l_first_master_version_task_file }}"
+
+- block:
+ - debug:
+ msg: "openshift_pkg_version was not defined. Falling back to -{{ openshift_version }}"
+ - set_fact:
+ openshift_pkg_version: -{{ openshift_version }}
+ when:
+ - openshift_pkg_version is not defined
+ - openshift_upgrade_target is not defined
+
+- block:
+ - debug:
+ msg: "openshift_image_tag was not defined. Falling back to v{{ openshift_version }}"
+ - set_fact:
+ openshift_image_tag: v{{ openshift_version }}
+ when: openshift_image_tag is not defined
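A worked example of the two fallback blocks, assuming the included task file settled on openshift_version "3.9.0" and neither variable was set in inventory:

```
# Resulting facts:
#   openshift_pkg_version: "-3.9.0"  # leading "-" so it can be appended to a package name
#   openshift_image_tag:   "v3.9.0"  # leading "v" matches the image tag convention
```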
diff --git a/roles/openshift_version/tasks/set_version_containerized.yml b/roles/openshift_version/tasks/first_master_containerized_version.yml
index 71f957b78..3ed1d2cfe 100644
--- a/roles/openshift_version/tasks/set_version_containerized.yml
+++ b/roles/openshift_version/tasks/first_master_containerized_version.yml
@@ -7,6 +7,7 @@
when:
- openshift_image_tag is defined
- openshift_version is not defined
+ - not (openshift_version_reinit | default(false))
- name: Set containerized version to configure if openshift_release specified
set_fact:
@@ -20,7 +21,7 @@
docker run --rm {{ openshift_cli_image }}:latest version
register: cli_image_version
when:
- - openshift_version is not defined
+ - openshift_version is not defined or openshift_version_reinit | default(false)
- not openshift_use_crio_only
# Origin latest = pre-release version (i.e. v1.3.0-alpha.1-321-gb095e3a)
@@ -34,7 +35,7 @@
- set_fact:
openshift_version: "{{ cli_image_version.stdout_lines[0].split(' ')[1].split('-')[0][1:] }}"
- when: openshift_version is not defined
+ when: openshift_version is not defined or openshift_version_reinit | default(false)
# If we got an openshift_version like "3.2", lookup the latest 3.2 container version
# and use that value instead.
@@ -62,4 +63,4 @@
# dangly +c0mm1t-offset tags in the version. See also,
# openshift_facts.py
- set_fact:
- openshift_version: "{{ openshift_version | oo_chomp_commit_offset }}"
+ openshift_version: "{{ openshift_version | lib_utils_oo_chomp_commit_offset }}"
diff --git a/roles/openshift_version/tasks/first_master_rpm_version.yml b/roles/openshift_version/tasks/first_master_rpm_version.yml
new file mode 100644
index 000000000..5d92f90c6
--- /dev/null
+++ b/roles/openshift_version/tasks/first_master_rpm_version.yml
@@ -0,0 +1,20 @@
+---
+- name: Set rpm version to configure if openshift_pkg_version specified
+ set_fact:
+ # Expects a leading "-" in inventory; strip it off here and remove the trailing release.
+ openshift_version: "{{ openshift_pkg_version[1:].split('-')[0] }}"
+ when:
+ - openshift_pkg_version is defined
+ - openshift_version is not defined
+ - not (openshift_version_reinit | default(false))
+
+# These tasks should only be run against masters and nodes
+- name: Set openshift_version for rpm installation
+ include_tasks: check_available_rpms.yml
+
+- set_fact:
+ openshift_version: "{{ rpm_results.results.versions.available_versions.0 }}"
+ when: openshift_version is not defined or (openshift_version_reinit | default(false))
+- set_fact:
+ openshift_pkg_version: "-{{ rpm_results.results.versions.available_versions.0 }}"
+ when: openshift_version_reinit | default(false)
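To make the string surgery in the first task concrete, a hypothetical inventory value and the steps it goes through:

```
# openshift_pkg_version: "-3.9.0-1.el7"
# openshift_pkg_version[1:]                =>  "3.9.0-1.el7"  (leading "-" stripped)
# openshift_pkg_version[1:].split('-')[0]  =>  "3.9.0"        (trailing release dropped)
```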
diff --git a/roles/openshift_version/tasks/main.yml b/roles/openshift_version/tasks/main.yml
index e50d5371e..b42794858 100644
--- a/roles/openshift_version/tasks/main.yml
+++ b/roles/openshift_version/tasks/main.yml
@@ -1,210 +1,2 @@
---
-# Determine the openshift_version to configure if none has been specified or set previously.
-
-- set_fact:
- is_containerized: "{{ openshift.common.is_containerized | default(False) | bool }}"
- is_atomic: "{{ openshift.common.is_atomic | default(False) | bool }}"
-
-# Block attempts to install origin without specifying some kind of version information.
-# This is because the latest tags for origin are usually alpha builds, which should not
-# be used by default. Users must indicate what they want.
-- name: Abort when we cannot safely guess what Origin image version the user wanted
- fail:
- msg: |-
- To install a containerized Origin release, you must set openshift_release or
- openshift_image_tag in your inventory to specify which version of the OpenShift
- component images to use. You may want the latest (usually alpha) releases or
- a more stable release. (Suggestion: add openshift_release="x.y" to inventory.)
- when:
- - is_containerized | bool
- - openshift.common.deployment_type == 'origin'
- - openshift_release is not defined
- - openshift_image_tag is not defined
-
-# Normalize some values that we need in a certain format that might be confusing:
-- set_fact:
- openshift_release: "{{ openshift_release[1:] }}"
- when:
- - openshift_release is defined
- - openshift_release[0] == 'v'
-
-- set_fact:
- openshift_release: "{{ openshift_release | string }}"
- when:
- - openshift_release is defined
-
-# Verify that the image tag is in a valid format
-- when:
- - openshift_image_tag is defined
- - openshift_image_tag != "latest"
- block:
-
- # Verifies that when the deployment type is origin the version:
- # - starts with a v
- # - Has 3 integers separated by dots
- # It also allows for optional trailing data which:
- # - must start with a dash
- # - may contain numbers, letters, dashes and dots.
- - name: (Origin) Verify openshift_image_tag is valid
- when: openshift.common.deployment_type == 'origin'
- assert:
- that:
- - "{{ openshift_image_tag is match('(^v?\\d+\\.\\d+\\.\\d+(-[\\w\\-\\.]*)?$)') }}"
- msg: |-
- openshift_image_tag must be in the format v#.#.#[-optional.#]. Examples: v1.2.3, v3.5.1-alpha.1
- You specified openshift_image_tag={{ openshift_image_tag }}
-
- # Verifies that when the deployment type is openshift-enterprise the version:
- # - starts with a v
- # - Has at least 2 integers separated by dots
- # It also allows for optional trailing data which:
- # - must start with a dash
- # - may contain numbers
- # - may contain dots (https://github.com/openshift/openshift-ansible/issues/5192)
- #
- - name: (Enterprise) Verify openshift_image_tag is valid
- when: openshift.common.deployment_type == 'openshift-enterprise'
- assert:
- that:
- - "{{ openshift_image_tag is match('(^v\\d+\\.\\d+(\\.\\d+)*(-\\d+(\\.\\d+)*)?$)') }}"
- msg: |-
- openshift_image_tag must be in the format v#.#[.#[.#]]. Examples: v1.2, v3.4.1, v3.5.1.3,
- v3.5.1.3.4, v1.2-1, v1.2.3-4, v1.2.3-4.5, v1.2.3-4.5.6
- You specified openshift_image_tag={{ openshift_image_tag }}
-
-# Make sure we copy this to a fact if given a var:
-- set_fact:
- openshift_version: "{{ openshift_version | string }}"
- when: openshift_version is defined
-
-# Protect the installed version by default unless explicitly told not to, or given an
-# openshift_version already.
-- name: Use openshift.common.version fact as version to configure if already installed
- set_fact:
- openshift_version: "{{ openshift.common.version }}"
- when:
- - openshift.common.version is defined
- - openshift_version is not defined or openshift_version == ""
- - openshift_protect_installed_version | bool
-
-# The rest of these tasks should only execute on
-# masters and nodes as we can verify they have subscriptions
-- when:
- - inventory_hostname in groups['oo_masters_to_config'] or inventory_hostname in groups['oo_nodes_to_config']
- block:
- - name: Set openshift_version for rpm installation
- include_tasks: set_version_rpm.yml
- when: not is_containerized | bool
-
- - name: Set openshift_version for containerized installation
- include_tasks: set_version_containerized.yml
- when: is_containerized | bool
-
- - block:
- - name: Get available {{ openshift_service_type}} version
- repoquery:
- name: "{{ openshift_service_type}}"
- ignore_excluders: true
- register: rpm_results
- - fail:
- msg: "Package {{ openshift_service_type}} not found"
- when: not rpm_results.results.package_found
- - set_fact:
- openshift_rpm_version: "{{ rpm_results.results.versions.available_versions.0 | default('0.0', True) }}"
- - name: Fail if rpm version and docker image version are different
- fail:
- msg: "OCP rpm version {{ openshift_rpm_version }} is different from OCP image version {{ openshift_version }}"
- # Both versions have the same string representation
- when:
- - openshift_rpm_version != openshift_version
- # if openshift_pkg_version or openshift_image_tag is defined, user gives a permission the rpm and docker image versions can differ
- - openshift_pkg_version is not defined
- - openshift_image_tag is not defined
- when:
- - is_containerized | bool
- - not is_atomic | bool
-
- # Warn if the user has provided an openshift_image_tag but is not doing a containerized install
- # NOTE: This will need to be modified/removed for future container + rpm installations work.
- - name: Warn if openshift_image_tag is defined when not doing a containerized install
- debug:
- msg: >
- openshift_image_tag is used for containerized installs. If you are trying to
- specify an image for a non-container install see oreg_url or oreg_url_master or oreg_url_node.
- when:
- - not is_containerized | bool
- - openshift_image_tag is defined
-
- # At this point we know openshift_version is set appropriately. Now we set
- # openshift_image_tag and openshift_pkg_version, so all roles can always assume
- # each of this variables *will* be set correctly and can use them per their
- # intended purpose.
-
- - block:
- - debug:
- msg: "openshift_image_tag was not defined. Falling back to v{{ openshift_version }}"
-
- - set_fact:
- openshift_image_tag: v{{ openshift_version }}
-
- when: openshift_image_tag is not defined
-
- - block:
- - debug:
- msg: "openshift_pkg_version was not defined. Falling back to -{{ openshift_version }}"
-
- - set_fact:
- openshift_pkg_version: -{{ openshift_version }}
-
- when:
- - openshift_pkg_version is not defined
- - openshift_upgrade_target is not defined
-
- - fail:
- msg: openshift_version role was unable to set openshift_version
- name: Abort if openshift_version was not set
- when: openshift_version is not defined
-
- - fail:
- msg: openshift_version role was unable to set openshift_image_tag
- name: Abort if openshift_image_tag was not set
- when: openshift_image_tag is not defined
-
- - fail:
- msg: openshift_version role was unable to set openshift_pkg_version
- name: Abort if openshift_pkg_version was not set
- when:
- - openshift_pkg_version is not defined
- - openshift_upgrade_target is not defined
-
-
- - fail:
- msg: "No OpenShift version available; please ensure your systems are fully registered and have access to appropriate yum repositories."
- name: Abort if openshift_pkg_version was not set
- when:
- - not is_containerized | bool
- - openshift_version == '0.0'
-
- # We can't map an openshift_release to full rpm version like we can with containers; make sure
- # the rpm version we looked up matches the release requested and error out if not.
- - name: For an RPM install, abort when the release requested does not match the available version.
- when:
- - not is_containerized | bool
- - openshift_release is defined
- assert:
- that:
- - openshift_version.startswith(openshift_release) | bool
- msg: |-
- You requested openshift_release {{ openshift_release }}, which is not matched by
- the latest OpenShift RPM we detected as {{ openshift_service_type }}-{{ openshift_version }}
- on host {{ inventory_hostname }}.
- We will only install the latest RPMs, so please ensure you are getting the release
- you expect. You may need to adjust your Ansible inventory, modify the repositories
- available on the host, or run the appropriate OpenShift upgrade playbook.
-
- # The end result of these three variables is quite important so make sure they are displayed and logged:
- - debug: var=openshift_release
-
- - debug: var=openshift_image_tag
-
- - debug: var=openshift_pkg_version
+# This role is meant to be used with include_role.
diff --git a/roles/openshift_version/tasks/masters_and_nodes.yml b/roles/openshift_version/tasks/masters_and_nodes.yml
new file mode 100644
index 000000000..eddd5ff42
--- /dev/null
+++ b/roles/openshift_version/tasks/masters_and_nodes.yml
@@ -0,0 +1,42 @@
+---
+# These tasks should only be run against masters and nodes
+
+- block:
+ - name: Check openshift_version for rpm installation
+ include_tasks: check_available_rpms.yml
+ - name: Fail if rpm version and docker image version are different
+ fail:
+ msg: "OCP rpm version {{ rpm_results.results.versions.available_versions.0 }} is different from OCP image version {{ openshift_version }}"
+ # Both versions have the same string representation
+ when:
+ - openshift_version not in rpm_results.results.versions.available_versions.0
+ - openshift_version_reinit | default(false)
+
+ # block when
+ when: not openshift_is_atomic | bool
+
+# We can't map an openshift_release to full rpm version like we can with containers; make sure
+# the rpm version we looked up matches the release requested and error out if not.
+- name: For an RPM install, abort when the release requested does not match the available version.
+ when:
+ - not openshift_is_containerized | bool
+ - openshift_release is defined
+ assert:
+ that:
+ - l_rpm_version.startswith(openshift_release) | bool
+ msg: |-
+ You requested openshift_release {{ openshift_release }}, which is not matched by
+ the latest OpenShift RPM we detected as {{ openshift_service_type }}-{{ l_rpm_version }}
+ on host {{ inventory_hostname }}.
+ We will only install the latest RPMs, so please ensure you are getting the release
+ you expect. You may need to adjust your Ansible inventory, modify the repositories
+ available on the host, or run the appropriate OpenShift upgrade playbook.
+ vars:
+ l_rpm_version: "{{ rpm_results.results.versions.available_versions.0 }}"
+
+# The end result of these three variables is quite important so make sure they are displayed and logged:
+- debug: var=openshift_release
+
+- debug: var=openshift_image_tag
+
+- debug: var=openshift_pkg_version
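Two hypothetical cases for the release assert above:

```
# openshift_release: "3.9", latest available rpm version: "3.9.0"
#   => "3.9.0".startswith("3.9") is true, assert passes
# openshift_release: "3.9", latest available rpm version: "3.10.14"
#   => "3.10.14".startswith("3.9") is false, install aborts with the message above
```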
diff --git a/roles/openshift_version/tasks/set_version_rpm.yml b/roles/openshift_version/tasks/set_version_rpm.yml
deleted file mode 100644
index c7ca5ceae..000000000
--- a/roles/openshift_version/tasks/set_version_rpm.yml
+++ /dev/null
@@ -1,24 +0,0 @@
----
-- name: Set rpm version to configure if openshift_pkg_version specified
- set_fact:
- # Expects a leading "-" in inventory, strip it off here, and remove trailing release,
- openshift_version: "{{ openshift_pkg_version[1:].split('-')[0] }}"
- when:
- - openshift_pkg_version is defined
- - openshift_version is not defined
-
-- block:
- - name: Get available {{ openshift_service_type}} version
- repoquery:
- name: "{{ openshift_service_type}}"
- ignore_excluders: true
- register: rpm_results
-
- - fail:
- msg: "Package {{ openshift_service_type}} not found"
- when: not rpm_results.results.package_found
-
- - set_fact:
- openshift_version: "{{ rpm_results.results.versions.available_versions.0 | default('0.0', True) }}"
- when:
- - openshift_version is not defined
diff --git a/roles/openshift_web_console/defaults/main.yml b/roles/openshift_web_console/defaults/main.yml
new file mode 100644
index 000000000..4f395398c
--- /dev/null
+++ b/roles/openshift_web_console/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+# TODO: This is temporary and will be updated to use taints and tolerations so that the console runs on the masters
+openshift_web_console_nodeselector: {"region":"infra"}
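Since the default is explicitly temporary, it can be overridden per inventory; a hypothetical override (the label key and value are illustrative only):

```
openshift_web_console_nodeselector: {"region": "primary"}
```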
diff --git a/roles/openshift_web_console/meta/main.yaml b/roles/openshift_web_console/meta/main.yaml
new file mode 100644
index 000000000..033c1e3a3
--- /dev/null
+++ b/roles/openshift_web_console/meta/main.yaml
@@ -0,0 +1,19 @@
+---
+galaxy_info:
+ author: OpenShift Development <dev@lists.openshift.redhat.com>
+ description: Deploy OpenShift web console
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 2.4
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ - name: Fedora
+ versions:
+ - all
+ categories:
+ - openshift
+dependencies:
+- role: lib_openshift
+- role: openshift_facts
diff --git a/roles/openshift_web_console/tasks/install.yml b/roles/openshift_web_console/tasks/install.yml
new file mode 100644
index 000000000..50e72657f
--- /dev/null
+++ b/roles/openshift_web_console/tasks/install.yml
@@ -0,0 +1,111 @@
+---
+# Fact setting
+- name: Set default image variables based on deployment type
+ include_vars: "{{ item }}"
+ with_first_found:
+ - "{{ openshift_deployment_type | default(deployment_type) }}.yml"
+ - "default_images.yml"
+
+- name: Set openshift_web_console facts
+ set_fact:
+ openshift_web_console_prefix: "{{ openshift_web_console_prefix | default(__openshift_web_console_prefix) }}"
+ openshift_web_console_version: "{{ openshift_web_console_version | default(__openshift_web_console_version) }}"
+ openshift_web_console_image_name: "{{ openshift_web_console_image_name | default(__openshift_web_console_image_name) }}"
+ # Default the replica count to the number of masters.
+ openshift_web_console_replica_count: "{{ openshift_web_console_replica_count | default(groups.oo_masters_to_config | length) }}"
+
+- name: Ensure openshift-web-console project exists
+ oc_project:
+ name: openshift-web-console
+ state: present
+ node_selector:
+ - ""
+
+- name: Make temp directory for the web console config files
+ command: mktemp -d /tmp/console-ansible-XXXXXX
+ register: mktemp
+ changed_when: False
+
+- name: Copy the web console config template to temp directory
+ copy:
+ src: "{{ __console_files_location }}/{{ item }}"
+ dest: "{{ mktemp.stdout }}/{{ item }}"
+ with_items:
+ - "{{ __console_template_file }}"
+ - "{{ __console_rbac_file }}"
+ - "{{ __console_config_file }}"
+
+- name: Update the web console config properties
+ yedit:
+ src: "{{ mktemp.stdout }}/{{ __console_config_file }}"
+ edits:
+ - key: clusterInfo#consolePublicURL
+ # Must have a trailing slash
+ value: "{{ openshift.master.public_console_url }}/"
+ - key: clusterInfo#masterPublicURL
+ value: "{{ openshift.master.public_api_url }}"
+ - key: clusterInfo#logoutPublicURL
+ value: "{{ openshift.master.logout_url | default('') }}"
+ - key: features#inactivityTimeoutMinutes
+ value: "{{ openshift_web_console_inactivity_timeout_minutes | default(0) }}"
+
+ # TODO: The new extensions properties cannot be set until
+ # origin-web-console-server has been updated with the API changes since
+ # `extensions` in the old asset config was an array.
+
+ # - key: extensions#scriptURLs
+ # value: "{{ openshift_web_console_extension_script_urls | default([]) }}"
+ # - key: extensions#stylesheetURLs
+ # value: "{{ openshift_web_console_extension_stylesheet_urls | default([]) }}"
+ # - key: extensions#properties
+ # value: "{{ openshift_web_console_extension_properties | default({}) }}"
+
+ # DEPRECATED PROPERTIES
+ # These properties have been renamed and will be removed from the install
+ # in a future pull. Keep both the old and new properties for now so that
+ # the install is not broken while the origin-web-console image is updated.
+ - key: publicURL
+ # Must have a trailing slash
+ value: "{{ openshift.master.public_console_url }}/"
+ - key: logoutURL
+ value: "{{ openshift.master.logout_url | default('') }}"
+ - key: masterPublicURL
+ value: "{{ openshift.master.public_api_url }}"
+ separator: '#'
+ state: present
+
+- slurp:
+ src: "{{ mktemp.stdout }}/{{ __console_config_file }}"
+ register: config
+
+- name: Reconcile with the web console RBAC file
+ shell: >
+ {{ openshift_client_binary }} process -f "{{ mktemp.stdout }}/{{ __console_rbac_file }}" | {{ openshift_client_binary }} auth reconcile -f -
+
+- name: Apply the web console template file
+ shell: >
+ {{ openshift_client_binary }} process -f "{{ mktemp.stdout }}/{{ __console_template_file }}"
+ --param API_SERVER_CONFIG="{{ config['content'] | b64decode }}"
+ --param IMAGE="{{ openshift_web_console_prefix }}{{ openshift_web_console_image_name }}:{{ openshift_web_console_version }}"
+ --param NODE_SELECTOR={{ openshift_web_console_nodeselector | to_json | quote }}
+ --param REPLICA_COUNT="{{ openshift_web_console_replica_count }}"
+ | {{ openshift_client_binary }} apply -f -
+
+- name: Verify that the web console is running
+ command: >
+ curl -k https://webconsole.openshift-web-console.svc/healthz
+ args:
+ # Disables the following warning:
+ # Consider using get_url or uri module rather than running curl
+ warn: no
+ register: console_health
+ until: console_health.stdout == 'ok'
+ retries: 120
+ delay: 1
+ changed_when: false
+
+- name: Remove temp directory
+ file:
+ state: absent
+ name: "{{ mktemp.stdout }}"
+ changed_when: False
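For orientation, the yedit edits above (with `#` as the nesting separator) shape the console config roughly as follows, assuming a hypothetical public master URL and the defaults for the optional values:

```
clusterInfo:
  consolePublicURL: https://master.example.com:8443/console/
  masterPublicURL: https://master.example.com:8443
  logoutPublicURL: ''
features:
  inactivityTimeoutMinutes: 0
# deprecated flat keys kept while the origin-web-console image catches up:
publicURL: https://master.example.com:8443/console/
logoutURL: ''
masterPublicURL: https://master.example.com:8443
```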
diff --git a/roles/openshift_web_console/tasks/main.yml b/roles/openshift_web_console/tasks/main.yml
new file mode 100644
index 000000000..937bebf25
--- /dev/null
+++ b/roles/openshift_web_console/tasks/main.yml
@@ -0,0 +1,8 @@
+---
+# do any asserts here
+
+- include_tasks: install.yml
+ when: openshift_web_console_install | default(true) | bool
+
+- include_tasks: remove.yml
+ when: not openshift_web_console_install | default(true) | bool
diff --git a/roles/openshift_web_console/tasks/remove.yml b/roles/openshift_web_console/tasks/remove.yml
new file mode 100644
index 000000000..f0712a993
--- /dev/null
+++ b/roles/openshift_web_console/tasks/remove.yml
@@ -0,0 +1,5 @@
+---
+- name: Remove openshift-web-console project
+ oc_project:
+ name: openshift-web-console
+ state: absent
diff --git a/roles/openshift_web_console/tasks/update_console_config.yml b/roles/openshift_web_console/tasks/update_console_config.yml
new file mode 100644
index 000000000..e347c0193
--- /dev/null
+++ b/roles/openshift_web_console/tasks/update_console_config.yml
@@ -0,0 +1,71 @@
+---
+# This task updates asset config values in the webconsole-config config map in
+# the openshift-web-console namespace. The values to set are passed in the
+# variable `console_config_edits`, which is an array of objects with `key` and
+# `value` properties in the same format as the `yedit` module's `edits`. Only
+# properties passed are updated. The separator for nested properties is `#`.
+#
+# Note that this triggers a redeployment of the console and a brief downtime
+# since it uses a `Recreate` strategy.
+#
+# Example usage:
+#
+# - include_role:
+# name: openshift_web_console
+# tasks_from: update_console_config.yml
+# vars:
+# console_config_edits:
+# - key: clusterInfo#loggingPublicURL
+# value: "https://{{ openshift_logging_kibana_hostname }}"
+# when: openshift_web_console_install | default(true) | bool
+
+- name: Read web console config map
+ oc_configmap:
+ namespace: openshift-web-console
+ name: webconsole-config
+ state: list
+ register: webconsole_config
+
+- name: Make temp directory
+ command: mktemp -d /tmp/console-ansible-XXXXXX
+ register: mktemp_console
+ changed_when: False
+
+- name: Copy web console config to temp file
+ copy:
+ content: "{{webconsole_config.results.results[0].data['webconsole-config.yaml']}}"
+ dest: "{{ mktemp_console.stdout }}/webconsole-config.yaml"
+
+- name: Change web console config properties
+ yedit:
+ src: "{{ mktemp_console.stdout }}/webconsole-config.yaml"
+ edits: "{{console_config_edits}}"
+ separator: '#'
+ state: present
+
+- name: Update web console config map
+ oc_configmap:
+ namespace: openshift-web-console
+ name: webconsole-config
+ state: present
+ from_file:
+ webconsole-config.yaml: "{{ mktemp_console.stdout }}/webconsole-config.yaml"
+
+- name: Remove temp directory
+ file:
+ state: absent
+ name: "{{ mktemp_console.stdout }}"
+ changed_when: False
+
+# TODO: Only rollout if config has changed.
+# There's currently no command to trigger a rollout for a k8s deployment
+# without changing the pod spec. Add an annotation to force a rollout after
+# the config map has been edited.
+- name: Rollout updated web console deployment
+ oc_edit:
+ kind: deployments
+ name: webconsole
+ namespace: openshift-web-console
+ separator: '#'
+ content:
+ spec#template#metadata#annotations#installer-triggered-rollout: "{{ ansible_date_time.iso8601_micro }}"
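To make the header's usage example concrete: one `console_config_edits` entry maps to one nested key in the config map via the `#` separator; a sketch with a hypothetical Kibana host:

```
# edit passed in:
#   - key: clusterInfo#loggingPublicURL
#     value: "https://kibana.example.com"
# resulting fragment of webconsole-config.yaml:
#   clusterInfo:
#     loggingPublicURL: https://kibana.example.com
```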
diff --git a/roles/openshift_web_console/vars/default_images.yml b/roles/openshift_web_console/vars/default_images.yml
new file mode 100644
index 000000000..7adb8a0d0
--- /dev/null
+++ b/roles/openshift_web_console/vars/default_images.yml
@@ -0,0 +1,4 @@
+---
+__openshift_web_console_prefix: "docker.io/openshift/"
+__openshift_web_console_version: "latest"
+__openshift_web_console_image_name: "origin-web-console"
diff --git a/roles/openshift_web_console/vars/main.yml b/roles/openshift_web_console/vars/main.yml
new file mode 100644
index 000000000..e91048e38
--- /dev/null
+++ b/roles/openshift_web_console/vars/main.yml
@@ -0,0 +1,6 @@
+---
+__console_files_location: "../../../files/origin-components/"
+
+__console_template_file: "console-template.yaml"
+__console_rbac_file: "console-rbac-template.yaml"
+__console_config_file: "console-config.yaml"
diff --git a/roles/openshift_web_console/vars/openshift-enterprise.yml b/roles/openshift_web_console/vars/openshift-enterprise.yml
new file mode 100644
index 000000000..721ac1d27
--- /dev/null
+++ b/roles/openshift_web_console/vars/openshift-enterprise.yml
@@ -0,0 +1,4 @@
+---
+__openshift_web_console_prefix: "registry.access.redhat.com/openshift3/"
+__openshift_web_console_version: "v3.9"
+__openshift_web_console_image_name: "ose-web-console"
diff --git a/roles/os_firewall/README.md b/roles/os_firewall/README.md
index be0b8291a..5ee11f7bd 100644
--- a/roles/os_firewall/README.md
+++ b/roles/os_firewall/README.md
@@ -32,7 +32,7 @@ Use iptables:
---
- hosts: servers
task:
- - include_role:
+ - import_role:
name: os_firewall
vars:
os_firewall_use_firewalld: false
@@ -44,7 +44,7 @@ Use firewalld:
- hosts: servers
vars:
tasks:
- - include_role:
+ - import_role:
name: os_firewall
vars:
os_firewall_use_firewalld: true
diff --git a/roles/template_service_broker/meta/main.yml b/roles/template_service_broker/meta/main.yml
index ab5a0cf08..f1b56b771 100644
--- a/roles/template_service_broker/meta/main.yml
+++ b/roles/template_service_broker/meta/main.yml
@@ -11,3 +11,5 @@ galaxy_info:
- 7
categories:
- cloud
+dependencies:
+- role: lib_utils
diff --git a/roles/template_service_broker/tasks/install.yml b/roles/template_service_broker/tasks/install.yml
index 1253c1133..604e94602 100644
--- a/roles/template_service_broker/tasks/install.yml
+++ b/roles/template_service_broker/tasks/install.yml
@@ -1,9 +1,9 @@
---
# Fact setting
-- name: Set default image variables based on deployment type
+- name: Set default image variables based on openshift_deployment_type
include_vars: "{{ item }}"
with_first_found:
- - "{{ openshift_deployment_type | default(deployment_type) }}.yml"
+ - "{{ openshift_deployment_type }}.yml"
- "default_images.yml"
- name: set template_service_broker facts
@@ -21,7 +21,6 @@
- command: mktemp -d /tmp/tsb-ansible-XXXXXX
register: mktemp
changed_when: False
- become: no
- copy:
src: "{{ __tsb_files_location }}/{{ item }}"
@@ -44,16 +43,16 @@
- name: Apply template file
shell: >
- {{ openshift.common.client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_template_file }}"
+ {{ openshift_client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_template_file }}"
--param API_SERVER_CONFIG="{{ config['content'] | b64decode }}"
--param IMAGE="{{ template_service_broker_prefix }}{{ template_service_broker_image_name }}:{{ template_service_broker_version }}"
--param NODE_SELECTOR={{ template_service_broker_selector | to_json | quote }}
- | {{ openshift.common.client_binary }} apply -f -
+ | {{ openshift_client_binary }} apply -f -
# reconcile with rbac
- name: Reconcile with RBAC file
shell: >
- {{ openshift.common.client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_rbac_file }}" | {{ openshift.common.client_binary }} auth reconcile -f -
+ {{ openshift_client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_rbac_file }}" | {{ openshift_client_binary }} auth reconcile -f -
# Check that the TSB is running
- name: Verify that TSB is running
@@ -80,10 +79,9 @@
# Register with broker
- name: Register TSB with broker
shell: >
- {{ openshift.common.client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_broker_file }}" --param CA_BUNDLE="{{ __ca_bundle.content }}" | {{ openshift.common.client_binary }} apply -f -
+ {{ openshift_client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_broker_file }}" --param CA_BUNDLE="{{ __ca_bundle.content }}" | {{ openshift_client_binary }} apply -f -
- file:
state: absent
name: "{{ mktemp.stdout }}"
changed_when: False
- become: no
diff --git a/roles/template_service_broker/tasks/remove.yml b/roles/template_service_broker/tasks/remove.yml
index 8b5593ff9..db1b558e4 100644
--- a/roles/template_service_broker/tasks/remove.yml
+++ b/roles/template_service_broker/tasks/remove.yml
@@ -2,7 +2,6 @@
- command: mktemp -d /tmp/tsb-ansible-XXXXXX
register: mktemp
changed_when: False
- become: no
- copy:
src: "{{ __tsb_files_location }}/{{ item }}"
@@ -13,11 +12,11 @@
- name: Delete TSB broker
shell: >
- {{ openshift.common.client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_broker_file }}" | {{ openshift.common.client_binary }} delete --ignore-not-found -f -
+ {{ openshift_client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_broker_file }}" | {{ openshift_client_binary }} delete --ignore-not-found -f -
- name: Delete TSB objects
shell: >
- {{ openshift.common.client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_template_file }}" | {{ openshift.common.client_binary }} delete --ignore-not-found -f -
+ {{ openshift_client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_template_file }}" | {{ openshift_client_binary }} delete --ignore-not-found -f -
- name: empty out tech preview extension file for service console UI
copy:
@@ -32,4 +31,3 @@
state: absent
name: "{{ mktemp.stdout }}"
changed_when: False
- become: no
diff --git a/roles/template_service_broker/vars/default_images.yml b/roles/template_service_broker/vars/default_images.yml
index 77afe1f43..dc164a4db 100644
--- a/roles/template_service_broker/vars/default_images.yml
+++ b/roles/template_service_broker/vars/default_images.yml
@@ -1,4 +1,4 @@
---
-__template_service_broker_prefix: "docker.io/openshift/"
+__template_service_broker_prefix: "docker.io/openshift/origin-"
__template_service_broker_version: "latest"
-__template_service_broker_image_name: "origin"
+__template_service_broker_image_name: "template-service-broker"
diff --git a/roles/template_service_broker/vars/openshift-enterprise.yml b/roles/template_service_broker/vars/openshift-enterprise.yml
index dfab1e01b..b65b97691 100644
--- a/roles/template_service_broker/vars/openshift-enterprise.yml
+++ b/roles/template_service_broker/vars/openshift-enterprise.yml
@@ -1,4 +1,4 @@
---
-__template_service_broker_prefix: "registry.access.redhat.com/openshift3/"
+__template_service_broker_prefix: "registry.access.redhat.com/openshift3/ose-"
__template_service_broker_version: "v3.7"
-__template_service_broker_image_name: "ose"
+__template_service_broker_image_name: "template-service-broker"
diff --git a/roles/tuned/tasks/main.yml b/roles/tuned/tasks/main.yml
index e95d274d5..4a28d47b2 100644
--- a/roles/tuned/tasks/main.yml
+++ b/roles/tuned/tasks/main.yml
@@ -11,7 +11,7 @@
block:
- name: Set tuned OpenShift variables
set_fact:
- openshift_tuned_guest_profile: "{{ 'atomic-guest' if openshift.common.is_atomic else 'virtual-guest' }}"
+ openshift_tuned_guest_profile: "{{ 'atomic-guest' if openshift_is_atomic else 'virtual-guest' }}"
- name: Ensure directory structure exists
file: