Diffstat (limited to 'roles')
-rw-r--r--  roles/ansible_service_broker/tasks/install.yml | 17
-rw-r--r--  roles/calico_master/tasks/main.yml | 2
-rw-r--r--  roles/container_runtime/tasks/docker_upgrade_check.yml | 25
-rw-r--r--  roles/container_runtime/tasks/package_docker.yml | 11
-rw-r--r--  roles/etcd/tasks/auxiliary/drop_etcdctl.yml | 2
-rw-r--r--  roles/etcd/tasks/certificates/fetch_client_certificates_from_ca.yml | 1
-rw-r--r--  roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml | 2
-rw-r--r--  roles/kuryr/tasks/node.yaml | 2
-rw-r--r--  roles/kuryr/templates/cni-daemonset.yaml.j2 | 19
-rw-r--r--  roles/kuryr/templates/configmap.yaml.j2 | 357
-rw-r--r--  roles/lib_utils/action_plugins/generate_pv_pvcs_list.py (renamed from roles/openshift_persistent_volumes/action_plugins/generate_pv_pvcs_list.py) | 0
-rw-r--r--  roles/lib_utils/filter_plugins/oo_cert_expiry.py (renamed from roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py) | 0
-rw-r--r--  roles/lib_utils/filter_plugins/oo_filters.py | 34
-rw-r--r--  roles/lib_utils/filter_plugins/openshift_aws_filters.py (renamed from roles/openshift_aws/filter_plugins/openshift_aws_filters.py) | 0
-rw-r--r--  roles/lib_utils/filter_plugins/openshift_hosted_filters.py (renamed from roles/openshift_hosted/filter_plugins/openshift_hosted_filters.py) | 0
-rw-r--r--  roles/lib_utils/filter_plugins/openshift_master.py (renamed from roles/openshift_master_facts/filter_plugins/openshift_master.py) | 6
-rwxr-xr-x  roles/lib_utils/library/delegated_serial_command.py (renamed from roles/etcd/library/delegated_serial_command.py) | 0
-rw-r--r--  roles/lib_utils/library/openshift_cert_expiry.py (renamed from roles/openshift_certificate_expiry/library/openshift_cert_expiry.py) | 0
-rw-r--r--  roles/lib_utils/library/openshift_container_binary_sync.py (renamed from roles/openshift_cli/library/openshift_container_binary_sync.py) | 0
-rw-r--r--  roles/lib_utils/lookup_plugins/openshift_master_facts_default_predicates.py | 143
-rw-r--r--  roles/lib_utils/lookup_plugins/openshift_master_facts_default_priorities.py | 117
-rw-r--r--  roles/lib_utils/test/conftest.py (renamed from roles/openshift_certificate_expiry/test/conftest.py) | 53
-rw-r--r--  roles/lib_utils/test/openshift_master_facts_bad_input_tests.py (renamed from roles/openshift_master_facts/test/openshift_master_facts_bad_input_tests.py) | 0
-rw-r--r--  roles/lib_utils/test/openshift_master_facts_conftest.py (renamed from roles/openshift_master_facts/test/conftest.py) | 0
-rw-r--r--  roles/lib_utils/test/openshift_master_facts_default_predicates_tests.py (renamed from roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py) | 0
-rw-r--r--  roles/lib_utils/test/openshift_master_facts_default_priorities_tests.py (renamed from roles/openshift_master_facts/test/openshift_master_facts_default_priorities_tests.py) | 0
-rw-r--r--  roles/lib_utils/test/test_fakeopensslclasses.py (renamed from roles/openshift_certificate_expiry/test/test_fakeopensslclasses.py) | 0
-rw-r--r--  roles/lib_utils/test/test_load_and_handle_cert.py (renamed from roles/openshift_certificate_expiry/test/test_load_and_handle_cert.py) | 0
-rw-r--r--  roles/openshift_aws/defaults/main.yml | 14
-rw-r--r--  roles/openshift_aws/tasks/accept_nodes.yml | 4
-rw-r--r--  roles/openshift_aws/tasks/build_node_group.yml | 1
-rw-r--r--  roles/openshift_aws/tasks/provision.yml | 17
-rw-r--r--  roles/openshift_aws/tasks/provision_elb.yml | 15
-rw-r--r--  roles/openshift_aws/tasks/provision_nodes.yml | 17
-rw-r--r--  roles/openshift_aws/tasks/uninstall_security_group.yml | 14
-rw-r--r--  roles/openshift_aws/tasks/uninstall_ssh_keys.yml | 9
-rw-r--r--  roles/openshift_aws/tasks/uninstall_vpc.yml | 36
-rw-r--r--  roles/openshift_aws/tasks/wait_for_groups.yml | 1
-rw-r--r--  roles/openshift_buildoverrides/vars/main.yml | 1
-rw-r--r--  roles/openshift_ca/tasks/main.yml | 3
-rw-r--r--  roles/openshift_certificate_expiry/tasks/main.yml | 4
-rw-r--r--  roles/openshift_cli/tasks/main.yml | 2
-rw-r--r--  roles/openshift_cloud_provider/tasks/main.yml | 3
-rw-r--r--  roles/openshift_cloud_provider/tasks/vsphere.yml | 6
-rw-r--r--  roles/openshift_cloud_provider/templates/openstack.conf.j2 | 4
-rw-r--r--  roles/openshift_cloud_provider/templates/vsphere.conf.j2 | 15
-rw-r--r--  roles/openshift_cloud_provider/vars/main.yml | 1
-rwxr-xr-x  roles/openshift_examples/examples-sync.sh | 2
-rw-r--r--  roles/openshift_examples/files/examples/v3.9/db-templates/mariadb-persistent-template.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/v3.9/db-templates/mongodb-persistent-template.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/v3.9/db-templates/mysql-persistent-template.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/v3.9/db-templates/postgresql-persistent-template.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/v3.9/db-templates/redis-persistent-template.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/v3.9/image-streams/image-streams-centos7.json | 23
-rw-r--r--  roles/openshift_examples/files/examples/v3.9/image-streams/image-streams-rhel7.json | 23
-rw-r--r--  roles/openshift_examples/files/examples/v3.9/quickstart-templates/cakephp-mysql-persistent.json | 5
-rw-r--r--  roles/openshift_examples/files/examples/v3.9/quickstart-templates/cakephp-mysql.json | 3
-rw-r--r--  roles/openshift_examples/files/examples/v3.9/quickstart-templates/dancer-mysql-persistent.json | 5
-rw-r--r--  roles/openshift_examples/files/examples/v3.9/quickstart-templates/dancer-mysql.json | 3
-rw-r--r--  roles/openshift_examples/files/examples/v3.9/quickstart-templates/django-postgresql-persistent.json | 5
-rw-r--r--  roles/openshift_examples/files/examples/v3.9/quickstart-templates/django-postgresql.json | 3
-rw-r--r--  roles/openshift_examples/files/examples/v3.9/quickstart-templates/httpd.json | 10
-rw-r--r--  roles/openshift_examples/files/examples/v3.9/quickstart-templates/jenkins-ephemeral-template.json | 11
-rw-r--r--  roles/openshift_examples/files/examples/v3.9/quickstart-templates/jenkins-persistent-template.json | 13
-rw-r--r--  roles/openshift_examples/files/examples/v3.9/quickstart-templates/nodejs-mongodb-persistent.json | 5
-rw-r--r--  roles/openshift_examples/files/examples/v3.9/quickstart-templates/nodejs-mongodb.json | 3
-rw-r--r--  roles/openshift_examples/files/examples/v3.9/quickstart-templates/rails-postgresql-persistent.json | 5
-rw-r--r--  roles/openshift_examples/files/examples/v3.9/quickstart-templates/rails-postgresql.json | 3
-rw-r--r--  roles/openshift_examples/meta/main.yml | 1
-rw-r--r--  roles/openshift_excluder/tasks/verify_excluder.yml | 2
-rw-r--r--  roles/openshift_expand_partition/tasks/main.yml | 2
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py | 5
-rw-r--r--  roles/openshift_grafana/defaults/main.yml | 12
-rw-r--r--  roles/openshift_grafana/files/grafana-ocp-oauth.yml | 661
-rw-r--r--  roles/openshift_grafana/files/grafana-ocp.yml | 76
-rw-r--r--  roles/openshift_grafana/files/openshift-cluster-monitoring.json | 5138
-rw-r--r--  roles/openshift_grafana/meta/main.yml | 13
-rw-r--r--  roles/openshift_grafana/tasks/gf-permissions.yml | 12
-rw-r--r--  roles/openshift_grafana/tasks/main.yml | 122
-rw-r--r--  roles/openshift_health_checker/callback_plugins/zz_failure_summary.py | 2
-rw-r--r--  roles/openshift_health_checker/openshift_checks/__init__.py | 43
-rw-r--r--  roles/openshift_health_checker/openshift_checks/disk_availability.py | 2
-rw-r--r--  roles/openshift_health_checker/openshift_checks/docker_image_availability.py | 34
-rw-r--r--  roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py | 2
-rw-r--r--  roles/openshift_health_checker/openshift_checks/logging/kibana.py | 13
-rw-r--r--  roles/openshift_health_checker/openshift_checks/ovs_version.py | 27
-rw-r--r--  roles/openshift_health_checker/openshift_checks/package_version.py | 58
-rw-r--r--  roles/openshift_health_checker/test/kibana_test.py | 12
-rw-r--r--  roles/openshift_health_checker/test/ovs_version_test.py | 23
-rw-r--r--  roles/openshift_health_checker/test/package_version_test.py | 5
-rw-r--r--  roles/openshift_hosted/defaults/main.yml | 2
-rw-r--r--  roles/openshift_hosted/tasks/router.yml | 1
-rw-r--r--  roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml | 13
-rw-r--r--  roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml | 2
-rw-r--r--  roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml | 2
-rw-r--r--  roles/openshift_hosted_templates/files/v3.8/enterprise/registry-console.yaml | 2
-rw-r--r--  roles/openshift_hosted_templates/files/v3.9/enterprise/registry-console.yaml | 2
-rw-r--r--  roles/openshift_logging/README.md | 3
-rw-r--r--  roles/openshift_logging/filter_plugins/openshift_logging.py | 9
-rw-r--r--  roles/openshift_logging/library/openshift_logging_facts.py | 4
-rw-r--r--  roles/openshift_logging/tasks/annotate_ops_projects.yaml | 1
-rw-r--r--  roles/openshift_logging/tasks/delete_logging.yaml | 10
-rw-r--r--  roles/openshift_logging/tasks/generate_certs.yaml | 2
-rw-r--r--  roles/openshift_logging/tasks/install_logging.yaml | 18
-rw-r--r--  roles/openshift_logging/tasks/procure_server_certs.yaml | 2
-rw-r--r--  roles/openshift_logging_curator/vars/main.yml | 4
-rw-r--r--  roles/openshift_logging_elasticsearch/tasks/get_es_version.yml | 4
-rw-r--r--  roles/openshift_logging_elasticsearch/tasks/restart_cluster.yml | 4
-rw-r--r--  roles/openshift_logging_elasticsearch/vars/main.yml | 4
-rw-r--r--  roles/openshift_logging_fluentd/defaults/main.yml | 1
-rw-r--r--  roles/openshift_logging_fluentd/tasks/main.yaml | 4
-rw-r--r--  roles/openshift_logging_fluentd/vars/main.yml | 4
-rw-r--r--  roles/openshift_logging_kibana/vars/main.yml | 4
-rw-r--r--  roles/openshift_logging_mux/defaults/main.yml | 2
-rw-r--r--  roles/openshift_logging_mux/tasks/main.yaml | 2
-rw-r--r--  roles/openshift_logging_mux/vars/main.yml | 4
-rw-r--r--  roles/openshift_master/tasks/main.yml | 1
-rw-r--r--  roles/openshift_master/tasks/upgrade/rpm_upgrade.yml | 23
-rw-r--r--  roles/openshift_master/tasks/upgrade/upgrade_scheduler.yml | 2
-rw-r--r--  roles/openshift_master_certificates/tasks/main.yml | 1
-rw-r--r--  roles/openshift_master_facts/tasks/main.yml | 3
-rw-r--r--  roles/openshift_metrics/tasks/install_metrics.yaml | 6
-rw-r--r--  roles/openshift_metrics/tasks/oc_apply.yaml | 8
-rw-r--r--  roles/openshift_metrics/tasks/uninstall_metrics.yaml | 10
-rw-r--r--  roles/openshift_named_certificates/filter_plugins/openshift_named_certificates.py | 21
-rw-r--r--  roles/openshift_node/defaults/main.yml | 18
-rw-r--r--  roles/openshift_node/tasks/install.yml | 38
-rw-r--r--  roles/openshift_node/tasks/node_system_container.yml | 19
-rw-r--r--  roles/openshift_node/tasks/upgrade/config_changes.yml | 6
-rw-r--r--  roles/openshift_node/tasks/upgrade/rpm_upgrade.yml | 2
-rw-r--r--  roles/openshift_node/tasks/upgrade/rpm_upgrade_install.yml | 2
-rw-r--r--  roles/openshift_openstack/templates/heat_stack.yaml.j2 | 24
-rw-r--r--  roles/openshift_openstack/templates/heat_stack_server.yaml.j2 | 5
-rw-r--r--  roles/openshift_persistent_volumes/tasks/main.yml | 3
-rw-r--r--  roles/openshift_persistent_volumes/tasks/pv.yml | 2
-rw-r--r--  roles/openshift_persistent_volumes/tasks/pvc.yml | 2
-rw-r--r--  roles/openshift_provisioners/tasks/oc_apply.yaml | 12
-rw-r--r--  roles/openshift_sanitize_inventory/filter_plugins/openshift_sanitize_inventory.py | 10
-rw-r--r--  roles/openshift_service_catalog/defaults/main.yml | 1
-rw-r--r--  roles/openshift_service_catalog/tasks/generate_certs.yml | 6
-rw-r--r--  roles/openshift_service_catalog/tasks/install.yml | 2
-rw-r--r--  roles/openshift_service_catalog/templates/api_server.j2 | 2
-rw-r--r--  roles/openshift_service_catalog/templates/controller_manager.j2 | 6
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.9/deploy-heketi-template.yml | 133
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.9/gluster-s3-pvcs-template.yml | 67
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.9/gluster-s3-template.yml | 140
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.9/glusterblock-provisioner.yml | 104
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.9/glusterfs-template.yml | 154
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.9/heketi-template.yml | 136
-rw-r--r--  roles/openshift_storage_glusterfs/filter_plugins/openshift_storage_glusterfs.py | 23
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml | 1
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml | 1
-rw-r--r--  roles/openshift_version/tasks/check_available_rpms.yml | 2
-rw-r--r--  roles/openshift_version/tasks/first_master_containerized_version.yml | 5
-rw-r--r--  roles/openshift_version/tasks/first_master_rpm_version.yml | 6
-rw-r--r--  roles/openshift_version/tasks/masters_and_nodes.yml | 7
-rw-r--r--  roles/openshift_web_console/defaults/main.yml | 3
-rw-r--r--  roles/openshift_web_console/tasks/install.yml | 114
-rw-r--r--  roles/openshift_web_console/tasks/rollout_console.yml | 20
-rw-r--r--  roles/openshift_web_console/tasks/update_asset_config.yml | 68
-rw-r--r--  roles/openshift_web_console/tasks/update_console_config.yml | 66
-rw-r--r--  roles/openshift_web_console/vars/default_images.yml | 4
-rw-r--r--  roles/openshift_web_console/vars/main.yml | 1
-rw-r--r--  roles/openshift_web_console/vars/openshift-enterprise.yml | 4
-rw-r--r--  roles/os_firewall/tasks/firewalld.yml | 5
-rw-r--r--  roles/template_service_broker/defaults/main.yml | 2
-rw-r--r--  roles/template_service_broker/tasks/install.yml | 21
-rw-r--r--  roles/template_service_broker/tasks/remove.yml | 15
-rw-r--r--  roles/template_service_broker/vars/default_images.yml | 4
-rw-r--r--  roles/template_service_broker/vars/openshift-enterprise.yml | 4
-rw-r--r--  roles/tuned/tasks/main.yml | 7
171 files changed, 8285 insertions(+), 532 deletions(-)
diff --git a/roles/ansible_service_broker/tasks/install.yml b/roles/ansible_service_broker/tasks/install.yml
index ba2f7293b..f869b5fae 100644
--- a/roles/ansible_service_broker/tasks/install.yml
+++ b/roles/ansible_service_broker/tasks/install.yml
@@ -72,6 +72,15 @@
- apiGroups: ["image.openshift.io", ""]
resources: ["images"]
verbs: ["get", "list"]
+ - apiGroups: ["network.openshift.io"]
+ resources: ["clusternetworks", "netnamespaces"]
+ verbs: ["get"]
+ - apiGroups: ["network.openshift.io"]
+ resources: ["netnamespaces"]
+ verbs: ["update"]
+ - apiGroups: ["networking.k8s.io"]
+ resources: ["networkpolicies"]
+ verbs: ["create", "delete"]
- name: Create asb-access cluster role
oc_clusterrole:
@@ -366,6 +375,11 @@
secret:
secretName: etcd-auth-secret
+- name: set auth name and type facts if needed
+ set_fact:
+ ansible_service_broker_registry_auth_type: "secret"
+ ansible_service_broker_registry_auth_name: "asb-registry-auth"
+ when: ansible_service_broker_registry_user != "" and ansible_service_broker_registry_password != ""
# TODO: saw a oc_configmap in the library, but didn't understand how to get it to do the following:
- name: Create config map for ansible-service-broker
@@ -393,6 +407,8 @@
org: {{ ansible_service_broker_registry_organization }}
tag: {{ ansible_service_broker_registry_tag }}
white_list: {{ ansible_service_broker_registry_whitelist | to_yaml }}
+ auth_type: "{{ ansible_service_broker_registry_auth_type | default("") }}"
+ auth_name: "{{ ansible_service_broker_registry_auth_name | default("") }}"
- type: local_openshift
name: localregistry
namespaces: ['openshift']
@@ -438,6 +454,7 @@
data: "{{ ansible_service_broker_registry_user }}"
- path: password
data: "{{ ansible_service_broker_registry_password }}"
+ when: ansible_service_broker_registry_user != "" and ansible_service_broker_registry_password != ""
- name: Create the Broker resource in the catalog
oc_obj:
diff --git a/roles/calico_master/tasks/main.yml b/roles/calico_master/tasks/main.yml
index 05415a4d6..834ebba64 100644
--- a/roles/calico_master/tasks/main.yml
+++ b/roles/calico_master/tasks/main.yml
@@ -23,7 +23,7 @@
-f {{ mktemp.stdout }}/calico-policy-controller.yml
--config={{ openshift.common.config_base }}/master/admin.kubeconfig
register: calico_create_output
- failed_when: ('already exists' not in calico_create_output.stderr) and ('created' not in calico_create_output.stdout)
+ failed_when: "('already exists' not in calico_create_output.stderr) and ('created' not in calico_create_output.stdout) and calico_create_output.rc != 0"
changed_when: ('created' in calico_create_output.stdout)
- name: Calico Master | Delete temp directory
diff --git a/roles/container_runtime/tasks/docker_upgrade_check.yml b/roles/container_runtime/tasks/docker_upgrade_check.yml
index 7831f4c7d..8dd916e79 100644
--- a/roles/container_runtime/tasks/docker_upgrade_check.yml
+++ b/roles/container_runtime/tasks/docker_upgrade_check.yml
@@ -21,6 +21,7 @@
retries: 4
until: curr_docker_version is succeeded
changed_when: false
+ when: not openshift_is_atomic | bool
- name: Get latest available version of Docker
command: >
@@ -29,7 +30,9 @@
retries: 4
until: avail_docker_version is succeeded
# Don't expect docker rpm to be available on hosts that don't already have it installed:
- when: pkg_check.rc == 0
+ when:
+ - not openshift_is_atomic | bool
+ - pkg_check.rc == 0
failed_when: false
changed_when: false
@@ -37,9 +40,10 @@
msg: This playbook requires access to Docker 1.12 or later
# Disable the 1.12 requirement if the user set a specific Docker version
when:
- - docker_version is not defined
- - docker_upgrade is not defined or docker_upgrade | bool == True
- - (pkg_check.rc == 0 and (avail_docker_version.stdout == "" or avail_docker_version.stdout is version_compare('1.12','<')))
+ - not openshift_is_atomic | bool
+ - docker_version is not defined
+ - docker_upgrade is not defined or docker_upgrade | bool == True
+ - (pkg_check.rc == 0 and (avail_docker_version.stdout == "" or avail_docker_version.stdout is version_compare('1.12','<')))
# Default l_docker_upgrade to False, we'll set to True if an upgrade is required:
- set_fact:
@@ -48,14 +52,17 @@
# Make sure a docker_version is set if none was requested:
- set_fact:
docker_version: "{{ avail_docker_version.stdout }}"
- when: pkg_check.rc == 0 and docker_version is not defined
+ when:
+ - not openshift_is_atomic | bool
+ - pkg_check.rc == 0 and docker_version is not defined
- name: Flag for Docker upgrade if necessary
set_fact:
l_docker_upgrade: True
when:
- - pkg_check.rc == 0
- - curr_docker_version.stdout is version_compare(docker_version,'<')
+ - not openshift_is_atomic | bool
+ - pkg_check.rc == 0
+ - curr_docker_version.stdout is version_compare(docker_version,'<')
# Additional checks for Atomic hosts:
- name: Determine available Docker
@@ -70,5 +77,5 @@
- fail:
msg: This playbook requires access to Docker 1.12 or later
when:
- - openshift_is_atomic | bool
- - l_docker_version.avail_version | default(l_docker_version.curr_version, true) is version_compare('1.12','<')
+ - openshift_is_atomic | bool
+ - l_docker_version.avail_version | default(l_docker_version.curr_version, true) is version_compare('1.12','<')
diff --git a/roles/container_runtime/tasks/package_docker.yml b/roles/container_runtime/tasks/package_docker.yml
index d6e7e7fed..ed9a2709b 100644
--- a/roles/container_runtime/tasks/package_docker.yml
+++ b/roles/container_runtime/tasks/package_docker.yml
@@ -1,6 +1,17 @@
---
- include_tasks: common/pre.yml
+# In some cases, some services may be run as containers and docker may still
+# be installed via rpm.
+- include_tasks: common/atomic_proxy.yml
+ when:
+ - >
+ (openshift_use_system_containers | default(False)) | bool
+ or (openshift_use_etcd_system_container | default(False)) | bool
+ or (openshift_use_openvswitch_system_container | default(False)) | bool
+ or (openshift_use_node_system_container | default(False)) | bool
+ or (openshift_use_master_system_container | default(False)) | bool
+
- name: Get current installed Docker version
command: "{{ repoquery_installed }} --qf '%{version}' docker"
when: not openshift_is_atomic | bool
diff --git a/roles/etcd/tasks/auxiliary/drop_etcdctl.yml b/roles/etcd/tasks/auxiliary/drop_etcdctl.yml
index 881a8c270..cab835e20 100644
--- a/roles/etcd/tasks/auxiliary/drop_etcdctl.yml
+++ b/roles/etcd/tasks/auxiliary/drop_etcdctl.yml
@@ -1,7 +1,7 @@
---
- name: Install etcd for etcdctl
package: name=etcd{{ '-' + etcd_version if etcd_version is defined else '' }} state=present
- when: not openshift_is_atomic | bool
+ when: not openshift_is_containerized | bool
register: result
until: result is succeeded
diff --git a/roles/etcd/tasks/certificates/fetch_client_certificates_from_ca.yml b/roles/etcd/tasks/certificates/fetch_client_certificates_from_ca.yml
index 78578a055..ce295d2f5 100644
--- a/roles/etcd/tasks/certificates/fetch_client_certificates_from_ca.yml
+++ b/roles/etcd/tasks/certificates/fetch_client_certificates_from_ca.yml
@@ -57,6 +57,7 @@
# Certificates must be signed serially in order to avoid competing
# for the serial file.
+# delegated_serial_command is a custom module in lib_utils
- name: Sign and create the client crt
delegated_serial_command:
command: >
diff --git a/roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml b/roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml
index 987380d0c..7c8b87d99 100644
--- a/roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml
+++ b/roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml
@@ -50,6 +50,7 @@
# Certificates must be signed serially in order to avoid competing
# for the serial file.
+# delegated_serial_command is a custom module in lib_utils
- name: Sign and create the server crt
delegated_serial_command:
command: >
@@ -83,6 +84,7 @@
# Certificates must be signed serially in order to avoid competing
# for the serial file.
+# delegated_serial_command is a custom module in lib_utils
- name: Sign and create the peer crt
delegated_serial_command:
command: >
diff --git a/roles/kuryr/tasks/node.yaml b/roles/kuryr/tasks/node.yaml
index 08f2d5adc..41d0ead20 100644
--- a/roles/kuryr/tasks/node.yaml
+++ b/roles/kuryr/tasks/node.yaml
@@ -40,7 +40,7 @@
regexp: '^OPTIONS="?(.*?)"?$'
backrefs: yes
backup: yes
- line: 'OPTIONS="\1 --disable dns,proxy,plugins"'
+ line: 'OPTIONS="\1 --disable proxy"'
- name: force node restart to disable the proxy
service:
diff --git a/roles/kuryr/templates/cni-daemonset.yaml.j2 b/roles/kuryr/templates/cni-daemonset.yaml.j2
index 39348ae90..09f4c7dfe 100644
--- a/roles/kuryr/templates/cni-daemonset.yaml.j2
+++ b/roles/kuryr/templates/cni-daemonset.yaml.j2
@@ -26,6 +26,13 @@ spec:
image: kuryr/cni:latest
imagePullPolicy: IfNotPresent
command: [ "cni_ds_init" ]
+ env:
+ - name: CNI_DAEMON
+ value: "True"
+ - name: KUBERNETES_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
securityContext:
privileged: true
volumeMounts:
@@ -38,6 +45,10 @@ spec:
subPath: kuryr-cni.conf
- name: etc
mountPath: /etc
+ - name: proc
+ mountPath: /host_proc
+ - name: openvswitch
+ mountPath: /var/run/openvswitch
volumes:
- name: bin
hostPath:
@@ -50,4 +61,10 @@ spec:
name: kuryr-config
- name: etc
hostPath:
- path: /etc
\ No newline at end of file
+ path: /etc
+ - name: proc
+ hostPath:
+ path: /proc
+ - name: openvswitch
+ hostPath:
+ path: /var/run/openvswitch
diff --git a/roles/kuryr/templates/configmap.yaml.j2 b/roles/kuryr/templates/configmap.yaml.j2
index 96c215f00..4bf1dbddf 100644
--- a/roles/kuryr/templates/configmap.yaml.j2
+++ b/roles/kuryr/templates/configmap.yaml.j2
@@ -16,17 +16,17 @@ data:
# Directory for Kuryr vif binding executables. (string value)
#bindir = /usr/libexec/kuryr
+ # Neutron subnetpool name will be prefixed by this. (string value)
+ #subnetpool_name_prefix = kuryrPool
+
+ # baremetal or nested-containers are the supported values. (string value)
+ #deployment_type = baremetal
+
# If set to true, the logging level will be set to DEBUG instead of the default
# INFO level. (boolean value)
# Note: This option can be changed without restarting.
#debug = false
- # DEPRECATED: If set to false, the logging level will be set to WARNING instead
- # of the default INFO level. (boolean value)
- # This option is deprecated for removal.
- # Its value may be silently ignored in the future.
- #verbose = true
-
# The name of a logging configuration file. This file is appended to any
# existing logging configuration files. For details about logging configuration
# files, see the Python logging module documentation. Note that when logging
@@ -46,7 +46,7 @@ data:
# logging will go to stderr as defined by use_stderr. This option is ignored if
# log_config_append is set. (string value)
# Deprecated group/name - [DEFAULT]/logfile
- #log_file = /var/log/kuryr/kuryr-controller.log
+ #log_file = <None>
# (Optional) The base directory used for relative log_file paths. This option
# is ignored if log_config_append is set. (string value)
@@ -65,13 +65,19 @@ data:
# is set. (boolean value)
#use_syslog = false
+ # Enable journald for logging. If running in a systemd environment you may wish
+ # to enable journal support. Doing so will use the journal native protocol
+ # which includes structured metadata in addition to log messages.This option is
+ # ignored if log_config_append is set. (boolean value)
+ #use_journal = false
+
# Syslog facility to receive log lines. This option is ignored if
# log_config_append is set. (string value)
#syslog_log_facility = LOG_USER
# Log output to standard error. This option is ignored if log_config_append is
# set. (boolean value)
- #use_stderr = true
+ #use_stderr = false
# Format string to use for log messages with context. (string value)
#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
@@ -93,7 +99,7 @@ data:
# List of package logging levels in logger=LEVEL pairs. This option is ignored
# if log_config_append is set. (list value)
- #default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
+ #default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,oslo_messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
# Enables or disables publication of error events. (boolean value)
#publish_errors = false
@@ -106,15 +112,86 @@ data:
# value)
#instance_uuid_format = "[instance: %(uuid)s] "
+ # Interval, number of seconds, of log rate limiting. (integer value)
+ #rate_limit_interval = 0
+
+ # Maximum number of logged messages per rate_limit_interval. (integer value)
+ #rate_limit_burst = 0
+
+ # Log level name used by rate limiting: CRITICAL, ERROR, INFO, WARNING, DEBUG
+ # or empty string. Logs with level greater or equal to rate_limit_except_level
+ # are not filtered. An empty string means that all levels are filtered. (string
+ # value)
+ #rate_limit_except_level = CRITICAL
+
# Enables or disables fatal status of deprecations. (boolean value)
#fatal_deprecations = false
[binding]
+ # Configuration options for container interface binding.
- driver = kuryr.lib.binding.drivers.vlan
+ #
+ # From kuryr_kubernetes
+ #
+
+ # The name prefix of the veth endpoint put inside the container. (string value)
+ #veth_dst_prefix = eth
+
+ # Driver to use for binding and unbinding ports. (string value)
+ # Deprecated group/name - [binding]/driver
+ #default_driver = kuryr.lib.binding.drivers.veth
+
+ # Drivers to use for binding and unbinding ports. (list value)
+ #enabled_drivers = kuryr.lib.binding.drivers.veth
+
+ # Specifies the name of the Nova instance interface to link the virtual devices
+ # to (only applicable to some binding drivers. (string value)
link_iface = eth0
+ driver = kuryr.lib.binding.drivers.vlan
+
+
+ [cni_daemon]
+
+ #
+ # From kuryr_kubernetes
+ #
+
+ # Enable CNI Daemon configuration. (boolean value)
+ daemon_enabled = true
+
+ # Bind address for CNI daemon HTTP server. It is recommened to allow only local
+ # connections. (string value)
+ bind_address = 127.0.0.1:50036
+
+ # Maximum number of processes that will be spawned to process requests from CNI
+ # driver. (integer value)
+ #worker_num = 30
+
+ # Time (in seconds) the CNI daemon will wait for VIF annotation to appear in
+ # pod metadata before failing the CNI request. (integer value)
+ #vif_annotation_timeout = 120
+
+ # Kuryr uses pyroute2 library to manipulate networking interfaces. When
+ # processing a high number of Kuryr requests in parallel, it may take kernel
+ # more time to process all networking stack changes. This option allows to tune
+ # internal pyroute2 timeout. (integer value)
+ #pyroute2_timeout = 30
+
+ # Set to True when you are running kuryr-daemon inside a Docker container on
+ # Kubernetes host. E.g. as DaemonSet on Kubernetes cluster Kuryr is supposed to
+ # provide networking for. This mainly means thatkuryr-daemon will look for
+ # network namespaces in $netns_proc_dir instead of /proc. (boolean value)
+ docker_mode = true
+
+ # When docker_mode is set to True, this config option should be set to where
+ # host's /proc directory is mounted. Please note that mounting it is necessary
+ # to allow Kuryr-Kubernetes to move host interfaces between host network
+ # namespaces, which is essential for Kuryr to work. (string value)
+ netns_proc_dir = /host_proc
+
+
[kubernetes]
#
@@ -164,11 +241,6 @@ data:
# The driver that manages VIFs pools for Kubernetes Pods (string value)
vif_pool_driver = {{ kuryr_openstack_enable_pools | default(False) | ternary('nested', 'noop') }}
- [vif_pool]
- ports_pool_max = {{ kuryr_openstack_pool_max | default(0) }}
- ports_pool_min = {{ kuryr_openstack_pool_min | default(1) }}
- ports_pool_batch = {{ kuryr_openstack_pool_batch | default(5) }}
- ports_pool_update_frequency = {{ kuryr_openstack_pool_update_frequency | default(20) }}
[neutron]
# Configuration options for OpenStack Neutron
@@ -232,13 +304,55 @@ data:
external_svc_subnet = {{ kuryr_openstack_external_svc_subnet_id }}
[pod_vif_nested]
+
worker_nodes_subnet = {{ kuryr_openstack_worker_nodes_subnet_id }}
+
+
+ [pool_manager]
+
+ #
+ # From kuryr_kubernetes
+ #
+
+ # Absolute path to socket file that will be used for communication with the
+ # Pool Manager daemon (string value)
+ #sock_file = /run/kuryr/kuryr_manage.sock
+
+
+ [vif_pool]
+
+ #
+ # From kuryr_kubernetes
+ #
+
+ # Set a maximun amount of ports per pool. 0 to disable (integer value)
+ ports_pool_max = {{ kuryr_openstack_pool_max | default(0) }}
+
+ # Set a target minimum size of the pool of ports (integer value)
+ ports_pool_min = {{ kuryr_openstack_pool_min | default(1) }}
+
+ # Number of ports to be created in a bulk request (integer value)
+ ports_pool_batch = {{ kuryr_openstack_pool_batch | default(5) }}
+
+ # Minimun interval (in seconds) between pool updates (integer value)
+ ports_pool_update_frequency = {{ kuryr_openstack_pool_update_frequency | default(20) }}
+
kuryr-cni.conf: |+
[DEFAULT]
#
# From kuryr_kubernetes
#
+
+ # Directory for Kuryr vif binding executables. (string value)
+ #bindir = /usr/libexec/kuryr
+
+ # Neutron subnetpool name will be prefixed by this. (string value)
+ #subnetpool_name_prefix = kuryrPool
+
+ # baremetal or nested-containers are the supported values. (string value)
+ #deployment_type = baremetal
+
# If set to true, the logging level will be set to DEBUG instead of the default
# INFO level. (boolean value)
# Note: This option can be changed without restarting.
@@ -263,7 +377,7 @@ data:
# logging will go to stderr as defined by use_stderr. This option is ignored if
# log_config_append is set. (string value)
# Deprecated group/name - [DEFAULT]/logfile
- #log_file = /var/log/kuryr/cni.log
+ #log_file = <None>
# (Optional) The base directory used for relative log_file paths. This option
# is ignored if log_config_append is set. (string value)
@@ -282,6 +396,12 @@ data:
# is set. (boolean value)
#use_syslog = false
+ # Enable journald for logging. If running in a systemd environment you may wish
+ # to enable journal support. Doing so will use the journal native protocol
+ # which includes structured metadata in addition to log messages.This option is
+ # ignored if log_config_append is set. (boolean value)
+ #use_journal = false
+
# Syslog facility to receive log lines. This option is ignored if
# log_config_append is set. (string value)
#syslog_log_facility = LOG_USER
@@ -310,7 +430,7 @@ data:
# List of package logging levels in logger=LEVEL pairs. This option is ignored
# if log_config_append is set. (list value)
- #default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
+ #default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,oslo_messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
# Enables or disables publication of error events. (boolean value)
#publish_errors = false
@@ -323,14 +443,85 @@ data:
# value)
#instance_uuid_format = "[instance: %(uuid)s] "
+ # Interval, number of seconds, of log rate limiting. (integer value)
+ #rate_limit_interval = 0
+
+ # Maximum number of logged messages per rate_limit_interval. (integer value)
+ #rate_limit_burst = 0
+
+ # Log level name used by rate limiting: CRITICAL, ERROR, INFO, WARNING, DEBUG
+ # or empty string. Logs with level greater or equal to rate_limit_except_level
+ # are not filtered. An empty string means that all levels are filtered. (string
+ # value)
+ #rate_limit_except_level = CRITICAL
+
# Enables or disables fatal status of deprecations. (boolean value)
#fatal_deprecations = false
[binding]
+ # Configuration options for container interface binding.
+
+ #
+ # From kuryr_kubernetes
+ #
+
+ # The name prefix of the veth endpoint put inside the container. (string value)
+ #veth_dst_prefix = eth
+
+ # Driver to use for binding and unbinding ports. (string value)
+ # Deprecated group/name - [binding]/driver
+ #default_driver = kuryr.lib.binding.drivers.veth
+
+ # Drivers to use for binding and unbinding ports. (list value)
+ #enabled_drivers = kuryr.lib.binding.drivers.veth
+
+ # Specifies the name of the Nova instance interface to link the virtual devices
+ # to (only applicable to some binding drivers. (string value)
+ link_iface = eth0
driver = kuryr.lib.binding.drivers.vlan
- link_iface = {{ kuryr_cni_link_interface }}
+
+
+ [cni_daemon]
+
+ #
+ # From kuryr_kubernetes
+ #
+
+ # Enable CNI Daemon configuration. (boolean value)
+ daemon_enabled = true
+
+ # Bind address for CNI daemon HTTP server. It is recommened to allow only local
+ # connections. (string value)
+ bind_address = 127.0.0.1:50036
+
+ # Maximum number of processes that will be spawned to process requests from CNI
+ # driver. (integer value)
+ #worker_num = 30
+
+ # Time (in seconds) the CNI daemon will wait for VIF annotation to appear in
+ # pod metadata before failing the CNI request. (integer value)
+ #vif_annotation_timeout = 120
+
+ # Kuryr uses pyroute2 library to manipulate networking interfaces. When
+ # processing a high number of Kuryr requests in parallel, it may take kernel
+ # more time to process all networking stack changes. This option allows to tune
+ # internal pyroute2 timeout. (integer value)
+ #pyroute2_timeout = 30
+
+ # Set to True when you are running kuryr-daemon inside a Docker container on
+ # Kubernetes host. E.g. as DaemonSet on Kubernetes cluster Kuryr is supposed to
+ # provide networking for. This mainly means thatkuryr-daemon will look for
+ # network namespaces in $netns_proc_dir instead of /proc. (boolean value)
+ docker_mode = true
+
+ # When docker_mode is set to True, this config option should be set to where
+ # host's /proc directory is mounted. Please note that mounting it is necessary
+ # to allow Kuryr-Kubernetes to move host interfaces between host network
+ # namespaces, which is essential for Kuryr to work. (string value)
+ netns_proc_dir = /host_proc
+
[kubernetes]
@@ -341,12 +532,136 @@ data:
# The root URL of the Kubernetes API (string value)
api_root = {{ openshift.master.api_url }}
- # The token to talk to the k8s API
- token_file = /etc/kuryr/token
+ # Absolute path to client cert to connect to HTTPS K8S_API (string value)
+ # ssl_client_crt_file = /etc/kuryr/controller.crt
+
+ # Absolute path client key file to connect to HTTPS K8S_API (string value)
+ # ssl_client_key_file = /etc/kuryr/controller.key
# Absolute path to ca cert file to connect to HTTPS K8S_API (string value)
- ssl_ca_crt_file = /etc/kuryr/ca.crt
+ ssl_ca_crt_file = /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+
+ # The token to talk to the k8s API
+ token_file = /var/run/secrets/kubernetes.io/serviceaccount/token
# HTTPS K8S_API server identity verification (boolean value)
# TODO (apuimedo): Make configurable
ssl_verify_server_crt = True
+
+ # The driver to determine OpenStack project for pod ports (string value)
+ pod_project_driver = default
+
+ # The driver to determine OpenStack project for services (string value)
+ service_project_driver = default
+
+ # The driver to determine Neutron subnets for pod ports (string value)
+ pod_subnets_driver = default
+
+ # The driver to determine Neutron subnets for services (string value)
+ service_subnets_driver = default
+
+ # The driver to determine Neutron security groups for pods (string value)
+ pod_security_groups_driver = default
+
+ # The driver to determine Neutron security groups for services (string value)
+ service_security_groups_driver = default
+
+ # The driver that provides VIFs for Kubernetes Pods. (string value)
+ pod_vif_driver = nested-vlan
+
+ # The driver that manages VIFs pools for Kubernetes Pods (string value)
+ vif_pool_driver = {{ kuryr_openstack_enable_pools | default(False) | ternary('nested', 'noop') }}
+
+ [neutron]
+ # Configuration options for OpenStack Neutron
+
+ #
+ # From kuryr_kubernetes
+ #
+
+ # Authentication URL (string value)
+ auth_url = {{ kuryr_openstack_auth_url }}
+
+ # Authentication type to load (string value)
+ # Deprecated group/name - [neutron]/auth_plugin
+ auth_type = password
+
+ # Domain ID to scope to (string value)
+ user_domain_name = {{ kuryr_openstack_user_domain_name }}
+
+ # User's password (string value)
+ password = {{ kuryr_openstack_password }}
+
+ # Domain name containing project (string value)
+ project_domain_name = {{ kuryr_openstack_project_domain_name }}
+
+ # Project ID to scope to (string value)
+ # Deprecated group/name - [neutron]/tenant-id
+ project_id = {{ kuryr_openstack_project_id }}
+
+ # Token (string value)
+ #token = <None>
+
+ # Trust ID (string value)
+ #trust_id = <None>
+
+ # User's domain id (string value)
+ #user_domain_id = <None>
+
+ # User id (string value)
+ #user_id = <None>
+
+ # Username (string value)
+ # Deprecated group/name - [neutron]/user-name
+ username = {{kuryr_openstack_username }}
+
+ # Whether a plugging operation is failed if the port to plug does not become
+ # active (boolean value)
+ #vif_plugging_is_fatal = false
+
+ # Seconds to wait for port to become active (integer value)
+ #vif_plugging_timeout = 0
+
+ [neutron_defaults]
+
+ pod_security_groups = {{ kuryr_openstack_pod_sg_id }}
+ pod_subnet = {{ kuryr_openstack_pod_subnet_id }}
+ service_subnet = {{ kuryr_openstack_service_subnet_id }}
+ project = {{ kuryr_openstack_pod_project_id }}
+ # TODO (apuimedo): Remove the duplicated line just after this one once the
+ # RDO packaging contains the upstream patch
+ worker_nodes_subnet = {{ kuryr_openstack_worker_nodes_subnet_id }}
+
+ [pod_vif_nested]
+
+ worker_nodes_subnet = {{ kuryr_openstack_worker_nodes_subnet_id }}
+
+
+ [pool_manager]
+
+ #
+ # From kuryr_kubernetes
+ #
+
+ # Absolute path to socket file that will be used for communication with the
+ # Pool Manager daemon (string value)
+ #sock_file = /run/kuryr/kuryr_manage.sock
+
+
+ [vif_pool]
+
+ #
+ # From kuryr_kubernetes
+ #
+
+ # Set a maximun amount of ports per pool. 0 to disable (integer value)
+ ports_pool_max = {{ kuryr_openstack_pool_max | default(0) }}
+
+ # Set a target minimum size of the pool of ports (integer value)
+ ports_pool_min = {{ kuryr_openstack_pool_min | default(1) }}
+
+ # Number of ports to be created in a bulk request (integer value)
+ ports_pool_batch = {{ kuryr_openstack_pool_batch | default(5) }}
+
+ # Minimun interval (in seconds) between pool updates (integer value)
+ ports_pool_update_frequency = {{ kuryr_openstack_pool_update_frequency | default(20) }}
diff --git a/roles/openshift_persistent_volumes/action_plugins/generate_pv_pvcs_list.py b/roles/lib_utils/action_plugins/generate_pv_pvcs_list.py
index eb13a58ba..eb13a58ba 100644
--- a/roles/openshift_persistent_volumes/action_plugins/generate_pv_pvcs_list.py
+++ b/roles/lib_utils/action_plugins/generate_pv_pvcs_list.py
diff --git a/roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py b/roles/lib_utils/filter_plugins/oo_cert_expiry.py
index 58b228fee..58b228fee 100644
--- a/roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py
+++ b/roles/lib_utils/filter_plugins/oo_cert_expiry.py
diff --git a/roles/lib_utils/filter_plugins/oo_filters.py b/roles/lib_utils/filter_plugins/oo_filters.py
index a2ea287cf..ef996fefe 100644
--- a/roles/lib_utils/filter_plugins/oo_filters.py
+++ b/roles/lib_utils/filter_plugins/oo_filters.py
@@ -4,6 +4,7 @@
"""
Custom filters for use in openshift-ansible
"""
+import json
import os
import pdb
import random
@@ -21,13 +22,10 @@ import yaml
from ansible import errors
from ansible.parsing.yaml.dumper import AnsibleDumper
-# ansible.compat.six goes away with Ansible 2.4
-try:
- from ansible.compat.six import string_types, u
- from ansible.compat.six.moves.urllib.parse import urlparse
-except ImportError:
- from ansible.module_utils.six import string_types, u
- from ansible.module_utils.six.moves.urllib.parse import urlparse
+# pylint: disable=import-error,no-name-in-module
+from ansible.module_utils.six import string_types, u
+# pylint: disable=import-error,no-name-in-module
+from ansible.module_utils.six.moves.urllib.parse import urlparse
HAS_OPENSSL = False
try:
@@ -589,6 +587,26 @@ that result to this filter plugin.
return secret_name
+def lib_utils_oo_l_of_d_to_csv(input_list):
+ """Map a list of dictionaries, input_list, into a csv string
+ of json values.
+
+ Example input:
+ [{'var1': 'val1', 'var2': 'val2'}, {'var1': 'val3', 'var2': 'val4'}]
+ Example output:
+ u'{"var1": "val1", "var2": "val2"},{"var1": "val3", "var2": "val4"}'
+ """
+ return ','.join(json.dumps(x) for x in input_list)
+
+
+def map_from_pairs(source, delim="="):
+ ''' Returns a dict given the source and delim delimited '''
+ if source == '':
+ return dict()
+
+ return dict(item.split(delim) for item in source.split(","))
+
+
class FilterModule(object):
""" Custom ansible filter mapping """
@@ -618,4 +636,6 @@ class FilterModule(object):
"lib_utils_oo_contains_rule": lib_utils_oo_contains_rule,
"lib_utils_oo_selector_to_string_list": lib_utils_oo_selector_to_string_list,
"lib_utils_oo_filter_sa_secrets": lib_utils_oo_filter_sa_secrets,
+ "lib_utils_oo_l_of_d_to_csv": lib_utils_oo_l_of_d_to_csv,
+ "map_from_pairs": map_from_pairs
}
diff --git a/roles/openshift_aws/filter_plugins/openshift_aws_filters.py b/roles/lib_utils/filter_plugins/openshift_aws_filters.py
index dfcb11da3..dfcb11da3 100644
--- a/roles/openshift_aws/filter_plugins/openshift_aws_filters.py
+++ b/roles/lib_utils/filter_plugins/openshift_aws_filters.py
diff --git a/roles/openshift_hosted/filter_plugins/openshift_hosted_filters.py b/roles/lib_utils/filter_plugins/openshift_hosted_filters.py
index 003ce5f9e..003ce5f9e 100644
--- a/roles/openshift_hosted/filter_plugins/openshift_hosted_filters.py
+++ b/roles/lib_utils/filter_plugins/openshift_hosted_filters.py
diff --git a/roles/openshift_master_facts/filter_plugins/openshift_master.py b/roles/lib_utils/filter_plugins/openshift_master.py
index ff15f693b..e67b19c28 100644
--- a/roles/openshift_master_facts/filter_plugins/openshift_master.py
+++ b/roles/lib_utils/filter_plugins/openshift_master.py
@@ -10,11 +10,7 @@ from ansible import errors
from ansible.parsing.yaml.dumper import AnsibleDumper
from ansible.plugins.filter.core import to_bool as ansible_bool
-# ansible.compat.six goes away with Ansible 2.4
-try:
- from ansible.compat.six import string_types, u
-except ImportError:
- from ansible.module_utils.six import string_types, u
+from ansible.module_utils.six import string_types, u
import yaml
diff --git a/roles/etcd/library/delegated_serial_command.py b/roles/lib_utils/library/delegated_serial_command.py
index 0cab1ca88..0cab1ca88 100755
--- a/roles/etcd/library/delegated_serial_command.py
+++ b/roles/lib_utils/library/delegated_serial_command.py
diff --git a/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py b/roles/lib_utils/library/openshift_cert_expiry.py
index e355266b0..e355266b0 100644
--- a/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py
+++ b/roles/lib_utils/library/openshift_cert_expiry.py
diff --git a/roles/openshift_cli/library/openshift_container_binary_sync.py b/roles/lib_utils/library/openshift_container_binary_sync.py
index 440b8ec28..440b8ec28 100644
--- a/roles/openshift_cli/library/openshift_container_binary_sync.py
+++ b/roles/lib_utils/library/openshift_container_binary_sync.py
diff --git a/roles/lib_utils/lookup_plugins/openshift_master_facts_default_predicates.py b/roles/lib_utils/lookup_plugins/openshift_master_facts_default_predicates.py
new file mode 100644
index 000000000..4858c5ec6
--- /dev/null
+++ b/roles/lib_utils/lookup_plugins/openshift_master_facts_default_predicates.py
@@ -0,0 +1,143 @@
+# pylint: disable=missing-docstring
+
+import re
+from ansible.errors import AnsibleError
+from ansible.plugins.lookup import LookupBase
+
+
+class LookupModule(LookupBase):
+ # pylint: disable=too-many-branches,too-many-statements,too-many-arguments
+
+ def run(self, terms, variables=None, regions_enabled=True, short_version=None,
+ deployment_type=None, **kwargs):
+
+ predicates = []
+
+ if short_version is None or deployment_type is None:
+ if 'openshift' not in variables:
+ raise AnsibleError("This lookup module requires openshift_facts to be run prior to use")
+
+ if deployment_type is None:
+ if 'common' not in variables['openshift'] or 'deployment_type' not in variables['openshift']['common']:
+ raise AnsibleError("This lookup module requires that the deployment_type be set")
+
+ deployment_type = variables['openshift']['common']['deployment_type']
+
+ if short_version is None:
+ if 'short_version' in variables['openshift']['common']:
+ short_version = variables['openshift']['common']['short_version']
+ elif 'openshift_release' in variables:
+ release = variables['openshift_release']
+ if release.startswith('v'):
+ short_version = release[1:]
+ else:
+ short_version = release
+ short_version = '.'.join(short_version.split('.')[0:2])
+ elif 'openshift_version' in variables:
+ version = variables['openshift_version']
+ short_version = '.'.join(version.split('.')[0:2])
+ else:
+ # pylint: disable=line-too-long
+ raise AnsibleError("Either OpenShift needs to be installed or openshift_release needs to be specified")
+ if deployment_type == 'origin':
+ if short_version not in ['1.1', '1.2', '1.3', '1.4', '1.5', '3.6', '3.7', '3.8', '3.9', 'latest']:
+ raise AnsibleError("Unknown short_version %s" % short_version)
+ elif deployment_type == 'openshift-enterprise':
+ if short_version not in ['3.1', '3.2', '3.3', '3.4', '3.5', '3.6', '3.7', '3.8', '3.9', 'latest']:
+ raise AnsibleError("Unknown short_version %s" % short_version)
+ else:
+ raise AnsibleError("Unknown deployment_type %s" % deployment_type)
+
+ if deployment_type == 'origin':
+ # convert short_version to enterprise short_version
+ short_version = re.sub('^1.', '3.', short_version)
+
+ if short_version == 'latest':
+ short_version = '3.9'
+
+ # Predicates ordered according to OpenShift Origin source:
+ # origin/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go
+
+ if short_version == '3.1':
+ predicates.extend([
+ {'name': 'PodFitsHostPorts'},
+ {'name': 'PodFitsResources'},
+ {'name': 'NoDiskConflict'},
+ {'name': 'MatchNodeSelector'},
+ ])
+
+ if short_version == '3.2':
+ predicates.extend([
+ {'name': 'PodFitsHostPorts'},
+ {'name': 'PodFitsResources'},
+ {'name': 'NoDiskConflict'},
+ {'name': 'NoVolumeZoneConflict'},
+ {'name': 'MatchNodeSelector'},
+ {'name': 'MaxEBSVolumeCount'},
+ {'name': 'MaxGCEPDVolumeCount'}
+ ])
+
+ if short_version == '3.3':
+ predicates.extend([
+ {'name': 'NoDiskConflict'},
+ {'name': 'NoVolumeZoneConflict'},
+ {'name': 'MaxEBSVolumeCount'},
+ {'name': 'MaxGCEPDVolumeCount'},
+ {'name': 'GeneralPredicates'},
+ {'name': 'PodToleratesNodeTaints'},
+ {'name': 'CheckNodeMemoryPressure'}
+ ])
+
+ if short_version == '3.4':
+ predicates.extend([
+ {'name': 'NoDiskConflict'},
+ {'name': 'NoVolumeZoneConflict'},
+ {'name': 'MaxEBSVolumeCount'},
+ {'name': 'MaxGCEPDVolumeCount'},
+ {'name': 'GeneralPredicates'},
+ {'name': 'PodToleratesNodeTaints'},
+ {'name': 'CheckNodeMemoryPressure'},
+ {'name': 'CheckNodeDiskPressure'},
+ {'name': 'MatchInterPodAffinity'}
+ ])
+
+ if short_version in ['3.5', '3.6']:
+ predicates.extend([
+ {'name': 'NoVolumeZoneConflict'},
+ {'name': 'MaxEBSVolumeCount'},
+ {'name': 'MaxGCEPDVolumeCount'},
+ {'name': 'MatchInterPodAffinity'},
+ {'name': 'NoDiskConflict'},
+ {'name': 'GeneralPredicates'},
+ {'name': 'PodToleratesNodeTaints'},
+ {'name': 'CheckNodeMemoryPressure'},
+ {'name': 'CheckNodeDiskPressure'},
+ ])
+
+ if short_version in ['3.7', '3.8', '3.9']:
+ predicates.extend([
+ {'name': 'NoVolumeZoneConflict'},
+ {'name': 'MaxEBSVolumeCount'},
+ {'name': 'MaxGCEPDVolumeCount'},
+ {'name': 'MaxAzureDiskVolumeCount'},
+ {'name': 'MatchInterPodAffinity'},
+ {'name': 'NoDiskConflict'},
+ {'name': 'GeneralPredicates'},
+ {'name': 'PodToleratesNodeTaints'},
+ {'name': 'CheckNodeMemoryPressure'},
+ {'name': 'CheckNodeDiskPressure'},
+ {'name': 'NoVolumeNodeConflict'},
+ ])
+
+ if regions_enabled:
+ region_predicate = {
+ 'name': 'Region',
+ 'argument': {
+ 'serviceAffinity': {
+ 'labels': ['region']
+ }
+ }
+ }
+ predicates.append(region_predicate)
+
+ return predicates
diff --git a/roles/lib_utils/lookup_plugins/openshift_master_facts_default_priorities.py b/roles/lib_utils/lookup_plugins/openshift_master_facts_default_priorities.py
new file mode 100644
index 000000000..18e1b2e0c
--- /dev/null
+++ b/roles/lib_utils/lookup_plugins/openshift_master_facts_default_priorities.py
@@ -0,0 +1,117 @@
+# pylint: disable=missing-docstring
+
+import re
+from ansible.errors import AnsibleError
+from ansible.plugins.lookup import LookupBase
+
+
+class LookupModule(LookupBase):
+ # pylint: disable=too-many-branches,too-many-statements,too-many-arguments
+
+ def run(self, terms, variables=None, zones_enabled=True, short_version=None,
+ deployment_type=None, **kwargs):
+
+ priorities = []
+
+ if short_version is None or deployment_type is None:
+ if 'openshift' not in variables:
+ raise AnsibleError("This lookup module requires openshift_facts to be run prior to use")
+
+ if deployment_type is None:
+ if 'common' not in variables['openshift'] or 'deployment_type' not in variables['openshift']['common']:
+ raise AnsibleError("This lookup module requires that the deployment_type be set")
+
+ deployment_type = variables['openshift']['common']['deployment_type']
+
+ if short_version is None:
+ if 'short_version' in variables['openshift']['common']:
+ short_version = variables['openshift']['common']['short_version']
+ elif 'openshift_release' in variables:
+ release = variables['openshift_release']
+ if release.startswith('v'):
+ short_version = release[1:]
+ else:
+ short_version = release
+ short_version = '.'.join(short_version.split('.')[0:2])
+ elif 'openshift_version' in variables:
+ version = variables['openshift_version']
+ short_version = '.'.join(version.split('.')[0:2])
+ else:
+ # pylint: disable=line-too-long
+ raise AnsibleError("Either OpenShift needs to be installed or openshift_release needs to be specified")
+
+ if deployment_type == 'origin':
+ if short_version not in ['1.1', '1.2', '1.3', '1.4', '1.5', '3.6', '3.7', '3.8', '3.9', 'latest']:
+ raise AnsibleError("Unknown short_version %s" % short_version)
+ elif deployment_type == 'openshift-enterprise':
+ if short_version not in ['3.1', '3.2', '3.3', '3.4', '3.5', '3.6', '3.7', '3.8', '3.9', 'latest']:
+ raise AnsibleError("Unknown short_version %s" % short_version)
+ else:
+ raise AnsibleError("Unknown deployment_type %s" % deployment_type)
+
+ if deployment_type == 'origin':
+ # convert short_version to origin short_version
+ short_version = re.sub('^1.', '3.', short_version)
+
+ if short_version == 'latest':
+ short_version = '3.9'
+
+ if short_version == '3.1':
+ priorities.extend([
+ {'name': 'LeastRequestedPriority', 'weight': 1},
+ {'name': 'BalancedResourceAllocation', 'weight': 1},
+ {'name': 'SelectorSpreadPriority', 'weight': 1}
+ ])
+
+ if short_version == '3.2':
+ priorities.extend([
+ {'name': 'LeastRequestedPriority', 'weight': 1},
+ {'name': 'BalancedResourceAllocation', 'weight': 1},
+ {'name': 'SelectorSpreadPriority', 'weight': 1},
+ {'name': 'NodeAffinityPriority', 'weight': 1}
+ ])
+
+ if short_version == '3.3':
+ priorities.extend([
+ {'name': 'LeastRequestedPriority', 'weight': 1},
+ {'name': 'BalancedResourceAllocation', 'weight': 1},
+ {'name': 'SelectorSpreadPriority', 'weight': 1},
+ {'name': 'NodeAffinityPriority', 'weight': 1},
+ {'name': 'TaintTolerationPriority', 'weight': 1}
+ ])
+
+ if short_version == '3.4':
+ priorities.extend([
+ {'name': 'LeastRequestedPriority', 'weight': 1},
+ {'name': 'BalancedResourceAllocation', 'weight': 1},
+ {'name': 'SelectorSpreadPriority', 'weight': 1},
+ {'name': 'NodePreferAvoidPodsPriority', 'weight': 10000},
+ {'name': 'NodeAffinityPriority', 'weight': 1},
+ {'name': 'TaintTolerationPriority', 'weight': 1},
+ {'name': 'InterPodAffinityPriority', 'weight': 1}
+ ])
+
+ if short_version in ['3.5', '3.6', '3.7', '3.8', '3.9']:
+ priorities.extend([
+ {'name': 'SelectorSpreadPriority', 'weight': 1},
+ {'name': 'InterPodAffinityPriority', 'weight': 1},
+ {'name': 'LeastRequestedPriority', 'weight': 1},
+ {'name': 'BalancedResourceAllocation', 'weight': 1},
+ {'name': 'NodePreferAvoidPodsPriority', 'weight': 10000},
+ {'name': 'NodeAffinityPriority', 'weight': 1},
+ {'name': 'TaintTolerationPriority', 'weight': 1}
+ ])
+
+ if zones_enabled:
+ zone_priority = {
+ 'name': 'Zone',
+ 'argument': {
+ 'serviceAntiAffinity': {
+ 'label': 'zone'
+ }
+ },
+ 'weight': 2
+ }
+ priorities.append(zone_priority)
+
+ return priorities
diff --git a/roles/openshift_certificate_expiry/test/conftest.py b/roles/lib_utils/test/conftest.py
index df948fff0..aabdd4fa1 100644
--- a/roles/openshift_certificate_expiry/test/conftest.py
+++ b/roles/lib_utils/test/conftest.py
@@ -1,7 +1,15 @@
# pylint: disable=missing-docstring,invalid-name,redefined-outer-name
+import os
import pytest
+import sys
+
from OpenSSL import crypto
+sys.path.insert(1, os.path.join(os.path.dirname(__file__), os.pardir, "lookup_plugins"))
+
+from openshift_master_facts_default_predicates import LookupModule as PredicatesLookupModule # noqa: E402
+from openshift_master_facts_default_priorities import LookupModule as PrioritiesLookupModule # noqa: E402
+
# Parameter list for valid_cert fixture
VALID_CERTIFICATE_PARAMS = [
{
@@ -117,3 +125,48 @@ def valid_cert(request, ca):
'cert_file': cert_file,
'cert': cert
}
+
+
+@pytest.fixture()
+def predicates_lookup():
+ return PredicatesLookupModule()
+
+
+@pytest.fixture()
+def priorities_lookup():
+ return PrioritiesLookupModule()
+
+
+@pytest.fixture()
+def facts():
+ return {
+ 'openshift': {
+ 'common': {}
+ }
+ }
+
+
+@pytest.fixture(params=[True, False])
+def regions_enabled(request):
+ return request.param
+
+
+@pytest.fixture(params=[True, False])
+def zones_enabled(request):
+ return request.param
+
+
+def v_prefix(release):
+ """Prefix a release number with 'v'."""
+ return "v" + release
+
+
+def minor(release):
+ """Add a suffix to release, making 'X.Y' become 'X.Y.Z'."""
+ return release + ".1"
+
+
+@pytest.fixture(params=[str, v_prefix, minor])
+def release_mod(request):
+ """Modifies a release string to alternative valid values."""
+ return request.param
diff --git a/roles/openshift_master_facts/test/openshift_master_facts_bad_input_tests.py b/roles/lib_utils/test/openshift_master_facts_bad_input_tests.py
index e8da1e04a..e8da1e04a 100644
--- a/roles/openshift_master_facts/test/openshift_master_facts_bad_input_tests.py
+++ b/roles/lib_utils/test/openshift_master_facts_bad_input_tests.py
diff --git a/roles/openshift_master_facts/test/conftest.py b/roles/lib_utils/test/openshift_master_facts_conftest.py
index 140cced73..140cced73 100644
--- a/roles/openshift_master_facts/test/conftest.py
+++ b/roles/lib_utils/test/openshift_master_facts_conftest.py
diff --git a/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py b/roles/lib_utils/test/openshift_master_facts_default_predicates_tests.py
index 11aad9f03..11aad9f03 100644
--- a/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py
+++ b/roles/lib_utils/test/openshift_master_facts_default_predicates_tests.py
diff --git a/roles/openshift_master_facts/test/openshift_master_facts_default_priorities_tests.py b/roles/lib_utils/test/openshift_master_facts_default_priorities_tests.py
index 527fc9ff4..527fc9ff4 100644
--- a/roles/openshift_master_facts/test/openshift_master_facts_default_priorities_tests.py
+++ b/roles/lib_utils/test/openshift_master_facts_default_priorities_tests.py
diff --git a/roles/openshift_certificate_expiry/test/test_fakeopensslclasses.py b/roles/lib_utils/test/test_fakeopensslclasses.py
index 8a521a765..8a521a765 100644
--- a/roles/openshift_certificate_expiry/test/test_fakeopensslclasses.py
+++ b/roles/lib_utils/test/test_fakeopensslclasses.py
diff --git a/roles/openshift_certificate_expiry/test/test_load_and_handle_cert.py b/roles/lib_utils/test/test_load_and_handle_cert.py
index 98792e2ee..98792e2ee 100644
--- a/roles/openshift_certificate_expiry/test/test_load_and_handle_cert.py
+++ b/roles/lib_utils/test/test_load_and_handle_cert.py
diff --git a/roles/openshift_aws/defaults/main.yml b/roles/openshift_aws/defaults/main.yml
index 71de24339..a729e8dbd 100644
--- a/roles/openshift_aws/defaults/main.yml
+++ b/roles/openshift_aws/defaults/main.yml
@@ -98,17 +98,26 @@ openshift_aws_elb_dict:
proxy_protocol: True
openshift_aws_node_group_config_master_volumes:
+- device_name: /dev/sda1
+ volume_size: 100
+ device_type: gp2
+ delete_on_termination: False
- device_name: /dev/sdb
volume_size: 100
device_type: gp2
delete_on_termination: False
openshift_aws_node_group_config_node_volumes:
+- device_name: /dev/sda1
+ volume_size: 100
+ device_type: gp2
+ delete_on_termination: True
- device_name: /dev/sdb
volume_size: 100
device_type: gp2
delete_on_termination: True
+# build_instance_tags is a custom filter in role lib_utils
openshift_aws_node_group_config_tags: "{{ openshift_aws_clusterid | build_instance_tags }}"
openshift_aws_node_group_termination_policy: Default
openshift_aws_node_group_replace_instances: []
@@ -201,6 +210,7 @@ openshift_aws_node_group_config:
openshift_aws_elb_tags: "{{ openshift_aws_kube_tags }}"
openshift_aws_elb_az_load_balancing: False
+# build_instance_tags is a custom filter in role lib_utils
openshift_aws_kube_tags: "{{ openshift_aws_clusterid | build_instance_tags }}"
openshift_aws_elb_security_groups: "{{ openshift_aws_launch_config_security_groups }}"
@@ -291,3 +301,7 @@ openshift_aws_node_user_data: ''
openshift_aws_node_config_namespace: openshift-node
openshift_aws_masters_groups: masters,etcd,nodes
+
+# By default, don't delete things like the shared IAM instance
+# profile and uploaded ssh keys
+openshift_aws_enable_uninstall_shared_objects: False
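
Both commented lines above refer to build_instance_tags, the filter that moved into the shared lib_utils role. Purely as a hypothetical sketch (not the lib_utils source, and the real tag keys may differ), such a filter derives a tag dictionary from the cluster id, along the lines of the kubernetes.io/cluster/<clusterid> tag used elsewhere in this diff:

    # Hypothetical sketch of a clusterid -> instance-tags filter; the actual
    # lib_utils implementation may emit different or additional keys.
    def build_instance_tags(clusterid):
        return {
            'clusterid': clusterid,
            'kubernetes.io/cluster/' + clusterid: clusterid,
        }

    print(build_instance_tags('mycluster'))
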
diff --git a/roles/openshift_aws/tasks/accept_nodes.yml b/roles/openshift_aws/tasks/accept_nodes.yml
index c2a2cea30..db30fe5c9 100644
--- a/roles/openshift_aws/tasks/accept_nodes.yml
+++ b/roles/openshift_aws/tasks/accept_nodes.yml
@@ -1,4 +1,6 @@
---
+- include_tasks: setup_master_group.yml
+
- name: fetch masters
ec2_instance_facts:
region: "{{ openshift_aws_region | default('us-east-1') }}"
@@ -36,4 +38,4 @@
nodes: "{{ instancesout.instances|map(attribute='private_dns_name') | list }}"
timeout: 60
register: nodeout
- delegate_to: "{{ mastersout.instances[0].public_ip_address }}"
+ delegate_to: "{{ groups.masters.0 }}"
diff --git a/roles/openshift_aws/tasks/build_node_group.yml b/roles/openshift_aws/tasks/build_node_group.yml
index 9485cc3ac..a9f9cc3c4 100644
--- a/roles/openshift_aws/tasks/build_node_group.yml
+++ b/roles/openshift_aws/tasks/build_node_group.yml
@@ -43,6 +43,7 @@
- name: set the value for the deployment_serial and the current asgs
set_fact:
+ # scale_groups_serial is a custom filter in role lib_utils
l_deployment_serial: "{{ openshift_aws_node_group_deployment_serial if openshift_aws_node_group_deployment_serial is defined else asgs.results | scale_groups_serial(openshift_aws_node_group_upgrade) }}"
openshift_aws_current_asgs: "{{ asgs.results | map(attribute='auto_scaling_group_name') | list | union(openshift_aws_current_asgs) }}"
diff --git a/roles/openshift_aws/tasks/provision.yml b/roles/openshift_aws/tasks/provision.yml
index 786a2e4cf..2b5f317d8 100644
--- a/roles/openshift_aws/tasks/provision.yml
+++ b/roles/openshift_aws/tasks/provision.yml
@@ -1,23 +1,6 @@
---
-- when: openshift_aws_create_iam_cert | bool
- name: create the iam_cert for elb certificate
- include_tasks: iam_cert.yml
-
-- when: openshift_aws_create_s3 | bool
- name: create s3 bucket for registry
- include_tasks: s3.yml
-
- include_tasks: vpc_and_subnet_id.yml
-- name: create elbs
- include_tasks: elb.yml
- with_dict: "{{ openshift_aws_elb_dict }}"
- vars:
- l_elb_security_groups: "{{ openshift_aws_elb_security_groups }}"
- l_openshift_aws_elb_name_dict: "{{ openshift_aws_elb_name_dict }}"
- loop_control:
- loop_var: l_elb_dict_item
-
- name: include scale group creation for master
include_tasks: build_node_group.yml
with_items: "{{ openshift_aws_master_group }}"
diff --git a/roles/openshift_aws/tasks/provision_elb.yml b/roles/openshift_aws/tasks/provision_elb.yml
new file mode 100644
index 000000000..a52f63bd5
--- /dev/null
+++ b/roles/openshift_aws/tasks/provision_elb.yml
@@ -0,0 +1,15 @@
+---
+- when: openshift_aws_create_iam_cert | bool
+ name: create the iam_cert for elb certificate
+ include_tasks: iam_cert.yml
+
+- include_tasks: vpc_and_subnet_id.yml
+
+- name: create elbs
+ include_tasks: elb.yml
+ with_dict: "{{ openshift_aws_elb_dict }}"
+ vars:
+ l_elb_security_groups: "{{ openshift_aws_elb_security_groups }}"
+ l_openshift_aws_elb_name_dict: "{{ openshift_aws_elb_name_dict }}"
+ loop_control:
+ loop_var: l_elb_dict_item
diff --git a/roles/openshift_aws/tasks/provision_nodes.yml b/roles/openshift_aws/tasks/provision_nodes.yml
index d82f18574..9105b5b4c 100644
--- a/roles/openshift_aws/tasks/provision_nodes.yml
+++ b/roles/openshift_aws/tasks/provision_nodes.yml
@@ -2,25 +2,12 @@
# Get bootstrap config token
# bootstrap should be created on first master
# need to fetch it and shove it into cloud data
-- name: fetch master instances
- ec2_instance_facts:
- region: "{{ openshift_aws_region }}"
- filters:
- "tag:clusterid": "{{ openshift_aws_clusterid }}"
- "tag:host-type": master
- instance-state-name: running
- register: instancesout
- retries: 20
- delay: 3
- until:
- - "'instances' in instancesout"
- - instancesout.instances|length > 0
+- include_tasks: setup_master_group.yml
- name: slurp down the bootstrap.kubeconfig
slurp:
src: /etc/origin/master/bootstrap.kubeconfig
- delegate_to: "{{ instancesout.instances[0].public_ip_address }}"
- remote_user: root
+ delegate_to: "{{ groups.masters.0 }}"
register: bootstrap
- name: set_fact for kubeconfig token
diff --git a/roles/openshift_aws/tasks/uninstall_security_group.yml b/roles/openshift_aws/tasks/uninstall_security_group.yml
new file mode 100644
index 000000000..55d40e8ec
--- /dev/null
+++ b/roles/openshift_aws/tasks/uninstall_security_group.yml
@@ -0,0 +1,14 @@
+---
+- name: delete the node group sgs
+ oo_ec2_group:
+ state: absent
+ name: "{{ item.value.name}}"
+ region: "{{ openshift_aws_region }}"
+ with_dict: "{{ openshift_aws_node_security_groups }}"
+
+- name: delete the k8s sgs for the node group
+ oo_ec2_group:
+ state: absent
+ name: "{{ item.value.name }}_k8s"
+ region: "{{ openshift_aws_region }}"
+ with_dict: "{{ openshift_aws_node_security_groups }}"
diff --git a/roles/openshift_aws/tasks/uninstall_ssh_keys.yml b/roles/openshift_aws/tasks/uninstall_ssh_keys.yml
new file mode 100644
index 000000000..27e42da53
--- /dev/null
+++ b/roles/openshift_aws/tasks/uninstall_ssh_keys.yml
@@ -0,0 +1,9 @@
+---
+- name: Remove the public keys for the user(s)
+ ec2_key:
+ state: absent
+ name: "{{ item.key_name }}"
+ region: "{{ openshift_aws_region }}"
+ with_items: "{{ openshift_aws_users }}"
+ no_log: True
+ when: openshift_aws_enable_uninstall_shared_objects | bool
diff --git a/roles/openshift_aws/tasks/uninstall_vpc.yml b/roles/openshift_aws/tasks/uninstall_vpc.yml
new file mode 100644
index 000000000..ecf39f694
--- /dev/null
+++ b/roles/openshift_aws/tasks/uninstall_vpc.yml
@@ -0,0 +1,36 @@
+---
+- name: Fetch the VPC for the vpc.id
+ ec2_vpc_net_facts:
+ region: "{{ openshift_aws_region }}"
+ filters:
+ "tag:Name": "{{ openshift_aws_clusterid }}"
+ register: vpcout
+- debug:
+ var: vpcout
+ verbosity: 1
+
+- when: vpcout.vpcs | length > 0
+ block:
+ - name: delete the vpc igw
+ ec2_vpc_igw:
+ state: absent
+ region: "{{ openshift_aws_region }}"
+ vpc_id: "{{ vpcout.vpcs[0].id }}"
+ register: igw
+
+ - name: delete the vpc subnets
+ ec2_vpc_subnet:
+ state: absent
+ region: "{{ openshift_aws_region }}"
+ vpc_id: "{{ vpcout.vpcs[0].id }}"
+ cidr: "{{ item.cidr }}"
+ az: "{{ item.az }}"
+ with_items: "{{ openshift_aws_vpc.subnets[openshift_aws_region] }}"
+
+ - name: Delete AWS VPC
+ ec2_vpc_net:
+ state: absent
+ region: "{{ openshift_aws_region }}"
+ name: "{{ openshift_aws_clusterid }}"
+ cidr_block: "{{ openshift_aws_vpc.cidr }}"
+ register: vpc
diff --git a/roles/openshift_aws/tasks/wait_for_groups.yml b/roles/openshift_aws/tasks/wait_for_groups.yml
index 1f4ef3e1c..3ad876e37 100644
--- a/roles/openshift_aws/tasks/wait_for_groups.yml
+++ b/roles/openshift_aws/tasks/wait_for_groups.yml
@@ -8,6 +8,7 @@
tags:
"{{ {'kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid } }}"
register: qasg
+ # scale_groups_match_capacity is a custom filter in role lib_utils
until: qasg | json_query('results[*]') | scale_groups_match_capacity | bool
delay: 10
retries: 60
diff --git a/roles/openshift_buildoverrides/vars/main.yml b/roles/openshift_buildoverrides/vars/main.yml
index cf49a6ebf..df53280c8 100644
--- a/roles/openshift_buildoverrides/vars/main.yml
+++ b/roles/openshift_buildoverrides/vars/main.yml
@@ -9,3 +9,4 @@ buildoverrides_yaml:
imageLabels: "{{ openshift_buildoverrides_image_labels | default(None) }}"
nodeSelector: "{{ openshift_buildoverrides_nodeselectors | default(None) }}"
annotations: "{{ openshift_buildoverrides_annotations | default(None) }}"
+ tolerations: "{{ openshift_buildoverrides_tolerations | default(None) }}"
diff --git a/roles/openshift_ca/tasks/main.yml b/roles/openshift_ca/tasks/main.yml
index b94cd9fba..9c8534c74 100644
--- a/roles/openshift_ca/tasks/main.yml
+++ b/roles/openshift_ca/tasks/main.yml
@@ -19,7 +19,8 @@
- name: Reload generated facts
openshift_facts:
- when: hostvars[openshift_ca_host].install_result is changed
+ when:
+ - hostvars[openshift_ca_host].install_result | default({'changed':false}) is changed
- name: Create openshift_ca_config_dir if it does not exist
file:
diff --git a/roles/openshift_certificate_expiry/tasks/main.yml b/roles/openshift_certificate_expiry/tasks/main.yml
index 8dea2c07f..7062b5060 100644
--- a/roles/openshift_certificate_expiry/tasks/main.yml
+++ b/roles/openshift_certificate_expiry/tasks/main.yml
@@ -16,7 +16,9 @@
- name: Generate the result JSON string
run_once: yes
- set_fact: json_result_string="{{ hostvars|oo_cert_expiry_results_to_json(play_hosts) }}"
+ set_fact:
+ # oo_cert_expiry_results_to_json is a custom filter in role lib_utils
+ json_result_string: "{{ hostvars|oo_cert_expiry_results_to_json(play_hosts) }}"
when: openshift_certificate_expiry_save_json_results|bool
- name: Generate results JSON file
diff --git a/roles/openshift_cli/tasks/main.yml b/roles/openshift_cli/tasks/main.yml
index 37bed9dbe..ae8d1ace0 100644
--- a/roles/openshift_cli/tasks/main.yml
+++ b/roles/openshift_cli/tasks/main.yml
@@ -12,6 +12,7 @@
register: pull_result
changed_when: "'Downloaded newer image' in pull_result.stdout"
+ # openshift_container_binary_sync is a custom module in lib_utils
- name: Copy client binaries/symlinks out of CLI image for use on the host
openshift_container_binary_sync:
image: "{{ openshift_cli_image }}"
@@ -28,6 +29,7 @@
register: pull_result
changed_when: "'Pulling layer' in pull_result.stdout"
+ # openshift_container_binary_sync is a custom module in lib_utils
- name: Copy client binaries/symlinks out of CLI image for use on the host
openshift_container_binary_sync:
image: "{{ '' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift_cli_image }}"
diff --git a/roles/openshift_cloud_provider/tasks/main.yml b/roles/openshift_cloud_provider/tasks/main.yml
index dff492a69..3513577fa 100644
--- a/roles/openshift_cloud_provider/tasks/main.yml
+++ b/roles/openshift_cloud_provider/tasks/main.yml
@@ -19,3 +19,6 @@
- include_tasks: gce.yml
when: cloudprovider_is_gce | bool
+
+- include_tasks: vsphere.yml
+ when: cloudprovider_is_vsphere | bool
diff --git a/roles/openshift_cloud_provider/tasks/vsphere.yml b/roles/openshift_cloud_provider/tasks/vsphere.yml
new file mode 100644
index 000000000..3a33df241
--- /dev/null
+++ b/roles/openshift_cloud_provider/tasks/vsphere.yml
@@ -0,0 +1,6 @@
+---
+- name: Create cloud config
+ template:
+ dest: "{{ openshift.common.config_base }}/cloudprovider/vsphere.conf"
+ src: vsphere.conf.j2
+ when: openshift_cloudprovider_vsphere_username is defined and openshift_cloudprovider_vsphere_password is defined and openshift_cloudprovider_vsphere_host is defined and openshift_cloudprovider_vsphere_datacenter is defined and openshift_cloudprovider_vsphere_datastore is defined
diff --git a/roles/openshift_cloud_provider/templates/openstack.conf.j2 b/roles/openshift_cloud_provider/templates/openstack.conf.j2
index 313ee02b4..30f18ffa9 100644
--- a/roles/openshift_cloud_provider/templates/openstack.conf.j2
+++ b/roles/openshift_cloud_provider/templates/openstack.conf.j2
@@ -19,3 +19,7 @@ region = {{ openshift_cloudprovider_openstack_region }}
[LoadBalancer]
subnet-id = {{ openshift_cloudprovider_openstack_lb_subnet_id }}
{% endif %}
+{% if openshift_cloudprovider_openstack_blockstorage_version is defined %}
+[BlockStorage]
+bs-version={{ openshift_cloudprovider_openstack_blockstorage_version }}
+{% endif %}
\ No newline at end of file
diff --git a/roles/openshift_cloud_provider/templates/vsphere.conf.j2 b/roles/openshift_cloud_provider/templates/vsphere.conf.j2
new file mode 100644
index 000000000..84e5e371c
--- /dev/null
+++ b/roles/openshift_cloud_provider/templates/vsphere.conf.j2
@@ -0,0 +1,15 @@
+[Global]
+user = "{{ openshift_cloudprovider_vsphere_username }}"
+password = "{{ openshift_cloudprovider_vsphere_password }}"
+server = "{{ openshift_cloudprovider_vsphere_host }}"
+port = 443
+insecure-flag = 1
+datacenter = {{ openshift_cloudprovider_vsphere_datacenter }}
+datastore = {{ openshift_cloudprovider_vsphere_datastore }}
+{% if openshift_cloudprovider_vsphere_folder is defined %}
+working-dir = /{{ openshift_cloudprovider_vsphere_datacenter }}/vm/{{ openshift_cloudprovider_vsphere_folder }}/
+{% else %}
+working-dir = /{{ openshift_cloudprovider_vsphere_datacenter }}/vm/
+{% endif %}
+[Disk]
+scsicontrollertype = pvscsi
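
The working-dir value above is the only conditional part of the template: it is /<datacenter>/vm/<folder>/ when a folder is configured and /<datacenter>/vm/ otherwise. A small equivalent of that conditional (the helper name is made up for illustration):

    # Mirrors the working-dir conditional in vsphere.conf.j2 above.
    def vsphere_working_dir(datacenter, folder=None):
        if folder:
            return "/{0}/vm/{1}/".format(datacenter, folder)
        return "/{0}/vm/".format(datacenter)

    assert vsphere_working_dir("dc1") == "/dc1/vm/"
    assert vsphere_working_dir("dc1", "ocp") == "/dc1/vm/ocp/"
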
diff --git a/roles/openshift_cloud_provider/vars/main.yml b/roles/openshift_cloud_provider/vars/main.yml
index c9d953f58..e71db80b9 100644
--- a/roles/openshift_cloud_provider/vars/main.yml
+++ b/roles/openshift_cloud_provider/vars/main.yml
@@ -3,3 +3,4 @@ has_cloudprovider: "{{ openshift_cloudprovider_kind | default(None) != None }}"
cloudprovider_is_aws: "{{ has_cloudprovider | bool and openshift_cloudprovider_kind == 'aws' }}"
cloudprovider_is_openstack: "{{ has_cloudprovider | bool and openshift_cloudprovider_kind == 'openstack' }}"
cloudprovider_is_gce: "{{ has_cloudprovider | bool and openshift_cloudprovider_kind == 'gce' }}"
+cloudprovider_is_vsphere: "{{ has_cloudprovider | bool and openshift_cloudprovider_kind == 'vsphere' }}"
diff --git a/roles/openshift_examples/examples-sync.sh b/roles/openshift_examples/examples-sync.sh
index 68a0e8857..648bf7293 100755
--- a/roles/openshift_examples/examples-sync.sh
+++ b/roles/openshift_examples/examples-sync.sh
@@ -6,7 +6,7 @@
# This script should be run from openshift-ansible/roles/openshift_examples
XPAAS_VERSION=ose-v1.4.7
-ORIGIN_VERSION=${1:-v3.7}
+ORIGIN_VERSION=${1:-v3.9}
RHAMP_TAG=2.0.0.GA
EXAMPLES_BASE=$(pwd)/files/examples/${ORIGIN_VERSION}
find ${EXAMPLES_BASE} -name '*.json' -delete
diff --git a/roles/openshift_examples/files/examples/v3.9/db-templates/mariadb-persistent-template.json b/roles/openshift_examples/files/examples/v3.9/db-templates/mariadb-persistent-template.json
index 217ef11dd..92be8f42e 100644
--- a/roles/openshift_examples/files/examples/v3.9/db-templates/mariadb-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.9/db-templates/mariadb-persistent-template.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "mariadb-persistent",
"annotations": {
- "openshift.io/display-name": "MariaDB (Persistent)",
+ "openshift.io/display-name": "MariaDB",
"description": "MariaDB database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/blob/master/10.1/README.md.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-mariadb",
"tags": "database,mariadb",
diff --git a/roles/openshift_examples/files/examples/v3.9/db-templates/mongodb-persistent-template.json b/roles/openshift_examples/files/examples/v3.9/db-templates/mongodb-persistent-template.json
index 97e4128a4..4e3e64d48 100644
--- a/roles/openshift_examples/files/examples/v3.9/db-templates/mongodb-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.9/db-templates/mongodb-persistent-template.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "mongodb-persistent",
"annotations": {
- "openshift.io/display-name": "MongoDB (Persistent)",
+ "openshift.io/display-name": "MongoDB",
"description": "MongoDB database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/blob/master/3.2/README.md.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-mongodb",
"tags": "database,mongodb",
diff --git a/roles/openshift_examples/files/examples/v3.9/db-templates/mysql-persistent-template.json b/roles/openshift_examples/files/examples/v3.9/db-templates/mysql-persistent-template.json
index 48ac114fd..6ac80f3a0 100644
--- a/roles/openshift_examples/files/examples/v3.9/db-templates/mysql-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.9/db-templates/mysql-persistent-template.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "mysql-persistent",
"annotations": {
- "openshift.io/display-name": "MySQL (Persistent)",
+ "openshift.io/display-name": "MySQL",
"description": "MySQL database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mysql-container/blob/master/5.7/README.md.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-mysql-database",
"tags": "database,mysql",
diff --git a/roles/openshift_examples/files/examples/v3.9/db-templates/postgresql-persistent-template.json b/roles/openshift_examples/files/examples/v3.9/db-templates/postgresql-persistent-template.json
index 8a2d23907..190509112 100644
--- a/roles/openshift_examples/files/examples/v3.9/db-templates/postgresql-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.9/db-templates/postgresql-persistent-template.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "postgresql-persistent",
"annotations": {
- "openshift.io/display-name": "PostgreSQL (Persistent)",
+ "openshift.io/display-name": "PostgreSQL",
"description": "PostgreSQL database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/blob/master/9.5.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-postgresql",
"tags": "database,postgresql",
diff --git a/roles/openshift_examples/files/examples/v3.9/db-templates/redis-persistent-template.json b/roles/openshift_examples/files/examples/v3.9/db-templates/redis-persistent-template.json
index e0e0a88d5..d1103d3af 100644
--- a/roles/openshift_examples/files/examples/v3.9/db-templates/redis-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.9/db-templates/redis-persistent-template.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "redis-persistent",
"annotations": {
- "openshift.io/display-name": "Redis (Persistent)",
+ "openshift.io/display-name": "Redis",
"description": "Redis in-memory data structure store, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/redis-container/blob/master/3.2.\n\nNOTE: You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-redis",
"tags": "database,redis",
diff --git a/roles/openshift_examples/files/examples/v3.9/image-streams/image-streams-centos7.json b/roles/openshift_examples/files/examples/v3.9/image-streams/image-streams-centos7.json
index e7af160d9..ad17b709e 100644
--- a/roles/openshift_examples/files/examples/v3.9/image-streams/image-streams-centos7.json
+++ b/roles/openshift_examples/files/examples/v3.9/image-streams/image-streams-centos7.json
@@ -407,7 +407,7 @@
"annotations": {
"openshift.io/display-name": "Python (Latest)",
"openshift.io/provider-display-name": "Red Hat, Inc.",
- "description": "Build and run Python applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.5/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Python available on OpenShift, including major versions updates.",
+ "description": "Build and run Python applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.6/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Python available on OpenShift, including major versions updates.",
"iconClass": "icon-python",
"tags": "builder,python",
"supports":"python",
@@ -415,7 +415,7 @@
},
"from": {
"kind": "ImageStreamTag",
- "name": "3.5"
+ "name": "3.6"
}
},
{
@@ -485,6 +485,23 @@
"kind": "DockerImage",
"name": "centos/python-35-centos7:latest"
}
+ },
+ {
+ "name": "3.6",
+ "annotations": {
+ "openshift.io/display-name": "Python 3.6",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
+ "description": "Build and run Python 3.6 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.6/README.md.",
+ "iconClass": "icon-python",
+ "tags": "builder,python",
+ "supports":"python:3.6,python",
+ "version": "3.6",
+ "sampleRepo": "https://github.com/openshift/django-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "centos/python-36-centos7:latest"
+ }
}
]
}
@@ -944,7 +961,7 @@
},
"from": {
"kind": "DockerImage",
- "name": "openshift/jenkins-2-centos7:latest"
+ "name": "openshift/jenkins-2-centos7:v3.9"
}
}
]
diff --git a/roles/openshift_examples/files/examples/v3.9/image-streams/image-streams-rhel7.json b/roles/openshift_examples/files/examples/v3.9/image-streams/image-streams-rhel7.json
index 2b082fc75..efc8705f4 100644
--- a/roles/openshift_examples/files/examples/v3.9/image-streams/image-streams-rhel7.json
+++ b/roles/openshift_examples/files/examples/v3.9/image-streams/image-streams-rhel7.json
@@ -407,7 +407,7 @@
"annotations": {
"openshift.io/display-name": "Python (Latest)",
"openshift.io/provider-display-name": "Red Hat, Inc.",
- "description": "Build and run Python applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.5/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Python available on OpenShift, including major versions updates.",
+ "description": "Build and run Python applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.6/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Python available on OpenShift, including major versions updates.",
"iconClass": "icon-python",
"tags": "builder,python",
"supports":"python",
@@ -415,7 +415,7 @@
},
"from": {
"kind": "ImageStreamTag",
- "name": "3.5"
+ "name": "3.6"
}
},
{
@@ -485,6 +485,23 @@
"kind": "DockerImage",
"name": "registry.access.redhat.com/rhscl/python-35-rhel7:latest"
}
+ },
+ {
+ "name": "3.6",
+ "annotations": {
+ "openshift.io/display-name": "Python 3.6",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
+ "description": "Build and run Python 3.6 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.6/README.md.",
+ "iconClass": "icon-python",
+ "tags": "builder,python",
+ "supports":"python:3.6,python",
+ "version": "3.6",
+ "sampleRepo": "https://github.com/openshift/django-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/rhscl/python-36-rhel7:latest"
+ }
}
]
}
@@ -846,7 +863,7 @@
},
"from": {
"kind": "DockerImage",
- "name": "registry.access.redhat.com/openshift3/jenkins-2-rhel7:latest"
+ "name": "registry.access.redhat.com/openshift3/jenkins-2-rhel7:v3.9"
}
}
]
diff --git a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/cakephp-mysql-persistent.json b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/cakephp-mysql-persistent.json
index 86ddc184a..40b4eaa81 100644
--- a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/cakephp-mysql-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/cakephp-mysql-persistent.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "cakephp-mysql-persistent",
"annotations": {
- "openshift.io/display-name": "CakePHP + MySQL (Persistent)",
+ "openshift.io/display-name": "CakePHP + MySQL",
"description": "An example CakePHP application with a MySQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/cakephp-ex/blob/master/README.md.",
"tags": "quickstart,php,cakephp",
"iconClass": "icon-php",
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/cake-ex/blob/master/README.md.",
"labels": {
- "template": "cakephp-mysql-persistent"
+ "template": "cakephp-mysql-persistent",
+ "app": "cakephp-mysql-persistent"
},
"objects": [
{
diff --git a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/cakephp-mysql.json b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/cakephp-mysql.json
index 3c964bd6a..ecd90e495 100644
--- a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/cakephp-mysql.json
+++ b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/cakephp-mysql.json
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/cake-ex/blob/master/README.md.",
"labels": {
- "template": "cakephp-mysql-example"
+ "template": "cakephp-mysql-example",
+ "app": "cakephp-mysql-example"
},
"objects": [
{
diff --git a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/dancer-mysql-persistent.json b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/dancer-mysql-persistent.json
index 0a10c5fbc..17a155600 100644
--- a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/dancer-mysql-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/dancer-mysql-persistent.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "dancer-mysql-persistent",
"annotations": {
- "openshift.io/display-name": "Dancer + MySQL (Persistent)",
+ "openshift.io/display-name": "Dancer + MySQL",
"description": "An example Dancer application with a MySQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/dancer-ex/blob/master/README.md.",
"tags": "quickstart,perl,dancer",
"iconClass": "icon-perl",
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/dancer-ex/blob/master/README.md.",
"labels": {
- "template": "dancer-mysql-persistent"
+ "template": "dancer-mysql-persistent",
+ "app": "dancer-mysql-persistent"
},
"objects": [
{
diff --git a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/dancer-mysql.json b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/dancer-mysql.json
index 6122d5436..abf711535 100644
--- a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/dancer-mysql.json
+++ b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/dancer-mysql.json
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/dancer-ex/blob/master/README.md.",
"labels": {
- "template": "dancer-mysql-example"
+ "template": "dancer-mysql-example",
+ "app": "dancer-mysql-example"
},
"objects": [
{
diff --git a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/django-postgresql-persistent.json b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/django-postgresql-persistent.json
index f3b5838fa..c8dab0b53 100644
--- a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/django-postgresql-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/django-postgresql-persistent.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "django-psql-persistent",
"annotations": {
- "openshift.io/display-name": "Django + PostgreSQL (Persistent)",
+ "openshift.io/display-name": "Django + PostgreSQL",
"description": "An example Django application with a PostgreSQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/django-ex/blob/master/README.md.",
"tags": "quickstart,python,django",
"iconClass": "icon-python",
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/django-ex/blob/master/README.md.",
"labels": {
- "template": "django-psql-persistent"
+ "template": "django-psql-persistent",
+ "app": "django-psql-persistent"
},
"objects": [
{
diff --git a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/django-postgresql.json b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/django-postgresql.json
index b21295df2..6395defda 100644
--- a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/django-postgresql.json
+++ b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/django-postgresql.json
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/django-ex/blob/master/README.md.",
"labels": {
- "template": "django-psql-example"
+ "template": "django-psql-example",
+ "app": "django-psql-example"
},
"objects": [
{
diff --git a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/httpd.json b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/httpd.json
index 3771280bf..e944f21a5 100644
--- a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/httpd.json
+++ b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/httpd.json
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/httpd-ex/blob/master/README.md.",
"labels": {
- "template": "httpd-example"
+ "template": "httpd-example",
+ "app": "httpd-example"
},
"objects": [
{
@@ -198,12 +199,7 @@
}
},
"env": [
- ],
- "resources": {
- "limits": {
- "memory": "${MEMORY_LIMIT}"
- }
- }
+ ]
}
]
}
diff --git a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/jenkins-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/jenkins-ephemeral-template.json
index 28b4b9d81..87ae6ed14 100644
--- a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/jenkins-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/jenkins-ephemeral-template.json
@@ -15,6 +15,10 @@
}
},
"message": "A Jenkins service has been created in your project. Log into Jenkins with your OpenShift account. The tutorial at https://github.com/openshift/origin/blob/master/examples/jenkins/README.md contains more information about using this template.",
+ "labels": {
+ "app": "jenkins-ephemeral",
+ "template": "jenkins-ephemeral-template"
+ },
"objects": [
{
"kind": "Route",
@@ -275,10 +279,7 @@
"name": "JENKINS_IMAGE_STREAM_TAG",
"displayName": "Jenkins ImageStreamTag",
"description": "Name of the ImageStreamTag to be used for the Jenkins image.",
- "value": "jenkins:latest"
+ "value": "jenkins:2"
}
- ],
- "labels": {
- "template": "jenkins-ephemeral-template"
- }
+ ]
}
diff --git a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/jenkins-persistent-template.json b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/jenkins-persistent-template.json
index 4915bb12c..95d15b55f 100644
--- a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/jenkins-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/jenkins-persistent-template.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "jenkins-persistent",
"annotations": {
- "openshift.io/display-name": "Jenkins (Persistent)",
+ "openshift.io/display-name": "Jenkins",
"description": "Jenkins service, with persistent storage.\n\nNOTE: You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-jenkins",
"tags": "instant-app,jenkins",
@@ -15,6 +15,10 @@
}
},
"message": "A Jenkins service has been created in your project. Log into Jenkins with your OpenShift account. The tutorial at https://github.com/openshift/origin/blob/master/examples/jenkins/README.md contains more information about using this template.",
+ "labels": {
+ "app": "jenkins-persistent",
+ "template": "jenkins-persistent-template"
+ },
"objects": [
{
"kind": "Route",
@@ -299,10 +303,7 @@
"name": "JENKINS_IMAGE_STREAM_TAG",
"displayName": "Jenkins ImageStreamTag",
"description": "Name of the ImageStreamTag to be used for the Jenkins image.",
- "value": "jenkins:latest"
+ "value": "jenkins:2"
}
- ],
- "labels": {
- "template": "jenkins-persistent-template"
- }
+ ]
}
diff --git a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/nodejs-mongodb-persistent.json b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/nodejs-mongodb-persistent.json
index 7f2a5d804..f04adaa67 100644
--- a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/nodejs-mongodb-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/nodejs-mongodb-persistent.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "nodejs-mongo-persistent",
"annotations": {
- "openshift.io/display-name": "Node.js + MongoDB (Persistent)",
+ "openshift.io/display-name": "Node.js + MongoDB",
"description": "An example Node.js application with a MongoDB database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/nodejs-ex/blob/master/README.md.",
"tags": "quickstart,nodejs",
"iconClass": "icon-nodejs",
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/nodejs-ex/blob/master/README.md.",
"labels": {
- "template": "nodejs-mongo-persistent"
+ "template": "nodejs-mongo-persistent",
+ "app": "nodejs-mongo-persistent"
},
"objects": [
{
diff --git a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/nodejs-mongodb.json b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/nodejs-mongodb.json
index b3afae46e..0ce36dba5 100644
--- a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/nodejs-mongodb.json
+++ b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/nodejs-mongodb.json
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/nodejs-ex/blob/master/README.md.",
"labels": {
- "template": "nodejs-mongodb-example"
+ "template": "nodejs-mongodb-example",
+ "app": "nodejs-mongodb-example"
},
"objects": [
{
diff --git a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/rails-postgresql-persistent.json b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/rails-postgresql-persistent.json
index 1c03be28a..10e9382cc 100644
--- a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/rails-postgresql-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/rails-postgresql-persistent.json
@@ -4,7 +4,7 @@
"metadata": {
"name": "rails-pgsql-persistent",
"annotations": {
- "openshift.io/display-name": "Rails + PostgreSQL (Persistent)",
+ "openshift.io/display-name": "Rails + PostgreSQL",
"description": "An example Rails application with a PostgreSQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/rails-ex/blob/master/README.md.",
"tags": "quickstart,ruby,rails",
"iconClass": "icon-ruby",
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/rails-ex/blob/master/README.md.",
"labels": {
- "template": "rails-pgsql-persistent"
+ "template": "rails-pgsql-persistent",
+ "app": "rails-pgsql-persistent"
},
"objects": [
{
diff --git a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/rails-postgresql.json b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/rails-postgresql.json
index 240289d33..8ec2c8ea6 100644
--- a/roles/openshift_examples/files/examples/v3.9/quickstart-templates/rails-postgresql.json
+++ b/roles/openshift_examples/files/examples/v3.9/quickstart-templates/rails-postgresql.json
@@ -17,7 +17,8 @@
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/rails-ex/blob/master/README.md.",
"labels": {
- "template": "rails-postgresql-example"
+ "template": "rails-postgresql-example",
+ "app": "rails-postgresql-example"
},
"objects": [
{
diff --git a/roles/openshift_examples/meta/main.yml b/roles/openshift_examples/meta/main.yml
index 1a34c85fc..9f46a4683 100644
--- a/roles/openshift_examples/meta/main.yml
+++ b/roles/openshift_examples/meta/main.yml
@@ -13,3 +13,4 @@ galaxy_info:
- cloud
dependencies:
- role: lib_utils
+- role: openshift_facts
diff --git a/roles/openshift_excluder/tasks/verify_excluder.yml b/roles/openshift_excluder/tasks/verify_excluder.yml
index 4f5277fa2..22a3fcd3b 100644
--- a/roles/openshift_excluder/tasks/verify_excluder.yml
+++ b/roles/openshift_excluder/tasks/verify_excluder.yml
@@ -3,7 +3,7 @@
# - excluder
- name: Get available excluder version
repoquery:
- name: "{{ excluder }}"
+ name: "{{ excluder }}{{ '-' ~ r_openshift_excluder_upgrade_target.split('.')[0:2] | join('.') ~ '*' if r_openshift_excluder_upgrade_target is defined else '' }}"
ignore_excluders: true
register: repoquery_out
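
The new repoquery name pins the excluder package to the upgrade target's major.minor stream instead of querying the bare package name. The Jinja expression reduces to the following string manipulation (illustrative Python with a made-up helper; 'origin-excluder' is just an example value):

    # Python equivalent of the Jinja expression in the repoquery name above.
    def excluder_query_name(excluder, upgrade_target=None):
        if upgrade_target is None:
            return excluder
        stream = ".".join(upgrade_target.split(".")[0:2])   # '3.9.0' -> '3.9'
        return "{0}-{1}*".format(excluder, stream)

    assert excluder_query_name("origin-excluder") == "origin-excluder"
    assert excluder_query_name("origin-excluder", "3.9.0") == "origin-excluder-3.9*"
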
diff --git a/roles/openshift_expand_partition/tasks/main.yml b/roles/openshift_expand_partition/tasks/main.yml
index 5ae863871..b38ebdfb4 100644
--- a/roles/openshift_expand_partition/tasks/main.yml
+++ b/roles/openshift_expand_partition/tasks/main.yml
@@ -8,7 +8,7 @@
- name: Determine if growpart is installed
command: "rpm -q cloud-utils-growpart"
register: has_growpart
- failed_when: has_growpart.cr != 0 and 'package cloud-utils-growpart is not installed' not in has_growpart.stdout
+ failed_when: has_growpart.rc != 0 and 'package cloud-utils-growpart is not installed' not in has_growpart.stdout
changed_when: false
when: openshift_is_containerized | bool
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index d7c358a2f..26f0525e9 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -1465,6 +1465,11 @@ class OpenShiftFacts(object):
if metadata:
metadata['project']['attributes'].pop('sshKeys', None)
metadata['instance'].pop('serviceAccounts', None)
+ elif bios_vendor == 'Amazon EC2':
+ # Adds support for Amazon EC2 C5 instance types
+ provider = 'aws'
+ metadata_url = 'http://169.254.169.254/latest/meta-data/'
+ metadata = get_provider_metadata(metadata_url)
elif virt_type == 'xen' and virt_role == 'guest' and re.match(r'.*\.amazon$', product_version):
provider = 'aws'
metadata_url = 'http://169.254.169.254/latest/meta-data/'
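
The new elif catches instances whose BIOS vendor is reported as 'Amazon EC2' (the case on C5 and other non-Xen instance types), so AWS is detected before the older Xen-guest heuristic is tried. A condensed, illustrative restatement of that ordering (not the full openshift_facts provider-detection code):

    import re

    # Condensed sketch of the AWS branch of provider detection shown above.
    def looks_like_aws(bios_vendor, virt_type, virt_role, product_version):
        if bios_vendor == 'Amazon EC2':
            # C5-class instances report Amazon EC2 directly as BIOS vendor.
            return True
        # Older Xen-based instance types expose an '*.amazon' product version.
        return (virt_type == 'xen' and virt_role == 'guest'
                and re.match(r'.*\.amazon$', product_version or '') is not None)

    assert looks_like_aws('Amazon EC2', 'kvm', 'guest', '')
    assert looks_like_aws('Xen', 'xen', 'guest', '4.2.amazon')
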
diff --git a/roles/openshift_grafana/defaults/main.yml b/roles/openshift_grafana/defaults/main.yml
new file mode 100644
index 000000000..7fd7a085d
--- /dev/null
+++ b/roles/openshift_grafana/defaults/main.yml
@@ -0,0 +1,12 @@
+---
+gf_body_tmp:
+ name: grafana_name
+ type: prometheus
+ typeLogoUrl: ''
+ access: proxy
+ url: prometheus_url
+ basicAuth: false
+ withCredentials: false
+ jsonData:
+ tlsSkipVerify: true
+ token: satoken
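
gf_body_tmp above is the skeleton of a Grafana datasource payload; the name, Prometheus URL and service-account token are presumably filled in elsewhere in the role. As a loose, hypothetical sketch of how such a payload could be submitted to Grafana's /api/datasources endpoint (the URL, token and datasource name below are placeholders, not values taken from this role):

    import requests

    # Hypothetical sketch only: fill the gf_body_tmp skeleton and create the
    # datasource over Grafana's HTTP API. All concrete values are placeholders.
    payload = {
        'name': 'prometheus-ocp',
        'type': 'prometheus',
        'typeLogoUrl': '',
        'access': 'proxy',
        'url': 'https://prometheus.example.com',
        'basicAuth': False,
        'withCredentials': False,
        'jsonData': {'tlsSkipVerify': True, 'token': 'REPLACE_WITH_SA_TOKEN'},
    }

    resp = requests.post(
        'https://grafana-ocp.example.com/api/datasources',
        json=payload,
        headers={'Authorization': 'Bearer REPLACE_WITH_SA_TOKEN'},
        verify=False,  # the template's route reencrypts with a service cert
    )
    resp.raise_for_status()
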
diff --git a/roles/openshift_grafana/files/grafana-ocp-oauth.yml b/roles/openshift_grafana/files/grafana-ocp-oauth.yml
new file mode 100644
index 000000000..82fa89004
--- /dev/null
+++ b/roles/openshift_grafana/files/grafana-ocp-oauth.yml
@@ -0,0 +1,661 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: grafana-ocp
+ annotations:
+ "openshift.io/display-name": Grafana ocp
+ description: |
+ Grafana server with patched Prometheus datasource.
+ iconClass: icon-cogs
+ tags: "metrics,monitoring,grafana,prometheus"
+parameters:
+- description: The location of the grafana image
+ name: IMAGE_GF
+ value: mrsiano/grafana-ocp:latest
+- description: The location of the proxy image
+ name: IMAGE_PROXY
+ value: openshift/oauth-proxy:v1.0.0
+- description: External URL for the grafana route
+ name: ROUTE_URL
+ value: ""
+- description: The namespace to instantiate grafana under. Defaults to 'grafana'.
+ name: NAMESPACE
+ value: grafana
+- description: The session secret for the proxy
+ name: SESSION_SECRET
+ generate: expression
+ from: "[a-zA-Z0-9]{43}"
+objects:
+- apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: grafana-ocp
+ namespace: "${NAMESPACE}"
+ annotations:
+ serviceaccounts.openshift.io/oauth-redirectreference.primary: '{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"grafana-ocp"}}'
+- apiVersion: authorization.openshift.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ name: gf-cluster-reader
+ roleRef:
+ name: cluster-reader
+ subjects:
+ - kind: ServiceAccount
+ name: grafana-ocp
+ namespace: "${NAMESPACE}"
+- apiVersion: route.openshift.io/v1
+ kind: Route
+ metadata:
+ name: grafana-ocp
+ namespace: "${NAMESPACE}"
+ spec:
+ host: "${ROUTE_URL}"
+ to:
+ name: grafana-ocp
+ tls:
+ termination: Reencrypt
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: grafana-ocp
+ annotations:
+ prometheus.io/scrape: "true"
+ prometheus.io/scheme: https
+ service.alpha.openshift.io/serving-cert-secret-name: gf-tls
+ namespace: "${NAMESPACE}"
+ labels:
+ metrics-infra: grafana-ocp
+ name: grafana-ocp
+ spec:
+ ports:
+ - name: grafana-ocp
+ port: 443
+ protocol: TCP
+ targetPort: 8443
+ selector:
+ app: grafana-ocp
+- apiVersion: v1
+ kind: Secret
+ metadata:
+ name: gf-proxy
+ namespace: "${NAMESPACE}"
+ stringData:
+ session_secret: "${SESSION_SECRET}="
+# Deploy Prometheus behind an oauth proxy
+- apiVersion: extensions/v1beta1
+ kind: Deployment
+ metadata:
+ labels:
+ app: grafana-ocp
+ name: grafana-ocp
+ namespace: "${NAMESPACE}"
+ spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: grafana-ocp
+ template:
+ metadata:
+ labels:
+ app: grafana-ocp
+ name: grafana-ocp-app
+ spec:
+ serviceAccountName: grafana-ocp
+ containers:
+ - name: oauth-proxy
+ image: ${IMAGE_PROXY}
+ imagePullPolicy: IfNotPresent
+ ports:
+ - containerPort: 8443
+ name: web
+ args:
+ - -https-address=:8443
+ - -http-address=
+ - -email-domain=*
+ - -client-id=system:serviceaccount:${NAMESPACE}:grafana-ocp
+ - -upstream=http://localhost:3000
+ - -provider=openshift
+# - '-openshift-delegate-urls={"/api/datasources": {"resource": "namespace", "verb": "get", "resourceName": "grafana-ocp", "namespace": "${NAMESPACE}"}}'
+ - '-openshift-sar={"namespace": "${NAMESPACE}", "verb": "list", "resource": "services"}'
+ - -tls-cert=/etc/tls/private/tls.crt
+ - -tls-key=/etc/tls/private/tls.key
+ - -client-secret-file=/var/run/secrets/kubernetes.io/serviceaccount/token
+ - -cookie-secret-file=/etc/proxy/secrets/session_secret
+ - -skip-auth-regex=^/metrics,/api/datasources,/api/dashboards
+ volumeMounts:
+ - mountPath: /etc/tls/private
+ name: gf-tls
+ - mountPath: /etc/proxy/secrets
+ name: secrets
+
+ - name: grafana-ocp
+ image: ${IMAGE_GF}
+ ports:
+ - name: grafana-http
+ containerPort: 3000
+ volumeMounts:
+ - mountPath: "/root/go/src/github.com/grafana/grafana/data"
+ name: gf-data
+ - mountPath: "/root/go/src/github.com/grafana/grafana/conf"
+ name: gfconfig
+ - mountPath: /etc/tls/private
+ name: gf-tls
+ - mountPath: /etc/proxy/secrets
+ name: secrets
+ command:
+ - "./bin/grafana-server"
+
+ volumes:
+ - name: gfconfig
+ configMap:
+ name: gf-config
+ - name: secrets
+ secret:
+ secretName: gf-proxy
+ - name: gf-tls
+ secret:
+ secretName: gf-tls
+ - emptyDir: {}
+ name: gf-data
+- apiVersion: v1
+ kind: ConfigMap
+ metadata:
+ name: gf-config
+ namespace: "${NAMESPACE}"
+ data:
+ defaults.ini: |-
+ ##################### Grafana Configuration Defaults #####################
+ #
+ # Do not modify this file in grafana installs
+ #
+
+ # possible values : production, development
+ app_mode = production
+
+ # instance name, defaults to HOSTNAME environment variable value or hostname if HOSTNAME var is empty
+ instance_name = ${HOSTNAME}
+
+ #################################### Paths ###############################
+ [paths]
+ # Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used)
+ #
+ data = data
+ #
+ # Directory where grafana can store logs
+ #
+ logs = data/log
+ #
+ # Directory where grafana will automatically scan and look for plugins
+ #
+ plugins = data/plugins
+
+ #################################### Server ##############################
+ [server]
+ # Protocol (http, https, socket)
+ protocol = http
+
+ # The ip address to bind to, empty will bind to all interfaces
+ http_addr =
+
+ # The http port to use
+ http_port = 3000
+
+ # The public facing domain name used to access grafana from a browser
+ domain = localhost
+
+ # Redirect to correct domain if host header does not match domain
+ # Prevents DNS rebinding attacks
+ enforce_domain = false
+
+ # The full public facing url
+ root_url = %(protocol)s://%(domain)s:%(http_port)s/
+
+ # Log web requests
+ router_logging = false
+
+ # the path relative working path
+ static_root_path = public
+
+ # enable gzip
+ enable_gzip = false
+
+ # https certs & key file
+ cert_file = /etc/tls/private/tls.crt
+ cert_key = /etc/tls/private/tls.key
+
+ # Unix socket path
+ socket = /tmp/grafana.sock
+
+ #################################### Database ############################
+ [database]
+ # You can configure the database connection by specifying type, host, name, user and password
+ # as separate properties or as on string using the url property.
+
+ # Either "mysql", "postgres" or "sqlite3", it's your choice
+ type = sqlite3
+ host = 127.0.0.1:3306
+ name = grafana
+ user = root
+ # If the password contains # or ; you have to wrap it with triple quotes. Ex """#password;"""
+ password =
+ # Use either URL or the previous fields to configure the database
+ # Example: mysql://user:secret@host:port/database
+ url =
+
+ # Max idle conn setting default is 2
+ max_idle_conn = 2
+
+ # Max conn setting default is 0 (mean not set)
+ max_open_conn =
+
+ # For "postgres", use either "disable", "require" or "verify-full"
+ # For "mysql", use either "true", "false", or "skip-verify".
+ ssl_mode = disable
+
+ ca_cert_path =
+ client_key_path =
+ client_cert_path =
+ server_cert_name =
+
+ # For "sqlite3" only, path relative to data_path setting
+ path = grafana.db
+
+ #################################### Session #############################
+ [session]
+ # Either "memory", "file", "redis", "mysql", "postgres", "memcache", default is "file"
+ provider = file
+
+ # Provider config options
+ # memory: not have any config yet
+ # file: session dir path, is relative to grafana data_path
+ # redis: config like redis server e.g. `addr=127.0.0.1:6379,pool_size=100,db=grafana`
+ # postgres: user=a password=b host=localhost port=5432 dbname=c sslmode=disable
+ # mysql: go-sql-driver/mysql dsn config string, examples:
+ # `user:password@tcp(127.0.0.1:3306)/database_name`
+ # `user:password@unix(/var/run/mysqld/mysqld.sock)/database_name`
+ # memcache: 127.0.0.1:11211
+
+
+ provider_config = sessions
+
+ # Session cookie name
+ cookie_name = grafana_sess
+
+ # If you use session in https only, default is false
+ cookie_secure = false
+
+ # Session life time, default is 86400
+ session_life_time = 86400
+ gc_interval_time = 86400
+
+ #################################### Data proxy ###########################
+ [dataproxy]
+
+ # This enables data proxy logging, default is false
+ logging = false
+
+ #################################### Analytics ###########################
+ [analytics]
+ # Server reporting, sends usage counters to stats.grafana.org every 24 hours.
+ # No ip addresses are being tracked, only simple counters to track
+ # running instances, dashboard and error counts. It is very helpful to us.
+ # Change this option to false to disable reporting.
+ reporting_enabled = true
+
+ # Set to false to disable all checks to https://grafana.com
+ # for new versions (grafana itself and plugins), check is used
+ # in some UI views to notify that grafana or plugin update exists
+ # This option does not cause any auto updates, nor send any information
+ # only a GET request to https://grafana.com to get latest versions
+ check_for_updates = true
+
+ # Google Analytics universal tracking code, only enabled if you specify an id here
+ google_analytics_ua_id =
+
+ # Google Tag Manager ID, only enabled if you specify an id here
+ google_tag_manager_id =
+
+ #################################### Security ############################
+ [security]
+ # default admin user, created on startup
+ admin_user = admin
+
+ # default admin password, can be changed before first start of grafana, or in profile settings
+ admin_password = admin
+
+ # used for signing
+ secret_key = SW2YcwTIb9zpOOhoPsMm
+
+ # Auto-login remember days
+ login_remember_days = 7
+ cookie_username = grafana_user
+ cookie_remember_name = grafana_remember
+
+ # disable gravatar profile images
+ disable_gravatar = false
+
+ # data source proxy whitelist (ip_or_domain:port separated by spaces)
+ data_source_proxy_whitelist =
+
+ [snapshots]
+ # snapshot sharing options
+ external_enabled = true
+ external_snapshot_url = https://snapshots-origin.raintank.io
+ external_snapshot_name = Publish to snapshot.raintank.io
+
+ # remove expired snapshot
+ snapshot_remove_expired = true
+
+ # remove snapshots after 90 days
+ snapshot_TTL_days = 90
+
+ #################################### Users ####################################
+ [users]
+ # disable user signup / registration
+ allow_sign_up = true
+
+ # Allow non admin users to create organizations
+ allow_org_create = true
+
+ # Set to true to automatically assign new users to the default organization (id 1)
+ auto_assign_org = true
+
+ # Default role new users will be automatically assigned (if auto_assign_org above is set to true)
+ auto_assign_org_role = Admin
+
+ # Require email validation before sign up completes
+ verify_email_enabled = false
+
+ # Background text for the user field on the login page
+ login_hint = email or username
+
+ # Default UI theme ("dark" or "light")
+ default_theme = dark
+
+ # External user management
+ external_manage_link_url =
+ external_manage_link_name =
+ external_manage_info =
+
+ [auth]
+ # Set to true to disable (hide) the login form, useful if you use OAuth
+ disable_login_form = true
+
+ # Set to true to disable the signout link in the side menu. useful if you use auth.proxy
+ disable_signout_menu = true
+
+ #################################### Anonymous Auth ######################
+ [auth.anonymous]
+ # enable anonymous access
+ enabled = true
+
+ # specify organization name that should be used for unauthenticated users
+ org_name = Main Org.
+
+ # specify role for unauthenticated users
+ org_role = Admin
+
+ #################################### Github Auth #########################
+ [auth.github]
+ enabled = false
+ allow_sign_up = true
+ client_id = some_id
+ client_secret = some_secret
+ scopes = user:email
+ auth_url = https://github.com/login/oauth/authorize
+ token_url = https://github.com/login/oauth/access_token
+ api_url = https://api.github.com/user
+ team_ids =
+ allowed_organizations =
+
+ #################################### Google Auth #########################
+ [auth.google]
+ enabled = false
+ allow_sign_up = true
+ client_id = some_client_id
+ client_secret = some_client_secret
+ scopes = https://www.googleapis.com/auth/userinfo.profile https://www.googleapis.com/auth/userinfo.email
+ auth_url = https://accounts.google.com/o/oauth2/auth
+ token_url = https://accounts.google.com/o/oauth2/token
+ api_url = https://www.googleapis.com/oauth2/v1/userinfo
+ allowed_domains =
+ hosted_domain =
+
+ #################################### Grafana.com Auth ####################
+ # legacy key names (so they work in env variables)
+ [auth.grafananet]
+ enabled = false
+ allow_sign_up = true
+ client_id = some_id
+ client_secret = some_secret
+ scopes = user:email
+ allowed_organizations =
+
+ [auth.grafana_com]
+ enabled = false
+ allow_sign_up = true
+ client_id = some_id
+ client_secret = some_secret
+ scopes = user:email
+ allowed_organizations =
+
+ #################################### Generic OAuth #######################
+ [auth.generic_oauth]
+ name = OAuth
+ enabled = false
+ allow_sign_up = true
+ client_id = some_id
+ client_secret = some_secret
+ scopes = user:email
+ auth_url =
+ token_url =
+ api_url =
+ team_ids =
+ allowed_organizations =
+
+ #################################### Basic Auth ##########################
+ [auth.basic]
+ enabled = false
+
+ #################################### Auth Proxy ##########################
+ [auth.proxy]
+ enabled = true
+ header_name = X-WEBAUTH-USER
+ header_property = username
+ auto_sign_up = true
+ ldap_sync_ttl = 60
+ whitelist =
+
+ #################################### Auth LDAP ###########################
+ [auth.ldap]
+ enabled = false
+ config_file = /etc/grafana/ldap.toml
+ allow_sign_up = true
+
+ #################################### SMTP / Emailing #####################
+ [smtp]
+ enabled = false
+ host = localhost:25
+ user =
+ # If the password contains # or ; you have to wrap it with triple quotes. Ex """#password;"""
+ password =
+ cert_file =
+ key_file =
+ skip_verify = false
+ from_address = admin@grafana.localhost
+ from_name = Grafana
+ ehlo_identity =
+
+ [emails]
+ welcome_email_on_sign_up = false
+ templates_pattern = emails/*.html
+
+ #################################### Logging ##########################
+ [log]
+ # Either "console", "file", "syslog". Default is console and file
+ # Use space to separate multiple modes, e.g. "console file"
+ mode = console file
+
+ # Either "debug", "info", "warn", "error", "critical", default is "info"
+ level = error
+
+ # optional settings to set different levels for specific loggers. Ex filters = sqlstore:debug
+ filters =
+
+ # For "console" mode only
+ [log.console]
+ level =
+
+ # log line format, valid options are text, console and json
+ format = console
+
+ # For "file" mode only
+ [log.file]
+ level =
+
+ # log line format, valid options are text, console and json
+ format = text
+
+ # This enables automated log rotation (switch for the following options), default is true
+ log_rotate = true
+
+ # Max number of lines per file, default is 1000000
+ max_lines = 1000000
+
+ # Max size shift of a single file; default is 28, which means 1 << 28 (256MB)
+ max_size_shift = 28
+
+ # Segment log daily, default is true
+ daily_rotate = true
+
+ # Number of days to keep log files (delete after max days), default is 7
+ max_days = 7
+
+ [log.syslog]
+ level =
+
+ # log line format, valid options are text, console and json
+ format = text
+
+ # Syslog network type and address. This can be udp, tcp, or unix. If left blank, the default unix endpoints will be used.
+ network =
+ address =
+
+ # Syslog facility. user, daemon and local0 through local7 are valid.
+ facility =
+
+ # Syslog tag. By default, the process' argv[0] is used.
+ tag =
+
+
+ #################################### AMQP Event Publisher ################
+ [event_publisher]
+ enabled = false
+ rabbitmq_url = amqp://localhost/
+ exchange = grafana_events
+
+ #################################### Dashboard JSON files ################
+ [dashboards.json]
+ enabled = false
+ path = /var/lib/grafana/dashboards
+
+ #################################### Usage Quotas ########################
+ [quota]
+ enabled = false
+
+ #### set quotas to -1 to make unlimited. ####
+ # limit number of users per Org.
+ org_user = 10
+
+ # limit number of dashboards per Org.
+ org_dashboard = 100
+
+ # limit number of data_sources per Org.
+ org_data_source = 10
+
+ # limit number of api_keys per Org.
+ org_api_key = 10
+
+ # limit number of orgs a user can create.
+ user_org = 10
+
+ # Global limit of users.
+ global_user = -1
+
+ # global limit of orgs.
+ global_org = -1
+
+ # global limit of dashboards
+ global_dashboard = -1
+
+ # global limit of api_keys
+ global_api_key = -1
+
+ # global limit on number of logged in users.
+ global_session = -1
+
+ #################################### Alerting ############################
+ [alerting]
+ # Enable/disable the alerting engine & UI features
+ enabled = true
+ # Makes it possible to turn off alert rule execution but alerting UI is visible
+ execute_alerts = true
+
+ #################################### Internal Grafana Metrics ############
+ # Metrics available at the HTTP API URL /api/metrics
+ [metrics]
+ enabled = true
+ interval_seconds = 10
+
+ # Send internal Grafana metrics to graphite
+ [metrics.graphite]
+ # Enabled by setting the address (ex localhost:2003)
+ address =
+ prefix = prod.grafana.%(instance_name)s.
+
+ [grafana_net]
+ url = https://grafana.com
+
+ [grafana_com]
+ url = https://grafana.com
+
+ #################################### Distributed tracing ############
+ [tracing.jaeger]
+ # jaeger destination (ex localhost:6831)
+ address =
+ # tag that will always be included when creating new spans. ex (tag1:value1,tag2:value2)
+ always_included_tag =
+ # Type specifies the type of the sampler: const, probabilistic, rateLimiting, or remote
+ sampler_type = const
+ # jaeger samplerconfig param
+ # for "const" sampler, 0 or 1 for always false/true respectively
+ # for "probabilistic" sampler, a probability between 0 and 1
+ # for "rateLimiting" sampler, the number of spans per second
+ # for "remote" sampler, param is the same as for "probabilistic"
+ # and indicates the initial sampling rate before the actual one
+ # is received from the mothership
+ sampler_param = 1
+
+ #################################### External Image Storage ##############
+ [external_image_storage]
+ # You can choose between (s3, webdav, gcs)
+ provider =
+
+ [external_image_storage.s3]
+ bucket_url =
+ bucket =
+ region =
+ path =
+ access_key =
+ secret_key =
+
+ [external_image_storage.webdav]
+ url =
+ username =
+ password =
+ public_url =
+
+ [external_image_storage.gcs]
+ key_file =
+ bucket =
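
The config above turns on header-based authentication ([auth.proxy] with header_name = X-WEBAUTH-USER and an empty whitelist). A minimal sketch of how a fronting proxy or test client could exercise that setting against the Grafana HTTP API follows; the endpoint URL http://localhost:3000 and the use of the `admin` user are assumptions for illustration, not values taken from this role.

# Minimal sketch (not part of the role): exercising the [auth.proxy] settings above.
# Assumes Grafana listens on http://localhost:3000; with an empty whitelist,
# any client that can reach Grafana may set the trusted header.
import requests

GRAFANA_URL = "http://localhost:3000"      # assumed local endpoint
AUTH_HEADER = {"X-WEBAUTH-USER": "admin"}  # header_name / header_property from [auth.proxy]

# With auto_sign_up = true, an unknown username in the header is created on first request.
resp = requests.get(f"{GRAFANA_URL}/api/user", headers=AUTH_HEADER, timeout=5)
resp.raise_for_status()
print(resp.json())                         # the Grafana user record resolved from the header
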
diff --git a/roles/openshift_grafana/files/grafana-ocp.yml b/roles/openshift_grafana/files/grafana-ocp.yml
new file mode 100644
index 000000000..bc7b4b286
--- /dev/null
+++ b/roles/openshift_grafana/files/grafana-ocp.yml
@@ -0,0 +1,76 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: grafana-ocp
+ annotations:
+ "openshift.io/display-name": Grafana ocp
+ description: |
+ Grafana server with patched Prometheus datasource.
+ iconClass: icon-cogs
+ tags: "metrics,monitoring,grafana,prometheus"
+parameters:
+- description: External URL for the grafana route
+ name: ROUTE_URL
+ value: ""
+- description: The namespace to instantiate grafana under. Defaults to 'grafana'.
+ name: NAMESPACE
+ value: grafana
+objects:
+- apiVersion: route.openshift.io/v1
+ kind: Route
+ metadata:
+ name: grafana-ocp
+ namespace: "${NAMESPACE}"
+ spec:
+ host: "${ROUTE_URL}"
+ to:
+ name: grafana-ocp
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: grafana-ocp
+ namespace: "${NAMESPACE}"
+ labels:
+ metrics-infra: grafana-ocp
+ name: grafana-ocp
+ spec:
+ selector:
+ name: grafana-ocp
+ ports:
+ - port: 8082
+ protocol: TCP
+ targetPort: grafana-http
+- apiVersion: v1
+ kind: ReplicationController
+ metadata:
+ name: grafana-ocp
+ namespace: "${NAMESPACE}"
+ labels:
+ metrics-infra: grafana-ocp
+ name: grafana-ocp
+ spec:
+ selector:
+ name: grafana-ocp
+ replicas: 1
+ template:
+ version: v1
+ metadata:
+ labels:
+ metrics-infra: grafana-ocp
+ name: grafana-ocp
+ spec:
+ volumes:
+ - name: data
+ emptyDir: {}
+ containers:
+ - image: "mrsiano/grafana-ocp:latest"
+ name: grafana-ocp
+ ports:
+ - name: grafana-http
+ containerPort: 3000
+ volumeMounts:
+ - name: data
+ mountPath: "/root/go/src/github.com/grafana/grafana/data"
+ command:
+ - "./bin/grafana-server"
diff --git a/roles/openshift_grafana/files/openshift-cluster-monitoring.json b/roles/openshift_grafana/files/openshift-cluster-monitoring.json
new file mode 100644
index 000000000..f59ca997f
--- /dev/null
+++ b/roles/openshift_grafana/files/openshift-cluster-monitoring.json
@@ -0,0 +1,5138 @@
+{
+ "dashboard": {
+ "description": "Monitors Openshift cluster using Prometheus. Shows overall cluster CPU / Memory / Filesystem usage as well as individual pod, containers, systemd services statistics. Uses cAdvisor metrics only.",
+ "editable": true,
+ "gnetId": 315,
+ "graphTooltip": 0,
+ "hideControls": false,
+ "id": null,
+ "links": [],
+ "rows": [
+ {
+ "collapse": false,
+ "height": "200px",
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {},
+ "height": "200px",
+ "id": 32,
+ "legend": {
+ "alignAsTable": false,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": false,
+ "show": false,
+ "sideWidth": 200,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum (irate (container_network_receive_bytes_total{kubernetes_io_hostname=~\"^$Node$\"}[2m]))",
+ "format": "time_series",
+ "instant": false,
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "Received",
+ "metric": "network",
+ "refId": "A",
+ "step": 1
+ },
+ {
+ "expr": "- sum (irate (container_network_transmit_bytes_total{kubernetes_io_hostname=~\"^$Node$\"}[2m]))",
+ "format": "time_series",
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "Sent",
+ "metric": "network",
+ "refId": "B",
+ "step": 1
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Network I/O pressure",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "transparent": false,
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "Bps",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "Bps",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "Network I/O pressure",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": false,
+ "height": "250px",
+ "panels": [
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": true,
+ "colors": [
+ "rgba(50, 172, 45, 0.97)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(245, 54, 54, 0.9)"
+ ],
+ "datasource": "${DS_PR}",
+ "editable": true,
+ "error": false,
+ "format": "percent",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": true,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "height": "180px",
+ "id": 4,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "span": 4,
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "sum (container_memory_working_set_bytes{id=\"/\",kubernetes_io_hostname=~\"^$Node$\"}) / sum (machine_memory_bytes{kubernetes_io_hostname=~\"^$Node$\"}) * 100",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "refId": "A",
+ "step": 20
+ }
+ ],
+ "thresholds": "",
+ "title": "Cluster memory usage",
+ "transparent": false,
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": true,
+ "colors": [
+ "rgba(50, 172, 45, 0.97)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(245, 54, 54, 0.9)"
+ ],
+ "datasource": "${DS_PR}",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "format": "percent",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": true,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "height": "180px",
+ "id": 6,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "span": 4,
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "sum (irate (container_cpu_usage_seconds_total{id=\"/\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) / sum (machine_cpu_cores{kubernetes_io_hostname=~\"^$Node$\"}) * 100",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "refId": "A",
+ "step": 20
+ }
+ ],
+ "thresholds": "",
+ "title": "Cluster CPU usage ",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": true,
+ "colors": [
+ "rgba(50, 172, 45, 0.97)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(245, 54, 54, 0.9)"
+ ],
+ "datasource": "${DS_PR}",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "format": "percent",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": true,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "height": "180px",
+ "id": 7,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "span": 4,
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "sum (container_fs_usage_bytes{device=~\"^/dev/mapper/docker_.*\",id=\"/\",kubernetes_io_hostname=~\"^$Node$\"}) / sum (container_fs_limit_bytes{device=~\"^/dev/mapper/docker_.*\",id=\"/\",kubernetes_io_hostname=~\"^$Node$\"}) * 100",
+ "format": "time_series",
+ "hide": false,
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "",
+ "metric": "",
+ "refId": "A",
+ "step": 20
+ }
+ ],
+ "thresholds": "",
+ "title": "Cluster filesystem usage",
+ "type": "singlestat",
+ "valueFontSize": "80%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "rgba(50, 172, 45, 0.97)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(245, 54, 54, 0.9)"
+ ],
+ "datasource": "${DS_PR}",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "format": "bytes",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "height": "1px",
+ "id": 9,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "20%",
+ "prefix": "",
+ "prefixFontSize": "20%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "span": 2,
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "sum (container_memory_working_set_bytes{id=\"/\",kubernetes_io_hostname=~\"^$Node$\"})",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "refId": "A",
+ "step": 20
+ }
+ ],
+ "thresholds": "",
+ "title": "Used",
+ "type": "singlestat",
+ "valueFontSize": "50%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "rgba(50, 172, 45, 0.97)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(245, 54, 54, 0.9)"
+ ],
+ "datasource": "${DS_PR}",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "format": "bytes",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "height": "1px",
+ "id": 10,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "span": 2,
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "sum (machine_memory_bytes{kubernetes_io_hostname=~\"^$Node$\"})",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "refId": "A",
+ "step": 20
+ }
+ ],
+ "thresholds": "",
+ "title": "Total",
+ "type": "singlestat",
+ "valueFontSize": "50%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "rgba(50, 172, 45, 0.97)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(245, 54, 54, 0.9)"
+ ],
+ "datasource": "${DS_PR}",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "format": "none",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "height": "1px",
+ "id": 11,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": " cores",
+ "postfixFontSize": "30%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "span": 2,
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "sum (irate (container_cpu_usage_seconds_total{id=\"/\",kubernetes_io_hostname=~\"^$Node$\"}[2m]))",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "refId": "A",
+ "step": 20
+ }
+ ],
+ "thresholds": "",
+ "title": "Used",
+ "type": "singlestat",
+ "valueFontSize": "50%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "rgba(50, 172, 45, 0.97)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(245, 54, 54, 0.9)"
+ ],
+ "datasource": "${DS_PR}",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "format": "none",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "height": "1px",
+ "id": 12,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": " cores",
+ "postfixFontSize": "30%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "span": 2,
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "sum (machine_cpu_cores{kubernetes_io_hostname=~\"^$Node$\"})",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "refId": "A",
+ "step": 20
+ }
+ ],
+ "thresholds": "",
+ "title": "Total",
+ "type": "singlestat",
+ "valueFontSize": "50%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "rgba(50, 172, 45, 0.97)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(245, 54, 54, 0.9)"
+ ],
+ "datasource": "${DS_PR}",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "format": "bytes",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "height": "1px",
+ "id": 13,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "span": 2,
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "sum (container_fs_usage_bytes{device=~\"^/dev/mapper/docker_.*$\",id=\"/\",kubernetes_io_hostname=~\"^$Node$\"})",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "refId": "A",
+ "step": 20
+ }
+ ],
+ "thresholds": "",
+ "title": "Used",
+ "type": "singlestat",
+ "valueFontSize": "50%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ },
+ {
+ "cacheTimeout": null,
+ "colorBackground": false,
+ "colorValue": false,
+ "colors": [
+ "rgba(50, 172, 45, 0.97)",
+ "rgba(237, 129, 40, 0.89)",
+ "rgba(245, 54, 54, 0.9)"
+ ],
+ "datasource": "${DS_PR}",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "format": "bytes",
+ "gauge": {
+ "maxValue": 100,
+ "minValue": 0,
+ "show": false,
+ "thresholdLabels": false,
+ "thresholdMarkers": true
+ },
+ "height": "1px",
+ "id": 14,
+ "interval": null,
+ "links": [],
+ "mappingType": 1,
+ "mappingTypes": [
+ {
+ "name": "value to text",
+ "value": 1
+ },
+ {
+ "name": "range to text",
+ "value": 2
+ }
+ ],
+ "maxDataPoints": 100,
+ "nullPointMode": "connected",
+ "nullText": null,
+ "postfix": "",
+ "postfixFontSize": "50%",
+ "prefix": "",
+ "prefixFontSize": "50%",
+ "rangeMaps": [
+ {
+ "from": "null",
+ "text": "N/A",
+ "to": "null"
+ }
+ ],
+ "span": 2,
+ "sparkline": {
+ "fillColor": "rgba(31, 118, 189, 0.18)",
+ "full": false,
+ "lineColor": "rgb(31, 120, 193)",
+ "show": false
+ },
+ "tableColumn": "",
+ "targets": [
+ {
+ "expr": "sum (container_fs_limit_bytes{device=~\"^/dev/mapper/docker_.*$\",id=\"/\",kubernetes_io_hostname=~\"^$Node$\"})",
+ "format": "time_series",
+ "hide": false,
+ "interval": "",
+ "intervalFactor": 1,
+ "refId": "A",
+ "step": 20
+ }
+ ],
+ "thresholds": "",
+ "title": "Total",
+ "type": "singlestat",
+ "valueFontSize": "50%",
+ "valueMaps": [
+ {
+ "op": "=",
+ "text": "N/A",
+ "value": "null"
+ }
+ ],
+ "valueName": "current"
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "Total usage",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": 250,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 33,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum (irate (container_cpu_usage_seconds_total{id=\"/\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) / sum (machine_cpu_cores{kubernetes_io_hostname=~\"^$Node$\"}) ",
+ "format": "time_series",
+ "hide": false,
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "overall cpu usage",
+ "refId": "A",
+ "step": 1
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Cluster CPU Usage",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "percent",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "Dashboard Row",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": "250px",
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "decimals": 3,
+ "editable": true,
+ "error": false,
+ "fill": 0,
+ "grid": {},
+ "height": "",
+ "id": 17,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": true,
+ "targets": [
+ {
+ "expr": "sum (irate (container_cpu_usage_seconds_total{image!=\"\",name=~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (pod_name) * 100",
+ "format": "time_series",
+ "hide": false,
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "{{ pod_name }}",
+ "metric": "container_cpu",
+ "refId": "A",
+ "step": 2
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Pods CPU usage ",
+ "tooltip": {
+ "msResolution": true,
+ "shared": true,
+ "sort": 2,
+ "value_type": "cumulative"
+ },
+ "transparent": false,
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "percent",
+ "label": "% Usage",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "Pods CPU usage",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": "250px",
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "decimals": 3,
+ "editable": true,
+ "error": false,
+ "fill": 0,
+ "grid": {},
+ "height": "",
+ "id": 24,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "hideEmpty": false,
+ "hideZero": false,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sideWidth": null,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": true,
+ "targets": [
+ {
+ "expr": "sum (irate (container_cpu_usage_seconds_total{image!=\"\",name=~\"^k8s_.*\",container_name!=\"POD\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (container_name, pod_name)",
+ "format": "time_series",
+ "hide": false,
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "pod: {{ pod_name }} | {{ container_name }}",
+ "metric": "container_cpu",
+ "refId": "A",
+ "step": 2
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Containers Cores Usage",
+ "tooltip": {
+ "msResolution": true,
+ "shared": true,
+ "sort": 2,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": "cores",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "Containers CPU usage",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": "250px",
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "decimals": 3,
+ "editable": true,
+ "error": false,
+ "fill": 0,
+ "grid": {},
+ "height": "",
+ "id": 23,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": true,
+ "targets": [
+ {
+ "expr": "sum (irate (container_cpu_usage_seconds_total{id!=\"/\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (id)",
+ "format": "time_series",
+ "hide": false,
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "{{ id }}",
+ "metric": "container_cpu",
+ "refId": "A",
+ "step": 2
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "System services CPU usage ",
+ "tooltip": {
+ "msResolution": true,
+ "shared": true,
+ "sort": 2,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": "cores",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "System services CPU usage",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": 411,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "decimals": 3,
+ "editable": true,
+ "error": false,
+ "fill": 0,
+ "grid": {},
+ "id": 34,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": false,
+ "show": true,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": true,
+ "targets": [
+ {
+ "expr": "sum (irate (container_memory_usage_bytes{id!=\"/\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (id)",
+ "format": "time_series",
+ "hide": false,
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "{{ id }}",
+ "metric": "container_cpu",
+ "refId": "A",
+ "step": 2
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "All processes Memory usage ",
+ "tooltip": {
+ "msResolution": true,
+ "shared": true,
+ "sort": 2,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "bytes",
+ "label": "cores",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "All processes CPU usage",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": "250px",
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "fill": 0,
+ "grid": {},
+ "id": 25,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sideWidth": 200,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": true,
+ "targets": [
+ {
+ "expr": "sum (container_memory_working_set_bytes{image!=\"\",name=~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\"}) by (pod_name)",
+ "format": "time_series",
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "{{ pod_name }}",
+ "metric": "container_memory_usage:sort_desc",
+ "refId": "A",
+ "step": 10
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Pods memory usage",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 2,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "bytes",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "Pods memory usage",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": "250px",
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "fill": 0,
+ "grid": {},
+ "id": 26,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sideWidth": 200,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": true,
+ "targets": [
+ {
+ "expr": "sum (container_memory_rss{systemd_service_name=\"\",kubernetes_io_hostname=~\"^$Node$\"}) by (systemd_service_name)",
+ "format": "time_series",
+ "hide": false,
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "{{ systemd_service_name }}",
+ "metric": "container_memory_usage:sort_desc",
+ "refId": "B",
+ "step": 2
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "System services memory usage",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 2,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "bytes",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "System services memory usage",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": "250px",
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "fill": 0,
+ "grid": {},
+ "id": 27,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sideWidth": 200,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": true,
+ "targets": [
+ {
+ "expr": "sum (container_memory_working_set_bytes{image!=\"\",name=~\"^k8s_.*\",container_name!=\"POD\",kubernetes_io_hostname=~\"^$Node$\"}) by (container_name, pod_name)",
+ "format": "time_series",
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "pod: {{ pod_name }} | {{ container_name }}",
+ "metric": "container_memory_usage:sort_desc",
+ "refId": "A",
+ "step": 10
+ },
+ {
+ "expr": "sum (container_memory_working_set_bytes{image!=\"\",name!~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\"}) by (kubernetes_io_hostname, name, image)",
+ "format": "time_series",
+ "hide": false,
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "docker: {{ kubernetes_io_hostname }} | {{ image }} ({{ name }})",
+ "metric": "container_memory_usage:sort_desc",
+ "refId": "B",
+ "step": 10
+ },
+ {
+ "expr": "sum (container_memory_working_set_bytes{rkt_container_name!=\"\",kubernetes_io_hostname=~\"^$Node$\"}) by (kubernetes_io_hostname, rkt_container_name)",
+ "format": "time_series",
+ "hide": false,
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "rkt: {{ kubernetes_io_hostname }} | {{ rkt_container_name }}",
+ "metric": "container_memory_usage:sort_desc",
+ "refId": "C",
+ "step": 10
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Containers memory usage",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 2,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "bytes",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "Containers memory usage",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": "500px",
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "fill": 0,
+ "grid": {},
+ "id": 28,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": false,
+ "show": true,
+ "sideWidth": 200,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": true,
+ "targets": [
+ {
+ "expr": "sum (container_memory_working_set_bytes{id!=\"/\",kubernetes_io_hostname=~\"^$Node$\"}) by (id)",
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "{{ id }}",
+ "metric": "container_memory_usage:sort_desc",
+ "refId": "A",
+ "step": 1
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "All processes memory usage",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 2,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "bytes",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "All processes memory usage",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": "250px",
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {},
+ "id": 30,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sideWidth": 200,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum (irate (container_network_receive_bytes_total{image!=\"\",name=~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (container_name, pod_name)",
+ "format": "time_series",
+ "hide": false,
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "-> pod: {{ pod_name }} | {{ container_name }}",
+ "metric": "network",
+ "refId": "B",
+ "step": 1
+ },
+ {
+ "expr": "- sum (irate (container_network_transmit_bytes_total{image!=\"\",name=~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (container_name, pod_name)",
+ "format": "time_series",
+ "hide": false,
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "<- pod: {{ pod_name }} | {{ container_name }}",
+ "metric": "network",
+ "refId": "D",
+ "step": 1
+ },
+ {
+ "expr": "sum (irate (container_network_receive_bytes_total{image!=\"\",name!~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (kubernetes_io_hostname, name, image)",
+ "format": "time_series",
+ "hide": false,
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "-> docker: {{ kubernetes_io_hostname }} | {{ image }} ({{ name }})",
+ "metric": "network",
+ "refId": "A",
+ "step": 1
+ },
+ {
+ "expr": "- sum (irate (container_network_transmit_bytes_total{image!=\"\",name!~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (kubernetes_io_hostname, name, image)",
+ "format": "time_series",
+ "hide": false,
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "<- docker: {{ kubernetes_io_hostname }} | {{ image }} ({{ name }})",
+ "metric": "network",
+ "refId": "C",
+ "step": 1
+ },
+ {
+ "expr": "sum (irate (container_network_transmit_bytes_total{rkt_container_name!=\"\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (kubernetes_io_hostname, rkt_container_name)",
+ "format": "time_series",
+ "hide": false,
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "-> rkt: {{ kubernetes_io_hostname }} | {{ rkt_container_name }}",
+ "metric": "network",
+ "refId": "E",
+ "step": 1
+ },
+ {
+ "expr": "- sum (irate (container_network_transmit_bytes_total{rkt_container_name!=\"\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (kubernetes_io_hostname, rkt_container_name)",
+ "format": "time_series",
+ "hide": false,
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "<- rkt: {{ kubernetes_io_hostname }} | {{ rkt_container_name }}",
+ "metric": "network",
+ "refId": "F",
+ "step": 1
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Containers network I/O ",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 2,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "Bps",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "Containers network I/O",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": 277,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {},
+ "id": 16,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sideWidth": 200,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum (irate (container_network_receive_bytes_total{image!=\"\",name=~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (pod_name)",
+ "format": "time_series",
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "-> {{ pod_name }}",
+ "metric": "network",
+ "refId": "A",
+ "step": 1
+ },
+ {
+ "expr": "- sum (irate (container_network_transmit_bytes_total{image!=\"\",name=~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (pod_name)",
+ "format": "time_series",
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "<- {{ pod_name }}",
+ "metric": "network",
+ "refId": "B",
+ "step": 1
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Pods network I/O ",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 2,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "Bps",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "Pods network I/O",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": "500px",
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "decimals": 2,
+ "editable": true,
+ "error": false,
+ "fill": 1,
+ "grid": {},
+ "id": 29,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": false,
+ "show": true,
+ "sideWidth": 200,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "connected",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum (irate (container_network_receive_bytes_total{id!=\"/\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (id)",
+ "format": "time_series",
+ "instant": true,
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "-> {{ id }}",
+ "metric": "network",
+ "refId": "A",
+ "step": 1
+ },
+ {
+ "expr": "- sum (irate (container_network_transmit_bytes_total{id!=\"/\",kubernetes_io_hostname=~\"^$Node$\"}[2m])) by (id)",
+ "format": "time_series",
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "<- {{ id }}",
+ "metric": "network",
+ "refId": "B",
+ "step": 1
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "All processes network I/O ",
+ "tooltip": {
+ "msResolution": false,
+ "shared": true,
+ "sort": 2,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "Bps",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": false
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": false,
+ "title": "All processes network I/O",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": 250,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 35,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sort": "avg",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(openshift_build_total) by (phase,reason)",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{ phase }} | {{ reason }}",
+ "refId": "A",
+ "step": 1
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "openshift_build_total",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 54,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "count(openshift_build_active_time_seconds{phase=\"running\"} offset 10m)",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "A",
+ "step": 2
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Returns the number of builds that have been running for more than 10 minutes (600 seconds).",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 55,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "count(openshift_build_active_time_seconds{phase=\"pending\"} offset 10m)",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "A",
+ "step": 2
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Returns the number of build that have been waiting at least 10 minutes (600 seconds) to start.",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 56,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(openshift_build_total{phase=\"Failed\"})",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "A",
+ "step": 2
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Returns the number of failed builds, regardless of the failure reason.",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 57,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "openshift_build_total{phase=\"Failed\",reason=\"FetchSourceFailed\"}",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "{{ instance }}",
+ "refId": "A",
+ "step": 1
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Returns the number of failed builds because of problems retrieving source from the associated Git repository.",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": true,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 58,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": false,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null as zero",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(openshift_build_total{phase=\"Complete\"})",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "A",
+ "step": 2
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Returns the number of successfully completed builds.",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 0,
+ "id": 59,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sort": "avg",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 1,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "openshift_build_total{phase=\"Failed\"} offset 5m",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{ reason }}",
+ "refId": "A",
+ "step": 2
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Returns the failed builds totals, per failure reason, from 5 minutes ago.",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "OpenShift Builds",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": 250,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 36,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(openshift_sdn_pod_setup_latency_sum)",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "A",
+ "step": 1
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "openshift_sdn_pod_setup_latency_sum",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 41,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(openshift_sdn_pod_teardown_latency{quantile=\"0.9\"}) by (instance)",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "A",
+ "step": 1
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "openshift_sdn_pod_teardown_latency",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 50,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sort": "avg",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "topk(10, (sum by (pod_name) (irate(container_network_receive_bytes_total{pod_name!=\"\"}[5m]))))",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "{{ pod_name }}",
+ "refId": "A",
+ "step": 1
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Top 10 pods doing the most receive network traffic",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "decbytes",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 37,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sort": "avg",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "openshift_sdn_pod_ips",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{ instance }} | {{ role }}",
+ "refId": "A",
+ "step": 1
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "openshift_sdn_pod_ips",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 39,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "garbage_collector_monitoring_route:openshift:io_v1_rate_limiter_use",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "A",
+ "step": 1
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "garbage_collector_monitoring_route:openshift:io_v1_rate_limiter_use",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 42,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sort": "avg",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "openshift_sdn_arp_cache_entries",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{ role }} | {{ instance }}",
+ "refId": "A",
+ "step": 1
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "openshift_sdn_arp_cache_entries",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 40,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "openshift_sdn_arp_cache_entries",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "A",
+ "step": 1
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "openshift_sdn_arp_cache_entries",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "OpenShift SDN",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": 250,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 44,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sort": "avg",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "irate(kubelet_pleg_relist_latency_microseconds{kubernetes_io_hostname=~\"$Node\",quantile=\"0.9\"}[2m])",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{ role }} | {{ instance }}",
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "kubelet_pleg_relist",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "µs",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 51,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sort": "avg",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "irate(kubelet_docker_operations_latency_microseconds{quantile=\"0.9\"}[2m])",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{ operation_type }}",
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "kubelet_docker_operations_latency_microseconds{quantile=\"0.9\"}",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "µs",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 52,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sort": "avg",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "kubelet_docker_operations_timeout",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{ operation_type }}",
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Returns a running count (not a rate) of docker operations that have timed out since the kubelet was started.",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 53,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sort": "avg",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "kubelet_docker_operations_errors",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "{{ operation_type }}",
+ "refId": "A",
+ "step": 2
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Returns a running count (not a rate) of docker operations that have failed since the kubelet was started.",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "Kubelet",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": 250,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 46,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sort": "avg",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "irate(scrape_samples_scraped[2m])",
+ "format": "time_series",
+ "hide": false,
+ "intervalFactor": 2,
+ "legendFormat": "{{ kubernetes_name }} | {{ instance }} ",
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "scrape_samples_scraped",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 68,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sort_desc(sum without (cpu) (irate(container_cpu_usage_seconds_total{container_name=\"prometheus\"}[5m])))",
+ "format": "time_series",
+ "interval": "1s",
+ "intervalFactor": 1,
+ "refId": "A",
+ "step": 2
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "CPU per instance of Prometheus container.",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "Prometheus",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": 250,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 48,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sort": "avg",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sort_desc(sum without (instance,type,client,contentType) (irate(apiserver_request_count{verb!~\"GET|LIST|WATCH\"}[2m]))) > 0",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{ resource }} || {{ verb }}",
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Number of mutating API requests being made to the control plane.",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 49,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sort": "avg",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sort_desc(sum without (instance,type,client,contentType) (irate(apiserver_request_count{verb=~\"GET|LIST|WATCH\"}[2m]))) > 0",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{ resource }} || {{ pod }}",
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Number of non-mutating API requests being made to the control plane.",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 74,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "endpoint_queue_latency",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": " quantile {{ quantile }}",
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "endpoint_queue_latency",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "ms",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "API Server",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": 250,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 61,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "etcd_disk_wal_fsync_duration_seconds_count",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "A",
+ "step": 10
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "etcd_disk_wal_fsync_duration_seconds_count",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "etcd",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": 250,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 62,
+ "legend": {
+ "alignAsTable": false,
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "rightSide": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 12,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(changes(container_start_time_seconds[10m]))",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "A",
+ "step": 2
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "The number of containers that start or restart over the last ten minutes.",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "Changes in your cluster",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": 250,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 63,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(machine_cpu_cores)",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Total number of cores in the cluster.",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 64,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(sort_desc(irate(container_cpu_usage_seconds_total{id=\"/\"}[5m])))",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Total number of consumed cores.",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 65,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sort_desc(sum by (kubernetes_io_hostname,type) (irate(container_cpu_usage_seconds_total{id=\"/\"}[5m])))",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "CPU consumed per node in the cluster.",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 66,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sort_desc(sum by (cpu,id,pod_name,container_name) (irate(container_cpu_usage_seconds_total{role=\"infra\"}[5m])))",
+ "format": "time_series",
+ "hide": false,
+ "intervalFactor": 2,
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "CPU consumption per system service or container on the infrastructure nodes.",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 67,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sort_desc(sum by (namespace) (irate(container_cpu_usage_seconds_total[5m])))",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "CPU consumed per namespace on the cluster.",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 47,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(irate(container_cpu_usage_seconds_total{id=\"/\"}[3m])) / sum(machine_cpu_cores)",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Percentage of total cluster CPU in use",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "percent",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 69,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(container_memory_rss) / sum(machine_memory_bytes)",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Percentage of total cluster memory in use",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "percent",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 70,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum by (kubernetes_io_hostname) (irate(container_cpu_usage_seconds_total{id=~\"/system.slice/(docker|etcd).service\"}[5m]))",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "Aggregate CPU usage (seconds total) of etcd+docker",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "System and container CPU",
+ "titleSize": "h6"
+ },
+ {
+ "collapse": true,
+ "height": 250,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 71,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": false,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [
+ {
+ "title": "Kubernetes Storage Metrics via Prometheus",
+ "type": "absolute",
+ "url": "https://docs.google.com/document/d/1Fh0T60T_y888LsRwC51CQHO75b2IZ3A34ZQS71s_F0g"
+ }
+ ],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "volumes_queue_latency",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "volumes_queue_latency",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 72,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sort": "avg",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [
+ {
+ "title": "Kubernetes Storage Metrics via Prometheus",
+ "type": "absolute",
+ "url": "https://docs.google.com/document/d/1Fh0T60T_y888LsRwC51CQHO75b2IZ3A34ZQS71s_F0g"
+ }
+ ],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "irate(cloudprovider_gce_api_request_duration_seconds_count[2m])",
+ "format": "time_series",
+ "intervalFactor": 2,
+ "legendFormat": "{{ request }}",
+ "refId": "A",
+ "step": 4
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "cloudprovider_aws_api_request_duration_seconds_count",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "none",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "${DS_PR}",
+ "fill": 1,
+ "id": 73,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "sort": "avg",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [
+ {
+ "title": "Kubernetes Storage Metrics via Prometheus",
+ "type": "absolute",
+ "url": "https://docs.google.com/document/d/1Fh0T60T_y888LsRwC51CQHO75b2IZ3A34ZQS71s_F0g"
+ }
+ ],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "span": 6,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum (irate(storage_operation_duration_seconds_sum{kubernetes_io_hostname=~\"$Node\"}[2m])) by (operation_name,kubernetes_io_hostname)",
+ "format": "time_series",
+ "interval": "1s",
+ "intervalFactor": 1,
+ "legendFormat": "{{ operation_name }} || {{ kubernetes_io_hostname }}",
+ "refId": "A",
+ "step": 2
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "storage_operation_duration_seconds_sum",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "s",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ }
+ ],
+ "repeat": null,
+ "repeatIteration": null,
+ "repeatRowId": null,
+ "showTitle": true,
+ "title": "OpenShift Volumes",
+ "titleSize": "h6"
+ }
+ ],
+ "schemaVersion": 14,
+ "style": "dark",
+ "tags": [
+ "kubernetes",
+ "openshift"
+ ],
+ "templating": {
+ "list": [
+ {
+ "allValue": ".*",
+ "current": {},
+ "datasource": "${DS_PR}",
+ "hide": 0,
+ "includeAll": true,
+ "label": null,
+ "multi": false,
+ "name": "Node",
+ "options": [],
+ "query": "label_values(kubernetes_io_hostname)",
+ "refresh": 1,
+ "regex": "",
+ "sort": 0,
+ "tagValuesQuery": "",
+ "tags": [],
+ "tagsQuery": "",
+ "type": "query",
+ "useTags": false
+ }
+ ]
+ },
+ "time": {
+ "from": "now-30m",
+ "to": "now"
+ },
+ "timepicker": {
+ "refresh_intervals": [
+ "5s",
+ "1s",
+ "2m",
+ "20s",
+ "5m",
+ "15m",
+ "30m",
+ "1h",
+ "2h",
+ "1d"
+ ],
+ "time_options": [
+ "5m",
+ "15m",
+ "1h",
+ "6h",
+ "12h",
+ "24h",
+ "2d",
+ "7d",
+ "30d"
+ ]
+ },
+ "timezone": "browser",
+ "title": "openshift cluster monitoring",
+ "version": 6
+ }
+}
diff --git a/roles/openshift_grafana/meta/main.yml b/roles/openshift_grafana/meta/main.yml
new file mode 100644
index 000000000..8dea6f197
--- /dev/null
+++ b/roles/openshift_grafana/meta/main.yml
@@ -0,0 +1,13 @@
+---
+galaxy_info:
+ author: Eldad Marciano
+  description: Set up the Grafana pod
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 2.3
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - metrics
diff --git a/roles/openshift_grafana/tasks/gf-permissions.yml b/roles/openshift_grafana/tasks/gf-permissions.yml
new file mode 100644
index 000000000..9d3c741ee
--- /dev/null
+++ b/roles/openshift_grafana/tasks/gf-permissions.yml
@@ -0,0 +1,12 @@
+---
+- name: Create gfadmin user in htpasswd
+ command: htpasswd -c /etc/origin/master/htpasswd gfadmin
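+  # NOTE (assumption): without -b, htpasswd prompts for a password and blocks under
+  # Ansible; a non-interactive sketch would look like:
+  #   htpasswd -b -c /etc/origin/master/htpasswd gfadmin <password>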
+
+- name: Make sure master config uses HTPasswdPasswordIdentityProvider
+ command: "sed -ie 's|AllowAllPasswordIdentityProvider|HTPasswdPasswordIdentityProvider\n file: /etc/origin/master/htpasswd|' /etc/origin/master/master-config.yaml"
+
+- name: Grant cluster-reader permission to gfadmin
+ command: oc adm policy add-cluster-role-to-user cluster-reader gfadmin
+
+- name: Restart master API
+ command: systemctl restart atomic-openshift-master-api.service
diff --git a/roles/openshift_grafana/tasks/main.yml b/roles/openshift_grafana/tasks/main.yml
new file mode 100644
index 000000000..6a06d40a9
--- /dev/null
+++ b/roles/openshift_grafana/tasks/main.yml
@@ -0,0 +1,122 @@
+---
+- name: Create grafana namespace
+ oc_project:
+ state: present
+ name: grafana
+
+- name: Configure Grafana Permissions
+  include_tasks: gf-permissions.yml
+ when: gf_oauth | default(false) | bool == true
+
+# TODO: we should grab this yaml file from openshift/origin
+- name: Templatize grafana yaml
+ template: src=grafana-ocp.yaml dest=/tmp/grafana-ocp.yaml
+  when: gf_oauth | default(false) | bool == false
+
+- name: Record the templated file path
+  set_fact:
+    cl_file: /tmp/grafana-ocp.yaml
+  when: gf_oauth | default(false) | bool == false
+
+# TODO: we should grab this yaml file from openshift/origin
+- name: Templatize grafana yaml
+ template: src=grafana-ocp-oauth.yaml dest=/tmp/grafana-ocp-oauth.yaml
+  when: gf_oauth | default(false) | bool == true
+
+- name: Record the templated file path
+  set_fact:
+    cl_file: /tmp/grafana-ocp-oauth.yaml
+  when: gf_oauth | default(false) | bool == true
+
+- name: Process the grafana file
+ oc_process:
+ namespace: grafana
+ template_name: "{{ cl_file }}"
+ create: True
+ when: gf_oauth | default(false) | bool == true
+
+- name: Wait for grafana to be running
+  command: oc rollout status deployment/grafana-ocp -n grafana
+
+- name: oc adm policy add-role-to-user view -z grafana-ocp -n {{ gf_prometheus_namespace }}
+ oc_adm_policy_user:
+ user: grafana-ocp
+ resource_kind: cluster-role
+ resource_name: view
+ state: present
+ role_namespace: "{{ gf_prometheus_namespace }}"
+
+- name: Get grafana route
+ oc_obj:
+ kind: route
+ name: grafana
+ namespace: grafana
+ register: route
+
+- name: Get prometheus route
+ oc_obj:
+ kind: route
+ name: prometheus
+ namespace: "{{ gf_prometheus_namespace }}"
+  register: prometheus
+
+- name: Get the prometheus SA
+ oc_serviceaccount_secret:
+ state: list
+ service_account: prometheus
+ namespace: "{{ gf_prometheus_namespace }}"
+ register: sa
+
+- name: Get the management SA bearer token
+ set_fact:
+ management_token: "{{ sa.results | oo_filter_sa_secrets }}"
+
+- name: Ensure the SA bearer token value is read
+ oc_secret:
+ state: list
+ name: "{{ management_token }}"
+ namespace: "{{ gf_prometheus_namespace }}"
+ no_log: True
+ register: sa_secret
+
+- name: Get the SA bearer token for prometheus
+ set_fact:
+ token: "{{ sa_secret.results.encoded.token }}"
+
+- name: Convert the datasource body to json
+  set_fact:
+    ds_json: "{{ gf_body_tmp | to_json }}"
+
+- name: Set protocol type
+  set_fact:
+    protocol: "{{ 'https' if gf_oauth | default(false) | bool else 'http' }}"
+
+- name: Add gf datasource
+ uri:
+ url: "{{ protocol }}://{{ route }}/api/datasources"
+ user: admin
+ password: admin
+ method: POST
+ body: "{{ ds_json | regex_replace('grafana_name', {{ gf_datasource_name }}) | regex_replace('prometheus_url', 'https://'{{ prometheus }} ) | regex_replace('satoken', {{ token }}) }}"
+ headers:
+ Content-Type: "Content-Type: application/json"
+ register: add_ds
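+  # gf_body_tmp is assumed to render a standard Grafana datasource payload, with the
+  # placeholders above substituted; a minimal sketch (illustrative, not the exact template):
+  #   { "name": "<gf_datasource_name>", "type": "prometheus",
+  #     "url": "https://<prometheus route>", "access": "proxy", "basicAuth": false }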
+
+- name: Regex setup ds name
+ replace:
+ path: "{{ lookup('file', 'openshift-cluster-monitoring.json') }}"
+ regexp: '${DS_PR}'
+ replace: '{{ gf_datasource_name }}'
+ backup: yes
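+  # The ${DS_PR} placeholder in the bundled dashboard is swapped for the real datasource
+  # name here before the upload below, and the tear down task at the end restores it.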
+
+- name: Add new dashboard
+ uri:
+ url: "{{ protocol }}://{{ route }}/api/dashboards/db"
+ user: admin
+ password: admin
+ method: POST
+ body: "{{ lookup('file', 'openshift-cluster-monitoring.json') }}"
+ headers:
+ Content-Type: "Content-Type: application/json"
+ register: add_ds
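+  # Grafana's /api/dashboards/db endpoint expects the dashboard wrapped in an outer
+  # object, e.g. { "dashboard": { ... }, "overwrite": true }; the bundled JSON file is
+  # assumed to already carry that wrapper.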
+
+- name: Regex json tear down
+ replace:
+ path: "{{ lookup('file', 'openshift-cluster-monitoring.json') }}"
+ regexp: '${DS_PR}'
+ replace: '{{ gf_datasource_name }}'
+ backup: yes
diff --git a/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py b/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py
index dcaf87eca..c83adb26d 100644
--- a/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py
+++ b/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py
@@ -175,6 +175,8 @@ def format_failure(failure):
play = failure['play']
task = failure['task']
msg = failure['msg']
+ if not isinstance(msg, string_types):
+ msg = str(msg)
checks = failure['checks']
fields = (
(u'Hosts', host),
diff --git a/roles/openshift_health_checker/openshift_checks/__init__.py b/roles/openshift_health_checker/openshift_checks/__init__.py
index 83e551b5d..b9c41d1b4 100644
--- a/roles/openshift_health_checker/openshift_checks/__init__.py
+++ b/roles/openshift_health_checker/openshift_checks/__init__.py
@@ -5,6 +5,7 @@ Health checks for OpenShift clusters.
import json
import operator
import os
+import re
import time
import collections
@@ -309,28 +310,38 @@ class OpenShiftCheck(object):
name_list = name_list.split(',')
return [name.strip() for name in name_list if name.strip()]
- @staticmethod
- def get_major_minor_version(openshift_image_tag):
+ def get_major_minor_version(self, openshift_image_tag=None):
"""Parse and return the deployed version of OpenShift as a tuple."""
- if openshift_image_tag and openshift_image_tag[0] == 'v':
- openshift_image_tag = openshift_image_tag[1:]
- # map major release versions across releases
- # to a common major version
- openshift_major_release_version = {
- "1": "3",
- }
+ version = openshift_image_tag or self.get_var("openshift_image_tag")
+ components = [int(component) for component in re.findall(r'\d+', version)]
- components = openshift_image_tag.split(".")
- if not components or len(components) < 2:
+ if len(components) < 2:
msg = "An invalid version of OpenShift was found for this host: {}"
- raise OpenShiftCheckException(msg.format(openshift_image_tag))
+ raise OpenShiftCheckException(msg.format(version))
+
+ # map major release version across releases to OCP major version
+ components[0] = {1: 3}.get(components[0], components[0])
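+        # e.g. openshift_image_tag "v3.7.9-1" -> (3, 7); a legacy "v1.5.1" tag maps to (3, 5)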
+
+ return tuple(int(x) for x in components[:2])
+
+ def get_required_version(self, name, version_map):
+ """Return the correct required version(s) for the current (or nearest) OpenShift version."""
+ openshift_version = self.get_major_minor_version()
+
+ earliest = min(version_map)
+ latest = max(version_map)
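+        # e.g. with map keys up to (3, 8), an OpenShift (3, 9) host falls back to the (3, 8) entry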
+ if openshift_version < earliest:
+ return version_map[earliest]
+ if openshift_version > latest:
+ return version_map[latest]
- if components[0] in openshift_major_release_version:
- components[0] = openshift_major_release_version[components[0]]
+ required_version = version_map.get(openshift_version)
+ if not required_version:
+ msg = "There is no recommended version of {} for the current version of OpenShift ({})"
+ raise OpenShiftCheckException(msg.format(name, ".".join(str(comp) for comp in openshift_version)))
- components = tuple(int(x) for x in components[:2])
- return components
+ return required_version
def find_ansible_mount(self, path):
"""Return the mount point for path from ansible_mounts."""
diff --git a/roles/openshift_health_checker/openshift_checks/disk_availability.py b/roles/openshift_health_checker/openshift_checks/disk_availability.py
index 87e6146d4..6e30a8610 100644
--- a/roles/openshift_health_checker/openshift_checks/disk_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/disk_availability.py
@@ -21,7 +21,7 @@ class DiskAvailability(OpenShiftCheck):
'oo_etcd_to_config': 20 * 10**9,
},
# Used to copy client binaries into,
- # see roles/openshift_cli/library/openshift_container_binary_sync.py.
+ # see roles/lib_utils/library/openshift_container_binary_sync.py.
'/usr/local/bin': {
'oo_masters_to_config': 1 * 10**9,
'oo_nodes_to_config': 1 * 10**9,
diff --git a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
index 7afb8f730..d298fbab2 100644
--- a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
@@ -40,7 +40,7 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
# to look for images available remotely without waiting to pull them.
dependencies = ["python-docker-py", "skopeo"]
# command for checking if remote registries have an image, without docker pull
- skopeo_command = "timeout 10 skopeo inspect --tls-verify={tls} {creds} docker://{registry}/{image}"
+ skopeo_command = "{proxyvars} timeout 10 skopeo inspect --tls-verify={tls} {creds} docker://{registry}/{image}"
skopeo_example_command = "skopeo inspect [--tls-verify=false] [--creds=<user>:<pass>] docker://<registry>/<image>"
def __init__(self, *args, **kwargs):
@@ -56,7 +56,7 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
# ordered list of registries (according to inventory vars) that docker will try for unscoped images
regs = self.ensure_list("openshift_docker_additional_registries")
# currently one of these registries is added whether the user wants it or not.
- deployment_type = self.get_var("openshift_deployment_type")
+ deployment_type = self.get_var("openshift_deployment_type", default="")
if deployment_type == "origin" and "docker.io" not in regs:
regs.append("docker.io")
elif deployment_type == 'openshift-enterprise' and "registry.access.redhat.com" not in regs:
@@ -76,11 +76,20 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
if oreg_auth_user != '' and oreg_auth_password != '':
oreg_auth_user = self.template_var(oreg_auth_user)
oreg_auth_password = self.template_var(oreg_auth_password)
- self.skopeo_command_creds = "--creds={}:{}".format(quote(oreg_auth_user), quote(oreg_auth_password))
+ self.skopeo_command_creds = quote("--creds={}:{}".format(oreg_auth_user, oreg_auth_password))
# record whether we could reach a registry or not (and remember results)
self.reachable_registries = {}
+ # take note of any proxy settings needed
+ proxies = []
+ for var in ['http_proxy', 'https_proxy', 'no_proxy']:
+ # ansible vars are openshift_http_proxy, openshift_https_proxy, openshift_no_proxy
+ value = self.get_var("openshift_" + var, default=None)
+ if value:
+ proxies.append(var.upper() + "=" + quote(self.template_var(value)))
+ self.skopeo_proxy_vars = " ".join(proxies)
+
def is_active(self):
"""Skip hosts with unsupported deployment types."""
deployment_type = self.get_var("openshift_deployment_type")
@@ -249,11 +258,18 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
if not self.reachable_registries[registry]:
continue # do not keep trying unreachable registries
- args = dict(registry=registry, image=image)
- args["tls"] = "false" if registry in self.registries["insecure"] else "true"
- args["creds"] = self.skopeo_command_creds if registry == self.registries["oreg"] else ""
+ args = dict(
+ proxyvars=self.skopeo_proxy_vars,
+ tls="false" if registry in self.registries["insecure"] else "true",
+ creds=self.skopeo_command_creds if registry == self.registries["oreg"] else "",
+ registry=quote(registry),
+ image=quote(image),
+ )
- result = self.execute_module_with_retries("command", {"_raw_params": self.skopeo_command.format(**args)})
+ result = self.execute_module_with_retries("command", {
+ "_uses_shell": True,
+ "_raw_params": self.skopeo_command.format(**args),
+ })
if result.get("rc", 0) == 0 and not result.get("failed"):
return True
if result.get("rc") == 124: # RC 124 == timed out; mark unreachable
@@ -263,6 +279,10 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
def connect_to_registry(self, registry):
"""Use ansible wait_for module to test connectivity from host to registry. Returns bool."""
+ if self.skopeo_proxy_vars != "":
+ # assume we can't connect directly; just waive the test
+ return True
+
# test a simple TCP connection
host, _, port = registry.partition(":")
port = port or 443
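
For clarity, the proxy-aware skopeo invocation above composes an env-var prefix plus shell-quoted values before running through a shell. A minimal Python sketch of how those pieces fit together; the standalone helper name build_skopeo_command is illustrative only and not part of the patch:

try:
    from shlex import quote  # Python 3
except ImportError:
    from pipes import quote  # Python 2 fallback

SKOPEO_TEMPLATE = "{proxyvars} timeout 10 skopeo inspect --tls-verify={tls} {creds} docker://{registry}/{image}"

def build_skopeo_command(registry, image, insecure_registries, oreg, creds, proxy_env):
    """Compose the shell command the check runs (mirrors the hunk above)."""
    # Proxy settings become an env-var prefix, e.g. "HTTPS_PROXY=http://proxy.example:3128".
    proxyvars = " ".join(
        var.upper() + "=" + quote(value) for var, value in proxy_env.items() if value)
    return SKOPEO_TEMPLATE.format(
        proxyvars=proxyvars,
        tls="false" if registry in insecure_registries else "true",
        creds=creds if registry == oreg else "",
        registry=quote(registry),
        image=quote(image),
    )

# Example with hypothetical values:
# build_skopeo_command("registry.example.com", "openshift3/ose:v3.9",
#                      insecure_registries=set(), oreg="registry.example.com",
#                      creds=quote("--creds=user:secret"),
#                      proxy_env={"https_proxy": "http://proxy.example:3128"})
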
diff --git a/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py b/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py
index 986a01f38..7f8c6ebdc 100644
--- a/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py
+++ b/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py
@@ -170,7 +170,7 @@ class Elasticsearch(LoggingCheck):
"""
errors = []
for pod_name in pods_by_name.keys():
- df_cmd = 'exec {} -- df --output=ipcent,pcent /elasticsearch/persistent'.format(pod_name)
+ df_cmd = '-c elasticsearch exec {} -- df --output=ipcent,pcent /elasticsearch/persistent'.format(pod_name)
disk_output = self.exec_oc(df_cmd, [], save_as_name='get_pv_diskspace.json')
lines = disk_output.splitlines()
# expecting one header looking like 'IUse% Use%' and one body line
diff --git a/roles/openshift_health_checker/openshift_checks/logging/kibana.py b/roles/openshift_health_checker/openshift_checks/logging/kibana.py
index 3b1cf8baa..16ec3a7f6 100644
--- a/roles/openshift_health_checker/openshift_checks/logging/kibana.py
+++ b/roles/openshift_health_checker/openshift_checks/logging/kibana.py
@@ -5,12 +5,11 @@ Module for performing checks on a Kibana logging deployment
import json
import ssl
-try:
- from urllib2 import HTTPError, URLError
- import urllib2
-except ImportError:
- from urllib.error import HTTPError, URLError
- import urllib.request as urllib2
+# pylint can't find the package when it's installed in a virtualenv
+# pylint: disable=import-error,no-name-in-module
+from ansible.module_utils.six.moves.urllib import request
+# pylint: disable=import-error,no-name-in-module
+from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
from openshift_checks.logging.logging import LoggingCheck, OpenShiftCheckException
@@ -65,7 +64,7 @@ class Kibana(LoggingCheck):
# Verify that the url is returning a valid response
try:
# We only care if the url connects and responds
- return_code = urllib2.urlopen(url, context=ctx).getcode()
+ return_code = request.urlopen(url, context=ctx).getcode()
except HTTPError as httperr:
return httperr.reason
except URLError as urlerr:
diff --git a/roles/openshift_health_checker/openshift_checks/ovs_version.py b/roles/openshift_health_checker/openshift_checks/ovs_version.py
index 0cad19842..58a2692bd 100644
--- a/roles/openshift_health_checker/openshift_checks/ovs_version.py
+++ b/roles/openshift_health_checker/openshift_checks/ovs_version.py
@@ -3,7 +3,7 @@ Ansible module for determining if an installed version of Open vSwitch is incomp
currently installed version of OpenShift.
"""
-from openshift_checks import OpenShiftCheck, OpenShiftCheckException
+from openshift_checks import OpenShiftCheck
from openshift_checks.mixins import NotContainerizedMixin
@@ -16,10 +16,12 @@ class OvsVersion(NotContainerizedMixin, OpenShiftCheck):
tags = ["health"]
openshift_to_ovs_version = {
- "3.7": ["2.6", "2.7", "2.8"],
- "3.6": ["2.6", "2.7", "2.8"],
- "3.5": ["2.6", "2.7"],
- "3.4": "2.4",
+ (3, 4): "2.4",
+ (3, 5): ["2.6", "2.7"],
+ (3, 6): ["2.6", "2.7", "2.8"],
+ (3, 7): ["2.6", "2.7", "2.8"],
+ (3, 8): ["2.6", "2.7", "2.8"],
+ (3, 9): ["2.6", "2.7", "2.8"],
}
def is_active(self):
@@ -40,16 +42,5 @@ class OvsVersion(NotContainerizedMixin, OpenShiftCheck):
return self.execute_module("rpm_version", args)
def get_required_ovs_version(self):
- """Return the correct Open vSwitch version for the current OpenShift version"""
- openshift_version_tuple = self.get_major_minor_version(self.get_var("openshift_image_tag"))
-
- if openshift_version_tuple < (3, 5):
- return self.openshift_to_ovs_version["3.4"]
-
- openshift_version = ".".join(str(x) for x in openshift_version_tuple)
- ovs_version = self.openshift_to_ovs_version.get(openshift_version)
- if ovs_version:
- return self.openshift_to_ovs_version[openshift_version]
-
- msg = "There is no recommended version of Open vSwitch for the current version of OpenShift: {}"
- raise OpenShiftCheckException(msg.format(openshift_version))
+ """Return the correct Open vSwitch version(s) for the current OpenShift version."""
+ return self.get_required_version("Open vSwitch", self.openshift_to_ovs_version)
diff --git a/roles/openshift_health_checker/openshift_checks/package_version.py b/roles/openshift_health_checker/openshift_checks/package_version.py
index f3a628e28..28aee8b35 100644
--- a/roles/openshift_health_checker/openshift_checks/package_version.py
+++ b/roles/openshift_health_checker/openshift_checks/package_version.py
@@ -1,8 +1,6 @@
"""Check that available RPM packages match the required versions."""
-import re
-
-from openshift_checks import OpenShiftCheck, OpenShiftCheckException
+from openshift_checks import OpenShiftCheck
from openshift_checks.mixins import NotContainerizedMixin
@@ -18,6 +16,8 @@ class PackageVersion(NotContainerizedMixin, OpenShiftCheck):
(3, 5): ["2.6", "2.7"],
(3, 6): ["2.6", "2.7", "2.8"],
(3, 7): ["2.6", "2.7", "2.8"],
+ (3, 8): ["2.6", "2.7", "2.8"],
+ (3, 9): ["2.6", "2.7", "2.8"],
}
openshift_to_docker_version = {
@@ -27,11 +27,9 @@ class PackageVersion(NotContainerizedMixin, OpenShiftCheck):
(3, 4): "1.12",
(3, 5): "1.12",
(3, 6): "1.12",
- }
-
- # map major OpenShift release versions across releases to a common major version
- map_major_release_version = {
- 1: 3,
+ (3, 7): "1.12",
+ (3, 8): "1.12",
+ (3, 9): ["1.12", "1.13"],
}
def is_active(self):
@@ -83,48 +81,8 @@ class PackageVersion(NotContainerizedMixin, OpenShiftCheck):
def get_required_ovs_version(self):
"""Return the correct Open vSwitch version(s) for the current OpenShift version."""
- openshift_version = self.get_openshift_version_tuple()
-
- earliest = min(self.openshift_to_ovs_version)
- latest = max(self.openshift_to_ovs_version)
- if openshift_version < earliest:
- return self.openshift_to_ovs_version[earliest]
- if openshift_version > latest:
- return self.openshift_to_ovs_version[latest]
-
- ovs_version = self.openshift_to_ovs_version.get(openshift_version)
- if not ovs_version:
- msg = "There is no recommended version of Open vSwitch for the current version of OpenShift: {}"
- raise OpenShiftCheckException(msg.format(".".join(str(comp) for comp in openshift_version)))
-
- return ovs_version
+ return self.get_required_version("Open vSwitch", self.openshift_to_ovs_version)
def get_required_docker_version(self):
"""Return the correct Docker version(s) for the current OpenShift version."""
- openshift_version = self.get_openshift_version_tuple()
-
- earliest = min(self.openshift_to_docker_version)
- latest = max(self.openshift_to_docker_version)
- if openshift_version < earliest:
- return self.openshift_to_docker_version[earliest]
- if openshift_version > latest:
- return self.openshift_to_docker_version[latest]
-
- docker_version = self.openshift_to_docker_version.get(openshift_version)
- if not docker_version:
- msg = "There is no recommended version of Docker for the current version of OpenShift: {}"
- raise OpenShiftCheckException(msg.format(".".join(str(comp) for comp in openshift_version)))
-
- return docker_version
-
- def get_openshift_version_tuple(self):
- """Return received image tag as a normalized (X, Y) minor version tuple."""
- version = self.get_var("openshift_image_tag")
- comps = [int(component) for component in re.findall(r'\d+', version)]
-
- if len(comps) < 2:
- msg = "An invalid version of OpenShift was found for this host: {}"
- raise OpenShiftCheckException(msg.format(version))
-
- comps[0] = self.map_major_release_version.get(comps[0], comps[0])
- return tuple(comps[0:2])
+ return self.get_required_version("Docker", self.openshift_to_docker_version)
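
Both methods above (and the equivalent one in ovs_version.py) now delegate to a shared get_required_version helper whose implementation is not part of this excerpt. The following is only a sketch that assumes it keeps the clamping behaviour removed here: versions below the earliest or above the latest known release map to the nearest entry, and any other unknown version raises the existing OpenShiftCheckException:

def get_required_version(self, name, version_map):
    """Return the required version(s) of a dependency for the current OpenShift version.

    Sketch only: assumes the same semantics as the code removed above, with
    version_map keyed by (major, minor) tuples and get_major_minor_version
    provided by the base check class.
    """
    openshift_version = self.get_major_minor_version(self.get_var("openshift_image_tag"))

    earliest = min(version_map)
    latest = max(version_map)
    if openshift_version < earliest:
        return version_map[earliest]
    if openshift_version > latest:
        return version_map[latest]

    required = version_map.get(openshift_version)
    if not required:
        msg = "There is no recommended version of {} for the current version of OpenShift: {}"
        raise OpenShiftCheckException(
            msg.format(name, ".".join(str(comp) for comp in openshift_version)))
    return required
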
diff --git a/roles/openshift_health_checker/test/kibana_test.py b/roles/openshift_health_checker/test/kibana_test.py
index 04a5e89c4..750d4b9e9 100644
--- a/roles/openshift_health_checker/test/kibana_test.py
+++ b/roles/openshift_health_checker/test/kibana_test.py
@@ -1,12 +1,10 @@
import pytest
import json
-try:
- import urllib2
- from urllib2 import HTTPError, URLError
-except ImportError:
- from urllib.error import HTTPError, URLError
- import urllib.request as urllib2
+# pylint can't find the package when it's installed in a virtualenv
+from ansible.module_utils.six.moves.urllib import request # pylint: disable=import-error
+# pylint: disable=import-error
+from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
from openshift_checks.logging.kibana import Kibana, OpenShiftCheckException
@@ -202,7 +200,7 @@ def test_verify_url_external_failure(lib_result, expect, monkeypatch):
if type(lib_result) is int:
return _http_return(lib_result)
raise lib_result
- monkeypatch.setattr(urllib2, 'urlopen', urlopen)
+ monkeypatch.setattr(request, 'urlopen', urlopen)
check = Kibana()
check._get_kibana_url = lambda: 'url'
diff --git a/roles/openshift_health_checker/test/ovs_version_test.py b/roles/openshift_health_checker/test/ovs_version_test.py
index 0238f49d5..80c7a0541 100644
--- a/roles/openshift_health_checker/test/ovs_version_test.py
+++ b/roles/openshift_health_checker/test/ovs_version_test.py
@@ -1,26 +1,7 @@
import pytest
-from openshift_checks.ovs_version import OvsVersion, OpenShiftCheckException
-
-
-def test_openshift_version_not_supported():
- def execute_module(*_):
- return {}
-
- openshift_release = '111.7.0'
-
- task_vars = dict(
- openshift=dict(common=dict()),
- openshift_release=openshift_release,
- openshift_image_tag='v' + openshift_release,
- openshift_deployment_type='origin',
- openshift_service_type='origin'
- )
-
- with pytest.raises(OpenShiftCheckException) as excinfo:
- OvsVersion(execute_module, task_vars).run()
-
- assert "no recommended version of Open vSwitch" in str(excinfo.value)
+from openshift_checks.ovs_version import OvsVersion
+from openshift_checks import OpenShiftCheckException
def test_invalid_openshift_release_format():
diff --git a/roles/openshift_health_checker/test/package_version_test.py b/roles/openshift_health_checker/test/package_version_test.py
index d2916f617..868b4bd12 100644
--- a/roles/openshift_health_checker/test/package_version_test.py
+++ b/roles/openshift_health_checker/test/package_version_test.py
@@ -1,6 +1,7 @@
import pytest
-from openshift_checks.package_version import PackageVersion, OpenShiftCheckException
+from openshift_checks.package_version import PackageVersion
+from openshift_checks import OpenShiftCheckException
def task_vars_for(openshift_release, deployment_type):
@@ -18,7 +19,7 @@ def task_vars_for(openshift_release, deployment_type):
def test_openshift_version_not_supported():
check = PackageVersion(None, task_vars_for("1.2.3", 'origin'))
- check.get_openshift_version_tuple = lambda: (3, 4, 1) # won't be in the dict
+ check.get_major_minor_version = lambda: (3, 4, 1) # won't be in the dict
with pytest.raises(OpenShiftCheckException) as excinfo:
check.get_required_ovs_version()
diff --git a/roles/openshift_hosted/defaults/main.yml b/roles/openshift_hosted/defaults/main.yml
index b6501d288..f40085976 100644
--- a/roles/openshift_hosted/defaults/main.yml
+++ b/roles/openshift_hosted/defaults/main.yml
@@ -69,7 +69,7 @@ r_openshift_hosted_router_os_firewall_allow: []
############
openshift_hosted_registry_selector: "{{ openshift_registry_selector | default(openshift_hosted_infra_selector) }}"
-penshift_hosted_registry_registryurl: "{{ openshift_hosted_images_dict[openshift_deployment_type] }}"
+openshift_hosted_registry_registryurl: "{{ openshift_hosted_images_dict[openshift_deployment_type] }}"
openshift_hosted_registry_routecertificates: {}
openshift_hosted_registry_routetermination: "passthrough"
diff --git a/roles/openshift_hosted/tasks/router.yml b/roles/openshift_hosted/tasks/router.yml
index 2dc9c98f6..c2be00d19 100644
--- a/roles/openshift_hosted/tasks/router.yml
+++ b/roles/openshift_hosted/tasks/router.yml
@@ -18,6 +18,7 @@
- name: set_fact replicas
set_fact:
+ # get_router_replicas is a custom filter in role lib_utils
replicas: "{{ openshift_hosted_router_replicas | default(None) | get_router_replicas(router_nodes) }}"
- name: Get the certificate contents for router
diff --git a/roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml b/roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml
index 77f020357..fef945d51 100644
--- a/roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml
+++ b/roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml
@@ -1,4 +1,10 @@
---
+- name: Create temp directory for doing work in
+ command: mktemp -d /tmp/openshift-hosted-ansible-XXXXXX
+ register: mktempHosted
+ changed_when: False
+ check_mode: no
+
- name: Generate GlusterFS registry endpoints
template:
src: "{{ openshift.common.examples_content_version }}/glusterfs-registry-endpoints.yml.j2"
@@ -14,3 +20,10 @@
with_items:
- "{{ mktempHosted.stdout }}/glusterfs-registry-service.yml"
- "{{ mktempHosted.stdout }}/glusterfs-registry-endpoints.yml"
+
+- name: Delete temp directory
+ file:
+ name: "{{ mktempHosted.stdout }}"
+ state: absent
+ changed_when: False
+ check_mode: no
diff --git a/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml
index cc3159a32..0786e2d2f 100644
--- a/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml
@@ -102,7 +102,7 @@ objects:
parameters:
- description: 'Specify "registry/repository" prefix for container image; e.g. for "registry.access.redhat.com/openshift3/registry-console:latest", set prefix "registry.access.redhat.com/openshift3/"'
name: IMAGE_PREFIX
- value: "openshift3/"
+ value: "registry.access.redhat.com/openshift3/"
- description: 'Specify component name for container image; e.g. for "registry.access.redhat.com/openshift3/registry-console:latest", use base name "registry-console"'
name: IMAGE_BASENAME
value: "registry-console"
diff --git a/roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml
index 9f2e6125d..ccea54aaf 100644
--- a/roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml
@@ -102,7 +102,7 @@ objects:
parameters:
- description: 'Specify "registry/repository" prefix for container image; e.g. for "registry.access.redhat.com/openshift3/registry-console:latest", set prefix "registry.access.redhat.com/openshift3/"'
name: IMAGE_PREFIX
- value: "openshift3/"
+ value: "registry.access.redhat.com/openshift3/"
- description: 'Specify component name for container image; e.g. for "registry.access.redhat.com/openshift3/registry-console:latest", use base name "registry-console"'
name: IMAGE_BASENAME
value: "registry-console"
diff --git a/roles/openshift_hosted_templates/files/v3.8/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.8/enterprise/registry-console.yaml
index f04ce06d3..15ad4e9af 100644
--- a/roles/openshift_hosted_templates/files/v3.8/enterprise/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v3.8/enterprise/registry-console.yaml
@@ -102,7 +102,7 @@ objects:
parameters:
- description: 'Specify "registry/repository" prefix for container image; e.g. for "registry.access.redhat.com/openshift3/registry-console:latest", set prefix "registry.access.redhat.com/openshift3/"'
name: IMAGE_PREFIX
- value: "openshift3/"
+ value: "registry.access.redhat.com/openshift3/"
- description: 'Specify component name for container image; e.g. for "registry.access.redhat.com/openshift3/registry-console:latest", use base name "registry-console"'
name: IMAGE_BASENAME
value: "registry-console"
diff --git a/roles/openshift_hosted_templates/files/v3.9/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.9/enterprise/registry-console.yaml
index c178cf432..7acefa0f0 100644
--- a/roles/openshift_hosted_templates/files/v3.9/enterprise/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v3.9/enterprise/registry-console.yaml
@@ -102,7 +102,7 @@ objects:
parameters:
- description: 'Specify "registry/repository" prefix for container image; e.g. for "registry.access.redhat.com/openshift3/registry-console:latest", set prefix "registry.access.redhat.com/openshift3/"'
name: IMAGE_PREFIX
- value: "openshift3/"
+ value: "registry.access.redhat.com/openshift3/"
- description: 'Specify component name for container image; e.g. for "registry.access.redhat.com/openshift3/registry-console:latest", use base name "registry-console"'
name: IMAGE_BASENAME
value: "registry-console"
diff --git a/roles/openshift_logging/README.md b/roles/openshift_logging/README.md
index 27cfc17d6..a192bd67e 100644
--- a/roles/openshift_logging/README.md
+++ b/roles/openshift_logging/README.md
@@ -177,6 +177,9 @@ Elasticsearch OPS too, if using an OPS cluster:
clients will use to connect to mux, and will be used in the TLS server cert
subject.
- `openshift_logging_mux_port`: 24284
+- `openshift_logging_mux_external_address`: The IP address that mux will listen
+  on for connections from *external* clients. Defaults to the address of the
+  default IPv4 interface, as reported by the `ansible_default_ipv4` fact.
- `openshift_logging_mux_cpu_request`: 100m
- `openshift_logging_mux_memory_limit`: 512Mi
- `openshift_logging_mux_default_namespaces`: Default `["mux-undefined"]` - the
diff --git a/roles/openshift_logging/filter_plugins/openshift_logging.py b/roles/openshift_logging/filter_plugins/openshift_logging.py
index ba412b5a6..247c7e4df 100644
--- a/roles/openshift_logging/filter_plugins/openshift_logging.py
+++ b/roles/openshift_logging/filter_plugins/openshift_logging.py
@@ -79,14 +79,6 @@ def entry_from_named_pair(register_pairs, key):
raise RuntimeError("There was no entry found in the dict that had an item with a name that matched {}".format(key))
-def map_from_pairs(source, delim="="):
- ''' Returns a dict given the source and delim delimited '''
- if source == '':
- return dict()
-
- return dict(item.split(delim) for item in source.split(","))
-
-
def serviceaccount_name(qualified_sa):
''' Returns the simple name from a fully qualified name '''
return qualified_sa.split(":")[-1]
@@ -134,7 +126,6 @@ class FilterModule(object):
return {
'random_word': random_word,
'entry_from_named_pair': entry_from_named_pair,
- 'map_from_pairs': map_from_pairs,
'min_cpu': min_cpu,
'es_storage': es_storage,
'serviceaccount_name': serviceaccount_name,
diff --git a/roles/openshift_logging/library/openshift_logging_facts.py b/roles/openshift_logging/library/openshift_logging_facts.py
index 302a9b4c9..37ffb0204 100644
--- a/roles/openshift_logging/library/openshift_logging_facts.py
+++ b/roles/openshift_logging/library/openshift_logging_facts.py
@@ -276,7 +276,7 @@ class OpenshiftLoggingFacts(OCBaseCommand):
return
for item in role["subjects"]:
comp = self.comp(item["name"])
- if comp is not None and namespace == item["namespace"]:
+ if comp is not None and namespace == item.get("namespace"):
self.add_facts_for(comp, "clusterrolebindings", "cluster-readers", dict())
# this needs to end up nested under the service account...
@@ -288,7 +288,7 @@ class OpenshiftLoggingFacts(OCBaseCommand):
return
for item in role["subjects"]:
comp = self.comp(item["name"])
- if comp is not None and namespace == item["namespace"]:
+ if comp is not None and namespace == item.get("namespace"):
self.add_facts_for(comp, "rolebindings", "logging-elasticsearch-view-role", dict())
# pylint: disable=no-self-use, too-many-return-statements
diff --git a/roles/openshift_logging/tasks/annotate_ops_projects.yaml b/roles/openshift_logging/tasks/annotate_ops_projects.yaml
index 4a2ee64f0..6fdba6580 100644
--- a/roles/openshift_logging/tasks/annotate_ops_projects.yaml
+++ b/roles/openshift_logging/tasks/annotate_ops_projects.yaml
@@ -12,6 +12,7 @@
separator: '#'
content:
metadata#annotations#openshift.io/logging.ui.hostname: "{{ openshift_logging_kibana_ops_hostname }}"
+ metadata#annotations#openshift.io/logging.data.prefix: ".operations"
with_items: "{{ __logging_ops_projects.stdout.split(' ') }}"
loop_control:
loop_var: project
diff --git a/roles/openshift_logging/tasks/delete_logging.yaml b/roles/openshift_logging/tasks/delete_logging.yaml
index fbc3e3fd1..ced7397b5 100644
--- a/roles/openshift_logging/tasks/delete_logging.yaml
+++ b/roles/openshift_logging/tasks/delete_logging.yaml
@@ -131,13 +131,13 @@
when:
not openshift_logging_install_eventrouter | default(false) | bool
-# Update asset config in openshift-web-console namespace
-- name: Remove Kibana route information from web console asset config
+# Update console config in openshift-web-console namespace
+- name: Remove Kibana route information from the web console config
include_role:
name: openshift_web_console
- tasks_from: update_asset_config.yml
+ tasks_from: update_console_config.yml
vars:
- asset_config_edits:
- - key: loggingPublicURL
+ console_config_edits:
+ - key: clusterInfo#loggingPublicURL
value: ""
when: openshift_web_console_install | default(true) | bool
diff --git a/roles/openshift_logging/tasks/generate_certs.yaml b/roles/openshift_logging/tasks/generate_certs.yaml
index 0d7f8c056..a40449bf6 100644
--- a/roles/openshift_logging/tasks/generate_certs.yaml
+++ b/roles/openshift_logging/tasks/generate_certs.yaml
@@ -19,7 +19,7 @@
command: >
{{ openshift_client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig ca create-signer-cert
--key={{generated_certs_dir}}/ca.key --cert={{generated_certs_dir}}/ca.crt
- --serial={{generated_certs_dir}}/ca.serial.txt --name=logging-signer-test
+ --serial={{generated_certs_dir}}/ca.serial.txt --name=logging-signer-test --overwrite=false
check_mode: no
when:
- not ca_key_file.stat.exists
diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml
index 67904a9d3..3afd8680f 100644
--- a/roles/openshift_logging/tasks/install_logging.yaml
+++ b/roles/openshift_logging/tasks/install_logging.yaml
@@ -87,14 +87,14 @@
openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type }}"
openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_pv_selector }}"
- openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_pvc_storage_class_name }}"
+ openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_pvc_storage_class_name | default() }}"
openshift_logging_elasticsearch_nodeselector: "{{ openshift_logging_es_nodeselector if outer_item.0.nodeSelector | default(None) is none else outer_item.0.nodeSelector }}"
openshift_logging_elasticsearch_storage_group: "{{ [openshift_logging_es_storage_group] if outer_item.0.storageGroups | default([]) | length == 0 else outer_item.0.storageGroups }}"
_es_containers: "{{ outer_item.0.containers}}"
_es_configmap: "{{ openshift_logging_facts | walk('elasticsearch#configmaps#logging-elasticsearch#elasticsearch.yml', '{}', delimiter='#') | from_yaml }}"
with_together:
- - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.values() }}"
+ - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.values() | list }}"
- "{{ openshift_logging_facts.elasticsearch.pvcs }}"
- "{{ es_indices }}"
loop_control:
@@ -114,7 +114,7 @@
openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type }}"
openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_pv_selector }}"
- openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_pvc_storage_class_name }}"
+ openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_pvc_storage_class_name | default() }}"
with_sequence: count={{ openshift_logging_es_cluster_size | int - openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | count }}
loop_control:
@@ -151,7 +151,7 @@
openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_ops_pvc_size }}"
openshift_logging_elasticsearch_pvc_dynamic: "{{ openshift_logging_es_ops_pvc_dynamic }}"
openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}"
- openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_ops_pvc_storage_class_name }}"
+ openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_ops_pvc_storage_class_name | default() }}"
openshift_logging_elasticsearch_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}"
openshift_logging_elasticsearch_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}"
openshift_logging_elasticsearch_cpu_request: "{{ openshift_logging_es_ops_cpu_request }}"
@@ -169,7 +169,7 @@
_es_configmap: "{{ openshift_logging_facts | walk('elasticsearch_ops#configmaps#logging-elasticsearch-ops#elasticsearch.yml', '{}', delimiter='#') | from_yaml }}"
with_together:
- - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.values() }}"
+ - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.values() | list }}"
- "{{ openshift_logging_facts.elasticsearch_ops.pvcs }}"
- "{{ es_ops_indices }}"
loop_control:
@@ -193,7 +193,7 @@
openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_ops_pvc_size }}"
openshift_logging_elasticsearch_pvc_dynamic: "{{ openshift_logging_es_ops_pvc_dynamic }}"
openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}"
- openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_ops_pvc_storage_class_name }}"
+ openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_ops_pvc_storage_class_name | default() }}"
openshift_logging_elasticsearch_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}"
openshift_logging_elasticsearch_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}"
openshift_logging_elasticsearch_cpu_request: "{{ openshift_logging_es_ops_cpu_request }}"
@@ -321,9 +321,9 @@
- name: Add Kibana route information to web console asset config
include_role:
name: openshift_web_console
- tasks_from: update_asset_config.yml
+ tasks_from: update_console_config.yml
vars:
- asset_config_edits:
- - key: loggingPublicURL
+ console_config_edits:
+ - key: clusterInfo#loggingPublicURL
value: "https://{{ openshift_logging_kibana_hostname }}"
when: openshift_web_console_install | default(true) | bool
diff --git a/roles/openshift_logging/tasks/procure_server_certs.yaml b/roles/openshift_logging/tasks/procure_server_certs.yaml
index bc817075d..d28d1d160 100644
--- a/roles/openshift_logging/tasks/procure_server_certs.yaml
+++ b/roles/openshift_logging/tasks/procure_server_certs.yaml
@@ -30,7 +30,7 @@
{{ openshift_client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig ca create-server-cert
--key={{generated_certs_dir}}/{{cert_info.procure_component}}.key --cert={{generated_certs_dir}}/{{cert_info.procure_component}}.crt
--hostnames={{cert_info.hostnames|quote}} --signer-cert={{generated_certs_dir}}/ca.crt --signer-key={{generated_certs_dir}}/ca.key
- --signer-serial={{generated_certs_dir}}/ca.serial.txt
+ --signer-serial={{generated_certs_dir}}/ca.serial.txt --overwrite=false
check_mode: no
when:
- cert_info.hostnames is defined
diff --git a/roles/openshift_logging_curator/vars/main.yml b/roles/openshift_logging_curator/vars/main.yml
index 5bee58725..df5299a83 100644
--- a/roles/openshift_logging_curator/vars/main.yml
+++ b/roles/openshift_logging_curator/vars/main.yml
@@ -1,3 +1,3 @@
---
-__latest_curator_version: "3_8"
-__allowed_curator_versions: ["3_5", "3_6", "3_7", "3_8"]
+__latest_curator_version: "3_9"
+__allowed_curator_versions: ["3_5", "3_6", "3_7", "3_8", "3_9"]
diff --git a/roles/openshift_logging_elasticsearch/tasks/get_es_version.yml b/roles/openshift_logging_elasticsearch/tasks/get_es_version.yml
index 9182bddb2..16de6f252 100644
--- a/roles/openshift_logging_elasticsearch/tasks/get_es_version.yml
+++ b/roles/openshift_logging_elasticsearch/tasks/get_es_version.yml
@@ -1,6 +1,6 @@
---
- command: >
- oc get pod -l component=es,provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[*].metadata.name}
+ oc get pod -l component=es,provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[?(@.status.phase==\"Running\")].metadata.name}
register: _cluster_pods
- name: "Getting ES version for logging-es cluster"
@@ -10,7 +10,7 @@
when: _cluster_pods.stdout_lines | count > 0
- command: >
- oc get pod -l component=es-ops,provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[*].metadata.name}
+ oc get pod -l component=es-ops,provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[?(@.status.phase==\"Running\")].metadata.name}
register: _ops_cluster_pods
- name: "Getting ES version for logging-es-ops cluster"
diff --git a/roles/openshift_logging_elasticsearch/tasks/restart_cluster.yml b/roles/openshift_logging_elasticsearch/tasks/restart_cluster.yml
index d55beec86..6bce13d1d 100644
--- a/roles/openshift_logging_elasticsearch/tasks/restart_cluster.yml
+++ b/roles/openshift_logging_elasticsearch/tasks/restart_cluster.yml
@@ -19,7 +19,7 @@
## get all pods for the cluster
- command: >
- oc get pod -l component={{ _cluster_component }},provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[*].metadata.name}
+ oc get pod -l component={{ _cluster_component }},provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[?(@.status.phase==\"Running\")].metadata.name}
register: _cluster_pods
- name: "Disable shard balancing for logging-{{ _cluster_component }} cluster"
@@ -64,7 +64,7 @@
## we may need a new first pod to run against -- fetch them all again
- command: >
- oc get pod -l component={{ _cluster_component }},provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[*].metadata.name}
+ oc get pod -l component={{ _cluster_component }},provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[?(@.status.phase==\"Running\")].metadata.name}
register: _cluster_pods
- name: "Enable shard balancing for logging-{{ _cluster_component }} cluster"
diff --git a/roles/openshift_logging_elasticsearch/vars/main.yml b/roles/openshift_logging_elasticsearch/vars/main.yml
index ef259cd3a..122231031 100644
--- a/roles/openshift_logging_elasticsearch/vars/main.yml
+++ b/roles/openshift_logging_elasticsearch/vars/main.yml
@@ -1,6 +1,6 @@
---
-__latest_es_version: "3_8"
-__allowed_es_versions: ["3_5", "3_6", "3_7", "3_8"]
+__latest_es_version: "3_9"
+__allowed_es_versions: ["3_5", "3_6", "3_7", "3_8", "3_9"]
__allowed_es_types: ["data-master", "data-client", "master", "client"]
__es_log_appenders: ['file', 'console']
__kibana_index_modes: ["unique", "shared_ops"]
diff --git a/roles/openshift_logging_fluentd/defaults/main.yml b/roles/openshift_logging_fluentd/defaults/main.yml
index 9b58e4456..87b4204b5 100644
--- a/roles/openshift_logging_fluentd/defaults/main.yml
+++ b/roles/openshift_logging_fluentd/defaults/main.yml
@@ -5,6 +5,7 @@ openshift_logging_fluentd_master_url: "https://kubernetes.default.svc.{{ openshi
openshift_logging_fluentd_namespace: logging
### Common settings
+# map_from_pairs is a custom filter plugin in role lib_utils
openshift_logging_fluentd_nodeselector: "{{ openshift_hosted_logging_fluentd_nodeselector_label | default('logging-infra-fluentd=true') | map_from_pairs }}"
openshift_logging_fluentd_cpu_limit: null
openshift_logging_fluentd_cpu_request: 100m
diff --git a/roles/openshift_logging_fluentd/tasks/main.yaml b/roles/openshift_logging_fluentd/tasks/main.yaml
index 529859983..79ebbca08 100644
--- a/roles/openshift_logging_fluentd/tasks/main.yaml
+++ b/roles/openshift_logging_fluentd/tasks/main.yaml
@@ -172,8 +172,8 @@
app_port: "{{ openshift_logging_fluentd_app_port }}"
ops_host: "{{ openshift_logging_fluentd_ops_host }}"
ops_port: "{{ openshift_logging_fluentd_ops_port }}"
- fluentd_nodeselector_key: "{{ openshift_logging_fluentd_nodeselector.keys()[0] }}"
- fluentd_nodeselector_value: "{{ openshift_logging_fluentd_nodeselector.values()[0] }}"
+ fluentd_nodeselector_key: "{{ openshift_logging_fluentd_nodeselector.keys() | first }}"
+ fluentd_nodeselector_value: "{{ openshift_logging_fluentd_nodeselector.values() | first }}"
fluentd_cpu_limit: "{{ openshift_logging_fluentd_cpu_limit }}"
fluentd_cpu_request: "{{ openshift_logging_fluentd_cpu_request | min_cpu(openshift_logging_fluentd_cpu_limit | default(none)) }}"
fluentd_memory_limit: "{{ openshift_logging_fluentd_memory_limit }}"
diff --git a/roles/openshift_logging_fluentd/vars/main.yml b/roles/openshift_logging_fluentd/vars/main.yml
index 762e3d4d0..b60da814f 100644
--- a/roles/openshift_logging_fluentd/vars/main.yml
+++ b/roles/openshift_logging_fluentd/vars/main.yml
@@ -1,5 +1,5 @@
---
-__latest_fluentd_version: "3_8"
-__allowed_fluentd_versions: ["3_5", "3_6", "3_7", "3_8"]
+__latest_fluentd_version: "3_9"
+__allowed_fluentd_versions: ["3_5", "3_6", "3_7", "3_8", "3_9"]
__allowed_fluentd_types: ["hosted", "secure-aggregator", "secure-host"]
__allowed_mux_client_modes: ["minimal", "maximal"]
diff --git a/roles/openshift_logging_kibana/vars/main.yml b/roles/openshift_logging_kibana/vars/main.yml
index a2c54d8e4..fed926a3b 100644
--- a/roles/openshift_logging_kibana/vars/main.yml
+++ b/roles/openshift_logging_kibana/vars/main.yml
@@ -1,3 +1,3 @@
---
-__latest_kibana_version: "3_8"
-__allowed_kibana_versions: ["3_5", "3_6", "3_7", "3_8"]
+__latest_kibana_version: "3_9"
+__allowed_kibana_versions: ["3_5", "3_6", "3_7", "3_8", "3_9"]
diff --git a/roles/openshift_logging_mux/defaults/main.yml b/roles/openshift_logging_mux/defaults/main.yml
index db6f23126..e87c8d33e 100644
--- a/roles/openshift_logging_mux/defaults/main.yml
+++ b/roles/openshift_logging_mux/defaults/main.yml
@@ -6,6 +6,7 @@ openshift_logging_mux_master_public_url: "{{ openshift_hosted_logging_master_pub
openshift_logging_mux_namespace: logging
### Common settings
+# map_from_pairs is a custom filter plugin in role lib_utils
openshift_logging_mux_nodeselector: "{{ openshift_hosted_logging_mux_nodeselector_label | default('') | map_from_pairs }}"
openshift_logging_mux_cpu_limit: null
openshift_logging_mux_cpu_request: 100m
@@ -30,6 +31,7 @@ openshift_logging_mux_allow_external: False
openshift_logging_use_mux: "{{ openshift_logging_mux_allow_external | default(False) }}"
openshift_logging_mux_hostname: "{{ 'mux.' ~ openshift_master_default_subdomain }}"
openshift_logging_mux_port: 24284
+openshift_logging_mux_external_address: "{{ ansible_default_ipv4.address }}"
# the namespace to use for undefined projects should come first, followed by any
# additional namespaces to create by default - users will typically not need to set this
openshift_logging_mux_default_namespaces: ["mux-undefined"]
diff --git a/roles/openshift_logging_mux/tasks/main.yaml b/roles/openshift_logging_mux/tasks/main.yaml
index 34bdb891c..7eba3cda4 100644
--- a/roles/openshift_logging_mux/tasks/main.yaml
+++ b/roles/openshift_logging_mux/tasks/main.yaml
@@ -148,7 +148,7 @@
port: "{{ openshift_logging_mux_port }}"
targetPort: "mux-forward"
external_ips:
- - "{{ ansible_eth0.ipv4.address }}"
+ - "{{ openshift_logging_mux_external_address }}"
when: openshift_logging_mux_allow_external | bool
- name: Set logging-mux service for internal communication
diff --git a/roles/openshift_logging_mux/vars/main.yml b/roles/openshift_logging_mux/vars/main.yml
index 1da053b4a..e87205bad 100644
--- a/roles/openshift_logging_mux/vars/main.yml
+++ b/roles/openshift_logging_mux/vars/main.yml
@@ -1,3 +1,3 @@
---
-__latest_mux_version: "3_8"
-__allowed_mux_versions: ["3_5", "3_6", "3_7", "3_8"]
+__latest_mux_version: "3_9"
+__allowed_mux_versions: ["3_5", "3_6", "3_7", "3_8", "3_9"]
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index eea1401b8..b12a6b346 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -181,6 +181,7 @@
- restart master api
- set_fact:
+ # translate_idps is a custom filter in role lib_utils
translated_identity_providers: "{{ openshift.master.identity_providers | translate_idps('v1') }}"
# TODO: add the validate parameter when there is a validation command to run
diff --git a/roles/openshift_master/tasks/upgrade/rpm_upgrade.yml b/roles/openshift_master/tasks/upgrade/rpm_upgrade.yml
index f72710832..4564f33dd 100644
--- a/roles/openshift_master/tasks/upgrade/rpm_upgrade.yml
+++ b/roles/openshift_master/tasks/upgrade/rpm_upgrade.yml
@@ -8,8 +8,25 @@
# TODO: If the sdn package isn't already installed this will install it, we
# should fix that
-- name: Upgrade master packages
- package: name={{ master_pkgs | join(',') }} state=present
+- name: Upgrade master packages - yum
+ command:
+ yum install -y {{ master_pkgs | join(' ') }} \
+ {{ ' --exclude *' ~ openshift_service_type ~ '*3.9*' if openshift_release | version_compare('3.9','<') else '' }}
+ vars:
+ master_pkgs:
+ - "{{ openshift_service_type }}{{ openshift_pkg_version | default('') }}"
+ - "{{ openshift_service_type }}-master{{ openshift_pkg_version | default('') }}"
+ - "{{ openshift_service_type }}-node{{ openshift_pkg_version | default('') }}"
+ - "{{ openshift_service_type }}-sdn-ovs{{ openshift_pkg_version | default('') }}"
+ - "{{ openshift_service_type }}-clients{{ openshift_pkg_version | default('') }}"
+ register: result
+ until: result is succeeded
+ when: ansible_pkg_mgr == 'yum'
+
+- name: Upgrade master packages - dnf
+ dnf:
+ name: "{{ master_pkgs | join(',') }}"
+ state: present
vars:
master_pkgs:
- "{{ openshift_service_type }}{{ openshift_pkg_version }}"
@@ -17,6 +34,6 @@
- "{{ openshift_service_type }}-node{{ openshift_pkg_version }}"
- "{{ openshift_service_type }}-sdn-ovs{{ openshift_pkg_version }}"
- "{{ openshift_service_type }}-clients{{ openshift_pkg_version }}"
- - "tuned-profiles-{{ openshift_service_type }}-node{{ openshift_pkg_version }}"
register: result
until: result is succeeded
+ when: ansible_pkg_mgr == 'dnf'
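
The yum branch above templates the package list and, when the target release is older than 3.9, appends an --exclude pattern so newer 3.9 packages in enabled repos are not pulled in. A rough Python rendering of that templating, illustrative only (the real logic is the Jinja expression in the task):

def yum_upgrade_command(service_type, pkg_version, release):
    """Approximate the command produced by the 'Upgrade master packages - yum' task."""
    suffixes = ("", "-master", "-node", "-sdn-ovs", "-clients")
    pkgs = [service_type + suffix + pkg_version for suffix in suffixes]
    cmd = "yum install -y " + " ".join(pkgs)
    # version_compare('3.9', '<') in the task; a simple (major, minor) comparison here.
    if tuple(int(part) for part in release.split(".")[:2]) < (3, 9):
        cmd += " --exclude *" + service_type + "*3.9*"
    return cmd

# e.g. yum_upgrade_command("atomic-openshift", "-3.7.23", "3.7")
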
diff --git a/roles/openshift_master/tasks/upgrade/upgrade_scheduler.yml b/roles/openshift_master/tasks/upgrade/upgrade_scheduler.yml
index 8558bf3e9..995a5ab70 100644
--- a/roles/openshift_master/tasks/upgrade/upgrade_scheduler.yml
+++ b/roles/openshift_master/tasks/upgrade/upgrade_scheduler.yml
@@ -1,6 +1,8 @@
---
# Upgrade predicates
- vars:
+ # openshift_master_facts_default_predicates is a custom lookup plugin in
+ # role lib_utils
prev_predicates: "{{ lookup('openshift_master_facts_default_predicates', short_version=openshift_upgrade_min, deployment_type=openshift_deployment_type) }}"
prev_predicates_no_region: "{{ lookup('openshift_master_facts_default_predicates', short_version=openshift_upgrade_min, deployment_type=openshift_deployment_type, regions_enabled=False) }}"
default_predicates_no_region: "{{ lookup('openshift_master_facts_default_predicates', regions_enabled=False) }}"
diff --git a/roles/openshift_master_certificates/tasks/main.yml b/roles/openshift_master_certificates/tasks/main.yml
index 649a4bc5d..ce27e238f 100644
--- a/roles/openshift_master_certificates/tasks/main.yml
+++ b/roles/openshift_master_certificates/tasks/main.yml
@@ -101,6 +101,7 @@
state: hard
force: true
with_items:
+ # certificates_to_synchronize is a custom filter in lib_utils
- "{{ hostvars[inventory_hostname] | certificates_to_synchronize }}"
when: master_certs_missing | bool and inventory_hostname != openshift_ca_host
delegate_to: "{{ openshift_ca_host }}"
diff --git a/roles/openshift_master_facts/tasks/main.yml b/roles/openshift_master_facts/tasks/main.yml
index 85d0ac25c..f450c916a 100644
--- a/roles/openshift_master_facts/tasks/main.yml
+++ b/roles/openshift_master_facts/tasks/main.yml
@@ -57,6 +57,7 @@
access_token_max_seconds: "{{ openshift_master_access_token_max_seconds | default(None) }}"
auth_token_max_seconds: "{{ openshift_master_auth_token_max_seconds | default(None) }}"
identity_providers: "{{ openshift_master_identity_providers | default(None) }}"
+ # oo_htpasswd_users_from_file is a custom filter in role lib_utils
htpasswd_users: "{{ openshift_master_htpasswd_users | default(lookup('file', openshift_master_htpasswd_file) | oo_htpasswd_users_from_file if openshift_master_htpasswd_file is defined else None) }}"
manage_htpasswd: "{{ openshift_master_manage_htpasswd | default(true) }}"
ldap_ca: "{{ openshift_master_ldap_ca | default(lookup('file', openshift_master_ldap_ca_file) if openshift_master_ldap_ca_file is defined else None) }}"
@@ -90,6 +91,8 @@
- name: Set Default scheduler predicates and priorities
set_fact:
+ # openshift_master_facts_default_predicates is a custom lookup plugin in
+ # role lib_utils
openshift_master_scheduler_default_predicates: "{{ lookup('openshift_master_facts_default_predicates') }}"
openshift_master_scheduler_default_priorities: "{{ lookup('openshift_master_facts_default_priorities') }}"
diff --git a/roles/openshift_metrics/tasks/install_metrics.yaml b/roles/openshift_metrics/tasks/install_metrics.yaml
index 0866fe0d2..0dd5d1621 100644
--- a/roles/openshift_metrics/tasks/install_metrics.yaml
+++ b/roles/openshift_metrics/tasks/install_metrics.yaml
@@ -74,10 +74,10 @@
- name: Add metrics route information to web console asset config
include_role:
name: openshift_web_console
- tasks_from: update_asset_config.yml
+ tasks_from: update_console_config.yml
vars:
- asset_config_edits:
- - key: metricsPublicURL
+ console_config_edits:
+ - key: clusterInfo#metricsPublicURL
value: "https://{{ openshift_metrics_hawkular_hostname}}/hawkular/metrics"
when: openshift_web_console_install | default(true) | bool
diff --git a/roles/openshift_metrics/tasks/oc_apply.yaml b/roles/openshift_metrics/tasks/oc_apply.yaml
index 8ccfb7192..057963c1a 100644
--- a/roles/openshift_metrics/tasks/oc_apply.yaml
+++ b/roles/openshift_metrics/tasks/oc_apply.yaml
@@ -16,7 +16,9 @@
apply -f {{ file_name }}
-n {{namespace}}
register: generation_apply
- failed_when: "'error' in generation_apply.stderr"
+ failed_when:
+ - "'error' in generation_apply.stderr"
+ - "generation_apply.rc != 0"
changed_when: no
- name: Determine change status of {{file_content.kind}} {{file_content.metadata.name}}
@@ -28,5 +30,7 @@
register: version_changed
vars:
init_version: "{{ (generation_init is defined) | ternary(generation_init.stdout, '0') }}"
- failed_when: "'error' in version_changed.stderr"
+ failed_when:
+ - "'error' in version_changed.stderr"
+ - "version_changed.rc != 0"
changed_when: version_changed.stdout | int > init_version | int
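
When failed_when is given a list, Ansible ANDs the entries together, so after this change the apply tasks only fail when stderr mentions an error and the command actually returned non-zero; warnings printed to stderr by a successful oc apply no longer abort the play. In Python terms (illustrative only):

def task_failed(result):
    """AND the two failed_when conditions from the task above."""
    conditions = [
        "error" in result.get("stderr", ""),
        result.get("rc", 0) != 0,
    ]
    return all(conditions)

# task_failed({"stderr": "Warning: ...", "rc": 0})        -> False
# task_failed({"stderr": "error: invalid object", "rc": 1}) -> True
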
diff --git a/roles/openshift_metrics/tasks/uninstall_metrics.yaml b/roles/openshift_metrics/tasks/uninstall_metrics.yaml
index 610c7b4e5..1664e9975 100644
--- a/roles/openshift_metrics/tasks/uninstall_metrics.yaml
+++ b/roles/openshift_metrics/tasks/uninstall_metrics.yaml
@@ -19,13 +19,13 @@
clusterrolebinding/hawkular-metrics
changed_when: delete_metrics.stdout != 'No resources found'
-# Update asset config in openshift-web-console namespace
-- name: Remove metrics route information from web console asset config
+# Update console config in openshift-web-console namespace
+- name: Remove metrics route information from the web console config
include_role:
name: openshift_web_console
- tasks_from: update_asset_config.yml
+ tasks_from: update_console_config.yml
vars:
- asset_config_edits:
- - key: metricsPublicURL
+ console_config_edits:
+ - key: clusterInfo#metricsPublicURL
value: ""
when: openshift_web_console_install | default(true) | bool
diff --git a/roles/openshift_named_certificates/filter_plugins/openshift_named_certificates.py b/roles/openshift_named_certificates/filter_plugins/openshift_named_certificates.py
deleted file mode 100644
index 6ed6d404c..000000000
--- a/roles/openshift_named_certificates/filter_plugins/openshift_named_certificates.py
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-'''
-Custom filters for use with openshift named certificates
-'''
-
-
-class FilterModule(object):
- ''' Custom ansible filters for use with openshift named certificates'''
-
- @staticmethod
- def oo_named_certificates_list(named_certificates):
- ''' Returns named certificates list with correct fields for the master
- config file.'''
- return [{'certFile': named_certificate['certfile'],
- 'keyFile': named_certificate['keyfile'],
- 'names': named_certificate['names']} for named_certificate in named_certificates]
-
- def filters(self):
- ''' returns a mapping of filters to methods '''
- return {"oo_named_certificates_list": self.oo_named_certificates_list}
diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml
index c1fab4382..5864d3c03 100644
--- a/roles/openshift_node/defaults/main.yml
+++ b/roles/openshift_node/defaults/main.yml
@@ -48,6 +48,12 @@ openshift_node_kubelet_args_dict:
cloud-config:
- "{{ openshift_config_base ~ '/cloudprovider/gce.conf' }}"
node-labels: "{{ l_node_kubelet_node_labels }}"
+ azure:
+ cloud-provider:
+ - azure
+ cloud-config:
+ - "{{ openshift_config_base ~ '/cloudprovider/azure.conf' }}"
+ node-labels: "{{ l_node_kubelet_node_labels }}"
undefined:
node-labels: "{{ l_node_kubelet_node_labels }}"
@@ -71,6 +77,18 @@ r_openshift_node_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }
l_is_node_system_container: "{{ (openshift_use_node_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
+openshift_node_syscon_auth_mounts_l:
+- type: bind
+ source: "{{ oreg_auth_credentials_path }}"
+ destination: "/root/.docker"
+ options:
+ - ro
+
+# Additional mounts may be added here in the future, or by users who want to
+# mount extra data; entries use the same format as auth_mounts_l above.
+openshift_node_syscon_add_mounts_l: []
+
+
openshift_deployment_type: "{{ openshift_deployment_type | default('origin') }}"
openshift_node_image_dict:
diff --git a/roles/openshift_node/tasks/install.yml b/roles/openshift_node/tasks/install.yml
index 55738d759..a4a9c1237 100644
--- a/roles/openshift_node/tasks/install.yml
+++ b/roles/openshift_node/tasks/install.yml
@@ -1,28 +1,18 @@
---
-- when: not openshift_is_containerized | bool
- block:
- - name: Install Node package
- package:
- name: "{{ openshift_service_type }}-node{{ (openshift_pkg_version | default('')) | lib_utils_oo_image_tag_to_rpm_version(include_dash=True) }}"
- state: present
- register: result
- until: result is succeeded
-
- - name: Install sdn-ovs package
- package:
- name: "{{ openshift_service_type }}-sdn-ovs{{ (openshift_pkg_version | default('')) | lib_utils_oo_image_tag_to_rpm_version(include_dash=True) }}"
- state: present
- when:
- - openshift_node_use_openshift_sdn | bool
- register: result
- until: result is succeeded
-
- - name: Install conntrack-tools package
- package:
- name: "conntrack-tools"
- state: present
- register: result
- until: result is succeeded
+- name: Install Node package, sdn-ovs, conntrack packages
+ package:
+ name: "{{ item.name }}"
+ state: present
+ register: result
+ until: result is succeeded
+ with_items:
+ - name: "{{ openshift_service_type }}-node{{ (openshift_pkg_version | default('')) | lib_utils_oo_image_tag_to_rpm_version(include_dash=True) }}"
+ - name: "{{ openshift_service_type }}-sdn-ovs{{ (openshift_pkg_version | default('')) | lib_utils_oo_image_tag_to_rpm_version(include_dash=True) }}"
+ install: "{{ openshift_node_use_openshift_sdn | bool }}"
+ - name: "conntrack-tools"
+ when:
+ - not openshift_is_containerized | bool
+ - item['install'] | default(True) | bool
- when:
- openshift_is_containerized | bool
diff --git a/roles/openshift_node/tasks/node_system_container.yml b/roles/openshift_node/tasks/node_system_container.yml
index 06b879050..008f209d7 100644
--- a/roles/openshift_node/tasks/node_system_container.yml
+++ b/roles/openshift_node/tasks/node_system_container.yml
@@ -14,4 +14,23 @@
- "DNS_DOMAIN={{ openshift.common.dns_domain }}"
- "DOCKER_SERVICE={{ openshift_docker_service_name }}.service"
- "MASTER_SERVICE={{ openshift_service_type }}.service"
+ - 'ADDTL_MOUNTS={{ l_node_syscon_add_mounts2 }}'
state: latest
+ vars:
+ # We need to evaluate some variables here to ensure
+ # l_bind_docker_reg_auth is evaluated after registry_auth.yml has been
+ # processed.
+
+ # Determine if we want to include auth credentials mount.
+ l_node_syscon_auth_mounts_l: "{{ l_bind_docker_reg_auth | ternary(openshift_node_syscon_auth_mounts_l,[]) }}"
+
+ # Join any user-provided mounts and auth_mounts into a combined list.
+ l_node_syscon_add_mounts_l: "{{ openshift_node_syscon_add_mounts_l | union(l_node_syscon_auth_mounts_l) }}"
+
+ # We must prepend a ',' here to ensure the value is inserted properly into an
+ # existing json list in the container's config.json
+ # lib_utils_oo_l_of_d_to_csv is a custom filter plugin in roles/lib_utils/oo_filters.py
+ l_node_syscon_add_mounts: ",{{ l_node_syscon_add_mounts_l | lib_utils_oo_l_of_d_to_csv }}"
+ # if we have just a ',' then both mount lists were empty, we don't want to add
+ # anything to config.json
+ l_node_syscon_add_mounts2: "{{ (l_node_syscon_add_mounts != ',') | bool | ternary(l_node_syscon_add_mounts,'') }}"
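
The vars above build the ADDTL_MOUNTS value in three steps: include the registry-auth bind mount only when l_bind_docker_reg_auth is set, union in any user-supplied mounts, then render the list with a leading ',' so it splices into the existing JSON list in the container's config.json (or becomes empty when there is nothing to add). A Python sketch of that flow; the real serialization is done by the lib_utils_oo_l_of_d_to_csv filter, whose exact output format is not shown here, so json.dumps is only a stand-in:

import json

def addtl_mounts(bind_docker_reg_auth, auth_mounts, user_mounts):
    """Mirror the Jinja logic above for composing the ADDTL_MOUNTS value."""
    mounts = list(user_mounts)
    for mount in (auth_mounts if bind_docker_reg_auth else []):
        if mount not in mounts:  # union semantics
            mounts.append(mount)
    if not mounts:
        return ""  # both lists empty: add nothing to config.json
    # Leading ',' lets the fragment extend the existing JSON list of mounts.
    return "," + ",".join(json.dumps(mount) for mount in mounts)
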
diff --git a/roles/openshift_node/tasks/upgrade/config_changes.yml b/roles/openshift_node/tasks/upgrade/config_changes.yml
index 721656117..dd9183382 100644
--- a/roles/openshift_node/tasks/upgrade/config_changes.yml
+++ b/roles/openshift_node/tasks/upgrade/config_changes.yml
@@ -21,6 +21,12 @@
path: "/var/lib/dockershim/sandbox/"
state: absent
+# https://bugzilla.redhat.com/show_bug.cgi?id=1518912
+- name: Clean up IPAM data
+ file:
+ path: "/var/lib/cni/networks/openshift-sdn/"
+ state: absent
+
# Disable Swap Block (pre)
- block:
- name: Remove swap entries from /etc/fstab
diff --git a/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml b/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml
index 91a358095..d4b47bb9e 100644
--- a/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml
+++ b/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml
@@ -12,7 +12,7 @@
until: result is succeeded
vars:
openshift_node_upgrade_rpm_list:
- - "{{ openshift_service_type }}-node{{ openshift_pkg_version }}"
+ - "{{ openshift_service_type }}-node{{ openshift_pkg_version | default('') }}"
- "PyYAML"
- "dnsmasq"
diff --git a/roles/openshift_node/tasks/upgrade/rpm_upgrade_install.yml b/roles/openshift_node/tasks/upgrade/rpm_upgrade_install.yml
index c9094e05a..ef5d8d662 100644
--- a/roles/openshift_node/tasks/upgrade/rpm_upgrade_install.yml
+++ b/roles/openshift_node/tasks/upgrade/rpm_upgrade_install.yml
@@ -14,6 +14,6 @@
until: result is succeeded
vars:
openshift_node_upgrade_rpm_list:
- - "{{ openshift_service_type }}-node{{ openshift_pkg_version }}"
+ - "{{ openshift_service_type }}-node{{ openshift_pkg_version | default('') }}"
- "PyYAML"
- "openvswitch"
diff --git a/roles/openshift_openstack/templates/heat_stack.yaml.j2 b/roles/openshift_openstack/templates/heat_stack.yaml.j2
index 1be5d3a62..8e7c6288a 100644
--- a/roles/openshift_openstack/templates/heat_stack.yaml.j2
+++ b/roles/openshift_openstack/templates/heat_stack.yaml.j2
@@ -523,7 +523,7 @@ resources:
floating_network:
if:
- no_floating
- - null
+ - ''
- {{ openshift_openstack_external_network_name }}
{% if openshift_openstack_provider_network_name %}
attach_float_net: false
@@ -589,8 +589,13 @@ resources:
secgrp:
- { get_resource: lb-secgrp }
- { get_resource: common-secgrp }
-{% if not openshift_openstack_provider_network_name %}
- floating_network: {{ openshift_openstack_external_network_name }}
+ floating_network:
+ if:
+ - no_floating
+ - ''
+ - {{ openshift_openstack_external_network_name }}
+{% if openshift_openstack_provider_network_name %}
+ attach_float_net: false
{% endif %}
volume_size: {{ openshift_openstack_lb_volume_size }}
{% if not openshift_openstack_provider_network_name %}
@@ -655,7 +660,7 @@ resources:
floating_network:
if:
- no_floating
- - null
+ - ''
- {{ openshift_openstack_external_network_name }}
{% if openshift_openstack_provider_network_name %}
attach_float_net: false
@@ -725,7 +730,7 @@ resources:
floating_network:
if:
- no_floating
- - null
+ - ''
- {{ openshift_openstack_external_network_name }}
{% if openshift_openstack_provider_network_name %}
attach_float_net: false
@@ -792,8 +797,13 @@ resources:
{% endif %}
- { get_resource: infra-secgrp }
- { get_resource: common-secgrp }
-{% if not openshift_openstack_provider_network_name %}
- floating_network: {{ openshift_openstack_external_network_name }}
+ floating_network:
+ if:
+ - no_floating
+ - ''
+ - {{ openshift_openstack_external_network_name }}
+{% if openshift_openstack_provider_network_name %}
+ attach_float_net: false
{% endif %}
volume_size: {{ openshift_openstack_infra_volume_size }}
{% if openshift_openstack_infra_server_group_policies|length > 0 %}
diff --git a/roles/openshift_openstack/templates/heat_stack_server.yaml.j2 b/roles/openshift_openstack/templates/heat_stack_server.yaml.j2
index a829da34f..29b09f3c9 100644
--- a/roles/openshift_openstack/templates/heat_stack_server.yaml.j2
+++ b/roles/openshift_openstack/templates/heat_stack_server.yaml.j2
@@ -102,13 +102,11 @@ parameters:
label: Attach-float-net
description: A switch for floating network port connection
-{% if not openshift_openstack_provider_network_name %}
floating_network:
type: string
default: ''
label: Floating network
description: Network to allocate floating IP from
-{% endif %}
availability_zone:
type: string
@@ -212,6 +210,9 @@ resources:
host-type: { get_param: type }
sub-host-type: { get_param: subtype }
node_labels: { get_param: node_labels }
+{% if openshift_openstack_dns_nameservers %}
+ openshift_hostname: { get_param: name }
+{% endif %}
scheduler_hints: { get_param: scheduler_hints }
{% if use_trunk_ports|default(false)|bool %}
diff --git a/roles/openshift_persistent_volumes/tasks/main.yml b/roles/openshift_persistent_volumes/tasks/main.yml
index 0b4dd7d1f..b1d9c8cca 100644
--- a/roles/openshift_persistent_volumes/tasks/main.yml
+++ b/roles/openshift_persistent_volumes/tasks/main.yml
@@ -26,7 +26,8 @@
when: openshift_hosted_registry_storage_glusterfs_swap | default(False)
- name: create standard pv and pvc lists
- # generate_pv_pvcs_list is a custom action module defined in ../action_plugins
+ # generate_pv_pvcs_list is a custom action module defined in
+ # roles/lib_utils/action_plugins/generate_pv_pvcs_list.py
generate_pv_pvcs_list: {}
register: l_pv_pvcs_list
diff --git a/roles/openshift_persistent_volumes/tasks/pv.yml b/roles/openshift_persistent_volumes/tasks/pv.yml
index ef9ab7f5f..865269b7a 100644
--- a/roles/openshift_persistent_volumes/tasks/pv.yml
+++ b/roles/openshift_persistent_volumes/tasks/pv.yml
@@ -13,5 +13,5 @@
--config={{ mktemp.stdout }}/admin.kubeconfig
register: pv_create_output
when: persistent_volumes | length > 0
- failed_when: ('already exists' not in pv_create_output.stderr) and ('created' not in pv_create_output.stdout)
+ failed_when: "('already exists' not in pv_create_output.stderr) and ('created' not in pv_create_output.stdout) and pv_create_output.rc != 0"
changed_when: ('created' in pv_create_output.stdout)
diff --git a/roles/openshift_persistent_volumes/tasks/pvc.yml b/roles/openshift_persistent_volumes/tasks/pvc.yml
index 2c5519192..6c12d128c 100644
--- a/roles/openshift_persistent_volumes/tasks/pvc.yml
+++ b/roles/openshift_persistent_volumes/tasks/pvc.yml
@@ -13,5 +13,5 @@
--config={{ mktemp.stdout }}/admin.kubeconfig
register: pvc_create_output
when: persistent_volume_claims | length > 0
- failed_when: ('already exists' not in pvc_create_output.stderr) and ('created' not in pvc_create_output.stdout)
+ failed_when: "('already exists' not in pvc_create_output.stderr) and ('created' not in pvc_create_output.stdout) and pvc_create_output.rc != 0"
changed_when: ('created' in pvc_create_output.stdout)
diff --git a/roles/openshift_provisioners/tasks/oc_apply.yaml b/roles/openshift_provisioners/tasks/oc_apply.yaml
index a4ce53eae..239e1f1cc 100644
--- a/roles/openshift_provisioners/tasks/oc_apply.yaml
+++ b/roles/openshift_provisioners/tasks/oc_apply.yaml
@@ -15,7 +15,9 @@
apply -f {{ file_name }}
-n {{ namespace }}
register: generation_apply
- failed_when: "'error' in generation_apply.stderr"
+ failed_when:
+ - "'error' in generation_apply.stderr"
+ - "generation_apply.rc != 0"
changed_when: no
- name: Determine change status of {{file_content.kind}} {{file_content.metadata.name}}
@@ -36,7 +38,9 @@
delete -f {{ file_name }}
-n {{ namespace }}
register: generation_delete
- failed_when: "'error' in generation_delete.stderr"
+ failed_when:
+ - "'error' in generation_delete.stderr"
+ - "generation_delete.rc != 0"
changed_when: generation_delete.rc == 0
when: generation_apply.rc != 0
@@ -46,6 +50,8 @@
apply -f {{ file_name }}
-n {{ namespace }}
register: generation_apply
- failed_when: "'error' in generation_apply.stderr"
+ failed_when:
+ - "'error' in generation_apply.stderr"
+ - "generation_apply.rc != 0"
changed_when: generation_apply.rc == 0
when: generation_apply.rc != 0
diff --git a/roles/openshift_sanitize_inventory/filter_plugins/openshift_sanitize_inventory.py b/roles/openshift_sanitize_inventory/filter_plugins/openshift_sanitize_inventory.py
index 72c47b8ee..14f1f72c2 100644
--- a/roles/openshift_sanitize_inventory/filter_plugins/openshift_sanitize_inventory.py
+++ b/roles/openshift_sanitize_inventory/filter_plugins/openshift_sanitize_inventory.py
@@ -6,15 +6,6 @@
import re
-# This should be removed after map_from_pairs is no longer used in __deprecations_logging.yml
-def map_from_pairs(source, delim="="):
- ''' Returns a dict given the source and delim delimited '''
- if source == '':
- return dict()
-
- return dict(item.split(delim) for item in source.split(","))
-
-
def vars_with_pattern(source, pattern=""):
''' Returns a list of variables whose name matches the given pattern '''
if source == '':
@@ -39,6 +30,5 @@ class FilterModule(object):
def filters(self):
''' Returns the names of the filters provided by this class '''
return {
- 'map_from_pairs': map_from_pairs,
'vars_with_pattern': vars_with_pattern
}
diff --git a/roles/openshift_service_catalog/defaults/main.yml b/roles/openshift_service_catalog/defaults/main.yml
index 7c848cb12..15ca9838c 100644
--- a/roles/openshift_service_catalog/defaults/main.yml
+++ b/roles/openshift_service_catalog/defaults/main.yml
@@ -1,6 +1,7 @@
---
openshift_service_catalog_remove: false
openshift_service_catalog_nodeselector: {"openshift-infra": "apiserver"}
+openshift_service_catalog_async_bindings_enabled: false
openshift_use_openshift_sdn: True
# os_sdn_network_plugin_name: "{% if openshift_use_openshift_sdn %}redhat/openshift-ovs-subnet{% else %}{% endif %}"
diff --git a/roles/openshift_service_catalog/tasks/generate_certs.yml b/roles/openshift_service_catalog/tasks/generate_certs.yml
index e478023f8..72110b18c 100644
--- a/roles/openshift_service_catalog/tasks/generate_certs.yml
+++ b/roles/openshift_service_catalog/tasks/generate_certs.yml
@@ -59,11 +59,6 @@
src: "{{ generated_certs_dir }}/ca.crt"
register: apiserver_ca
-- shell: >
- {{ openshift_client_binary }} --config=/etc/origin/master/admin.kubeconfig get apiservices.apiregistration.k8s.io/v1beta1.servicecatalog.k8s.io -n kube-service-catalog || echo "not found"
- register: get_apiservices
- changed_when: no
-
- name: Create api service
oc_obj:
state: present
@@ -86,4 +81,3 @@
caBundle: "{{ apiserver_ca.content }}"
groupPriorityMinimum: 20
versionPriority: 10
- when: "'not found' in get_apiservices.stdout"
diff --git a/roles/openshift_service_catalog/tasks/install.yml b/roles/openshift_service_catalog/tasks/install.yml
index cfecaa12c..9b38a85c4 100644
--- a/roles/openshift_service_catalog/tasks/install.yml
+++ b/roles/openshift_service_catalog/tasks/install.yml
@@ -179,6 +179,8 @@
etcd_servers: "{{ openshift.master.etcd_urls | join(',') }}"
etcd_cafile: "{{ '/etc/origin/master/master.etcd-ca.crt' if etcd_ca_crt.stat.exists else '/etc/origin/master/ca-bundle.crt' }}"
node_selector: "{{ openshift_service_catalog_nodeselector | default ({'openshift-infra': 'apiserver'}) }}"
+ # apiserver_ca is defined in generate_certs.yml
+ ca_hash: "{{ apiserver_ca.content|hash('sha1') }}"
- name: Set Service Catalog API Server daemonset
oc_obj:
diff --git a/roles/openshift_service_catalog/templates/api_server.j2 b/roles/openshift_service_catalog/templates/api_server.j2
index 4f51b8c3c..e345df32c 100644
--- a/roles/openshift_service_catalog/templates/api_server.j2
+++ b/roles/openshift_service_catalog/templates/api_server.j2
@@ -14,6 +14,8 @@ spec:
type: RollingUpdate
template:
metadata:
+ annotations:
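+        # Embedding a hash of the CA content here changes the pod template whenever the CA changes, forcing a redeploy of the API server pods (assumed intent).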
+ ca_hash: {{ ca_hash }}
labels:
app: apiserver
spec:
diff --git a/roles/openshift_service_catalog/templates/controller_manager.j2 b/roles/openshift_service_catalog/templates/controller_manager.j2
index 137222f04..c61e05f73 100644
--- a/roles/openshift_service_catalog/templates/controller_manager.j2
+++ b/roles/openshift_service_catalog/templates/controller_manager.j2
@@ -8,7 +8,7 @@ spec:
selector:
matchLabels:
app: controller-manager
- strategy:
+ updateStrategy:
rollingUpdate:
maxUnavailable: 1
type: RollingUpdate
@@ -38,6 +38,10 @@ spec:
- "5m"
- --feature-gates
- OriginatingIdentity=true
+{% if openshift_service_catalog_async_bindings_enabled | bool %}
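+{# Optionally enable the AsyncBindingOperations feature gate for the service catalog controller manager. #}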
+ - --feature-gates
+ - AsyncBindingOperations=true
+{% endif %}
image: {{ openshift_service_catalog_image_prefix }}service-catalog:{{ openshift_service_catalog_image_version }}
command: ["/usr/bin/service-catalog"]
imagePullPolicy: Always
diff --git a/roles/openshift_storage_glusterfs/files/v3.9/deploy-heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.9/deploy-heketi-template.yml
new file mode 100644
index 000000000..34af652c2
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.9/deploy-heketi-template.yml
@@ -0,0 +1,133 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: deploy-heketi
+ labels:
+ glusterfs: heketi-template
+ deploy-heketi: support
+ annotations:
+ description: Bootstrap Heketi installation
+ tags: glusterfs,heketi,installation
+objects:
+- kind: Service
+ apiVersion: v1
+ metadata:
+ name: deploy-heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-service
+ deploy-heketi: support
+ annotations:
+ description: Exposes Heketi service
+ spec:
+ ports:
+ - name: deploy-heketi-${CLUSTER_NAME}
+ port: 8080
+ targetPort: 8080
+ selector:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-pod
+- kind: Route
+ apiVersion: v1
+ metadata:
+ name: ${HEKETI_ROUTE}
+ labels:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-route
+ deploy-heketi: support
+ spec:
+ to:
+ kind: Service
+ name: deploy-heketi-${CLUSTER_NAME}
+- kind: DeploymentConfig
+ apiVersion: v1
+ metadata:
+ name: deploy-heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-dc
+ deploy-heketi: support
+ annotations:
+ description: Defines how to deploy Heketi
+ spec:
+ replicas: 1
+ selector:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-pod
+ triggers:
+ - type: ConfigChange
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: deploy-heketi
+ labels:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-pod
+ deploy-heketi: support
+ spec:
+ serviceAccountName: heketi-${CLUSTER_NAME}-service-account
+ containers:
+ - name: heketi
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ env:
+ - name: HEKETI_USER_KEY
+ value: ${HEKETI_USER_KEY}
+ - name: HEKETI_ADMIN_KEY
+ value: ${HEKETI_ADMIN_KEY}
+ - name: HEKETI_EXECUTOR
+ value: ${HEKETI_EXECUTOR}
+ - name: HEKETI_FSTAB
+ value: ${HEKETI_FSTAB}
+ - name: HEKETI_SNAPSHOT_LIMIT
+ value: '14'
+ - name: HEKETI_KUBE_GLUSTER_DAEMONSET
+ value: '1'
+ ports:
+ - containerPort: 8080
+ volumeMounts:
+ - name: db
+ mountPath: /var/lib/heketi
+ - name: config
+ mountPath: /etc/heketi
+ readinessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 3
+ httpGet:
+ path: /hello
+ port: 8080
+ livenessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 30
+ httpGet:
+ path: /hello
+ port: 8080
+ volumes:
+ - name: db
+ - name: config
+ secret:
+ secretName: heketi-${CLUSTER_NAME}-config-secret
+parameters:
+- name: HEKETI_USER_KEY
+ displayName: Heketi User Secret
+ description: Set secret for those creating volumes as type _user_
+- name: HEKETI_ADMIN_KEY
+ displayName: Heketi Administrator Secret
+ description: Set secret for administration of the Heketi service as user _admin_
+- name: HEKETI_EXECUTOR
+ displayName: heketi executor type
+ description: Set the executor type, kubernetes or ssh
+ value: kubernetes
+- name: HEKETI_FSTAB
+ displayName: heketi fstab path
+  description: Set the fstab path, the file that heketi populates with the bricks it creates
+ value: /var/lib/heketi/fstab
+- name: HEKETI_ROUTE
+ displayName: heketi route name
+ description: Set the hostname for the route URL
+ value: "heketi-glusterfs"
+- name: IMAGE_NAME
+ displayName: heketi container image name
+ required: True
+- name: IMAGE_VERSION
+ displayName: heketi container image version
+ required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify this heketi service, useful for running multiple heketi instances
+ value: glusterfs
diff --git a/roles/openshift_storage_glusterfs/files/v3.9/gluster-s3-pvcs-template.yml b/roles/openshift_storage_glusterfs/files/v3.9/gluster-s3-pvcs-template.yml
new file mode 100644
index 000000000..064b51473
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.9/gluster-s3-pvcs-template.yml
@@ -0,0 +1,67 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: gluster-s3-pvcs
+ labels:
+ glusterfs: s3-pvcs-template
+ gluster-s3: pvcs-template
+ annotations:
+ description: Gluster S3 service template
+ tags: glusterfs,heketi,gluster-s3
+objects:
+- kind: PersistentVolumeClaim
+ apiVersion: v1
+ metadata:
+ name: "${PVC}"
+ labels:
+ glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-storage
+ gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-pvc
+ annotations:
+ volume.beta.kubernetes.io/storage-class: "glusterfs-${CLUSTER_NAME}"
+ spec:
+ accessModes:
+ - ReadWriteMany
+ resources:
+ requests:
+ storage: "${PVC_SIZE}"
+- kind: PersistentVolumeClaim
+ apiVersion: v1
+ metadata:
+ name: "${META_PVC}"
+ labels:
+ glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-storage
+ gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-meta-pvc
+ annotations:
+ volume.beta.kubernetes.io/storage-class: "glusterfs-${CLUSTER_NAME}"
+ spec:
+ accessModes:
+ - ReadWriteMany
+ resources:
+ requests:
+ storage: "${META_PVC_SIZE}"
+parameters:
+- name: S3_ACCOUNT
+ displayName: S3 Account Name
+ description: S3 storage account which will provide storage on GlusterFS volumes
+ required: true
+- name: PVC
+ displayName: Primary GlusterFS-backed PVC
+ description: GlusterFS-backed PVC for object storage
+ required: true
+- name: PVC_SIZE
+ displayName: Primary GlusterFS-backed PVC capacity
+ description: Capacity for GlusterFS-backed PVC for object storage
+ value: 2Gi
+- name: META_PVC
+ displayName: Metadata GlusterFS-backed PVC
+ description: GlusterFS-backed PVC for object storage metadata
+ required: true
+- name: META_PVC_SIZE
+ displayName: Metadata GlusterFS-backed PVC capacity
+ description: Capacity for GlusterFS-backed PVC for object storage metadata
+ value: 1Gi
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances
+ value: storage
diff --git a/roles/openshift_storage_glusterfs/files/v3.9/gluster-s3-template.yml b/roles/openshift_storage_glusterfs/files/v3.9/gluster-s3-template.yml
new file mode 100644
index 000000000..896a1b226
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.9/gluster-s3-template.yml
@@ -0,0 +1,140 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: gluster-s3
+ labels:
+ glusterfs: s3-template
+ gluster-s3: template
+ annotations:
+ description: Gluster S3 service template
+ tags: glusterfs,heketi,gluster-s3
+objects:
+- kind: Service
+ apiVersion: v1
+ metadata:
+ name: gluster-s3-${CLUSTER_NAME}-${S3_ACCOUNT}-service
+ labels:
+ glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-service
+ gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-service
+ spec:
+ ports:
+ - protocol: TCP
+ port: 8080
+ targetPort: 8080
+ selector:
+      glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-pod
+ type: ClusterIP
+ sessionAffinity: None
+ status:
+ loadBalancer: {}
+- kind: Route
+ apiVersion: v1
+ metadata:
+ name: gluster-s3-${CLUSTER_NAME}-${S3_ACCOUNT}-route
+ labels:
+ glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-route
+ gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-route
+ spec:
+ to:
+ kind: Service
+ name: gluster-s3-${CLUSTER_NAME}-${S3_ACCOUNT}-service
+- kind: DeploymentConfig
+ apiVersion: v1
+ metadata:
+ name: gluster-s3-${CLUSTER_NAME}-${S3_ACCOUNT}-dc
+ labels:
+ glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-dc
+ gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-dc
+ annotations:
+ openshift.io/scc: privileged
+ description: Defines how to deploy gluster s3 object storage
+ spec:
+ replicas: 1
+ selector:
+ glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-pod
+ template:
+ metadata:
+ name: gluster-${CLUSTER_NAME}-${S3_ACCOUNT}-s3
+ labels:
+ glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-pod
+ gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-pod
+ spec:
+ containers:
+ - name: gluster-s3
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ imagePullPolicy: IfNotPresent
+ ports:
+ - name: gluster
+ containerPort: 8080
+ protocol: TCP
+ env:
+ - name: S3_ACCOUNT
+ value: "${S3_ACCOUNT}"
+ - name: S3_USER
+ value: "${S3_USER}"
+ - name: S3_PASSWORD
+ value: "${S3_PASSWORD}"
+ resources: {}
+ volumeMounts:
+ - name: gluster-vol1
+ mountPath: "/mnt/gluster-object/${S3_ACCOUNT}"
+ - name: gluster-vol2
+ mountPath: "/mnt/gluster-object/gsmetadata"
+ - name: glusterfs-cgroup
+ readOnly: true
+ mountPath: "/sys/fs/cgroup"
+ terminationMessagePath: "/dev/termination-log"
+ securityContext:
+ privileged: true
+ volumes:
+ - name: glusterfs-cgroup
+ hostPath:
+ path: "/sys/fs/cgroup"
+ - name: gluster-vol1
+ persistentVolumeClaim:
+ claimName: ${PVC}
+ - name: gluster-vol2
+ persistentVolumeClaim:
+ claimName: ${META_PVC}
+ restartPolicy: Always
+ terminationGracePeriodSeconds: 30
+ dnsPolicy: ClusterFirst
+ serviceAccountName: default
+ serviceAccount: default
+ securityContext: {}
+parameters:
+- name: IMAGE_NAME
+  displayName: gluster-s3 container image name
+ required: True
+- name: IMAGE_VERSION
+  displayName: gluster-s3 container image version
+ required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances
+ value: storage
+- name: S3_ACCOUNT
+ displayName: S3 Account Name
+ description: S3 storage account which will provide storage on GlusterFS volumes
+ required: true
+- name: S3_USER
+ displayName: S3 User
+ description: S3 user who can access the S3 storage account
+ required: true
+- name: S3_PASSWORD
+ displayName: S3 User Password
+ description: Password for the S3 user
+ required: true
+- name: PVC
+ displayName: Primary GlusterFS-backed PVC
+ description: GlusterFS-backed PVC for object storage
+ value: gluster-s3-claim
+- name: META_PVC
+ displayName: Metadata GlusterFS-backed PVC
+ description: GlusterFS-backed PVC for object storage metadata
+ value: gluster-s3-meta-claim
diff --git a/roles/openshift_storage_glusterfs/files/v3.9/glusterblock-provisioner.yml b/roles/openshift_storage_glusterfs/files/v3.9/glusterblock-provisioner.yml
new file mode 100644
index 000000000..63dd5cce6
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.9/glusterblock-provisioner.yml
@@ -0,0 +1,104 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: glusterblock-provisioner
+ labels:
+ glusterfs: block-template
+ glusterblock: template
+ annotations:
+ description: glusterblock provisioner template
+ tags: glusterfs
+objects:
+- kind: ClusterRole
+ apiVersion: v1
+ metadata:
+ name: glusterblock-provisioner-runner
+ labels:
+ glusterfs: block-provisioner-runner-clusterrole
+ glusterblock: provisioner-runner-clusterrole
+ rules:
+ - apiGroups: [""]
+ resources: ["persistentvolumes"]
+ verbs: ["get", "list", "watch", "create", "delete"]
+ - apiGroups: [""]
+ resources: ["persistentvolumeclaims"]
+ verbs: ["get", "list", "watch", "update"]
+ - apiGroups: ["storage.k8s.io"]
+ resources: ["storageclasses"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: [""]
+ resources: ["events"]
+ verbs: ["list", "watch", "create", "update", "patch"]
+ - apiGroups: [""]
+ resources: ["services"]
+ verbs: ["get"]
+ - apiGroups: [""]
+ resources: ["secrets"]
+ verbs: ["get", "create", "delete"]
+ - apiGroups: [""]
+ resources: ["routes"]
+ verbs: ["get", "list"]
+- apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: glusterblock-${CLUSTER_NAME}-provisioner
+ labels:
+ glusterfs: block-${CLUSTER_NAME}-provisioner-sa
+ glusterblock: ${CLUSTER_NAME}-provisioner-sa
+- apiVersion: v1
+ kind: ClusterRoleBinding
+ metadata:
+ name: glusterblock-${CLUSTER_NAME}-provisioner
+ roleRef:
+ name: glusterblock-provisioner-runner
+ subjects:
+ - kind: ServiceAccount
+ name: glusterblock-${CLUSTER_NAME}-provisioner
+ namespace: ${NAMESPACE}
+- kind: DeploymentConfig
+ apiVersion: v1
+ metadata:
+ name: glusterblock-${CLUSTER_NAME}-provisioner-dc
+ labels:
+ glusterfs: block-${CLUSTER_NAME}-provisioner-dc
+ glusterblock: ${CLUSTER_NAME}-provisioner-dc
+ annotations:
+ description: Defines how to deploy the glusterblock provisioner pod.
+ spec:
+ replicas: 1
+ selector:
+ glusterfs: block-${CLUSTER_NAME}-provisioner-pod
+ triggers:
+ - type: ConfigChange
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: glusterblock-provisioner
+ labels:
+ glusterfs: block-${CLUSTER_NAME}-provisioner-pod
+ spec:
+ serviceAccountName: glusterblock-${CLUSTER_NAME}-provisioner
+ containers:
+ - name: glusterblock-provisioner
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: PROVISIONER_NAME
+ value: gluster.org/glusterblock
+parameters:
+- name: IMAGE_NAME
+ displayName: glusterblock provisioner container image name
+ required: True
+- name: IMAGE_VERSION
+ displayName: glusterblock provisioner container image version
+ required: True
+- name: NAMESPACE
+ displayName: glusterblock provisioner namespace
+ description: The namespace in which these resources are being created
+ required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances
+ value: storage
diff --git a/roles/openshift_storage_glusterfs/files/v3.9/glusterfs-template.yml b/roles/openshift_storage_glusterfs/files/v3.9/glusterfs-template.yml
new file mode 100644
index 000000000..09850a2c2
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.9/glusterfs-template.yml
@@ -0,0 +1,154 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: glusterfs
+ labels:
+ glusterfs: template
+ annotations:
+ description: GlusterFS DaemonSet template
+ tags: glusterfs
+objects:
+- kind: DaemonSet
+ apiVersion: extensions/v1beta1
+ metadata:
+ name: glusterfs-${CLUSTER_NAME}
+ labels:
+ glusterfs: ${CLUSTER_NAME}-daemonset
+ annotations:
+ description: GlusterFS DaemonSet
+ tags: glusterfs
+ spec:
+ selector:
+ matchLabels:
+ glusterfs: ${CLUSTER_NAME}-pod
+ template:
+ metadata:
+ name: glusterfs-${CLUSTER_NAME}
+ labels:
+ glusterfs: ${CLUSTER_NAME}-pod
+ glusterfs-node: pod
+ spec:
+ nodeSelector: "${{NODE_LABELS}}"
+ hostNetwork: true
+ containers:
+ - name: glusterfs
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: GB_GLFS_LRU_COUNT
+ value: "${GB_GLFS_LRU_COUNT}"
+ - name: TCMU_LOGDIR
+ value: "${TCMU_LOGDIR}"
+ resources:
+ requests:
+ memory: 100Mi
+ cpu: 100m
+ volumeMounts:
+ - name: glusterfs-heketi
+ mountPath: "/var/lib/heketi"
+ - name: glusterfs-run
+ mountPath: "/run"
+ - name: glusterfs-lvm
+ mountPath: "/run/lvm"
+ - name: glusterfs-etc
+ mountPath: "/etc/glusterfs"
+ - name: glusterfs-logs
+ mountPath: "/var/log/glusterfs"
+ - name: glusterfs-config
+ mountPath: "/var/lib/glusterd"
+ - name: glusterfs-dev
+ mountPath: "/dev"
+ - name: glusterfs-misc
+ mountPath: "/var/lib/misc/glusterfsd"
+ - name: glusterfs-cgroup
+ mountPath: "/sys/fs/cgroup"
+ readOnly: true
+ - name: glusterfs-ssl
+ mountPath: "/etc/ssl"
+ readOnly: true
+ securityContext:
+ capabilities: {}
+ privileged: true
+ readinessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 40
+ exec:
+ command:
+ - "/bin/bash"
+ - "-c"
+ - systemctl status glusterd.service
+ periodSeconds: 25
+ successThreshold: 1
+ failureThreshold: 15
+ livenessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 40
+ exec:
+ command:
+ - "/bin/bash"
+ - "-c"
+ - systemctl status glusterd.service
+ periodSeconds: 25
+ successThreshold: 1
+ failureThreshold: 15
+ terminationMessagePath: "/dev/termination-log"
+ volumes:
+ - name: glusterfs-heketi
+ hostPath:
+ path: "/var/lib/heketi"
+ - name: glusterfs-run
+ emptyDir: {}
+ - name: glusterfs-lvm
+ hostPath:
+ path: "/run/lvm"
+ - name: glusterfs-etc
+ hostPath:
+ path: "/etc/glusterfs"
+ - name: glusterfs-logs
+ hostPath:
+ path: "/var/log/glusterfs"
+ - name: glusterfs-config
+ hostPath:
+ path: "/var/lib/glusterd"
+ - name: glusterfs-dev
+ hostPath:
+ path: "/dev"
+ - name: glusterfs-misc
+ hostPath:
+ path: "/var/lib/misc/glusterfsd"
+ - name: glusterfs-cgroup
+ hostPath:
+ path: "/sys/fs/cgroup"
+ - name: glusterfs-ssl
+ hostPath:
+ path: "/etc/ssl"
+ restartPolicy: Always
+ terminationGracePeriodSeconds: 30
+ dnsPolicy: ClusterFirst
+ securityContext: {}
+parameters:
+- name: NODE_LABELS
+ displayName: Daemonset Node Labels
+  description: Labels which define the daemonset node selector. Must contain at least one label of the format 'glusterfs=<CLUSTER_NAME>-host'
+ value: '{ "glusterfs": "storage-host" }'
+- name: IMAGE_NAME
+ displayName: GlusterFS container image name
+ required: True
+- name: IMAGE_VERSION
+ displayName: GlusterFS container image version
+ required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances
+ value: storage
+- name: GB_GLFS_LRU_COUNT
+ displayName: Maximum number of block hosting volumes
+  description: Sets the maximum number of block hosting volumes.
+ value: "15"
+ required: true
+- name: TCMU_LOGDIR
+ displayName: Tcmu runner log directory
+  description: Sets the tcmu-runner log directory.
+ value: "/var/log/glusterfs/gluster-block"
+ required: true
diff --git a/roles/openshift_storage_glusterfs/files/v3.9/heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.9/heketi-template.yml
new file mode 100644
index 000000000..28cdb2982
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.9/heketi-template.yml
@@ -0,0 +1,136 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: heketi
+ labels:
+ glusterfs: heketi-template
+ annotations:
+ description: Heketi service deployment template
+ tags: glusterfs,heketi
+objects:
+- kind: Service
+ apiVersion: v1
+ metadata:
+ name: heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: heketi-${CLUSTER_NAME}-service
+ heketi: ${CLUSTER_NAME}-service
+ annotations:
+ description: Exposes Heketi service
+ spec:
+ ports:
+ - name: heketi
+ port: 8080
+ targetPort: 8080
+ selector:
+ glusterfs: heketi-${CLUSTER_NAME}-pod
+- kind: Route
+ apiVersion: v1
+ metadata:
+ name: ${HEKETI_ROUTE}
+ labels:
+ glusterfs: heketi-${CLUSTER_NAME}-route
+ heketi: ${CLUSTER_NAME}-route
+ spec:
+ to:
+ kind: Service
+ name: heketi-${CLUSTER_NAME}
+- kind: DeploymentConfig
+ apiVersion: v1
+ metadata:
+ name: heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: heketi-${CLUSTER_NAME}-dc
+ heketi: ${CLUSTER_NAME}-dc
+ annotations:
+ description: Defines how to deploy Heketi
+ spec:
+ replicas: 1
+ selector:
+ glusterfs: heketi-${CLUSTER_NAME}-pod
+ triggers:
+ - type: ConfigChange
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: heketi-${CLUSTER_NAME}-pod
+ heketi: ${CLUSTER_NAME}-pod
+ spec:
+ serviceAccountName: heketi-${CLUSTER_NAME}-service-account
+ containers:
+ - name: heketi
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: HEKETI_USER_KEY
+ value: ${HEKETI_USER_KEY}
+ - name: HEKETI_ADMIN_KEY
+ value: ${HEKETI_ADMIN_KEY}
+ - name: HEKETI_EXECUTOR
+ value: ${HEKETI_EXECUTOR}
+ - name: HEKETI_FSTAB
+ value: ${HEKETI_FSTAB}
+ - name: HEKETI_SNAPSHOT_LIMIT
+ value: '14'
+ - name: HEKETI_KUBE_GLUSTER_DAEMONSET
+ value: '1'
+ ports:
+ - containerPort: 8080
+ volumeMounts:
+ - name: db
+ mountPath: /var/lib/heketi
+ - name: config
+ mountPath: /etc/heketi
+ readinessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 3
+ httpGet:
+ path: /hello
+ port: 8080
+ livenessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 30
+ httpGet:
+ path: /hello
+ port: 8080
+ volumes:
+ - name: db
+ glusterfs:
+ endpoints: heketi-db-${CLUSTER_NAME}-endpoints
+ path: heketidbstorage
+ - name: config
+ secret:
+ secretName: heketi-${CLUSTER_NAME}-config-secret
+parameters:
+- name: HEKETI_USER_KEY
+ displayName: Heketi User Secret
+ description: Set secret for those creating volumes as type _user_
+- name: HEKETI_ADMIN_KEY
+ displayName: Heketi Administrator Secret
+ description: Set secret for administration of the Heketi service as user _admin_
+- name: HEKETI_EXECUTOR
+ displayName: heketi executor type
+ description: Set the executor type, kubernetes or ssh
+ value: kubernetes
+- name: HEKETI_FSTAB
+ displayName: heketi fstab path
+  description: Set the fstab path, the file that heketi populates with the bricks it creates
+ value: /var/lib/heketi/fstab
+- name: HEKETI_ROUTE
+ displayName: heketi route name
+ description: Set the hostname for the route URL
+ value: "heketi-glusterfs"
+- name: IMAGE_NAME
+ displayName: heketi container image name
+ required: True
+- name: IMAGE_VERSION
+ displayName: heketi container image version
+ required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify this heketi service, useful for running multiple heketi instances
+ value: glusterfs
diff --git a/roles/openshift_storage_glusterfs/filter_plugins/openshift_storage_glusterfs.py b/roles/openshift_storage_glusterfs/filter_plugins/openshift_storage_glusterfs.py
deleted file mode 100644
index a86c96df7..000000000
--- a/roles/openshift_storage_glusterfs/filter_plugins/openshift_storage_glusterfs.py
+++ /dev/null
@@ -1,23 +0,0 @@
-'''
- Openshift Storage GlusterFS class that provides useful filters used in GlusterFS
-'''
-
-
-def map_from_pairs(source, delim="="):
- ''' Returns a dict given the source and delim delimited '''
- if source == '':
- return dict()
-
- return dict(item.split(delim) for item in source.split(","))
-
-
-# pylint: disable=too-few-public-methods
-class FilterModule(object):
- ''' OpenShift Storage GlusterFS Filters '''
-
- # pylint: disable=no-self-use, too-few-public-methods
- def filters(self):
- ''' Returns the names of the filters provided by this class '''
- return {
- 'map_from_pairs': map_from_pairs
- }
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
index 2ea7286f3..a374df0ce 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
@@ -4,6 +4,7 @@
glusterfs_namespace: "{{ openshift_storage_glusterfs_namespace }}"
glusterfs_is_native: "{{ openshift_storage_glusterfs_is_native | bool }}"
glusterfs_name: "{{ openshift_storage_glusterfs_name }}"
+ # map_from_pairs is a custom filter plugin in role lib_utils
glusterfs_nodeselector: "{{ openshift_storage_glusterfs_nodeselector | default(['storagenode', openshift_storage_glusterfs_name] | join('=')) | map_from_pairs }}"
glusterfs_use_default_selector: "{{ openshift_storage_glusterfs_use_default_selector }}"
glusterfs_storageclass: "{{ openshift_storage_glusterfs_storageclass }}"
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
index b7cff6514..544a6f491 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
@@ -4,6 +4,7 @@
glusterfs_namespace: "{{ openshift_storage_glusterfs_registry_namespace }}"
glusterfs_is_native: "{{ openshift_storage_glusterfs_registry_is_native | bool }}"
glusterfs_name: "{{ openshift_storage_glusterfs_registry_name }}"
+ # map_from_pairs is a custom filter plugin in role lib_utils
glusterfs_nodeselector: "{{ openshift_storage_glusterfs_registry_nodeselector | default(['storagenode', openshift_storage_glusterfs_registry_name] | join('=')) | map_from_pairs }}"
glusterfs_use_default_selector: "{{ openshift_storage_glusterfs_registry_use_default_selector }}"
glusterfs_storageclass: "{{ openshift_storage_glusterfs_registry_storageclass }}"
diff --git a/roles/openshift_version/tasks/check_available_rpms.yml b/roles/openshift_version/tasks/check_available_rpms.yml
index bdbc63d27..fea0daf77 100644
--- a/roles/openshift_version/tasks/check_available_rpms.yml
+++ b/roles/openshift_version/tasks/check_available_rpms.yml
@@ -1,7 +1,7 @@
---
- name: Get available {{ openshift_service_type}} version
repoquery:
- name: "{{ openshift_service_type}}"
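+    # When openshift_release is set, narrow the query to that release's packages (e.g. "origin-3.9*").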
+ name: "{{ openshift_service_type}}{{ '-' ~ openshift_release ~ '*' if openshift_release is defined else '' }}"
ignore_excluders: true
register: rpm_results
diff --git a/roles/openshift_version/tasks/first_master_containerized_version.yml b/roles/openshift_version/tasks/first_master_containerized_version.yml
index e02a75eab..3ed1d2cfe 100644
--- a/roles/openshift_version/tasks/first_master_containerized_version.yml
+++ b/roles/openshift_version/tasks/first_master_containerized_version.yml
@@ -7,6 +7,7 @@
when:
- openshift_image_tag is defined
- openshift_version is not defined
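+    # openshift_version_reinit appears to request re-detection of the version (e.g. during an upgrade), so this task is skipped when it is set.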
+ - not (openshift_version_reinit | default(false))
- name: Set containerized version to configure if openshift_release specified
set_fact:
@@ -20,7 +21,7 @@
docker run --rm {{ openshift_cli_image }}:latest version
register: cli_image_version
when:
- - openshift_version is not defined
+ - openshift_version is not defined or openshift_version_reinit | default(false)
- not openshift_use_crio_only
# Origin latest = pre-release version (i.e. v1.3.0-alpha.1-321-gb095e3a)
@@ -34,7 +35,7 @@
- set_fact:
openshift_version: "{{ cli_image_version.stdout_lines[0].split(' ')[1].split('-')[0][1:] }}"
- when: openshift_version is not defined
+ when: openshift_version is not defined or openshift_version_reinit | default(false)
# If we got an openshift_version like "3.2", lookup the latest 3.2 container version
# and use that value instead.
diff --git a/roles/openshift_version/tasks/first_master_rpm_version.yml b/roles/openshift_version/tasks/first_master_rpm_version.yml
index 264baca65..5d92f90c6 100644
--- a/roles/openshift_version/tasks/first_master_rpm_version.yml
+++ b/roles/openshift_version/tasks/first_master_rpm_version.yml
@@ -6,6 +6,7 @@
when:
- openshift_pkg_version is defined
- openshift_version is not defined
+ - not (openshift_version_reinit | default(false))
# These tasks should only be run against masters and nodes
- name: Set openshift_version for rpm installation
@@ -13,4 +14,7 @@
- set_fact:
openshift_version: "{{ rpm_results.results.versions.available_versions.0 }}"
- when: openshift_version is not defined
+  when: openshift_version is not defined or openshift_version_reinit | default(false)
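+# When re-initializing, also pin openshift_pkg_version to the rpm version that was just detected.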
+- set_fact:
+ openshift_pkg_version: "-{{ rpm_results.results.versions.available_versions.0 }}"
+ when: openshift_version_reinit | default(false)
diff --git a/roles/openshift_version/tasks/masters_and_nodes.yml b/roles/openshift_version/tasks/masters_and_nodes.yml
index fbeb22d8b..eddd5ff42 100644
--- a/roles/openshift_version/tasks/masters_and_nodes.yml
+++ b/roles/openshift_version/tasks/masters_and_nodes.yml
@@ -6,9 +6,12 @@
include_tasks: check_available_rpms.yml
- name: Fail if rpm version and docker image version are different
fail:
- msg: "OCP rpm version {{ openshift_rpm_version }} is different from OCP image version {{ openshift_version }}"
+ msg: "OCP rpm version {{ rpm_results.results.versions.available_versions.0 }} is different from OCP image version {{ openshift_version }}"
# Both versions have the same string representation
- when: rpm_results.results.versions.available_versions.0 != openshift_version
+ when:
+ - openshift_version not in rpm_results.results.versions.available_versions.0
+ - openshift_version_reinit | default(false)
+
# block when
when: not openshift_is_atomic | bool
diff --git a/roles/openshift_web_console/defaults/main.yml b/roles/openshift_web_console/defaults/main.yml
index 4f395398c..c747f73a8 100644
--- a/roles/openshift_web_console/defaults/main.yml
+++ b/roles/openshift_web_console/defaults/main.yml
@@ -1,3 +1,2 @@
---
-# TODO: This is temporary and will be updated to use taints and tolerations so that the console runs on the masters
-openshift_web_console_nodeselector: {"region":"infra"}
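+# map_from_pairs is the lib_utils filter that turns "region=infra" style strings into a dict such as {"region": "infra"}.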
+openshift_web_console_nodeselector: "{{ openshift_hosted_infra_selector | default('region=infra') | map_from_pairs }}"
diff --git a/roles/openshift_web_console/tasks/install.yml b/roles/openshift_web_console/tasks/install.yml
index 8ee95e36b..ead62799a 100644
--- a/roles/openshift_web_console/tasks/install.yml
+++ b/roles/openshift_web_console/tasks/install.yml
@@ -18,44 +18,124 @@
oc_project:
name: openshift-web-console
state: present
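+    # An empty node selector keeps the project from inheriting the cluster default selector, so the console pods can also run on masters (assumed intent).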
+ node_selector:
+ - ""
-- name: Make temp directory for asset config files
+- name: Make temp directory for web console templates
command: mktemp -d /tmp/console-ansible-XXXXXX
register: mktemp
changed_when: False
-- name: Copy asset config template to temp directory
+- name: Copy admin client config
+ command: >
+    cp {{ openshift.common.config_base }}/master/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
+ changed_when: false
+
+- name: Copy web console templates to temp directory
copy:
src: "{{ __console_files_location }}/{{ item }}"
dest: "{{ mktemp.stdout }}/{{ item }}"
with_items:
- "{{ __console_template_file }}"
+ - "{{ __console_rbac_file }}"
- "{{ __console_config_file }}"
-- name: Update asset config properties
- yedit:
- src: "{{ mktemp.stdout }}/{{ __console_config_file }}"
- edits:
- - key: logoutURL
- value: "{{ openshift.master.logout_url | default('') }}"
- - key: publicURL
- # Must have a trailing slash
- value: "{{ openshift.master.public_console_url }}/"
- - key: masterPublicURL
- value: "{{ openshift.master.public_api_url }}"
+# Check if an existing webconsole-config config map exists. If so, use those
+# contents so we don't overwrite changes.
+- name: Read the existing web console config map
+ oc_configmap:
+ namespace: openshift-web-console
+ name: webconsole-config
+ state: list
+ register: webconsole_config_map
+
+- set_fact:
+ existing_config_map_data: "{{ webconsole_config_map.results.results[0].data | default({}) }}"
+
+- name: Copy the existing web console config to temp directory
+ copy:
+ content: "{{ existing_config_map_data['webconsole-config.yaml'] }}"
+ dest: "{{ mktemp.stdout }}/{{ __console_config_file }}"
+ when: existing_config_map_data['webconsole-config.yaml'] is defined
+
+# Generate a new config when a config map is not defined.
+- when: existing_config_map_data['webconsole-config.yaml'] is not defined
+ block:
+ # Migrate the previous master-config.yaml asset config if it exists into the new
+ # web console config config map.
+ - name: Read existing assetConfig in master-config.yaml
+ slurp:
+ src: "{{ openshift.common.config_base }}/master/master-config.yaml"
+ register: master_config_output
+
+ - set_fact:
+ config_to_migrate: "{{ master_config_output.content | b64decode | from_yaml }}"
+
+ # Update properties in the config template based on inventory vars when the
+ # asset config does not exist.
+ - name: Set web console config properties from inventory variables
+ yedit:
+ src: "{{ mktemp.stdout }}/{{ __console_config_file }}"
+ edits:
+ - key: clusterInfo#consolePublicURL
+ # Must have a trailing slash
+ value: "{{ openshift.master.public_console_url }}/"
+ - key: clusterInfo#masterPublicURL
+ value: "{{ openshift.master.public_api_url }}"
+ - key: clusterInfo#logoutPublicURL
+ value: "{{ openshift.master.logout_url | default('') }}"
+ - key: features#inactivityTimeoutMinutes
+ value: "{{ openshift_web_console_inactivity_timeout_minutes | default(0) }}"
+ - key: extensions#scriptURLs
+ value: "{{ openshift_web_console_extension_script_urls | default([]) }}"
+ - key: extensions#stylesheetURLs
+ value: "{{ openshift_web_console_extension_stylesheet_urls | default([]) }}"
+ - key: extensions#properties
+ value: "{{ openshift_web_console_extension_properties | default({}) }}"
+ separator: '#'
+ state: present
+ when: config_to_migrate.assetConfig is not defined
+
+ - name: Migrate assetConfig from master-config.yaml
+ yedit:
+ src: "{{ mktemp.stdout }}/{{ __console_config_file }}"
+ edits:
+ - key: clusterInfo#consolePublicURL
+ value: "{{ config_to_migrate.assetConfig.publicURL }}"
+ - key: clusterInfo#masterPublicURL
+ value: "{{ config_to_migrate.assetConfig.masterPublicURL }}"
+ - key: clusterInfo#logoutPublicURL
+ value: "{{ config_to_migrate.assetConfig.logoutURL | default('') }}"
+ - key: clusterInfo#metricsPublicURL
+ value: "{{ config_to_migrate.assetConfig.metricsPublicURL | default('') }}"
+ - key: clusterInfo#loggingPublicURL
+ value: "{{ config_to_migrate.assetConfig.loggingPublicURL | default('') }}"
+ - key: servingInfo#maxRequestsInFlight
+ value: "{{ config_to_migrate.assetConfig.servingInfo.maxRequestsInFlight | default(0) }}"
+ - key: servingInfo#requestTimeoutSeconds
+ value: "{{ config_to_migrate.assetConfig.servingInfo.requestTimeoutSeconds | default(0) }}"
+ separator: '#'
+ state: present
+ when: config_to_migrate.assetConfig is defined
- slurp:
src: "{{ mktemp.stdout }}/{{ __console_config_file }}"
- register: config
+ register: updated_console_config
+
+- name: Reconcile with the web console RBAC file
+ shell: >
+ {{ openshift_client_binary }} process -f "{{ mktemp.stdout }}/{{ __console_rbac_file }}" --config={{ mktemp.stdout }}/admin.kubeconfig
+ | {{ openshift_client_binary }} auth reconcile --config={{ mktemp.stdout }}/admin.kubeconfig -f -
-- name: Apply template file
+- name: Apply the web console template file
shell: >
{{ openshift_client_binary }} process -f "{{ mktemp.stdout }}/{{ __console_template_file }}"
- --param API_SERVER_CONFIG="{{ config['content'] | b64decode }}"
+ --param API_SERVER_CONFIG="{{ updated_console_config['content'] | b64decode }}"
--param IMAGE="{{ openshift_web_console_prefix }}{{ openshift_web_console_image_name }}:{{ openshift_web_console_version }}"
--param NODE_SELECTOR={{ openshift_web_console_nodeselector | to_json | quote }}
--param REPLICA_COUNT="{{ openshift_web_console_replica_count }}"
- | {{ openshift_client_binary }} apply -f -
+ --config={{ mktemp.stdout }}/admin.kubeconfig
+ | {{ openshift_client_binary }} apply --config={{ mktemp.stdout }}/admin.kubeconfig -f -
- name: Verify that the web console is running
command: >
diff --git a/roles/openshift_web_console/tasks/rollout_console.yml b/roles/openshift_web_console/tasks/rollout_console.yml
new file mode 100644
index 000000000..75682ba1d
--- /dev/null
+++ b/roles/openshift_web_console/tasks/rollout_console.yml
@@ -0,0 +1,20 @@
+---
+- name: Check if console deployment exists
+ oc_obj:
+ kind: deployments
+ name: webconsole
+ namespace: openshift-web-console
+ state: list
+ register: console_deployment
+
+# There's currently no command to trigger a rollout for a k8s deployment
+# without changing the pod spec. Add an annotation to force a rollout.
+- name: Roll out updated web console deployment
+ oc_edit:
+ kind: deployments
+ name: webconsole
+ namespace: openshift-web-console
+ separator: '#'
+ content:
+ spec#template#metadata#annotations#installer-triggered-rollout: "{{ ansible_date_time.iso8601_micro }}"
+ when: console_deployment.results.results.0 | length > 0
diff --git a/roles/openshift_web_console/tasks/update_asset_config.yml b/roles/openshift_web_console/tasks/update_asset_config.yml
deleted file mode 100644
index 0992b32e1..000000000
--- a/roles/openshift_web_console/tasks/update_asset_config.yml
+++ /dev/null
@@ -1,68 +0,0 @@
----
-# This task updates asset config values in the webconsole-config config map in
-# the openshift-web-console namespace. The values to set are pased in the
-# variable `asset_config_edits`, which is an array of objects with `key` and
-# `value` properties in the same format as `yedit` module `edits`. Only
-# properties passed are updated.
-#
-# Note that this triggers a redeployment on the console and a brief downtime
-# since it uses a `Recreate` strategy.
-#
-# Example usage:
-#
-# - include_role:
-# name: openshift_web_console
-# tasks_from: update_asset_config.yml
-# vars:
-# asset_config_edits:
-# - key: loggingPublicURL
-# value: "https://{{ openshift_logging_kibana_hostname }}"
-# when: openshift_web_console_install | default(true) | bool
-
-- name: Read web console config map
- oc_configmap:
- namespace: openshift-web-console
- name: webconsole-config
- state: list
- register: webconsole_config
-
-- name: Make temp directory
- command: mktemp -d /tmp/console-ansible-XXXXXX
- register: mktemp
- changed_when: False
-
-- name: Copy asset config to temp file
- copy:
- content: "{{webconsole_config.results.results[0].data['webconsole-config.yaml']}}"
- dest: "{{ mktemp.stdout }}/webconsole-config.yaml"
-
-- name: Change asset config properties
- yedit:
- src: "{{ mktemp.stdout }}/webconsole-config.yaml"
- edits: "{{asset_config_edits}}"
-
-- name: Update web console config map
- oc_configmap:
- namespace: openshift-web-console
- name: webconsole-config
- state: present
- from_file:
- webconsole-config.yaml: "{{ mktemp.stdout }}/webconsole-config.yaml"
-
-- name: Remove temp directory
- file:
- state: absent
- name: "{{ mktemp.stdout }}"
- changed_when: False
-
-# There's currently no command to trigger a rollout for a k8s deployment
-# without changing the pod spec. Add an annotation to force a rollout after
-# the config map has been edited.
-- name: Rollout updated web console deployment
- oc_edit:
- kind: deployments
- name: webconsole
- namespace: openshift-web-console
- separator: '#'
- content:
- spec#template#metadata#annotations#installer-triggered-rollout: "{{ ansible_date_time.iso8601_micro }}"
diff --git a/roles/openshift_web_console/tasks/update_console_config.yml b/roles/openshift_web_console/tasks/update_console_config.yml
new file mode 100644
index 000000000..41da2c16a
--- /dev/null
+++ b/roles/openshift_web_console/tasks/update_console_config.yml
@@ -0,0 +1,66 @@
+---
+# This task updates asset config values in the webconsole-config config map in
+# the openshift-web-console namespace. The values to set are passed in the
+# variable `console_config_edits`, which is an array of objects with `key` and
+# `value` properties in the same format as `yedit` module `edits`. Only
+# properties passed are updated. The separator for nested properties is `#`.
+#
+# Note that this triggers a redeployment on the console and a brief downtime
+# since it uses a `Recreate` strategy.
+#
+# Example usage:
+#
+# - include_role:
+# name: openshift_web_console
+# tasks_from: update_console_config.yml
+# vars:
+# console_config_edits:
+# - key: clusterInfo#loggingPublicURL
+# value: "https://{{ openshift_logging_kibana_hostname }}"
+# when: openshift_web_console_install | default(true) | bool
+
+- name: Read the existing web console config map
+ oc_configmap:
+ namespace: openshift-web-console
+ name: webconsole-config
+ state: list
+ register: webconsole_config_map
+
+- set_fact:
+ existing_config_map_data: "{{ webconsole_config_map.results.results[0].data | default({}) }}"
+
+- when: existing_config_map_data['webconsole-config.yaml'] is defined
+ block:
+ - name: Make temp directory
+ command: mktemp -d /tmp/console-ansible-XXXXXX
+ register: mktemp_console
+ changed_when: False
+
+ - name: Copy the existing web console config to temp directory
+ copy:
+ content: "{{ existing_config_map_data['webconsole-config.yaml'] }}"
+ dest: "{{ mktemp_console.stdout }}/webconsole-config.yaml"
+
+ - name: Change web console config properties
+ yedit:
+ src: "{{ mktemp_console.stdout }}/webconsole-config.yaml"
+      edits: "{{ console_config_edits }}"
+ separator: '#'
+ state: present
+
+ - name: Update web console config map
+ oc_configmap:
+ namespace: openshift-web-console
+ name: webconsole-config
+ state: present
+ from_file:
+ webconsole-config.yaml: "{{ mktemp_console.stdout }}/webconsole-config.yaml"
+
+ - name: Remove temp directory
+ file:
+ state: absent
+ name: "{{ mktemp_console.stdout }}"
+ changed_when: False
+
+  # TODO: Only roll out if the config has changed.
+ - include_tasks: rollout_console.yml
diff --git a/roles/openshift_web_console/vars/default_images.yml b/roles/openshift_web_console/vars/default_images.yml
index 7adb8a0d0..42d331ac5 100644
--- a/roles/openshift_web_console/vars/default_images.yml
+++ b/roles/openshift_web_console/vars/default_images.yml
@@ -1,4 +1,4 @@
---
-__openshift_web_console_prefix: "docker.io/openshift/"
+__openshift_web_console_prefix: "docker.io/openshift/origin-"
__openshift_web_console_version: "latest"
-__openshift_web_console_image_name: "origin-web-console"
+__openshift_web_console_image_name: "web-console"
diff --git a/roles/openshift_web_console/vars/main.yml b/roles/openshift_web_console/vars/main.yml
index 80bc56a17..e91048e38 100644
--- a/roles/openshift_web_console/vars/main.yml
+++ b/roles/openshift_web_console/vars/main.yml
@@ -2,4 +2,5 @@
__console_files_location: "../../../files/origin-components/"
__console_template_file: "console-template.yaml"
+__console_rbac_file: "console-rbac-template.yaml"
__console_config_file: "console-config.yaml"
diff --git a/roles/openshift_web_console/vars/openshift-enterprise.yml b/roles/openshift_web_console/vars/openshift-enterprise.yml
index 721ac1d27..375c22067 100644
--- a/roles/openshift_web_console/vars/openshift-enterprise.yml
+++ b/roles/openshift_web_console/vars/openshift-enterprise.yml
@@ -1,4 +1,4 @@
---
-__openshift_web_console_prefix: "registry.access.redhat.com/openshift3/"
+__openshift_web_console_prefix: "registry.access.redhat.com/openshift3/ose-"
__openshift_web_console_version: "v3.9"
-__openshift_web_console_image_name: "ose-web-console"
+__openshift_web_console_image_name: "web-console"
diff --git a/roles/os_firewall/tasks/firewalld.yml b/roles/os_firewall/tasks/firewalld.yml
index 4eae31596..fa933da51 100644
--- a/roles/os_firewall/tasks/firewalld.yml
+++ b/roles/os_firewall/tasks/firewalld.yml
@@ -2,7 +2,9 @@
- name: Fail - Firewalld is not supported on Atomic Host
fail:
msg: "Firewalld is not supported on Atomic Host"
- when: r_os_firewall_is_atomic | bool
+ when:
+ - r_os_firewall_is_atomic | bool
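+  # The failure can be bypassed by opting into unsupported configurations.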
+ - not openshift_enable_unsupported_configurations | default(false)
- name: Install firewalld packages
package:
@@ -10,6 +12,7 @@
state: present
register: result
until: result is succeeded
+ when: not r_os_firewall_is_atomic | bool
- name: Ensure iptables services are not enabled
systemd:
diff --git a/roles/template_service_broker/defaults/main.yml b/roles/template_service_broker/defaults/main.yml
index c32872d24..3465832cc 100644
--- a/roles/template_service_broker/defaults/main.yml
+++ b/roles/template_service_broker/defaults/main.yml
@@ -3,4 +3,4 @@
template_service_broker_remove: False
template_service_broker_install: True
openshift_template_service_broker_namespaces: ['openshift']
-template_service_broker_selector: { "region": "infra" }
+template_service_broker_selector: "{{ openshift_hosted_infra_selector | default('region=infra') | map_from_pairs }}"
diff --git a/roles/template_service_broker/tasks/install.yml b/roles/template_service_broker/tasks/install.yml
index 604e94602..4e6ad2ae5 100644
--- a/roles/template_service_broker/tasks/install.yml
+++ b/roles/template_service_broker/tasks/install.yml
@@ -22,6 +22,11 @@
register: mktemp
changed_when: False
+- name: Copy admin client config
+ command: >
+    cp {{ openshift.common.config_base }}/master/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
+ changed_when: false
+
- copy:
src: "{{ __tsb_files_location }}/{{ item }}"
dest: "{{ mktemp.stdout }}/{{ item }}"
@@ -43,16 +48,18 @@
- name: Apply template file
shell: >
- {{ openshift_client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_template_file }}"
+ {{ openshift_client_binary }} process --config={{ mktemp.stdout }}/admin.kubeconfig
+ -f "{{ mktemp.stdout }}/{{ __tsb_template_file }}"
--param API_SERVER_CONFIG="{{ config['content'] | b64decode }}"
--param IMAGE="{{ template_service_broker_prefix }}{{ template_service_broker_image_name }}:{{ template_service_broker_version }}"
--param NODE_SELECTOR={{ template_service_broker_selector | to_json | quote }}
- | {{ openshift_client_binary }} apply -f -
+ | {{ openshift_client_binary }} apply --config={{ mktemp.stdout }}/admin.kubeconfig -f -
# reconcile with rbac
- name: Reconcile with RBAC file
shell: >
- {{ openshift_client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_rbac_file }}" | {{ openshift_client_binary }} auth reconcile -f -
+ {{ openshift_client_binary }} process --config={{ mktemp.stdout }}/admin.kubeconfig -f "{{ mktemp.stdout }}/{{ __tsb_rbac_file }}"
+ | {{ openshift_client_binary }} auth reconcile --config={{ mktemp.stdout }}/admin.kubeconfig -f -
# Check that the TSB is running
- name: Verify that TSB is running
@@ -79,9 +86,15 @@
# Register with broker
- name: Register TSB with broker
shell: >
- {{ openshift_client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_broker_file }}" --param CA_BUNDLE="{{ __ca_bundle.content }}" | {{ openshift_client_binary }} apply -f -
+ {{ openshift_client_binary }} process --config={{ mktemp.stdout }}/admin.kubeconfig -f "{{ mktemp.stdout }}/{{ __tsb_broker_file }}" --param CA_BUNDLE="{{ __ca_bundle.content }}" | {{ openshift_client_binary }} apply --config={{ mktemp.stdout }}/admin.kubeconfig -f -
- file:
state: absent
name: "{{ mktemp.stdout }}"
changed_when: False
+
+- name: Roll out the console so it discovers the template service broker is installed
+ include_role:
+ name: openshift_web_console
+ tasks_from: rollout_console.yml
+ when: openshift_web_console_install | default(true) | bool
diff --git a/roles/template_service_broker/tasks/remove.yml b/roles/template_service_broker/tasks/remove.yml
index db1b558e4..48dc1327e 100644
--- a/roles/template_service_broker/tasks/remove.yml
+++ b/roles/template_service_broker/tasks/remove.yml
@@ -3,6 +3,11 @@
register: mktemp
changed_when: False
+- name: Copy admin client config
+ command: >
+    cp {{ openshift.common.config_base }}/master/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
+ changed_when: false
+
- copy:
src: "{{ __tsb_files_location }}/{{ item }}"
dest: "{{ mktemp.stdout }}/{{ item }}"
@@ -12,11 +17,11 @@
- name: Delete TSB broker
shell: >
- {{ openshift_client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_broker_file }}" | {{ openshift_client_binary }} delete --ignore-not-found -f -
+ {{ openshift_client_binary }} process --config={{ mktemp.stdout }}/admin.kubeconfig -f "{{ mktemp.stdout }}/{{ __tsb_broker_file }}" | {{ openshift_client_binary }} delete --config={{ mktemp.stdout }}/admin.kubeconfig --ignore-not-found -f -
- name: Delete TSB objects
shell: >
- {{ openshift_client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_template_file }}" | {{ openshift_client_binary }} delete --ignore-not-found -f -
+ {{ openshift_client_binary }} process --config={{ mktemp.stdout }}/admin.kubeconfig -f "{{ mktemp.stdout }}/{{ __tsb_template_file }}" | {{ openshift_client_binary }} delete --config={{ mktemp.stdout }}/admin.kubeconfig --ignore-not-found -f -
- name: empty out tech preview extension file for service console UI
copy:
@@ -31,3 +36,9 @@
state: absent
name: "{{ mktemp.stdout }}"
changed_when: False
+
+- name: Roll out the console so it discovers the template service broker is removed
+ include_role:
+ name: openshift_web_console
+ tasks_from: rollout_console.yml
+ when: openshift_web_console_install | default(true) | bool
diff --git a/roles/template_service_broker/vars/default_images.yml b/roles/template_service_broker/vars/default_images.yml
index 77afe1f43..dc164a4db 100644
--- a/roles/template_service_broker/vars/default_images.yml
+++ b/roles/template_service_broker/vars/default_images.yml
@@ -1,4 +1,4 @@
---
-__template_service_broker_prefix: "docker.io/openshift/"
+__template_service_broker_prefix: "docker.io/openshift/origin-"
__template_service_broker_version: "latest"
-__template_service_broker_image_name: "origin"
+__template_service_broker_image_name: "template-service-broker"
diff --git a/roles/template_service_broker/vars/openshift-enterprise.yml b/roles/template_service_broker/vars/openshift-enterprise.yml
index dfab1e01b..b65b97691 100644
--- a/roles/template_service_broker/vars/openshift-enterprise.yml
+++ b/roles/template_service_broker/vars/openshift-enterprise.yml
@@ -1,4 +1,4 @@
---
-__template_service_broker_prefix: "registry.access.redhat.com/openshift3/"
+__template_service_broker_prefix: "registry.access.redhat.com/openshift3/ose-"
__template_service_broker_version: "v3.7"
-__template_service_broker_image_name: "ose"
+__template_service_broker_image_name: "template-service-broker"
diff --git a/roles/tuned/tasks/main.yml b/roles/tuned/tasks/main.yml
index 4a28d47b2..5129f4471 100644
--- a/roles/tuned/tasks/main.yml
+++ b/roles/tuned/tasks/main.yml
@@ -28,7 +28,12 @@
when: item.state == 'file'
- name: Make tuned use the recommended tuned profile on restart
- file: path=/etc/tuned/active_profile state=absent
+ file:
+ path: '{{ item }}'
+ state: absent
+ with_items:
+ - /etc/tuned/active_profile
+ - /etc/tuned/profile_mode
- name: Restart tuned service
systemd: