summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--roles/openshift_logging/meta/main.yaml3
-rw-r--r--roles/openshift_logging/tasks/install_elasticsearch.yaml46
-rw-r--r--roles/openshift_logging/tasks/install_logging.yaml8
-rw-r--r--roles/openshift_logging/tasks/scale.yaml28
-rw-r--r--roles/openshift_logging/tasks/start_cluster.yaml54
-rw-r--r--roles/openshift_logging/tasks/stop_cluster.yaml54
-rw-r--r--roles/openshift_logging/tasks/upgrade_logging.yaml8
-rw-r--r--roles/openshift_metrics/meta/main.yaml1
-rw-r--r--roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml48
-rw-r--r--roles/openshift_metrics/tasks/import_jks_certs.yaml9
-rw-r--r--roles/openshift_metrics/tasks/main.yaml2
-rw-r--r--roles/openshift_metrics/tasks/scale.yaml30
-rw-r--r--roles/openshift_metrics/tasks/start_metrics.yaml27
-rw-r--r--roles/openshift_metrics/tasks/stop_metrics.yaml27
14 files changed, 190 insertions, 155 deletions
diff --git a/roles/openshift_logging/meta/main.yaml b/roles/openshift_logging/meta/main.yaml
index 7050e51db..9c480f73a 100644
--- a/roles/openshift_logging/meta/main.yaml
+++ b/roles/openshift_logging/meta/main.yaml
@@ -1,7 +1,7 @@
---
galaxy_info:
author: OpenShift Red Hat
- description: OpenShift Embedded Router
+ description: OpenShift Aggregated Logging
company: Red Hat, Inc.
license: Apache License, Version 2.0
min_ansible_version: 2.2
@@ -12,4 +12,5 @@ galaxy_info:
categories:
- cloud
dependencies:
+- role: lib_openshift
- role: openshift_facts
diff --git a/roles/openshift_logging/tasks/install_elasticsearch.yaml b/roles/openshift_logging/tasks/install_elasticsearch.yaml
index 9b1c004f2..d12811d56 100644
--- a/roles/openshift_logging/tasks/install_elasticsearch.yaml
+++ b/roles/openshift_logging/tasks/install_elasticsearch.yaml
@@ -1,25 +1,31 @@
---
+- name: Getting current ES deployment size
+ set_fact: openshift_logging_current_es_size={{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | length }}
+
- name: Generate PersistentVolumeClaims
include: "{{ role_path}}/tasks/generate_pvcs.yaml"
vars:
es_pvc_names: "{{openshift_logging_facts.elasticsearch.pvcs.keys()}}"
es_dc_names: "{{openshift_logging_facts.elasticsearch.deploymentconfigs.keys()}}"
- when:
- - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | length < openshift_logging_es_cluster_size }}"
+# we should initialize the es_dc_pool with the current keys
- name: Init pool of DeploymentConfig names for Elasticsearch
- set_fact: es_dc_pool={{es_dc_pool | default([]) + [deploy_name]}}
+ set_fact: es_dc_pool={{ es_dc_pool | default([]) + [deploy_name] }}
+ with_items: "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() }}"
+ loop_control:
+ loop_var: deploy_name
+
+# This should be used to generate new DC names if necessary
+- name: Create new DeploymentConfig names for Elasticsearch
+ set_fact: es_dc_pool={{es_dc_pool + [deploy_name]}}
vars:
component: es
es_cluster_name: "{{component}}"
deploy_name_prefix: "logging-{{component}}"
deploy_name: "{{deploy_name_prefix}}-{{'abcdefghijklmnopqrstuvwxyz0123456789'|random_word(8)}}"
- with_sequence: count={{(openshift_logging_es_cluster_size - openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | length) | abs}}
- when:
- - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | length < openshift_logging_es_cluster_size }}"
+ with_sequence: count={{ openshift_logging_es_cluster_size | int - openshift_logging_current_es_size | int }}
check_mode: no
-
- name: Generate Elasticsearch DeploymentConfig
template: src=es.j2 dest={{mktemp.stdout}}/templates/logging-{{deploy_name}}-dc.yaml
vars:
@@ -35,14 +41,15 @@
deploy_name: "{{item.1}}"
es_node_selector: "{{openshift_logging_es_nodeselector | default({})}}"
with_indexed_items:
- - "{{es_dc_pool | default([])}}"
+ - "{{ es_dc_pool }}"
check_mode: no
- when:
- - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | length < openshift_logging_es_cluster_size }}"
changed_when: no
# --------- Tasks for Operation clusters ---------
+- name: Getting current ES Ops deployment size
+ set_fact: openshift_logging_current_es_ops_size={{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | length }}
+
- name: Validate Elasticsearch cluster size for Ops
fail: msg="The openshift_logging_es_ops_cluster_size may not be scaled down more than 1 less (or 0) the number of Elasticsearch nodes already deployed"
vars:
@@ -65,21 +72,27 @@
openshift_logging_es_pv_selector: "{{openshift_logging_es_ops_pv_selector}}"
when:
- openshift_logging_use_ops
- - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | length < openshift_logging_es_ops_cluster_size }}"
check_mode: no
-- name: Init pool of DeploymentConfig names for Elasticsearch for Ops
- set_fact: es_dc_pool_ops={{es_dc_pool_ops | default([]) + [deploy_name]}}
+- name: Init pool of DeploymentConfig names for Elasticsearch Ops
+ set_fact: es_ops_dc_pool={{ es_ops_dc_pool | default([]) + [deploy_name] }}
+ with_items: "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() }}"
+ loop_control:
+ loop_var: deploy_name
+ when:
+ - openshift_logging_use_ops
+
+- name: Create new DeploymentConfig names for Elasticsearch Ops
+ set_fact: es_ops_dc_pool={{es_ops_dc_pool + [deploy_name]}}
vars:
component: es-ops
es_cluster_name: "{{component}}"
deploy_name_prefix: "logging-{{component}}"
deploy_name: "{{deploy_name_prefix}}-{{'abcdefghijklmnopqrstuvwxyz0123456789'|random_word(8)}}"
cluster_size: "{{openshift_logging_es_ops_cluster_size}}"
- with_sequence: count={{openshift_logging_es_ops_cluster_size - openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | length}}
+ with_sequence: count={{ openshift_logging_es_ops_cluster_size | int - openshift_logging_current_es_ops_size | int }}
when:
- openshift_logging_use_ops
- - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | length < openshift_logging_es_ops_cluster_size }}"
check_mode: no
- name: Generate Elasticsearch DeploymentConfig for Ops
@@ -101,9 +114,8 @@
openshift_logging_es_recover_after_time: "{{openshift_logging_es_ops_recover_after_time}}"
es_node_selector: "{{openshift_logging_es_ops_nodeselector | default({})}}"
with_indexed_items:
- - "{{es_dc_pool_ops | default([])}}"
+ - "{{ es_ops_dc_pool | default([]) }}"
when:
- openshift_logging_use_ops
- - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | length < openshift_logging_es_ops_cluster_size }}"
check_mode: no
changed_when: no
diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml
index a9699adb8..8a33498cd 100644
--- a/roles/openshift_logging/tasks/install_logging.yaml
+++ b/roles/openshift_logging/tasks/install_logging.yaml
@@ -8,8 +8,12 @@
check_mode: no
- name: Validate Elasticsearch cluster size
- fail: msg="The openshift_logging_es_cluster_size may not be scaled down more than 1 less (or 0) the number of Elasticsearch nodes already deployed"
- when: "{{openshift_logging_facts.elasticsearch.deploymentconfigs | length - openshift_logging_es_cluster_size | abs > 1}}"
+ fail: msg="The openshift_logging_es_cluster_size may only be scaled down manually. Please see official documentation on how to do this."
+ when: openshift_logging_facts.elasticsearch.deploymentconfigs | length > openshift_logging_es_cluster_size
+
+- name: Validate Elasticsearch Ops cluster size
+ fail: msg="The openshift_logging_es_ops_cluster_size may only be scaled down manually. Please see official documentation on how to do this."
+ when: openshift_logging_facts.elasticsearch_ops.deploymentconfigs | length > openshift_logging_es_ops_cluster_size
- name: Install logging
include: "{{ role_path }}/tasks/install_{{ install_component }}.yaml"
diff --git a/roles/openshift_logging/tasks/scale.yaml b/roles/openshift_logging/tasks/scale.yaml
deleted file mode 100644
index 125d3b8af..000000000
--- a/roles/openshift_logging/tasks/scale.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
----
-- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get {{object}}
- -o jsonpath='{.spec.replicas}' -n {{openshift_logging_namespace}}
- register: replica_count
- failed_when: replica_count.rc == 1 and 'exists' not in replica_count.stderr
- when: not ansible_check_mode
- changed_when: no
-
-- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig scale {{object}}
- --replicas={{desired}} -n {{openshift_logging_namespace}}
- register: scale_result
- failed_when: scale_result.rc == 1 and 'exists' not in scale_result.stderr
- when:
- - not ansible_check_mode
- - replica_count.stdout|int != desired
-
-- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get {{object}} -n {{openshift_logging_namespace}} -o jsonpath='{.status.replicas}'
- register: replica_counts
- until: replica_counts.stdout|int == desired
- retries: 30
- delay: 10
- when:
- - not ansible_check_mode
- - replica_count.stdout|int != desired
- changed_when: no
diff --git a/roles/openshift_logging/tasks/start_cluster.yaml b/roles/openshift_logging/tasks/start_cluster.yaml
index 2d8af1385..07489ae79 100644
--- a/roles/openshift_logging/tasks/start_cluster.yaml
+++ b/roles/openshift_logging/tasks/start_cluster.yaml
@@ -26,9 +26,12 @@
changed_when: no
- name: start elasticsearch
- include: scale.yaml
- vars:
- desired: 1
+ oc_scale:
+ kind: dc
+ name: "{{object.split('/')[1]}}"
+ namespace: "{{openshift_logging_namespace}}"
+ kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
+ replicas: 1
with_items: "{{es_dc.stdout_lines}}"
loop_control:
loop_var: object
@@ -40,9 +43,12 @@
changed_when: no
- name: start kibana
- include: scale.yaml
- vars:
- desired: "{{ openshift_logging_kibana_replica_count | default (1) }}"
+ oc_scale:
+ kind: dc
+ name: "{{object.split('/')[1]}}"
+ namespace: "{{openshift_logging_namespace}}"
+ kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
+ replicas: "{{ openshift_logging_kibana_replica_count | default (1) }}"
with_items: "{{kibana_dc.stdout_lines}}"
loop_control:
loop_var: object
@@ -54,9 +60,12 @@
changed_when: no
- name: start curator
- include: scale.yaml
- vars:
- desired: 1
+ oc_scale:
+ kind: dc
+ name: "{{object.split('/')[1]}}"
+ namespace: "{{openshift_logging_namespace}}"
+ kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
+ replicas: 1
with_items: "{{curator_dc.stdout_lines}}"
loop_control:
loop_var: object
@@ -68,9 +77,12 @@
changed_when: no
- name: start elasticsearch-ops
- include: scale.yaml
- vars:
- desired: 1
+ oc_scale:
+ kind: dc
+ name: "{{object.split('/')[1]}}"
+ namespace: "{{openshift_logging_namespace}}"
+ kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
+ replicas: 1
with_items: "{{es_dc.stdout_lines}}"
loop_control:
loop_var: object
@@ -83,9 +95,12 @@
changed_when: no
- name: start kibana-ops
- include: scale.yaml
- vars:
- desired: "{{ openshift_logging_kibana_ops_replica_count | default (1) }}"
+ oc_scale:
+ kind: dc
+ name: "{{object.split('/')[1]}}"
+ namespace: "{{openshift_logging_namespace}}"
+ kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
+ replicas: "{{ openshift_logging_kibana_ops_replica_count | default (1) }}"
with_items: "{{kibana_dc.stdout_lines}}"
loop_control:
loop_var: object
@@ -98,9 +113,12 @@
changed_when: no
- name: start curator-ops
- include: scale.yaml
- vars:
- desired: 1
+ oc_scale:
+ kind: dc
+ name: "{{object.split('/')[1]}}"
+ namespace: "{{openshift_logging_namespace}}"
+ kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
+ replicas: 1
with_items: "{{curator_dc.stdout_lines}}"
loop_control:
loop_var: object
diff --git a/roles/openshift_logging/tasks/stop_cluster.yaml b/roles/openshift_logging/tasks/stop_cluster.yaml
index c8e8fbd2c..8e0df8344 100644
--- a/roles/openshift_logging/tasks/stop_cluster.yaml
+++ b/roles/openshift_logging/tasks/stop_cluster.yaml
@@ -24,9 +24,12 @@
changed_when: no
- name: stop elasticsearch
- include: scale.yaml
- vars:
- desired: 0
+ oc_scale:
+ kind: dc
+ name: "{{object.split('/')[1]}}"
+ namespace: "{{openshift_logging_namespace}}"
+ kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
+ replicas: 0
with_items: "{{es_dc.stdout_lines}}"
loop_control:
loop_var: object
@@ -37,9 +40,12 @@
changed_when: no
- name: stop kibana
- include: scale.yaml
- vars:
- desired: 0
+ oc_scale:
+ kind: dc
+ name: "{{object.split('/')[1]}}"
+ namespace: "{{openshift_logging_namespace}}"
+ kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
+ replicas: 0
with_items: "{{kibana_dc.stdout_lines}}"
loop_control:
loop_var: object
@@ -50,9 +56,12 @@
changed_when: no
- name: stop curator
- include: scale.yaml
- vars:
- desired: 0
+ oc_scale:
+ kind: dc
+ name: "{{object.split('/')[1]}}"
+ namespace: "{{openshift_logging_namespace}}"
+ kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
+ replicas: 0
with_items: "{{curator_dc.stdout_lines}}"
loop_control:
loop_var: object
@@ -63,9 +72,12 @@
changed_when: no
- name: stop elasticsearch-ops
- include: scale.yaml
- vars:
- desired: 0
+ oc_scale:
+ kind: dc
+ name: "{{object.split('/')[1]}}"
+ namespace: "{{openshift_logging_namespace}}"
+ kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
+ replicas: 0
with_items: "{{es_dc.stdout_lines}}"
loop_control:
loop_var: object
@@ -77,9 +89,12 @@
changed_when: no
- name: stop kibana-ops
- include: scale.yaml
- vars:
- desired: 0
+ oc_scale:
+ kind: dc
+ name: "{{object.split('/')[1]}}"
+ namespace: "{{openshift_logging_namespace}}"
+ kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
+ replicas: 0
with_items: "{{kibana_dc.stdout_lines}}"
loop_control:
loop_var: object
@@ -91,9 +106,12 @@
changed_when: no
- name: stop curator-ops
- include: scale.yaml
- vars:
- desired: 0
+ oc_scale:
+ kind: dc
+ name: "{{object.split('/')[1]}}"
+ namespace: "{{openshift_logging_namespace}}"
+ kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
+ replicas: 0
with_items: "{{curator_dc.stdout_lines}}"
loop_control:
loop_var: object
diff --git a/roles/openshift_logging/tasks/upgrade_logging.yaml b/roles/openshift_logging/tasks/upgrade_logging.yaml
index a93463239..cceacd538 100644
--- a/roles/openshift_logging/tasks/upgrade_logging.yaml
+++ b/roles/openshift_logging/tasks/upgrade_logging.yaml
@@ -14,9 +14,12 @@
check_mode: no
- name: start elasticsearch
- include: scale.yaml
- vars:
- desired: 1
+ oc_scale:
+ kind: dc
+ name: "{{object.split('/')[1]}}"
+ namespace: "{{openshift_logging_namespace}}"
+ kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
+ replicas: 1
with_items: "{{es_dc.stdout_lines}}"
loop_control:
loop_var: object
diff --git a/roles/openshift_metrics/meta/main.yaml b/roles/openshift_metrics/meta/main.yaml
index 68e94992e..50214135c 100644
--- a/roles/openshift_metrics/meta/main.yaml
+++ b/roles/openshift_metrics/meta/main.yaml
@@ -15,4 +15,5 @@ galaxy_info:
categories:
- openshift
dependencies:
+- { role: lib_openshift }
- { role: openshift_facts }
diff --git a/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml b/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml
index 9cf4afee0..9333d341c 100644
--- a/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml
+++ b/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml
@@ -19,25 +19,53 @@
- slurp: src={{ openshift_metrics_certs_dir }}/hawkular-metrics-truststore.pwd
register: hawkular_truststore_password
+- stat: path="{{openshift_metrics_certs_dir}}/{{item}}"
+ register: pwd_file_stat
+ with_items:
+ - hawkular-metrics.pwd
+ - hawkular-metrics.htpasswd
+ - hawkular-jgroups-keystore.pwd
+ changed_when: no
+
+- set_fact:
+ pwd_files: "{{pwd_files | default({}) | combine ({item.item: item.stat}) }}"
+ with_items: "{{pwd_file_stat.results}}"
+ changed_when: no
+
+- name: Create temp directory local on control node
+ local_action: command mktemp -d
+ register: local_tmp
+ changed_when: False
+
- name: generate password for hawkular metrics and jgroups
- copy:
- dest: '{{ openshift_metrics_certs_dir }}/{{ item }}.pwd'
- content: "{{ 15 | oo_random_word }}"
+ local_action: copy dest="{{ local_tmp.stdout}}/{{ item }}.pwd" content="{{ 15 | oo_random_word }}"
with_items:
- hawkular-metrics
- hawkular-jgroups-keystore
- when: not '{{ openshift_metrics_certs_dir }}/{{ item }}.pwd'|exists
+ when: "not pwd_files['{{ item }}.pwd'].exists"
- name: generate htpasswd file for hawkular metrics
- shell: >
- htpasswd -ci
- '{{ openshift_metrics_certs_dir }}/hawkular-metrics.htpasswd' hawkular
- < '{{ openshift_metrics_certs_dir }}/hawkular-metrics.pwd'
- when: >
- not '{{ openshift_metrics_certs_dir }}/hawkular-metrics.htpasswd'|exists
+ local_action: >
+ shell htpasswd -ci
+ '{{ local_tmp.stdout }}/hawkular-metrics.htpasswd' hawkular
+ < '{{ local_tmp.stdout }}/hawkular-metrics.pwd'
+ when: "not pwd_files['hawkular-metrics.htpasswd'].exists"
+
+- name: copy local generated passwords to target
+ copy:
+ src: "{{local_tmp.stdout}}/{{item}}"
+ dest: "{{openshift_metrics_certs_dir}}/{{item}}"
+ with_items:
+ - hawkular-metrics.pwd
+ - hawkular-metrics.htpasswd
+ - hawkular-jgroups-keystore.pwd
+ when: "not pwd_files['{{ item }}'].exists"
- include: import_jks_certs.yaml
+- local_action: file path="{{local_tmp.stdout}}" state=absent
+ changed_when: False
+
- name: read files for the hawkular-metrics secret
shell: >
printf '%s: ' '{{ item }}'
diff --git a/roles/openshift_metrics/tasks/import_jks_certs.yaml b/roles/openshift_metrics/tasks/import_jks_certs.yaml
index f5192b005..16fd8d9f8 100644
--- a/roles/openshift_metrics/tasks/import_jks_certs.yaml
+++ b/roles/openshift_metrics/tasks/import_jks_certs.yaml
@@ -29,10 +29,6 @@
- slurp: src={{ openshift_metrics_certs_dir }}/hawkular-jgroups-keystore.pwd
register: jgroups_keystore_password
- - local_action: command mktemp -d
- register: local_tmp
- changed_when: False
-
- fetch:
dest: "{{local_tmp.stdout}}/"
src: "{{ openshift_metrics_certs_dir }}/{{item}}"
@@ -60,11 +56,6 @@
src: "{{item}}"
with_fileglob: "{{local_tmp.stdout}}/*.*store"
- - file:
- path: "{{local_tmp.stdout}}"
- state: absent
- changed_when: False
-
when: not metrics_keystore.stat.exists or
not metrics_truststore.stat.exists or
not cassandra_keystore.stat.exists or
diff --git a/roles/openshift_metrics/tasks/main.yaml b/roles/openshift_metrics/tasks/main.yaml
index 1808db5d5..d03d4176b 100644
--- a/roles/openshift_metrics/tasks/main.yaml
+++ b/roles/openshift_metrics/tasks/main.yaml
@@ -1,5 +1,5 @@
---
-- name: Create temp directory for doing work in
+- name: Create temp directory for doing work in on target
command: mktemp -td openshift-metrics-ansible-XXXXXX
register: mktemp
changed_when: False
diff --git a/roles/openshift_metrics/tasks/scale.yaml b/roles/openshift_metrics/tasks/scale.yaml
deleted file mode 100644
index bb4fa621b..000000000
--- a/roles/openshift_metrics/tasks/scale.yaml
+++ /dev/null
@@ -1,30 +0,0 @@
----
-- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get {{object}}
- -o jsonpath='{.spec.replicas}' -n {{openshift_metrics_project}}
- register: replica_count
- failed_when: "replica_count.rc == 1 and 'exists' not in replica_count.stderr"
- when: not ansible_check_mode
- changed_when: no
-
-- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig scale {{object}}
- --replicas={{desired}} -n {{openshift_metrics_project}}
- register: scale_result
- failed_when: scale_result.rc == 1 and 'exists' not in scale_result.stderr
- when:
- - replica_count.stdout != (desired | string)
- - not ansible_check_mode
- changed_when: no
-
-- name: Waiting for {{object}} to scale to {{desired}}
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
- get {{object}} -n {{openshift_metrics_project|quote}} -o jsonpath='{.status.replicas}'
- register: replica_counts
- until: replica_counts.stdout.find("{{desired}}") != -1
- retries: 30
- delay: 10
- when:
- - replica_count.stdout != (desired | string)
- - not ansible_check_mode
diff --git a/roles/openshift_metrics/tasks/start_metrics.yaml b/roles/openshift_metrics/tasks/start_metrics.yaml
index c4cae4aff..f02774e47 100644
--- a/roles/openshift_metrics/tasks/start_metrics.yaml
+++ b/roles/openshift_metrics/tasks/start_metrics.yaml
@@ -10,9 +10,12 @@
changed_when: no
- name: Start Hawkular Cassandra
- include: scale.yaml
- vars:
- desired: 1
+ oc_scale:
+ kind: rc
+ name: "{{object.split('/')[1]}}"
+ kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
+ namespace: "{{openshift_metrics_project}}"
+ replicas: 1
with_items: "{{metrics_cassandra_rc.stdout_lines}}"
loop_control:
loop_var: object
@@ -28,9 +31,12 @@
changed_when: no
- name: Start Hawkular Metrics
- include: scale.yaml
- vars:
- desired: "{{openshift_metrics_hawkular_replicas}}"
+ oc_scale:
+ kind: rc
+ name: "{{object.split('/')[1]}}"
+ kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
+ namespace: "{{openshift_metrics_project}}"
+ replicas: "{{openshift_metrics_hawkular_replicas}}"
with_items: "{{metrics_metrics_rc.stdout_lines}}"
loop_control:
loop_var: object
@@ -46,9 +52,12 @@
changed_when: no
- name: Start Heapster
- include: scale.yaml
- vars:
- desired: 1
+ oc_scale:
+ kind: rc
+ name: "{{object.split('/')[1]}}"
+ kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
+ namespace: "{{openshift_metrics_project}}"
+ replicas: 1
with_items: "{{metrics_heapster_rc.stdout_lines}}"
loop_control:
loop_var: object
diff --git a/roles/openshift_metrics/tasks/stop_metrics.yaml b/roles/openshift_metrics/tasks/stop_metrics.yaml
index bae181e3e..5a73443a8 100644
--- a/roles/openshift_metrics/tasks/stop_metrics.yaml
+++ b/roles/openshift_metrics/tasks/stop_metrics.yaml
@@ -11,9 +11,12 @@
check_mode: no
- name: Stop Heapster
- include: scale.yaml
- vars:
- desired: 0
+ oc_scale:
+ kind: rc
+ name: "{{object.split('/')[1]}}"
+ kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
+ namespace: "{{openshift_metrics_project}}"
+ replicas: 0
with_items: "{{metrics_heapster_rc.stdout_lines}}"
loop_control:
loop_var: object
@@ -29,9 +32,12 @@
changed_when: "'No resources found' not in metrics_hawkular_rc.stderr"
- name: Stop Hawkular Metrics
- include: scale.yaml
- vars:
- desired: 0
+ oc_scale:
+ kind: rc
+ name: "{{object.split('/')[1]}}"
+ kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
+ namespace: "{{openshift_metrics_project}}"
+ replicas: 0
with_items: "{{metrics_hawkular_rc.stdout_lines}}"
loop_control:
loop_var: object
@@ -46,9 +52,12 @@
changed_when: "'No resources found' not in metrics_cassandra_rc.stderr"
- name: Stop Hawkular Cassandra
- include: scale.yaml
- vars:
- desired: 0
+ oc_scale:
+ kind: rc
+ name: "{{object.split('/')[1]}}"
+ kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
+ namespace: "{{openshift_metrics_project}}"
+ replicas: 0
with_items: "{{metrics_cassandra_rc.stdout_lines}}"
loop_control:
loop_var: object