Diffstat (limited to 'roles')
-rw-r--r--  roles/openshift_logging/tasks/install_elasticsearch.yaml | 170
-rw-r--r--  roles/openshift_logging/tasks/oc_apply.yaml               |  94
-rw-r--r--  roles/openshift_logging/tasks/set_es_storage.yaml         |  82
-rw-r--r--  roles/openshift_logging/templates/curator.j2              |   5
4 files changed, 197 insertions, 154 deletions
diff --git a/roles/openshift_logging/tasks/install_elasticsearch.yaml b/roles/openshift_logging/tasks/install_elasticsearch.yaml
index 28fad420b..3e36f9d0c 100644
--- a/roles/openshift_logging/tasks/install_elasticsearch.yaml
+++ b/roles/openshift_logging/tasks/install_elasticsearch.yaml
@@ -5,60 +5,47 @@
- set_fact: openshift_logging_es_pvc_prefix="logging-es"
when: "not openshift_logging_es_pvc_prefix or openshift_logging_es_pvc_prefix == ''"
-- set_fact: es_pvc_pool={{[]}}
-
-- set_fact: openshift_logging_es_pvc_prefix="{{ openshift_logging_es_pvc_prefix | default('logging-es') }}"
-
-- name: Generate PersistentVolumeClaims
- include: "{{ role_path}}/tasks/generate_pvcs.yaml"
+### evaluate if the PVC attached to the dc currently matches the provided vars
+## if it does then we reuse that pvc in the DC
+- include: set_es_storage.yaml
vars:
- es_pv_selector: "{{openshift_logging_es_pv_selector}}"
- es_pvc_dynamic: "{{openshift_logging_es_pvc_dynamic | bool}}"
- es_pvc_names: "{{openshift_logging_facts.elasticsearch.pvcs.keys()}}"
- es_pvc_prefix: "{{openshift_logging_es_pvc_prefix}}"
- es_pvc_size: "{{openshift_logging_es_pvc_size}}"
- es_dc_names: "{{openshift_logging_facts.elasticsearch.deploymentconfigs.keys()}}"
- es_cluster_size: "{{openshift_logging_es_cluster_size}}"
- es_access_modes: "{{ openshift_logging_storage_access_modes }}"
-
-# we should initialize the es_dc_pool with the current keys
-- name: Init pool of DeploymentConfig names for Elasticsearch
- set_fact: es_dc_pool={{ es_dc_pool | default([]) + [deploy_name] }}
- with_items: "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() }}"
+ es_component: es
+ es_name: "{{ deployment.0 }}"
+ es_spec: "{{ deployment.1 }}"
+ es_node_selector: "{{ openshift_logging_es_nodeselector | default({}) }}"
+ es_pvc_names: "{{ openshift_logging_facts.elasticsearch.pvcs.keys() }}"
+ es_pvc_size: "{{ openshift_logging_es_pvc_size }}"
+ es_pvc_prefix: "{{ openshift_logging_es_pvc_prefix }}"
+ es_pvc_dynamic: "{{ openshift_logging_es_pvc_dynamic | bool }}"
+ es_pv_selector: "{{ openshift_logging_es_pv_selector }}"
+ es_cpu_limit: "{{ openshift_logging_es_cpu_limit }}"
+ es_memory_limit: "{{ openshift_logging_es_memory_limit }}"
+ es_number_of_shards: "{{ openshift_logging_es_number_of_shards }}"
+ es_number_of_replicas: "{{ openshift_logging_es_number_of_replicas }}"
+ with_together:
+ - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() }}"
+ - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.values() }}"
loop_control:
- loop_var: deploy_name
-
-# This should be used to generate new DC names if necessary
-- name: Create new DeploymentConfig names for Elasticsearch
- set_fact: es_dc_pool={{es_dc_pool|default([]) + [deploy_name]}}
- vars:
- component: es
- es_cluster_name: "{{component}}"
- deploy_name_prefix: "logging-{{component}}"
- deploy_name: "{{deploy_name_prefix}}-{{'abcdefghijklmnopqrstuvwxyz0123456789'|random_word(8)}}"
- with_sequence: count={{ openshift_logging_es_cluster_size | int - openshift_logging_current_es_size | int }}
- check_mode: no
+ loop_var: deployment
+## if it does not then we should create one that does and attach it
-- name: Generate Elasticsearch DeploymentConfig
- template: src=es.j2 dest={{mktemp.stdout}}/templates/logging-{{deploy_name}}-dc.yaml
+## create new dc/pvc if needed
+- include: set_es_storage.yaml
vars:
- component: es
- logging_component: elasticsearch
- deploy_name_prefix: "logging-{{component}}"
- image: "{{openshift_logging_image_prefix}}logging-elasticsearch:{{openshift_logging_image_version}}"
- es_cluster_name: "{{component}}"
- es_cpu_limit: "{{openshift_logging_es_cpu_limit }}"
- es_memory_limit: "{{openshift_logging_es_memory_limit}}"
- pvc_claim: "{{(es_pvc_pool | length > item.0) | ternary(es_pvc_pool[item.0], None)}}"
- deploy_name: "{{item.1}}"
- es_node_selector: "{{openshift_logging_es_nodeselector | default({}) }}"
- es_storage: "{{openshift_logging_facts|es_storage(deploy_name, pvc_claim)}}"
+ es_component: es
+ es_name: "logging-es-{{'abcdefghijklmnopqrstuvwxyz0123456789'|random_word(8)}}"
+ es_spec: "{}"
+ es_node_selector: "{{ openshift_logging_es_nodeselector | default({}) }}"
+ es_pvc_names: "{{ openshift_logging_facts.elasticsearch.pvcs.keys() }}"
+ es_pvc_size: "{{ openshift_logging_es_pvc_size }}"
+ es_pvc_prefix: "{{ openshift_logging_es_pvc_prefix }}"
+ es_pvc_dynamic: "{{ openshift_logging_es_pvc_dynamic | bool }}"
+ es_pv_selector: "{{ openshift_logging_es_pv_selector }}"
+ es_cpu_limit: "{{ openshift_logging_es_cpu_limit }}"
+ es_memory_limit: "{{ openshift_logging_es_memory_limit }}"
es_number_of_shards: "{{ openshift_logging_es_number_of_shards }}"
es_number_of_replicas: "{{ openshift_logging_es_number_of_replicas }}"
- with_indexed_items:
- - "{{ es_dc_pool }}"
- check_mode: no
- changed_when: no
+ with_sequence: count={{ openshift_logging_es_cluster_size | int - openshift_logging_facts.elasticsearch.deploymentconfigs | count }}
# --------- Tasks for Operation clusters ---------
@@ -80,67 +67,46 @@
- set_fact: openshift_logging_es_ops_pvc_prefix="logging-es-ops"
when: "not openshift_logging_es_ops_pvc_prefix or openshift_logging_es_ops_pvc_prefix == ''"
-- set_fact: es_pvc_pool={{[]}}
-
-- name: Generate PersistentVolumeClaims for Ops
- include: "{{ role_path}}/tasks/generate_pvcs.yaml"
+- include: set_es_storage.yaml
vars:
- es_pvc_names: "{{openshift_logging_facts.elasticsearch_ops.pvcs.keys()}}"
- es_dc_names: "{{openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys()}}"
- es_pvc_size: "{{openshift_logging_es_ops_pvc_size}}"
- es_pvc_prefix: "{{openshift_logging_es_ops_pvc_prefix}}"
- es_cluster_size: "{{openshift_logging_es_ops_cluster_size|int}}"
- es_pvc_dynamic: "{{openshift_logging_es_ops_pvc_dynamic | bool}}"
- es_pv_selector: "{{openshift_logging_es_ops_pv_selector}}"
- es_access_modes: "{{ openshift_logging_storage_access_modes }}"
- when:
- - openshift_logging_use_ops | bool
- check_mode: no
-
-- name: Init pool of DeploymentConfig names for Elasticsearch Ops
- set_fact: es_ops_dc_pool={{ es_ops_dc_pool | default([]) + [deploy_name] }}
- with_items: "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() }}"
+ es_component: es-ops
+ es_name: "{{ deployment.0 }}"
+ es_spec: "{{ deployment.1 }}"
+ es_node_selector: "{{ openshift_logging_es_ops_nodeselector | default({}) }}"
+ es_pvc_names: "{{ openshift_logging_facts.elasticsearch_ops.pvcs.keys() }}"
+ es_pvc_size: "{{ openshift_logging_es_ops_pvc_size }}"
+ es_pvc_prefix: "{{ openshift_logging_es_ops_pvc_prefix }}"
+ es_pvc_dynamic: "{{ openshift_logging_es_ops_pvc_dynamic | bool }}"
+ es_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}"
+ es_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}"
+ es_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}"
+ es_number_of_shards: "{{ openshift_logging_es_ops_number_of_shards }}"
+ es_number_of_replicas: "{{ openshift_logging_es_ops_number_of_replicas }}"
+ with_together:
+ - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() }}"
+ - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.values() }}"
loop_control:
- loop_var: deploy_name
+ loop_var: deployment
when:
- openshift_logging_use_ops | bool
+## if it does not then we should create one that does and attach it
-- name: Create new DeploymentConfig names for Elasticsearch Ops
- set_fact: es_ops_dc_pool={{es_ops_dc_pool | default([]) + [deploy_name]}}
+## create new dc/pvc if needed
+- include: set_es_storage.yaml
vars:
- component: es-ops
- es_cluster_name: "{{component}}"
- deploy_name_prefix: "logging-{{component}}"
- deploy_name: "{{deploy_name_prefix}}-{{'abcdefghijklmnopqrstuvwxyz0123456789'|random_word(8)}}"
- cluster_size: "{{openshift_logging_es_ops_cluster_size|int}}"
- with_sequence: count={{ openshift_logging_es_ops_cluster_size | int - openshift_logging_current_es_ops_size | int }}
- when:
- - openshift_logging_use_ops | bool
- check_mode: no
-
-- name: Generate Elasticsearch DeploymentConfig for Ops
- template: src=es.j2 dest={{mktemp.stdout}}/templates/logging-{{deploy_name}}-dc.yaml
- vars:
- component: es-ops
- logging_component: elasticsearch
- deploy_name_prefix: "logging-{{component}}"
- image: "{{openshift_logging_image_prefix}}logging-elasticsearch:{{openshift_logging_image_version}}"
- pvc_claim: "{{(es_pvc_pool | length > item.0) | ternary(es_pvc_pool[item.0], None)}}"
- deploy_name: "{{item.1}}"
- es_cluster_name: "{{component}}"
- es_cpu_limit: "{{openshift_logging_es_ops_cpu_limit }}"
- es_memory_limit: "{{openshift_logging_es_ops_memory_limit}}"
- es_node_quorum: "{{es_ops_node_quorum}}"
- es_recover_after_nodes: "{{es_ops_recover_after_nodes}}"
- es_recover_expected_nodes: "{{es_ops_recover_expected_nodes}}"
- openshift_logging_es_recover_after_time: "{{openshift_logging_es_ops_recover_after_time}}"
- es_node_selector: "{{openshift_logging_es_ops_nodeselector | default({}) }}"
- es_storage: "{{openshift_logging_facts|es_storage(deploy_name, pvc_claim,root='elasticsearch_ops')}}"
+ es_component: es-ops
+ es_name: "logging-es-ops-{{'abcdefghijklmnopqrstuvwxyz0123456789'|random_word(8)}}"
+ es_spec: "{}"
+ es_node_selector: "{{ openshift_logging_es_ops_nodeselector | default({}) }}"
+ es_pvc_names: "{{ openshift_logging_facts.elasticsearch_ops.pvcs.keys() }}"
+ es_pvc_size: "{{ openshift_logging_es_ops_pvc_size }}"
+ es_pvc_prefix: "{{ openshift_logging_es_ops_pvc_prefix }}"
+ es_pvc_dynamic: "{{ openshift_logging_es_ops_pvc_dynamic | bool }}"
+ es_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}"
+ es_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}"
+ es_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}"
es_number_of_shards: "{{ openshift_logging_es_ops_number_of_shards }}"
es_number_of_replicas: "{{ openshift_logging_es_ops_number_of_replicas }}"
- with_indexed_items:
- - "{{ es_ops_dc_pool | default([]) }}"
+ with_sequence: count={{ openshift_logging_es_ops_cluster_size | int - openshift_logging_facts.elasticsearch_ops.deploymentconfigs | count }}
when:
- openshift_logging_use_ops | bool
- check_mode: no
- changed_when: no
diff --git a/roles/openshift_logging/tasks/oc_apply.yaml b/roles/openshift_logging/tasks/oc_apply.yaml
index cb9509de1..c4db7d033 100644
--- a/roles/openshift_logging/tasks/oc_apply.yaml
+++ b/roles/openshift_logging/tasks/oc_apply.yaml
@@ -1,52 +1,52 @@
---
-- name: Checking generation of {{file_content.kind}} {{file_content.metadata.name}}
- command: >
- {{ openshift.common.client_binary }}
- --config={{ kubeconfig }}
- get {{file_content.kind}} {{file_content.metadata.name}}
- -o jsonpath='{.metadata.resourceVersion}'
- -n {{namespace}}
- register: generation_init
- failed_when: "'not found' not in generation_init.stderr and generation_init.stdout == ''"
- changed_when: no
+- oc_obj:
+ kind: "{{ file_content.kind }}"
+ name: "{{ file_content.metadata.name }}"
+ state: present
+ namespace: "{{ namespace }}"
+ files:
+ - "{{ file_name }}"
+ when: file_content.kind != "Service"
-- name: Applying {{file_name}}
- command: >
- {{ openshift.common.client_binary }} --config={{ kubeconfig }}
- apply -f {{ file_name }}
- -n {{ namespace }}
- register: generation_apply
- failed_when: "'error' in generation_apply.stderr"
- changed_when: no
+## still need to do this for services until the template logic is replaced by oc_*
+- block:
+ - name: Checking generation of {{file_content.kind}} {{file_content.metadata.name}}
+ command: >
+ {{ openshift.common.client_binary }}
+ --config={{ kubeconfig }}
+ get {{file_content.kind}} {{file_content.metadata.name}}
+ -o jsonpath='{.metadata.resourceVersion}'
+ -n {{namespace}}
+ register: generation_init
+ failed_when: "'not found' not in generation_init.stderr and generation_init.stdout == ''"
+ changed_when: no
-- name: Determine change status of {{file_content.kind}} {{file_content.metadata.name}}
- command: >
- {{ openshift.common.client_binary }} --config={{ kubeconfig }}
- get {{file_content.kind}} {{file_content.metadata.name}}
- -o jsonpath='{.metadata.resourceVersion}'
- -n {{namespace}}
- register: generation_changed
- failed_when: "'not found' not in generation_changed.stderr and generation_changed.stdout == ''"
- changed_when: generation_changed.stdout | default (0) | int > generation_init.stdout | default(0) | int
- when:
- - "'field is immutable' not in generation_apply.stderr"
+ - name: Applying {{file_name}}
+ command: >
+ {{ openshift.common.client_binary }} --config={{ kubeconfig }}
+ apply -f {{ file_name }}
+ -n {{ namespace }}
+ register: generation_apply
+ failed_when: "'error' in generation_apply.stderr"
+ changed_when: no
-- name: Removing previous {{file_name}}
- command: >
- {{ openshift.common.client_binary }} --config={{ kubeconfig }}
- delete -f {{ file_name }}
- -n {{ namespace }}
- register: generation_delete
- failed_when: "'error' in generation_delete.stderr"
- changed_when: generation_delete.rc == 0
- when: "'field is immutable' in generation_apply.stderr"
+ - name: Removing previous {{file_name}}
+ command: >
+ {{ openshift.common.client_binary }} --config={{ kubeconfig }}
+ delete -f {{ file_name }}
+ -n {{ namespace }}
+ register: generation_delete
+ failed_when: "'error' in generation_delete.stderr"
+ changed_when: generation_delete.rc == 0
+ when: "'field is immutable' in generation_apply.stderr"
-- name: Recreating {{file_name}}
- command: >
- {{ openshift.common.client_binary }} --config={{ kubeconfig }}
- apply -f {{ file_name }}
- -n {{ namespace }}
- register: generation_apply
- failed_when: "'error' in generation_apply.stderr"
- changed_when: generation_apply.rc == 0
- when: "'field is immutable' in generation_apply.stderr"
+ - name: Recreating {{file_name}}
+ command: >
+ {{ openshift.common.client_binary }} --config={{ kubeconfig }}
+ apply -f {{ file_name }}
+ -n {{ namespace }}
+ register: generation_apply
+ failed_when: "'error' in generation_apply.stderr"
+ changed_when: generation_apply.rc == 0
+ when: "'field is immutable' in generation_apply.stderr"
+ when: file_content.kind == "Service"
diff --git a/roles/openshift_logging/tasks/set_es_storage.yaml b/roles/openshift_logging/tasks/set_es_storage.yaml
new file mode 100644
index 000000000..198b1d04d
--- /dev/null
+++ b/roles/openshift_logging/tasks/set_es_storage.yaml
@@ -0,0 +1,82 @@
+---
+- set_fact: es_storage_type="{{ es_spec.volumes['elasticsearch-storage'] }}"
+ when: es_spec.volumes is defined
+
+- set_fact: es_storage_claim="{{ es_spec.volumes['elasticsearch-storage'].persistentVolumeClaim.claimName }}"
+ when:
+ - es_spec.volumes is defined
+ - es_storage_type.persistentVolumeClaim is defined
+
+- set_fact: es_storage_claim=""
+ when:
+ - not es_spec.volumes is defined or not es_storage_type.persistentVolumeClaim is defined
+
+## take an ES dc and evaluate its storage option
+# if it is a hostmount or emptydir we don't do anything with it
+# if it's a pvc we see if the corresponding pvc matches the provided specs (if they exist)
+- oc_obj:
+ state: list
+ kind: pvc
+ name: "{{ es_storage_claim }}"
+ namespace: "{{ openshift_logging_namespace }}"
+ register: pvc_spec
+ failed_when: pvc_spec.results.stderr is defined
+ when:
+ - es_spec.volumes is defined
+ - es_storage_type.persistentVolumeClaim is defined
+
+- set_fact: pvc_size="{{ pvc_spec.results.results[0].spec.resources.requests.storage }}"
+ when:
+ - pvc_spec.results is defined
+ - pvc_spec.results.results[0].spec is defined
+
+# if not, create the pvc and use it
+- block:
+
+ - name: Generating PersistentVolumeClaims
+ template: src=pvc.j2 dest={{mktemp.stdout}}/templates/logging-{{obj_name}}-pvc.yaml
+ vars:
+ obj_name: "{{ es_pvc_prefix }}-{{ es_pvc_names | count }}"
+ size: "{{ es_pvc_size }}"
+ access_modes: "{{ openshift_logging_storage_access_modes }}"
+ pv_selector: "{{ es_pv_selector }}"
+ when: not es_pvc_dynamic | bool
+ check_mode: no
+ changed_when: no
+
+ - name: Generating PersistentVolumeClaims - Dynamic
+ template: src=pvc.j2 dest={{mktemp.stdout}}/templates/logging-{{obj_name}}-pvc.yaml
+ vars:
+ obj_name: "{{ es_pvc_prefix }}-{{ es_pvc_names | count }}"
+ annotations:
+ volume.alpha.kubernetes.io/storage-class: "dynamic"
+ size: "{{ es_pvc_size }}"
+ access_modes: "{{ openshift_logging_storage_access_modes }}"
+ pv_selector: "{{ es_pv_selector }}"
+ when: es_pvc_dynamic | bool
+ check_mode: no
+ changed_when: no
+
+ - set_fact: es_storage_claim="{{ es_pvc_prefix }}-{{ es_pvc_names | count }}"
+
+ when:
+ - es_pvc_size | search('^\d.*')
+ - not es_spec.volumes is defined or not es_storage_claim | search( es_pvc_prefix ) or ( not pvc_size | search( es_pvc_size ) and not es_pvc_size | search( pvc_size ) )
+
+- name: Generate Elasticsearch DeploymentConfig
+ template: src=es.j2 dest={{mktemp.stdout}}/templates/logging-{{deploy_name}}-dc.yaml
+ vars:
+ component: "{{ es_component }}"
+ deploy_name: "{{ es_name }}"
+ logging_component: elasticsearch
+ deploy_name_prefix: "logging-{{ es_component }}"
+ image: "{{openshift_logging_image_prefix}}logging-elasticsearch:{{openshift_logging_image_version}}"
+ es_cluster_name: "{{component}}"
+ es_cpu_limit: "{{ es_cpu_limit }}"
+ es_memory_limit: "{{ es_memory_limit }}"
+ es_node_selector: "{{ es_node_selector }}"
+ es_storage: "{{ openshift_logging_facts | es_storage( es_name, es_storage_claim ) }}"
+ es_number_of_shards: "{{ es_number_of_shards }}"
+ es_number_of_replicas: "{{ es_number_of_replicas }}"
+ check_mode: no
+ changed_when: no
diff --git a/roles/openshift_logging/templates/curator.j2 b/roles/openshift_logging/templates/curator.j2
index a0fefd882..c6284166b 100644
--- a/roles/openshift_logging/templates/curator.j2
+++ b/roles/openshift_logging/templates/curator.j2
@@ -89,9 +89,6 @@ spec:
- name: config
mountPath: /etc/curator/settings
readOnly: true
- - name: elasticsearch-storage
- mountPath: /elasticsearch/persistent
- readOnly: true
volumes:
- name: certs
secret:
@@ -99,5 +96,3 @@ spec:
- name: config
configMap:
name: logging-curator
- - name: elasticsearch-storage
- emptyDir: {}