path: root/roles/openshift_logging_elasticsearch
author     OpenShift Bot <eparis+openshiftbot@redhat.com>   2017-05-23 20:20:41 -0500
committer  GitHub <noreply@github.com>                      2017-05-23 20:20:41 -0500
commit     f4061b9ad327ddd294b16285462c5bdde10945a6 (patch)
tree       e7e1d824f9d81b1db34a02c1bb32eb245fac9c9c /roles/openshift_logging_elasticsearch
parent     dc9786a20225e970edf45daf2038e419f2fe7802 (diff)
parent     68f8a50abc0acd3005dbd98d0e7964bcf7900e26 (diff)
Merge pull request #3509 from ewolinetz/logging_component_subroles
Merged by openshift-bot
Diffstat (limited to 'roles/openshift_logging_elasticsearch')
-rw-r--r--  roles/openshift_logging_elasticsearch/defaults/main.yml                      |  57
-rw-r--r--  roles/openshift_logging_elasticsearch/files/es_migration.sh                  |  79
-rw-r--r--  roles/openshift_logging_elasticsearch/files/rolebinding-reader.yml           |   9
-rw-r--r--  roles/openshift_logging_elasticsearch/meta/main.yaml                         |  15
-rw-r--r--  roles/openshift_logging_elasticsearch/tasks/determine_version.yaml           |  19
-rw-r--r--  roles/openshift_logging_elasticsearch/tasks/main.yaml                        | 278
-rw-r--r--  roles/openshift_logging_elasticsearch/templates/elasticsearch-logging.yml.j2 |  72
-rw-r--r--  roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2         |  81
-rw-r--r--  roles/openshift_logging_elasticsearch/templates/es.j2                        | 126
-rw-r--r--  roles/openshift_logging_elasticsearch/templates/pvc.j2                       |  27
-rw-r--r--  roles/openshift_logging_elasticsearch/templates/rolebinding.j2               |  14
-rw-r--r--  roles/openshift_logging_elasticsearch/vars/main.yml                          |  12
12 files changed, 789 insertions, 0 deletions
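
For orientation: this merge extracts the Elasticsearch pieces of the aggregated logging stack into a standalone subrole. A minimal sketch of invoking it from a play, assuming a hypothetical 'masters' host group and using only variables defined in the defaults below (the play itself is illustrative and not part of this change):

- hosts: masters
  roles:
  - role: openshift_logging_elasticsearch
    openshift_logging_elasticsearch_namespace: logging
    openshift_logging_elasticsearch_deployment_type: data-master
    # true only for the -ops cluster; it merely changes generated object names
    openshift_logging_elasticsearch_ops_deployment: false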
diff --git a/roles/openshift_logging_elasticsearch/defaults/main.yml b/roles/openshift_logging_elasticsearch/defaults/main.yml
new file mode 100644
index 000000000..7923059da
--- /dev/null
+++ b/roles/openshift_logging_elasticsearch/defaults/main.yml
@@ -0,0 +1,57 @@
+---
+### Common settings
+openshift_logging_elasticsearch_image_prefix: "{{ openshift_hosted_logging_deployer_prefix | default('docker.io/openshift/origin-') }}"
+openshift_logging_elasticsearch_image_version: "{{ openshift_hosted_logging_deployer_version | default('latest') }}"
+openshift_logging_elasticsearch_image_pull_secret: "{{ openshift_hosted_logging_image_pull_secret | default('') }}"
+openshift_logging_elasticsearch_namespace: logging
+
+openshift_logging_elasticsearch_nodeselector: ""
+openshift_logging_elasticsearch_cpu_limit: 100m
+openshift_logging_elasticsearch_memory_limit: 512Mi
+openshift_logging_elasticsearch_recover_after_time: 5m
+
+openshift_logging_elasticsearch_replica_count: 1
+
+# ES deployment type
+openshift_logging_elasticsearch_deployment_type: "data-master"
+
+# ES deployment name
+openshift_logging_elasticsearch_deployment_name: ""
+
+# One of ['emptydir', 'pvc', 'hostmount']
+openshift_logging_elasticsearch_storage_type: "emptydir"
+
+# hostmount options
+openshift_logging_elasticsearch_hostmount_path: ""
+
+# pvc options
+# the name of the PVC we will bind to -- create it if it does not exist
+openshift_logging_elasticsearch_pvc_name: ""
+
+# required if the PVC does not already exist
+openshift_logging_elasticsearch_pvc_size: ""
+openshift_logging_elasticsearch_pvc_dynamic: false
+openshift_logging_elasticsearch_pvc_pv_selector: {}
+openshift_logging_elasticsearch_pvc_access_modes: ['ReadWriteOnce']
+openshift_logging_elasticsearch_storage_group: '65534'
+
+openshift_logging_es_pvc_prefix: "{{ openshift_hosted_logging_elasticsearch_pvc_prefix | default('logging-es') }}"
+
+# determines whether this is an operations (ops) deployment or a non-ops deployment;
+# used only for naming purposes
+openshift_logging_elasticsearch_ops_deployment: false
+
+openshift_logging_elasticsearch_ops_allow_cluster_reader: false
+
+# The following can be uncommented to provide custom configmap contents; take care when supplying file contents, as incorrect values can prevent the cluster from operating correctly
+#es_logging_contents:
+#es_config_contents:
+
+
+openshift_logging_master_url: "https://kubernetes.default.svc.{{ openshift.common.dns_domain }}"
+openshift_logging_master_public_url: "{{ openshift_hosted_logging_master_public_url | default('https://' + openshift.common.public_hostname + ':' ~ (openshift_master_api_port | default('8443', true))) }}"
+openshift_logging_es_host: logging-es
+openshift_logging_es_port: 9200
+openshift_logging_es_ca: /etc/fluent/keys/ca
+openshift_logging_es_client_cert: /etc/fluent/keys/cert
+openshift_logging_es_client_key: /etc/fluent/keys/key
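
The storage defaults above cover the three backends named in the comments (emptydir, pvc, hostmount). A hedged example of the overrides a PVC-backed deployment would set (the claim name and size are placeholders):

openshift_logging_elasticsearch_storage_type: pvc
openshift_logging_elasticsearch_pvc_name: logging-es-0
openshift_logging_elasticsearch_pvc_size: 10Gi
openshift_logging_elasticsearch_pvc_dynamic: true
openshift_logging_elasticsearch_pvc_access_modes: ['ReadWriteOnce']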
diff --git a/roles/openshift_logging_elasticsearch/files/es_migration.sh b/roles/openshift_logging_elasticsearch/files/es_migration.sh
new file mode 100644
index 000000000..339b5a1b2
--- /dev/null
+++ b/roles/openshift_logging_elasticsearch/files/es_migration.sh
@@ -0,0 +1,79 @@
+CA=${1:-/etc/openshift/logging/ca.crt}
+KEY=${2:-/etc/openshift/logging/system.admin.key}
+CERT=${3:-/etc/openshift/logging/system.admin.crt}
+openshift_logging_es_host=${4:-logging-es}
+openshift_logging_es_port=${5:-9200}
+namespace=${6:-logging}
+
+# for each index in _cat/indices
+# skip indices that begin with . - .kibana, .operations, etc.
+# skip indices that contain a uuid
+# get a list of unique project names
+# daterx - the date regex that matches the .%Y.%m.%d at the end of the indices
+# we are interested in - the awk will strip that part off
+function get_list_of_indices() {
+ curl -s --cacert $CA --key $KEY --cert $CERT https://$openshift_logging_es_host:$openshift_logging_es_port/_cat/indices | \
+ awk -v daterx='[.]20[0-9]{2}[.][0-1]?[0-9][.][0-9]{1,2}$' \
+ '$3 !~ "^[.]" && $3 !~ "^[^.]+[.][^.]+"daterx && $3 !~ "^project." && $3 ~ daterx {print gensub(daterx, "", "", $3)}' | \
+ sort -u
+}
+
+# for each index in _cat/indices
+# skip indices that begin with . - .kibana, .operations, etc.
+# get a list of unique project.uuid
+# daterx - the date regex that matches the .%Y.%m.%d at the end of the indices
+# we are interested in - the awk will strip that part off
+function get_list_of_proj_uuid_indices() {
+ curl -s --cacert $CA --key $KEY --cert $CERT https://$openshift_logging_es_host:$openshift_logging_es_port/_cat/indices | \
+ awk -v daterx='[.]20[0-9]{2}[.][0-1]?[0-9][.][0-9]{1,2}$' \
+ '$3 !~ "^[.]" && $3 ~ "^[^.]+[.][^.]+"daterx && $3 !~ "^project." && $3 ~ daterx {print gensub(daterx, "", "", $3)}' | \
+ sort -u
+}
+
+if [[ -z "$(oc get pods -l component=es -o jsonpath='{.items[?(@.status.phase == "Running")].metadata.name}')" ]]; then
+ echo "No Elasticsearch pods found running. Cannot update common data model."
+ exit 1
+fi
+
+count=$(get_list_of_indices | wc -l)
+if [ $count -eq 0 ]; then
+ echo No matching indices found - skipping update_for_uuid
+else
+ echo Creating aliases for $count index patterns . . .
+ {
+ echo '{"actions":['
+ get_list_of_indices | \
+ while IFS=. read proj ; do
+ # e.g. make test.uuid.* an alias of test.* so we can search for
+ # /test.uuid.*/_search and get both the test.uuid.* and
+ # the test.* indices
+ uid=$(oc get project "$proj" -o jsonpath='{.metadata.uid}' 2>/dev/null)
+      [ -n "$uid" ] && echo "{\"add\":{\"index\":\"$proj.*\",\"alias\":\"$proj.$uid.*\"}}"
+ done
+ echo ']}'
+ } | curl -s --cacert $CA --key $KEY --cert $CERT -XPOST -d @- "https://$openshift_logging_es_host:$openshift_logging_es_port/_aliases"
+fi
+
+count=$(get_list_of_proj_uuid_indices | wc -l)
+if [ $count -eq 0 ] ; then
+    echo No matching indices found - skipping update_for_common_data_model
+ exit 0
+fi
+
+echo Creating aliases for $count index patterns . . .
+# for each index in _cat/indices
+# skip indices that begin with . - .kibana, .operations, etc.
+# get a list of unique project.uuid
+# daterx - the date regex that matches the .%Y.%m.%d at the end of the indices
+# we are interested in - the awk will strip that part off
+{
+ echo '{"actions":['
+ get_list_of_proj_uuid_indices | \
+ while IFS=. read proj uuid ; do
+    # e.g. make project.test.uuid.* an alias of test.uuid.* so we can search for
+ # /project.test.uuid.*/_search and get both the test.uuid.* and
+ # the project.test.uuid.* indices
+ echo "{\"add\":{\"index\":\"$proj.$uuid.*\",\"alias\":\"${PROJ_PREFIX}$proj.$uuid.*\"}}"
+ done
+ echo ']}'
+} | curl -s --cacert $CA --key $KEY --cert $CERT -XPOST -d @- "https://$openshift_logging_es_host:$openshift_logging_es_port/_aliases"
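
To make the two alias passes concrete, a worked example with a hypothetical project named myproject and a shortened UID placeholder <uid> (comments only; not code from this change, and PROJ_PREFIX is assumed to be 'project.'):

# first pass:  index myproject.2017.05.23 yields project "myproject";
#              the alias myproject.<uid>.* is added for the index pattern myproject.*
# second pass: index myproject.<uid>.2017.05.23 yields "myproject.<uid>";
#              the alias ${PROJ_PREFIX}myproject.<uid>.* (i.e. project.myproject.<uid>.*)
#              is added for the index pattern myproject.<uid>.*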
diff --git a/roles/openshift_logging_elasticsearch/files/rolebinding-reader.yml b/roles/openshift_logging_elasticsearch/files/rolebinding-reader.yml
new file mode 100644
index 000000000..567c9f289
--- /dev/null
+++ b/roles/openshift_logging_elasticsearch/files/rolebinding-reader.yml
@@ -0,0 +1,9 @@
+apiVersion: v1
+kind: ClusterRole
+metadata:
+ name: rolebinding-reader
+rules:
+- resources:
+ - clusterrolebindings
+ verbs:
+ - get
diff --git a/roles/openshift_logging_elasticsearch/meta/main.yaml b/roles/openshift_logging_elasticsearch/meta/main.yaml
new file mode 100644
index 000000000..097270772
--- /dev/null
+++ b/roles/openshift_logging_elasticsearch/meta/main.yaml
@@ -0,0 +1,15 @@
+---
+galaxy_info:
+ author: OpenShift Red Hat
+ description: OpenShift Aggregated Logging Elasticsearch Component
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 2.2
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+dependencies:
+- role: lib_openshift
diff --git a/roles/openshift_logging_elasticsearch/tasks/determine_version.yaml b/roles/openshift_logging_elasticsearch/tasks/determine_version.yaml
new file mode 100644
index 000000000..1a952b5cf
--- /dev/null
+++ b/roles/openshift_logging_elasticsearch/tasks/determine_version.yaml
@@ -0,0 +1,19 @@
+---
+# debating making this a module instead?
+- fail:
+  msg: A version to install must be provided via 'openshift_logging_image_version'
+ when: not openshift_logging_image_version or openshift_logging_image_version == ''
+
+- set_fact:
+ es_version: "{{ __latest_es_version }}"
+ when: openshift_logging_image_version == 'latest'
+
+- debug: var=openshift_logging_image_version
+
+# should we just assume that we will have the correct major version?
+- set_fact: es_version="{{ openshift_logging_image_version | regex_replace('^v?(?P<major>\d)\.(?P<minor>\d).*$', '3_\\g<minor>') }}"
+ when: openshift_logging_image_version != 'latest'
+
+- fail:
+ msg: Invalid version specified for Elasticsearch
+ when: es_version not in __allowed_es_versions
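
To make the version mapping concrete, a standalone sketch with a hypothetical image tag (the regex and replacement are copied from the task above; the tag value is illustrative only):

- set_fact: example_es_version="{{ 'v3.6.173' | regex_replace('^v?(?P<major>\d)\.(?P<minor>\d).*$', '3_\\g<minor>') }}"
- debug: var=example_es_version
# example_es_version evaluates to "3_6", which is then validated against __allowed_es_versions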
diff --git a/roles/openshift_logging_elasticsearch/tasks/main.yaml b/roles/openshift_logging_elasticsearch/tasks/main.yaml
new file mode 100644
index 000000000..7e88a7498
--- /dev/null
+++ b/roles/openshift_logging_elasticsearch/tasks/main.yaml
@@ -0,0 +1,278 @@
+---
+- name: Validate Elasticsearch cluster size
+ fail: msg="The openshift_logging_es_cluster_size may only be scaled down manually. Please see official documentation on how to do this."
+ when: openshift_logging_facts.elasticsearch.deploymentconfigs | length > openshift_logging_es_cluster_size|int
+
+- name: Validate Elasticsearch Ops cluster size
+ fail: msg="The openshift_logging_es_ops_cluster_size may only be scaled down manually. Please see official documentation on how to do this."
+ when: openshift_logging_facts.elasticsearch_ops.deploymentconfigs | length > openshift_logging_es_ops_cluster_size|int
+
+- fail:
+ msg: Invalid deployment type, one of ['data-master', 'data-client', 'master', 'client'] allowed
+ when: not openshift_logging_elasticsearch_deployment_type in __allowed_es_types
+
+- set_fact:
+ elasticsearch_name: "{{ 'logging-elasticsearch' ~ ( (openshift_logging_elasticsearch_ops_deployment | default(false) | bool) | ternary('-ops', '')) }}"
+ es_component: "{{ 'es' ~ ( (openshift_logging_elasticsearch_ops_deployment | default(false) | bool) | ternary('-ops', '') ) }}"
+
+- include: determine_version.yaml
+
+# allow passing in a tempdir
+- name: Create temp directory for doing work in
+ command: mktemp -d /tmp/openshift-logging-ansible-XXXXXX
+ register: mktemp
+ changed_when: False
+
+- set_fact:
+ tempdir: "{{ mktemp.stdout }}"
+
+# This may not be necessary in this role
+- name: Create templates subdirectory
+ file:
+ state: directory
+ path: "{{ tempdir }}/templates"
+ mode: 0755
+ changed_when: False
+
+# Ensure all of the components required for the ES deployment exist
+
+# service account
+- name: Create ES service account
+ oc_serviceaccount:
+ state: present
+ name: "aggregated-logging-elasticsearch"
+ namespace: "{{ openshift_logging_elasticsearch_namespace }}"
+ image_pull_secrets: "{{ openshift_logging_image_pull_secret }}"
+ when: openshift_logging_image_pull_secret != ''
+
+- name: Create ES service account
+ oc_serviceaccount:
+ state: present
+ name: "aggregated-logging-elasticsearch"
+ namespace: "{{ openshift_logging_elasticsearch_namespace }}"
+ when:
+ - openshift_logging_image_pull_secret == ''
+
+# rolebinding reader
+- copy:
+ src: rolebinding-reader.yml
+ dest: "{{ tempdir }}/rolebinding-reader.yml"
+
+- name: Create rolebinding-reader role
+ oc_obj:
+ state: present
+ name: "rolebinding-reader"
+ kind: clusterrole
+ namespace: "{{ openshift_logging_elasticsearch_namespace }}"
+ files:
+ - "{{ tempdir }}/rolebinding-reader.yml"
+ delete_after: true
+
+# SA roles
+- name: Set rolebinding-reader permissions for ES
+ oc_adm_policy_user:
+ state: present
+ namespace: "{{ openshift_logging_elasticsearch_namespace }}"
+ resource_kind: cluster-role
+ resource_name: rolebinding-reader
+ user: "system:serviceaccount:{{ openshift_logging_elasticsearch_namespace }}:aggregated-logging-elasticsearch"
+
+# View role and binding
+- name: Generate logging-elasticsearch-view-role
+ template:
+ src: rolebinding.j2
+ dest: "{{mktemp.stdout}}/logging-elasticsearch-view-role.yaml"
+ vars:
+ obj_name: logging-elasticsearch-view-role
+ roleRef:
+ name: view
+ subjects:
+ - kind: ServiceAccount
+ name: aggregated-logging-elasticsearch
+ changed_when: no
+
+- name: Set logging-elasticsearch-view-role role
+ oc_obj:
+ state: present
+ name: "logging-elasticsearch-view-role"
+ kind: rolebinding
+ namespace: "{{ openshift_logging_elasticsearch_namespace }}"
+ files:
+ - "{{ tempdir }}/logging-elasticsearch-view-role.yaml"
+ delete_after: true
+
+# configmap
+- template:
+ src: elasticsearch-logging.yml.j2
+ dest: "{{ tempdir }}/elasticsearch-logging.yml"
+ when: es_logging_contents is undefined
+ changed_when: no
+
+- template:
+ src: elasticsearch.yml.j2
+ dest: "{{ tempdir }}/elasticsearch.yml"
+ vars:
+ allow_cluster_reader: "{{ openshift_logging_elasticsearch_ops_allow_cluster_reader | lower | default('false') }}"
+ es_number_of_shards: "{{ openshift_logging_es_number_of_shards | default(1) }}"
+ es_number_of_replicas: "{{ openshift_logging_es_number_of_replicas | default(0) }}"
+ when: es_config_contents is undefined
+ changed_when: no
+
+- copy:
+ content: "{{ es_logging_contents }}"
+ dest: "{{ tempdir }}/elasticsearch-logging.yml"
+ when: es_logging_contents is defined
+ changed_when: no
+
+- copy:
+ content: "{{ es_config_contents }}"
+ dest: "{{ tempdir }}/elasticsearch.yml"
+ when: es_config_contents is defined
+ changed_when: no
+
+- name: Set ES configmap
+ oc_configmap:
+ state: present
+ name: "{{ elasticsearch_name }}"
+ namespace: "{{ openshift_logging_elasticsearch_namespace }}"
+ from_file:
+ elasticsearch.yml: "{{ tempdir }}/elasticsearch.yml"
+ logging.yml: "{{ tempdir }}/elasticsearch-logging.yml"
+
+
+# secret
+- name: Set ES secret
+ oc_secret:
+ state: present
+ name: "logging-elasticsearch"
+ namespace: "{{ openshift_logging_elasticsearch_namespace }}"
+ files:
+ - name: key
+ path: "{{ generated_certs_dir }}/logging-es.jks"
+ - name: truststore
+ path: "{{ generated_certs_dir }}/truststore.jks"
+ - name: searchguard.key
+ path: "{{ generated_certs_dir }}/elasticsearch.jks"
+ - name: searchguard.truststore
+ path: "{{ generated_certs_dir }}/truststore.jks"
+ - name: admin-key
+ path: "{{ generated_certs_dir }}/system.admin.key"
+ - name: admin-cert
+ path: "{{ generated_certs_dir }}/system.admin.crt"
+ - name: admin-ca
+ path: "{{ generated_certs_dir }}/ca.crt"
+ - name: admin.jks
+ path: "{{ generated_certs_dir }}/system.admin.jks"
+
+# services
+- name: Set logging-{{ es_component }}-cluster service
+ oc_service:
+ state: present
+ name: "logging-{{ es_component }}-cluster"
+ namespace: "{{ openshift_logging_elasticsearch_namespace }}"
+ selector:
+ component: "{{ es_component }}"
+ provider: openshift
+ # pending #4091
+ #labels:
+ #- logging-infra: 'support'
+ ports:
+ - port: 9300
+
+- name: Set logging-{{ es_component }} service
+ oc_service:
+ state: present
+ name: "logging-{{ es_component }}"
+ namespace: "{{ openshift_logging_elasticsearch_namespace }}"
+ selector:
+ component: "{{ es_component }}"
+ provider: openshift
+ # pending #4091
+ #labels:
+ #- logging-infra: 'support'
+ ports:
+ - port: 9200
+ targetPort: "restapi"
+
+- name: Creating ES storage template
+ template:
+ src: pvc.j2
+ dest: "{{ tempdir }}/templates/logging-es-pvc.yml"
+ vars:
+ obj_name: "{{ openshift_logging_elasticsearch_pvc_name }}"
+ size: "{{ openshift_logging_elasticsearch_pvc_size }}"
+ access_modes: "{{ openshift_logging_elasticsearch_pvc_access_modes | list }}"
+ pv_selector: "{{ openshift_logging_elasticsearch_pvc_pv_selector }}"
+ when:
+ - openshift_logging_elasticsearch_storage_type == "pvc"
+ - not openshift_logging_elasticsearch_pvc_dynamic
+
+- name: Creating ES storage template
+ template:
+ src: pvc.j2
+ dest: "{{ tempdir }}/templates/logging-es-pvc.yml"
+ vars:
+ obj_name: "{{ openshift_logging_elasticsearch_pvc_name }}"
+ size: "{{ openshift_logging_elasticsearch_pvc_size }}"
+ access_modes: "{{ openshift_logging_elasticsearch_pvc_access_modes | list }}"
+ pv_selector: "{{ openshift_logging_elasticsearch_pvc_pv_selector }}"
+ annotations:
+ volume.alpha.kubernetes.io/storage-class: "dynamic"
+ when:
+ - openshift_logging_elasticsearch_storage_type == "pvc"
+ - openshift_logging_elasticsearch_pvc_dynamic
+
+- name: Set ES storage
+ oc_obj:
+ state: present
+ kind: pvc
+ name: "{{ openshift_logging_elasticsearch_pvc_name }}"
+ namespace: "{{ openshift_logging_elasticsearch_namespace }}"
+ files:
+ - "{{ tempdir }}/templates/logging-es-pvc.yml"
+ delete_after: true
+ when:
+ - openshift_logging_elasticsearch_storage_type == "pvc"
+
+- set_fact:
+ es_deploy_name: "logging-{{ es_component }}-{{ openshift_logging_elasticsearch_deployment_type }}-{{ 'abcdefghijklmnopqrstuvwxyz0123456789' | random_word(8) }}"
+ when: openshift_logging_elasticsearch_deployment_name == ""
+
+- set_fact:
+ es_deploy_name: "{{ openshift_logging_elasticsearch_deployment_name }}"
+ when: openshift_logging_elasticsearch_deployment_name != ""
+
+# DC
+- name: Set ES dc templates
+ template:
+ src: es.j2
+ dest: "{{ tempdir }}/templates/logging-es-dc.yml"
+ vars:
+ es_cluster_name: "{{ es_component }}"
+ component: "{{ es_component }}"
+ logging_component: elasticsearch
+ deploy_name: "{{ es_deploy_name }}"
+ image: "{{ openshift_logging_image_prefix }}logging-elasticsearch:{{ openshift_logging_image_version }}"
+ es_cpu_limit: "{{ openshift_logging_elasticsearch_cpu_limit }}"
+ es_memory_limit: "{{ openshift_logging_elasticsearch_memory_limit }}"
+ es_node_selector: "{{ openshift_logging_elasticsearch_nodeselector | default({}) }}"
+ deploy_type: "{{ openshift_logging_elasticsearch_deployment_type }}"
+ replicas: 1
+
+- name: Set ES dc
+ oc_obj:
+ state: present
+ name: "{{ es_deploy_name }}"
+ namespace: "{{ openshift_logging_elasticsearch_namespace }}"
+ kind: dc
+ files:
+ - "{{ tempdir }}/templates/logging-es-dc.yml"
+ delete_after: true
+
+## Placeholder for migration when necessary ##
+
+- name: Delete temp directory
+ file:
+ name: "{{ tempdir }}"
+ state: absent
+ changed_when: False
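
The migration placeholder above is intentionally left empty by this change. If files/es_migration.sh were wired in later, a hedged sketch of such a task might look like the following; the certificate paths and the when-guard are assumptions, not part of this PR:

- name: Run common data model migration
  script: >-
    es_migration.sh
    /etc/openshift/logging/ca.crt
    /etc/openshift/logging/system.admin.key
    /etc/openshift/logging/system.admin.crt
    {{ openshift_logging_es_host }}
    {{ openshift_logging_es_port }}
    {{ openshift_logging_elasticsearch_namespace }}
  when: false  # placeholder guard; enable only when a migration is actually required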
diff --git a/roles/openshift_logging_elasticsearch/templates/elasticsearch-logging.yml.j2 b/roles/openshift_logging_elasticsearch/templates/elasticsearch-logging.yml.j2
new file mode 100644
index 000000000..377abe21f
--- /dev/null
+++ b/roles/openshift_logging_elasticsearch/templates/elasticsearch-logging.yml.j2
@@ -0,0 +1,72 @@
+# you can override this by setting a system property, for example -Des.logger.level=DEBUG
+es.logger.level: INFO
+rootLogger: ${es.logger.level}, console, file
+logger:
+ # log action execution errors for easier debugging
+ action: WARN
+ # reduce the logging for aws, too much is logged under the default INFO
+ com.amazonaws: WARN
+ io.fabric8.elasticsearch: ${PLUGIN_LOGLEVEL}
+ io.fabric8.kubernetes: ${PLUGIN_LOGLEVEL}
+
+ # gateway
+ #gateway: DEBUG
+ #index.gateway: DEBUG
+
+ # peer shard recovery
+ #indices.recovery: DEBUG
+
+ # discovery
+ #discovery: TRACE
+
+ index.search.slowlog: TRACE, index_search_slow_log_file
+ index.indexing.slowlog: TRACE, index_indexing_slow_log_file
+
+ # search-guard
+ com.floragunn.searchguard: WARN
+
+additivity:
+ index.search.slowlog: false
+ index.indexing.slowlog: false
+
+appender:
+ console:
+ type: console
+ layout:
+ type: consolePattern
+ conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
+
+ file:
+ type: dailyRollingFile
+ file: ${path.logs}/${cluster.name}.log
+ datePattern: "'.'yyyy-MM-dd"
+ layout:
+ type: pattern
+ conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
+
+ # Use the following log4j-extras RollingFileAppender to enable gzip compression of log files.
+ # For more information see https://logging.apache.org/log4j/extras/apidocs/org/apache/log4j/rolling/RollingFileAppender.html
+ #file:
+ #type: extrasRollingFile
+ #file: ${path.logs}/${cluster.name}.log
+ #rollingPolicy: timeBased
+ #rollingPolicy.FileNamePattern: ${path.logs}/${cluster.name}.log.%d{yyyy-MM-dd}.gz
+ #layout:
+ #type: pattern
+ #conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
+
+ index_search_slow_log_file:
+ type: dailyRollingFile
+ file: ${path.logs}/${cluster.name}_index_search_slowlog.log
+ datePattern: "'.'yyyy-MM-dd"
+ layout:
+ type: pattern
+ conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
+
+ index_indexing_slow_log_file:
+ type: dailyRollingFile
+ file: ${path.logs}/${cluster.name}_index_indexing_slowlog.log
+ datePattern: "'.'yyyy-MM-dd"
+ layout:
+ type: pattern
+ conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
diff --git a/roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2 b/roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2
new file mode 100644
index 000000000..681f5a7e6
--- /dev/null
+++ b/roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2
@@ -0,0 +1,81 @@
+cluster:
+ name: ${CLUSTER_NAME}
+
+script:
+ inline: on
+ indexed: on
+
+index:
+ number_of_shards: {{ es_number_of_shards | default ('1') }}
+ number_of_replicas: {{ es_number_of_replicas | default ('0') }}
+ unassigned.node_left.delayed_timeout: 2m
+ translog:
+ flush_threshold_size: 256mb
+ flush_threshold_period: 5m
+
+node:
+ master: ${IS_MASTER}
+ data: ${HAS_DATA}
+
+network:
+ host: 0.0.0.0
+
+cloud:
+ kubernetes:
+ service: ${SERVICE_DNS}
+ namespace: ${NAMESPACE}
+
+discovery:
+ type: kubernetes
+ zen.ping.multicast.enabled: false
+ zen.minimum_master_nodes: ${NODE_QUORUM}
+
+gateway:
+ recover_after_nodes: ${NODE_QUORUM}
+ expected_nodes: ${RECOVER_EXPECTED_NODES}
+ recover_after_time: ${RECOVER_AFTER_TIME}
+
+io.fabric8.elasticsearch.authentication.users: ["system.logging.kibana", "system.logging.fluentd", "system.logging.curator", "system.admin"]
+io.fabric8.elasticsearch.kibana.mapping.app: /usr/share/elasticsearch/index_patterns/com.redhat.viaq-openshift.index-pattern.json
+io.fabric8.elasticsearch.kibana.mapping.ops: /usr/share/elasticsearch/index_patterns/com.redhat.viaq-openshift.index-pattern.json
+
+openshift.config:
+ use_common_data_model: true
+ project_index_prefix: "project"
+ time_field_name: "@timestamp"
+
+openshift.searchguard:
+ keystore.path: /etc/elasticsearch/secret/admin.jks
+ truststore.path: /etc/elasticsearch/secret/searchguard.truststore
+
+openshift.operations.allow_cluster_reader: {{allow_cluster_reader | default (false)}}
+
+path:
+ data: /elasticsearch/persistent/${CLUSTER_NAME}/data
+ logs: /elasticsearch/${CLUSTER_NAME}/logs
+ work: /elasticsearch/${CLUSTER_NAME}/work
+ scripts: /elasticsearch/${CLUSTER_NAME}/scripts
+
+searchguard:
+ authcz.admin_dn:
+ - CN=system.admin,OU=OpenShift,O=Logging
+ config_index_name: ".searchguard.${HOSTNAME}"
+ ssl:
+ transport:
+ enabled: true
+ enforce_hostname_verification: false
+ keystore_type: JKS
+ keystore_filepath: /etc/elasticsearch/secret/searchguard.key
+ keystore_password: kspass
+ truststore_type: JKS
+ truststore_filepath: /etc/elasticsearch/secret/searchguard.truststore
+ truststore_password: tspass
+ http:
+ enabled: true
+ keystore_type: JKS
+ keystore_filepath: /etc/elasticsearch/secret/key
+ keystore_password: kspass
+ clientauth_mode: OPTIONAL
+ truststore_type: JKS
+ truststore_filepath: /etc/elasticsearch/secret/truststore
+ truststore_password: tspass
diff --git a/roles/openshift_logging_elasticsearch/templates/es.j2 b/roles/openshift_logging_elasticsearch/templates/es.j2
new file mode 100644
index 000000000..e129205ca
--- /dev/null
+++ b/roles/openshift_logging_elasticsearch/templates/es.j2
@@ -0,0 +1,126 @@
+apiVersion: "v1"
+kind: "DeploymentConfig"
+metadata:
+ name: "{{deploy_name}}"
+ labels:
+ provider: openshift
+ component: "{{component}}"
+ deployment: "{{deploy_name}}"
+ logging-infra: "{{logging_component}}"
+spec:
+ replicas: {{replicas|default(1)}}
+ selector:
+ provider: openshift
+ component: "{{component}}"
+ deployment: "{{deploy_name}}"
+ logging-infra: "{{logging_component}}"
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: "{{deploy_name}}"
+ labels:
+ logging-infra: "{{logging_component}}"
+ provider: openshift
+ component: "{{component}}"
+ deployment: "{{deploy_name}}"
+ spec:
+      terminationGracePeriodSeconds: 600
+ serviceAccountName: aggregated-logging-elasticsearch
+ securityContext:
+ supplementalGroups:
+ - {{openshift_logging_elasticsearch_storage_group}}
+{% if es_node_selector is iterable and es_node_selector | length > 0 %}
+ nodeSelector:
+{% for key, value in es_node_selector.iteritems() %}
+ {{key}}: "{{value}}"
+{% endfor %}
+{% endif %}
+ containers:
+ -
+ name: "elasticsearch"
+ image: {{image}}
+ imagePullPolicy: Always
+ resources:
+ limits:
+ memory: "{{es_memory_limit}}"
+{% if es_cpu_limit is defined and es_cpu_limit is not none %}
+ cpu: "{{es_cpu_limit}}"
+{% endif %}
+ requests:
+ memory: "512Mi"
+ ports:
+ -
+ containerPort: 9200
+ name: "restapi"
+ -
+ containerPort: 9300
+ name: "cluster"
+ env:
+ -
+ name: "NAMESPACE"
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ -
+ name: "KUBERNETES_TRUST_CERT"
+ value: "true"
+ -
+ name: "SERVICE_DNS"
+ value: "logging-{{es_cluster_name}}-cluster"
+ -
+ name: "CLUSTER_NAME"
+ value: "logging-{{es_cluster_name}}"
+ -
+ name: "INSTANCE_RAM"
+ value: "{{openshift_logging_elasticsearch_memory_limit}}"
+ -
+ name: "NODE_QUORUM"
+ value: "{{es_node_quorum | int}}"
+ -
+ name: "RECOVER_EXPECTED_NODES"
+ value: "{{es_recover_expected_nodes}}"
+ -
+ name: "RECOVER_AFTER_TIME"
+ value: "{{openshift_logging_elasticsearch_recover_after_time}}"
+ -
+ name: "IS_MASTER"
+ value: "{% if deploy_type in ['data-master', 'master'] %}true{% else %}false{% endif %}"
+
+ -
+ name: "HAS_DATA"
+ value: "{% if deploy_type in ['data-master', 'data-client'] %}true{% else %}false{% endif %}"
+
+ volumeMounts:
+ - name: elasticsearch
+ mountPath: /etc/elasticsearch/secret
+ readOnly: true
+ - name: elasticsearch-config
+ mountPath: /usr/share/java/elasticsearch/config
+ readOnly: true
+ - name: elasticsearch-storage
+ mountPath: /elasticsearch/persistent
+ readinessProbe:
+ exec:
+ command:
+ - "/usr/share/elasticsearch/probe/readiness.sh"
+ initialDelaySeconds: 5
+ timeoutSeconds: 4
+ periodSeconds: 5
+ volumes:
+ - name: elasticsearch
+ secret:
+ secretName: logging-elasticsearch
+ - name: elasticsearch-config
+ configMap:
+ name: logging-elasticsearch
+ - name: elasticsearch-storage
+{% if openshift_logging_elasticsearch_storage_type == 'pvc' %}
+ persistentVolumeClaim:
+ claimName: {{ openshift_logging_elasticsearch_pvc_name }}
+{% elif openshift_logging_elasticsearch_storage_type == 'hostmount' %}
+ hostPath:
+ path: {{ openshift_logging_elasticsearch_hostmount_path }}
+{% else %}
+      emptyDir: {}
+{% endif %}
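
The nodeSelector block in this template renders only when a non-empty mapping is supplied; the role default is an empty string, which skips the block. A hedged example of the expected variable shape, using a placeholder label:

openshift_logging_elasticsearch_nodeselector:
  logging-es-node: 'true'

which the loop above renders into the pod spec as:

      nodeSelector:
        logging-es-node: "true"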
diff --git a/roles/openshift_logging_elasticsearch/templates/pvc.j2 b/roles/openshift_logging_elasticsearch/templates/pvc.j2
new file mode 100644
index 000000000..f19a3a750
--- /dev/null
+++ b/roles/openshift_logging_elasticsearch/templates/pvc.j2
@@ -0,0 +1,27 @@
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: {{obj_name}}
+ labels:
+ logging-infra: support
+{% if annotations is defined %}
+ annotations:
+{% for key,value in annotations.iteritems() %}
+ {{key}}: {{value}}
+{% endfor %}
+{% endif %}
+spec:
+{% if pv_selector is defined and pv_selector is mapping %}
+ selector:
+ matchLabels:
+{% for key,value in pv_selector.iteritems() %}
+ {{key}}: {{value}}
+{% endfor %}
+{% endif %}
+ accessModes:
+{% for mode in access_modes %}
+ - {{ mode }}
+{% endfor %}
+ resources:
+ requests:
+ storage: {{size}}
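
For reference, a sketch of what this template renders when tasks/main.yaml passes the dynamic-provisioning annotation, assuming placeholder values obj_name=logging-es-0, size=10Gi, access_modes=['ReadWriteOnce'], with pv_selector left undefined for brevity:

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: logging-es-0
  labels:
    logging-infra: support
  annotations:
    volume.alpha.kubernetes.io/storage-class: dynamic
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi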
diff --git a/roles/openshift_logging_elasticsearch/templates/rolebinding.j2 b/roles/openshift_logging_elasticsearch/templates/rolebinding.j2
new file mode 100644
index 000000000..fcd4e87cc
--- /dev/null
+++ b/roles/openshift_logging_elasticsearch/templates/rolebinding.j2
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: RoleBinding
+metadata:
+ name: {{obj_name}}
+roleRef:
+{% if roleRef.kind is defined %}
+ kind: {{ roleRef.kind }}
+{% endif %}
+ name: {{ roleRef.name }}
+subjects:
+{% for sub in subjects %}
+ - kind: {{ sub.kind }}
+ name: {{ sub.name }}
+{% endfor %}
diff --git a/roles/openshift_logging_elasticsearch/vars/main.yml b/roles/openshift_logging_elasticsearch/vars/main.yml
new file mode 100644
index 000000000..7a1f5048b
--- /dev/null
+++ b/roles/openshift_logging_elasticsearch/vars/main.yml
@@ -0,0 +1,12 @@
+---
+__latest_es_version: "3_5"
+__allowed_es_versions: ["3_5", "3_6"]
+__allowed_es_types: ["data-master", "data-client", "master", "client"]
+
+# TODO: integrate these
+openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
+es_node_quorum: "{{ openshift_logging_elasticsearch_replica_count | int/2 + 1 }}"
+es_min_masters_default: "{{ (openshift_logging_elasticsearch_replica_count | int / 2 | round(0,'floor') + 1) | int }}"
+es_min_masters: "{{ (openshift_logging_elasticsearch_replica_count == 1) | ternary(1, es_min_masters_default) }}"
+es_recover_after_nodes: "{{ openshift_logging_elasticsearch_replica_count | int }}"
+es_recover_expected_nodes: "{{ openshift_logging_elasticsearch_replica_count | int }}"
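
A worked example of the arithmetic above, assuming a hypothetical replica count of 3 (comments only; not part of the role):

# openshift_logging_elasticsearch_replica_count: 3
# es_node_quorum            -> 3 / 2 + 1, cast to int downstream in es.j2 -> 2
# es_min_masters_default    -> floor(3 / 2) + 1 -> 2
# es_recover_after_nodes    -> 3
# es_recover_expected_nodes -> 3
# A two-node quorum on a three-member cluster tolerates the loss of one node.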