Diffstat (limited to 'roles/openshift_logging/tasks')
-rw-r--r--  roles/openshift_logging/tasks/delete_logging.yaml                 114
-rw-r--r--  roles/openshift_logging/tasks/generate_certs.yaml                 217
-rw-r--r--  roles/openshift_logging/tasks/generate_clusterrolebindings.yaml    13
-rw-r--r--  roles/openshift_logging/tasks/generate_clusterroles.yaml           11
-rw-r--r--  roles/openshift_logging/tasks/generate_configmaps.yaml            117
-rw-r--r--  roles/openshift_logging/tasks/generate_deploymentconfigs.yaml      65
-rw-r--r--  roles/openshift_logging/tasks/generate_pems.yaml                   36
-rw-r--r--  roles/openshift_logging/tasks/generate_pvcs.yaml                   49
-rw-r--r--  roles/openshift_logging/tasks/generate_rolebindings.yaml           12
-rw-r--r--  roles/openshift_logging/tasks/generate_routes.yaml                 21
-rw-r--r--  roles/openshift_logging/tasks/generate_secrets.yaml                77
-rw-r--r--  roles/openshift_logging/tasks/generate_serviceaccounts.yaml        14
-rw-r--r--  roles/openshift_logging/tasks/generate_services.yaml               87
-rw-r--r--  roles/openshift_logging/tasks/install_curator.yaml                 51
-rw-r--r--  roles/openshift_logging/tasks/install_elasticsearch.yaml          107
-rw-r--r--  roles/openshift_logging/tasks/install_fluentd.yaml                 54
-rw-r--r--  roles/openshift_logging/tasks/install_kibana.yaml                  58
-rw-r--r--  roles/openshift_logging/tasks/install_logging.yaml                 49
-rw-r--r--  roles/openshift_logging/tasks/install_support.yaml                 54
-rw-r--r--  roles/openshift_logging/tasks/label_node.yaml                      29
-rw-r--r--  roles/openshift_logging/tasks/main.yaml                            40
-rw-r--r--  roles/openshift_logging/tasks/oc_apply.yaml                        29
-rw-r--r--  roles/openshift_logging/tasks/procure_server_certs.yaml            52
-rw-r--r--  roles/openshift_logging/tasks/scale.yaml                           28
-rw-r--r--  roles/openshift_logging/tasks/start_cluster.yaml                  104
-rw-r--r--  roles/openshift_logging/tasks/stop_cluster.yaml                    97
-rw-r--r--  roles/openshift_logging/tasks/upgrade_logging.yaml                 41
27 files changed, 1626 insertions(+), 0 deletions(-)
diff --git a/roles/openshift_logging/tasks/delete_logging.yaml b/roles/openshift_logging/tasks/delete_logging.yaml
new file mode 100644
index 000000000..908f3ee88
--- /dev/null
+++ b/roles/openshift_logging/tasks/delete_logging.yaml
@@ -0,0 +1,114 @@
+---
+- name: stop logging
+ include: stop_cluster.yaml
+
+# delete the deployment objects that we had created
+- name: delete logging api objects
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ delete {{ item }} --selector logging-infra -n {{ openshift_logging_namespace }} --ignore-not-found=true
+ with_items:
+ - dc
+ - rc
+ - svc
+ - routes
+ - templates
+ - daemonset
+ register: delete_result
+ changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
+
+
+# delete the oauthclient
+- name: delete oauthclient kibana-proxy
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete oauthclient kibana-proxy --ignore-not-found=true
+ register: delete_result
+ changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
+
+# delete any image streams that we may have created
+- name: delete logging is
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ delete is -l logging-infra=support -n {{ openshift_logging_namespace }} --ignore-not-found=true
+ register: delete_result
+ changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
+
+# delete our old secrets
+- name: delete logging secrets
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ delete secret {{ item }} -n {{ openshift_logging_namespace }} --ignore-not-found=true
+ with_items:
+ - logging-fluentd
+ - logging-elasticsearch
+ - logging-kibana
+ - logging-kibana-proxy
+ - logging-curator
+ ignore_errors: yes
+ register: delete_result
+ changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
+
+# delete role bindings
+- name: delete rolebindings
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ delete rolebinding {{ item }} -n {{ openshift_logging_namespace }} --ignore-not-found=true
+ with_items:
+ - logging-elasticsearch-view-role
+ register: delete_result
+ changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
+
+# delete cluster role bindings
+- name: delete cluster role bindings
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ delete clusterrolebindings {{ item }} -n {{ openshift_logging_namespace }} --ignore-not-found=true
+ with_items:
+ - rolebinding-reader
+ register: delete_result
+ changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
+
+# delete cluster roles
+- name: delete cluster roles
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ delete clusterroles {{ item }} -n {{ openshift_logging_namespace }} --ignore-not-found=true
+ with_items:
+ - rolebinding-reader
+ register: delete_result
+ changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
+
+# delete our service accounts
+- name: delete service accounts
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ delete serviceaccount {{ item }} -n {{ openshift_logging_namespace }} --ignore-not-found=true
+ with_items:
+ - aggregated-logging-elasticsearch
+ - aggregated-logging-kibana
+ - aggregated-logging-curator
+ - aggregated-logging-fluentd
+ register: delete_result
+ changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
+
+# delete our roles
+- name: delete roles
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ delete clusterrole {{ item }} -n {{ openshift_logging_namespace }} --ignore-not-found=true
+ with_items:
+ - daemonset-admin
+ register: delete_result
+ changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
+
+# delete our configmaps
+- name: delete configmaps
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ delete configmap {{ item }} -n {{ openshift_logging_namespace }} --ignore-not-found=true
+ with_items:
+ - logging-curator
+ - logging-elasticsearch
+ - logging-fluentd
+ register: delete_result
+ changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
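
Each deletion task above templates out to a plain oc invocation. As a sketch, the first loop iteration renders roughly as follows, assuming the default logging namespace and an illustrative mktemp path:

    oc --config=/tmp/openshift-logging-ansible-XXXXXX/admin.kubeconfig \
      delete dc --selector logging-infra -n logging --ignore-not-found=true

The --ignore-not-found=true flag lets a missing object count as success, and the changed_when test on the word "deleted" keeps repeat runs idempotent.
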
diff --git a/roles/openshift_logging/tasks/generate_certs.yaml b/roles/openshift_logging/tasks/generate_certs.yaml
new file mode 100644
index 000000000..e16071e46
--- /dev/null
+++ b/roles/openshift_logging/tasks/generate_certs.yaml
@@ -0,0 +1,217 @@
+---
+# we will ensure our secrets and configmaps are set up here first
+- name: Checking for ca.key
+ stat: path="{{generated_certs_dir}}/ca.key"
+ register: ca_key_file
+ check_mode: no
+
+- name: Checking for ca.crt
+ stat: path="{{generated_certs_dir}}/ca.crt"
+ register: ca_cert_file
+ check_mode: no
+
+- name: Checking for ca.serial.txt
+ stat: path="{{generated_certs_dir}}/ca.serial.txt"
+ register: ca_serial_file
+ check_mode: no
+
+- name: Generate certificates
+ command: >
+ {{ openshift.common.admin_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig ca create-signer-cert
+ --key={{generated_certs_dir}}/ca.key --cert={{generated_certs_dir}}/ca.crt
+ --serial={{generated_certs_dir}}/ca.serial.txt --name=logging-signer-test
+ check_mode: no
+ when:
+ - not ca_key_file.stat.exists
+ - not ca_cert_file.stat.exists
+ - not ca_serial_file.stat.exists
+
+- name: Checking for signing.conf
+ stat: path="{{generated_certs_dir}}/signing.conf"
+ register: signing_conf_file
+ check_mode: no
+
+- template: src=signing.conf.j2 dest={{generated_certs_dir}}/signing.conf
+ vars:
+ - top_dir: '{{generated_certs_dir}}'
+ when: not signing_conf_file.stat.exists
+
+- include: procure_server_certs.yaml
+ loop_control:
+ loop_var: cert_info
+ with_items:
+ - procure_component: kibana
+ - procure_component: kibana-ops
+ - procure_component: kibana-internal
+ hostnames: "kibana, kibana-ops, {{openshift_logging_kibana_hostname}}, {{openshift_logging_kibana_ops_hostname}}"
+
+- name: Copy proxy TLS configuration file
+ copy: src=server-tls.json dest={{generated_certs_dir}}/server-tls.json
+ when: server_tls_json is undefined
+ check_mode: no
+
+- name: Copy proxy TLS configuration file
+ copy: content="{{server_tls_json}}" dest={{generated_certs_dir}}/server-tls.json
+ when: server_tls_json is defined
+ check_mode: no
+
+- name: Checking for ca.db
+ stat: path="{{generated_certs_dir}}/ca.db"
+ register: ca_db_file
+ check_mode: no
+
+- copy: content="" dest={{generated_certs_dir}}/ca.db
+ check_mode: no
+ when:
+ - not ca_db_file.stat.exists
+
+- name: Checking for ca.crt.srl
+ stat: path="{{generated_certs_dir}}/ca.crt.srl"
+ register: ca_cert_srl_file
+ check_mode: no
+
+- copy: content="" dest={{generated_certs_dir}}/ca.crt.srl
+ check_mode: no
+ when:
+ - not ca_cert_srl_file.stat.exists
+
+- name: Generate PEM certs
+ include: generate_pems.yaml component={{node_name}}
+ with_items:
+ - system.logging.fluentd
+ - system.logging.kibana
+ - system.logging.curator
+ - system.admin
+ loop_control:
+ loop_var: node_name
+
+- name: Check for jks-generator service account
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get serviceaccount/jks-generator --no-headers -n {{openshift_logging_namespace}}
+ register: serviceaccount_result
+ ignore_errors: yes
+ when: not ansible_check_mode
+ changed_when: no
+
+- name: Create jks-generator service account
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create serviceaccount jks-generator -n {{openshift_logging_namespace}}
+ when: not ansible_check_mode and "not found" in serviceaccount_result.stderr
+
+- name: Check for hostmount-anyuid scc entry
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get scc hostmount-anyuid -o jsonpath='{.users}'
+ register: scc_result
+ when: not ansible_check_mode
+ changed_when: no
+
+- name: Add to hostmount-anyuid scc
+ command: >
+ {{ openshift.common.admin_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig policy add-scc-to-user hostmount-anyuid -z jks-generator -n {{openshift_logging_namespace}}
+ when:
+ - not ansible_check_mode
+ - scc_result.stdout.find("system:serviceaccount:{{openshift_logging_namespace}}:jks-generator") == -1
+
+- name: Copy JKS generation script
+ copy:
+ src: generate-jks.sh
+ dest: "{{generated_certs_dir}}/generate-jks.sh"
+ check_mode: no
+
+- name: Generate JKS pod template
+ template:
+ src: jks_pod.j2
+ dest: "{{mktemp.stdout}}/jks_pod.yaml"
+ check_mode: no
+ changed_when: no
+
+# check if pod generated files exist -- if they all do don't run the pod
+- name: Checking for elasticsearch.jks
+ stat: path="{{generated_certs_dir}}/elasticsearch.jks"
+ register: elasticsearch_jks
+ check_mode: no
+
+- name: Checking for logging-es.jks
+ stat: path="{{generated_certs_dir}}/logging-es.jks"
+ register: logging_es_jks
+ check_mode: no
+
+- name: Checking for system.admin.jks
+ stat: path="{{generated_certs_dir}}/system.admin.jks"
+ register: system_admin_jks
+ check_mode: no
+
+- name: Checking for truststore.jks
+ stat: path="{{generated_certs_dir}}/truststore.jks"
+ register: truststore_jks
+ check_mode: no
+
+- name: create JKS generation pod
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f {{mktemp.stdout}}/jks_pod.yaml -n {{openshift_logging_namespace}} -o name
+ register: podoutput
+ check_mode: no
+ when: not elasticsearch_jks.stat.exists or not logging_es_jks.stat.exists or not system_admin_jks.stat.exists or not truststore_jks.stat.exists
+
+- command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get {{podoutput.stdout}} -o jsonpath='{.status.phase}' -n {{openshift_logging_namespace}}
+ register: result
+ until: result.stdout.find("Succeeded") != -1
+ retries: 5
+ delay: 10
+ changed_when: no
+ when: not elasticsearch_jks.stat.exists or not logging_es_jks.stat.exists or not system_admin_jks.stat.exists or not truststore_jks.stat.exists
+
+# check for secret/logging-kibana-proxy
+- command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get secret/logging-kibana-proxy -n {{openshift_logging_namespace}} -o jsonpath='{.data.oauth-secret}'
+ register: kibana_secret_oauth_check
+ ignore_errors: yes
+ changed_when: no
+ check_mode: no
+
+- command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get secret/logging-kibana-proxy -n {{openshift_logging_namespace}} -o jsonpath='{.data.session-secret}'
+ register: kibana_secret_session_check
+ ignore_errors: yes
+ changed_when: no
+ check_mode: no
+
+# check for oauthclient secret
+- command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get oauthclient/kibana-proxy -n {{openshift_logging_namespace}} -o jsonpath='{.secret}'
+ register: oauth_secret_check
+ ignore_errors: yes
+ changed_when: no
+ check_mode: no
+
+# set or generate as needed
+- name: Generate proxy session
+ set_fact: session_secret={{'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'|random_word(200)}}
+ check_mode: no
+ when:
+ - kibana_secret_session_check.stdout is not defined or kibana_secret_session_check.stdout == ''
+
+- name: Generate proxy session
+ set_fact: session_secret={{kibana_secret_session_check.stdout | b64decode }}
+ check_mode: no
+ when:
+ - kibana_secret_session_check.stdout is defined
+ - kibana_secret_session_check.stdout != ''
+
+- name: Generate oauth client secret
+ set_fact: oauth_secret={{'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'|random_word(64)}}
+ check_mode: no
+ when: kibana_secret_oauth_check.stdout is not defined or kibana_secret_oauth_check.stdout == ''
+ or oauth_secret_check.stdout is not defined or oauth_secret_check.stdout == ''
+ or kibana_secret_oauth_check.stdout | b64decode != oauth_secret_check.stdout
+
+- name: Generate oauth client secret
+ set_fact: oauth_secret={{kibana_secret_oauth_check.stdout | b64decode}}
+ check_mode: no
+ when:
+ - kibana_secret_oauth_check is defined
+ - kibana_secret_oauth_check.stdout != ''
+ - oauth_secret_check.stdout is defined
+ - oauth_secret_check.stdout != ''
+ - kibana_secret_oauth_check.stdout | b64decode == oauth_secret_check.stdout
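
The CA bootstrap at the top of this file only fires when all three artifacts (key, cert, serial file) are absent. Rendered, and assuming admin_binary resolves to oadm and config_base to /etc/origin (both illustrative), it is a single signer-cert call:

    oadm --config=/tmp/openshift-logging-ansible-XXXXXX/admin.kubeconfig ca create-signer-cert \
      --key=/etc/origin/logging/ca.key --cert=/etc/origin/logging/ca.crt \
      --serial=/etc/origin/logging/ca.serial.txt --name=logging-signer-test

The session and oauth secrets at the bottom follow a reuse-or-generate pattern: existing values are read back from secret/logging-kibana-proxy and oauthclient/kibana-proxy, and fresh random strings are generated only when they are missing or disagree.
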
diff --git a/roles/openshift_logging/tasks/generate_clusterrolebindings.yaml b/roles/openshift_logging/tasks/generate_clusterrolebindings.yaml
new file mode 100644
index 000000000..56f590717
--- /dev/null
+++ b/roles/openshift_logging/tasks/generate_clusterrolebindings.yaml
@@ -0,0 +1,13 @@
+---
+- name: Generate ClusterRoleBindings
+ template: src=clusterrolebinding.j2 dest={{mktemp.stdout}}/templates/logging-15-{{obj_name}}-clusterrolebinding.yaml
+ vars:
+ acct_name: aggregated-logging-elasticsearch
+ obj_name: rolebinding-reader
+ crb_usernames: ["system:serviceaccount:{{openshift_logging_namespace}}:{{acct_name}}"]
+ subjects:
+ - kind: ServiceAccount
+ name: "{{acct_name}}"
+ namespace: "{{openshift_logging_namespace}}"
+ check_mode: no
+ changed_when: no
diff --git a/roles/openshift_logging/tasks/generate_clusterroles.yaml b/roles/openshift_logging/tasks/generate_clusterroles.yaml
new file mode 100644
index 000000000..0b8b1014c
--- /dev/null
+++ b/roles/openshift_logging/tasks/generate_clusterroles.yaml
@@ -0,0 +1,11 @@
+---
+- name: Generate ClusterRole for cluster-reader
+ template: src=clusterrole.j2 dest={{mktemp.stdout}}/templates/logging-10-{{obj_name}}-clusterrole.yaml
+ vars:
+ obj_name: rolebinding-reader
+ rules:
+ - resources: [clusterrolebindings]
+ verbs:
+ - get
+ check_mode: no
+ changed_when: no
diff --git a/roles/openshift_logging/tasks/generate_configmaps.yaml b/roles/openshift_logging/tasks/generate_configmaps.yaml
new file mode 100644
index 000000000..b24a7c342
--- /dev/null
+++ b/roles/openshift_logging/tasks/generate_configmaps.yaml
@@ -0,0 +1,117 @@
+---
+- block:
+ - copy:
+ src: elasticsearch-logging.yml
+ dest: "{{mktemp.stdout}}/elasticsearch-logging.yml"
+ when: es_logging_contents is undefined
+ changed_when: no
+
+ - template:
+ src: elasticsearch.yml.j2
+ dest: "{{mktemp.stdout}}/elasticsearch.yml"
+ vars:
+ - allow_cluster_reader: "{{openshift_logging_es_ops_allow_cluster_reader | lower | default('false')}}"
+ when: es_config_contents is undefined
+ changed_when: no
+
+ - copy:
+ content: "{{es_logging_contents}}"
+ dest: "{{mktemp.stdout}}/elasticsearch-logging.yml"
+ when: es_logging_contents is defined
+ changed_when: no
+
+ - copy:
+ content: "{{es_config_contents}}"
+ dest: "{{mktemp.stdout}}/elasticsearch.yml"
+ when: es_config_contents is defined
+ changed_when: no
+
+ - command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create configmap logging-elasticsearch
+ --from-file=logging.yml={{mktemp.stdout}}/elasticsearch-logging.yml --from-file=elasticsearch.yml={{mktemp.stdout}}/elasticsearch.yml -o yaml --dry-run
+ register: es_configmap
+ changed_when: no
+
+ - copy:
+ content: "{{es_configmap.stdout}}"
+ dest: "{{mktemp.stdout}}/templates/logging-elasticsearch-configmap.yaml"
+ when: es_configmap.stdout is defined
+ changed_when: no
+ check_mode: no
+
+- block:
+ - copy:
+ src: curator.yml
+ dest: "{{mktemp.stdout}}/curator.yml"
+ when: curator_config_contents is undefined
+ changed_when: no
+
+ - copy:
+ content: "{{curator_config_contents}}"
+ dest: "{{mktemp.stdout}}/curator.yml"
+ when: curator_config_contents is defined
+ changed_when: no
+
+ - command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create configmap logging-curator
+ --from-file=config.yaml={{mktemp.stdout}}/curator.yml -o yaml --dry-run
+ register: curator_configmap
+ changed_when: no
+
+ - copy:
+ content: "{{curator_configmap.stdout}}"
+ dest: "{{mktemp.stdout}}/templates/logging-curator-configmap.yaml"
+ when: curator_configmap.stdout is defined
+ changed_when: no
+ check_mode: no
+
+- block:
+ - copy:
+ src: fluent.conf
+ dest: "{{mktemp.stdout}}/fluent.conf"
+ when: fluentd_config_contents is undefined
+ changed_when: no
+
+ - copy:
+ src: fluentd-throttle-config.yaml
+ dest: "{{mktemp.stdout}}/fluentd-throttle-config.yaml"
+ when: fluentd_throttle_contents is undefined
+ changed_when: no
+
+ - copy:
+ src: secure-forward.conf
+ dest: "{{mktemp.stdout}}/secure-forward.conf"
+ when: fluentd_secureforward_contents is undefined
+ changed_when: no
+
+ - copy:
+ content: "{{fluentd_config_contents}}"
+ dest: "{{mktemp.stdout}}/fluent.conf"
+ when: fluentd_config_contents is defined
+ changed_when: no
+
+ - copy:
+ content: "{{fluentd_throttle_contents}}"
+ dest: "{{mktemp.stdout}}/fluentd-throttle-config.yaml"
+ when: fluentd_throttle_contents is defined
+ changed_when: no
+
+ - copy:
+ content: "{{fluentd_secureforward_contents}}"
+ dest: "{{mktemp.stdout}}/secure-forward.conf"
+ when: fluentd_secureforward_contents is defined
+ changed_when: no
+
+ - command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create configmap logging-fluentd
+ --from-file=fluent.conf={{mktemp.stdout}}/fluent.conf --from-file=throttle-config.yaml={{mktemp.stdout}}/fluentd-throttle-config.yaml
+ --from-file=secure-forward.conf={{mktemp.stdout}}/secure-forward.conf -o yaml --dry-run
+ register: fluentd_configmap
+ changed_when: no
+
+ - copy:
+ content: "{{fluentd_configmap.stdout}}"
+ dest: "{{mktemp.stdout}}/templates/logging-fluentd-configmap.yaml"
+ when: fluentd_configmap.stdout is defined
+ changed_when: no
+ check_mode: no
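
All three blocks share one trick: stage the config files in the temp dir, then let the client itself serialize a ConfigMap with --dry-run -o yaml, so nothing is created server-side at this point. For the Elasticsearch pair this is, with $TMP standing in for {{mktemp.stdout}}:

    oc --config=$TMP/admin.kubeconfig create configmap logging-elasticsearch \
      --from-file=logging.yml=$TMP/elasticsearch-logging.yml \
      --from-file=elasticsearch.yml=$TMP/elasticsearch.yml \
      -o yaml --dry-run

The captured stdout is written under {{mktemp.stdout}}/templates and applied later by oc_apply.yaml together with every other manifest.
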
diff --git a/roles/openshift_logging/tasks/generate_deploymentconfigs.yaml b/roles/openshift_logging/tasks/generate_deploymentconfigs.yaml
new file mode 100644
index 000000000..8aea4e81f
--- /dev/null
+++ b/roles/openshift_logging/tasks/generate_deploymentconfigs.yaml
@@ -0,0 +1,65 @@
+---
+- name: Generate kibana deploymentconfig
+ template: src=kibana.j2 dest={{mktemp.stdout}}/logging-kibana-dc.yaml
+ vars:
+ component: kibana
+ deploy_name: "logging-{{component}}"
+ image: "{{openshift_logging_image_prefix}}logging-kibana:{{openshift_logging_image_version}}"
+ proxy_image: "{{openshift_logging_image_prefix}}logging-auth-proxy:{{openshift_logging_image_version}}"
+ es_host: logging-es
+ es_port: "{{openshift_logging_es_port}}"
+ check_mode: no
+ changed_when: no
+
+- name: Generate OPS kibana deploymentconfig
+ template: src=kibana.j2 dest={{mktemp.stdout}}/logging-kibana-ops-dc.yaml
+ vars:
+ component: kibana-ops
+ deploy_name: "logging-{{component}}"
+ image: "{{openshift_logging_image_prefix}}logging-kibana:{{openshift_logging_image_version}}"
+ proxy_image: "{{openshift_logging_image_prefix}}logging-auth-proxy:{{openshift_logging_image_version}}"
+ es_host: logging-es-ops
+ es_port: "{{openshift_logging_es_ops_port}}"
+ check_mode: no
+ changed_when: no
+
+- name: Generate elasticsearch deploymentconfig
+ template: src=es.j2 dest={{mktemp.stdout}}/logging-es-dc.yaml
+ vars:
+ component: es
+ deploy_name_prefix: "logging-{{component}}"
+ deploy_name: "{{deploy_name_prefix}}-abc123"
+ image: "{{openshift_logging_image_prefix}}logging-elasticsearch:{{openshift_logging_image_version}}"
+ es_cluster_name: "{{component}}"
+ check_mode: no
+ changed_when: no
+
+- name: Generate OPS elasticsearch deploymentconfig
+ template: src=es.j2 dest={{mktemp.stdout}}/logging-es-ops-dc.yaml
+ vars:
+ component: es-ops
+ deploy_name_prefix: "logging-{{component}}"
+ deploy_name: "{{deploy_name_prefix}}-abc123"
+ image: "{{openshift_logging_image_prefix}}logging-elasticsearch:{{openshift_logging_image_version}}"
+ es_cluster_name: "{{component}}"
+ check_mode: no
+ changed_when: no
+
+- name: Generate curator deploymentconfig
+ template: src=curator.j2 dest={{mktemp.stdout}}/logging-curator-dc.yaml
+ vars:
+ component: curator
+ deploy_name: "logging-{{component}}"
+ image: "{{openshift_logging_image_prefix}}logging-curator:{{openshift_logging_image_version}}"
+ check_mode: no
+ changed_when: no
+
+- name: Generate OPS curator deploymentconfig
+ template: src=curator.j2 dest={{mktemp.stdout}}/logging-curator-ops-dc.yaml
+ vars:
+ component: curator-ops
+ deploy_name: "logging-{{component}}"
+ image: "{{openshift_logging_image_prefix}}logging-curator:{{openshift_logging_image_version}}"
+ openshift_logging_es_host: logging-es-ops
+ check_mode: no
+ changed_when: no
diff --git a/roles/openshift_logging/tasks/generate_pems.yaml b/roles/openshift_logging/tasks/generate_pems.yaml
new file mode 100644
index 000000000..289b72ea6
--- /dev/null
+++ b/roles/openshift_logging/tasks/generate_pems.yaml
@@ -0,0 +1,36 @@
+---
+- name: Checking for {{component}}.key
+ stat: path="{{generated_certs_dir}}/{{component}}.key"
+ register: key_file
+ check_mode: no
+
+- name: Checking for {{component}}.crt
+ stat: path="{{generated_certs_dir}}/{{component}}.crt"
+ register: cert_file
+ check_mode: no
+
+- name: Creating cert req for {{component}}
+ command: >
+ openssl req -out {{generated_certs_dir}}/{{component}}.csr -new -newkey rsa:2048 -keyout {{generated_certs_dir}}/{{component}}.key
+ -subj "/CN={{component}}/OU=OpenShift/O=Logging/subjectAltName=DNS.1=localhost{{cert_ext.stdout}}" -days 712 -nodes
+ when:
+ - not key_file.stat.exists
+ - cert_ext.stdout is defined
+ check_mode: no
+
+- name: Creating cert req for {{component}}
+ command: >
+ openssl req -out {{generated_certs_dir}}/{{component}}.csr -new -newkey rsa:2048 -keyout {{generated_certs_dir}}/{{component}}.key
+ -subj "/CN={{component}}/OU=OpenShift/O=Logging" -days 712 -nodes
+ when:
+ - not key_file.stat.exists
+ - cert_ext.stdout is undefined
+ check_mode: no
+
+- name: Sign cert request with CA for {{component}}
+ command: >
+ openssl ca -in {{generated_certs_dir}}/{{component}}.csr -notext -out {{generated_certs_dir}}/{{component}}.crt
+ -config {{generated_certs_dir}}/signing.conf -extensions v3_req -batch -extensions server_ext
+ when:
+ - not cert_file.stat.exists
+ check_mode: no
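
For a concrete component such as system.logging.fluentd, the two steps amount to a CSR followed by a CA signing pass, here rendered with an illustrative certs directory:

    openssl req -new -newkey rsa:2048 -nodes -days 712 \
      -out /etc/origin/logging/system.logging.fluentd.csr \
      -keyout /etc/origin/logging/system.logging.fluentd.key \
      -subj "/CN=system.logging.fluentd/OU=OpenShift/O=Logging"
    openssl ca -batch -notext -config /etc/origin/logging/signing.conf \
      -extensions v3_req -extensions server_ext \
      -in /etc/origin/logging/system.logging.fluentd.csr \
      -out /etc/origin/logging/system.logging.fluentd.crt

The openssl ca step depends on the signing.conf, empty ca.db, and ca.crt.srl files prepared in generate_certs.yaml.
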
diff --git a/roles/openshift_logging/tasks/generate_pvcs.yaml b/roles/openshift_logging/tasks/generate_pvcs.yaml
new file mode 100644
index 000000000..601ec9e83
--- /dev/null
+++ b/roles/openshift_logging/tasks/generate_pvcs.yaml
@@ -0,0 +1,49 @@
+---
+- name: Init pool of PersistentVolumeClaim names
+ set_fact: es_pvc_pool={{es_pvc_pool|default([]) + [pvc_name]}}
+ vars:
+ pvc_name: "{{openshift_logging_es_pvc_prefix}}-{{item| int}}"
+ start: "{{es_pvc_names | map('regex_search',openshift_logging_es_pvc_prefix+'.*')|select('string')|list|length}}"
+ with_sequence: start={{start}} end={{ (start|int > openshift_logging_es_cluster_size - 1) | ternary(start, openshift_logging_es_cluster_size - 1)}}
+ when:
+ - openshift_logging_es_pvc_size | search('^\d.*')
+ - "{{ es_dc_names|default([]) | length < openshift_logging_es_cluster_size }}"
+ check_mode: no
+
+- name: Generating PersistentVolumeClaims
+ template: src=pvc.j2 dest={{mktemp.stdout}}/templates/logging-{{obj_name}}-pvc.yaml
+ vars:
+ obj_name: "{{claim_name}}"
+ size: "{{openshift_logging_es_pvc_size}}"
+ access_modes:
+ - ReadWriteOnce
+ pv_selector: "{{openshift_logging_es_pv_selector}}"
+ with_items:
+ - "{{es_pvc_pool | default([])}}"
+ loop_control:
+ loop_var: claim_name
+ when:
+ - not openshift_logging_es_pvc_dynamic
+ - es_pvc_pool is defined
+ check_mode: no
+ changed_when: no
+
+- name: Generating PersistentVolumeClaims - Dynamic
+ template: src=pvc.j2 dest={{mktemp.stdout}}/templates/logging-{{obj_name}}-pvc.yaml
+ vars:
+ obj_name: "{{claim_name}}"
+ annotations:
+ volume.alpha.kubernetes.io/storage-class: "dynamic"
+ size: "{{openshift_logging_es_pvc_size}}"
+ access_modes:
+ - ReadWriteOnce
+ pv_selector: "{{openshift_logging_es_pv_selector}}"
+ with_items:
+ - "{{es_pvc_pool|default([])}}"
+ loop_control:
+ loop_var: claim_name
+ when:
+ - openshift_logging_es_pvc_dynamic
+ - es_pvc_pool is defined
+ check_mode: no
+ changed_when: no
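
The pvc.j2 template itself is outside this diff; a plausible rendering of one pool entry, assuming a standard v1 PersistentVolumeClaim and the logging-es prefix, looks like:

    apiVersion: v1
    kind: PersistentVolumeClaim
    metadata:
      name: logging-es-0
      annotations:
        volume.alpha.kubernetes.io/storage-class: dynamic   # dynamic variant only
    spec:
      accessModes:
      - ReadWriteOnce
      resources:
        requests:
          storage: 10Gi   # openshift_logging_es_pvc_size; value illustrative

The pool-init task only extends the name pool from the count of existing prefix-matching claims up to the cluster size, so PVCs that already exist are never regenerated.
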
diff --git a/roles/openshift_logging/tasks/generate_rolebindings.yaml b/roles/openshift_logging/tasks/generate_rolebindings.yaml
new file mode 100644
index 000000000..7dc9530df
--- /dev/null
+++ b/roles/openshift_logging/tasks/generate_rolebindings.yaml
@@ -0,0 +1,12 @@
+---
+- name: Generate RoleBindings
+ template: src=rolebinding.j2 dest={{mktemp.stdout}}/templates/logging-{{obj_name}}-rolebinding.yaml
+ vars:
+ obj_name: logging-elasticsearch-view-role
+ roleRef:
+ name: view
+ subjects:
+ - kind: ServiceAccount
+ name: aggregated-logging-elasticsearch
+ check_mode: no
+ changed_when: no
diff --git a/roles/openshift_logging/tasks/generate_routes.yaml b/roles/openshift_logging/tasks/generate_routes.yaml
new file mode 100644
index 000000000..25877ebff
--- /dev/null
+++ b/roles/openshift_logging/tasks/generate_routes.yaml
@@ -0,0 +1,21 @@
+---
+- name: Generating logging routes
+ template: src=route_reencrypt.j2 dest={{mktemp.stdout}}/templates/logging-{{route_info.name}}-route.yaml
+ tags: routes
+ vars:
+ obj_name: "{{route_info.name}}"
+ route_host: "{{route_info.host}}"
+ service_name: "{{route_info.name}}"
+ tls_ca_cert: "{{key_pairs | entry_from_named_pair('ca_file')| b64decode }}"
+ tls_dest_ca_cert: "{{key_pairs | entry_from_named_pair('ca_file')| b64decode }}"
+ labels:
+ component: support
+ logging-infra: support
+ provider: openshift
+ with_items:
+ - {name: logging-kibana, host: "{{openshift_logging_kibana_hostname}}"}
+ - {name: logging-kibana-ops, host: "{{openshift_logging_kibana_ops_hostname}}"}
+ loop_control:
+ loop_var: route_info
+ when: (route_info.name == 'logging-kibana-ops' and openshift_logging_use_ops) or route_info.name == 'logging-kibana'
+ changed_when: no
diff --git a/roles/openshift_logging/tasks/generate_secrets.yaml b/roles/openshift_logging/tasks/generate_secrets.yaml
new file mode 100644
index 000000000..1829acaee
--- /dev/null
+++ b/roles/openshift_logging/tasks/generate_secrets.yaml
@@ -0,0 +1,77 @@
+---
+- name: Retrieving the cert to use when generating secrets for the logging components
+ slurp: src="{{generated_certs_dir}}/{{item.file}}"
+ register: key_pairs
+ with_items:
+ - { name: "ca_file", file: "ca.crt" }
+ - { name: "kibana_key", file: "system.logging.kibana.key"}
+ - { name: "kibana_cert", file: "system.logging.kibana.crt"}
+ - { name: "curator_key", file: "system.logging.curator.key"}
+ - { name: "curator_cert", file: "system.logging.curator.crt"}
+ - { name: "fluentd_key", file: "system.logging.fluentd.key"}
+ - { name: "fluentd_cert", file: "system.logging.fluentd.crt"}
+ - { name: "kibana_internal_key", file: "kibana-internal.key"}
+ - { name: "kibana_internal_cert", file: "kibana-internal.crt"}
+ - { name: "server_tls", file: "server-tls.json"}
+
+- name: Generating secrets for logging components
+ template: src=secret.j2 dest={{mktemp.stdout}}/templates/{{secret_name}}-secret.yaml
+ vars:
+ secret_name: logging-{{component}}
+ secret_key_file: "{{component}}_key"
+ secret_cert_file: "{{component}}_cert"
+ secrets:
+ - {key: ca, value: "{{key_pairs | entry_from_named_pair('ca_file')| b64decode }}"}
+ - {key: key, value: "{{key_pairs | entry_from_named_pair(secret_key_file)| b64decode }}"}
+ - {key: cert, value: "{{key_pairs | entry_from_named_pair(secret_cert_file)| b64decode }}"}
+ secret_keys: ["ca", "cert", "key"]
+ with_items:
+ - kibana
+ - curator
+ - fluentd
+ loop_control:
+ loop_var: component
+ when: secret_name not in openshift_logging_facts.{{component}}.secrets or
+ secret_keys | difference(openshift_logging_facts.{{component}}.secrets["{{secret_name}}"]["keys"]) | length != 0
+ check_mode: no
+ changed_when: no
+
+- name: Generating secrets for kibana proxy
+ template: src=secret.j2 dest={{mktemp.stdout}}/templates/{{secret_name}}-secret.yaml
+ vars:
+ secret_name: logging-kibana-proxy
+ secrets:
+ - {key: oauth-secret, value: "{{oauth_secret}}"}
+ - {key: session-secret, value: "{{session_secret}}"}
+ - {key: server-key, value: "{{kibana_key_file}}"}
+ - {key: server-cert, value: "{{kibana_cert_file}}"}
+ - {key: server-tls, value: "{{server_tls_file}}"}
+ secret_keys: ["server-tls.json", "server-key", "session-secret", "oauth-secret", "server-cert"]
+ kibana_key_file: "{{key_pairs | entry_from_named_pair('kibana_internal_key')| b64decode }}"
+ kibana_cert_file: "{{key_pairs | entry_from_named_pair('kibana_internal_cert')| b64decode }}"
+ server_tls_file: "{{key_pairs | entry_from_named_pair('server_tls')| b64decode }}"
+ when: secret_name not in openshift_logging_facts.kibana.secrets or
+ secret_keys | difference(openshift_logging_facts.kibana.secrets["{{secret_name}}"]["keys"]) | length != 0
+ check_mode: no
+ changed_when: no
+
+- name: Generating secrets for elasticsearch
+ command: >
+ {{openshift.common.client_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig secrets new {{secret_name}}
+ key={{generated_certs_dir}}/logging-es.jks truststore={{generated_certs_dir}}/truststore.jks
+ searchguard.key={{generated_certs_dir}}/elasticsearch.jks searchguard.truststore={{generated_certs_dir}}/truststore.jks
+ admin-key={{generated_certs_dir}}/system.admin.key admin-cert={{generated_certs_dir}}/system.admin.crt
+ admin-ca={{generated_certs_dir}}/ca.crt admin.jks={{generated_certs_dir}}/system.admin.jks -o yaml
+ vars:
+ secret_name: logging-elasticsearch
+ secret_keys: ["admin-cert", "searchguard.key", "admin-ca", "key", "truststore", "admin-key"]
+ register: logging_es_secret
+ when: secret_name not in openshift_logging_facts.elasticsearch.secrets or
+ secret_keys | difference(openshift_logging_facts.elasticsearch.secrets["{{secret_name}}"]["keys"]) | length != 0
+ check_mode: no
+ changed_when: no
+
+- copy: content="{{logging_es_secret.stdout}}" dest={{mktemp.stdout}}/templates/logging-elasticsearch-secret.yaml
+ when: logging_es_secret.stdout is defined
+ check_mode: no
+ changed_when: no
diff --git a/roles/openshift_logging/tasks/generate_serviceaccounts.yaml b/roles/openshift_logging/tasks/generate_serviceaccounts.yaml
new file mode 100644
index 000000000..21bcdfecb
--- /dev/null
+++ b/roles/openshift_logging/tasks/generate_serviceaccounts.yaml
@@ -0,0 +1,14 @@
+---
+- name: Generating serviceaccounts
+ template: src=serviceaccount.j2 dest={{mktemp.stdout}}/templates/logging-{{component}}-sa.yaml
+ vars:
+ obj_name: aggregated-logging-{{component}}
+ with_items:
+ - elasticsearch
+ - kibana
+ - fluentd
+ - curator
+ loop_control:
+ loop_var: component
+ check_mode: no
+ changed_when: no
diff --git a/roles/openshift_logging/tasks/generate_services.yaml b/roles/openshift_logging/tasks/generate_services.yaml
new file mode 100644
index 000000000..8eaac76c4
--- /dev/null
+++ b/roles/openshift_logging/tasks/generate_services.yaml
@@ -0,0 +1,87 @@
+---
+- name: Generating logging-es service
+ template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-es-svc.yaml
+ vars:
+ obj_name: logging-es
+ ports:
+ - {port: 9200, targetPort: restapi}
+ labels:
+ logging-infra: support
+ selector:
+ provider: openshift
+ component: es
+ check_mode: no
+ changed_when: no
+
+- name: Generating logging-es-cluster service
+ template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-es-cluster-svc.yaml
+ vars:
+ obj_name: logging-es-cluster
+ ports:
+ - {port: 9300}
+ labels:
+ logging-infra: support
+ selector:
+ provider: openshift
+ component: es
+ check_mode: no
+ changed_when: no
+
+- name: Generating logging-kibana service
+ template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-kibana-svc.yaml
+ vars:
+ obj_name: logging-kibana
+ ports:
+ - {port: 443, targetPort: oaproxy}
+ labels:
+ logging-infra: support
+ selector:
+ provider: openshift
+ component: kibana
+ check_mode: no
+ changed_when: no
+
+- name: Generating logging-es-ops service
+ template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-es-ops-svc.yaml
+ vars:
+ obj_name: logging-es-ops
+ ports:
+ - {port: 9200, targetPort: restapi}
+ labels:
+ logging-infra: support
+ selector:
+ provider: openshift
+ component: es-ops
+ when: openshift_logging_use_ops
+ check_mode: no
+ changed_when: no
+
+- name: Generating logging-es-ops-cluster service
+ template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-es-ops-cluster-svc.yaml
+ vars:
+ obj_name: logging-es-ops-cluster
+ ports:
+ - {port: 9300}
+ labels:
+ logging-infra: support
+ selector:
+ provider: openshift
+ component: es-ops
+ when: openshift_logging_use_ops
+ check_mode: no
+ changed_when: no
+
+- name: Generating logging-kibana-ops service
+ template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-kibana-ops-svc.yaml
+ vars:
+ obj_name: logging-kibana-ops
+ ports:
+ - {port: 443, targetPort: oaproxy}
+ labels:
+ logging-infra: support
+ selector:
+ provider: openshift
+ component: kibana-ops
+ when: openshift_logging_use_ops
+ check_mode: no
+ changed_when: no
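
Assuming service.j2 emits a standard v1 Service, the first task should render to roughly:

    apiVersion: v1
    kind: Service
    metadata:
      name: logging-es
      labels:
        logging-infra: support
    spec:
      ports:
      - port: 9200
        targetPort: restapi
      selector:
        provider: openshift
        component: es

The -cluster variants expose 9300 for Elasticsearch's inter-node transport, while the Kibana services front the auth proxy on 443 (targetPort oaproxy).
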
diff --git a/roles/openshift_logging/tasks/install_curator.yaml b/roles/openshift_logging/tasks/install_curator.yaml
new file mode 100644
index 000000000..8f2825552
--- /dev/null
+++ b/roles/openshift_logging/tasks/install_curator.yaml
@@ -0,0 +1,51 @@
+---
+- name: Check Curator current replica count
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get dc/logging-curator
+ -o jsonpath='{.spec.replicas}' -n {{openshift_logging_namespace}}
+ register: curator_replica_count
+ when: not ansible_check_mode
+ ignore_errors: yes
+ changed_when: no
+
+- name: Check Curator ops current replica count
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get dc/logging-curator-ops
+ -o jsonpath='{.spec.replicas}' -n {{openshift_logging_namespace}}
+ register: curator_ops_replica_count
+ when:
+ - not ansible_check_mode
+ - openshift_logging_use_ops
+ ignore_errors: yes
+ changed_when: no
+
+- name: Generate curator deploymentconfig
+ template: src=curator.j2 dest={{mktemp.stdout}}/templates/logging-curator-dc.yaml
+ vars:
+ component: curator
+ logging_component: curator
+ deploy_name: "logging-{{component}}"
+ image: "{{openshift_logging_image_prefix}}logging-curator:{{openshift_logging_image_version}}"
+ es_host: logging-es
+ es_port: "{{openshift_logging_es_port}}"
+ curator_cpu_limit: "{{openshift_logging_curator_cpu_limit }}"
+ curator_memory_limit: "{{openshift_logging_curator_memory_limit }}"
+ replicas: "{{curator_replica_count.stdout | default (0)}}"
+ check_mode: no
+ changed_when: no
+
+- name: Generate OPS curator deploymentconfig
+ template: src=curator.j2 dest={{mktemp.stdout}}/templates/logging-curator-ops-dc.yaml
+ vars:
+ component: curator-ops
+ logging_component: curator
+ deploy_name: "logging-{{component}}"
+ image: "{{openshift_logging_image_prefix}}logging-curator:{{openshift_logging_image_version}}"
+ es_host: logging-es-ops
+ es_port: "{{openshift_logging_es_ops_port}}"
+ curator_cpu_limit: "{{openshift_logging_curator_ops_cpu_limit }}"
+ curator_memory_limit: "{{openshift_logging_curator_ops_memory_limit }}"
+ replicas: "{{curator_ops_replica_count.stdout | default (0)}}"
+ when: openshift_logging_use_ops
+ check_mode: no
+ changed_when: no
diff --git a/roles/openshift_logging/tasks/install_elasticsearch.yaml b/roles/openshift_logging/tasks/install_elasticsearch.yaml
new file mode 100644
index 000000000..fbba46a35
--- /dev/null
+++ b/roles/openshift_logging/tasks/install_elasticsearch.yaml
@@ -0,0 +1,107 @@
+---
+- name: Generate PersistentVolumeClaims
+ include: "{{ role_path}}/tasks/generate_pvcs.yaml"
+ vars:
+ es_pvc_names: "{{openshift_logging_facts.elasticsearch.pvcs.keys()}}"
+ es_dc_names: "{{openshift_logging_facts.elasticsearch.deploymentconfigs.keys()}}"
+ when:
+ - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | length < openshift_logging_es_cluster_size }}"
+
+- name: Init pool of DeploymentConfig names for Elasticsearch
+ set_fact: es_dc_pool={{es_dc_pool | default([]) + [deploy_name]}}
+ vars:
+ component: es
+ es_cluster_name: "{{component}}"
+ deploy_name_prefix: "logging-{{component}}"
+ deploy_name: "{{deploy_name_prefix}}-{{'abcdefghijklmnopqrstuvwxyz0123456789'|random_word(8)}}"
+ with_sequence: count={{(openshift_logging_es_cluster_size - openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | length) | abs}}
+ when:
+ - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | length < openshift_logging_es_cluster_size }}"
+ check_mode: no
+
+
+- name: Generate Elasticsearch DeploymentConfig
+ template: src=es.j2 dest={{mktemp.stdout}}/templates/logging-{{deploy_name}}-dc.yaml
+ vars:
+ component: es
+ logging_component: elasticsearch
+ deploy_name_prefix: "logging-{{component}}"
+ image: "{{openshift_logging_image_prefix}}logging-elasticsearch:{{openshift_logging_image_version}}"
+ es_cluster_name: "{{component}}"
+ es_cpu_limit: "{{openshift_logging_es_cpu_limit }}"
+ es_memory_limit: "{{openshift_logging_es_memory_limit}}"
+ volume_names: "{{es_pvc_pool | default([])}}"
+ pvc_claim: "{{(volume_names | length > item.0) | ternary(volume_names[item.0], None)}}"
+ deploy_name: "{{item.1}}"
+ with_indexed_items:
+ - "{{es_dc_pool | default([])}}"
+ check_mode: no
+ when:
+ - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | length < openshift_logging_es_cluster_size }}"
+ changed_when: no
+
+# --------- Tasks for Ops clusters ---------
+
+- name: Validate Elasticsearch cluster size for Ops
+ fail: msg="The openshift_logging_es_ops_cluster_size may not be scaled down more than 1 less (or 0) the number of Elasticsearch nodes already deployed"
+ vars:
+ es_dcs: "{{openshift_logging_facts.elasticsearch_ops.deploymentconfigs}}"
+ cluster_size: "{{openshift_logging_es_ops_cluster_size}}"
+ when:
+ - openshift_logging_use_ops
+ - "{{es_dcs | length - openshift_logging_es_ops_cluster_size | abs > 1}}"
+ check_mode: no
+
+- name: Generate PersistentVolumeClaims for Ops
+ include: "{{ role_path}}/tasks/generate_pvcs.yaml"
+ vars:
+ es_pvc_names: "{{openshift_logging_facts.elasticsearch_ops.pvcs.keys()}}"
+ es_dc_names: "{{openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys()}}"
+ openshift_logging_es_pvc_prefix: "{{openshift_logging_es_ops_pvc_prefix}}"
+ openshift_logging_es_cluster_size: "{{openshift_logging_es_ops_cluster_size}}"
+ openshift_logging_es_pvc_size: "{{openshift_logging_es_ops_pvc_size}}"
+ openshift_logging_es_pvc_dynamic: "{{openshift_logging_es_ops_pvc_dynamic}}"
+ openshift_logging_es_pv_selector: "{{openshift_logging_es_ops_pv_selector}}"
+ when:
+ - openshift_logging_use_ops
+ - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | length < openshift_logging_es_ops_cluster_size }}"
+ check_mode: no
+
+- name: Init pool of DeploymentConfig names for Elasticsearch for Ops
+ set_fact: es_dc_pool_ops={{es_dc_pool_ops | default([]) + [deploy_name]}}
+ vars:
+ component: es-ops
+ es_cluster_name: "{{component}}"
+ deploy_name_prefix: "logging-{{component}}"
+ deploy_name: "{{deploy_name_prefix}}-{{'abcdefghijklmnopqrstuvwxyz0123456789'|random_word(8)}}"
+ cluster_size: "{{openshift_logging_es_ops_cluster_size}}"
+ with_sequence: count={{openshift_logging_es_ops_cluster_size - openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | length}}
+ when:
+ - openshift_logging_use_ops
+ - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | length < openshift_logging_es_ops_cluster_size }}"
+ check_mode: no
+
+- name: Generate Elasticsearch DeploymentConfig for Ops
+ template: src=es.j2 dest={{mktemp.stdout}}/templates/logging-{{deploy_name}}-dc.yaml
+ vars:
+ component: es-ops
+ logging_component: elasticsearch
+ deploy_name_prefix: "logging-{{component}}"
+ image: "{{openshift_logging_image_prefix}}logging-elasticsearch:{{openshift_logging_image_version}}"
+ volume_names: "{{es_pvc_pool | default([])}}"
+ pvc_claim: "{{(volume_names | length > item.0) | ternary(volume_names[item.0], None)}}"
+ deploy_name: "{{item.1}}"
+ es_cluster_name: "{{component}}"
+ es_cpu_limit: "{{openshift_logging_es_ops_cpu_limit }}"
+ es_memory_limit: "{{openshift_logging_es_ops_memory_limit}}"
+ es_node_quorum: "{{es_ops_node_quorum}}"
+ es_recover_after_nodes: "{{es_ops_recover_after_nodes}}"
+ es_recover_expected_nodes: "{{es_ops_recover_expected_nodes}}"
+ openshift_logging_es_recover_after_time: "{{openshift_logging_es_ops_recover_after_time}}"
+ with_indexed_items:
+ - "{{es_dc_pool_ops | default([])}}"
+ when:
+ - openshift_logging_use_ops
+ - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | length < openshift_logging_es_ops_cluster_size }}"
+ check_mode: no
+ changed_when: no
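
The pool logic is easiest to see with numbers: with openshift_logging_es_cluster_size=3 and one DeploymentConfig already deployed, with_sequence counts 2, so two new names are appended, for example (suffixes are random; these are illustrative):

    es_dc_pool:
    - logging-es-x4bq92tk
    - logging-es-7hm20dfz

Each name then gets its own rendered es.j2 manifest, and pvc_claim pairs it with the PVC from es_pvc_pool at the same index when one exists; otherwise the node runs without a claim.
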
diff --git a/roles/openshift_logging/tasks/install_fluentd.yaml b/roles/openshift_logging/tasks/install_fluentd.yaml
new file mode 100644
index 000000000..4c510c6e7
--- /dev/null
+++ b/roles/openshift_logging/tasks/install_fluentd.yaml
@@ -0,0 +1,54 @@
+---
+- set_fact: fluentd_ops_host={{ (openshift_logging_use_ops) | ternary(openshift_logging_es_ops_host, openshift_logging_es_host) }}
+ check_mode: no
+
+- set_fact: fluentd_ops_port={{ (openshift_logging_use_ops) | ternary(openshift_logging_es_ops_port, openshift_logging_es_port) }}
+ check_mode: no
+
+- name: Generating Fluentd daemonset
+ template: src=fluentd.j2 dest={{mktemp.stdout}}/templates/logging-fluentd.yaml
+ vars:
+ daemonset_name: logging-fluentd
+ daemonset_component: fluentd
+ daemonset_container_name: fluentd-elasticsearch
+ daemonset_serviceAccount: aggregated-logging-fluentd
+ ops_host: "{{ fluentd_ops_host }}"
+ ops_port: "{{ fluentd_ops_port }}"
+ fluentd_nodeselector_key: "{{openshift_logging_fluentd_nodeselector.keys()[0]}}"
+ fluentd_nodeselector_value: "{{openshift_logging_fluentd_nodeselector.values()[0]}}"
+ check_mode: no
+ changed_when: no
+
+- name: "Check fluentd privileged permissions"
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ get scc/privileged -o jsonpath='{.users}'
+ register: fluentd_privileged
+ check_mode: no
+ changed_when: no
+
+- name: "Set privileged permissions for fluentd"
+ command: >
+ {{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy
+ add-scc-to-user privileged system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd
+ register: fluentd_output
+ failed_when: "fluentd_output.rc == 1 and 'exists' not in fluentd_output.stderr"
+ check_mode: no
+ when: fluentd_privileged.stdout.find("system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd") == -1
+
+- name: "Check fluentd cluster-reader permissions"
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ get clusterrolebinding/cluster-readers -o jsonpath='{.userNames}'
+ register: fluentd_cluster_reader
+ check_mode: no
+ changed_when: no
+
+- name: "Set cluster-reader permissions for fluentd"
+ command: >
+ {{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy
+ add-cluster-role-to-user cluster-reader system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd
+ register: fluentd2_output
+ failed_when: "fluentd2_output.rc == 1 and 'exists' not in fluentd2_output.stderr"
+ check_mode: no
+ when: fluentd_cluster_reader.stdout.find("system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd") == -1
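
The Fluentd daemonset needs host-level log access and cross-namespace metadata reads, hence the two grants, which render to the usual policy commands (namespace illustrative; --config flag omitted for brevity):

    oadm policy add-scc-to-user privileged \
      system:serviceaccount:logging:aggregated-logging-fluentd
    oadm policy add-cluster-role-to-user cluster-reader \
      system:serviceaccount:logging:aggregated-logging-fluentd

Each grant is guarded by a prior oc get so it only runs when the service account is not already listed.
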
diff --git a/roles/openshift_logging/tasks/install_kibana.yaml b/roles/openshift_logging/tasks/install_kibana.yaml
new file mode 100644
index 000000000..de4b018dd
--- /dev/null
+++ b/roles/openshift_logging/tasks/install_kibana.yaml
@@ -0,0 +1,58 @@
+---
+- name: Check Kibana current replica count
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get dc/logging-kibana
+ -o jsonpath='{.spec.replicas}' -n {{openshift_logging_namespace}}
+ register: kibana_replica_count
+ when: not ansible_check_mode
+ ignore_errors: yes
+ changed_when: no
+
+- name: Check Kibana ops current replica count
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get dc/logging-kibana-ops
+ -o jsonpath='{.spec.replicas}' -n {{openshift_logging_namespace}}
+ register: kibana_ops_replica_count
+ when:
+ - not ansible_check_mode
+ - openshift_logging_use_ops
+ ignore_errors: yes
+ changed_when: no
+
+
+- name: Generate kibana deploymentconfig
+ template: src=kibana.j2 dest={{mktemp.stdout}}/templates/logging-kibana-dc.yaml
+ vars:
+ component: kibana
+ logging_component: kibana
+ deploy_name: "logging-{{component}}"
+ image: "{{openshift_logging_image_prefix}}logging-kibana:{{openshift_logging_image_version}}"
+ proxy_image: "{{openshift_logging_image_prefix}}logging-auth-proxy:{{openshift_logging_image_version}}"
+ es_host: logging-es
+ es_port: "{{openshift_logging_es_port}}"
+ kibana_cpu_limit: "{{openshift_logging_kibana_cpu_limit }}"
+ kibana_memory_limit: "{{openshift_logging_kibana_memory_limit }}"
+ kibana_proxy_cpu_limit: "{{openshift_logging_kibana_proxy_cpu_limit }}"
+ kibana_proxy_memory_limit: "{{openshift_logging_kibana_proxy_memory_limit }}"
+ replicas: "{{kibana_replica_count.stdout | default (0)}}"
+ check_mode: no
+ changed_when: no
+
+- name: Generate OPS kibana deploymentconfig
+ template: src=kibana.j2 dest={{mktemp.stdout}}/templates/logging-kibana-ops-dc.yaml
+ vars:
+ component: kibana-ops
+ logging_component: kibana
+ deploy_name: "logging-{{component}}"
+ image: "{{openshift_logging_image_prefix}}logging-kibana:{{openshift_logging_image_version}}"
+ proxy_image: "{{openshift_logging_image_prefix}}logging-auth-proxy:{{openshift_logging_image_version}}"
+ es_host: logging-es-ops
+ es_port: "{{openshift_logging_es_ops_port}}"
+ kibana_cpu_limit: "{{openshift_logging_kibana_ops_cpu_limit }}"
+ kibana_memory_limit: "{{openshift_logging_kibana_ops_memory_limit }}"
+ kibana_proxy_cpu_limit: "{{openshift_logging_kibana_ops_proxy_cpu_limit }}"
+ kibana_proxy_memory_limit: "{{openshift_logging_kibana_ops_proxy_memory_limit }}"
+ replicas: "{{kibana_ops_replica_count.stdout | default (0)}}"
+ when: openshift_logging_use_ops
+ check_mode: no
+ changed_when: no
diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml
new file mode 100644
index 000000000..af03e9371
--- /dev/null
+++ b/roles/openshift_logging/tasks/install_logging.yaml
@@ -0,0 +1,49 @@
+---
+- name: Gather OpenShift Logging Facts
+ openshift_logging_facts:
+ oc_bin: "{{openshift.common.client_binary}}"
+ admin_kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
+ openshift_logging_namespace: "{{openshift_logging_namespace}}"
+ tags: logging_facts
+ check_mode: no
+
+- name: Validate Elasticsearch cluster size
+ fail: msg="The openshift_logging_es_cluster_size may not be scaled down more than 1 less (or 0) the number of Elasticsearch nodes already deployed"
+ when: "{{openshift_logging_facts.elasticsearch.deploymentconfigs | length - openshift_logging_es_cluster_size | abs > 1}}"
+
+- name: Install logging
+ include: "{{ role_path }}/tasks/install_{{ install_component }}.yaml"
+ when: openshift_hosted_logging_install | default(true) | bool
+ with_items:
+ - support
+ - elasticsearch
+ - kibana
+ - curator
+ - fluentd
+ loop_control:
+ loop_var: install_component
+
+- name: Create objects
+ include: oc_apply.yaml
+ vars:
+ - kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
+ - namespace: "{{ openshift_logging_namespace }}"
+ - file_name: "{{ file }}"
+ - file_content: "{{ lookup('file', file) | from_yaml }}"
+ with_fileglob:
+ - "{{ mktemp.stdout }}/templates/*.yaml"
+ loop_control:
+ loop_var: file
+ when: not ansible_check_mode
+
+- name: Printing out objects to create
+ debug: msg="{{lookup('file', file)|quote}}"
+ with_fileglob:
+ - "{{mktemp.stdout}}/templates/*.yaml"
+ loop_control:
+ loop_var: file
+ when: ansible_check_mode
+
+- name: Scaling up cluster
+ include: start_cluster.yaml
+ when: start_cluster | default(true) | bool
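
A minimal inventory fragment that drives this install path, with variable names taken from this role and values purely illustrative:

    openshift_logging_install_logging: true
    openshift_logging_namespace: logging
    openshift_logging_es_cluster_size: 3
    openshift_logging_use_ops: false

With install selected, the role gathers facts, validates the Elasticsearch cluster size, renders every component's manifests into {{mktemp.stdout}}/templates, applies them in one oc_apply pass, and finally scales the cluster up via start_cluster.yaml.
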
diff --git a/roles/openshift_logging/tasks/install_support.yaml b/roles/openshift_logging/tasks/install_support.yaml
new file mode 100644
index 000000000..da0bbb627
--- /dev/null
+++ b/roles/openshift_logging/tasks/install_support.yaml
@@ -0,0 +1,54 @@
+---
+# This is the base configuration for installing the other components
+- name: Check if logging project already exists
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get project {{openshift_logging_namespace}} --no-headers
+ register: logging_project_result
+ ignore_errors: yes
+ when: not ansible_check_mode
+ changed_when: no
+
+- name: "Create logging project"
+ command: >
+ {{ openshift.common.admin_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-project {{openshift_logging_namespace}}
+ when: not ansible_check_mode and "not found" in logging_project_result.stderr
+
+- name: Create logging cert directory
+ file: path={{openshift.common.config_base}}/logging state=directory mode=0755
+ changed_when: False
+ check_mode: no
+
+- include: generate_certs.yaml
+ vars:
+ generated_certs_dir: "{{openshift.common.config_base}}/logging"
+
+- name: Create temp directory for all our templates
+ file: path={{mktemp.stdout}}/templates state=directory mode=0755
+ changed_when: False
+ check_mode: no
+
+- include: generate_secrets.yaml
+ vars:
+ generated_certs_dir: "{{openshift.common.config_base}}/logging"
+
+- include: generate_configmaps.yaml
+
+- include: generate_services.yaml
+
+- name: Generate kibana-proxy oauth client
+ template: src=oauth-client.j2 dest={{mktemp.stdout}}/templates/oauth-client.yaml
+ vars:
+ secret: "{{oauth_secret}}"
+ when: oauth_secret is defined
+ check_mode: no
+ changed_when: no
+
+- include: generate_clusterroles.yaml
+
+- include: generate_rolebindings.yaml
+
+- include: generate_clusterrolebindings.yaml
+
+- include: generate_serviceaccounts.yaml
+
+- include: generate_routes.yaml
diff --git a/roles/openshift_logging/tasks/label_node.yaml b/roles/openshift_logging/tasks/label_node.yaml
new file mode 100644
index 000000000..aecb5d81b
--- /dev/null
+++ b/roles/openshift_logging/tasks/label_node.yaml
@@ -0,0 +1,29 @@
+---
+- command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get node {{host}}
+ -o jsonpath='{.metadata.labels.{{ label }}}'
+ register: label_value
+ failed_when: label_value.rc == 1 and 'exists' not in label_value.stderr
+ when: not ansible_check_mode
+ changed_when: no
+
+- command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig label node {{host}} {{label}}={{value}} --overwrite
+ register: label_result
+ failed_when: label_result.rc == 1 and 'exists' not in label_result.stderr
+ when:
+ - value is defined
+ - label_value.stdout is defined
+ - label_value.stdout != value
+ - unlabel is not defined or not unlabel
+ - not ansible_check_mode
+
+- command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig label node {{host}} {{label}}-
+ register: label_result
+ failed_when: label_result.rc == 1 and 'exists' not in label_result.stderr
+ when:
+ - unlabel is defined
+ - unlabel
+ - not ansible_check_mode
+ - label_value.stdout != ""
diff --git a/roles/openshift_logging/tasks/main.yaml b/roles/openshift_logging/tasks/main.yaml
new file mode 100644
index 000000000..c4ec1b255
--- /dev/null
+++ b/roles/openshift_logging/tasks/main.yaml
@@ -0,0 +1,40 @@
+---
+- fail:
 msg: Only one Fluentd nodeselector key/value pair should be provided
+ when: "{{ openshift_logging_fluentd_nodeselector.keys() | count }} > 1"
+
+
+- name: Create temp directory for doing work in
+ command: mktemp -d /tmp/openshift-logging-ansible-XXXXXX
+ register: mktemp
+ changed_when: False
+ check_mode: no
+ tags: logging_init
+
+- debug: msg="Created temp dir {{mktemp.stdout}}"
+
+- name: Copy the admin client config(s)
+ command: >
+ cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
+ changed_when: False
+ check_mode: no
+ tags: logging_init
+
+- include: "{{ role_path }}/tasks/install_logging.yaml"
+ when: openshift_logging_install_logging | default(false) | bool
+
+- include: "{{ role_path }}/tasks/upgrade_logging.yaml"
+ when: openshift_logging_upgrade_logging | default(false) | bool
+
+- include: "{{ role_path }}/tasks/delete_logging.yaml"
+ when:
+ - not openshift_logging_install_logging | default(false) | bool
+ - not openshift_logging_upgrade_logging | default(false) | bool
+
+- name: Delete temp directory
+ file:
+ name: "{{ mktemp.stdout }}"
+ state: absent
+ tags: logging_cleanup
+ changed_when: False
+ check_mode: no
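
Note the dispatch above: openshift_logging_install_logging selects install, openshift_logging_upgrade_logging selects upgrade, and when neither is true the role tears logging down via delete_logging.yaml. Uninstalling is therefore just a matter of flipping the flag (playbook path illustrative):

    ansible-playbook playbooks/byo/openshift-cluster/openshift-logging.yml \
      -e openshift_logging_install_logging=false
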
diff --git a/roles/openshift_logging/tasks/oc_apply.yaml b/roles/openshift_logging/tasks/oc_apply.yaml
new file mode 100644
index 000000000..c362b7fca
--- /dev/null
+++ b/roles/openshift_logging/tasks/oc_apply.yaml
@@ -0,0 +1,29 @@
+---
+- name: Checking generation of {{file_content.kind}} {{file_content.metadata.name}}
+ shell: >
+ {{ openshift.common.client_binary }}
+ --config={{ kubeconfig }}
+ get {{file_content.kind}} {{file_content.metadata.name}}
+ -o jsonpath='{.metadata.resourceVersion}'
+ -n {{namespace}} || echo 0
+ register: generation_init
+ changed_when: no
+
+- name: Applying {{file_name}}
+ command: >
+ {{ openshift.common.client_binary }} --config={{ kubeconfig }}
+ apply -f {{ file_name }}
+ -n {{ namespace }}
+ register: generation_apply
+ failed_when: "'error' in generation_apply.stderr"
+ changed_when: no
+
+- name: Determine change status of {{file_content.kind}} {{file_content.metadata.name}}
+ shell: >
+ {{ openshift.common.client_binary }} --config={{ kubeconfig }}
+ get {{file_content.kind}} {{file_content.metadata.name}}
+ -o jsonpath='{.metadata.resourceVersion}'
+ -n {{namespace}} || echo 0
+ register: generation_changed
+ failed_when: "'error' in generation_changed.stderr"
+ changed_when: generation_changed.stdout | int > generation_init.stdout | int
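
The changed-detection here leans on resourceVersion increasing whenever an object is modified, with || echo 0 treating a missing object as version 0. A shell equivalent for one object (name and namespace illustrative):

    before=$(oc get dc logging-kibana -n logging -o jsonpath='{.metadata.resourceVersion}' || echo 0)
    oc apply -f logging-kibana-dc.yaml -n logging
    after=$(oc get dc logging-kibana -n logging -o jsonpath='{.metadata.resourceVersion}' || echo 0)
    [ "$after" -gt "$before" ] && echo changed

This lets apply run unconditionally while still reporting an accurate changed status back to Ansible.
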
diff --git a/roles/openshift_logging/tasks/procure_server_certs.yaml b/roles/openshift_logging/tasks/procure_server_certs.yaml
new file mode 100644
index 000000000..44dd5e894
--- /dev/null
+++ b/roles/openshift_logging/tasks/procure_server_certs.yaml
@@ -0,0 +1,52 @@
+---
+- name: Checking for {{ cert_info.procure_component }}.crt
+ stat: path="{{generated_certs_dir}}/{{ cert_info.procure_component }}.crt"
+ register: component_cert_file
+ check_mode: no
+
+- name: Checking for {{ cert_info.procure_component }}.key
+ stat: path="{{generated_certs_dir}}/{{ cert_info.procure_component }}.key"
+ register: component_key_file
+ check_mode: no
+
+- name: Looking up a pre-supplied server cert for {{ cert_info.procure_component }}
+ set_fact: procure_component_crt={{ lookup('env', cert_info.procure_component ~ '_crt') }}
+ when: cert_info.hostnames is undefined and lookup('env', cert_info.procure_component ~ '_crt') != '' and lookup('env', cert_info.procure_component ~ '_key') != ''
+ check_mode: no
+
+- name: Looking up a pre-supplied server key for {{ cert_info.procure_component }}
+ set_fact: procure_component_key={{ lookup('env', cert_info.procure_component ~ '_key') }}
+ when: cert_info.hostnames is undefined and lookup('env', cert_info.procure_component ~ '_crt') != '' and lookup('env', cert_info.procure_component ~ '_key') != ''
+ check_mode: no
+
+- name: Creating signed server cert and key for {{ cert_info.procure_component }}
+ command: >
+ {{ openshift.common.admin_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig ca create-server-cert
+ --key={{generated_certs_dir}}/{{cert_info.procure_component}}.key --cert={{generated_certs_dir}}/{{cert_info.procure_component}}.crt
+ --hostnames={{cert_info.hostnames|quote}} --signer-cert={{generated_certs_dir}}/ca.crt --signer-key={{generated_certs_dir}}/ca.key
+ --signer-serial={{generated_certs_dir}}/ca.serial.txt
+ check_mode: no
+ when:
+ - cert_info.hostnames is defined
+ - not component_key_file.stat.exists
+ - not component_cert_file.stat.exists
+
+- name: Copying server key for {{ cert_info.procure_component }} to generated certs directory
+ copy: content="{{procure_component_key}}" dest={{generated_certs_dir}}/{{cert_info.procure_component}}.key
+ check_mode: no
+ when:
+ - cert_info.hostnames is undefined
+ - lookup('env', cert_info.procure_component ~ '_crt') != ''
+ - lookup('env', cert_info.procure_component ~ '_key') != ''
+ - not component_key_file.stat.exists
+ - not component_cert_file.stat.exists
+
+- name: Copying server cert for {{ cert_info.procure_component }} to generated certs directory
+ copy: content="{{procure_component_crt}}" dest={{generated_certs_dir}}/{{cert_info.procure_component}}.crt
+ check_mode: no
+ when:
+ - cert_info.hostnames is undefined
+ - lookup('env', cert_info.procure_component ~ '_crt') != ''
+ - lookup('env', cert_info.procure_component ~ '_key') != ''
+ - not component_key_file.stat.exists
+ - not component_cert_file.stat.exists
diff --git a/roles/openshift_logging/tasks/scale.yaml b/roles/openshift_logging/tasks/scale.yaml
new file mode 100644
index 000000000..125d3b8af
--- /dev/null
+++ b/roles/openshift_logging/tasks/scale.yaml
@@ -0,0 +1,28 @@
+---
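+# scale {{object}} to {{desired}} replicas: read the current spec.replicas,
+# run `oc scale` only when they differ, then poll status.replicas until the
+# rollout converges (30 retries, 10s apart)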
+- command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get {{object}}
+ -o jsonpath='{.spec.replicas}' -n {{openshift_logging_namespace}}
+ register: replica_count
+ failed_when: replica_count.rc == 1 and 'exists' not in replica_count.stderr
+ when: not ansible_check_mode
+ changed_when: no
+
+- command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig scale {{object}}
+ --replicas={{desired}} -n {{openshift_logging_namespace}}
+ register: scale_result
+ failed_when: scale_result.rc == 1 and 'exists' not in scale_result.stderr
+ when:
+ - not ansible_check_mode
+ - replica_count.stdout|int != desired
+
+- command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get {{object}} -n {{openshift_logging_namespace}} -o jsonpath='{.status.replicas}'
+ register: replica_counts
+ until: replica_counts.stdout|int == desired
+ retries: 30
+ delay: 10
+ when:
+ - not ansible_check_mode
+ - replica_count.stdout|int != desired
+ changed_when: no
diff --git a/roles/openshift_logging/tasks/start_cluster.yaml b/roles/openshift_logging/tasks/start_cluster.yaml
new file mode 100644
index 000000000..a96ad3f3a
--- /dev/null
+++ b/roles/openshift_logging/tasks/start_cluster.yaml
@@ -0,0 +1,104 @@
+---
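+# bring the logging cluster up: label nodes so fluentd schedules onto them,
+# then scale elasticsearch, kibana and curator back up (plus their -ops
+# twins when openshift_logging_use_ops is set)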
+- command: >
+ {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get node -o jsonpath='{.items[*].metadata.name}'
+ register: fluentd_hosts
+ when: "'--all' in openshift_logging_fluentd_hosts"
+ check_mode: no
+ changed_when: no
+
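+# fluentd is driven by its nodeselector: applying the label to a node is
+# what starts the daemonset pod there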
+- name: start fluentd
+ include: label_node.yaml
+ vars:
+ host: "{{fluentd_host}}"
+ label: "{{ openshift_logging_fluentd_nodeselector.keys() | first }}"
+ value: "{{ openshift_logging_fluentd_nodeselector.values() | first }}"
+ with_items: "{{(fluentd_hosts.stdout_lines is defined) | ternary(fluentd_hosts.stdout_lines, openshift_logging_fluentd_hosts)}}"
+ loop_control:
+ loop_var: fluentd_host
+
+- command: >
+ {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=es -o name -n {{openshift_logging_namespace}}
+ register: es_dc
+ check_mode: no
+ changed_when: no
+
+- name: start elasticsearch
+ include: scale.yaml
+ vars:
+ desired: 1
+ with_items: "{{es_dc.stdout_lines}}"
+ loop_control:
+ loop_var: object
+
+- command: >
+ {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=kibana -o name -n {{openshift_logging_namespace}}
+ register: kibana_dc
+ check_mode: no
+ changed_when: no
+
+- name: start kibana
+ include: scale.yaml
+ vars:
+ desired: "{{ openshift_logging_kibana_replica_count | default(1) }}"
+ with_items: "{{kibana_dc.stdout_lines}}"
+ loop_control:
+ loop_var: object
+
+- command: >
+ {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=curator -o name -n {{openshift_logging_namespace}}
+ register: curator_dc
+ check_mode: no
+ changed_when: no
+
+- name: start curator
+ include: scale.yaml
+ vars:
+ desired: 1
+ with_items: "{{curator_dc.stdout_lines}}"
+ loop_control:
+ loop_var: object
+
+- command: >
+ {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=es-ops -o name -n {{openshift_logging_namespace}}
+ register: es_dc
+ check_mode: no
+ changed_when: no
+
+- name: start elasticsearch-ops
+ include: scale.yaml
+ vars:
+ desired: 1
+ with_items: "{{es_dc.stdout_lines}}"
+ loop_control:
+ loop_var: object
+ when: openshift_logging_use_ops | bool
+
+- command: >
+ {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=kibana-ops -o name -n {{openshift_logging_namespace}}
+ register: kibana_dc
+ check_mode: no
+ changed_when: no
+
+- name: start kibana-ops
+ include: scale.yaml
+ vars:
+ desired: "{{ openshift_logging_kibana_ops_replica_count | default(1) }}"
+ with_items: "{{kibana_dc.stdout_lines}}"
+ loop_control:
+ loop_var: object
+ when: openshift_logging_use_ops | bool
+
+- command: >
+ {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=curator-ops -o name -n {{openshift_logging_namespace}}
+ register: curator_dc
+ check_mode: no
+ changed_when: no
+
+- name: start curator-ops
+ include: scale.yaml
+ vars:
+ desired: 1
+ with_items: "{{curator_dc.stdout_lines}}"
+ loop_control:
+ loop_var: object
+ when: openshift_logging_use_ops | bool
diff --git a/roles/openshift_logging/tasks/stop_cluster.yaml b/roles/openshift_logging/tasks/stop_cluster.yaml
new file mode 100644
index 000000000..e44493e4d
--- /dev/null
+++ b/roles/openshift_logging/tasks/stop_cluster.yaml
@@ -0,0 +1,97 @@
+---
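+# tear the logging cluster down: remove the fluentd node labels, then scale
+# every logging deployment (including the -ops twins) to zero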
+- command: >
+ {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get node -o jsonpath='{.items[*].metadata.name}'
+ register: fluentd_hosts
+ when: "'--all' in openshift_logging_fluentd_hosts"
+ check_mode: no
+ changed_when: no
+
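+# removing the nodeselector label evicts the fluentd daemonset pod from
+# each node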
+- name: stop fluentd
+ include: label_node.yaml
+ vars:
+ host: "{{fluentd_host}}"
+ label: "{{ openshift_logging_fluentd_nodeselector.keys() | first }}"
+ unlabel: True
+ with_items: "{{(fluentd_hosts.stdout_lines is defined) | ternary(fluentd_hosts.stdout_lines, openshift_logging_fluentd_hosts)}}"
+ loop_control:
+ loop_var: fluentd_host
+
+- command: >
+ {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=es -o name -n {{openshift_logging_namespace}}
+ register: es_dc
+ changed_when: no
+
+- name: stop elasticsearch
+ include: scale.yaml
+ vars:
+ desired: 0
+ with_items: "{{es_dc.stdout_lines}}"
+ loop_control:
+ loop_var: object
+
+- command: >
+ {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=kibana -o name -n {{openshift_logging_namespace}}
+ register: kibana_dc
+ changed_when: no
+
+- name: stop kibana
+ include: scale.yaml
+ vars:
+ desired: 0
+ with_items: "{{kibana_dc.stdout_lines}}"
+ loop_control:
+ loop_var: object
+
+- command: >
+ {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=curator -o name -n {{openshift_logging_namespace}}
+ register: curator_dc
+ changed_when: no
+
+- name: stop curator
+ include: scale.yaml
+ vars:
+ desired: 0
+ with_items: "{{curator_dc.stdout_lines}}"
+ loop_control:
+ loop_var: object
+
+- command: >
+ {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=es-ops -o name -n {{openshift_logging_namespace}}
+ register: es_dc
+ changed_when: no
+
+- name: stop elasticsearch-ops
+ include: scale.yaml
+ vars:
+ desired: 0
+ with_items: "{{es_dc.stdout_lines}}"
+ loop_control:
+ loop_var: object
+ when: openshift_logging_use_ops | bool
+
+- command: >
+ {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=kibana-ops -o name -n {{openshift_logging_namespace}}
+ register: kibana_dc
+ changed_when: no
+
+- name: stop kibana-ops
+ include: scale.yaml
+ vars:
+ desired: 0
+ with_items: "{{kibana_dc.stdout_lines}}"
+ loop_control:
+ loop_var: object
+ when: openshift_logging_use_ops | bool
+
+- command: >
+ {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=curator-ops -o name -n {{openshift_logging_namespace}}
+ register: curator_dc
+ changed_when: no
+
+- name: stop curator-ops
+ include: scale.yaml
+ vars:
+ desired: 0
+ with_items: "{{curator_dc.stdout_lines}}"
+ loop_control:
+ loop_var: object
+ when: openshift_logging_use_ops | bool
diff --git a/roles/openshift_logging/tasks/upgrade_logging.yaml b/roles/openshift_logging/tasks/upgrade_logging.yaml
new file mode 100644
index 000000000..a93463239
--- /dev/null
+++ b/roles/openshift_logging/tasks/upgrade_logging.yaml
@@ -0,0 +1,41 @@
+---
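+# upgrade flow: stop everything, lay down the new api objects without
+# starting them, bring up only elasticsearch, run the migration script,
+# then start the rest of the cluster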
+- name: Stop the cluster
+ include: stop_cluster.yaml
+
+- name: Upgrade logging
+ include: install_logging.yaml
+ vars:
+ start_cluster: False
+
+# start ES so that we can run migrate script
+- command: >
+ {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=es -o name -n {{openshift_logging_namespace}}
+ register: es_dc
+ check_mode: no
+ changed_when: no
+
+- name: start elasticsearch
+ include: scale.yaml
+ vars:
+ desired: 1
+ with_items: "{{es_dc.stdout_lines}}"
+ loop_control:
+ loop_var: object
+
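+# wait for at least one ES pod to be Running before migrating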
+- command: >
+ {{ openshift.common.client_binary }} --config={{mktemp.stdout}}/admin.kubeconfig get pods -n {{openshift_logging_namespace}} -l component=es -o jsonpath='{.items[?(@.status.phase == "Running")].metadata.name}'
+ register: running_pod
+ until: running_pod.stdout != ''
+ retries: 30
+ delay: 10
+ changed_when: no
+ check_mode: no
+
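+# the migration script skips work it has already done; only report changed
+# when at least one of update_for_uuid / update_for_common_data_model ran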
+- name: Run upgrade script
+ script: es_migration.sh {{openshift.common.config_base}}/logging/ca.crt {{openshift.common.config_base}}/logging/system.admin.key {{openshift.common.config_base}}/logging/system.admin.crt {{openshift_logging_es_host}} {{openshift_logging_es_port}} {{openshift_logging_namespace}}
+ register: script_output
+ changed_when:
+ - script_output.rc == 0
+ - script_output.stdout.find("skipping update_for_uuid") == -1 or script_output.stdout.find("skipping update_for_common_data_model") == -1
+
+- name: Start up rest of cluster
+ include: start_cluster.yaml