author     ewolinetz <ewolinet@redhat.com>    2016-09-28 10:52:07 -0500
committer  ewolinetz <ewolinet@redhat.com>    2016-12-14 15:38:10 -0600
commit     b579a4acfa64f85119ffbcbb8f6701972ef0dbb6 (patch)
tree       6b65a25017defdca2fafe8655a858436c34db679 /roles/openshift_logging/tasks
parent     43f52e292afac7bde5e588377e56d9c49574806c (diff)
Creating openshift_logging role for deploying Aggregated Logging without a deployer image
Diffstat (limited to 'roles/openshift_logging/tasks')
28 files changed, 1494 insertions, 0 deletions
diff --git a/roles/openshift_logging/tasks/delete_logging.yaml b/roles/openshift_logging/tasks/delete_logging.yaml
new file mode 100644
index 000000000..6e8fc29d0
--- /dev/null
+++ b/roles/openshift_logging/tasks/delete_logging.yaml
@@ -0,0 +1,93 @@
+---
+- name: stop logging
+  include: stop_cluster.yaml
+
+# delete the deployment objects that we had created
+- name: delete logging api objects
+  command: >
+    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+    delete {{ item }} --selector logging-infra -n {{ openshift_logging_namespace }} --ignore-not-found=true
+  with_items:
+    - dc
+    - rc
+    - svc
+    - routes
+    - templates
+    - daemonset
+
+# delete the oauthclient
+- name: delete oauthclient kibana-proxy
+  command: >
+    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete oauthclient kibana-proxy --ignore-not-found=true
+
+# delete any image streams that we may have created
+- name: delete logging is
+  command: >
+    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+    delete is -l logging-infra=support -n {{ openshift_logging_namespace }} --ignore-not-found=true
+
+# delete our old secrets
+- name: delete logging secrets
+  command: >
+    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+    delete secret {{ item }} -n {{ openshift_logging_namespace }} --ignore-not-found=true
+  with_items:
+    - logging-fluentd
+    - logging-elasticsearch
+    - logging-kibana
+    - logging-kibana-proxy
+    - logging-curator
+  ignore_errors: yes
+
+# delete role bindings
+- name: delete rolebindings
+  command: >
+    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+    delete rolebinding {{ item }} -n {{ openshift_logging_namespace }} --ignore-not-found=true
+  with_items:
+    - logging-elasticsearch-view-role
+
+# delete cluster role bindings
+- name: delete cluster role bindings
+  command: >
+    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+    delete clusterrolebindings {{ item }} -n {{ openshift_logging_namespace }} --ignore-not-found=true
+  with_items:
+    - rolebinding-reader
+
+# delete cluster roles
+- name: delete cluster roles
+  command: >
+    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+    delete clusterroles {{ item }} -n {{ openshift_logging_namespace }} --ignore-not-found=true
+  with_items:
+    - rolebinding-reader
+
+# delete our service accounts
+- name: delete service accounts
+  command: >
+    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+    delete serviceaccount {{ item }} -n {{ openshift_logging_namespace }} --ignore-not-found=true
+  with_items:
+    - aggregated-logging-elasticsearch
+    - aggregated-logging-kibana
+    - aggregated-logging-curator
+    - aggregated-logging-fluentd
+
+# delete our roles
+- name: delete roles
+  command: >
+    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+    delete clusterrole {{ item }} -n {{ openshift_logging_namespace }} --ignore-not-found=true
+  with_items:
+    - daemonset-admin
+
+# delete our configmaps
+- name: delete configmaps
+  command: >
+    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+    delete configmap {{ item }} -n {{ openshift_logging_namespace }} --ignore-not-found=true
+  with_items:
+    - logging-curator
+    - logging-elasticsearch
+    - logging-fluentd
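The delete path above is reached from tasks/main.yaml (later in this diff) whenever neither the install nor the upgrade flag is set. A minimal sketch of a play that drives the uninstall this way — the playbook, host group, and explicit flag values are illustrative, not part of this commit:

    ---
    # Hypothetical playbook: with both flags off, tasks/main.yaml falls
    # through to delete_logging.yaml and tears the logging stack down.
    - hosts: masters
      roles:
        - role: openshift_logging
          openshift_logging_install_logging: false
          openshift_logging_upgrade_logging: false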
diff --git a/roles/openshift_logging/tasks/generate_certs.yaml b/roles/openshift_logging/tasks/generate_certs.yaml
new file mode 100644
index 000000000..161d51055
--- /dev/null
+++ b/roles/openshift_logging/tasks/generate_certs.yaml
@@ -0,0 +1,168 @@
+---
+# we will ensure our secrets and configmaps are set up here first
+- name: Checking for ca.key
+  stat: path="{{generated_certs_dir}}/ca.key"
+  register: ca_key_file
+  check_mode: no
+
+- name: Checking for ca.crt
+  stat: path="{{generated_certs_dir}}/ca.crt"
+  register: ca_cert_file
+  check_mode: no
+
+- name: Checking for ca.serial.txt
+  stat: path="{{generated_certs_dir}}/ca.serial.txt"
+  register: ca_serial_file
+  check_mode: no
+
+- name: Generate certificates
+  command: >
+    {{ openshift.common.admin_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig ca create-signer-cert
+    --key={{generated_certs_dir}}/ca.key --cert={{generated_certs_dir}}/ca.crt
+    --serial={{generated_certs_dir}}/ca.serial.txt --name=logging-signer-test
+  check_mode: no
+  when:
+    - not ca_key_file.stat.exists
+    - not ca_cert_file.stat.exists
+    - not ca_serial_file.stat.exists
+
+- name: Checking for signing.conf
+  stat: path="{{generated_certs_dir}}/signing.conf"
+  register: signing_conf_file
+  check_mode: no
+
+- block:
+    - copy: src=signing.conf dest={{generated_certs_dir}}/signing.conf
+      check_mode: no
+
+    - lineinfile: "dest={{generated_certs_dir}}/signing.conf regexp='# Top dir$' line='dir = {{generated_certs_dir}} # Top dir'"
+      check_mode: no
+  when:
+    - not signing_conf_file.stat.exists
+
+- include: procure_server_certs.yaml
+  loop_control:
+    loop_var: cert_info
+  with_items:
+    - procure_component: kibana
+    - procure_component: kibana-ops
+    - procure_component: kibana-internal
+      hostnames: "kibana, kibana-ops, {{openshift_logging_kibana_hostname}}, {{openshift_logging_kibana_ops_hostname}}"
+
+# - include: procure_server_certs.yaml
+#   vars:
+#     - procure_component: kibana
+
+# - include: procure_server_certs.yaml
+#   vars:
+#     - procure_component: kibana-ops
+
+# - include: procure_server_certs.yaml
+#   vars:
+#     - procure_component: kibana-internal
+#     - hostnames: "kibana, kibana-ops, {{openshift_logging_kibana_hostname}}, {{openshift_logging_kibana_ops_hostname}}"
+
+- name: Copy proxy TLS configuration file
+  copy: src=server-tls.json dest={{generated_certs_dir}}/server-tls.json
+  when: server_tls_json is undefined
+  check_mode: no
+
+- name: Copy proxy TLS configuration file
+  copy: content="{{server_tls_json}}" dest={{generated_certs_dir}}/server-tls.json
+  when: server_tls_json is defined
+  check_mode: no
+
+- name: Checking for ca.db
+  stat: path="{{generated_certs_dir}}/ca.db"
+  register: ca_db_file
+  check_mode: no
+
+- copy: content="" dest={{generated_certs_dir}}/ca.db
+  check_mode: no
+  when:
+    - not ca_db_file.stat.exists
+
+- name: Checking for ca.crt.srl
+  stat: path="{{generated_certs_dir}}/ca.crt.srl"
+  register: ca_cert_srl_file
+  check_mode: no
+
+- copy: content="" dest={{generated_certs_dir}}/ca.crt.srl
+  check_mode: no
+  when:
+    - not ca_cert_srl_file.stat.exists
+
+- name: Generate PEM certs
+  include: generate_pems.yaml component={{node_name}}
+  with_items:
+    - system.logging.fluentd
+    - system.logging.kibana
+    - system.logging.curator
+    - system.admin
+  loop_control:
+    loop_var: node_name
+
+- shell: certs=""; for cert in $(echo logging-es{,-ops}); do certs=$certs,dns:$cert; done; echo $certs
+  register: elasticsearch_certs
+  check_mode: no
+
+- shell: certs=""; for cert in $(echo logging-es{,-ops}{,-cluster}{,.logging.svc.cluster.local}); do certs=$certs,dns:$cert; done; echo $certs
+  register: logging_es_certs
+  check_mode: no
+
+#- shell: index=2; certs=""; for cert in $(echo logging-es{,-ops}); do certs=$certs,DNS.$index=$cert; index=$(($index+1)); done; echo $certs
+#  register: elasticsearch_certs
+#  check_mode: no
+
+#- shell: index=2; certs=""; for cert in $(echo logging-es{,-ops}{,-cluster}{,.logging.svc.cluster.local}); do certs=$certs,DNS.$index=$cert; index=$(($index+1)); done; echo $certs
+#  register: logging_es_certs
+#  check_mode: no
+
+- name: Generate JKS chain for system.admin
+# include: generate_pkcs12.yaml component='system.admin'
+  include: generate_jks_chain.yaml component='system.admin'
+
+- name: Generate JKS chains
+# include: generate_pkcs12.yaml component={{node.name}} oid={{node.oid | default(False)}} chain_certs={{node.certs}}
+  include: generate_jks_chain.yaml component={{node.name}} oid={{node.oid | default(False)}} chain_certs={{node.certs}}
+  with_items:
+    - {name: 'elasticsearch', oid: True, certs: '{{elasticsearch_certs.stdout}}'}
+    - {name: 'logging-es', certs: '{{logging_es_certs.stdout}}'}
+  loop_control:
+    loop_var: node
+
+# This should be handled within the ES image instead... ---
+#- name: Copy jks script
+#  copy:
+#    src: generate-jks.sh
+#    dest: "{{etcd_generated_certs_dir}}/logging"
+
+#- name: Generate JKS chains
+#  template:
+#    src: job.j2
+#    dest: "{{mktemp.stdout}}/jks_job.yaml"
+
+#- name: kick off job
+#  shell: >
+#    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f {{mktemp.stdout}}/jks_job.yaml -n {{logging_namespace}}
+#  register: podoutput
+
+#- shell: >
+#    echo {{podoutput.stdout}} | awk -v podname='\\\".*\\\"' '{print $2}'
+#  register: podname
+
+#- action: shell >
+#    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig oc get pod/{{podname.stdout}} -o go-template='{{ '{{' }}index .status "phase"{{ '}}' }}' -n {{logging_namespace}}
+#  register: result
+#  until: result.stdout.find("Succeeded") != -1
+#  retries: 5
+#  delay: 10
+# --- This should be handled within the ES image instead...
+
+- name: Generate proxy session
+  shell: tr -dc 'a-zA-Z0-9' < /dev/urandom | head -c 200
+  register: session_secret
+  check_mode: no
+
+- name: Generate oauth client secret
+  shell: tr -dc 'a-zA-Z0-9' < /dev/urandom | head -c 64
+  register: oauth_secret
+  check_mode: no
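Every component certificate above is signed by the role's own CA in {{generated_certs_dir}}. A quick sanity check, not part of this commit, that a generated cert chains back to that CA (standard OpenSSL CLI; the task name and chosen cert are just examples):

    - name: Verify a generated cert against the CA (illustrative)
      command: >
        openssl verify -CAfile {{generated_certs_dir}}/ca.crt
        {{generated_certs_dir}}/system.admin.crt
      changed_when: false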
diff --git a/roles/openshift_logging/tasks/generate_clusterrolebindings.yaml b/roles/openshift_logging/tasks/generate_clusterrolebindings.yaml
new file mode 100644
index 000000000..ffd5f1e00
--- /dev/null
+++ b/roles/openshift_logging/tasks/generate_clusterrolebindings.yaml
@@ -0,0 +1,12 @@
+---
+- name: Generate ClusterRoleBindings
+  template: src=clusterrolebinding.j2 dest={{mktemp.stdout}}/templates/logging-15-{{obj_name}}-clusterrolebinding.yaml
+  vars:
+    acct_name: aggregated-logging-elasticsearch
+    obj_name: rolebinding-reader
+    crb_usernames: ["system:serviceaccount:{{openshift_logging_namespace}}:{{acct_name}}"]
+    subjects:
+      - kind: ServiceAccount
+        name: "{{acct_name}}"
+        namespace: "{{openshift_logging_namespace}}"
+  check_mode: no
diff --git a/roles/openshift_logging/tasks/generate_clusterroles.yaml b/roles/openshift_logging/tasks/generate_clusterroles.yaml
new file mode 100644
index 000000000..8b0ef377a
--- /dev/null
+++ b/roles/openshift_logging/tasks/generate_clusterroles.yaml
@@ -0,0 +1,10 @@
+---
+- name: Generate ClusterRole rolebinding-reader
+  template: src=clusterrole.j2 dest={{mktemp.stdout}}/templates/logging-10-{{obj_name}}-clusterrole.yaml
+  vars:
+    obj_name: rolebinding-reader
+    rules:
+      - resources: [clusterrolebindings]
+        verbs:
+          - get
+  check_mode: no
diff --git a/roles/openshift_logging/tasks/generate_configmaps.yaml b/roles/openshift_logging/tasks/generate_configmaps.yaml
new file mode 100644
index 000000000..86882a5da
--- /dev/null
+++ b/roles/openshift_logging/tasks/generate_configmaps.yaml
@@ -0,0 +1,103 @@
+---
+- block:
+    - copy:
+        src: elasticsearch-logging.yml
+        dest: "{{mktemp.stdout}}/elasticsearch-logging.yml"
+      when: es_logging_contents is undefined
+
+    - copy:
+        src: elasticsearch.yml
+        dest: "{{mktemp.stdout}}/elasticsearch.yml"
+      when: es_config_contents is undefined
+
+    - lineinfile:
+        dest: "{{mktemp.stdout}}/elasticsearch.yml"
+        regexp: '^openshift\.operations\.allow_cluster_reader(.)*$'
+        line: "\nopenshift.operations.allow_cluster_reader: {{openshift_logging_es_ops_allow_cluster_reader | lower}}"
+      when: es_config_contents is undefined
+
+    - copy:
+        content: "{{es_logging_contents}}"
+        dest: "{{mktemp.stdout}}/elasticsearch-logging.yml"
+      when: es_logging_contents is defined
+
+    - copy:
+        content: "{{es_config_contents}}"
+        dest: "{{mktemp.stdout}}/elasticsearch.yml"
+      when: es_config_contents is defined
+
+    - shell: >
+        {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create configmap logging-elasticsearch
+        --from-file=logging.yml={{mktemp.stdout}}/elasticsearch-logging.yml --from-file=elasticsearch.yml={{mktemp.stdout}}/elasticsearch.yml -o yaml --dry-run
+      register: es_configmap
+
+    - copy:
+        content: "{{es_configmap.stdout}}"
+        dest: "{{mktemp.stdout}}/templates/logging-elasticsearch-configmap.yaml"
+      when: es_configmap.stdout is defined
+  check_mode: no
+
+- block:
+    - copy:
+        src: curator.yml
+        dest: "{{mktemp.stdout}}/curator.yml"
+      when: curator_config_contents is undefined
+
+    - copy:
+        content: "{{curator_config_contents}}"
+        dest: "{{mktemp.stdout}}/curator.yml"
+      when: curator_config_contents is defined
+
+    - shell: >
+        {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create configmap logging-curator
+        --from-file=config.yaml={{mktemp.stdout}}/curator.yml -o yaml --dry-run
+      register: curator_configmap
+
+    - copy:
+        content: "{{curator_configmap.stdout}}"
+        dest: "{{mktemp.stdout}}/templates/logging-curator-configmap.yaml"
+      when: curator_configmap.stdout is defined
+  check_mode: no
+
+- block:
+    - copy:
+        src: fluent.conf
+        dest: "{{mktemp.stdout}}/fluent.conf"
+      when: fluentd_config_contents is undefined
+
+    - copy:
+        src: fluentd-throttle-config.yaml
+        dest: "{{mktemp.stdout}}/fluentd-throttle-config.yaml"
+      when: fluentd_throttle_contents is undefined
+
+    - copy:
+        src: secure-forward.conf
+        dest: "{{mktemp.stdout}}/secure-forward.conf"
+      when: fluentd_secureforward_contents is undefined
+
+    - copy:
+        content: "{{fluentd_config_contents}}"
+        dest: "{{mktemp.stdout}}/fluent.conf"
+      when: fluentd_config_contents is defined
+
+    - copy:
+        content: "{{fluentd_throttle_contents}}"
+        dest: "{{mktemp.stdout}}/fluentd-throttle-config.yaml"
+      when: fluentd_throttle_contents is defined
+
+    - copy:
+        content: "{{fluentd_secureforward_contents}}"
+        dest: "{{mktemp.stdout}}/secure-forward.conf"
+      when: fluentd_secureforward_contents is defined
+
+    - shell: >
+        {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create configmap logging-fluentd
+        --from-file=fluent.conf={{mktemp.stdout}}/fluent.conf --from-file=throttle-config.yaml={{mktemp.stdout}}/fluentd-throttle-config.yaml
+        --from-file=secure-forward.conf={{mktemp.stdout}}/secure-forward.conf -o yaml --dry-run
+      register: fluentd_configmap
+
+    - copy:
+        content: "{{fluentd_configmap.stdout}}"
+        dest: "{{mktemp.stdout}}/templates/logging-fluentd-configmap.yaml"
+      when: fluentd_configmap.stdout is defined
+  check_mode: no
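Each configmap above is rendered with the client's `-o yaml --dry-run` trick: the manifest is generated locally, captured via `register`, and written into {{mktemp.stdout}}/templates for the single `oc apply` pass in install_logging.yaml. The same pattern in isolation — the configmap name and literal are made up for illustration:

    - shell: >
        {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
        create configmap example --from-literal=setting=value -o yaml --dry-run
      register: example_configmap

    - copy:
        content: "{{example_configmap.stdout}}"
        dest: "{{mktemp.stdout}}/templates/example-configmap.yaml"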
"logging-{{component}}" + deploy_name: "{{deploy_name_prefix}}-abc123" + image: "{{openshift_logging_image_prefix}}logging-elasticsearch:{{openshift_logging_image_version}}" + es_cluster_name: "{{component}}" + check_mode: no + +- name: Generate curator deploymentconfig + template: src=curator.j2 dest={{mktemp.stdout}}/logging-curator-dc.yaml + vars: + component: curator + deploy_name: "logging-{{component}}" + image: "{{openshift_logging_image_prefix}}logging-curator:{{openshift_logging_image_version}}" + check_mode: no + +- name: Generate OPS curator deploymentconfig + template: src=curator.j2 dest={{mktemp.stdout}}/logging-curator-ops-dc.yaml + vars: + component: curator-ops + deploy_name: "logging-{{component}}" + image: "{{openshift_logging_image_prefix}}logging-curator:{{openshift_logging_image_version}}" + openshift_logging_es_host: logging-es-ops + check_mode: no diff --git a/roles/openshift_logging/tasks/generate_jks_chain.yaml b/roles/openshift_logging/tasks/generate_jks_chain.yaml new file mode 100644 index 000000000..14ffdc51f --- /dev/null +++ b/roles/openshift_logging/tasks/generate_jks_chain.yaml @@ -0,0 +1,60 @@ +--- +- debug: msg="certs are {{chain_certs}} and oid is {{oid}}" + when: chain_certs is defined and oid is defined + +- debug: msg="certs are {{chain_certs}}" + when: chain_certs is defined and oid is undefined + +- name: Build extensions with certs + shell: echo "{{chain_certs}}{{ (oid) | ternary(',oid:1.2.3.4.5.5','') }}" + register: cert_ext + when: chain_certs is defined and oid is defined + check_mode: no + +- debug: msg="extensions are {{cert_ext.stdout}}" + when: cert_ext.stdout is defined + +- shell: > + echo {{ (cert_ext.stdout is defined) | ternary( '-ext san=dns:localhost,ip:127.0.0.1','') }}{{ (cert_ext.stdout is defined) | ternary( cert_ext.stdout, '') }} + register: extensions + check_mode: no + +- name: Checking for {{component}}.jks ... + stat: path="{{generated_certs_dir}}/{{component}}.jks" + register: jks_file + check_mode: no + +- name: Checking for truststore... 
+ stat: path="{{generated_certs_dir}}/truststore.jks" + register: jks_truststore + check_mode: no + +- block: + - shell: > + keytool -genkey -alias {{component}} -keystore {{generated_certs_dir}}/{{component}}.jks -keypass kspass -storepass kspass + -keyalg RSA -keysize 2048 -validity 712 -dname "CN={{component}}, OU=OpenShift, O=Logging" {{extensions.stdout}} + + - shell: > + keytool -certreq -alias {{component}} -keystore {{generated_certs_dir}}/{{component}}.jks -storepass kspass + -file {{generated_certs_dir}}/{{component}}-jks.csr -keyalg RSA -dname "CN={{component}}, OU=OpenShift, O=Logging" {{extensions.stdout}} + + - shell: > + openssl ca -in {{generated_certs_dir}}/{{component}}-jks.csr -notext -out {{generated_certs_dir}}/{{component}}-jks.crt + -config {{generated_certs_dir}}/signing.conf -extensions v3_req -batch -extensions server_ext + + - shell: > + keytool -import -file {{generated_certs_dir}}/ca.crt -keystore {{generated_certs_dir}}/{{component}}.jks + -storepass kspass -noprompt -alias sig-ca + + - shell: > + keytool -import -file {{generated_certs_dir}}/{{component}}-jks.crt -keystore {{generated_certs_dir}}/{{component}}.jks + -storepass kspass -noprompt -alias {{component}} + + when: not jks_file.stat.exists + check_mode: no + +- block: + - shell: > + keytool -import -file {{generated_certs_dir}}/ca.crt -keystore {{generated_certs_dir}}/truststore.jks -storepass tspass -noprompt -alias sig-ca + when: not jks_truststore.stat.exists + check_mode: no diff --git a/roles/openshift_logging/tasks/generate_pems.yaml b/roles/openshift_logging/tasks/generate_pems.yaml new file mode 100644 index 000000000..289b72ea6 --- /dev/null +++ b/roles/openshift_logging/tasks/generate_pems.yaml @@ -0,0 +1,36 @@ +--- +- name: Checking for {{component}}.key + stat: path="{{generated_certs_dir}}/{{component}}.key" + register: key_file + check_mode: no + +- name: Checking for {{component}}.crt + stat: path="{{generated_certs_dir}}/{{component}}.crt" + register: cert_file + check_mode: no + +- name: Creating cert req for {{component}} + command: > + openssl req -out {{generated_certs_dir}}/{{component}}.csr -new -newkey rsa:2048 -keyout {{generated_certs_dir}}/{{component}}.key + -subj "/CN={{component}}/OU=OpenShift/O=Logging/subjectAltName=DNS.1=localhost{{cert_ext.stdout}}" -days 712 -nodes + when: + - not key_file.stat.exists + - cert_ext.stdout is defined + check_mode: no + +- name: Creating cert req for {{component}} + command: > + openssl req -out {{generated_certs_dir}}/{{component}}.csr -new -newkey rsa:2048 -keyout {{generated_certs_dir}}/{{component}}.key + -subj "/CN={{component}}/OU=OpenShift/O=Logging" -days 712 -nodes + when: + - not key_file.stat.exists + - cert_ext.stdout is undefined + check_mode: no + +- name: Sign cert request with CA for {{component}} + command: > + openssl ca -in {{generated_certs_dir}}/{{component}}.csr -notext -out {{generated_certs_dir}}/{{component}}.crt + -config {{generated_certs_dir}}/signing.conf -extensions v3_req -batch -extensions server_ext + when: + - not cert_file.stat.exists + check_mode: no diff --git a/roles/openshift_logging/tasks/generate_pkcs12.yaml b/roles/openshift_logging/tasks/generate_pkcs12.yaml new file mode 100644 index 000000000..dde65746f --- /dev/null +++ b/roles/openshift_logging/tasks/generate_pkcs12.yaml @@ -0,0 +1,24 @@ +--- +- debug: msg="certs are {{chain_certs}} and oid is {{oid}}" + when: chain_certs is defined and oid is defined + +- debug: msg="certs are {{chain_certs}}" + when: chain_certs is defined and oid is 
diff --git a/roles/openshift_logging/tasks/generate_pems.yaml b/roles/openshift_logging/tasks/generate_pems.yaml
new file mode 100644
index 000000000..289b72ea6
--- /dev/null
+++ b/roles/openshift_logging/tasks/generate_pems.yaml
@@ -0,0 +1,36 @@
+---
+- name: Checking for {{component}}.key
+  stat: path="{{generated_certs_dir}}/{{component}}.key"
+  register: key_file
+  check_mode: no
+
+- name: Checking for {{component}}.crt
+  stat: path="{{generated_certs_dir}}/{{component}}.crt"
+  register: cert_file
+  check_mode: no
+
+- name: Creating cert req for {{component}}
+  command: >
+    openssl req -out {{generated_certs_dir}}/{{component}}.csr -new -newkey rsa:2048 -keyout {{generated_certs_dir}}/{{component}}.key
+    -subj "/CN={{component}}/OU=OpenShift/O=Logging/subjectAltName=DNS.1=localhost{{cert_ext.stdout}}" -days 712 -nodes
+  when:
+    - not key_file.stat.exists
+    - cert_ext.stdout is defined
+  check_mode: no
+
+- name: Creating cert req for {{component}}
+  command: >
+    openssl req -out {{generated_certs_dir}}/{{component}}.csr -new -newkey rsa:2048 -keyout {{generated_certs_dir}}/{{component}}.key
+    -subj "/CN={{component}}/OU=OpenShift/O=Logging" -days 712 -nodes
+  when:
+    - not key_file.stat.exists
+    - cert_ext.stdout is undefined
+  check_mode: no
+
+- name: Sign cert request with CA for {{component}}
+  command: >
+    openssl ca -in {{generated_certs_dir}}/{{component}}.csr -notext -out {{generated_certs_dir}}/{{component}}.crt
+    -config {{generated_certs_dir}}/signing.conf -extensions v3_req -batch -extensions server_ext
+  when:
+    - not cert_file.stat.exists
+  check_mode: no
diff --git a/roles/openshift_logging/tasks/generate_pkcs12.yaml b/roles/openshift_logging/tasks/generate_pkcs12.yaml
new file mode 100644
index 000000000..dde65746f
--- /dev/null
+++ b/roles/openshift_logging/tasks/generate_pkcs12.yaml
@@ -0,0 +1,24 @@
+---
+- debug: msg="certs are {{chain_certs}} and oid is {{oid}}"
+  when: chain_certs is defined and oid is defined
+
+- debug: msg="certs are {{chain_certs}}"
+  when: chain_certs is defined and oid is undefined
+
+- name: Build extensions with certs
+  shell: echo "{{chain_certs}}{{ (oid) | ternary(',oid=1.2.3.4.5.5','') }}"
+  register: cert_ext
+  when: chain_certs is defined and oid is defined
+
+- debug: msg="extensions are {{cert_ext.stdout}}"
+  when: cert_ext.stdout is defined
+
+- include: generate_pems.yaml
+
+- local_action: stat path="{{mktemp.stdout}}/{{component}}.pkcs12"
+  register: pkcs_file
+  become: no
+
+- name: Generating pkcs12 chain for {{component}}
+  command: openssl pkcs12 -export -out {{generated_certs_dir}}/{{component}}.pkcs12 -inkey {{generated_certs_dir}}/{{component}}.key -in {{generated_certs_dir}}/{{component}}.crt -password pass:pass
+  when: not pkcs_file.stat.exists
diff --git a/roles/openshift_logging/tasks/generate_pvcs.yaml b/roles/openshift_logging/tasks/generate_pvcs.yaml
new file mode 100644
index 000000000..ee4416bbd
--- /dev/null
+++ b/roles/openshift_logging/tasks/generate_pvcs.yaml
@@ -0,0 +1,47 @@
+---
+- name: Init pool of PersistentVolumeClaim names
+  set_fact: es_pvc_pool={{es_pvc_pool|default([]) + [pvc_name]}}
+  vars:
+    pvc_name: "{{openshift_logging_es_pvc_prefix}}-{{item| int}}"
+    start: "{{es_pvc_names | map('regex_search',openshift_logging_es_pvc_prefix+'.*')|select('string')|list|length}}"
+  with_sequence: start={{start}} end={{ (start|int > openshift_logging_es_cluster_size - 1) | ternary(start, openshift_logging_es_cluster_size - 1)}}
+  when:
+    - openshift_logging_es_pvc_size | search('^\d.*')
+    - "{{ es_dc_names|default([]) | length < openshift_logging_es_cluster_size }}"
+  check_mode: no
+
+- name: Generating PersistentVolumeClaims
+  template: src=pvc.j2 dest={{mktemp.stdout}}/templates/logging-{{obj_name}}-pvc.yaml
+  vars:
+    obj_name: "{{claim_name}}"
+    size: "{{openshift_logging_es_pvc_size}}"
+    access_modes:
+      - ReadWriteOnce
+    pv_selector: "{{openshift_logging_es_pv_selector}}"
+  with_items:
+    - "{{es_pvc_pool | default([])}}"
+  loop_control:
+    loop_var: claim_name
+  when:
+    - not openshift_logging_es_pvc_dynamic
+    - es_pvc_pool is defined
+  check_mode: no
+
+- name: Generating PersistentVolumeClaims - Dynamic
+  template: src=pvc.j2 dest={{mktemp.stdout}}/templates/logging-{{obj_name}}-pvc.yaml
+  vars:
+    obj_name: "{{claim_name}}"
+    annotations:
+      volume.alpha.kubernetes.io/storage-class: "dynamic"
+    size: "{{openshift_logging_es_pvc_size}}"
+    access_modes:
+      - ReadWriteOnce
+    pv_selector: "{{openshift_logging_es_pv_selector}}"
+  with_items:
+    - "{{es_pvc_pool|default([])}}"
+  loop_control:
+    loop_var: claim_name
+  when:
+    - openshift_logging_es_pvc_dynamic
+    - es_pvc_pool is defined
+  check_mode: no
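How the PVC name pool fills in: `start` counts the existing claims that already match the prefix, and the sequence tops the pool up to the cluster size. A worked example with assumed values — openshift_logging_es_pvc_prefix=logging-es, openshift_logging_es_cluster_size=3, and one existing claim logging-es-0: `start` evaluates to 1, the sequence runs 1..2, and the pool becomes [logging-es-1, logging-es-2]. A debug task to inspect the result:

    - debug: msg="es_pvc_pool is {{es_pvc_pool | default([])}}"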
diff --git a/roles/openshift_logging/tasks/generate_rolebindings.yaml b/roles/openshift_logging/tasks/generate_rolebindings.yaml
new file mode 100644
index 000000000..02f81368d
--- /dev/null
+++ b/roles/openshift_logging/tasks/generate_rolebindings.yaml
@@ -0,0 +1,11 @@
+---
+- name: Generate RoleBindings
+  template: src=rolebinding.j2 dest={{mktemp.stdout}}/templates/logging-{{obj_name}}-rolebinding.yaml
+  vars:
+    obj_name: logging-elasticsearch-view-role
+    roleRef:
+      name: view
+    subjects:
+      - kind: ServiceAccount
+        name: aggregated-logging-elasticsearch
+  check_mode: no
diff --git a/roles/openshift_logging/tasks/generate_routes.yaml b/roles/openshift_logging/tasks/generate_routes.yaml
new file mode 100644
index 000000000..d280ac04c
--- /dev/null
+++ b/roles/openshift_logging/tasks/generate_routes.yaml
@@ -0,0 +1,20 @@
+---
+- name: Generating logging routes
+  template: src=route_reencrypt.j2 dest={{mktemp.stdout}}/templates/logging-{{route_info.name}}-route.yaml
+  tags: routes
+  vars:
+    obj_name: "{{route_info.name}}"
+    route_host: "{{route_info.host}}"
+    service_name: "{{route_info.name}}"
+    tls_ca_cert: "{{key_pairs | entry_from_named_pair('ca_file')| b64decode }}"
+    tls_dest_ca_cert: "{{key_pairs | entry_from_named_pair('ca_file')| b64decode }}"
+    labels:
+      component: support
+      logging-infra: support
+      provider: openshift
+  with_items:
+    - {name: logging-kibana, host: "{{openshift_logging_kibana_hostname}}"}
+    - {name: logging-kibana-ops, host: "{{openshift_logging_kibana_ops_hostname}}"}
+  loop_control:
+    loop_var: route_info
+  when: (route_info.name == 'logging-kibana-ops' and openshift_logging_use_ops) or route_info.name == 'logging-kibana'
diff --git a/roles/openshift_logging/tasks/generate_secrets.yaml b/roles/openshift_logging/tasks/generate_secrets.yaml
new file mode 100644
index 000000000..e20b88c0f
--- /dev/null
+++ b/roles/openshift_logging/tasks/generate_secrets.yaml
@@ -0,0 +1,73 @@
+---
+- name: Retrieving the cert to use when generating secrets for the logging components
+  slurp: src="{{generated_certs_dir}}/{{item.file}}"
+  register: key_pairs
+  with_items:
+    - { name: "ca_file", file: "ca.crt" }
+    - { name: "kibana_key", file: "system.logging.kibana.key"}
+    - { name: "kibana_cert", file: "system.logging.kibana.crt"}
+    - { name: "curator_key", file: "system.logging.curator.key"}
+    - { name: "curator_cert", file: "system.logging.curator.crt"}
+    - { name: "fluentd_key", file: "system.logging.fluentd.key"}
+    - { name: "fluentd_cert", file: "system.logging.fluentd.crt"}
+    - { name: "kibana_internal_key", file: "kibana-internal.key"}
+    - { name: "kibana_internal_cert", file: "kibana-internal.crt"}
+    - { name: "server_tls", file: "server-tls.json"}
+
+- name: Generating secrets for logging components
+  template: src=secret.j2 dest={{mktemp.stdout}}/templates/{{secret_name}}-secret.yaml
+  vars:
+    secret_name: logging-{{component}}
+    secret_key_file: "{{component}}_key"
+    secret_cert_file: "{{component}}_cert"
+    secrets:
+      - {key: ca, value: "{{key_pairs | entry_from_named_pair('ca_file')| b64decode }}"}
+      - {key: key, value: "{{key_pairs | entry_from_named_pair(secret_key_file)| b64decode }}"}
+      - {key: cert, value: "{{key_pairs | entry_from_named_pair(secret_cert_file)| b64decode }}"}
+    secret_keys: ["ca", "cert", "key"]
+  with_items:
+    - kibana
+    - curator
+    - fluentd
+  loop_control:
+    loop_var: component
+  when: secret_name not in openshift_logging_facts.{{component}}.secrets or
+        secret_keys | difference(openshift_logging_facts.{{component}}.secrets["{{secret_name}}"]["keys"]) | length != 0
+  check_mode: no
+
+- name: Generating secrets for kibana proxy
+  template: src=secret.j2 dest={{mktemp.stdout}}/templates/{{secret_name}}-secret.yaml
+  vars:
+    secret_name: logging-kibana-proxy
+    secrets:
+      - {key: oauth-secret, value: "{{oauth_secret.stdout}}"}
+      - {key: session-secret, value: "{{session_secret.stdout}}"}
+      - {key: server-key, value: "{{kibana_key_file}}"}
+      - {key: server-cert, value: "{{kibana_cert_file}}"}
+      - {key: server-tls, value: "{{server_tls_file}}"}
+    secret_keys: ["server-tls.json", "server-key", "session-secret", "oauth-secret", "server-cert"]
+    kibana_key_file: "{{key_pairs | entry_from_named_pair('kibana_internal_key')| b64decode }}"
+    kibana_cert_file: "{{key_pairs | entry_from_named_pair('kibana_internal_cert')| b64decode }}"
+    server_tls_file: "{{key_pairs | entry_from_named_pair('server_tls')| b64decode }}"
+  when: secret_name not in openshift_logging_facts.kibana.secrets or
+        secret_keys | difference(openshift_logging_facts.kibana.secrets["{{secret_name}}"]["keys"]) | length != 0
+  check_mode: no
+
+- name: Generating secrets for elasticsearch
+  command: >
+    {{openshift.common.client_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig secrets new {{secret_name}}
+    key={{generated_certs_dir}}/logging-es.jks truststore={{generated_certs_dir}}/truststore.jks
+    searchguard.key={{generated_certs_dir}}/elasticsearch.jks searchguard.truststore={{generated_certs_dir}}/truststore.jks
+    admin-key={{generated_certs_dir}}/system.admin.key admin-cert={{generated_certs_dir}}/system.admin.crt
+    admin-ca={{generated_certs_dir}}/ca.crt admin.jks={{generated_certs_dir}}/system.admin.jks -o yaml
+  vars:
+    secret_name: logging-elasticsearch
+    secret_keys: ["admin-cert", "searchguard.key", "admin-ca", "key", "truststore", "admin-key"]
+  register: logging_es_secret
+  when: secret_name not in openshift_logging_facts.elasticsearch.secrets or
+        secret_keys | difference(openshift_logging_facts.elasticsearch.secrets["{{secret_name}}"]["keys"]) | length != 0
+  check_mode: no
+
+- copy: content="{{logging_es_secret.stdout}}" dest={{mktemp.stdout}}/templates/logging-elasticsearch-secret.yaml
+  when: logging_es_secret.stdout is defined
+  check_mode: no
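`entry_from_named_pair` is not a stock Ansible filter; it appears to ship with this role's filter plugins and returns the slurped `content` of the key_pairs result whose `name` matches, which is why every use above pipes it through `b64decode`. An illustrative standalone use (the chosen entry name is just an example):

    - debug: msg="{{ key_pairs | entry_from_named_pair('kibana_cert') | b64decode }}"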
diff --git a/roles/openshift_logging/tasks/generate_serviceaccounts.yaml b/roles/openshift_logging/tasks/generate_serviceaccounts.yaml
new file mode 100644
index 000000000..7b956e2e0
--- /dev/null
+++ b/roles/openshift_logging/tasks/generate_serviceaccounts.yaml
@@ -0,0 +1,13 @@
+---
+- name: Generating serviceaccounts
+  template: src=serviceaccount.j2 dest={{mktemp.stdout}}/templates/logging-{{component}}-sa.yaml
+  vars:
+    obj_name: aggregated-logging-{{component}}
+  with_items:
+    - elasticsearch
+    - kibana
+    - fluentd
+    - curator
+  loop_control:
+    loop_var: component
+  check_mode: no
diff --git a/roles/openshift_logging/tasks/generate_services.yaml b/roles/openshift_logging/tasks/generate_services.yaml
new file mode 100644
index 000000000..95f113577
--- /dev/null
+++ b/roles/openshift_logging/tasks/generate_services.yaml
@@ -0,0 +1,81 @@
+---
+- name: Generating logging-es service
+  template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-es-svc.yaml
+  vars:
+    obj_name: logging-es
+    ports:
+      - {port: 9200, targetPort: restapi}
+    labels:
+      logging-infra: support
+    selector:
+      provider: openshift
+      component: es
+  check_mode: no
+
+- name: Generating logging-es-cluster service
+  template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-es-cluster-svc.yaml
+  vars:
+    obj_name: logging-es-cluster
+    ports:
+      - {port: 9300}
+    labels:
+      logging-infra: support
+    selector:
+      provider: openshift
+      component: es
+  check_mode: no
+
+- name: Generating logging-kibana service
+  template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-kibana-svc.yaml
+  vars:
+    obj_name: logging-kibana
+    ports:
+      - {port: 443, targetPort: oaproxy}
+    labels:
+      logging-infra: support
+    selector:
+      provider: openshift
+      component: kibana
+  check_mode: no
+
+- name: Generating logging-es-ops service
+  template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-es-ops-svc.yaml
+  vars:
+    obj_name: logging-es-ops
+    ports:
+      - {port: 9200, targetPort: restapi}
+    labels:
+      logging-infra: support
+    selector:
+      provider: openshift
+      component: es-ops
+  when: openshift_logging_use_ops
+  check_mode: no
+
+- name: Generating logging-es-ops-cluster service
+  template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-es-ops-cluster-svc.yaml
+  vars:
+    obj_name: logging-es-ops-cluster
+    ports:
+      - {port: 9300}
+    labels:
+      logging-infra: support
+    selector:
+      provider: openshift
+      component: es-ops
+  when: openshift_logging_use_ops
+  check_mode: no
+
+- name: Generating logging-kibana-ops service
+  template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-kibana-ops-svc.yaml
+  vars:
+    obj_name: logging-kibana-ops
+    ports:
+      - {port: 443, targetPort: oaproxy}
+    labels:
+      logging-infra: support
+    selector:
+      provider: openshift
+      component: kibana-ops
+  when: openshift_logging_use_ops
+  check_mode: no
diff --git a/roles/openshift_logging/tasks/install_curator.yaml b/roles/openshift_logging/tasks/install_curator.yaml
new file mode 100644
index 000000000..165a9d14e
--- /dev/null
+++ b/roles/openshift_logging/tasks/install_curator.yaml
@@ -0,0 +1,27 @@
+---
+- name: Generate curator deploymentconfig
+  template: src=curator.j2 dest={{mktemp.stdout}}/templates/logging-curator-dc.yaml
+  vars:
+    component: curator
+    logging_component: curator
+    deploy_name: "logging-{{component}}"
+    image: "{{openshift_logging_image_prefix}}logging-curator:{{openshift_logging_image_version}}"
+    es_host: logging-es
+    es_port: "{{openshift_logging_es_port}}"
+    curator_cpu_limit: "{{openshift_logging_curator_cpu_limit}}"
+    curator_memory_limit: "{{openshift_logging_curator_memory_limit}}"
+  check_mode: no
+
+- name: Generate OPS curator deploymentconfig
+  template: src=curator.j2 dest={{mktemp.stdout}}/templates/logging-curator-ops-dc.yaml
+  vars:
+    component: curator-ops
+    logging_component: curator
+    deploy_name: "logging-{{component}}"
+    image: "{{openshift_logging_image_prefix}}logging-curator:{{openshift_logging_image_version}}"
+    es_host: logging-es-ops
+    es_port: "{{openshift_logging_es_ops_port}}"
+    curator_cpu_limit: "{{openshift_logging_curator_ops_cpu_limit}}"
+    curator_memory_limit: "{{openshift_logging_curator_ops_memory_limit}}"
+  when: openshift_logging_use_ops
+  check_mode: no
diff --git a/roles/openshift_logging/tasks/install_elasticsearch.yaml b/roles/openshift_logging/tasks/install_elasticsearch.yaml
new file mode 100644
index 000000000..c5d8d3537
--- /dev/null
+++ b/roles/openshift_logging/tasks/install_elasticsearch.yaml
@@ -0,0 +1,105 @@
+---
+- name: Generate PersistentVolumeClaims
+  include: "{{ role_path}}/tasks/generate_pvcs.yaml"
+  vars:
+    es_pvc_names: "{{openshift_logging_facts.elasticsearch.pvcs.keys()}}"
+    es_dc_names: "{{openshift_logging_facts.elasticsearch.deploymentconfigs.keys()}}"
+  when:
+    - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | length < openshift_logging_es_cluster_size }}"
+
+- name: Init pool of DeploymentConfig names for Elasticsearch
+  set_fact: es_dc_pool={{es_dc_pool | default([]) + [deploy_name]}}
+  vars:
+    component: es
+    es_cluster_name: "{{component}}"
+    deploy_name_prefix: "logging-{{component}}"
+    deploy_name: "{{deploy_name_prefix}}-{{'abcdefghijklmnopqrstuvwxyz0123456789'|random_word(8)}}"
+  with_sequence: count={{(openshift_logging_es_cluster_size - openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | length) | abs}}
+  when:
+    - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | length < openshift_logging_es_cluster_size }}"
+  check_mode: no
+
+- name: Generate Elasticsearch DeploymentConfig
+  template: src=es.j2 dest={{mktemp.stdout}}/templates/logging-{{deploy_name}}-dc.yaml
+  vars:
+    component: es
+    logging_component: elasticsearch
+    deploy_name_prefix: "logging-{{component}}"
+    image: "{{openshift_logging_image_prefix}}logging-elasticsearch:{{openshift_logging_image_version}}"
+    es_cluster_name: "{{component}}"
+    es_cpu_limit: "{{openshift_logging_es_cpu_limit}}"
+    es_memory_limit: "{{openshift_logging_es_memory_limit}}"
+    volume_names: "{{es_pvc_pool | default([])}}"
+    pvc_claim: "{{(volume_names | length > item.0) | ternary(volume_names[item.0], None)}}"
+    deploy_name: "{{item.1}}"
+  with_indexed_items:
+    - "{{es_dc_pool | default([])}}"
+  check_mode: no
+  when:
+    - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | length < openshift_logging_es_cluster_size }}"
+
+# --------- Tasks for Operation clusters ---------
+
+- name: Validate Elasticsearch cluster size for Ops
+  fail: msg="openshift_logging_es_ops_cluster_size may not differ by more than 1 from the number of Elasticsearch nodes already deployed"
+  vars:
+    es_dcs: "{{openshift_logging_facts.elasticsearch_ops.deploymentconfigs}}"
+    cluster_size: "{{openshift_logging_es_ops_cluster_size}}"
+  when:
+    - openshift_logging_use_ops
+    - "{{es_dcs | length - openshift_logging_es_ops_cluster_size | abs > 1}}"
+  check_mode: no
+
+- name: Generate PersistentVolumeClaims for Ops
+  include: "{{ role_path}}/tasks/generate_pvcs.yaml"
+  vars:
+    es_pvc_names: "{{openshift_logging_facts.elasticsearch_ops.pvcs.keys()}}"
+    es_dc_names: "{{openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys()}}"
+    openshift_logging_es_pvc_prefix: "{{openshift_logging_es_ops_pvc_prefix}}"
+    openshift_logging_es_cluster_size: "{{openshift_logging_es_ops_cluster_size}}"
+    openshift_logging_es_pvc_size: "{{openshift_logging_es_ops_pvc_size}}"
+    openshift_logging_es_pvc_dynamic: "{{openshift_logging_es_ops_pvc_dynamic}}"
+    openshift_logging_es_pv_selector: "{{openshift_logging_es_ops_pv_selector}}"
+  when:
+    - openshift_logging_use_ops
+    - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | length < openshift_logging_es_ops_cluster_size }}"
+  check_mode: no
+
+- name: Init pool of DeploymentConfig names for Elasticsearch for Ops
+  set_fact: es_dc_pool_ops={{es_dc_pool_ops | default([]) + [deploy_name]}}
+  vars:
+    component: es-ops
+    es_cluster_name: "{{component}}"
+    deploy_name_prefix: "logging-{{component}}"
+    deploy_name: "{{deploy_name_prefix}}-{{'abcdefghijklmnopqrstuvwxyz0123456789'|random_word(8)}}"
+    cluster_size: "{{openshift_logging_es_ops_cluster_size}}"
+  with_sequence: count={{openshift_logging_es_ops_cluster_size - openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | length}}
+  when:
+    - openshift_logging_use_ops
+    - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | length < openshift_logging_es_ops_cluster_size }}"
+  check_mode: no
+
+- name: Generate Elasticsearch DeploymentConfig for Ops
+  template: src=es.j2 dest={{mktemp.stdout}}/templates/logging-{{deploy_name}}-dc.yaml
+  vars:
+    component: es-ops
+    logging_component: elasticsearch
+    deploy_name_prefix: "logging-{{component}}"
+    image: "{{openshift_logging_image_prefix}}logging-elasticsearch:{{openshift_logging_image_version}}"
+    volume_names: "{{es_pvc_pool | default([])}}"
+    pvc_claim: "{{(volume_names | length > item.0) | ternary(volume_names[item.0], None)}}"
+    deploy_name: "{{item.1}}"
+    es_cluster_name: "{{component}}"
+    es_cpu_limit: "{{openshift_logging_es_ops_cpu_limit}}"
+    es_memory_limit: "{{openshift_logging_es_ops_memory_limit}}"
+    es_node_quorum: "{{es_ops_node_quorum}}"
+    es_recover_after_nodes: "{{es_ops_recover_after_nodes}}"
+    es_recover_expected_nodes: "{{es_ops_recover_expected_nodes}}"
+    openshift_logging_es_recover_after_time: "{{openshift_logging_es_ops_recover_after_time}}"
+  with_indexed_items:
+    - "{{es_dc_pool_ops | default([])}}"
+  when:
+    - openshift_logging_use_ops
+    - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | length < openshift_logging_es_ops_cluster_size }}"
+  check_mode: no
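Elasticsearch deployment names are generated, not fixed: each new DC gets the `logging-es` or `logging-es-ops` prefix plus an eight-character suffix drawn from the given charset via `random_word`, another filter carried by this role rather than core Ansible, and only enough names to reach the requested cluster size are created. For illustration:

    - debug: msg="{{'abcdefghijklmnopqrstuvwxyz0123456789' | random_word(8)}}"
      # prints something like "4fg81p2k" (assumed sample output)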
"{{openshift_logging_image_prefix}}logging-kibana:{{openshift_logging_image_version}}" + proxy_image: "{{openshift_logging_image_prefix}}logging-auth-proxy:{{openshift_logging_image_version}}" + es_host: logging-es-ops + es_port: "{{openshift_logging_es_ops_port}}" + kibana_cpu_limit: "{{openshift_logging_kibana_ops_cpu_limit }}" + kibana_memory_limit: "{{openshift_logging_kibana_ops_memory_limit }}" + kibana_proxy_cpu_limit: "{{openshift_logging_kibana_ops_proxy_cpu_limit }}" + kibana_proxy_memory_limit: "{{openshift_logging_kibana_ops_proxy_memory_limit }}" + when: openshift_logging_use_ops + check_mode: no diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml new file mode 100644 index 000000000..591f11476 --- /dev/null +++ b/roles/openshift_logging/tasks/install_logging.yaml @@ -0,0 +1,49 @@ +--- +- name: Gather OpenShift Logging Facts + openshift_logging_facts: + oc_bin: "{{openshift.common.client_binary}}" + admin_kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig" + openshift_logging_namespace: "{{openshift_logging_namespace}}" + tags: logging_facts + check_mode: no + +- name: Validate Elasticsearch cluster size + fail: msg="The openshift_logging_es_cluster_size may not be scaled down more than 1 less (or 0) the number of Elasticsearch nodes already deployed" + when: "{{openshift_logging_facts.elasticsearch.deploymentconfigs | length - openshift_logging_es_cluster_size | abs > 1}}" + +- name: Install logging + include: "{{ role_path }}/tasks/install_{{ install_component }}.yaml" + when: openshift_hosted_logging_install | default(true) | bool + with_items: + - support + - elasticsearch + - kibana + - curator + - fluentd + loop_control: + loop_var: install_component + +- name: Register API objects from generated templates + shell: ls -d -1 {{mktemp.stdout}}/templates/* | sort + register: logging_objects + check_mode: no + +- name: Creating API objects from generated templates + command: > + {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig apply -f {{file}} -n {{openshift_logging_namespace}} + with_items: "{{logging_objects.stdout_lines}}" + loop_control: + loop_var: file + when: not ansible_check_mode + +- name: Printing out objects to create + debug: msg="{{lookup('file', file)|quote}}" + with_fileglob: + - "{{mktemp.stdout}}/templates/*.yaml" + loop_control: + loop_var: file + when: ansible_check_mode + +- name: Scaling up cluster + include: start_cluster.yaml + when: start_cluster | default(true) | bool diff --git a/roles/openshift_logging/tasks/install_support.yaml b/roles/openshift_logging/tasks/install_support.yaml new file mode 100644 index 000000000..71979a7d8 --- /dev/null +++ b/roles/openshift_logging/tasks/install_support.yaml @@ -0,0 +1,52 @@ +--- +# This is the base configuration for installing the other components +- name: Check for logging project already exists + command: > + {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get project {{openshift_logging_namespace}} --no-headers + register: logging_project_result + ignore_errors: yes + when: not ansible_check_mode + +- name: "Create logging project" + command: > + {{ openshift.common.admin_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-project {{openshift_logging_namespace}} + when: not ansible_check_mode and "not found" in logging_project_result.stderr + +- name: Create logging cert directory + file: path={{openshift.common.config_base}}/logging state=directory mode=0755 + 
changed_when: False + check_mode: no + +- include: generate_certs.yaml + vars: + generated_certs_dir: "{{openshift.common.config_base}}/logging" + +- name: Create temp directory for all our templates + file: path={{mktemp.stdout}}/templates state=directory mode=0755 + changed_when: False + check_mode: no + +- include: generate_secrets.yaml + vars: + generated_certs_dir: "{{openshift.common.config_base}}/logging" + +- include: generate_configmaps.yaml + +- include: generate_services.yaml + +- name: Generate kibana-proxy oauth client + template: src=oauth-client.j2 dest={{mktemp.stdout}}/templates/oauth-client.yaml + vars: + secret: "{{oauth_secret.stdout}}" + when: oauth_secret.stdout is defined + check_mode: no + +- include: generate_clusterroles.yaml + +- include: generate_rolebindings.yaml + +- include: generate_clusterrolebindings.yaml + +- include: generate_serviceaccounts.yaml + +- include: generate_routes.yaml diff --git a/roles/openshift_logging/tasks/label_node.yaml b/roles/openshift_logging/tasks/label_node.yaml new file mode 100644 index 000000000..55cfea38c --- /dev/null +++ b/roles/openshift_logging/tasks/label_node.yaml @@ -0,0 +1,27 @@ +--- +- shell: > + {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get node {{host}} + --template='{{ '{{index .metadata.labels "' }}{{label}}{{ '"}}' }}' + register: label_value + failed_when: label_value.rc == 1 and 'exists' not in label_value.stderr + when: not ansible_check_mode + +- shell: > + {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig label node {{host}} {{label}}={{value}} --overwrite + register: label_result + failed_when: label_result.rc == 1 and 'exists' not in label_result.stderr + when: + - value is defined + - label_value.stdout is defined + - label_value.stdout != value + - unlabel is not defined or not unlabel + - not ansible_check_mode + +- shell: > + {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig label node {{host}} {{label}}- + register: label_result + failed_when: label_result.rc == 1 and 'exists' not in label_result.stderr + when: + - unlabel is defined + - unlabel + - not ansible_check_mode diff --git a/roles/openshift_logging/tasks/main.yaml b/roles/openshift_logging/tasks/main.yaml new file mode 100644 index 000000000..b64c24ade --- /dev/null +++ b/roles/openshift_logging/tasks/main.yaml @@ -0,0 +1,35 @@ +--- +- name: Create temp directory for doing work in + command: mktemp -d /tmp/openshift-logging-ansible-XXXXXX + register: mktemp + changed_when: False + check_mode: no + tags: logging_init + +- debug: msg="Created temp dir {{mktemp.stdout}}" + +- name: Copy the admin client config(s) + command: > + cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig + changed_when: False + check_mode: no + tags: logging_init + +- include: "{{ role_path }}/tasks/install_logging.yaml" + when: openshift_logging_install_logging | default(false) | bool + +- include: "{{ role_path }}/tasks/upgrade_logging.yaml" + when: openshift_logging_upgrade_logging | default(false) | bool + +- include: "{{ role_path }}/tasks/delete_logging.yaml" + when: + - not openshift_logging_install_logging | default(false) | bool + - not openshift_logging_upgrade_logging | default(false) | bool + +- name: Delete temp directory + file: + name: "{{ mktemp.stdout }}" + state: absent + tags: logging_cleanup + changed_when: False + check_mode: no diff --git a/roles/openshift_logging/tasks/procure_server_certs.yaml 
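tasks/main.yaml above is the role's entry point: it stages a temp kubeconfig, then dispatches to install, upgrade, or delete. A minimal sketch of an install invocation — the playbook, host group, and hostnames are examples, not defaults from this commit:

    ---
    # Hypothetical playbook: drives the install path in tasks/main.yaml.
    - hosts: masters
      roles:
        - role: openshift_logging
          openshift_logging_install_logging: true
          openshift_logging_kibana_hostname: kibana.example.com
          openshift_logging_kibana_ops_hostname: kibana-ops.example.com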
diff --git a/roles/openshift_logging/tasks/procure_server_certs.yaml b/roles/openshift_logging/tasks/procure_server_certs.yaml
new file mode 100644
index 000000000..2c046d6e6
--- /dev/null
+++ b/roles/openshift_logging/tasks/procure_server_certs.yaml
@@ -0,0 +1,54 @@
+---
+- name: Checking for {{ cert_info.procure_component }}.crt
+  stat: path="{{generated_certs_dir}}/{{ cert_info.procure_component }}.crt"
+  register: component_cert_file
+  check_mode: no
+
+- name: Checking for {{ cert_info.procure_component }}.key
+  stat: path="{{generated_certs_dir}}/{{ cert_info.procure_component }}.key"
+  register: component_key_file
+  check_mode: no
+
+- name: Trying to discover server cert variable name for {{ cert_info.procure_component }}
+  command: echo "{{ lookup('env', '{{cert_info.procure_component}}' + '_crt') }}"
+  register: procure_component_crt
+  when: cert_info.hostnames is undefined and {{ cert_info.procure_component }}_crt is defined and {{ cert_info.procure_component }}_key is defined
+  check_mode: no
+
+- name: Trying to discover the server key variable name for {{ cert_info.procure_component }}
+  command: echo "{{ lookup('env', '{{cert_info.procure_component}}' + '_key') }}"
+  register: procure_component_key
+  when: cert_info.hostnames is undefined and {{ cert_info.procure_component }}_crt is defined and {{ cert_info.procure_component }}_key is defined
+  check_mode: no
+
+- name: Creating signed server cert and key for {{ cert_info.procure_component }}
+  command: >
+    {{ openshift.common.admin_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig ca create-server-cert
+    --key={{generated_certs_dir}}/{{cert_info.procure_component}}.key --cert={{generated_certs_dir}}/{{cert_info.procure_component}}.crt
+    --hostnames={{cert_info.hostnames|quote}} --signer-cert={{generated_certs_dir}}/ca.crt --signer-key={{generated_certs_dir}}/ca.key
+    --signer-serial={{generated_certs_dir}}/ca.serial.txt
+  check_mode: no
+  when:
+    - cert_info.hostnames is defined
+    - not component_key_file.stat.exists
+    - not component_cert_file.stat.exists
+
+- name: Copying server key for {{ cert_info.procure_component }} to generated certs directory
+  copy: content="{{procure_component_key}}" dest={{generated_certs_dir}}/{{cert_info.procure_component}}.key
+  check_mode: no
+  when:
+    - cert_info.hostnames is undefined
+    - "{{ cert_info.procure_component }}_crt is defined"
+    - "{{ cert_info.procure_component }}_key is defined"
+    - not component_key_file.stat.exists
+    - not component_cert_file.stat.exists
+
+- name: Copying server cert for {{ cert_info.procure_component }} to generated certs directory
+  copy: content="{{procure_component_crt}}" dest={{generated_certs_dir}}/{{cert_info.procure_component}}.crt
+  check_mode: no
+  when:
+    - cert_info.hostnames is undefined
+    - "{{ cert_info.procure_component }}_crt is defined"
+    - "{{ cert_info.procure_component }}_key is defined"
+    - not component_key_file.stat.exists
+    - not component_cert_file.stat.exists
diff --git a/roles/openshift_logging/tasks/scale.yaml b/roles/openshift_logging/tasks/scale.yaml
new file mode 100644
index 000000000..3d86ea171
--- /dev/null
+++ b/roles/openshift_logging/tasks/scale.yaml
@@ -0,0 +1,26 @@
+---
+- shell: >
+    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get {{object}}
+    --template='{{ '{{.spec.replicas}}' }}' -n {{openshift_logging_namespace}}
+  register: replica_count
+  failed_when: replica_count.rc == 1 and 'exists' not in replica_count.stderr
+  when: not ansible_check_mode
+
+- shell: >
+    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig scale {{object}}
+    --replicas={{desired}} -n {{openshift_logging_namespace}}
+  register: scale_result
+  failed_when: scale_result.rc == 1 and 'exists' not in scale_result.stderr
+  when:
+    - replica_count.stdout != desired
+    - not ansible_check_mode
+
+- shell: >
+    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig describe {{object}} -n {{openshift_logging_namespace}} | awk -v statusrx='Pods Status:' '$0 ~ statusrx {print $3}'
+  register: replica_counts
+  until: replica_counts.stdout.find("{{desired}}") != -1
+  retries: 30
+  delay: 10
+  when:
+    - replica_count.stdout != desired
+    - not ansible_check_mode
diff --git a/roles/openshift_logging/tasks/start_cluster.yaml b/roles/openshift_logging/tasks/start_cluster.yaml
new file mode 100644
index 000000000..cdfc5f2d3
--- /dev/null
+++ b/roles/openshift_logging/tasks/start_cluster.yaml
@@ -0,0 +1,107 @@
+---
+- shell: >
+    echo "{{openshift_logging_fluentd_nodeselector}}" | cut -d':' -f1
+  register: openshift_logging_fluentd_nodeselector_key
+  check_mode: no
+
+- shell: >
+    echo "{{openshift_logging_fluentd_nodeselector}}" | cut -d' ' -f2
+  register: openshift_logging_fluentd_nodeselector_value
+  check_mode: no
+
+- shell: >
+    {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get node -o name | sed "s,^node/,,g"
+  register: fluentd_hosts
+  when: "'--all' in openshift_logging_fluentd_hosts"
+  check_mode: no
+
+- name: start fluentd
+  include: label_node.yaml
+  vars:
+    host: "{{fluentd_host}}"
+    label: "{{openshift_logging_fluentd_nodeselector_key.stdout}}"
+    value: "{{openshift_logging_fluentd_nodeselector_value.stdout}}"
+  with_items: "{{(fluentd_hosts.stdout_lines is defined) | ternary(fluentd_hosts.stdout_lines, openshift_logging_fluentd_hosts)}}"
+  loop_control:
+    loop_var: fluentd_host
+
+- shell: >
+    {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=es -o name -n {{openshift_logging_namespace}}
+  register: es_dc
+  check_mode: no
+
+- name: start elasticsearch
+  include: scale.yaml
+  vars:
+    desired: 1
+  with_items: "{{es_dc.stdout_lines}}"
+  loop_control:
+    loop_var: object
+
+- shell: >
+    {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=kibana -o name -n {{openshift_logging_namespace}}
+  register: kibana_dc
+  check_mode: no
+
+- name: start kibana
+  include: scale.yaml
+  vars:
+    desired: 1
+  with_items: "{{kibana_dc.stdout_lines}}"
+  loop_control:
+    loop_var: object
+
+- shell: >
+    {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=curator -o name -n {{openshift_logging_namespace}}
+  register: curator_dc
+  check_mode: no
+
+- name: start curator
+  include: scale.yaml
+  vars:
+    desired: 1
+  with_items: "{{curator_dc.stdout_lines}}"
+  loop_control:
+    loop_var: object
+
+- shell: >
+    {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=es-ops -o name -n {{openshift_logging_namespace}}
+  register: es_dc
+  check_mode: no
+
+- name: start elasticsearch-ops
+  include: scale.yaml
+  vars:
+    desired: 1
+  with_items: "{{es_dc.stdout_lines}}"
+  loop_control:
+    loop_var: object
+  when: openshift_logging_use_ops
+
+- shell: >
+    {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=kibana-ops -o name -n {{openshift_logging_namespace}}
+  register: kibana_dc
+  check_mode: no
+
+- name: start kibana-ops
+  include: scale.yaml
+  vars:
+    desired: 1
+  with_items: "{{kibana_dc.stdout_lines}}"
+  loop_control:
+    loop_var: object
+  when: openshift_logging_use_ops
+
+- shell: >
+    {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=curator-ops -o name -n {{openshift_logging_namespace}}
+  register: curator_dc
+  check_mode: no
+
+- name: start curator-ops
+  include: scale.yaml
+  vars:
+    desired: 1
+  with_items: "{{curator_dc.stdout_lines}}"
+  loop_control:
+    loop_var: object
+  when: openshift_logging_use_ops
diff --git a/roles/openshift_logging/tasks/stop_cluster.yaml b/roles/openshift_logging/tasks/stop_cluster.yaml
new file mode 100644
index 000000000..e018d0618
--- /dev/null
+++ b/roles/openshift_logging/tasks/stop_cluster.yaml
@@ -0,0 +1,98 @@
+---
+- shell: >
+    echo "{{openshift_logging_fluentd_nodeselector}}" | cut -d':' -f1
+  register: openshift_logging_fluentd_nodeselector_key
+
+- shell: >
+    echo "{{openshift_logging_fluentd_nodeselector}}" | cut -d' ' -f2
+  register: openshift_logging_fluentd_nodeselector_value
+
+- shell: >
+    {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get node -o name | sed "s,^node/,,g"
+  register: fluentd_hosts
+  when: "'--all' in openshift_logging_fluentd_hosts"
+
+- name: stop fluentd
+  include: label_node.yaml
+  vars:
+    host: "{{fluentd_host}}"
+    label: "{{openshift_logging_fluentd_nodeselector_key.stdout}}"
+    unlabel: True
+  with_items: "{{(fluentd_hosts.stdout_lines is defined) | ternary(fluentd_hosts.stdout_lines, openshift_logging_fluentd_hosts)}}"
+  loop_control:
+    loop_var: fluentd_host
+
+- shell: >
+    {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=es -o name -n {{openshift_logging_namespace}}
+  register: es_dc
+
+- name: stop elasticsearch
+  include: scale.yaml
+  vars:
+    desired: 0
+  with_items: "{{es_dc.stdout_lines}}"
+  loop_control:
+    loop_var: object
+
+- shell: >
+    {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=kibana -o name -n {{openshift_logging_namespace}}
+  register: kibana_dc
+
+- name: stop kibana
+  include: scale.yaml
+  vars:
+    desired: 0
+  with_items: "{{kibana_dc.stdout_lines}}"
+  loop_control:
+    loop_var: object
+
+- shell: >
+    {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=curator -o name -n {{openshift_logging_namespace}}
+  register: curator_dc
+
+- name: stop curator
+  include: scale.yaml
+  vars:
+    desired: 0
+  with_items: "{{curator_dc.stdout_lines}}"
+  loop_control:
+    loop_var: object
+
+- shell: >
+    {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=es-ops -o name -n {{openshift_logging_namespace}}
+  register: es_dc
+
+- name: stop elasticsearch-ops
+  include: scale.yaml
+  vars:
+    desired: 0
+  with_items: "{{es_dc.stdout_lines}}"
+  loop_control:
+    loop_var: object
+  when: openshift_logging_use_ops
+
+- shell: >
+    {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=kibana-ops -o name -n {{openshift_logging_namespace}}
+  register: kibana_dc
+
+- name: stop kibana-ops
+  include: scale.yaml
+  vars:
+    desired: 0
+  with_items: "{{kibana_dc.stdout_lines}}"
+  loop_control:
+    loop_var: object
+  when: openshift_logging_use_ops
+
+- shell: >
+    {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=curator-ops -o name -n {{openshift_logging_namespace}}
+  register: curator_dc
+
+- name: stop curator-ops
+  include: scale.yaml
+  vars:
+    desired: 0
+  with_items: "{{curator_dc.stdout_lines}}"
+  loop_control:
+    loop_var: object
+  when: openshift_logging_use_ops
diff --git a/roles/openshift_logging/tasks/upgrade_logging.yaml b/roles/openshift_logging/tasks/upgrade_logging.yaml
new file mode 100644
index 000000000..b2c8022d5
--- /dev/null
+++ b/roles/openshift_logging/tasks/upgrade_logging.yaml
@@ -0,0 +1,33 @@
+---
+- name: Stop the Cluster
+  include: stop_cluster.yaml
+
+- name: Upgrade logging
+  include: install_logging.yaml
+  vars:
+    start_cluster: False
+
+# ensure that ES is running
+- shell: >
+    {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=es -o name -n {{openshift_logging_namespace}}
+  register: es_dc
+  check_mode: no
+
+- name: start elasticsearch
+  include: scale.yaml
+  vars:
+    desired: 1
+  with_items: "{{es_dc.stdout_lines}}"
+  loop_control:
+    loop_var: object
+
+- copy:
+    src: es_migration.sh
+    dest: "{{mktemp.stdout}}/es_migration.sh"
+
+- name: Run upgrade scripts
+  shell: >
+    sh {{mktemp.stdout}}/es_migration.sh {{openshift.common.config_base}}/logging/ca.crt {{openshift.common.config_base}}/logging/system.admin.key {{openshift.common.config_base}}/logging/system.admin.crt {{openshift_logging_es_host}} {{openshift_logging_es_port}} {{openshift_logging_namespace}}
+
+- name: Start up rest of cluster
+  include: start_cluster.yaml
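And the matching upgrade invocation, equally illustrative: the flag routes tasks/main.yaml into upgrade_logging.yaml, which stops the cluster, re-runs the install tasks with start_cluster: False, runs es_migration.sh against the existing indices, and scales everything back up:

    ---
    # Hypothetical playbook: drives the upgrade path in tasks/main.yaml.
    - hosts: masters
      roles:
        - role: openshift_logging
          openshift_logging_upgrade_logging: true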