Diffstat (limited to 'roles/openshift_logging')
 roles/openshift_logging/README.md                           |  29
 roles/openshift_logging/defaults/main.yml                   |  59
 roles/openshift_logging/handlers/main.yml                   |   9
 roles/openshift_logging/library/openshift_logging_facts.py  |   2
 roles/openshift_logging/tasks/delete_logging.yaml           |   2
 roles/openshift_logging/tasks/generate_certs.yaml           |  49
 roles/openshift_logging/tasks/generate_configmaps.yaml      |  42
 roles/openshift_logging/tasks/generate_routes.yaml          |  98
 roles/openshift_logging/tasks/generate_secrets.yaml         |  60
 roles/openshift_logging/tasks/generate_services.yaml        |  32
 roles/openshift_logging/tasks/install_elasticsearch.yaml    | 192
 roles/openshift_logging/tasks/install_fluentd.yaml          |   4
 roles/openshift_logging/tasks/install_logging.yaml          |   4
 roles/openshift_logging/tasks/install_mux.yaml              |  67
 roles/openshift_logging/tasks/install_support.yaml          |  41
 roles/openshift_logging/tasks/main.yaml                     |   3
 roles/openshift_logging/tasks/oc_apply.yaml                 |  94
 roles/openshift_logging/tasks/procure_shared_key.yaml       |  25
 roles/openshift_logging/tasks/set_es_storage.yaml           |  80
 roles/openshift_logging/tasks/start_cluster.yaml            |  23
 roles/openshift_logging/tasks/stop_cluster.yaml             |  20
 roles/openshift_logging/tasks/update_master_config.yaml     |   7
 roles/openshift_logging/templates/curator.j2                |   5
 roles/openshift_logging/templates/elasticsearch.yml.j2      |   5
 roles/openshift_logging/templates/es.j2                     |   3
 roles/openshift_logging/templates/fluentd.j2                |  18
 roles/openshift_logging/templates/kibana.j2                 |  35
 roles/openshift_logging/templates/mux.j2                    | 121
 roles/openshift_logging/templates/service.j2                |   6
 roles/openshift_logging/vars/main.yaml                      |  12
 30 files changed, 935 insertions(+), 212 deletions(-)
diff --git a/roles/openshift_logging/README.md b/roles/openshift_logging/README.md
index 42f4fc72e..3c410eff2 100644
--- a/roles/openshift_logging/README.md
+++ b/roles/openshift_logging/README.md
@@ -91,11 +91,36 @@ same as above for their non-ops counterparts, but apply to the OPS cluster insta
- `openshift_logging_es_ops_pvc_prefix`: logging-es-ops
- `openshift_logging_es_ops_recover_after_time`: 5m
- `openshift_logging_es_ops_storage_group`: 65534
-- `openshift_logging_es_ops_number_of_shards`: The number of primary shards for every new index created in ES. Defaults to '1'.
-- `openshift_logging_es_ops_number_of_replicas`: The number of replica shards per primary shard for every new index. Defaults to '0'.
- `openshift_logging_kibana_ops_hostname`: The Operations Kibana hostname. Defaults to 'kibana-ops.example.com'.
- `openshift_logging_kibana_ops_cpu_limit`: The amount of CPU to allocate to Kibana or unset if not specified.
- `openshift_logging_kibana_ops_memory_limit`: The amount of memory to allocate to Kibana or unset if not specified.
- `openshift_logging_kibana_ops_proxy_cpu_limit`: The amount of CPU to allocate to Kibana proxy or unset if not specified.
- `openshift_logging_kibana_ops_proxy_memory_limit`: The amount of memory to allocate to Kibana proxy or unset if not specified.
- `openshift_logging_kibana_ops_replica_count`: The number of replicas Kibana ops should be scaled up to. Defaults to 1.
+
+Elasticsearch can be exposed for external clients outside of the cluster.
+- `openshift_logging_es_allow_external`: True (default is False) - if this is
+ True, Elasticsearch will be exposed as a Route
+- `openshift_logging_es_hostname`: The external facing hostname to use for
+ the route and the TLS server certificate (default is "es." +
+ `openshift_master_default_subdomain`)
+- `openshift_logging_es_cert`: The location of the certificate Elasticsearch
+ uses for the external TLS server cert (default is a generated cert)
+- `openshift_logging_es_key`: The location of the key Elasticsearch
+ uses for the external TLS server cert (default is a generated key)
+- `openshift_logging_es_ca_ext`: The location of the CA cert for the cert
+ Elasticsearch uses for the external TLS server cert (default is the internal
+ CA)
+Elasticsearch OPS too, if using an OPS cluster:
+- `openshift_logging_es_ops_allow_external`: True (default is False) - if this is
+ True, Elasticsearch will be exposed as a Route
+- `openshift_logging_es_ops_hostname`: The external facing hostname to use for
+ the route and the TLS server certificate (default is "es-ops." +
+ `openshift_master_default_subdomain`)
+- `openshift_logging_es_ops_cert`: The location of the certificate Elasticsearch
+ uses for the external TLS server cert (default is a generated cert)
+- `openshift_logging_es_ops_key`: The location of the key Elasticsearch
+ uses for the external TLS server cert (default is a generated key)
+- `openshift_logging_es_ops_ca_ext`: The location of the CA cert for the cert
+ Elasticsearch uses for the external TLS server cert (default is the internal
+ CA)
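
The new README variables above can be driven from an inventory vars file. A minimal sketch, with placeholder hostnames and certificate paths (none of these values come from this change):

  openshift_logging_es_allow_external: True
  openshift_logging_es_hostname: es.apps.example.com          # placeholder
  # optional: supply your own cert/key/CA instead of the generated ones
  openshift_logging_es_cert: /path/to/es-external.crt         # placeholder path
  openshift_logging_es_key: /path/to/es-external.key          # placeholder path
  openshift_logging_es_ca_ext: /path/to/external-ca.crt       # placeholder path
  # and, when an OPS cluster is in use:
  openshift_logging_es_ops_allow_external: True
  openshift_logging_es_ops_hostname: es-ops.apps.example.com  # placeholder
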
diff --git a/roles/openshift_logging/defaults/main.yml b/roles/openshift_logging/defaults/main.yml
index 96ed44011..837c54067 100644
--- a/roles/openshift_logging/defaults/main.yml
+++ b/roles/openshift_logging/defaults/main.yml
@@ -3,6 +3,10 @@ openshift_logging_use_ops: "{{ openshift_hosted_logging_enable_ops_cluster | def
openshift_logging_master_url: "https://kubernetes.default.svc.{{ openshift.common.dns_domain }}"
openshift_logging_master_public_url: "{{ openshift_hosted_logging_master_public_url | default('https://' + openshift.common.public_hostname + ':' ~ (openshift_master_api_port | default('8443', true))) }}"
openshift_logging_namespace: logging
+openshift_logging_nodeselector: null
+openshift_logging_labels: {}
+openshift_logging_label_key: ""
+openshift_logging_label_value: ""
openshift_logging_install_logging: True
openshift_logging_image_pull_secret: "{{ openshift_hosted_logging_image_pull_secret | default('') }}"
@@ -22,10 +26,10 @@ openshift_logging_curator_ops_nodeselector: "{{ openshift_hosted_logging_curator
openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}"
openshift_logging_kibana_cpu_limit: null
-openshift_logging_kibana_memory_limit: null
+openshift_logging_kibana_memory_limit: 736Mi
openshift_logging_kibana_proxy_debug: false
openshift_logging_kibana_proxy_cpu_limit: null
-openshift_logging_kibana_proxy_memory_limit: null
+openshift_logging_kibana_proxy_memory_limit: 96Mi
openshift_logging_kibana_replica_count: 1
openshift_logging_kibana_edge_term_policy: Redirect
@@ -46,10 +50,10 @@ openshift_logging_kibana_ca: ""
openshift_logging_kibana_ops_hostname: "{{ openshift_hosted_logging_ops_hostname | default('kibana-ops.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}"
openshift_logging_kibana_ops_cpu_limit: null
-openshift_logging_kibana_ops_memory_limit: null
+openshift_logging_kibana_ops_memory_limit: 736Mi
openshift_logging_kibana_ops_proxy_debug: false
openshift_logging_kibana_ops_proxy_cpu_limit: null
-openshift_logging_kibana_ops_proxy_memory_limit: null
+openshift_logging_kibana_ops_proxy_memory_limit: 96Mi
openshift_logging_kibana_ops_replica_count: 1
#The absolute path on the control node to the cert file to use
@@ -68,7 +72,7 @@ openshift_logging_fluentd_nodeselector: "{{ openshift_hosted_logging_fluentd_nod
openshift_logging_fluentd_cpu_limit: 100m
openshift_logging_fluentd_memory_limit: 512Mi
openshift_logging_fluentd_es_copy: false
-openshift_logging_fluentd_use_journal: "{{ openshift_hosted_logging_use_journal | default('') }}"
+openshift_logging_fluentd_use_journal: "{{ openshift_hosted_logging_use_journal if openshift_hosted_logging_use_journal is defined else (docker_log_driver == 'journald') | ternary(True, False) if docker_log_driver is defined else (openshift.docker.log_driver == 'journald') | ternary(True, False) if openshift.docker.log_driver is defined else openshift.docker.options | search('--log-driver=journald') if openshift.docker.options is defined else default(omit) }}"
openshift_logging_fluentd_journal_source: "{{ openshift_hosted_logging_journal_source | default('') }}"
openshift_logging_fluentd_journal_read_from_head: "{{ openshift_hosted_logging_journal_read_from_head | default('') }}"
openshift_logging_fluentd_hosts: ['--all']
@@ -95,6 +99,22 @@ openshift_logging_es_config: {}
openshift_logging_es_number_of_shards: 1
openshift_logging_es_number_of_replicas: 0
+# for exposing es to external (outside of the cluster) clients
+openshift_logging_es_allow_external: False
+openshift_logging_es_hostname: "{{ 'es.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}"
+
+#The absolute path on the control node to the cert file to use
+#for the public facing es certs
+openshift_logging_es_cert: ""
+
+#The absolute path on the control node to the key file to use
+#for the public facing es certs
+openshift_logging_es_key: ""
+
+#The absolute path on the control node to the CA file to use
+#for the public facing es certs
+openshift_logging_es_ca_ext: ""
+
# allow cluster-admin or cluster-reader to view operations index
openshift_logging_es_ops_allow_cluster_reader: False
@@ -113,12 +133,35 @@ openshift_logging_es_ops_pvc_prefix: "{{ openshift_hosted_logging_elasticsearch_
openshift_logging_es_ops_recover_after_time: 5m
openshift_logging_es_ops_storage_group: "{{ openshift_hosted_logging_elasticsearch_storage_group | default('65534') }}"
openshift_logging_es_ops_nodeselector: "{{ openshift_hosted_logging_elasticsearch_ops_nodeselector | default('') | map_from_pairs }}"
-openshift_logging_es_ops_number_of_shards: 1
-openshift_logging_es_ops_number_of_replicas: 0
+
+# for exposing es-ops to external (outside of the cluster) clients
+openshift_logging_es_ops_allow_external: False
+openshift_logging_es_ops_hostname: "{{ 'es-ops.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}"
+
+#The absolute path on the control node to the cert file to use
+#for the public facing es-ops certs
+openshift_logging_es_ops_cert: ""
+
+#The absolute path on the control node to the key file to use
+#for the public facing es-ops certs
+openshift_logging_es_ops_key: ""
+
+#The absolute path on the control node to the CA file to use
+#for the public facing es-ops certs
+openshift_logging_es_ops_ca_ext: ""
# storage related defaults
openshift_logging_storage_access_modes: "{{ openshift_hosted_logging_storage_access_modes | default(['ReadWriteOnce']) }}"
+# mux - secure_forward listener service
+openshift_logging_mux_allow_external: False
+openshift_logging_use_mux: "{{ openshift_logging_mux_allow_external | default(False) }}"
+# this tells the fluentd node agent to use mux instead of sending directly to Elasticsearch
+openshift_logging_use_mux_client: False
+openshift_logging_mux_hostname: "{{ 'mux.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}"
+openshift_logging_mux_port: 24284
+openshift_logging_mux_cpu_limit: 100m
+openshift_logging_mux_memory_limit: 512Mi
# following can be uncommented to provide values for configmaps -- take care when providing file contents as it may cause your cluster to not operate correctly
#es_logging_contents:
@@ -127,3 +170,5 @@ openshift_logging_storage_access_modes: "{{ openshift_hosted_logging_storage_acc
#fluentd_config_contents:
#fluentd_throttle_contents:
#fluentd_secureforward_contents:
+#fluentd_mux_config_contents:
+#fluentd_mux_secureforward_contents:
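
The mux defaults added above translate into a small set of inventory switches. A minimal sketch, with illustrative values only:

  # expose the mux secure_forward listener outside the cluster;
  # this also turns openshift_logging_use_mux on via its default
  openshift_logging_mux_allow_external: True
  openshift_logging_mux_hostname: mux.apps.example.com   # placeholder
  openshift_logging_mux_port: 24284
  # have the node-level fluentd send through mux instead of directly to Elasticsearch
  openshift_logging_use_mux_client: True
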
diff --git a/roles/openshift_logging/handlers/main.yml b/roles/openshift_logging/handlers/main.yml
index ffb812271..69c5a1663 100644
--- a/roles/openshift_logging/handlers/main.yml
+++ b/roles/openshift_logging/handlers/main.yml
@@ -4,6 +4,15 @@
when: (openshift.master.ha is not defined or not openshift.master.ha | bool) and (not (master_service_status_changed | default(false) | bool))
notify: Verify API Server
+- name: restart master api
+ systemd: name={{ openshift.common.service_type }}-master-api state=restarted
+ when: (openshift.master.ha is defined and openshift.master.ha | bool) and (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
+ notify: Verify API Server
+
+- name: restart master controllers
+ systemd: name={{ openshift.common.service_type }}-master-controllers state=restarted
+ when: (openshift.master.ha is defined and openshift.master.ha | bool) and (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
+
- name: Verify API Server
# Using curl here since the uri module requires python-httplib2 and
# wait_for port doesn't provide health information.
diff --git a/roles/openshift_logging/library/openshift_logging_facts.py b/roles/openshift_logging/library/openshift_logging_facts.py
index 64bc33435..a55e72725 100644
--- a/roles/openshift_logging/library/openshift_logging_facts.py
+++ b/roles/openshift_logging/library/openshift_logging_facts.py
@@ -37,7 +37,7 @@ LOGGING_INFRA_KEY = "logging-infra"
# selectors for filtering resources
DS_FLUENTD_SELECTOR = LOGGING_INFRA_KEY + "=" + "fluentd"
LOGGING_SELECTOR = LOGGING_INFRA_KEY + "=" + "support"
-ROUTE_SELECTOR = "component=support, logging-infra=support, provider=openshift"
+ROUTE_SELECTOR = "component=support,logging-infra=support,provider=openshift"
COMPONENTS = ["kibana", "curator", "elasticsearch", "fluentd", "kibana_ops", "curator_ops", "elasticsearch_ops"]
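
The whitespace fix above matters because ROUTE_SELECTOR appears to be handed to the client as a label selector when the facts module enumerates the logging routes. A rough, hypothetical equivalent of that query as an ad-hoc task (not part of the role):

  - name: List logging support routes (illustrative only)
    command: >
      oc get routes -n logging
      --selector=component=support,logging-infra=support,provider=openshift
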
diff --git a/roles/openshift_logging/tasks/delete_logging.yaml b/roles/openshift_logging/tasks/delete_logging.yaml
index 188ea246c..2f5b68b4d 100644
--- a/roles/openshift_logging/tasks/delete_logging.yaml
+++ b/roles/openshift_logging/tasks/delete_logging.yaml
@@ -44,6 +44,7 @@
- logging-kibana
- logging-kibana-proxy
- logging-curator
+ - logging-mux
ignore_errors: yes
register: delete_result
changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
@@ -109,5 +110,6 @@
- logging-curator
- logging-elasticsearch
- logging-fluentd
+ - logging-mux
register: delete_result
changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
diff --git a/roles/openshift_logging/tasks/generate_certs.yaml b/roles/openshift_logging/tasks/generate_certs.yaml
index 740e490e1..46a7e82c6 100644
--- a/roles/openshift_logging/tasks/generate_certs.yaml
+++ b/roles/openshift_logging/tasks/generate_certs.yaml
@@ -45,6 +45,39 @@
- procure_component: kibana-internal
hostnames: "kibana, kibana-ops, {{openshift_logging_kibana_hostname}}, {{openshift_logging_kibana_ops_hostname}}"
+- include: procure_server_certs.yaml
+ loop_control:
+ loop_var: cert_info
+ with_items:
+ - procure_component: mux
+ hostnames: "logging-mux, {{openshift_logging_mux_hostname}}"
+ when: openshift_logging_use_mux
+
+- include: procure_shared_key.yaml
+ loop_control:
+ loop_var: shared_key_info
+ with_items:
+ - procure_component: mux
+ when: openshift_logging_use_mux
+
+- include: procure_server_certs.yaml
+ loop_control:
+ loop_var: cert_info
+ with_items:
+ - procure_component: es
+ hostnames: "es, {{openshift_logging_es_hostname}}"
+ when: openshift_logging_es_allow_external | bool
+
+- include: procure_server_certs.yaml
+ loop_control:
+ loop_var: cert_info
+ with_items:
+ - procure_component: es-ops
+ hostnames: "es-ops, {{openshift_logging_es_ops_hostname}}"
+ when:
+ - openshift_logging_es_allow_external | bool
+ - openshift_logging_use_ops | bool
+
- name: Copy proxy TLS configuration file
copy: src=server-tls.json dest={{generated_certs_dir}}/server-tls.json
when: server_tls_json is undefined
@@ -85,6 +118,22 @@
loop_control:
loop_var: node_name
+- name: Generate PEM cert for mux
+ include: generate_pems.yaml component={{node_name}}
+ with_items:
+ - system.logging.mux
+ loop_control:
+ loop_var: node_name
+ when: openshift_logging_use_mux
+
+- name: Generate PEM cert for Elasticsearch external route
+ include: generate_pems.yaml component={{node_name}}
+ with_items:
+ - system.logging.es
+ loop_control:
+ loop_var: node_name
+ when: openshift_logging_es_allow_external | bool
+
- name: Creating necessary JKS certs
include: generate_jks.yaml
diff --git a/roles/openshift_logging/tasks/generate_configmaps.yaml b/roles/openshift_logging/tasks/generate_configmaps.yaml
index 253543f54..b047eb35a 100644
--- a/roles/openshift_logging/tasks/generate_configmaps.yaml
+++ b/roles/openshift_logging/tasks/generate_configmaps.yaml
@@ -21,6 +21,8 @@
dest="{{local_tmp.stdout}}/elasticsearch-gen-template.yml"
vars:
- allow_cluster_reader: "{{openshift_logging_es_ops_allow_cluster_reader | lower | default('false')}}"
+ - es_number_of_shards: "{{ openshift_logging_es_number_of_shards | default(1) }}"
+ - es_number_of_replicas: "{{ openshift_logging_es_number_of_replicas | default(0) }}"
when: es_config_contents is undefined
changed_when: no
@@ -134,3 +136,43 @@
when: fluentd_configmap.stdout is defined
changed_when: no
check_mode: no
+
+- block:
+ - copy:
+ src: fluent.conf
+ dest: "{{mktemp.stdout}}/fluent-mux.conf"
+ when: fluentd_mux_config_contents is undefined
+ changed_when: no
+
+ - copy:
+ src: secure-forward.conf
+ dest: "{{mktemp.stdout}}/secure-forward-mux.conf"
+ when: fluentd_mux_secureforward_contents is undefined
+ changed_when: no
+
+ - copy:
+ content: "{{fluentd_mux_config_contents}}"
+ dest: "{{mktemp.stdout}}/fluent-mux.conf"
+ when: fluentd_mux_config_contents is defined
+ changed_when: no
+
+ - copy:
+ content: "{{fluentd_mux_secureforward_contents}}"
+ dest: "{{mktemp.stdout}}/secure-forward-mux.conf"
+ when: fluentd_mux_secureforward_contents is defined
+ changed_when: no
+
+ - command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create configmap logging-mux
+ --from-file=fluent.conf={{mktemp.stdout}}/fluent-mux.conf
+ --from-file=secure-forward.conf={{mktemp.stdout}}/secure-forward-mux.conf -o yaml --dry-run
+ register: mux_configmap
+ changed_when: no
+
+ - copy:
+ content: "{{mux_configmap.stdout}}"
+ dest: "{{mktemp.stdout}}/templates/logging-mux-configmap.yaml"
+ when: mux_configmap.stdout is defined
+ changed_when: no
+ check_mode: no
+ when: openshift_logging_use_mux
diff --git a/roles/openshift_logging/tasks/generate_routes.yaml b/roles/openshift_logging/tasks/generate_routes.yaml
index e77da7a24..ae9a8e023 100644
--- a/roles/openshift_logging/tasks/generate_routes.yaml
+++ b/roles/openshift_logging/tasks/generate_routes.yaml
@@ -1,14 +1,14 @@
---
- set_fact: kibana_key={{ lookup('file', openshift_logging_kibana_key) | b64encode }}
- when: "{{ openshift_logging_kibana_key | trim | length > 0 }}"
+ when: openshift_logging_kibana_key | trim | length > 0
changed_when: false
- set_fact: kibana_cert={{ lookup('file', openshift_logging_kibana_cert)| b64encode }}
- when: "{{openshift_logging_kibana_cert | trim | length > 0}}"
+ when: openshift_logging_kibana_cert | trim | length > 0
changed_when: false
- set_fact: kibana_ca={{ lookup('file', openshift_logging_kibana_ca)| b64encode }}
- when: "{{openshift_logging_kibana_ca | trim | length > 0}}"
+ when: openshift_logging_kibana_ca | trim | length > 0
changed_when: false
- set_fact: kibana_ca={{key_pairs | entry_from_named_pair('ca_file') }}
@@ -75,3 +75,95 @@
provider: openshift
when: openshift_logging_use_ops | bool
changed_when: no
+
+- set_fact: es_key={{ lookup('file', openshift_logging_es_key) | b64encode }}
+ when:
+ - openshift_logging_es_key | trim | length > 0
+ - openshift_logging_es_allow_external | bool
+ changed_when: false
+
+- set_fact: es_cert={{ lookup('file', openshift_logging_es_cert)| b64encode }}
+ when:
+ - openshift_logging_es_cert | trim | length > 0
+ - openshift_logging_es_allow_external | bool
+ changed_when: false
+
+- set_fact: es_ca={{ lookup('file', openshift_logging_es_ca_ext)| b64encode }}
+ when:
+ - openshift_logging_es_ca_ext | trim | length > 0
+ - openshift_logging_es_allow_external | bool
+ changed_when: false
+
+- set_fact: es_ca={{key_pairs | entry_from_named_pair('ca_file') }}
+ when:
+ - es_ca is not defined
+ - openshift_logging_es_allow_external | bool
+ changed_when: false
+
+- name: Generating Elasticsearch logging routes
+ template: src=route_reencrypt.j2 dest={{mktemp.stdout}}/templates/logging-logging-es-route.yaml
+ tags: routes
+ vars:
+ obj_name: "logging-es"
+ route_host: "{{openshift_logging_es_hostname}}"
+ service_name: "logging-es"
+ tls_key: "{{es_key | default('') | b64decode}}"
+ tls_cert: "{{es_cert | default('') | b64decode}}"
+ tls_ca_cert: "{{es_ca | b64decode}}"
+ tls_dest_ca_cert: "{{key_pairs | entry_from_named_pair('ca_file')| b64decode }}"
+ edge_term_policy: "{{openshift_logging_es_edge_term_policy | default('') }}"
+ labels:
+ component: support
+ logging-infra: support
+ provider: openshift
+ changed_when: no
+ when: openshift_logging_es_allow_external | bool
+
+- set_fact: es_ops_key={{ lookup('file', openshift_logging_es_ops_key) | b64encode }}
+ when:
+ - openshift_logging_es_ops_allow_external | bool
+ - openshift_logging_use_ops | bool
+ - "{{ openshift_logging_es_ops_key | trim | length > 0 }}"
+ changed_when: false
+
+- set_fact: es_ops_cert={{ lookup('file', openshift_logging_es_ops_cert)| b64encode }}
+ when:
+ - openshift_logging_es_ops_allow_external | bool
+ - openshift_logging_use_ops | bool
+ - "{{openshift_logging_es_ops_cert | trim | length > 0}}"
+ changed_when: false
+
+- set_fact: es_ops_ca={{ lookup('file', openshift_logging_es_ops_ca_ext)| b64encode }}
+ when:
+ - openshift_logging_es_ops_allow_external | bool
+ - openshift_logging_use_ops | bool
+ - "{{openshift_logging_es_ops_ca_ext | trim | length > 0}}"
+ changed_when: false
+
+- set_fact: es_ops_ca={{key_pairs | entry_from_named_pair('ca_file') }}
+ when:
+ - openshift_logging_es_ops_allow_external | bool
+ - openshift_logging_use_ops | bool
+ - es_ops_ca is not defined
+ changed_when: false
+
+- name: Generating Elasticsearch logging ops routes
+ template: src=route_reencrypt.j2 dest={{mktemp.stdout}}/templates/logging-logging-es-ops-route.yaml
+ tags: routes
+ vars:
+ obj_name: "logging-es-ops"
+ route_host: "{{openshift_logging_es_ops_hostname}}"
+ service_name: "logging-es-ops"
+ tls_key: "{{es_ops_key | default('') | b64decode}}"
+ tls_cert: "{{es_ops_cert | default('') | b64decode}}"
+ tls_ca_cert: "{{es_ops_ca | b64decode}}"
+ tls_dest_ca_cert: "{{key_pairs | entry_from_named_pair('ca_file')| b64decode }}"
+ edge_term_policy: "{{openshift_logging_es_ops_edge_term_policy | default('') }}"
+ labels:
+ component: support
+ logging-infra: support
+ provider: openshift
+ when:
+ - openshift_logging_es_ops_allow_external | bool
+ - openshift_logging_use_ops | bool
+ changed_when: no
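
route_reencrypt.j2 itself is not shown in this diff; assuming it follows the standard OpenShift Route schema, the variables above would map roughly onto a reencrypt route like this sketch (hostname and values are placeholders):

  apiVersion: v1
  kind: Route
  metadata:
    name: logging-es                          # obj_name
    labels:
      component: support
      logging-infra: support
      provider: openshift
  spec:
    host: es.apps.example.com                 # route_host (placeholder)
    to:
      kind: Service
      name: logging-es                        # service_name
    tls:
      termination: reencrypt
      insecureEdgeTerminationPolicy: Redirect # edge_term_policy, if set
      key: |                                  # tls_key
        ...
      certificate: |                          # tls_cert
        ...
      caCertificate: |                        # tls_ca_cert
        ...
      destinationCACertificate: |             # tls_dest_ca_cert
        ...
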
diff --git a/roles/openshift_logging/tasks/generate_secrets.yaml b/roles/openshift_logging/tasks/generate_secrets.yaml
index f396bcc6d..b629bd995 100644
--- a/roles/openshift_logging/tasks/generate_secrets.yaml
+++ b/roles/openshift_logging/tasks/generate_secrets.yaml
@@ -34,6 +34,36 @@
check_mode: no
changed_when: no
+- name: Retrieving the cert to use when generating secrets for mux
+ slurp: src="{{generated_certs_dir}}/{{item.file}}"
+ register: mux_key_pairs
+ with_items:
+ - { name: "ca_file", file: "ca.crt" }
+ - { name: "mux_key", file: "system.logging.mux.key"}
+ - { name: "mux_cert", file: "system.logging.mux.crt"}
+ - { name: "mux_shared_key", file: "mux_shared_key"}
+ when: openshift_logging_use_mux
+
+- name: Generating secrets for mux
+ template: src=secret.j2 dest={{mktemp.stdout}}/templates/{{secret_name}}-secret.yaml
+ vars:
+ secret_name: "logging-{{component}}"
+ secret_key_file: "{{component}}_key"
+ secret_cert_file: "{{component}}_cert"
+ secrets:
+ - {key: ca, value: "{{mux_key_pairs | entry_from_named_pair('ca_file')| b64decode }}"}
+ - {key: key, value: "{{mux_key_pairs | entry_from_named_pair(secret_key_file)| b64decode }}"}
+ - {key: cert, value: "{{mux_key_pairs | entry_from_named_pair(secret_cert_file)| b64decode }}"}
+ - {key: shared_key, value: "{{mux_key_pairs | entry_from_named_pair('mux_shared_key')| b64decode }}"}
+ secret_keys: ["ca", "cert", "key", "shared_key"]
+ with_items:
+ - mux
+ loop_control:
+ loop_var: component
+ check_mode: no
+ changed_when: no
+ when: openshift_logging_use_mux
+
- name: Generating secrets for kibana proxy
template: src=secret.j2 dest={{mktemp.stdout}}/templates/{{secret_name}}-secret.yaml
vars:
@@ -43,7 +73,7 @@
- {key: session-secret, value: "{{session_secret}}"}
- {key: server-key, value: "{{kibana_key_file}}"}
- {key: server-cert, value: "{{kibana_cert_file}}"}
- - {key: server-tls, value: "{{server_tls_file}}"}
+ - {key: server-tls.json, value: "{{server_tls_file}}"}
secret_keys: ["server-tls.json", "server-key", "session-secret", "oauth-secret", "server-cert"]
kibana_key_file: "{{key_pairs | entry_from_named_pair('kibana_internal_key')| b64decode }}"
kibana_cert_file: "{{key_pairs | entry_from_named_pair('kibana_internal_cert')| b64decode }}"
@@ -69,3 +99,31 @@
when: logging_es_secret.stdout is defined
check_mode: no
changed_when: no
+
+- name: Retrieving the cert to use when generating secrets for Elasticsearch external route
+ slurp: src="{{generated_certs_dir}}/{{item.file}}"
+ register: es_key_pairs
+ with_items:
+ - { name: "ca_file", file: "ca.crt" }
+ - { name: "es_key", file: "system.logging.es.key"}
+ - { name: "es_cert", file: "system.logging.es.crt"}
+ when: openshift_logging_es_allow_external | bool
+
+- name: Generating secrets for Elasticsearch external route
+ template: src=secret.j2 dest={{mktemp.stdout}}/templates/{{secret_name}}-secret.yaml
+ vars:
+ secret_name: "logging-{{component}}"
+ secret_key_file: "{{component}}_key"
+ secret_cert_file: "{{component}}_cert"
+ secrets:
+ - {key: ca, value: "{{es_key_pairs | entry_from_named_pair('ca_file')| b64decode }}"}
+ - {key: key, value: "{{es_key_pairs | entry_from_named_pair(secret_key_file)| b64decode }}"}
+ - {key: cert, value: "{{es_key_pairs | entry_from_named_pair(secret_cert_file)| b64decode }}"}
+ secret_keys: ["ca", "cert", "key"]
+ with_items:
+ - es
+ loop_control:
+ loop_var: component
+ check_mode: no
+ changed_when: no
+ when: openshift_logging_es_allow_external | bool
diff --git a/roles/openshift_logging/tasks/generate_services.yaml b/roles/openshift_logging/tasks/generate_services.yaml
index 5091c1209..e3a5c5eb3 100644
--- a/roles/openshift_logging/tasks/generate_services.yaml
+++ b/roles/openshift_logging/tasks/generate_services.yaml
@@ -85,3 +85,35 @@
when: openshift_logging_use_ops | bool
check_mode: no
changed_when: no
+
+- name: Generating logging-mux service for external connections
+ template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-mux-svc.yaml
+ vars:
+ obj_name: logging-mux
+ ports:
+ - {port: "{{openshift_logging_mux_port}}", targetPort: mux-forward, name: mux-forward}
+ labels:
+ logging-infra: support
+ selector:
+ provider: openshift
+ component: mux
+ externalIPs:
+ - "{{ ansible_eth0.ipv4.address }}"
+ check_mode: no
+ changed_when: no
+ when: openshift_logging_mux_allow_external
+
+- name: Generating logging-mux service for intra-cluster connections
+ template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-mux-svc.yaml
+ vars:
+ obj_name: logging-mux
+ ports:
+ - {port: "{{openshift_logging_mux_port}}", targetPort: mux-forward, name: mux-forward}
+ labels:
+ logging-infra: support
+ selector:
+ provider: openshift
+ component: mux
+ check_mode: no
+ changed_when: no
+ when: openshift_logging_use_mux and not openshift_logging_mux_allow_external
diff --git a/roles/openshift_logging/tasks/install_elasticsearch.yaml b/roles/openshift_logging/tasks/install_elasticsearch.yaml
index 28fad420b..a981e7f7f 100644
--- a/roles/openshift_logging/tasks/install_elasticsearch.yaml
+++ b/roles/openshift_logging/tasks/install_elasticsearch.yaml
@@ -3,62 +3,51 @@
set_fact: openshift_logging_current_es_size={{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | length }}
- set_fact: openshift_logging_es_pvc_prefix="logging-es"
- when: "not openshift_logging_es_pvc_prefix or openshift_logging_es_pvc_prefix == ''"
+ when: not openshift_logging_es_pvc_prefix or openshift_logging_es_pvc_prefix == ''
-- set_fact: es_pvc_pool={{[]}}
+- set_fact: es_indices={{ es_indices | default([]) + [item | int - 1] }}
+ with_sequence: count={{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | count }}
-- set_fact: openshift_logging_es_pvc_prefix="{{ openshift_logging_es_pvc_prefix | default('logging-es') }}"
-
-- name: Generate PersistentVolumeClaims
- include: "{{ role_path}}/tasks/generate_pvcs.yaml"
+### evaluate if the PVC attached to the dc currently matches the provided vars
+## if it does then we reuse that pvc in the DC
+- include: set_es_storage.yaml
vars:
- es_pv_selector: "{{openshift_logging_es_pv_selector}}"
- es_pvc_dynamic: "{{openshift_logging_es_pvc_dynamic | bool}}"
- es_pvc_names: "{{openshift_logging_facts.elasticsearch.pvcs.keys()}}"
- es_pvc_prefix: "{{openshift_logging_es_pvc_prefix}}"
- es_pvc_size: "{{openshift_logging_es_pvc_size}}"
- es_dc_names: "{{openshift_logging_facts.elasticsearch.deploymentconfigs.keys()}}"
- es_cluster_size: "{{openshift_logging_es_cluster_size}}"
- es_access_modes: "{{ openshift_logging_storage_access_modes }}"
-
-# we should initialize the es_dc_pool with the current keys
-- name: Init pool of DeploymentConfig names for Elasticsearch
- set_fact: es_dc_pool={{ es_dc_pool | default([]) + [deploy_name] }}
- with_items: "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() }}"
+ es_component: es
+ es_name: "{{ deployment.0 }}"
+ es_spec: "{{ deployment.1 }}"
+ es_pvc_count: "{{ deployment.2 | int }}"
+ es_node_selector: "{{ openshift_logging_es_nodeselector | default({}) }}"
+ es_pvc_names_count: "{{ openshift_logging_facts.elasticsearch.pvcs.keys() | count }}"
+ es_pvc_size: "{{ openshift_logging_es_pvc_size }}"
+ es_pvc_prefix: "{{ openshift_logging_es_pvc_prefix }}"
+ es_pvc_dynamic: "{{ openshift_logging_es_pvc_dynamic | bool }}"
+ es_pv_selector: "{{ openshift_logging_es_pv_selector }}"
+ es_cpu_limit: "{{ openshift_logging_es_cpu_limit }}"
+ es_memory_limit: "{{ openshift_logging_es_memory_limit }}"
+ with_together:
+ - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() }}"
+ - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.values() }}"
+ - "{{ es_indices | default([]) }}"
loop_control:
- loop_var: deploy_name
-
-# This should be used to generate new DC names if necessary
-- name: Create new DeploymentConfig names for Elasticsearch
- set_fact: es_dc_pool={{es_dc_pool|default([]) + [deploy_name]}}
- vars:
- component: es
- es_cluster_name: "{{component}}"
- deploy_name_prefix: "logging-{{component}}"
- deploy_name: "{{deploy_name_prefix}}-{{'abcdefghijklmnopqrstuvwxyz0123456789'|random_word(8)}}"
- with_sequence: count={{ openshift_logging_es_cluster_size | int - openshift_logging_current_es_size | int }}
- check_mode: no
+ loop_var: deployment
+## if it does not then we should create one that does and attach it
-- name: Generate Elasticsearch DeploymentConfig
- template: src=es.j2 dest={{mktemp.stdout}}/templates/logging-{{deploy_name}}-dc.yaml
+## create new dc/pvc is needed
+- include: set_es_storage.yaml
vars:
- component: es
- logging_component: elasticsearch
- deploy_name_prefix: "logging-{{component}}"
- image: "{{openshift_logging_image_prefix}}logging-elasticsearch:{{openshift_logging_image_version}}"
- es_cluster_name: "{{component}}"
- es_cpu_limit: "{{openshift_logging_es_cpu_limit }}"
- es_memory_limit: "{{openshift_logging_es_memory_limit}}"
- pvc_claim: "{{(es_pvc_pool | length > item.0) | ternary(es_pvc_pool[item.0], None)}}"
- deploy_name: "{{item.1}}"
- es_node_selector: "{{openshift_logging_es_nodeselector | default({}) }}"
- es_storage: "{{openshift_logging_facts|es_storage(deploy_name, pvc_claim)}}"
- es_number_of_shards: "{{ openshift_logging_es_number_of_shards }}"
- es_number_of_replicas: "{{ openshift_logging_es_number_of_replicas }}"
- with_indexed_items:
- - "{{ es_dc_pool }}"
- check_mode: no
- changed_when: no
+ es_component: es
+ es_name: "logging-es-{{'abcdefghijklmnopqrstuvwxyz0123456789'|random_word(8)}}"
+ es_spec: "{}"
+ es_pvc_count: "{{ item | int - 1 }}"
+ es_node_selector: "{{ openshift_logging_es_nodeselector | default({}) }}"
+ es_pvc_names_count: "{{ [openshift_logging_facts.elasticsearch.pvcs.keys() | count, openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | count] | max }}"
+ es_pvc_size: "{{ openshift_logging_es_pvc_size }}"
+ es_pvc_prefix: "{{ openshift_logging_es_pvc_prefix }}"
+ es_pvc_dynamic: "{{ openshift_logging_es_pvc_dynamic | bool }}"
+ es_pv_selector: "{{ openshift_logging_es_pv_selector }}"
+ es_cpu_limit: "{{ openshift_logging_es_cpu_limit }}"
+ es_memory_limit: "{{ openshift_logging_es_memory_limit }}"
+ with_sequence: count={{ openshift_logging_es_cluster_size | int - openshift_logging_facts.elasticsearch.deploymentconfigs | count }}
# --------- Tasks for Operation clusters ---------
@@ -73,74 +62,57 @@
es_dcs: "{{openshift_logging_facts.elasticsearch_ops.deploymentconfigs}}"
cluster_size: "{{openshift_logging_es_ops_cluster_size|int}}"
when:
- - openshift_logging_use_ops | bool
- - "{{es_dcs | length - openshift_logging_es_ops_cluster_size|int | abs > 1}}"
+ - openshift_logging_use_ops | bool
+ - "{{es_dcs | length - openshift_logging_es_ops_cluster_size|int | abs > 1}}"
check_mode: no
- set_fact: openshift_logging_es_ops_pvc_prefix="logging-es-ops"
- when: "not openshift_logging_es_ops_pvc_prefix or openshift_logging_es_ops_pvc_prefix == ''"
-
-- set_fact: es_pvc_pool={{[]}}
-
-- name: Generate PersistentVolumeClaims for Ops
- include: "{{ role_path}}/tasks/generate_pvcs.yaml"
- vars:
- es_pvc_names: "{{openshift_logging_facts.elasticsearch_ops.pvcs.keys()}}"
- es_dc_names: "{{openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys()}}"
- es_pvc_size: "{{openshift_logging_es_ops_pvc_size}}"
- es_pvc_prefix: "{{openshift_logging_es_ops_pvc_prefix}}"
- es_cluster_size: "{{openshift_logging_es_ops_cluster_size|int}}"
- es_pvc_dynamic: "{{openshift_logging_es_ops_pvc_dynamic | bool}}"
- es_pv_selector: "{{openshift_logging_es_ops_pv_selector}}"
- es_access_modes: "{{ openshift_logging_storage_access_modes }}"
- when:
- - openshift_logging_use_ops | bool
- check_mode: no
+ when: not openshift_logging_es_ops_pvc_prefix or openshift_logging_es_ops_pvc_prefix == ''
-- name: Init pool of DeploymentConfig names for Elasticsearch Ops
- set_fact: es_ops_dc_pool={{ es_ops_dc_pool | default([]) + [deploy_name] }}
- with_items: "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() }}"
- loop_control:
- loop_var: deploy_name
+- set_fact: es_ops_indices={{ es_ops_indices | default([]) + [item | int - 1] }}
+ with_sequence: count={{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | count }}
when:
- - openshift_logging_use_ops | bool
+ - openshift_logging_use_ops | bool
-- name: Create new DeploymentConfig names for Elasticsearch Ops
- set_fact: es_ops_dc_pool={{es_ops_dc_pool | default([]) + [deploy_name]}}
+- include: set_es_storage.yaml
vars:
- component: es-ops
- es_cluster_name: "{{component}}"
- deploy_name_prefix: "logging-{{component}}"
- deploy_name: "{{deploy_name_prefix}}-{{'abcdefghijklmnopqrstuvwxyz0123456789'|random_word(8)}}"
- cluster_size: "{{openshift_logging_es_ops_cluster_size|int}}"
- with_sequence: count={{ openshift_logging_es_ops_cluster_size | int - openshift_logging_current_es_ops_size | int }}
+ es_component: es-ops
+ es_name: "{{ deployment.0 }}"
+ es_spec: "{{ deployment.1 }}"
+ es_pvc_count: "{{ deployment.2 | int }}"
+ es_node_selector: "{{ openshift_logging_es_ops_nodeselector | default({}) }}"
+ es_pvc_names_count: "{{ openshift_logging_facts.elasticsearch_ops.pvcs.keys() | count }}"
+ es_pvc_size: "{{ openshift_logging_es_ops_pvc_size }}"
+ es_pvc_prefix: "{{ openshift_logging_es_ops_pvc_prefix }}"
+ es_pvc_dynamic: "{{ openshift_logging_es_ops_pvc_dynamic | bool }}"
+ es_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}"
+ es_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}"
+ es_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}"
+ with_together:
+ - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() }}"
+ - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.values() }}"
+ - "{{ es_ops_indices | default([]) }}"
+ loop_control:
+ loop_var: deployment
when:
- - openshift_logging_use_ops | bool
- check_mode: no
+ - openshift_logging_use_ops | bool
+## if it does not then we should create one that does and attach it
-- name: Generate Elasticsearch DeploymentConfig for Ops
- template: src=es.j2 dest={{mktemp.stdout}}/templates/logging-{{deploy_name}}-dc.yaml
+## create new dc/pvc is needed
+- include: set_es_storage.yaml
vars:
- component: es-ops
- logging_component: elasticsearch
- deploy_name_prefix: "logging-{{component}}"
- image: "{{openshift_logging_image_prefix}}logging-elasticsearch:{{openshift_logging_image_version}}"
- pvc_claim: "{{(es_pvc_pool | length > item.0) | ternary(es_pvc_pool[item.0], None)}}"
- deploy_name: "{{item.1}}"
- es_cluster_name: "{{component}}"
- es_cpu_limit: "{{openshift_logging_es_ops_cpu_limit }}"
- es_memory_limit: "{{openshift_logging_es_ops_memory_limit}}"
- es_node_quorum: "{{es_ops_node_quorum}}"
- es_recover_after_nodes: "{{es_ops_recover_after_nodes}}"
- es_recover_expected_nodes: "{{es_ops_recover_expected_nodes}}"
- openshift_logging_es_recover_after_time: "{{openshift_logging_es_ops_recover_after_time}}"
- es_node_selector: "{{openshift_logging_es_ops_nodeselector | default({}) }}"
- es_storage: "{{openshift_logging_facts|es_storage(deploy_name, pvc_claim,root='elasticsearch_ops')}}"
- es_number_of_shards: "{{ openshift_logging_es_ops_number_of_shards }}"
- es_number_of_replicas: "{{ openshift_logging_es_ops_number_of_replicas }}"
- with_indexed_items:
- - "{{ es_ops_dc_pool | default([]) }}"
+ es_component: es-ops
+ es_name: "logging-es-ops-{{'abcdefghijklmnopqrstuvwxyz0123456789'|random_word(8)}}"
+ es_spec: "{}"
+ es_pvc_count: "{{ item | int - 1 }}"
+ es_node_selector: "{{ openshift_logging_es_ops_nodeselector | default({}) }}"
+ es_pvc_names_count: "{{ [openshift_logging_facts.elasticsearch_ops.pvcs.keys() | count, openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | count] | max }}"
+ es_pvc_size: "{{ openshift_logging_es_ops_pvc_size }}"
+ es_pvc_prefix: "{{ openshift_logging_es_ops_pvc_prefix }}"
+ es_pvc_dynamic: "{{ openshift_logging_es_ops_pvc_dynamic | bool }}"
+ es_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}"
+ es_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}"
+ es_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}"
+ with_sequence: count={{ openshift_logging_es_ops_cluster_size | int - openshift_logging_facts.elasticsearch_ops.deploymentconfigs | count }}
when:
- - openshift_logging_use_ops | bool
- check_mode: no
- changed_when: no
+ - openshift_logging_use_ops | bool
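
The rewrite above relies on with_together, which zips its input lists element-wise, so each existing DeploymentConfig name is paired with its spec and a zero-based PVC index before set_es_storage.yaml is included for it. A stripped-down, hypothetical play that only demonstrates the pairing:

  - hosts: localhost
    gather_facts: no
    tasks:
      - debug:
          msg: "dc={{ deployment.0 }} index={{ deployment.2 }}"
        with_together:
          - ["logging-es-abc123ef", "logging-es-gh456ijk"]   # dc names
          - [{"volumes": {}}, {"volumes": {}}]               # dc specs
          - [0, 1]                                           # indices
        loop_control:
          loop_var: deployment
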
diff --git a/roles/openshift_logging/tasks/install_fluentd.yaml b/roles/openshift_logging/tasks/install_fluentd.yaml
index 35273829c..6bc405819 100644
--- a/roles/openshift_logging/tasks/install_fluentd.yaml
+++ b/roles/openshift_logging/tasks/install_fluentd.yaml
@@ -32,7 +32,7 @@
{{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy
add-scc-to-user privileged system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd
register: fluentd_output
- failed_when: "fluentd_output.rc == 1 and 'exists' not in fluentd_output.stderr"
+ failed_when: fluentd_output.rc == 1 and 'exists' not in fluentd_output.stderr
check_mode: no
when: fluentd_privileged.stdout.find("system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd") == -1
@@ -49,6 +49,6 @@
{{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy
add-cluster-role-to-user cluster-reader system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd
register: fluentd2_output
- failed_when: "fluentd2_output.rc == 1 and 'exists' not in fluentd2_output.stderr"
+ failed_when: fluentd2_output.rc == 1 and 'exists' not in fluentd2_output.stderr
check_mode: no
when: fluentd_cluster_reader.stdout.find("system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd") == -1
diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml
index 83b68fa77..aec455c22 100644
--- a/roles/openshift_logging/tasks/install_logging.yaml
+++ b/roles/openshift_logging/tasks/install_logging.yaml
@@ -27,6 +27,10 @@
loop_control:
loop_var: install_component
+- name: Install logging mux
+ include: "{{ role_path }}/tasks/install_mux.yaml"
+ when: openshift_logging_use_mux
+
- find: paths={{ mktemp.stdout }}/templates patterns=*.yaml
register: object_def_files
changed_when: no
diff --git a/roles/openshift_logging/tasks/install_mux.yaml b/roles/openshift_logging/tasks/install_mux.yaml
new file mode 100644
index 000000000..91eeb95a1
--- /dev/null
+++ b/roles/openshift_logging/tasks/install_mux.yaml
@@ -0,0 +1,67 @@
+---
+- set_fact: mux_ops_host={{ (openshift_logging_use_ops | bool) | ternary(openshift_logging_es_ops_host, openshift_logging_es_host) }}
+ check_mode: no
+
+- set_fact: mux_ops_port={{ (openshift_logging_use_ops | bool) | ternary(openshift_logging_es_ops_port, openshift_logging_es_port) }}
+ check_mode: no
+
+- name: Check mux current replica count
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get dc/logging-mux
+ -o jsonpath='{.spec.replicas}' -n {{openshift_logging_namespace}}
+ register: mux_replica_count
+ when: not ansible_check_mode
+ ignore_errors: yes
+ changed_when: no
+
+- name: Generating mux deploymentconfig
+ template: src=mux.j2 dest={{mktemp.stdout}}/templates/logging-mux-dc.yaml
+ vars:
+ component: mux
+ logging_component: mux
+ deploy_name: "logging-{{component}}"
+ image: "{{openshift_logging_image_prefix}}logging-fluentd:{{openshift_logging_image_version}}"
+ es_host: logging-es
+ es_port: "{{openshift_logging_es_port}}"
+ ops_host: "{{ mux_ops_host }}"
+ ops_port: "{{ mux_ops_port }}"
+ mux_cpu_limit: "{{openshift_logging_mux_cpu_limit}}"
+ mux_memory_limit: "{{openshift_logging_mux_memory_limit}}"
+ replicas: "{{mux_replica_count.stdout | default (0)}}"
+ mux_node_selector: "{{openshift_logging_mux_nodeselector | default({})}}"
+ check_mode: no
+ changed_when: no
+
+- name: "Check mux hostmount-anyuid permissions"
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ get scc/hostmount-anyuid -o jsonpath='{.users}'
+ register: mux_hostmount_anyuid
+ check_mode: no
+ changed_when: no
+
+- name: "Set hostmount-anyuid permissions for mux"
+ command: >
+ {{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy
+ add-scc-to-user hostmount-anyuid system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd
+ register: mux_output
+ failed_when: mux_output.rc == 1 and 'exists' not in mux_output.stderr
+ check_mode: no
+ when: mux_hostmount_anyuid.stdout.find("system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd") == -1
+
+- name: "Check mux cluster-reader permissions"
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ get clusterrolebinding/cluster-readers -o jsonpath='{.userNames}'
+ register: mux_cluster_reader
+ check_mode: no
+ changed_when: no
+
+- name: "Set cluster-reader permissions for mux"
+ command: >
+ {{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy
+ add-cluster-role-to-user cluster-reader system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd
+ register: mux2_output
+ failed_when: mux2_output.rc == 1 and 'exists' not in mux2_output.stderr
+ check_mode: no
+ when: mux_cluster_reader.stdout.find("system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd") == -1
diff --git a/roles/openshift_logging/tasks/install_support.yaml b/roles/openshift_logging/tasks/install_support.yaml
index da0bbb627..877ce3149 100644
--- a/roles/openshift_logging/tasks/install_support.yaml
+++ b/roles/openshift_logging/tasks/install_support.yaml
@@ -1,17 +1,36 @@
---
# This is the base configuration for installing the other components
-- name: Check for logging project already exists
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get project {{openshift_logging_namespace}} --no-headers
- register: logging_project_result
- ignore_errors: yes
- when: not ansible_check_mode
- changed_when: no
+- name: Set logging project
+ oc_project:
+ state: present
+ name: "{{ openshift_logging_namespace }}"
+ node_selector: "{{ openshift_logging_nodeselector | default(null) }}"
+
+- name: Labelling logging project
+ oc_label:
+ state: present
+ kind: namespace
+ name: "{{ openshift_logging_namespace }}"
+ labels:
+ - key: "{{ item.key }}"
+ value: "{{ item.value }}"
+ with_dict: "{{ openshift_logging_labels | default({}) }}"
+ when:
+ - openshift_logging_labels is defined
+ - openshift_logging_labels is dict
-- name: "Create logging project"
- command: >
- {{ openshift.common.admin_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-project {{openshift_logging_namespace}}
- when: not ansible_check_mode and "not found" in logging_project_result.stderr
+- name: Labelling logging project
+ oc_label:
+ state: present
+ kind: namespace
+ name: "{{ openshift_logging_namespace }}"
+ labels:
+ - key: "{{ openshift_logging_label_key }}"
+ value: "{{ openshift_logging_label_value }}"
+ when:
+ - openshift_logging_label_key is defined
+ - openshift_logging_label_key != ""
+ - openshift_logging_label_value is defined
- name: Create logging cert directory
file: path={{openshift.common.config_base}}/logging state=directory mode=0755
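
The oc_project/oc_label tasks above consume the new openshift_logging_nodeselector and openshift_logging_labels defaults. A minimal sketch of how they might be set in an inventory vars file (values are placeholders, and the exact node-selector format expected by oc_project is assumed here):

  openshift_logging_nodeselector: "region=infra"   # placeholder selector
  openshift_logging_labels:
    team: ops                                      # placeholder label
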
diff --git a/roles/openshift_logging/tasks/main.yaml b/roles/openshift_logging/tasks/main.yaml
index c7f4a2f93..3d8cd3410 100644
--- a/roles/openshift_logging/tasks/main.yaml
+++ b/roles/openshift_logging/tasks/main.yaml
@@ -1,7 +1,7 @@
---
- fail:
msg: Only one Fluentd nodeselector key pair should be provided
- when: "{{ openshift_logging_fluentd_nodeselector.keys() | count }} > 1"
+ when: openshift_logging_fluentd_nodeselector.keys() | count > 1
- name: Set default image variables based on deployment_type
include_vars: "{{ item }}"
@@ -28,6 +28,7 @@
register: local_tmp
changed_when: False
check_mode: no
+ become: no
- debug: msg="Created local temp dir {{local_tmp.stdout}}"
diff --git a/roles/openshift_logging/tasks/oc_apply.yaml b/roles/openshift_logging/tasks/oc_apply.yaml
index cb9509de1..a0ed56ebd 100644
--- a/roles/openshift_logging/tasks/oc_apply.yaml
+++ b/roles/openshift_logging/tasks/oc_apply.yaml
@@ -1,52 +1,52 @@
---
-- name: Checking generation of {{file_content.kind}} {{file_content.metadata.name}}
- command: >
- {{ openshift.common.client_binary }}
- --config={{ kubeconfig }}
- get {{file_content.kind}} {{file_content.metadata.name}}
- -o jsonpath='{.metadata.resourceVersion}'
- -n {{namespace}}
- register: generation_init
- failed_when: "'not found' not in generation_init.stderr and generation_init.stdout == ''"
- changed_when: no
+- oc_obj:
+ kind: "{{ file_content.kind }}"
+ name: "{{ file_content.metadata.name }}"
+ state: present
+ namespace: "{{ namespace }}"
+ files:
+ - "{{ file_name }}"
+ when: file_content.kind not in ["Service", "Route"]
-- name: Applying {{file_name}}
- command: >
- {{ openshift.common.client_binary }} --config={{ kubeconfig }}
- apply -f {{ file_name }}
- -n {{ namespace }}
- register: generation_apply
- failed_when: "'error' in generation_apply.stderr"
- changed_when: no
+## still need to do this for services until the template logic is replaced by oc_*
+- block:
+ - name: Checking generation of {{file_content.kind}} {{file_content.metadata.name}}
+ command: >
+ {{ openshift.common.client_binary }}
+ --config={{ kubeconfig }}
+ get {{file_content.kind}} {{file_content.metadata.name}}
+ -o jsonpath='{.metadata.resourceVersion}'
+ -n {{namespace}}
+ register: generation_init
+ failed_when: "'not found' not in generation_init.stderr and generation_init.stdout == ''"
+ changed_when: no
-- name: Determine change status of {{file_content.kind}} {{file_content.metadata.name}}
- command: >
- {{ openshift.common.client_binary }} --config={{ kubeconfig }}
- get {{file_content.kind}} {{file_content.metadata.name}}
- -o jsonpath='{.metadata.resourceVersion}'
- -n {{namespace}}
- register: generation_changed
- failed_when: "'not found' not in generation_changed.stderr and generation_changed.stdout == ''"
- changed_when: generation_changed.stdout | default (0) | int > generation_init.stdout | default(0) | int
- when:
- - "'field is immutable' not in generation_apply.stderr"
+ - name: Applying {{file_name}}
+ command: >
+ {{ openshift.common.client_binary }} --config={{ kubeconfig }}
+ apply -f {{ file_name }}
+ -n {{ namespace }}
+ register: generation_apply
+ failed_when: "'error' in generation_apply.stderr"
+ changed_when: no
-- name: Removing previous {{file_name}}
- command: >
- {{ openshift.common.client_binary }} --config={{ kubeconfig }}
- delete -f {{ file_name }}
- -n {{ namespace }}
- register: generation_delete
- failed_when: "'error' in generation_delete.stderr"
- changed_when: generation_delete.rc == 0
- when: "'field is immutable' in generation_apply.stderr"
+ - name: Removing previous {{file_name}}
+ command: >
+ {{ openshift.common.client_binary }} --config={{ kubeconfig }}
+ delete -f {{ file_name }}
+ -n {{ namespace }}
+ register: generation_delete
+ failed_when: "'error' in generation_delete.stderr"
+ changed_when: generation_delete.rc == 0
+ when: "'field is immutable' in generation_apply.stderr"
-- name: Recreating {{file_name}}
- command: >
- {{ openshift.common.client_binary }} --config={{ kubeconfig }}
- apply -f {{ file_name }}
- -n {{ namespace }}
- register: generation_apply
- failed_when: "'error' in generation_apply.stderr"
- changed_when: generation_apply.rc == 0
- when: "'field is immutable' in generation_apply.stderr"
+ - name: Recreating {{file_name}}
+ command: >
+ {{ openshift.common.client_binary }} --config={{ kubeconfig }}
+ apply -f {{ file_name }}
+ -n {{ namespace }}
+ register: generation_apply
+ failed_when: "'error' in generation_apply.stderr"
+ changed_when: generation_apply.rc == 0
+ when: "'field is immutable' in generation_apply.stderr"
+ when: file_content.kind in ["Service", "Route"]
diff --git a/roles/openshift_logging/tasks/procure_shared_key.yaml b/roles/openshift_logging/tasks/procure_shared_key.yaml
new file mode 100644
index 000000000..056ff6b98
--- /dev/null
+++ b/roles/openshift_logging/tasks/procure_shared_key.yaml
@@ -0,0 +1,25 @@
+---
+- name: Checking for {{ shared_key_info.procure_component }}_shared_key
+ stat: path="{{generated_certs_dir}}/{{ shared_key_info.procure_component }}_shared_key"
+ register: component_shared_key_file
+ check_mode: no
+
+- name: Trying to discover shared key variable name for {{ shared_key_info.procure_component }}
+ set_fact: procure_component_shared_key={{ lookup('env', '{{shared_key_info.procure_component}}' + '_shared_key') }}
+ when:
+ - shared_key_info[ shared_key_info.procure_component + '_shared_key' ] is defined
+ check_mode: no
+
+- name: Creating shared_key for {{ shared_key_info.procure_component }}
+ copy: content="{{'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'|random_word(64)}}"
+ dest="{{generated_certs_dir}}/{{shared_key_info.procure_component}}_shared_key"
+ check_mode: no
+ when:
+ - not component_shared_key_file.stat.exists
+
+- name: Copying shared key for {{ shared_key_info.procure_component }} to generated certs directory
+ copy: content="{{procure_component_shared_key}}" dest="{{generated_certs_dir}}/{{shared_key_info.procure_component}}_shared_key"
+ check_mode: no
+ when:
+ - shared_key_info[ shared_key_info.procure_component + '_shared_key' ] is defined
+ - not component_shared_key_file.stat.exists
diff --git a/roles/openshift_logging/tasks/set_es_storage.yaml b/roles/openshift_logging/tasks/set_es_storage.yaml
new file mode 100644
index 000000000..4afe4e641
--- /dev/null
+++ b/roles/openshift_logging/tasks/set_es_storage.yaml
@@ -0,0 +1,80 @@
+---
+- set_fact: es_storage_type="{{ es_spec.volumes['elasticsearch-storage'] }}"
+ when: es_spec.volumes is defined
+
+- set_fact: es_storage_claim="{{ es_spec.volumes['elasticsearch-storage'].persistentVolumeClaim.claimName }}"
+ when:
+ - es_spec.volumes is defined
+ - es_storage_type.persistentVolumeClaim is defined
+
+- set_fact: es_storage_claim=""
+ when:
+ - not es_spec.volumes is defined or not es_storage_type.persistentVolumeClaim is defined
+
+## take an ES dc and evaluate its storage option
+# if it is a hostmount or emptydir we don't do anything with it
+# if its a pvc we see if the corresponding pvc matches the provided specs (if they exist)
+- oc_obj:
+ state: list
+ kind: pvc
+ name: "{{ es_storage_claim }}"
+ namespace: "{{ openshift_logging_namespace }}"
+ register: pvc_spec
+ failed_when: pvc_spec.results.stderr is defined
+ when:
+ - es_spec.volumes is defined
+ - es_storage_type.persistentVolumeClaim is defined
+
+- set_fact: pvc_size="{{ pvc_spec.results.results[0].spec.resources.requests.storage }}"
+ when:
+ - pvc_spec.results is defined
+ - pvc_spec.results.results[0].spec is defined
+
+# if not create the pvc and use it
+- block:
+
+ - name: Generating PersistentVolumeClaims
+ template: src=pvc.j2 dest={{mktemp.stdout}}/templates/logging-{{obj_name}}-pvc.yaml
+ vars:
+ obj_name: "{{ es_pvc_prefix }}-{{ es_pvc_names_count | int + es_pvc_count | int }}"
+ size: "{{ es_pvc_size }}"
+ access_modes: "{{ openshift_logging_storage_access_modes }}"
+ pv_selector: "{{ es_pv_selector }}"
+ when: not es_pvc_dynamic | bool
+ check_mode: no
+ changed_when: no
+
+ - name: Generating PersistentVolumeClaims - Dynamic
+ template: src=pvc.j2 dest={{mktemp.stdout}}/templates/logging-{{obj_name}}-pvc.yaml
+ vars:
+ obj_name: "{{ es_pvc_prefix }}-{{ es_pvc_names_count | int + es_pvc_count | int }}"
+ annotations:
+ volume.alpha.kubernetes.io/storage-class: "dynamic"
+ size: "{{ es_pvc_size }}"
+ access_modes: "{{ openshift_logging_storage_access_modes }}"
+ pv_selector: "{{ es_pv_selector }}"
+ when: es_pvc_dynamic | bool
+ check_mode: no
+ changed_when: no
+
+ - set_fact: es_storage_claim="{{ es_pvc_prefix }}-{{ es_pvc_names_count | int + es_pvc_count | int }}"
+
+ when:
+ - es_pvc_size | search('^\d.*')
+ - not es_spec.volumes is defined or not es_storage_claim | search( es_pvc_prefix ) or ( not pvc_size | search( es_pvc_size ) and not es_pvc_size | search( pvc_size ) )
+
+- name: Generate Elasticsearch DeploymentConfig
+ template: src=es.j2 dest={{mktemp.stdout}}/templates/logging-{{deploy_name}}-dc.yaml
+ vars:
+ component: "{{ es_component }}"
+ deploy_name: "{{ es_name }}"
+ logging_component: elasticsearch
+ deploy_name_prefix: "logging-{{ es_component }}"
+ image: "{{openshift_logging_image_prefix}}logging-elasticsearch:{{openshift_logging_image_version}}"
+ es_cluster_name: "{{component}}"
+ es_cpu_limit: "{{ es_cpu_limit }}"
+ es_memory_limit: "{{ es_memory_limit }}"
+ es_node_selector: "{{ es_node_selector }}"
+ es_storage: "{{ openshift_logging_facts | es_storage( es_name, es_storage_claim ) }}"
+ check_mode: no
+ changed_when: no
diff --git a/roles/openshift_logging/tasks/start_cluster.yaml b/roles/openshift_logging/tasks/start_cluster.yaml
index edbb62c3e..c1592b830 100644
--- a/roles/openshift_logging/tasks/start_cluster.yaml
+++ b/roles/openshift_logging/tasks/start_cluster.yaml
@@ -21,6 +21,29 @@
loop_control:
loop_var: fluentd_host
+- name: Retrieve mux
+ oc_obj:
+ state: list
+ kind: dc
+ selector: "component=mux"
+ namespace: "{{openshift_logging_namespace}}"
+ register: mux_dc
+ when: openshift_logging_use_mux
+
+- name: start mux
+ oc_scale:
+ kind: dc
+ name: "{{ object }}"
+ namespace: "{{openshift_logging_namespace}}"
+ replicas: "{{ openshift_logging_mux_replica_count | default (1) }}"
+ with_items: "{{ mux_dc.results.results[0]['items'] | map(attribute='metadata.name') | list if 'results' in mux_dc else [] }}"
+ loop_control:
+ loop_var: object
+ when:
+ - mux_dc.results is defined
+ - mux_dc.results.results is defined
+ - openshift_logging_use_mux
+
- name: Retrieve elasticsearch
oc_obj:
state: list
diff --git a/roles/openshift_logging/tasks/stop_cluster.yaml b/roles/openshift_logging/tasks/stop_cluster.yaml
index 4b3722e29..f4b419d84 100644
--- a/roles/openshift_logging/tasks/stop_cluster.yaml
+++ b/roles/openshift_logging/tasks/stop_cluster.yaml
@@ -21,6 +21,26 @@
loop_control:
loop_var: fluentd_host
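+# Scale any mux DeploymentConfigs down to zero when mux is enabled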
+- name: Retrieve mux
+ oc_obj:
+ state: list
+ kind: dc
+ selector: "component=mux"
+ namespace: "{{openshift_logging_namespace}}"
+ register: mux_dc
+ when: openshift_logging_use_mux
+
+- name: Stop mux
+ oc_scale:
+ kind: dc
+ name: "{{ object }}"
+ namespace: "{{openshift_logging_namespace}}"
+ replicas: 0
+ with_items: "{{ mux_dc.results.results[0]['items'] | map(attribute='metadata.name') | list if 'results' in mux_dc else [] }}"
+ loop_control:
+ loop_var: object
+ when: openshift_logging_use_mux
+
- name: Retrieve elasticsearch
oc_obj:
state: list
diff --git a/roles/openshift_logging/tasks/update_master_config.yaml b/roles/openshift_logging/tasks/update_master_config.yaml
index cef835668..10f522b61 100644
--- a/roles/openshift_logging/tasks/update_master_config.yaml
+++ b/roles/openshift_logging/tasks/update_master_config.yaml
@@ -4,6 +4,9 @@
dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
yaml_key: assetConfig.loggingPublicURL
yaml_value: "https://{{ openshift_logging_kibana_hostname }}"
- notify: restart master
+ notify:
+ - restart master
+ - restart master api
+ - restart master controllers
tags:
- - update_master_config
+ - update_master_config
diff --git a/roles/openshift_logging/templates/curator.j2 b/roles/openshift_logging/templates/curator.j2
index a0fefd882..c6284166b 100644
--- a/roles/openshift_logging/templates/curator.j2
+++ b/roles/openshift_logging/templates/curator.j2
@@ -89,9 +89,6 @@ spec:
- name: config
mountPath: /etc/curator/settings
readOnly: true
- - name: elasticsearch-storage
- mountPath: /elasticsearch/persistent
- readOnly: true
volumes:
- name: certs
secret:
@@ -99,5 +96,3 @@ spec:
- name: config
configMap:
name: logging-curator
- - name: elasticsearch-storage
- emptyDir: {}
diff --git a/roles/openshift_logging/templates/elasticsearch.yml.j2 b/roles/openshift_logging/templates/elasticsearch.yml.j2
index 93c4d854c..355642cb7 100644
--- a/roles/openshift_logging/templates/elasticsearch.yml.j2
+++ b/roles/openshift_logging/templates/elasticsearch.yml.j2
@@ -28,11 +28,10 @@ cloud:
discovery:
type: kubernetes
zen.ping.multicast.enabled: false
- zen.minimum_master_nodes: {{es_min_masters}}
+ zen.minimum_master_nodes: ${NODE_QUORUM}
gateway:
- expected_master_nodes: ${NODE_QUORUM}
- recover_after_nodes: ${RECOVER_AFTER_NODES}
+ recover_after_nodes: ${NODE_QUORUM}
expected_nodes: ${RECOVER_EXPECTED_NODES}
recover_after_time: ${RECOVER_AFTER_TIME}
diff --git a/roles/openshift_logging/templates/es.j2 b/roles/openshift_logging/templates/es.j2
index f89855bf5..680c16cf4 100644
--- a/roles/openshift_logging/templates/es.j2
+++ b/roles/openshift_logging/templates/es.j2
@@ -78,9 +78,6 @@ spec:
name: "NODE_QUORUM"
value: "{{es_node_quorum | int}}"
-
- name: "RECOVER_AFTER_NODES"
- value: "{{es_recover_after_nodes}}"
- -
name: "RECOVER_EXPECTED_NODES"
value: "{{es_recover_expected_nodes}}"
-
diff --git a/roles/openshift_logging/templates/fluentd.j2 b/roles/openshift_logging/templates/fluentd.j2
index 0bf1686ad..5c93d823e 100644
--- a/roles/openshift_logging/templates/fluentd.j2
+++ b/roles/openshift_logging/templates/fluentd.j2
@@ -59,6 +59,14 @@ spec:
- name: dockercfg
mountPath: /etc/sysconfig/docker
readOnly: true
+ - name: dockerdaemoncfg
+ mountPath: /etc/docker
+ readOnly: true
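+{# Mount the mux client certificates only when fluentd is configured to forward its logs to the mux service #}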
+{% if openshift_logging_use_mux_client | bool %}
+ - name: muxcerts
+ mountPath: /etc/fluent/muxkeys
+ readOnly: true
+{% endif %}
env:
- name: "K8S_HOST_URL"
value: "{{openshift_logging_master_url}}"
@@ -122,6 +130,8 @@ spec:
value: "{{openshift_logging_fluentd_journal_source | default('')}}"
- name: "JOURNAL_READ_FROM_HEAD"
value: "{{openshift_logging_fluentd_journal_read_from_head|lower}}"
+ - name: "USE_MUX_CLIENT"
+ value: "{{openshift_logging_use_mux_client| default('false')}}"
volumes:
- name: runlogjournal
hostPath:
@@ -147,3 +157,11 @@ spec:
- name: dockercfg
hostPath:
path: /etc/sysconfig/docker
+ - name: dockerdaemoncfg
+ hostPath:
+ path: /etc/docker
+{% if openshift_logging_use_mux_client | bool %}
+ - name: muxcerts
+ secret:
+ secretName: logging-mux
+{% endif %}
diff --git a/roles/openshift_logging/templates/kibana.j2 b/roles/openshift_logging/templates/kibana.j2
index e6ecf82ff..25fab9ac4 100644
--- a/roles/openshift_logging/templates/kibana.j2
+++ b/roles/openshift_logging/templates/kibana.j2
@@ -44,15 +44,19 @@ spec:
{% if kibana_cpu_limit is not none %}
cpu: "{{kibana_cpu_limit}}"
{% endif %}
-{% if kibana_memory_limit is not none %}
- memory: "{{kibana_memory_limit}}"
-{% endif %}
+ memory: "{{kibana_memory_limit | default('736Mi') }}"
{% endif %}
env:
- name: "ES_HOST"
value: "{{es_host}}"
- name: "ES_PORT"
value: "{{es_port}}"
+ -
+ name: "KIBANA_MEMORY_LIMIT"
+ valueFrom:
+ resourceFieldRef:
+ containerName: kibana
+ resource: limits.memory
volumeMounts:
- name: kibana
mountPath: /etc/kibana/keys
@@ -67,9 +71,7 @@ spec:
{% if kibana_proxy_cpu_limit is not none %}
cpu: "{{kibana_proxy_cpu_limit}}"
{% endif %}
-{% if kibana_proxy_memory_limit is not none %}
- memory: "{{kibana_proxy_memory_limit}}"
-{% endif %}
+ memory: "{{kibana_proxy_memory_limit | default('96Mi') }}"
{% endif %}
ports:
-
@@ -103,6 +105,27 @@ spec:
-
name: "OAP_DEBUG"
value: "{{openshift_logging_kibana_proxy_debug}}"
+ -
+ name: "OAP_OAUTH_SECRET_FILE"
+ value: "/secret/oauth-secret"
+ -
+ name: "OAP_SERVER_CERT_FILE"
+ value: "/secret/server-cert"
+ -
+ name: "OAP_SERVER_KEY_FILE"
+ value: "/secret/server-key"
+ -
+ name: "OAP_SERVER_TLS_FILE"
+ value: "/secret/server-tls.json"
+ -
+ name: "OAP_SESSION_SECRET_FILE"
+ value: "/secret/session-secret"
+ -
+ name: "OCP_AUTH_PROXY_MEMORY_LIMIT"
+ valueFrom:
+ resourceFieldRef:
+ containerName: kibana-proxy
+ resource: limits.memory
volumeMounts:
- name: kibana-proxy
mountPath: /secret
diff --git a/roles/openshift_logging/templates/mux.j2 b/roles/openshift_logging/templates/mux.j2
new file mode 100644
index 000000000..41e6abd52
--- /dev/null
+++ b/roles/openshift_logging/templates/mux.j2
@@ -0,0 +1,121 @@
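+{# DeploymentConfig for the mux service: it receives forwarded log traffic from fluentd clients and relays it to Elasticsearch (and to the ops cluster when configured). #}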
+apiVersion: "v1"
+kind: "DeploymentConfig"
+metadata:
+ name: "{{deploy_name}}"
+ labels:
+ provider: openshift
+ component: "{{component}}"
+ logging-infra: "{{logging_component}}"
+spec:
+ replicas: {{replicas|default(0)}}
+ selector:
+ provider: openshift
+ component: "{{component}}"
+ logging-infra: "{{logging_component}}"
+ strategy:
+ rollingParams:
+ intervalSeconds: 1
+ timeoutSeconds: 600
+ updatePeriodSeconds: 1
+ type: Rolling
+ template:
+ metadata:
+ name: "{{deploy_name}}"
+ labels:
+ logging-infra: "{{logging_component}}"
+ provider: openshift
+ component: "{{component}}"
+ spec:
+ serviceAccountName: aggregated-logging-fluentd
+{% if mux_node_selector is iterable and mux_node_selector | length > 0 %}
+ nodeSelector:
+{% for key, value in mux_node_selector.iteritems() %}
+ {{key}}: "{{value}}"
+{% endfor %}
+{% endif %}
+ containers:
+ - name: "mux"
+ image: {{image}}
+ imagePullPolicy: Always
+{% if (mux_memory_limit is defined and mux_memory_limit is not none) or (mux_cpu_limit is defined and mux_cpu_limit is not none) %}
+ resources:
+ limits:
+{% if mux_cpu_limit is not none %}
+ cpu: "{{mux_cpu_limit}}"
+{% endif %}
+{% if mux_memory_limit is not none %}
+ memory: "{{mux_memory_limit}}"
+{% endif %}
+{% endif %}
+ ports:
+ - containerPort: "{{ openshift_logging_mux_port }}"
+ name: mux-forward
+ volumeMounts:
+ - name: config
+ mountPath: /etc/fluent/configs.d/user
+ readOnly: true
+ - name: certs
+ mountPath: /etc/fluent/keys
+ readOnly: true
+ - name: dockerhostname
+ mountPath: /etc/docker-hostname
+ readOnly: true
+ - name: localtime
+ mountPath: /etc/localtime
+ readOnly: true
+ - name: muxcerts
+ mountPath: /etc/fluent/muxkeys
+ readOnly: true
+ env:
+ - name: "K8S_HOST_URL"
+ value: "{{openshift_logging_master_url}}"
+ - name: "ES_HOST"
+ value: "{{openshift_logging_es_host}}"
+ - name: "ES_PORT"
+ value: "{{openshift_logging_es_port}}"
+ - name: "ES_CLIENT_CERT"
+ value: "{{openshift_logging_es_client_cert}}"
+ - name: "ES_CLIENT_KEY"
+ value: "{{openshift_logging_es_client_key}}"
+ - name: "ES_CA"
+ value: "{{openshift_logging_es_ca}}"
+ - name: "OPS_HOST"
+ value: "{{ops_host}}"
+ - name: "OPS_PORT"
+ value: "{{ops_port}}"
+ - name: "OPS_CLIENT_CERT"
+ value: "{{openshift_logging_es_ops_client_cert}}"
+ - name: "OPS_CLIENT_KEY"
+ value: "{{openshift_logging_es_ops_client_key}}"
+ - name: "OPS_CA"
+ value: "{{openshift_logging_es_ops_ca}}"
+ - name: "USE_JOURNAL"
+ value: "false"
+ - name: "JOURNAL_SOURCE"
+ value: "{{openshift_logging_fluentd_journal_source | default('')}}"
+ - name: "JOURNAL_READ_FROM_HEAD"
+ value: "{{openshift_logging_fluentd_journal_read_from_head|lower}}"
+ - name: FORWARD_LISTEN_HOST
+ value: "{{ openshift_logging_mux_hostname }}"
+ - name: FORWARD_LISTEN_PORT
+ value: "{{ openshift_logging_mux_port }}"
+ - name: USE_MUX
+ value: "true"
+ - name: MUX_ALLOW_EXTERNAL
+ value: "{{ openshift_logging_mux_allow_external| default('false') }}"
+ volumes:
+ - name: config
+ configMap:
+ name: logging-mux
+ - name: certs
+ secret:
+ secretName: logging-fluentd
+ - name: dockerhostname
+ hostPath:
+ path: /etc/hostname
+ - name: localtime
+ hostPath:
+ path: /etc/localtime
+ - name: muxcerts
+ secret:
+ secretName: logging-mux
diff --git a/roles/openshift_logging/templates/service.j2 b/roles/openshift_logging/templates/service.j2
index 6c4ec0c76..70644a39c 100644
--- a/roles/openshift_logging/templates/service.j2
+++ b/roles/openshift_logging/templates/service.j2
@@ -26,3 +26,9 @@ spec:
{% for key, value in selector.iteritems() %}
{{key}}: {{value}}
{% endfor %}
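+{# Optionally expose the service on explicit external IPs, e.g. when Elasticsearch or mux is made reachable from outside the cluster #}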
+{% if externalIPs is defined %}
+ externalIPs:
+{% for ip in externalIPs %}
+ - {{ ip }}
+{% endfor %}
+{% endif %}
diff --git a/roles/openshift_logging/vars/main.yaml b/roles/openshift_logging/vars/main.yaml
index e06625e3f..e561b41e2 100644
--- a/roles/openshift_logging/vars/main.yaml
+++ b/roles/openshift_logging/vars/main.yaml
@@ -1,12 +1,8 @@
---
openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
-es_node_quorum: "{{openshift_logging_es_cluster_size|int/2 + 1}}"
-es_min_masters_default: "{{ (openshift_logging_es_cluster_size | int / 2 | round(0,'floor') + 1) | int }}"
-es_min_masters: "{{ (openshift_logging_es_cluster_size == 1) | ternary(1, es_min_masters_default)}}"
-es_recover_after_nodes: "{{openshift_logging_es_cluster_size|int - 1}}"
-es_recover_expected_nodes: "{{openshift_logging_es_cluster_size|int}}"
-es_ops_node_quorum: "{{openshift_logging_es_ops_cluster_size|int/2 + 1}}"
-es_ops_recover_after_nodes: "{{openshift_logging_es_ops_cluster_size|int - 1}}"
-es_ops_recover_expected_nodes: "{{openshift_logging_es_ops_cluster_size|int}}"
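+# Quorum is a majority of the cluster: floor(cluster_size / 2) + 1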
+es_node_quorum: "{{ (openshift_logging_es_cluster_size | int/2 | round(0,'floor') + 1) | int}}"
+es_recover_expected_nodes: "{{openshift_logging_es_cluster_size | int}}"
+es_ops_node_quorum: "{{ (openshift_logging_es_ops_cluster_size | int/2 | round(0,'floor') + 1) | int}}"
+es_ops_recover_expected_nodes: "{{openshift_logging_es_ops_cluster_size | int}}"
es_log_appenders: ['file', 'console']