69 files changed, 1853 insertions, 195 deletions
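Several of the changes in this diff add new override variables rather than changing default behavior. As a quick orientation before the raw patch, the sketch below shows how those knobs could be set from an Ansible vars file. The variable names are taken verbatim from roles/openshift_aws/defaults/main.yml and roles/openshift_logging/defaults/main.yml below; the values, and the choice of a group_vars file, are illustrative only.

# Illustrative group_vars excerpt -- variable names come from this commit, values are placeholders.
openshift_logging_es5_techpreview: true                # opt in to the ES 5.x tech preview (origin deployments only)
openshift_aws_master_group_instance_type: m4.2xlarge   # per-group instance type is now parameterized
openshift_aws_master_group_min_size: 3
openshift_aws_master_group_max_size: 3
openshift_aws_master_group_desired_size: 3
openshift_aws_compute_group_instance_type: m4.xlarge
openshift_aws_compute_group_max_size: 50
openshift_aws_infra_group_desired_size: 2
openshift_aws_scale_group_health_check:                # shared health-check block, factored out of each group
  period: 60
  type: EC2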
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible index c05d9e64b..bf76a3913 100644 --- a/.tito/packages/openshift-ansible +++ b/.tito/packages/openshift-ansible @@ -1 +1 @@ -3.9.0-0.37.0 ./ +3.9.0-0.39.0 ./ diff --git a/openshift-ansible.spec b/openshift-ansible.spec index 5d2d83750..1ec707543 100644 --- a/openshift-ansible.spec +++ b/openshift-ansible.spec @@ -10,7 +10,7 @@ Name: openshift-ansible Version: 3.9.0 -Release: 0.37.0%{?dist} +Release: 0.39.0%{?dist} Summary: Openshift and Atomic Enterprise Ansible License: ASL 2.0 URL: https://github.com/openshift/openshift-ansible @@ -201,6 +201,33 @@ Atomic OpenShift Utilities includes %changelog +* Tue Feb 06 2018 Justin Pierce <jupierce@redhat.com> 3.9.0-0.39.0 +- Update code to not fail when rc != 0 (kwoodson@redhat.com) +- Upgrades: pass openshift_manage_node_is_master to master nodes during upgrade + (vrutkovs@redhat.com) +- Updates to configure monitoring container. (kwoodson@redhat.com) +- Move cert SAN update logic to openshift-etcd (rteague@redhat.com) +- Swapping container order for es pod (ewolinet@redhat.com) +- Adding support for ES 5.x tech preview opt in (ewolinet@redhat.com) +- bug 1540799: openshift_prometheus: update alertmanager config file flag + (pgier@redhat.com) +- parameterize various master scale group bits (jdiaz@redhat.com) +- Use rollout instead of deploy (deprecated) (rteague@redhat.com) +- cri-o: export variables defined in crio-network (gscrivan@redhat.com) + +* Mon Feb 05 2018 Justin Pierce <jupierce@redhat.com> 3.9.0-0.38.0 +- Moving upgrade sg playbook to 3.9 (kwoodson@redhat.com) +- remove openshift_upgrade_{pre,post}_storage_migration_enabled from + failed_when (nakayamakenjiro@gmail.com) +- Fix version handling in 3.8/3.9 control plane upgrades (rteague@redhat.com) +- add S3 bucket cleanup (jdiaz@redhat.com) +- dynamic inventory bug when group exists but its empty (m.judeikis@gmail.com) +- dynamic inventory bug when group exists but its empty (m.judeikis@gmail.com) +- Parameterize user and disable_root options in cloud config + (nelluri@redhat.com) +- Fix softlinks broken by d3fefc32a727fe3c13159c4e9fe4399f35b487a8 + (Klaas-@users.noreply.github.com) + * Fri Feb 02 2018 Justin Pierce <jupierce@redhat.com> 3.9.0-0.37.0 - Don't use 'omit' for package module (vrutkovs@redhat.com) - Adding requirements for logging and metrics (ewolinet@redhat.com) diff --git a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml index f44ab3580..9c927c0a1 100644 --- a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml @@ -119,6 +119,9 @@ - shell: > echo -n | openssl s_client -showcerts -servername docker-registry.default.svc -connect docker-registry.default.svc:5000 | openssl x509 -text | grep -A1 'X509v3 Subject Alternative Name:' | grep -Pq 'DNS:docker-registry\.default\.svc(,|$)' register: cert_output + changed_when: false + failed_when: + - cert_output.rc not in [0, 1] # Step 2: Set a fact to be used to determine if we should run the redeploy of registry certs - name: set a fact to include the registry certs playbook if needed diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml index 9b5ba3482..40e245d75 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml +++ 
b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml @@ -3,29 +3,6 @@ # Upgrade Masters ############################################################################### -# Prior to 3.6, openshift-ansible created etcd serving certificates -# without a SubjectAlternativeName entry for the system hostname. The -# SAN list in Go 1.8 is now (correctly) authoritative and since -# openshift-ansible configures masters to talk to etcd hostnames -# rather than IP addresses, we must correct etcd certificates. -# -# This play examines the etcd serving certificate SANs on each etcd -# host and records whether or not the system hostname is missing. -- name: Examine etcd serving certificate SAN - hosts: oo_etcd_to_config - tasks: - - slurp: - src: /etc/etcd/server.crt - register: etcd_serving_cert - - set_fact: - __etcd_cert_lacks_hostname: "{{ (openshift.common.hostname not in (etcd_serving_cert.content | b64decode | lib_utils_oo_parse_certificate_san)) | bool }}" - -# Redeploy etcd certificates when hostnames were missing from etcd -# serving certificate SANs. -- import_playbook: ../../../openshift-etcd/redeploy-certificates.yml - when: - - true in hostvars | lib_utils_oo_select_keys(groups['oo_etcd_to_config']) | lib_utils_oo_collect('__etcd_cert_lacks_hostname') | default([false]) - - name: Backup and upgrade etcd import_playbook: ../../../openshift-etcd/private/upgrade_main.yml @@ -340,3 +317,4 @@ tasks_from: config.yml vars: openshift_master_host: "{{ groups.oo_first_master.0 }}" + openshift_manage_node_is_master: true diff --git a/playbooks/openshift-etcd/private/upgrade_main.yml b/playbooks/openshift-etcd/private/upgrade_main.yml index 8997680f9..fea588260 100644 --- a/playbooks/openshift-etcd/private/upgrade_main.yml +++ b/playbooks/openshift-etcd/private/upgrade_main.yml @@ -1,4 +1,37 @@ --- +# Prior to 3.6, openshift-ansible created etcd serving certificates +# without a SubjectAlternativeName entry for the system hostname. The +# SAN list in Go 1.8 is now (correctly) authoritative and since +# openshift-ansible configures masters to talk to etcd hostnames +# rather than IP addresses, we must correct etcd certificates. +# +# This play examines the etcd serving certificate SANs on each etcd +# host and records whether or not the system hostname is missing. +- name: Examine etcd serving certificate SAN + hosts: oo_etcd_to_config + tasks: + - slurp: + src: /etc/etcd/server.crt + register: etcd_serving_cert + - set_fact: + __etcd_cert_lacks_hostname: "{{ (openshift.common.hostname not in (etcd_serving_cert.content | b64decode | lib_utils_oo_parse_certificate_san)) | bool }}" + +# Redeploy etcd certificates when hostnames were missing from etcd +# serving certificate SANs. 
+- import_playbook: redeploy-certificates.yml + when: + - true in hostvars | lib_utils_oo_select_keys(groups['oo_etcd_to_config']) | lib_utils_oo_collect('__etcd_cert_lacks_hostname') | default([false]) + +- import_playbook: restart.yml + vars: + g_etcd_certificates_expired: "{{ ('expired' in (hostvars | lib_utils_oo_select_keys(groups['etcd']) | lib_utils_oo_collect('check_results.check_results.etcd') | lib_utils_oo_collect('health'))) | bool }}" + when: + - true in hostvars | lib_utils_oo_select_keys(groups['oo_etcd_to_config']) | lib_utils_oo_collect('__etcd_cert_lacks_hostname') | default([false]) + +- import_playbook: ../../openshift-master/private/restart.yml + when: + - true in hostvars | lib_utils_oo_select_keys(groups['oo_etcd_to_config']) | lib_utils_oo_collect('__etcd_cert_lacks_hostname') | default([false]) + # For 1.4/3.4 we want to upgrade everyone to etcd-3.0. etcd docs say to # upgrade from 2.0.x to 2.1.x to 2.2.x to 2.3.x to 3.0.x. While this is a tedius # task for RHEL and CENTOS it's simply not possible in Fedora unless you've diff --git a/playbooks/openshift-hosted/private/redeploy-registry-certificates.yml b/playbooks/openshift-hosted/private/redeploy-registry-certificates.yml index b817221b8..d88209593 100644 --- a/playbooks/openshift-hosted/private/redeploy-registry-certificates.yml +++ b/playbooks/openshift-hosted/private/redeploy-registry-certificates.yml @@ -88,8 +88,7 @@ - name: Redeploy docker registry command: > - {{ openshift_client_binary }} deploy dc/docker-registry - --latest + {{ openshift_client_binary }} rollout latest dc/docker-registry --config={{ mktemp.stdout }}/admin.kubeconfig -n default diff --git a/playbooks/openshift-hosted/private/redeploy-router-certificates.yml b/playbooks/openshift-hosted/private/redeploy-router-certificates.yml index 0df748f47..952a5f4ee 100644 --- a/playbooks/openshift-hosted/private/redeploy-router-certificates.yml +++ b/playbooks/openshift-hosted/private/redeploy-router-certificates.yml @@ -129,8 +129,7 @@ - name: Redeploy router command: > - {{ openshift_client_binary }} deploy dc/router - --latest + {{ openshift_client_binary }} rollout latest dc/router --config={{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig -n default diff --git a/playbooks/openshift-logging/private/config.yml b/playbooks/openshift-logging/private/config.yml index 07aa8bfde..f2a57f9f8 100644 --- a/playbooks/openshift-logging/private/config.yml +++ b/playbooks/openshift-logging/private/config.yml @@ -11,6 +11,38 @@ status: "In Progress" start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" +- name: Update vm.max_map_count for ES 5.x + hosts: all + gather_facts: false + tasks: + - when: + - openshift_logging_es5_techpreview | default(false) | bool + - openshift_deployment_type in ['origin'] + block: + - name: Checking vm max_map_count value + command: + cat /proc/sys/vm/max_map_count + register: _vm_max_map_count + + - stat: + path: /etc/sysctl.d/99-elasticsearch.conf + register: _99_es_conf + + - name: Check for current value of vm.max_map_count in 99-elasticsearch.conf + command: > + sed /etc/sysctl.d/99-elasticsearch.conf -e 's/vm.max_map_count=\(.*\)/\1/' + register: _curr_vm_max_map_count + when: _99_es_conf.stat.exists + + - name: Updating vm.max_map_count value + sysctl: + name: vm.max_map_count + value: 262144 + sysctl_file: "/etc/sysctl.d/99-elasticsearch.conf" + reload: yes + when: + - _vm_max_map_count.stdout | default(0) | int < 262144 | int or _curr_vm_max_map_count.stdout | default(0) | int < 262144 + - name: OpenShift 
Aggregated Logging hosts: oo_first_master roles: @@ -20,11 +52,10 @@ - name: Update Master configs hosts: oo_masters:!oo_first_master tasks: - - block: - - import_role: - name: openshift_logging - tasks_from: update_master_config - when: not openshift.common.version_gte_3_9 + - include_role: + name: openshift_logging + tasks_from: update_master_config + when: not openshift.common.version_gte_3_9 - name: Logging Install Checkpoint End hosts: all diff --git a/roles/container_runtime/templates/crio-network.j2 b/roles/container_runtime/templates/crio-network.j2 index 763be97d7..ae8a506fe 100644 --- a/roles/container_runtime/templates/crio-network.j2 +++ b/roles/container_runtime/templates/crio-network.j2 @@ -1,9 +1,9 @@ {% if 'http_proxy' in openshift.common %} -HTTP_PROXY={{ openshift.common.http_proxy }} +export HTTP_PROXY={{ openshift.common.http_proxy }} {% endif %} {% if 'https_proxy' in openshift.common %} -HTTPS_PROXY={{ openshift.common.https_proxy }} +export HTTPS_PROXY={{ openshift.common.https_proxy }} {% endif %} {% if 'no_proxy' in openshift.common %} -NO_PROXY={{ openshift.common.no_proxy }} +export NO_PROXY={{ openshift.common.no_proxy }} {% endif %} diff --git a/roles/openshift_aws/defaults/main.yml b/roles/openshift_aws/defaults/main.yml index 1696c2751..3d966e34a 100644 --- a/roles/openshift_aws/defaults/main.yml +++ b/roles/openshift_aws/defaults/main.yml @@ -173,18 +173,20 @@ openshift_aws_node_groups: openshift_aws_created_asgs: [] openshift_aws_current_asgs: [] +openshift_aws_scale_group_health_check: + period: 60 + type: EC2 + # these will be used during upgrade openshift_aws_master_group_config: # The 'master' key is always required here. master: - instance_type: m4.xlarge + instance_type: "{{ openshift_aws_master_group_instance_type | default('m4.xlarge') }}" volumes: "{{ openshift_aws_node_group_config_master_volumes }}" - health_check: - period: 60 - type: EC2 - min_size: 3 - max_size: 3 - desired_size: 3 + health_check: "{{ openshift_aws_scale_group_health_check }}" + min_size: "{{ openshift_aws_master_group_min_size | default(3) }}" + max_size: "{{ openshift_aws_master_group_max_size | default(3) }}" + desired_size: "{{ openshift_aws_master_group_desired_size | default(3) }}" wait_for_instances: True termination_policy: "{{ openshift_aws_node_group_termination_policy }}" replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}" @@ -196,14 +198,12 @@ openshift_aws_master_group_config: openshift_aws_node_group_config: # The 'compute' key is always required here. compute: - instance_type: m4.xlarge + instance_type: "{{ openshift_aws_compute_group_instance_type | default('m4.xlarge') }}" volumes: "{{ openshift_aws_node_group_config_node_volumes }}" - health_check: - period: 60 - type: EC2 - min_size: 3 - max_size: 100 - desired_size: 3 + health_check: "{{ openshift_aws_scale_group_health_check }}" + min_size: "{{ openshift_aws_compute_group_min_size | default(3) }}" + max_size: "{{ openshift_aws_compute_group_max_size | default(100) }}" + desired_size: "{{ openshift_aws_compute_group_desired_size | default(3) }}" termination_policy: "{{ openshift_aws_node_group_termination_policy }}" replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}" iam_role: "{{ openshift_aws_iam_role_name }}" @@ -211,14 +211,12 @@ openshift_aws_node_group_config: policy_json: "{{ openshift_aws_iam_role_policy_json }}" # The 'infra' key is always required here. 
infra: - instance_type: m4.xlarge + instance_type: "{{ openshift_aws_infra_group_instance_type | default('m4.xlarge') }}" volumes: "{{ openshift_aws_node_group_config_node_volumes }}" - health_check: - period: 60 - type: EC2 - min_size: 2 - max_size: 20 - desired_size: 2 + health_check: "{{ openshift_aws_scale_group_health_check }}" + min_size: "{{ openshift_aws_infra_group_min_size | default(2) }}" + max_size: "{{ openshift_aws_infra_group_max_size | default(20) }}" + desired_size: "{{ openshift_aws_infra_group_desired_size | default(2) }}" termination_policy: "{{ openshift_aws_node_group_termination_policy }}" replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}" iam_role: "{{ openshift_aws_iam_role_name }}" diff --git a/roles/openshift_daemonset_config/defaults/main.yml b/roles/openshift_daemonset_config/defaults/main.yml index ebe5671d2..bb9803c2b 100644 --- a/roles/openshift_daemonset_config/defaults/main.yml +++ b/roles/openshift_daemonset_config/defaults/main.yml @@ -1,16 +1,19 @@ --- -openshift_daemonset_config_namespace: openshift-node -openshift_daemonset_config_daemonset_name: ops-node-config -openshift_daemonset_config_configmap_name: "{{ openshift_daemonset_config_daemonset_name }}" +openshift_daemonset_config_image: "centos:7" +openshift_daemonset_config_monitoring_image: "openshifttools/oso-centos7-host-monitoring:latest" +openshift_daemonset_config_namespace: openshift-config +openshift_daemonset_config_daemonset_name: node-config +openshift_daemonset_config_configmap_name: "{{ openshift_daemonset_config_daemonset_name }}-configmap" +openshift_daemonset_config_monitoring_pos: "false" openshift_daemonset_config_node_selector: config: config -openshift_daemonset_config_sa_name: ops +openshift_daemonset_config_sa_name: configurator openshift_daemonset_config_configmap_files: {} openshift_daemonset_config_configmap_literals: {} openshift_daemonset_config_monitoring: False openshift_daemonset_config_interval: 300 openshift_daemonset_config_script: config.sh -openshift_daemonset_config_secret_name: operations-config-secret +openshift_daemonset_config_secret_name: "{{ openshift_daemonset_config_daemonset_name }}-secret" openshift_daemonset_config_secrets: {} openshift_daemonset_config_runasuser: 0 openshift_daemonset_config_privileged: True diff --git a/roles/openshift_daemonset_config/tasks/main.yml b/roles/openshift_daemonset_config/tasks/main.yml index 450cc9dca..f8f42b771 100644 --- a/roles/openshift_daemonset_config/tasks/main.yml +++ b/roles/openshift_daemonset_config/tasks/main.yml @@ -1,4 +1,9 @@ --- +- name: create the namespace + oc_project: + state: present + name: "{{ openshift_daemonset_config_namespace }}" + - name: add a sa oc_serviceaccount: name: "{{ openshift_daemonset_config_sa_name }}" @@ -25,11 +30,6 @@ dest: "{{ item.value }}" with_dict: "{{ openshift_daemonset_config_configmap_files }}" -- name: create the namespace - oc_project: - state: present - name: "{{ openshift_daemonset_config_namespace }}" - - name: lay down secrets oc_secret: state: present @@ -39,6 +39,7 @@ contents: "{{ openshift_daemonset_config_secrets }}" when: - openshift_daemonset_config_secrets != {} + register: secout - name: create the configmap oc_configmap: @@ -47,6 +48,7 @@ namespace: "{{ openshift_daemonset_config_namespace }}" from_literal: "{{ openshift_daemonset_config_configmap_literals }}" from_file: "{{ openshift_daemonset_config_configmap_files }}" + register: cmout - name: deploy daemonset oc_obj: @@ -56,3 +58,4 @@ kind: daemonset files: - 
/tmp/daemonset.yml + force: "{{ True if cmout.changed or secout.changed else False | bool }}" diff --git a/roles/openshift_daemonset_config/templates/daemonset.yml.j2 b/roles/openshift_daemonset_config/templates/daemonset.yml.j2 index 9792f6d16..02cd5bcfd 100644 --- a/roles/openshift_daemonset_config/templates/daemonset.yml.j2 +++ b/roles/openshift_daemonset_config/templates/daemonset.yml.j2 @@ -33,7 +33,7 @@ spec: hostIPC: true containers: - name: config - image: centos:7 + image: "{{ openshift_daemonset_config_image }}" env: - name: RESYNC_INTERVAL value: "{{ openshift_daemonset_config_interval }}" @@ -50,8 +50,8 @@ spec: sh /opt/config/{{ openshift_daemonset_config_script }} # sleep for ${RESYNC_INTERVAL} minutes, then loop. if we fail Kubelet will restart us again - echo "Success, sleeping for ${RESYNC_INTERVAL}s" - exec sleep ${RESYNC_INTERVAL} + echo "Success, sleeping for ${RESYNC_INTERVAL}s. Date: $(date)" + sleep ${RESYNC_INTERVAL} # Return to perform the config done @@ -68,6 +68,8 @@ spec: # Our node configuration - mountPath: /opt/config name: config + - mountPath: /opt/tmp_shared_config + name: tmp-shared-dir {% if openshift_daemonset_config_secrets != {} %} # Our delivered secrets - mountPath: /opt/secrets @@ -79,12 +81,14 @@ spec: memory: {{ openshift_daemonset_config_resources.memory }} {% if openshift_daemonset_config_monitoring %} - name: monitoring - image: openshifttools/oso-centos7-host-monitoring:latest + image: "{{ openshift_daemonset_config_monitoring_image }}" + env: + - name: OO_PAUSE_ON_START + value: "{{ openshift_daemonset_config_monitoring_pos }}" securityContext: # Must be root to read content runAsUser: 0 privileged: true - volumeMounts: - mountPath: /host name: host @@ -118,17 +122,23 @@ spec: - mountPath: /host/var/cache/yum subPath: var/cache/yum name: host - - mountPath: /container_setup/monitoring-config.yml - subPath: monitoring-config.yaml - name: config + readOnly: true + - mountPath: /container_setup + name: tmp-shared-dir - mountPath: /opt/config name: config +{% if openshift_daemonset_config_secrets != {} %} + - mountPath: /opt/secrets + name: secrets +{% endif %} resources: requests: cpu: 10m memory: 10Mi {% endif %} volumes: + - name: tmp-shared-dir + emptyDir: {} - name: config configMap: name: {{ openshift_daemonset_config_configmap_name }} diff --git a/roles/openshift_logging/defaults/main.yml b/roles/openshift_logging/defaults/main.yml index 2f1aa061f..e887fd691 100644 --- a/roles/openshift_logging/defaults/main.yml +++ b/roles/openshift_logging/defaults/main.yml @@ -12,6 +12,8 @@ openshift_logging_install_logging: False openshift_logging_purge_logging: False openshift_logging_image_pull_secret: "" +openshift_logging_es5_techpreview: False + openshift_logging_curator_default_days: 30 openshift_logging_curator_run_hour: 0 openshift_logging_curator_run_minute: 0 diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml index c905502ac..9fabc5826 100644 --- a/roles/openshift_logging/tasks/install_logging.yaml +++ b/roles/openshift_logging/tasks/install_logging.yaml @@ -59,6 +59,14 @@ vars: generated_certs_dir: "{{openshift.common.config_base}}/logging" +- set_fact: + __base_file_dir: "{{ '5.x' if openshift_logging_es5_techpreview | bool else '2.x' }}" + __es_version: "{{ '5.x' if openshift_logging_es5_techpreview | bool else '2.x' }}" + +- set_fact: + openshift_logging_image_version: "techpreview" + when: openshift_logging_es5_techpreview | bool + ## Elasticsearch - set_fact: 
es_indices={{ es_indices | default([]) + [item | int - 1] }} diff --git a/roles/openshift_logging/tasks/main.yaml b/roles/openshift_logging/tasks/main.yaml index 60cc399fa..57426bc77 100644 --- a/roles/openshift_logging/tasks/main.yaml +++ b/roles/openshift_logging/tasks/main.yaml @@ -3,6 +3,11 @@ msg: Only one Fluentd nodeselector key pair should be provided when: openshift_logging_fluentd_nodeselector.keys() | count > 1 +- assert: + that: openshift_deployment_type in ['origin'] + msg: "Only 'origin' deployments are allowed with openshift_logging_es5_techpreview set to true" + when: openshift_logging_es5_techpreview | bool + - name: Create temp directory for doing work in command: mktemp -d /tmp/openshift-logging-ansible-XXXXXX register: mktemp diff --git a/roles/openshift_logging_curator/files/curator.yml b/roles/openshift_logging_curator/files/2.x/curator.yml index 8d62d8e7d..8d62d8e7d 100644 --- a/roles/openshift_logging_curator/files/curator.yml +++ b/roles/openshift_logging_curator/files/2.x/curator.yml diff --git a/roles/openshift_logging_curator/files/5.x/curator.yml b/roles/openshift_logging_curator/files/5.x/curator.yml new file mode 100644 index 000000000..8d62d8e7d --- /dev/null +++ b/roles/openshift_logging_curator/files/5.x/curator.yml @@ -0,0 +1,18 @@ +# Logging example curator config file + +# uncomment and use this to override the defaults from env vars +#.defaults: +# delete: +# days: 30 +# runhour: 0 +# runminute: 0 + +# to keep ops logs for a different duration: +#.operations: +# delete: +# weeks: 8 + +# example for a normal project +#myapp: +# delete: +# weeks: 1 diff --git a/roles/openshift_logging_curator/tasks/main.yaml b/roles/openshift_logging_curator/tasks/main.yaml index cc68998f5..6e8605d28 100644 --- a/roles/openshift_logging_curator/tasks/main.yaml +++ b/roles/openshift_logging_curator/tasks/main.yaml @@ -52,7 +52,7 @@ # configmap - copy: - src: curator.yml + src: "{{ __base_file_dir }}/curator.yml" dest: "{{ tempdir }}/curator.yml" changed_when: no @@ -96,7 +96,7 @@ # TODO: scale should not exceed 1 - name: Generate Curator deploymentconfig template: - src: curator.j2 + src: "{{ __base_file_dir }}/curator.j2" dest: "{{ tempdir }}/templates/curator-dc.yaml" vars: component: "{{ curator_component }}" diff --git a/roles/openshift_logging_curator/templates/curator.j2 b/roles/openshift_logging_curator/templates/2.x/curator.j2 index 8acff8141..8acff8141 100644 --- a/roles/openshift_logging_curator/templates/curator.j2 +++ b/roles/openshift_logging_curator/templates/2.x/curator.j2 diff --git a/roles/openshift_logging_curator/templates/5.x/curator.j2 b/roles/openshift_logging_curator/templates/5.x/curator.j2 new file mode 100644 index 000000000..8acff8141 --- /dev/null +++ b/roles/openshift_logging_curator/templates/5.x/curator.j2 @@ -0,0 +1,113 @@ +apiVersion: "v1" +kind: "DeploymentConfig" +metadata: + name: "{{deploy_name}}" + labels: + provider: openshift + component: "{{component}}" + logging-infra: "{{logging_component}}" +spec: + replicas: {{curator_replicas|default(1)}} + selector: + provider: openshift + component: "{{component}}" + logging-infra: "{{logging_component}}" + strategy: + rollingParams: + intervalSeconds: 1 + timeoutSeconds: 600 + updatePeriodSeconds: 1 + type: Recreate + template: + metadata: + name: "{{deploy_name}}" + labels: + logging-infra: "{{logging_component}}" + provider: openshift + component: "{{component}}" + spec: + terminationGracePeriod: 600 + serviceAccountName: aggregated-logging-curator +{% if curator_node_selector is iterable 
and curator_node_selector | length > 0 %} + nodeSelector: +{% for key, value in curator_node_selector.items() %} + {{key}}: "{{value}}" +{% endfor %} +{% endif %} + containers: + - + name: "curator" + image: {{image}} + imagePullPolicy: IfNotPresent +{% if (curator_memory_limit is defined and curator_memory_limit is not none and curator_memory_limit != "") or (curator_cpu_limit is defined and curator_cpu_limit is not none and curator_cpu_limit != "") or (curator_cpu_request is defined and curator_cpu_request is not none and curator_cpu_request != "") %} + resources: +{% if (curator_memory_limit is defined and curator_memory_limit is not none and curator_memory_limit != "") or (curator_cpu_limit is defined and curator_cpu_limit is not none and curator_cpu_limit != "") %} + limits: +{% if curator_cpu_limit is defined and curator_cpu_limit is not none and curator_cpu_limit != "" %} + cpu: "{{curator_cpu_limit}}" +{% endif %} +{% if curator_memory_limit is defined and curator_memory_limit is not none and curator_memory_limit != "" %} + memory: "{{curator_memory_limit}}" +{% endif %} +{% endif %} +{% if (curator_memory_limit is defined and curator_memory_limit is not none and curator_memory_limit != "") or (curator_cpu_request is defined and curator_cpu_request is not none and curator_cpu_request != "") %} + requests: +{% if curator_cpu_request is defined and curator_cpu_request is not none and curator_cpu_request != "" %} + cpu: "{{curator_cpu_request}}" +{% endif %} +{% if curator_memory_limit is defined and curator_memory_limit is not none and curator_memory_limit != "" %} + memory: "{{curator_memory_limit}}" +{% endif %} +{% endif %} +{% endif %} + env: + - + name: "K8S_HOST_URL" + value: "{{openshift_logging_curator_master_url}}" + - + name: "ES_HOST" + value: "{{es_host}}" + - + name: "ES_PORT" + value: "{{es_port}}" + - + name: "ES_CLIENT_CERT" + value: "/etc/curator/keys/cert" + - + name: "ES_CLIENT_KEY" + value: "/etc/curator/keys/key" + - + name: "ES_CA" + value: "/etc/curator/keys/ca" + - + name: "CURATOR_DEFAULT_DAYS" + value: "{{openshift_logging_curator_default_days}}" + - + name: "CURATOR_RUN_HOUR" + value: "{{openshift_logging_curator_run_hour}}" + - + name: "CURATOR_RUN_MINUTE" + value: "{{openshift_logging_curator_run_minute}}" + - + name: "CURATOR_RUN_TIMEZONE" + value: "{{openshift_logging_curator_run_timezone}}" + - + name: "CURATOR_SCRIPT_LOG_LEVEL" + value: "{{openshift_logging_curator_script_log_level}}" + - + name: "CURATOR_LOG_LEVEL" + value: "{{openshift_logging_curator_log_level}}" + volumeMounts: + - name: certs + mountPath: /etc/curator/keys + readOnly: true + - name: config + mountPath: /etc/curator/settings + readOnly: true + volumes: + - name: certs + secret: + secretName: logging-curator + - name: config + configMap: + name: logging-curator diff --git a/roles/openshift_logging_elasticsearch/files/rolebinding-reader.yml b/roles/openshift_logging_elasticsearch/files/rolebinding-reader.yml deleted file mode 100644 index 567c9f289..000000000 --- a/roles/openshift_logging_elasticsearch/files/rolebinding-reader.yml +++ /dev/null @@ -1,9 +0,0 @@ -apiVersion: v1 -kind: ClusterRole -metadata: - name: rolebinding-reader -rules: -- resources: - - clusterrolebindings - verbs: - - get diff --git a/roles/openshift_logging_elasticsearch/tasks/determine_version.yaml b/roles/openshift_logging_elasticsearch/tasks/determine_version.yaml index c55e7c5ea..a7cc8f0ec 100644 --- a/roles/openshift_logging_elasticsearch/tasks/determine_version.yaml +++ 
b/roles/openshift_logging_elasticsearch/tasks/determine_version.yaml @@ -10,10 +10,14 @@ # should we just assume that we will have the correct major version? - set_fact: es_version="{{ openshift_logging_elasticsearch_image_version | regex_replace('^v?(?P<major>\d)\.(?P<minor>\d).*$', '3_\\g<minor>') }}" - when: openshift_logging_elasticsearch_image_version != 'latest' + when: + - openshift_logging_elasticsearch_image_version != 'latest' + - not openshift_logging_es5_techpreview | default(false) | bool - fail: msg: Invalid version specified for Elasticsearch - when: es_version not in __allowed_es_versions + when: + - es_version not in __allowed_es_versions + - not openshift_logging_es5_techpreview | default(false) | bool - include_tasks: get_es_version.yml diff --git a/roles/openshift_logging_elasticsearch/tasks/main.yaml b/roles/openshift_logging_elasticsearch/tasks/main.yaml index b731d93a0..8a174f0d5 100644 --- a/roles/openshift_logging_elasticsearch/tasks/main.yaml +++ b/roles/openshift_logging_elasticsearch/tasks/main.yaml @@ -64,7 +64,6 @@ # we want to make sure we have all the necessary components here # service account - - name: Create ES service account oc_serviceaccount: state: present @@ -82,19 +81,14 @@ - openshift_logging_image_pull_secret == '' # rolebinding reader -- copy: - src: rolebinding-reader.yml - dest: "{{ tempdir }}/rolebinding-reader.yml" - - name: Create rolebinding-reader role - oc_obj: + oc_clusterrole: state: present - name: "rolebinding-reader" - kind: clusterrole - namespace: "{{ openshift_logging_elasticsearch_namespace }}" - files: - - "{{ tempdir }}/rolebinding-reader.yml" - delete_after: true + name: rolebinding-reader + rules: + - apiGroups: [""] + resources: ["clusterrolebindings"] + verbs: ["get"] # SA roles - name: Set rolebinding-reader permissions for ES @@ -114,7 +108,7 @@ # logging-metrics-reader role - template: - src: logging-metrics-role.j2 + src: "{{ __base_file_dir }}/logging-metrics-role.j2" dest: "{{mktemp.stdout}}/templates/logging-metrics-role.yml" vars: namespace: "{{ openshift_logging_elasticsearch_namespace }}" @@ -150,7 +144,7 @@ # View role and binding - name: Generate logging-elasticsearch-view-role template: - src: rolebinding.j2 + src: "{{ __base_file_dir }}/rolebinding.j2" dest: "{{mktemp.stdout}}/logging-elasticsearch-view-role.yaml" vars: obj_name: logging-elasticsearch-view-role @@ -183,51 +177,80 @@ msg: "The openshift_logging_es_log_appenders '{{ openshift_logging_es_log_appenders }}' has an unrecognized option and only supports the following as a list: {{ __es_log_appenders | join(', ') }}" - template: - src: elasticsearch-logging.yml.j2 - dest: "{{ tempdir }}/elasticsearch-logging.yml" - vars: - root_logger: "{{openshift_logging_es_log_appenders | join(', ')}}" - when: es_logging_contents is undefined - changed_when: no - -- template: - src: elasticsearch.yml.j2 + src: "{{ __base_file_dir }}/elasticsearch.yml.j2" dest: "{{ tempdir }}/elasticsearch.yml" vars: allow_cluster_reader: "{{ openshift_logging_elasticsearch_ops_allow_cluster_reader | lower | default('false') }}" es_number_of_shards: "{{ openshift_logging_es_number_of_shards | default(1) }}" es_number_of_replicas: "{{ openshift_logging_es_number_of_replicas| default(0) }}" es_kibana_index_mode: "{{ openshift_logging_elasticsearch_kibana_index_mode | default('unique') }}" - - when: es_config_contents is undefined changed_when: no # create diff between current configmap files and our current files -# NOTE: include_role must be used instead of import_role because -# 
this task file is looped over from another role. -- include_role: - name: openshift_logging - tasks_from: patch_configmap_files.yaml - vars: - configmap_name: "logging-elasticsearch" - configmap_namespace: "logging" - configmap_file_names: - - current_file: "elasticsearch.yml" - new_file: "{{ tempdir }}/elasticsearch.yml" - protected_lines: ["number_of_shards", "number_of_replicas"] - - current_file: "logging.yml" - new_file: "{{ tempdir }}/elasticsearch-logging.yml" - -- name: Set ES configmap - oc_configmap: - state: present - name: "{{ elasticsearch_name }}" - namespace: "{{ openshift_logging_elasticsearch_namespace }}" - from_file: - elasticsearch.yml: "{{ tempdir }}/elasticsearch.yml" - logging.yml: "{{ tempdir }}/elasticsearch-logging.yml" - register: es_config_creation - notify: "restart elasticsearch" +- when: not openshift_logging_es5_techpreview + block: + - template: + src: "{{ __base_file_dir }}/elasticsearch-logging.yml.j2" + dest: "{{ tempdir }}/elasticsearch-logging.yml" + vars: + root_logger: "{{openshift_logging_es_log_appenders | join(', ')}}" + changed_when: no + + - include_role: + name: openshift_logging + tasks_from: patch_configmap_files.yaml + vars: + configmap_name: "logging-elasticsearch" + configmap_namespace: "logging" + configmap_file_names: + - current_file: "elasticsearch.yml" + new_file: "{{ tempdir }}/elasticsearch.yml" + protected_lines: ["number_of_shards", "number_of_replicas"] + - current_file: "logging.yml" + new_file: "{{ tempdir }}/elasticsearch-logging.yml" + + - name: Set ES configmap + oc_configmap: + state: present + name: "{{ elasticsearch_name }}" + namespace: "{{ openshift_logging_elasticsearch_namespace }}" + from_file: + elasticsearch.yml: "{{ tempdir }}/elasticsearch.yml" + logging.yml: "{{ tempdir }}/elasticsearch-logging.yml" + register: es_config_creation + notify: "restart elasticsearch" + +- when: openshift_logging_es5_techpreview | bool + block: + - template: + src: "{{ __base_file_dir }}/log4j2.properties.j2" + dest: "{{ tempdir }}/log4j2.properties" + vars: + root_logger: "{{ openshift_logging_es_log_appenders | list }}" + changed_when: no + + - include_role: + name: openshift_logging + tasks_from: patch_configmap_files.yaml + vars: + configmap_name: "logging-elasticsearch" + configmap_namespace: "logging" + configmap_file_names: + - current_file: "elasticsearch.yml" + new_file: "{{ tempdir }}/elasticsearch.yml" + - current_file: "log4j2.properties" + new_file: "{{ tempdir }}/log4j2.properties" + + - name: Set ES configmap + oc_configmap: + state: present + name: "{{ elasticsearch_name }}" + namespace: "{{ openshift_logging_elasticsearch_namespace }}" + from_file: + elasticsearch.yml: "{{ tempdir }}/elasticsearch.yml" + log4j2.properties: "{{ tempdir }}/log4j2.properties" + register: es_config_creation + notify: "restart elasticsearch" - when: es_config_creation.changed | bool block: @@ -341,7 +364,7 @@ # storageclasses with the storageClassName set to "" in pvc.j2 - name: Creating ES storage template - static template: - src: pvc.j2 + src: "{{ __base_file_dir }}/pvc.j2" dest: "{{ tempdir }}/templates/logging-es-pvc.yml" vars: obj_name: "{{ openshift_logging_elasticsearch_pvc_name }}" @@ -355,7 +378,7 @@ # Storageclasses are used by default if configured - name: Creating ES storage template - dynamic template: - src: pvc.j2 + src: "{{ __base_file_dir }}/pvc.j2" dest: "{{ tempdir }}/templates/logging-es-pvc.yml" vars: obj_name: "{{ openshift_logging_elasticsearch_pvc_name }}" @@ -386,7 +409,7 @@ # DC - name: Set ES dc templates 
template: - src: es.j2 + src: "{{ __base_file_dir }}/es.j2" dest: "{{ tempdir }}/templates/logging-es-dc.yml" vars: es_cluster_name: "{{ es_component }}" @@ -404,6 +427,8 @@ deploy_type: "{{ openshift_logging_elasticsearch_deployment_type }}" es_replicas: 1 basic_auth_passwd: "{{ _logging_metrics_proxy_passwd | b64decode }}" + es_number_of_shards: "{{ openshift_logging_es_number_of_shards | default(1) }}" + es_number_of_replicas: "{{ openshift_logging_es_number_of_replicas| default(0) }}" - name: Set ES dc oc_obj: @@ -462,7 +487,7 @@ - name: Generating Elasticsearch {{ es_component }} route template template: - src: route_reencrypt.j2 + src: "{{ __base_file_dir }}/route_reencrypt.j2" dest: "{{mktemp.stdout}}/templates/logging-{{ es_component }}-route.yaml" vars: obj_name: "logging-{{ es_component }}" diff --git a/roles/openshift_logging_elasticsearch/templates/elasticsearch-logging.yml.j2 b/roles/openshift_logging_elasticsearch/templates/2.x/elasticsearch-logging.yml.j2 index c7b2b2721..c7b2b2721 100644 --- a/roles/openshift_logging_elasticsearch/templates/elasticsearch-logging.yml.j2 +++ b/roles/openshift_logging_elasticsearch/templates/2.x/elasticsearch-logging.yml.j2 diff --git a/roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2 b/roles/openshift_logging_elasticsearch/templates/2.x/elasticsearch.yml.j2 index 65b08d970..65b08d970 100644 --- a/roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2 +++ b/roles/openshift_logging_elasticsearch/templates/2.x/elasticsearch.yml.j2 diff --git a/roles/openshift_logging_elasticsearch/templates/es.j2 b/roles/openshift_logging_elasticsearch/templates/2.x/es.j2 index b1d6a4489..e3315adc8 100644 --- a/roles/openshift_logging_elasticsearch/templates/es.j2 +++ b/roles/openshift_logging_elasticsearch/templates/2.x/es.j2 @@ -41,43 +41,7 @@ spec: {% endfor %} {% endif %} containers: - - name: proxy - image: {{ proxy_image }} - imagePullPolicy: IfNotPresent - args: - - --upstream-ca=/etc/elasticsearch/secret/admin-ca - - --https-address=:4443 - - -provider=openshift - - -client-id={{openshift_logging_elasticsearch_prometheus_sa}} - - -client-secret-file=/var/run/secrets/kubernetes.io/serviceaccount/token - - -cookie-secret={{ 16 | lib_utils_oo_random_word | b64encode }} - - -basic-auth-password={{ basic_auth_passwd }} - - -upstream=https://localhost:9200 - - '-openshift-sar={"namespace": "{{ openshift_logging_elasticsearch_namespace}}", "verb": "view", "resource": "prometheus", "group": "metrics.openshift.io"}' - - '-openshift-delegate-urls={"/": {"resource": "prometheus", "verb": "view", "group": "metrics.openshift.io", "namespace": "{{ openshift_logging_elasticsearch_namespace}}"}}' - - --tls-cert=/etc/tls/private/tls.crt - - --tls-key=/etc/tls/private/tls.key - - -pass-access-token - - -pass-user-headers - ports: - - containerPort: 4443 - name: proxy - protocol: TCP - volumeMounts: - - mountPath: /etc/tls/private - name: proxy-tls - readOnly: true - - mountPath: /etc/elasticsearch/secret - name: elasticsearch - readOnly: true - resources: - limits: - memory: "{{openshift_logging_elasticsearch_proxy_memory_limit }}" - requests: - cpu: "{{openshift_logging_elasticsearch_proxy_cpu_request }}" - memory: "{{openshift_logging_elasticsearch_proxy_memory_limit }}" - - - name: "elasticsearch" + - name: "elasticsearch" image: {{image}} imagePullPolicy: IfNotPresent resources: @@ -165,6 +129,42 @@ spec: initialDelaySeconds: 10 timeoutSeconds: 30 periodSeconds: 5 + - + name: proxy + image: {{ proxy_image }} + imagePullPolicy: 
IfNotPresent + args: + - --upstream-ca=/etc/elasticsearch/secret/admin-ca + - --https-address=:4443 + - -provider=openshift + - -client-id={{openshift_logging_elasticsearch_prometheus_sa}} + - -client-secret-file=/var/run/secrets/kubernetes.io/serviceaccount/token + - -cookie-secret={{ 16 | lib_utils_oo_random_word | b64encode }} + - -basic-auth-password={{ basic_auth_passwd }} + - -upstream=https://localhost:9200 + - '-openshift-sar={"namespace": "{{ openshift_logging_elasticsearch_namespace}}", "verb": "view", "resource": "prometheus", "group": "metrics.openshift.io"}' + - '-openshift-delegate-urls={"/": {"resource": "prometheus", "verb": "view", "group": "metrics.openshift.io", "namespace": "{{ openshift_logging_elasticsearch_namespace}}"}}' + - --tls-cert=/etc/tls/private/tls.crt + - --tls-key=/etc/tls/private/tls.key + - -pass-access-token + - -pass-user-headers + ports: + - containerPort: 4443 + name: proxy + protocol: TCP + volumeMounts: + - mountPath: /etc/tls/private + name: proxy-tls + readOnly: true + - mountPath: /etc/elasticsearch/secret + name: elasticsearch + readOnly: true + resources: + limits: + memory: "{{openshift_logging_elasticsearch_proxy_memory_limit }}" + requests: + cpu: "{{openshift_logging_elasticsearch_proxy_cpu_request }}" + memory: "{{openshift_logging_elasticsearch_proxy_memory_limit }}" volumes: - name: proxy-tls secret: diff --git a/roles/openshift_logging_elasticsearch/templates/logging-metrics-role.j2 b/roles/openshift_logging_elasticsearch/templates/2.x/logging-metrics-role.j2 index d9800e5a5..d9800e5a5 100644 --- a/roles/openshift_logging_elasticsearch/templates/logging-metrics-role.j2 +++ b/roles/openshift_logging_elasticsearch/templates/2.x/logging-metrics-role.j2 diff --git a/roles/openshift_logging_elasticsearch/templates/pvc.j2 b/roles/openshift_logging_elasticsearch/templates/2.x/pvc.j2 index 3c6896df4..3c6896df4 100644 --- a/roles/openshift_logging_elasticsearch/templates/pvc.j2 +++ b/roles/openshift_logging_elasticsearch/templates/2.x/pvc.j2 diff --git a/roles/openshift_logging_elasticsearch/templates/rolebinding.j2 b/roles/openshift_logging_elasticsearch/templates/2.x/rolebinding.j2 index fcd4e87cc..fcd4e87cc 100644 --- a/roles/openshift_logging_elasticsearch/templates/rolebinding.j2 +++ b/roles/openshift_logging_elasticsearch/templates/2.x/rolebinding.j2 diff --git a/roles/openshift_logging_elasticsearch/templates/route_reencrypt.j2 b/roles/openshift_logging_elasticsearch/templates/2.x/route_reencrypt.j2 index d2e8b8bcb..d2e8b8bcb 100644 --- a/roles/openshift_logging_elasticsearch/templates/route_reencrypt.j2 +++ b/roles/openshift_logging_elasticsearch/templates/2.x/route_reencrypt.j2 diff --git a/roles/openshift_logging_elasticsearch/templates/5.x/elasticsearch.yml.j2 b/roles/openshift_logging_elasticsearch/templates/5.x/elasticsearch.yml.j2 new file mode 100644 index 000000000..009471d2c --- /dev/null +++ b/roles/openshift_logging_elasticsearch/templates/5.x/elasticsearch.yml.j2 @@ -0,0 +1,74 @@ +cluster: + name: ${CLUSTER_NAME} + +script: + inline: true + stored: true + +node: + name: ${DC_NAME} + master: ${IS_MASTER} + data: ${HAS_DATA} + max_local_storage_nodes: 1 + +network: + host: 0.0.0.0 + +cloud: + kubernetes: + service: ${SERVICE_DNS} + namespace: ${NAMESPACE} + +discovery.zen: + hosts_provider: kubernetes + minimum_master_nodes: ${NODE_QUORUM} + +gateway: + recover_after_nodes: ${NODE_QUORUM} + expected_nodes: ${RECOVER_EXPECTED_NODES} + recover_after_time: ${RECOVER_AFTER_TIME} + +io.fabric8.elasticsearch.kibana.mapping.app: 
/usr/share/elasticsearch/index_patterns/com.redhat.viaq-openshift.index-pattern.json +io.fabric8.elasticsearch.kibana.mapping.ops: /usr/share/elasticsearch/index_patterns/com.redhat.viaq-openshift.index-pattern.json +io.fabric8.elasticsearch.kibana.mapping.empty: /usr/share/elasticsearch/index_patterns/com.redhat.viaq-openshift.index-pattern.json + +openshift.config: + use_common_data_model: true + project_index_prefix: "project" + time_field_name: "@timestamp" + +openshift.searchguard: + keystore.path: /etc/elasticsearch/secret/admin.jks + truststore.path: /etc/elasticsearch/secret/searchguard.truststore + +openshift.operations.allow_cluster_reader: {{allow_cluster_reader | default (false)}} + +openshift.kibana.index.mode: {{es_kibana_index_mode | default('unique')}} + +path: + data: /elasticsearch/persistent/${CLUSTER_NAME}/data + logs: /elasticsearch/${CLUSTER_NAME}/logs + +searchguard: + authcz.admin_dn: + - CN=system.admin,OU=OpenShift,O=Logging + config_index_name: ".searchguard.${DC_NAME}" + ssl: + transport: + enabled: true + enforce_hostname_verification: false + keystore_type: JKS + keystore_filepath: /etc/elasticsearch/secret/searchguard.key + keystore_password: kspass + truststore_type: JKS + truststore_filepath: /etc/elasticsearch/secret/searchguard.truststore + truststore_password: tspass + http: + enabled: true + keystore_type: JKS + keystore_filepath: /etc/elasticsearch/secret/key + keystore_password: kspass + clientauth_mode: OPTIONAL + truststore_type: JKS + truststore_filepath: /etc/elasticsearch/secret/truststore + truststore_password: tspass diff --git a/roles/openshift_logging_elasticsearch/templates/5.x/es.j2 b/roles/openshift_logging_elasticsearch/templates/5.x/es.j2 new file mode 100644 index 000000000..8685b7849 --- /dev/null +++ b/roles/openshift_logging_elasticsearch/templates/5.x/es.j2 @@ -0,0 +1,194 @@ +apiVersion: "v1" +kind: "DeploymentConfig" +metadata: + name: "{{deploy_name}}" + labels: + provider: openshift + component: "{{component}}" + deployment: "{{deploy_name}}" + logging-infra: "{{logging_component}}" +spec: + replicas: {{es_replicas|default(1)}} + revisionHistoryLimit: 0 + selector: + provider: openshift + component: "{{component}}" + deployment: "{{deploy_name}}" + logging-infra: "{{logging_component}}" + strategy: + type: Recreate + triggers: [] + template: + metadata: + name: "{{deploy_name}}" + labels: + logging-infra: "{{logging_component}}" + provider: openshift + component: "{{component}}" + deployment: "{{deploy_name}}" + spec: + terminationGracePeriod: 600 + serviceAccountName: aggregated-logging-elasticsearch + securityContext: + supplementalGroups: +{% for group in es_storage_groups %} + - {{group}} +{% endfor %} +{% if es_node_selector is iterable and es_node_selector | length > 0 %} + nodeSelector: +{% for key, value in es_node_selector.items() %} + {{key}}: "{{value}}" +{% endfor %} +{% endif %} + containers: + - name: "elasticsearch" + image: {{image}} + imagePullPolicy: IfNotPresent + resources: + limits: +{% if es_cpu_limit is defined and es_cpu_limit is not none and es_cpu_limit != '' %} + cpu: "{{es_cpu_limit}}" +{% endif %} + memory: "{{es_memory_limit}}" + requests: + cpu: "{{es_cpu_request}}" + memory: "{{es_memory_limit}}" +{% if es_container_security_context %} + securityContext: {{ es_container_security_context | to_yaml }} +{% endif %} + ports: + - + containerPort: 9200 + name: "restapi" + - + containerPort: 9300 + name: "cluster" + env: + - + name: "DC_NAME" + value: "{{deploy_name}}" + - + name: "NAMESPACE" + valueFrom: 
+ fieldRef: + fieldPath: metadata.namespace + - + name: "KUBERNETES_TRUST_CERT" + value: "true" + - + name: "SERVICE_DNS" + value: "logging-{{es_cluster_name}}-cluster" + - + name: "CLUSTER_NAME" + value: "logging-{{es_cluster_name}}" + - + name: "INSTANCE_RAM" + value: "{{openshift_logging_elasticsearch_memory_limit}}" + - + name: "HEAP_DUMP_LOCATION" + value: "/elasticsearch/persistent/heapdump.hprof" + - + name: "NODE_QUORUM" + value: "{{es_node_quorum | int}}" + - + name: "RECOVER_EXPECTED_NODES" + value: "{{es_recover_expected_nodes}}" + - + name: "RECOVER_AFTER_TIME" + value: "{{openshift_logging_elasticsearch_recover_after_time}}" + - + name: "READINESS_PROBE_TIMEOUT" + value: "30" + - + name: "POD_LABEL" + value: "component={{component}}" + - + name: "IS_MASTER" + value: "{% if deploy_type in ['data-master', 'master'] %}true{% else %}false{% endif %}" + + - + name: "HAS_DATA" + value: "{% if deploy_type in ['data-master', 'data-client'] %}true{% else %}false{% endif %}" + - + name: "PROMETHEUS_USER" + value: "{{openshift_logging_elasticsearch_prometheus_sa}}" + + - + name: "PRIMARY_SHARDS" + value: "{{ es_number_of_shards | default ('1') }}" + + - + name: "REPLICA_SHARDS" + value: "{{ es_number_of_replicas | default ('0') }}" + + volumeMounts: + - name: elasticsearch + mountPath: /etc/elasticsearch/secret + readOnly: true + - name: elasticsearch-config + mountPath: /usr/share/java/elasticsearch/config + readOnly: true + - name: elasticsearch-storage + mountPath: /elasticsearch/persistent + readinessProbe: + exec: + command: + - "/usr/share/elasticsearch/probe/readiness.sh" + initialDelaySeconds: 10 + timeoutSeconds: 30 + periodSeconds: 5 + - + name: proxy + image: {{ proxy_image }} + imagePullPolicy: IfNotPresent + args: + - --upstream-ca=/etc/elasticsearch/secret/admin-ca + - --https-address=:4443 + - -provider=openshift + - -client-id={{openshift_logging_elasticsearch_prometheus_sa}} + - -client-secret-file=/var/run/secrets/kubernetes.io/serviceaccount/token + - -cookie-secret={{ 16 | lib_utils_oo_random_word | b64encode }} + - -upstream=https://localhost:9200 + - '-openshift-sar={"namespace": "{{ openshift_logging_elasticsearch_namespace}}", "verb": "view", "resource": "prometheus", "group": "metrics.openshift.io"}' + - '-openshift-delegate-urls={"/": {"resource": "prometheus", "verb": "view", "group": "metrics.openshift.io", "namespace": "{{ openshift_logging_elasticsearch_namespace}}"}}' + - --tls-cert=/etc/tls/private/tls.crt + - --tls-key=/etc/tls/private/tls.key + - -pass-access-token + - -pass-user-headers + ports: + - containerPort: 4443 + name: proxy + protocol: TCP + volumeMounts: + - mountPath: /etc/tls/private + name: proxy-tls + readOnly: true + - mountPath: /etc/elasticsearch/secret + name: elasticsearch + readOnly: true + resources: + limits: + memory: "{{openshift_logging_elasticsearch_proxy_memory_limit }}" + requests: + cpu: "{{openshift_logging_elasticsearch_proxy_cpu_request }}" + memory: "{{openshift_logging_elasticsearch_proxy_memory_limit }}" + volumes: + - name: proxy-tls + secret: + secretName: prometheus-tls + - name: elasticsearch + secret: + secretName: logging-elasticsearch + - name: elasticsearch-config + configMap: + name: logging-elasticsearch + - name: elasticsearch-storage +{% if openshift_logging_elasticsearch_storage_type == 'pvc' %} + persistentVolumeClaim: + claimName: {{ openshift_logging_elasticsearch_pvc_name }} +{% elif openshift_logging_elasticsearch_storage_type == 'hostmount' %} + hostPath: + path: {{ 
openshift_logging_elasticsearch_hostmount_path }} +{% else %} + emptydir: {} +{% endif %} diff --git a/roles/openshift_logging_elasticsearch/templates/5.x/log4j2.properties.j2 b/roles/openshift_logging_elasticsearch/templates/5.x/log4j2.properties.j2 new file mode 100644 index 000000000..1e78e4ea0 --- /dev/null +++ b/roles/openshift_logging_elasticsearch/templates/5.x/log4j2.properties.j2 @@ -0,0 +1,78 @@ +status = error + +# log action execution errors for easier debugging +logger.action.name = org.elasticsearch.action +logger.action.level = debug + +appender.console.type = Console +appender.console.name = console +appender.console.layout.type = PatternLayout +appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%m%n + +appender.rolling.type = RollingFile +appender.rolling.name = rolling +appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}.log +appender.rolling.layout.type = PatternLayout +appender.rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%.-10000m%n +appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}.log +appender.rolling.policies.type = Policies +appender.rolling.policies.time.type = TimeBasedTriggeringPolicy +appender.rolling.policies.time.interval = 1 +appender.rolling.policies.time.modulate = true + +rootLogger.level = info +{% if 'console' in root_logger %} +rootLogger.appenderRef.console.ref = console +{% endif %} +{% if 'file' in root_logger %} +rootLogger.appenderRef.rolling.ref = rolling +{% endif %} + +appender.deprecation_rolling.type = RollingFile +appender.deprecation_rolling.name = deprecation_rolling +appender.deprecation_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation.log +appender.deprecation_rolling.layout.type = PatternLayout +appender.deprecation_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%.-10000m%n +appender.deprecation_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation-%i.log.gz +appender.deprecation_rolling.policies.type = Policies +appender.deprecation_rolling.policies.size.type = SizeBasedTriggeringPolicy +appender.deprecation_rolling.policies.size.size = 1GB +appender.deprecation_rolling.strategy.type = DefaultRolloverStrategy +appender.deprecation_rolling.strategy.max = 4 + +logger.deprecation.name = org.elasticsearch.deprecation +logger.deprecation.level = warn +logger.deprecation.appenderRef.deprecation_rolling.ref = deprecation_rolling +logger.deprecation.additivity = false + +appender.index_search_slowlog_rolling.type = RollingFile +appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling +appender.index_search_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_search_slowlog.log +appender.index_search_slowlog_rolling.layout.type = PatternLayout +appender.index_search_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%.-10000m%n +appender.index_search_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_search_slowlog-%d{yyyy-MM-dd}.log +appender.index_search_slowlog_rolling.policies.type = Policies +appender.index_search_slowlog_rolling.policies.time.type = TimeBasedTriggeringPolicy +appender.index_search_slowlog_rolling.policies.time.interval = 1 +appender.index_search_slowlog_rolling.policies.time.modulate = true + 
+logger.index_search_slowlog_rolling.name = index.search.slowlog +logger.index_search_slowlog_rolling.level = trace +logger.index_search_slowlog_rolling.appenderRef.index_search_slowlog_rolling.ref = index_search_slowlog_rolling +logger.index_search_slowlog_rolling.additivity = false + +appender.index_indexing_slowlog_rolling.type = RollingFile +appender.index_indexing_slowlog_rolling.name = index_indexing_slowlog_rolling +appender.index_indexing_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_indexing_slowlog.log +appender.index_indexing_slowlog_rolling.layout.type = PatternLayout +appender.index_indexing_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%.-10000m%n +appender.index_indexing_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_indexing_slowlog-%d{yyyy-MM-dd}.log +appender.index_indexing_slowlog_rolling.policies.type = Policies +appender.index_indexing_slowlog_rolling.policies.time.type = TimeBasedTriggeringPolicy +appender.index_indexing_slowlog_rolling.policies.time.interval = 1 +appender.index_indexing_slowlog_rolling.policies.time.modulate = true + +logger.index_indexing_slowlog.name = index.indexing.slowlog.index +logger.index_indexing_slowlog.level = trace +logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling.ref = index_indexing_slowlog_rolling +logger.index_indexing_slowlog.additivity = false diff --git a/roles/openshift_logging_elasticsearch/templates/5.x/logging-metrics-role.j2 b/roles/openshift_logging_elasticsearch/templates/5.x/logging-metrics-role.j2 new file mode 100644 index 000000000..d9800e5a5 --- /dev/null +++ b/roles/openshift_logging_elasticsearch/templates/5.x/logging-metrics-role.j2 @@ -0,0 +1,31 @@ +--- +apiVersion: v1 +kind: List +items: +- apiVersion: rbac.authorization.k8s.io/v1beta1 + kind: Role + metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + name: prometheus-metrics-viewer + namespace: {{ namespace }} + rules: + - apiGroups: + - metrics.openshift.io + resources: + - prometheus + verbs: + - view +- apiVersion: rbac.authorization.k8s.io/v1beta1 + kind: RoleBinding + metadata: + name: prometheus-metrics-viewer + namespace: {{ namespace }} + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: prometheus-metrics-viewer + subjects: + - kind: ServiceAccount + namespace: {{ role_namespace }} + name: {{ role_user }} diff --git a/roles/openshift_logging_elasticsearch/templates/5.x/pvc.j2 b/roles/openshift_logging_elasticsearch/templates/5.x/pvc.j2 new file mode 100644 index 000000000..3c6896df4 --- /dev/null +++ b/roles/openshift_logging_elasticsearch/templates/5.x/pvc.j2 @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: {{obj_name}} + labels: + logging-infra: support +{% if annotations is defined %} + annotations: +{% for key,value in annotations.items() %} + {{key}}: {{value}} +{% endfor %} +{% endif %} +spec: +{% if pv_selector is defined and pv_selector is mapping %} + selector: + matchLabels: +{% for key,value in pv_selector.items() %} + {{key}}: {{value}} +{% endfor %} +{% endif %} + accessModes: +{% for mode in access_modes %} + - {{ mode }} +{% endfor %} + resources: + requests: + storage: {{size}} +{% if storage_class_name is defined %} + storageClassName: {{ storage_class_name }} +{% endif %} diff --git a/roles/openshift_logging_elasticsearch/templates/5.x/rolebinding.j2 
b/roles/openshift_logging_elasticsearch/templates/5.x/rolebinding.j2 new file mode 100644 index 000000000..fcd4e87cc --- /dev/null +++ b/roles/openshift_logging_elasticsearch/templates/5.x/rolebinding.j2 @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: RoleBinding +metadata: + name: {{obj_name}} +roleRef: +{% if roleRef.kind is defined %} + kind: {{ roleRef.kind }} +{% endif %} + name: {{ roleRef.name }} +subjects: +{% for sub in subjects %} + - kind: {{ sub.kind }} + name: {{ sub.name }} +{% endfor %} diff --git a/roles/openshift_logging_kibana/templates/route_reencrypt.j2 b/roles/openshift_logging_elasticsearch/templates/5.x/route_reencrypt.j2 index d2e8b8bcb..d2e8b8bcb 100644 --- a/roles/openshift_logging_kibana/templates/route_reencrypt.j2 +++ b/roles/openshift_logging_elasticsearch/templates/5.x/route_reencrypt.j2 diff --git a/roles/openshift_logging_elasticsearch/vars/main.yml b/roles/openshift_logging_elasticsearch/vars/main.yml index 6cc839492..41c1c748d 100644 --- a/roles/openshift_logging_elasticsearch/vars/main.yml +++ b/roles/openshift_logging_elasticsearch/vars/main.yml @@ -4,7 +4,6 @@ __allowed_es_versions: ["3_5", "3_6", "3_7", "3_8", "3_9", "3_10"] __allowed_es_types: ["data-master", "data-client", "master", "client"] __es_log_appenders: ['file', 'console'] __kibana_index_modes: ["unique", "shared_ops"] -__es_version: "2.4.4" __es_local_curl: "curl -s --cacert /etc/elasticsearch/secret/admin-ca --cert /etc/elasticsearch/secret/admin-cert --key /etc/elasticsearch/secret/admin-key" diff --git a/roles/openshift_logging_eventrouter/files/eventrouter-template.yaml b/roles/openshift_logging_eventrouter/files/2.x/eventrouter-template.yaml index cc01c010d..cc01c010d 100644 --- a/roles/openshift_logging_eventrouter/files/eventrouter-template.yaml +++ b/roles/openshift_logging_eventrouter/files/2.x/eventrouter-template.yaml diff --git a/roles/openshift_logging_eventrouter/files/5.x/eventrouter-template.yaml b/roles/openshift_logging_eventrouter/files/5.x/eventrouter-template.yaml new file mode 100644 index 000000000..cc01c010d --- /dev/null +++ b/roles/openshift_logging_eventrouter/files/5.x/eventrouter-template.yaml @@ -0,0 +1,103 @@ +# this openshift template should match (except nodeSelector) jinja2 template in +# ../templates/eventrouter-template.j2 +kind: Template +apiVersion: v1 +metadata: + name: eventrouter-template + annotations: + description: "A pod forwarding kubernetes events to EFK aggregated logging stack." 
+ tags: "events,EFK,logging" +objects: + - kind: ServiceAccount + apiVersion: v1 + metadata: + name: aggregated-logging-eventrouter + - kind: ClusterRole + apiVersion: v1 + metadata: + name: event-reader + rules: + - apiGroups: [""] + resources: ["events"] + verbs: ["get", "watch", "list"] + - kind: ConfigMap + apiVersion: v1 + metadata: + name: logging-eventrouter + data: + config.json: |- + { + "sink": "${SINK}" + } + - kind: DeploymentConfig + apiVersion: v1 + metadata: + name: logging-eventrouter + labels: + component: eventrouter + logging-infra: eventrouter + provider: openshift + spec: + selector: + component: eventrouter + logging-infra: eventrouter + provider: openshift + replicas: ${REPLICAS} + template: + metadata: + labels: + component: eventrouter + logging-infra: eventrouter + provider: openshift + name: logging-eventrouter + spec: + serviceAccount: aggregated-logging-eventrouter + serviceAccountName: aggregated-logging-eventrouter + containers: + - name: kube-eventrouter + image: ${IMAGE} + imagePullPolicy: IfNotPresent + resources: + limits: + memory: ${MEMORY} + cpu: ${CPU} + requires: + memory: ${MEMORY} + volumeMounts: + - name: config-volume + mountPath: /etc/eventrouter + volumes: + - name: config-volume + configMap: + name: logging-eventrouter + - kind: ClusterRoleBinding + apiVersion: v1 + metadata: + name: event-reader-binding + subjects: + - kind: ServiceAccount + name: aggregated-logging-eventrouter + namespace: ${NAMESPACE} + roleRef: + kind: ClusterRole + name: event-reader + +parameters: + - name: SINK + displayName: Sink + value: stdout + - name: REPLICAS + displayName: Replicas + value: "1" + - name: IMAGE + displayName: Image + value: "docker.io/openshift/origin-logging-eventrouter:latest" + - name: MEMORY + displayName: Memory + value: "128Mi" + - name: CPU + displayName: CPU + value: "100m" + - name: NAMESPACE + displayName: Namespace + value: default diff --git a/roles/openshift_logging_eventrouter/tasks/install_eventrouter.yaml b/roles/openshift_logging_eventrouter/tasks/install_eventrouter.yaml index cbbc6a8ec..fffdd9f8b 100644 --- a/roles/openshift_logging_eventrouter/tasks/install_eventrouter.yaml +++ b/roles/openshift_logging_eventrouter/tasks/install_eventrouter.yaml @@ -23,7 +23,7 @@ # create EventRouter deployment config - name: Generate EventRouter template template: - src: eventrouter-template.j2 + src: "{{ __base_file_dir }}/eventrouter-template.j2" dest: "{{ tempdir }}/templates/eventrouter-template.yaml" vars: node_selector: "{{ openshift_logging_eventrouter_nodeselector | default({}) }}" diff --git a/roles/openshift_logging_eventrouter/templates/eventrouter-template.j2 b/roles/openshift_logging_eventrouter/templates/5.x/eventrouter-template.j2 index 3bd29163b..3bd29163b 100644 --- a/roles/openshift_logging_eventrouter/templates/eventrouter-template.j2 +++ b/roles/openshift_logging_eventrouter/templates/5.x/eventrouter-template.j2 diff --git a/roles/openshift_logging_fluentd/files/fluentd-throttle-config.yaml b/roles/openshift_logging_fluentd/files/2.x/fluentd-throttle-config.yaml index 375621ff1..375621ff1 100644 --- a/roles/openshift_logging_fluentd/files/fluentd-throttle-config.yaml +++ b/roles/openshift_logging_fluentd/files/2.x/fluentd-throttle-config.yaml diff --git a/roles/openshift_logging_fluentd/files/secure-forward.conf b/roles/openshift_logging_fluentd/files/2.x/secure-forward.conf index 87410c1c5..87410c1c5 100644 --- a/roles/openshift_logging_fluentd/files/secure-forward.conf +++ 
b/roles/openshift_logging_fluentd/files/2.x/secure-forward.conf diff --git a/roles/openshift_logging_fluentd/files/5.x/fluentd-throttle-config.yaml b/roles/openshift_logging_fluentd/files/5.x/fluentd-throttle-config.yaml new file mode 100644 index 000000000..375621ff1 --- /dev/null +++ b/roles/openshift_logging_fluentd/files/5.x/fluentd-throttle-config.yaml @@ -0,0 +1,7 @@ +# Logging example fluentd throttling config file + +#example-project: +# read_lines_limit: 10 +# +#.operations: +# read_lines_limit: 100 diff --git a/roles/openshift_logging_mux/files/secure-forward.conf b/roles/openshift_logging_fluentd/files/5.x/secure-forward.conf index 87410c1c5..87410c1c5 100644 --- a/roles/openshift_logging_mux/files/secure-forward.conf +++ b/roles/openshift_logging_fluentd/files/5.x/secure-forward.conf diff --git a/roles/openshift_logging_fluentd/tasks/main.yaml b/roles/openshift_logging_fluentd/tasks/main.yaml index 79ebbca08..ef1c53de3 100644 --- a/roles/openshift_logging_fluentd/tasks/main.yaml +++ b/roles/openshift_logging_fluentd/tasks/main.yaml @@ -104,17 +104,17 @@ # create Fluentd configmap - template: - src: fluent.conf.j2 + src: "{{ __base_file_dir }}/fluent.conf.j2" dest: "{{ tempdir }}/fluent.conf" vars: deploy_type: "{{ openshift_logging_fluentd_deployment_type }}" - copy: - src: fluentd-throttle-config.yaml + src: "{{ __base_file_dir }}/fluentd-throttle-config.yaml" dest: "{{ tempdir }}/fluentd-throttle-config.yaml" - copy: - src: secure-forward.conf + src: "{{ __base_file_dir }}/secure-forward.conf" dest: "{{ tempdir }}/secure-forward.conf" - import_role: @@ -161,7 +161,7 @@ # TODO: pass in aggregation configurations - name: Generate logging-fluentd daemonset definition template: - src: fluentd.j2 + src: "{{ __base_file_dir }}/fluentd.j2" dest: "{{ tempdir }}/templates/logging-fluentd.yaml" vars: daemonset_name: logging-fluentd diff --git a/roles/openshift_logging_fluentd/templates/fluent.conf.j2 b/roles/openshift_logging_fluentd/templates/2.x/fluent.conf.j2 index 6e07b403a..6e07b403a 100644 --- a/roles/openshift_logging_fluentd/templates/fluent.conf.j2 +++ b/roles/openshift_logging_fluentd/templates/2.x/fluent.conf.j2 diff --git a/roles/openshift_logging_fluentd/templates/fluentd.j2 b/roles/openshift_logging_fluentd/templates/2.x/fluentd.j2 index c6256cf49..c6256cf49 100644 --- a/roles/openshift_logging_fluentd/templates/fluentd.j2 +++ b/roles/openshift_logging_fluentd/templates/2.x/fluentd.j2 diff --git a/roles/openshift_logging_fluentd/templates/5.x/fluent.conf.j2 b/roles/openshift_logging_fluentd/templates/5.x/fluent.conf.j2 new file mode 100644 index 000000000..6e07b403a --- /dev/null +++ b/roles/openshift_logging_fluentd/templates/5.x/fluent.conf.j2 @@ -0,0 +1,80 @@ +# This file is the fluentd configuration entrypoint. Edit with care. + +@include configs.d/openshift/system.conf + +# In each section below, pre- and post- includes don't include anything initially; +# they exist to enable future additions to openshift conf as needed. + +## sources +{% if deploy_type in ['hosted', 'secure-aggregator'] %} +## ordered so that syslog always runs last... 
+@include configs.d/openshift/input-pre-*.conf +@include configs.d/dynamic/input-docker-*.conf +@include configs.d/dynamic/input-syslog-*.conf +@include configs.d/openshift/input-post-*.conf +## +{% else %} +<source> + @type secure_forward + @label @INGRESS + + self_hostname ${HOSTNAME} + bind 0.0.0.0 + port {{openshift_logging_fluentd_aggregating_port}} + + shared_key {{openshift_logging_fluentd_shared_key}} + + secure {{openshift_logging_fluentd_aggregating_secure}} + enable_strict_verification {{openshift_logging_fluentd_aggregating_strict}} + ca_cert_path {{openshift_logging_fluentd_aggregating_cert_path}} + ca_private_key_path {{openshift_logging_fluentd_aggregating_key_path}} + ca_private_key_passphrase {{openshift_logging_fluentd_aggregating_passphrase}} + + <client> + host {{openshift_logging_fluentd_aggregating_host}} + </client> +</source> +{% endif %} + +<label @INGRESS> +{% if deploy_type in ['hosted', 'secure-host'] %} +## filters + @include configs.d/openshift/filter-pre-*.conf + @include configs.d/openshift/filter-retag-journal.conf + @include configs.d/openshift/filter-k8s-meta.conf + @include configs.d/openshift/filter-kibana-transform.conf + @include configs.d/openshift/filter-k8s-flatten-hash.conf + @include configs.d/openshift/filter-k8s-record-transform.conf + @include configs.d/openshift/filter-syslog-record-transform.conf + @include configs.d/openshift/filter-viaq-data-model.conf + @include configs.d/openshift/filter-post-*.conf +## +</label> + +<label @OUTPUT> +## matches + @include configs.d/openshift/output-pre-*.conf + @include configs.d/openshift/output-operations.conf + @include configs.d/openshift/output-applications.conf + # no post - applications.conf matches everything left +## +{% else %} + <match **> + @type secure_forward + + self_hostname ${HOSTNAME} + shared_key {{openshift_logging_fluentd_shared_key}} + + secure {{openshift_logging_fluentd_aggregating_secure}} + enable_strict_verification {{openshift_logging_fluentd_aggregating_strict}} + ca_cert_path {{openshift_logging_fluentd_aggregating_cert_path}} + ca_private_key_path {{openshift_logging_fluentd_aggregating_key_path}} + ca_private_key_passphrase {{openshift_logging_fluentd_aggregating_passphrase}} + + <server> + host {{openshift_logging_fluentd_aggregating_host}} + port {{openshift_logging_fluentd_aggregating_port}} + </server> + </match> +{% endif %} +</label> diff --git a/roles/openshift_logging_fluentd/templates/5.x/fluentd.j2 b/roles/openshift_logging_fluentd/templates/5.x/fluentd.j2 new file mode 100644 index 000000000..c6256cf49 --- /dev/null +++ b/roles/openshift_logging_fluentd/templates/5.x/fluentd.j2 @@ -0,0 +1,249 @@ +apiVersion: extensions/v1beta1 +kind: "DaemonSet" +metadata: + name: "{{ daemonset_name }}" + labels: + provider: openshift + component: "{{ daemonset_component }}" + logging-infra: "{{ daemonset_component }}" +spec: + selector: + matchLabels: + provider: openshift + component: "{{ daemonset_component }}" + updateStrategy: + type: RollingUpdate + rollingUpdate: + minReadySeconds: 600 + template: + metadata: + name: "{{ daemonset_container_name }}" + labels: + logging-infra: "{{ daemonset_component }}" + provider: openshift + component: "{{ daemonset_component }}" + spec: + serviceAccountName: "{{ daemonset_serviceAccount }}" + nodeSelector: + {{ fluentd_nodeselector_key }}: "{{ fluentd_nodeselector_value }}" + containers: + - name: "{{ daemonset_container_name }}" + image: "{{ openshift_logging_fluentd_image_prefix }}{{ daemonset_name }}:{{ 
openshift_logging_fluentd_image_version }}" + imagePullPolicy: IfNotPresent + securityContext: + privileged: true +{% if (fluentd_memory_limit is defined and fluentd_memory_limit is not none) or (fluentd_cpu_limit is defined and fluentd_cpu_limit is not none) or (fluentd_cpu_request is defined and fluentd_cpu_request is not none) %} + resources: +{% if (fluentd_memory_limit is defined and fluentd_memory_limit is not none) or (fluentd_cpu_limit is defined and fluentd_cpu_limit is not none) %} + limits: +{% if fluentd_cpu_limit is not none %} + cpu: "{{fluentd_cpu_limit}}" +{% endif %} +{% if fluentd_memory_limit is not none %} + memory: "{{fluentd_memory_limit}}" +{% endif %} +{% endif %} +{% if (fluentd_memory_limit is defined and fluentd_memory_limit is not none) or (fluentd_cpu_request is defined and fluentd_cpu_request is not none) %} + requests: +{% if fluentd_cpu_request is not none %} + cpu: "{{fluentd_cpu_request}}" +{% endif %} +{% if fluentd_memory_limit is not none %} + memory: "{{fluentd_memory_limit}}" +{% endif %} +{% endif %} +{% endif %} + volumeMounts: + - name: runlogjournal + mountPath: /run/log/journal + - name: varlog + mountPath: /var/log + - name: varlibdockercontainers + mountPath: /var/lib/docker/containers + readOnly: true + - name: config + mountPath: /etc/fluent/configs.d/user + readOnly: true + - name: certs + mountPath: /etc/fluent/keys + readOnly: true + - name: dockerhostname + mountPath: /etc/docker-hostname + readOnly: true + - name: localtime + mountPath: /etc/localtime + readOnly: true + - name: dockercfg + mountPath: /etc/sysconfig/docker + readOnly: true + - name: dockerdaemoncfg + mountPath: /etc/docker + readOnly: true + - name: filebufferstorage + mountPath: /var/lib/fluentd +{% if openshift_logging_mux_client_mode is defined and + ((openshift_logging_mux_allow_external is defined and openshift_logging_mux_allow_external | bool) or + (openshift_logging_use_mux is defined and openshift_logging_use_mux | bool)) %} + - name: muxcerts + mountPath: /etc/fluent/muxkeys + readOnly: true +{% endif %} + env: + - name: "K8S_HOST_URL" + value: "{{ openshift_logging_fluentd_master_url }}" + - name: "ES_HOST" + value: "{{ app_host }}" + - name: "ES_PORT" + value: "{{ app_port }}" + - name: "ES_CLIENT_CERT" + value: "{{ openshift_logging_fluentd_app_client_cert }}" + - name: "ES_CLIENT_KEY" + value: "{{ openshift_logging_fluentd_app_client_key }}" + - name: "ES_CA" + value: "{{ openshift_logging_fluentd_app_ca }}" + - name: "OPS_HOST" + value: "{{ ops_host }}" + - name: "OPS_PORT" + value: "{{ ops_port }}" + - name: "OPS_CLIENT_CERT" + value: "{{ openshift_logging_fluentd_ops_client_cert }}" + - name: "OPS_CLIENT_KEY" + value: "{{ openshift_logging_fluentd_ops_client_key }}" + - name: "OPS_CA" + value: "{{ openshift_logging_fluentd_ops_ca }}" + - name: "JOURNAL_SOURCE" + value: "{{ openshift_logging_fluentd_journal_source | default('') }}" + - name: "JOURNAL_READ_FROM_HEAD" + value: "{{ openshift_logging_fluentd_journal_read_from_head | lower }}" + - name: "BUFFER_QUEUE_LIMIT" + value: "{{ openshift_logging_fluentd_buffer_queue_limit }}" + - name: "BUFFER_SIZE_LIMIT" + value: "{{ openshift_logging_fluentd_buffer_size_limit }}" + - name: "FLUENTD_CPU_LIMIT" + valueFrom: + resourceFieldRef: + containerName: "{{ daemonset_container_name }}" + resource: limits.cpu + - name: "FLUENTD_MEMORY_LIMIT" + valueFrom: + resourceFieldRef: + containerName: "{{ daemonset_container_name }}" + resource: limits.memory + - name: "FILE_BUFFER_LIMIT" + value: "{{ 
openshift_logging_fluentd_file_buffer_limit | default('256Mi') }}" +{% if openshift_logging_mux_client_mode is defined and + ((openshift_logging_mux_allow_external is defined and openshift_logging_mux_allow_external | bool) or + (openshift_logging_use_mux is defined and openshift_logging_use_mux | bool)) %} + - name: "MUX_CLIENT_MODE" + value: "{{ openshift_logging_mux_client_mode }}" +{% endif %} +{% if openshift_logging_install_eventrouter is defined and openshift_logging_install_eventrouter %} + - name: "TRANSFORM_EVENTS" + value: "true" +{% endif %} + +{% if openshift_logging_fluentd_remote_syslog is defined and openshift_logging_fluentd_remote_syslog %} + - name: USE_REMOTE_SYSLOG + value: "true" +{% endif %} + +{% if openshift_logging_fluentd_remote_syslog_host is defined %} + - name: REMOTE_SYSLOG_HOST + value: "{{ openshift_logging_fluentd_remote_syslog_host }}" +{% endif %} + +{% if openshift_logging_fluentd_remote_syslog_port is defined %} + - name: REMOTE_SYSLOG_PORT + value: "{{ openshift_logging_fluentd_remote_syslog_port }}" +{% endif %} + +{% if openshift_logging_fluentd_remote_syslog_severity is defined %} + - name: REMOTE_SYSLOG_SEVERITY + value: "{{ openshift_logging_fluentd_remote_syslog_severity }}" +{% endif %} + +{% if openshift_logging_fluentd_remote_syslog_facility is defined %} + - name: REMOTE_SYSLOG_FACILITY + value: "{{ openshift_logging_fluentd_remote_syslog_facility }}" +{% endif %} + +{% if openshift_logging_fluentd_remote_syslog_remove_tag_prefix is defined %} + - name: REMOTE_SYSLOG_REMOVE_TAG_PREFIX + value: "{{ openshift_logging_fluentd_remote_syslog_remove_tag_prefix }}" +{% endif %} + +{% if openshift_logging_fluentd_remote_syslog_tag_key is defined %} + - name: REMOTE_SYSLOG_TAG_KEY + value: "{{ openshift_logging_fluentd_remote_syslog_tag_key }}" +{% endif %} + +{% if openshift_logging_fluentd_remote_syslog_use_record is defined %} + - name: REMOTE_SYSLOG_USE_RECORD + value: "{{ openshift_logging_fluentd_remote_syslog_use_record }}" +{% endif %} + +{% if openshift_logging_fluentd_remote_syslog_payload_key is defined %} + - name: REMOTE_SYSLOG_PAYLOAD_KEY + value: "{{ openshift_logging_fluentd_remote_syslog_payload_key }}" +{% endif %} + +{% if audit_container_engine %} + - name: "AUDIT_CONTAINER_ENGINE" + value: "{{ audit_container_engine | lower }}" +{% endif %} + +{% if audit_container_engine %} + - name: "NODE_NAME" + valueFrom: + fieldRef: + fieldPath: spec.nodeName +{% endif %} + +{% if audit_log_file != '' %} + - name: AUDIT_FILE + value: "{{ audit_log_file }}" +{% endif %} + +{% if audit_pos_log_file != '' %} + - name: AUDIT_POS_FILE + value: "{{ audit_pos_log_file }}" +{% endif %} + + volumes: + - name: runlogjournal + hostPath: + path: /run/log/journal + - name: varlog + hostPath: + path: /var/log + - name: varlibdockercontainers + hostPath: + path: /var/lib/docker/containers + - name: config + configMap: + name: logging-fluentd + - name: certs + secret: + secretName: logging-fluentd + - name: dockerhostname + hostPath: + path: /etc/hostname + - name: localtime + hostPath: + path: /etc/localtime + - name: dockercfg + hostPath: + path: /etc/sysconfig/docker + - name: dockerdaemoncfg + hostPath: + path: /etc/docker +{% if openshift_logging_mux_client_mode is defined and + ((openshift_logging_mux_allow_external is defined and openshift_logging_mux_allow_external | bool) or + (openshift_logging_use_mux is defined and openshift_logging_use_mux | bool)) %} + - name: muxcerts + secret: + secretName: logging-mux +{% endif %} + - name: 
filebufferstorage + hostPath: + path: "/var/lib/fluentd" diff --git a/roles/openshift_logging_kibana/tasks/main.yaml b/roles/openshift_logging_kibana/tasks/main.yaml index c67235c62..58edc5ce5 100644 --- a/roles/openshift_logging_kibana/tasks/main.yaml +++ b/roles/openshift_logging_kibana/tasks/main.yaml @@ -133,7 +133,7 @@ - name: Generating Kibana route template template: - src: route_reencrypt.j2 + src: "{{ __base_file_dir }}/route_reencrypt.j2" dest: "{{ tempdir }}/templates/kibana-route.yaml" vars: obj_name: "{{ kibana_name }}" @@ -174,7 +174,7 @@ # create oauth client - name: Create oauth-client template template: - src: oauth-client.j2 + src: "{{ __base_file_dir }}/oauth-client.j2" dest: "{{ tempdir }}/templates/oauth-client.yml" vars: kibana_hostnames: "{{ proxy_hostnames | unique }}" @@ -233,7 +233,7 @@ # create Kibana DC - name: Generate Kibana DC template template: - src: kibana.j2 + src: "{{ __base_file_dir }}/kibana.j2" dest: "{{ tempdir }}/templates/kibana-dc.yaml" vars: component: "{{ kibana_component }}" diff --git a/roles/openshift_logging_kibana/templates/kibana.j2 b/roles/openshift_logging_kibana/templates/2.x/kibana.j2 index ed05b8458..ed05b8458 100644 --- a/roles/openshift_logging_kibana/templates/kibana.j2 +++ b/roles/openshift_logging_kibana/templates/2.x/kibana.j2 diff --git a/roles/openshift_logging_kibana/templates/oauth-client.j2 b/roles/openshift_logging_kibana/templates/2.x/oauth-client.j2 index c80ff3d30..c80ff3d30 100644 --- a/roles/openshift_logging_kibana/templates/oauth-client.j2 +++ b/roles/openshift_logging_kibana/templates/2.x/oauth-client.j2 diff --git a/roles/openshift_logging_kibana/templates/2.x/route_reencrypt.j2 b/roles/openshift_logging_kibana/templates/2.x/route_reencrypt.j2 new file mode 100644 index 000000000..d2e8b8bcb --- /dev/null +++ b/roles/openshift_logging_kibana/templates/2.x/route_reencrypt.j2 @@ -0,0 +1,36 @@ +apiVersion: "v1" +kind: "Route" +metadata: + name: "{{obj_name}}" +{% if labels is defined%} + labels: +{% for key, value in labels.items() %} + {{key}}: {{value}} +{% endfor %} +{% endif %} +spec: + host: {{ route_host }} + tls: +{% if tls_key is defined and tls_key | length > 0 %} + key: | +{{ tls_key|indent(6, true) }} +{% if tls_cert is defined and tls_cert | length > 0 %} + certificate: | +{{ tls_cert|indent(6, true) }} +{% endif %} +{% endif %} + caCertificate: | +{% for line in tls_ca_cert.split('\n') %} + {{ line }} +{% endfor %} + destinationCACertificate: | +{% for line in tls_dest_ca_cert.split('\n') %} + {{ line }} +{% endfor %} + termination: reencrypt +{% if edge_term_policy is defined and edge_term_policy | length > 0 %} + insecureEdgeTerminationPolicy: {{ edge_term_policy }} +{% endif %} + to: + kind: Service + name: {{ service_name }} diff --git a/roles/openshift_logging_kibana/templates/5.x/kibana.j2 b/roles/openshift_logging_kibana/templates/5.x/kibana.j2 new file mode 100644 index 000000000..0f946573d --- /dev/null +++ b/roles/openshift_logging_kibana/templates/5.x/kibana.j2 @@ -0,0 +1,170 @@ +apiVersion: "v1" +kind: "DeploymentConfig" +metadata: + name: "{{ deploy_name }}" + labels: + provider: openshift + component: "{{ component }}" + logging-infra: "{{ logging_component }}" +spec: + replicas: {{ kibana_replicas | default(1) }} + selector: + provider: openshift + component: "{{ component }}" + logging-infra: "{{ logging_component }}" + strategy: + rollingParams: + intervalSeconds: 1 + timeoutSeconds: 600 + updatePeriodSeconds: 1 + type: Rolling + template: + metadata: + name: "{{ deploy_name }}" + labels: 
+ logging-infra: "{{ logging_component }}" + provider: openshift + component: "{{ component }}" + spec: + serviceAccountName: aggregated-logging-kibana +{% if kibana_node_selector is iterable and kibana_node_selector | length > 0 %} + nodeSelector: +{% for key, value in kibana_node_selector.items() %} + {{ key }}: "{{ value }}" +{% endfor %} +{% endif %} + containers: + - + name: "kibana" + image: {{ image }} + imagePullPolicy: IfNotPresent +{% if (kibana_memory_limit is defined and kibana_memory_limit is not none and kibana_memory_limit != "") or (kibana_cpu_limit is defined and kibana_cpu_limit is not none and kibana_cpu_limit != "") or (kibana_cpu_request is defined and kibana_cpu_request is not none and kibana_cpu_request != "") %} + resources: +{% if (kibana_memory_limit is defined and kibana_memory_limit is not none and kibana_memory_limit != "") or (kibana_cpu_limit is defined and kibana_cpu_limit is not none and kibana_cpu_limit != "") %} + limits: +{% if kibana_cpu_limit is not none and kibana_cpu_limit != "" %} + cpu: "{{ kibana_cpu_limit }}" +{% endif %} +{% if kibana_memory_limit is not none and kibana_memory_limit != "" %} + memory: "{{ kibana_memory_limit }}" +{% endif %} +{% endif %} +{% if (kibana_memory_limit is defined and kibana_memory_limit is not none and kibana_memory_limit != "") or (kibana_cpu_request is defined and kibana_cpu_request is not none and kibana_cpu_request != "") %} + requests: +{% if kibana_cpu_request is not none and kibana_cpu_request != "" %} + cpu: "{{ kibana_cpu_request }}" +{% endif %} +{% if kibana_memory_limit is not none and kibana_memory_limit != "" %} + memory: "{{ kibana_memory_limit }}" +{% endif %} +{% endif %} +{% endif %} + env: + - name: "ES_URL" + value: "https://{{ es_host }}:{{ es_port }}" + - + name: "KIBANA_MEMORY_LIMIT" + valueFrom: + resourceFieldRef: + containerName: kibana + resource: limits.memory + volumeMounts: + - name: kibana + mountPath: /etc/kibana/keys + readOnly: true + readinessProbe: + exec: + command: + - "/usr/share/kibana/probe/readiness.sh" + initialDelaySeconds: 5 + timeoutSeconds: 4 + periodSeconds: 5 + - + name: "kibana-proxy" + image: {{ proxy_image }} + imagePullPolicy: IfNotPresent +{% if (kibana_proxy_memory_limit is defined and kibana_proxy_memory_limit is not none and kibana_proxy_memory_limit != "") or (kibana_proxy_cpu_limit is defined and kibana_proxy_cpu_limit is not none and kibana_proxy_cpu_limit != "") or (kibana_proxy_cpu_request is defined and kibana_proxy_cpu_request is not none and kibana_proxy_cpu_request != "") %} + resources: +{% if (kibana_proxy_memory_limit is defined and kibana_proxy_memory_limit is not none and kibana_proxy_memory_limit != "") or (kibana_proxy_cpu_limit is defined and kibana_proxy_cpu_limit is not none and kibana_proxy_cpu_limit != "") %} + limits: +{% if kibana_proxy_cpu_limit is not none and kibana_proxy_cpu_limit != "" %} + cpu: "{{ kibana_proxy_cpu_limit }}" +{% endif %} +{% if kibana_proxy_memory_limit is not none and kibana_proxy_memory_limit != "" %} + memory: "{{ kibana_proxy_memory_limit }}" +{% endif %} +{% endif %} +{% if (kibana_proxy_memory_limit is defined and kibana_proxy_memory_limit is not none and kibana_proxy_memory_limit != "") or (kibana_proxy_cpu_request is defined and kibana_proxy_cpu_request is not none and kibana_proxy_cpu_request != "") %} + requests: +{% if kibana_proxy_cpu_request is not none and kibana_proxy_cpu_request != "" %} + cpu: "{{ kibana_proxy_cpu_request }}" +{% endif %} +{% if kibana_proxy_memory_limit is not none and 
kibana_proxy_memory_limit != "" %} + memory: "{{ kibana_proxy_memory_limit }}" +{% endif %} +{% endif %} +{% endif %} + ports: + - + name: "oaproxy" + containerPort: 3000 + env: + - + name: "OAP_BACKEND_URL" + value: "http://localhost:5601" + - + name: "OAP_AUTH_MODE" + value: "oauth2" + - + name: "OAP_TRANSFORM" + value: "user_header,token_header" + - + name: "OAP_OAUTH_ID" + value: kibana-proxy + - + name: "OAP_MASTER_URL" + value: {{ openshift_logging_kibana_master_url }} + - + name: "OAP_PUBLIC_MASTER_URL" + value: {{ openshift_logging_kibana_master_public_url }} + - + name: "OAP_LOGOUT_REDIRECT" + value: {{ openshift_logging_kibana_master_public_url }}/console/logout + - + name: "OAP_MASTER_CA_FILE" + value: "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" + - + name: "OAP_DEBUG" + value: "{{ openshift_logging_kibana_proxy_debug }}" + - + name: "OAP_OAUTH_SECRET_FILE" + value: "/secret/oauth-secret" + - + name: "OAP_SERVER_CERT_FILE" + value: "/secret/server-cert" + - + name: "OAP_SERVER_KEY_FILE" + value: "/secret/server-key" + - + name: "OAP_SERVER_TLS_FILE" + value: "/secret/server-tls.json" + - + name: "OAP_SESSION_SECRET_FILE" + value: "/secret/session-secret" + - + name: "OCP_AUTH_PROXY_MEMORY_LIMIT" + valueFrom: + resourceFieldRef: + containerName: kibana-proxy + resource: limits.memory + volumeMounts: + - name: kibana-proxy + mountPath: /secret + readOnly: true + volumes: + - name: kibana + secret: + secretName: logging-kibana + - name: kibana-proxy + secret: + secretName: logging-kibana-proxy diff --git a/roles/openshift_logging_kibana/templates/5.x/oauth-client.j2 b/roles/openshift_logging_kibana/templates/5.x/oauth-client.j2 new file mode 100644 index 000000000..c80ff3d30 --- /dev/null +++ b/roles/openshift_logging_kibana/templates/5.x/oauth-client.j2 @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: OAuthClient +metadata: + name: kibana-proxy + labels: + logging-infra: support +secret: {{ secret }} +redirectURIs: +{% for host in kibana_hostnames %} +- {{ host }} +{% endfor %} +scopeRestrictions: +- literals: + - user:info + - user:check-access + - user:list-projects diff --git a/roles/openshift_logging_kibana/templates/5.x/route_reencrypt.j2 b/roles/openshift_logging_kibana/templates/5.x/route_reencrypt.j2 new file mode 100644 index 000000000..d2e8b8bcb --- /dev/null +++ b/roles/openshift_logging_kibana/templates/5.x/route_reencrypt.j2 @@ -0,0 +1,36 @@ +apiVersion: "v1" +kind: "Route" +metadata: + name: "{{obj_name}}" +{% if labels is defined%} + labels: +{% for key, value in labels.items() %} + {{key}}: {{value}} +{% endfor %} +{% endif %} +spec: + host: {{ route_host }} + tls: +{% if tls_key is defined and tls_key | length > 0 %} + key: | +{{ tls_key|indent(6, true) }} +{% if tls_cert is defined and tls_cert | length > 0 %} + certificate: | +{{ tls_cert|indent(6, true) }} +{% endif %} +{% endif %} + caCertificate: | +{% for line in tls_ca_cert.split('\n') %} + {{ line }} +{% endfor %} + destinationCACertificate: | +{% for line in tls_dest_ca_cert.split('\n') %} + {{ line }} +{% endfor %} + termination: reencrypt +{% if edge_term_policy is defined and edge_term_policy | length > 0 %} + insecureEdgeTerminationPolicy: {{ edge_term_policy }} +{% endif %} + to: + kind: Service + name: {{ service_name }} diff --git a/roles/openshift_logging_mux/files/fluent.conf b/roles/openshift_logging_mux/files/2.x/fluent.conf index bf61c9811..bf61c9811 100644 --- a/roles/openshift_logging_mux/files/fluent.conf +++ b/roles/openshift_logging_mux/files/2.x/fluent.conf diff --git 
a/roles/openshift_logging_mux/files/2.x/secure-forward.conf b/roles/openshift_logging_mux/files/2.x/secure-forward.conf new file mode 100644 index 000000000..87410c1c5 --- /dev/null +++ b/roles/openshift_logging_mux/files/2.x/secure-forward.conf @@ -0,0 +1,26 @@ +# <store> +# @type secure_forward + +# self_hostname ${HOSTNAME} +# shared_key <SECRET_STRING> + +# secure yes +# enable_strict_verification yes + +# ca_cert_path /etc/fluent/keys/your_ca_cert +# ca_private_key_path /etc/fluent/keys/your_private_key + # for private CA secret key +# ca_private_key_passphrase passphrase + +# <server> + # or IP +# host server.fqdn.example.com +# port 24284 +# </server> +# <server> + # ip address to connect +# host 203.0.113.8 + # specify hostlabel for FQDN verification if ipaddress is used for host +# hostlabel server.fqdn.example.com +# </server> +# </store> diff --git a/roles/openshift_logging_mux/files/5.x/fluent.conf b/roles/openshift_logging_mux/files/5.x/fluent.conf new file mode 100644 index 000000000..bf61c9811 --- /dev/null +++ b/roles/openshift_logging_mux/files/5.x/fluent.conf @@ -0,0 +1,37 @@ +# This file is the fluentd configuration entrypoint. Edit with care. + +@include configs.d/openshift/system.conf + +# In each section below, pre- and post- includes don't include anything initially; +# they exist to enable future additions to openshift conf as needed. + +## sources +## ordered so that syslog always runs last... +@include configs.d/openshift/input-pre-*.conf +@include configs.d/dynamic/input-docker-*.conf +@include configs.d/dynamic/input-syslog-*.conf +@include configs.d/openshift/input-post-*.conf +## + +<label @INGRESS> +## filters + @include configs.d/openshift/filter-pre-*.conf + @include configs.d/openshift/filter-retag-journal.conf + @include configs.d/openshift/filter-k8s-meta.conf + @include configs.d/openshift/filter-kibana-transform.conf + @include configs.d/openshift/filter-k8s-flatten-hash.conf + @include configs.d/openshift/filter-k8s-record-transform.conf + @include configs.d/openshift/filter-syslog-record-transform.conf + @include configs.d/openshift/filter-viaq-data-model.conf + @include configs.d/openshift/filter-post-*.conf +## +</label> + +<label @OUTPUT> +## matches + @include configs.d/openshift/output-pre-*.conf + @include configs.d/openshift/output-operations.conf + @include configs.d/openshift/output-applications.conf + # no post - applications.conf matches everything left +## +</label> diff --git a/roles/openshift_logging_mux/files/5.x/secure-forward.conf b/roles/openshift_logging_mux/files/5.x/secure-forward.conf new file mode 100644 index 000000000..87410c1c5 --- /dev/null +++ b/roles/openshift_logging_mux/files/5.x/secure-forward.conf @@ -0,0 +1,26 @@ +# <store> +# @type secure_forward + +# self_hostname ${HOSTNAME} +# shared_key <SECRET_STRING> + +# secure yes +# enable_strict_verification yes + +# ca_cert_path /etc/fluent/keys/your_ca_cert +# ca_private_key_path /etc/fluent/keys/your_private_key + # for private CA secret key +# ca_private_key_passphrase passphrase + +# <server> + # or IP +# host server.fqdn.example.com +# port 24284 +# </server> +# <server> + # ip address to connect +# host 203.0.113.8 + # specify hostlabel for FQDN verification if ipaddress is used for host +# hostlabel server.fqdn.example.com +# </server> +# </store> diff --git a/roles/openshift_logging_mux/tasks/main.yaml b/roles/openshift_logging_mux/tasks/main.yaml index 7eba3cda4..b2699b285 100644 --- a/roles/openshift_logging_mux/tasks/main.yaml +++ 
b/roles/openshift_logging_mux/tasks/main.yaml @@ -86,12 +86,12 @@ # create Mux configmap - copy: - src: fluent.conf + src: "{{ __base_file_dir }}/fluent.conf" dest: "{{mktemp.stdout}}/fluent-mux.conf" changed_when: no - copy: - src: secure-forward.conf + src: "{{ __base_file_dir }}/secure-forward.conf" dest: "{{mktemp.stdout}}/secure-forward-mux.conf" changed_when: no @@ -170,7 +170,7 @@ # create Mux DC - name: Generating mux deploymentconfig template: - src: mux.j2 + src: "{{ __base_file_dir }}/mux.j2" dest: "{{mktemp.stdout}}/templates/logging-mux-dc.yaml" vars: component: mux diff --git a/roles/openshift_logging_mux/templates/mux.j2 b/roles/openshift_logging_mux/templates/2.x/mux.j2 index 2337c33d5..2337c33d5 100644 --- a/roles/openshift_logging_mux/templates/mux.j2 +++ b/roles/openshift_logging_mux/templates/2.x/mux.j2 diff --git a/roles/openshift_logging_mux/templates/5.x/mux.j2 b/roles/openshift_logging_mux/templates/5.x/mux.j2 new file mode 100644 index 000000000..2337c33d5 --- /dev/null +++ b/roles/openshift_logging_mux/templates/5.x/mux.j2 @@ -0,0 +1,202 @@ +apiVersion: "v1" +kind: "DeploymentConfig" +metadata: + name: "{{deploy_name}}" + labels: + provider: openshift + component: "{{component}}" + logging-infra: "{{logging_component}}" +spec: + replicas: {{mux_replicas|default(1)}} + selector: + provider: openshift + component: "{{component}}" + logging-infra: "{{logging_component}}" + strategy: + rollingParams: + intervalSeconds: 1 + timeoutSeconds: 600 + updatePeriodSeconds: 1 + type: Rolling + template: + metadata: + name: "{{deploy_name}}" + labels: + logging-infra: "{{logging_component}}" + provider: openshift + component: "{{component}}" + spec: + serviceAccountName: aggregated-logging-mux +{% if mux_node_selector is iterable and mux_node_selector | length > 0 %} + nodeSelector: +{% for key, value in mux_node_selector.items() %} + {{key}}: "{{value}}" +{% endfor %} +{% endif %} + containers: + - name: "mux" + image: {{image}} + imagePullPolicy: IfNotPresent +{% if (mux_memory_limit is defined and mux_memory_limit is not none) or (mux_cpu_limit is defined and mux_cpu_limit is not none) or (mux_cpu_request is defined and mux_cpu_request is not none) %} + resources: +{% if (mux_memory_limit is defined and mux_memory_limit is not none) or (mux_cpu_limit is defined and mux_cpu_limit is not none) %} + limits: +{% if mux_cpu_limit is not none %} + cpu: "{{mux_cpu_limit}}" +{% endif %} +{% if mux_memory_limit is not none %} + memory: "{{mux_memory_limit}}" +{% endif %} +{% endif %} +{% if (mux_memory_limit is defined and mux_memory_limit is not none) or (mux_cpu_request is defined and mux_cpu_request is not none) %} + requests: +{% if mux_cpu_request is not none %} + cpu: "{{mux_cpu_request}}" +{% endif %} +{% if mux_memory_limit is not none %} + memory: "{{mux_memory_limit}}" +{% endif %} +{% endif %} +{% endif %} + ports: + - containerPort: {{ openshift_logging_mux_port }} + name: mux-forward + volumeMounts: + - name: config + mountPath: /etc/fluent/configs.d/user + readOnly: true + - name: certs + mountPath: /etc/fluent/keys + readOnly: true + - name: dockerhostname + mountPath: /etc/docker-hostname + readOnly: true + - name: localtime + mountPath: /etc/localtime + readOnly: true + - name: muxcerts + mountPath: /etc/fluent/muxkeys + readOnly: true + - name: filebufferstorage + mountPath: /var/lib/fluentd + env: + - name: "K8S_HOST_URL" + value: "{{openshift_logging_mux_master_url}}" + - name: "ES_HOST" + value: "{{openshift_logging_mux_app_host}}" + - name: "ES_PORT" + value: 
"{{openshift_logging_mux_app_port}}" + - name: "ES_CLIENT_CERT" + value: "{{openshift_logging_mux_app_client_cert}}" + - name: "ES_CLIENT_KEY" + value: "{{openshift_logging_mux_app_client_key}}" + - name: "ES_CA" + value: "{{openshift_logging_mux_app_ca}}" + - name: "OPS_HOST" + value: "{{openshift_logging_mux_ops_host}}" + - name: "OPS_PORT" + value: "{{openshift_logging_mux_ops_port}}" + - name: "OPS_CLIENT_CERT" + value: "{{openshift_logging_mux_ops_client_cert}}" + - name: "OPS_CLIENT_KEY" + value: "{{openshift_logging_mux_ops_client_key}}" + - name: "OPS_CA" + value: "{{openshift_logging_mux_ops_ca}}" + - name: "JOURNAL_SOURCE" + value: "{{openshift_logging_mux_journal_source | default('')}}" + - name: "JOURNAL_READ_FROM_HEAD" + value: "{{openshift_logging_mux_journal_read_from_head|lower}}" + - name: FORWARD_LISTEN_HOST + value: "{{ openshift_logging_mux_hostname }}" + - name: FORWARD_LISTEN_PORT + value: "{{ openshift_logging_mux_port }}" + - name: USE_MUX + value: "true" + - name: "BUFFER_QUEUE_LIMIT" + value: "{{ openshift_logging_mux_buffer_queue_limit }}" + - name: "BUFFER_SIZE_LIMIT" + value: "{{ openshift_logging_mux_buffer_size_limit }}" + - name: "MUX_CPU_LIMIT" + valueFrom: + resourceFieldRef: + containerName: "mux" + resource: limits.cpu + - name: "MUX_MEMORY_LIMIT" + valueFrom: + resourceFieldRef: + containerName: "mux" + resource: limits.memory + - name: "FILE_BUFFER_LIMIT" + value: "{{ openshift_logging_mux_file_buffer_limit | default('2Gi') }}" + +{% if openshift_logging_mux_remote_syslog is defined and openshift_logging_mux_remote_syslog %} + - name: USE_REMOTE_SYSLOG + value: "true" +{% endif %} + +{% if openshift_logging_mux_remote_syslog_host is defined %} + - name: REMOTE_SYSLOG_HOST + value: "{{ openshift_logging_mux_remote_syslog_host }}" +{% endif %} + +{% if openshift_logging_mux_remote_syslog_port is defined %} + - name: REMOTE_SYSLOG_PORT + value: "{{ openshift_logging_mux_remote_syslog_port }}" +{% endif %} + +{% if openshift_logging_mux_remote_syslog_severity is defined %} + - name: REMOTE_SYSLOG_SEVERITY + value: "{{ openshift_logging_mux_remote_syslog_severity }}" +{% endif %} + +{% if openshift_logging_mux_remote_syslog_facility is defined %} + - name: REMOTE_SYSLOG_FACILITY + value: "{{ openshift_logging_mux_remote_syslog_facility }}" +{% endif %} + +{% if openshift_logging_mux_remote_syslog_remove_tag_prefix is defined %} + - name: REMOTE_SYSLOG_REMOVE_TAG_PREFIX + value: "{{ openshift_logging_mux_remote_syslog_remove_tag_prefix }}" +{% endif %} + +{% if openshift_logging_mux_remote_syslog_tag_key is defined %} + - name: REMOTE_SYSLOG_TAG_KEY + value: "{{ openshift_logging_mux_remote_syslog_tag_key }}" +{% endif %} + +{% if openshift_logging_mux_remote_syslog_use_record is defined %} + - name: REMOTE_SYSLOG_USE_RECORD + value: "{{ openshift_logging_mux_remote_syslog_use_record }}" +{% endif %} + +{% if openshift_logging_mux_remote_syslog_payload_key is defined %} + - name: REMOTE_SYSLOG_PAYLOAD_KEY + value: "{{ openshift_logging_mux_remote_syslog_payload_key }}" +{% endif %} + + volumes: + - name: config + configMap: + name: logging-mux + - name: certs + secret: + secretName: logging-fluentd + - name: dockerhostname + hostPath: + path: /etc/hostname + - name: localtime + hostPath: + path: /etc/localtime + - name: muxcerts + secret: + secretName: logging-mux + - name: filebufferstorage +{% if openshift_logging_mux_file_buffer_storage_type == 'pvc' %} + persistentVolumeClaim: + claimName: {{ openshift_logging_mux_file_buffer_pvc_name }} +{% elif 
openshift_logging_mux_file_buffer_storage_type == 'hostmount' %} + hostPath: + path: "/var/log/fluentd" +{% else %} + emptyDir: {} +{% endif %} diff --git a/roles/openshift_node/tasks/upgrade.yml b/roles/openshift_node/tasks/upgrade.yml index 02e417937..c5121c5b2 100644 --- a/roles/openshift_node/tasks/upgrade.yml +++ b/roles/openshift_node/tasks/upgrade.yml @@ -13,7 +13,7 @@ include_tasks: upgrade/stop_services.yml # Ensure we actually install the latest package. -- name: download docker upgrade rpm +- name: install docker upgrade rpm command: "{{ ansible_pkg_mgr }} install -C -y docker{{ '-' + docker_version }}" register: result until: result is succeeded diff --git a/roles/openshift_prometheus/templates/prometheus.j2 b/roles/openshift_prometheus/templates/prometheus.j2 index c0abd483b..e86de1eab 100644 --- a/roles/openshift_prometheus/templates/prometheus.j2 +++ b/roles/openshift_prometheus/templates/prometheus.j2 @@ -219,7 +219,7 @@ spec: - name: alertmanager args: - - -config.file=/etc/alertmanager/alertmanager.yml + - --config.file=/etc/alertmanager/alertmanager.yml image: "{{ l_openshift_prometheus_alertmanager_image_prefix }}prometheus-alertmanager:{{ l_openshift_prometheus_alertmanager_image_version }}" imagePullPolicy: IfNotPresent resources: diff --git a/roles/openshift_prometheus/vars/default_images.yml b/roles/openshift_prometheus/vars/default_images.yml index 31f6c1bb1..2c46b5700 100644 --- a/roles/openshift_prometheus/vars/default_images.yml +++ b/roles/openshift_prometheus/vars/default_images.yml @@ -8,5 +8,5 @@ l_openshift_prometheus_alertbuffer_image_prefix: "{{ openshift_prometheus_alertb # image version defaults l_openshift_prometheus_image_version: "{{ openshift_prometheus_image_version | default('v2.0.0') }}" l_openshift_prometheus_proxy_image_version: "{{ openshift_prometheus_proxy_image_version | default('v1.0.0') }}" -l_openshift_prometheus_alertmanager_image_version: "{{ openshift_prometheus_alertmanager_image_version | default('v0.9.1') }}" +l_openshift_prometheus_alertmanager_image_version: "{{ openshift_prometheus_alertmanager_image_version | default('v0.13.0') }}" l_openshift_prometheus_alertbuffer_image_version: "{{ openshift_prometheus_alertbuffer_image_version | default('v0.0.2') }}"
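
The last two hunks travel together: Alertmanager switched to kingpin-style double-dash flags in v0.13.0, so raising the default image from v0.9.1 to v0.13.0 in default_images.yml requires the --config.file form that prometheus.j2 now passes. A small sketch of that relationship, illustrative only; the template simply hard-codes the new flag, and the version parsing below is an assumption:

# Illustrative only: shows why the flag form tracks the alertmanager image version.
def alertmanager_config_arg(image_version):
    major, minor = (int(p) for p in image_version.lstrip("v").split(".")[:2])
    # v0.13.0 and later use kingpin-style double-dash flags; earlier releases use single-dash.
    prefix = "--" if (major, minor) >= (0, 13) else "-"
    return prefix + "config.file=/etc/alertmanager/alertmanager.yml"

print(alertmanager_config_arg("v0.9.1"))   # -config.file=/etc/alertmanager/alertmanager.yml
print(alertmanager_config_arg("v0.13.0"))  # --config.file=/etc/alertmanager/alertmanager.yml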
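
Across the logging roles above, templates and static files now live in parallel 2.x/ and 5.x/ sub-directories, and the tasks build their paths from __base_file_dir (for example src: "{{ __base_file_dir }}/fluent.conf.j2"). A minimal Python sketch of that selection, assuming __base_file_dir simply tracks the ES 5.x tech-preview opt-in; the helper and flag names are illustrative, not the roles' actual variables:

# Sketch only: the roles compute __base_file_dir internally; the helper and the
# opt-in flag below are illustrative assumptions, not openshift-ansible variables.
def base_file_dir(use_es5_tech_preview):
    """Pick the sub-directory that tasks prepend via __base_file_dir."""
    return "5.x" if use_es5_tech_preview else "2.x"

def template_src(role_templates_dir, name, use_es5_tech_preview=False):
    # Mirrors lookups such as src: "{{ __base_file_dir }}/fluent.conf.j2"
    return "%s/%s/%s" % (role_templates_dir, base_file_dir(use_es5_tech_preview), name)

print(template_src("roles/openshift_logging_fluentd/templates", "fluent.conf.j2"))        # 2.x copy
print(template_src("roles/openshift_logging_fluentd/templates", "fluent.conf.j2", True))  # 5.x copy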
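
The new 5.x pvc.j2 emits the selector block only when pv_selector is a mapping, the annotations block only when annotations is defined, and storageClassName only when storage_class_name is defined. It uses only core Jinja2 features, so it can be rendered outside Ansible to sanity-check that behaviour; the values passed below are illustrative assumptions, not role defaults:

# Renders the new 5.x pvc.j2 with plain Jinja2 (no Ansible); the values below are
# illustrative assumptions, not role defaults. Requires: pip install jinja2
from jinja2 import Environment, FileSystemLoader

env = Environment(loader=FileSystemLoader(
    "roles/openshift_logging_elasticsearch/templates/5.x"))
print(env.get_template("pvc.j2").render(
    obj_name="logging-es-pvc",                    # assumed claim name
    annotations={"example/annotation": "demo"},   # optional annotations block
    pv_selector={"logging-infra": "support"},     # emitted only because it is a mapping
    access_modes=["ReadWriteOnce"],
    size="10Gi",
    storage_class_name="gp2",                     # optional storageClassName
))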
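
The new fluentd.j2, kibana.j2, and mux.j2 all repeat the same resources pattern: emit limits when a CPU or memory limit is set, emit requests when a CPU request or memory limit is set, and reuse the memory limit as the memory request. A compact sketch of that logic, with made-up values:

# Mirrors the limits/requests conditionals in the new fluentd/kibana/mux templates;
# the example values are assumptions, not role defaults.
def build_resources(memory_limit=None, cpu_limit=None, cpu_request=None):
    resources = {}
    if memory_limit or cpu_limit:
        resources["limits"] = {k: v for k, v in
                               (("cpu", cpu_limit), ("memory", memory_limit)) if v}
    if memory_limit or cpu_request:
        # As in the templates, the memory limit also serves as the memory request.
        resources["requests"] = {k: v for k, v in
                                 (("cpu", cpu_request), ("memory", memory_limit)) if v}
    return resources

print(build_resources(memory_limit="756Mi", cpu_request="100m"))
# -> {'limits': {'memory': '756Mi'}, 'requests': {'cpu': '100m', 'memory': '756Mi'}}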