Diffstat (limited to 'roles')
-rw-r--r--  roles/ansible_service_broker/defaults/main.yml | 5
-rw-r--r--  roles/ansible_service_broker/tasks/install.yml | 48
-rw-r--r--  roles/ansible_service_broker/vars/default_images.yml | 2
-rw-r--r--  roles/ansible_service_broker/vars/openshift-enterprise.yml | 5
-rw-r--r--  roles/docker/tasks/main.yml | 9
-rw-r--r--  roles/docker/tasks/package_docker.yml | 8
-rw-r--r--  roles/docker/tasks/systemcontainer_crio.yml | 34
-rw-r--r--  roles/docker/tasks/systemcontainer_docker.yml | 36
-rw-r--r--  roles/etcd/tasks/system_container.yml | 5
-rw-r--r--  roles/openshift_atomic/README.md | 28
-rw-r--r--  roles/openshift_atomic/meta/main.yml | 13
-rw-r--r--  roles/openshift_atomic/tasks/proxy.yml | 32
-rw-r--r--  roles/openshift_cli/tasks/main.yml | 13
-rw-r--r--  roles/openshift_health_checker/action_plugins/openshift_health_check.py | 3
-rw-r--r--  roles/openshift_health_checker/openshift_checks/__init__.py | 7
-rw-r--r--  roles/openshift_health_checker/openshift_checks/docker_image_availability.py | 5
-rw-r--r--  roles/openshift_logging/README.md | 18
-rw-r--r--  roles/openshift_logging/defaults/main.yml | 36
-rw-r--r--  roles/openshift_logging/filter_plugins/openshift_logging.py | 19
-rw-r--r--  roles/openshift_logging/filter_plugins/test | 34
-rw-r--r--  roles/openshift_logging/library/openshift_logging_facts.py | 2
-rw-r--r--  roles/openshift_logging/tasks/install_logging.yaml | 9
-rw-r--r--  roles/openshift_logging_curator/defaults/main.yml | 5
-rw-r--r--  roles/openshift_logging_curator/tasks/main.yaml | 1
-rw-r--r--  roles/openshift_logging_curator/templates/curator.j2 | 15
-rw-r--r--  roles/openshift_logging_elasticsearch/defaults/main.yml | 5
-rw-r--r--  roles/openshift_logging_elasticsearch/tasks/main.yaml | 11
-rw-r--r--  roles/openshift_logging_elasticsearch/templates/es.j2 | 7
-rw-r--r--  roles/openshift_logging_elasticsearch/vars/openshift-enterprise.yml | 2
-rw-r--r--  roles/openshift_logging_eventrouter/README.md | 6
-rw-r--r--  roles/openshift_logging_eventrouter/defaults/main.yaml | 3
-rw-r--r--  roles/openshift_logging_eventrouter/tasks/install_eventrouter.yaml | 2
-rw-r--r--  roles/openshift_logging_eventrouter/templates/eventrouter-template.j2 | 6
-rw-r--r--  roles/openshift_logging_fluentd/defaults/main.yml | 5
-rw-r--r--  roles/openshift_logging_fluentd/tasks/main.yaml | 3
-rw-r--r--  roles/openshift_logging_fluentd/templates/fluentd.j2 | 23
-rw-r--r--  roles/openshift_logging_kibana/defaults/main.yml | 2
-rw-r--r--  roles/openshift_logging_kibana/tasks/main.yaml | 2
-rw-r--r--  roles/openshift_logging_kibana/templates/kibana.j2 | 38
-rw-r--r--  roles/openshift_logging_mux/defaults/main.yml | 13
-rw-r--r--  roles/openshift_logging_mux/tasks/main.yaml | 1
-rw-r--r--  roles/openshift_logging_mux/templates/mux.j2 | 19
-rw-r--r--  roles/openshift_master/tasks/system_container.yml | 5
-rw-r--r--  roles/openshift_master/tasks/upgrade_facts.yml | 4
-rw-r--r--  roles/openshift_master/templates/atomic-openshift-master.j2 | 2
-rw-r--r--  roles/openshift_node/tasks/node_system_container.yml | 5
-rw-r--r--  roles/openshift_node/tasks/openvswitch_system_container.yml | 5
-rw-r--r--  roles/openshift_node_certificates/handlers/main.yml | 16
-rw-r--r--  roles/openshift_node_dnsmasq/README.md | 27
-rw-r--r--  roles/openshift_node_dnsmasq/defaults/main.yml | 1
-rw-r--r--  roles/openshift_node_dnsmasq/tasks/network-manager.yml | 1
-rw-r--r--  roles/openshift_prometheus/README.md | 27
-rw-r--r--  roles/openshift_sanitize_inventory/tasks/main.yml | 13
-rw-r--r--  roles/openshift_sanitize_inventory/tasks/unsupported.yml | 8
54 files changed, 477 insertions, 177 deletions
diff --git a/roles/ansible_service_broker/defaults/main.yml b/roles/ansible_service_broker/defaults/main.yml
index fa982d533..dc05b03b5 100644
--- a/roles/ansible_service_broker/defaults/main.yml
+++ b/roles/ansible_service_broker/defaults/main.yml
@@ -13,7 +13,4 @@ ansible_service_broker_launch_apb_on_bind: false
ansible_service_broker_image_pull_policy: IfNotPresent
ansible_service_broker_sandbox_role: edit
-ansible_service_broker_auto_escalate: true
-ansible_service_broker_registry_tag: latest
-ansible_service_broker_registry_whitelist:
- - '.*-apb$'
+ansible_service_broker_auto_escalate: false
diff --git a/roles/ansible_service_broker/tasks/install.yml b/roles/ansible_service_broker/tasks/install.yml
index c0384b7c4..66c3d9cc4 100644
--- a/roles/ansible_service_broker/tasks/install.yml
+++ b/roles/ansible_service_broker/tasks/install.yml
@@ -22,23 +22,14 @@
ansible_service_broker_registry_user: "{{ ansible_service_broker_registry_user | default(__ansible_service_broker_registry_user) }}"
ansible_service_broker_registry_password: "{{ ansible_service_broker_registry_password | default(__ansible_service_broker_registry_password) }}"
ansible_service_broker_registry_organization: "{{ ansible_service_broker_registry_organization | default(__ansible_service_broker_registry_organization) }}"
-
- ansible_service_broker_certs_dir: "{{ openshift.common.config_base }}/service-catalog"
+ ansible_service_broker_registry_tag: "{{ ansible_service_broker_registry_tag | default(__ansible_service_broker_registry_tag) }}"
+ ansible_service_broker_registry_whitelist: "{{ ansible_service_broker_registry_whitelist | default(__ansible_service_broker_registry_whitelist) }}"
- name: set ansible-service-broker image facts using set prefix and tag
set_fact:
ansible_service_broker_image: "{{ ansible_service_broker_image_prefix }}ansible-service-broker:{{ ansible_service_broker_image_tag }}"
ansible_service_broker_etcd_image: "{{ ansible_service_broker_etcd_image_prefix }}etcd:{{ ansible_service_broker_etcd_image_tag }}"
-- set_fact:
- openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
- when: openshift_master_config_dir is undefined
-
-- slurp:
- src: "{{ openshift_master_config_dir }}/service-signer.crt"
- register: catalog_ca
-
-
- include: validate_facts.yml
@@ -83,13 +74,12 @@
state: present
name: asb-access
rules:
- - nonResourceURLs: ["/ansible-service-broker", "ansible-service-broker/*"]
+ - nonResourceURLs: ["/ansible-service-broker", "/ansible-service-broker/*"]
verbs: ["get", "post", "put", "patch", "delete"]
- name: Bind admin cluster-role to asb serviceaccount
oc_adm_policy_user:
state: present
- namespace: openshift-ansible-service-broker
resource_kind: cluster-role
resource_name: admin
user: "system:serviceaccount:openshift-ansible-service-broker:asb"
@@ -97,7 +87,6 @@
- name: Bind auth cluster role to asb service account
oc_adm_policy_user:
state: present
- namespace: openshift-ansible-service-broker
resource_kind: cluster-role
resource_name: asb-auth
user: "system:serviceaccount:openshift-ansible-service-broker:asb"
@@ -105,7 +94,6 @@
- name: Bind asb-access role to asb-client service account
oc_adm_policy_user:
state: present
- namespace: openshift-ansible-service-broker
resource_kind: cluster-role
resource_name: asb-access
user: "system:serviceaccount:openshift-ansible-service-broker:asb-client"
@@ -113,6 +101,7 @@
- name: create asb-client token secret
oc_obj:
name: asb-client
+ namespace: openshift-ansible-service-broker
state: present
kind: Secret
content:
@@ -122,10 +111,20 @@
kind: Secret
metadata:
name: asb-client
+ namespace: openshift-ansible-service-broker
annotations:
kubernetes.io/service-account.name: asb-client
type: kubernetes.io/service-account-token
+- oc_secret:
+ state: list
+ namespace: openshift-ansible-service-broker
+ name: asb-client
+ register: asb_client_secret
+
+- set_fact:
+ service_ca_crt: asb_client_secret.results.results.0.data['service-ca.crt']
+
# Using oc_obj because oc_service doesn't seem to allow annotations
# TODO: Extend oc_service to allow annotations
- name: create ansible-service-broker service
@@ -141,6 +140,7 @@
kind: Service
metadata:
name: asb
+ namespace: openshift-ansible-service-broker
labels:
app: openshift-ansible-service-broker
service: asb
@@ -235,6 +235,20 @@
value: /etc/ansible-service-broker/config.yaml
resources: {}
terminationMessagePath: /tmp/termination-log
+ readinessProbe:
+ httpGet:
+ port: 1338
+ path: /healthz
+ scheme: HTTPS
+ initialDelaySeconds: 15
+ timeoutSeconds: 1
+ livenessProbe:
+ httpGet:
+ port: 1338
+ path: /healthz
+ scheme: HTTPS
+ initialDelaySeconds: 15
+ timeoutSeconds: 1
- image: "{{ ansible_service_broker_etcd_image }}"
name: etcd
@@ -340,11 +354,11 @@
metadata:
name: ansible-service-broker
spec:
- url: http://asb.openshift-ansible-service-broker.svc:1338/ansible-service-broker
+ url: https://asb.openshift-ansible-service-broker.svc:1338/ansible-service-broker
authInfo:
bearer:
secretRef:
name: asb-client
namespace: openshift-ansible-service-broker
kind: Secret
- caBundle: "{{ catalog_ca.content }}"
+ caBundle: "{{ service_ca_crt }}"
diff --git a/roles/ansible_service_broker/vars/default_images.yml b/roles/ansible_service_broker/vars/default_images.yml
index 3e9639adf..8438e993f 100644
--- a/roles/ansible_service_broker/vars/default_images.yml
+++ b/roles/ansible_service_broker/vars/default_images.yml
@@ -13,3 +13,5 @@ __ansible_service_broker_registry_url: null
__ansible_service_broker_registry_user: null
__ansible_service_broker_registry_password: null
__ansible_service_broker_registry_organization: null
+__ansible_service_broker_registry_tag: latest
+__ansible_service_broker_registry_whitelist: []
diff --git a/roles/ansible_service_broker/vars/openshift-enterprise.yml b/roles/ansible_service_broker/vars/openshift-enterprise.yml
index 9c576cb76..fc58b4fd8 100644
--- a/roles/ansible_service_broker/vars/openshift-enterprise.yml
+++ b/roles/ansible_service_broker/vars/openshift-enterprise.yml
@@ -1,7 +1,7 @@
---
__ansible_service_broker_image_prefix: registry.access.redhat.com/openshift3/ose-
-__ansible_service_broker_image_tag: v3.6
+__ansible_service_broker_image_tag: v3.7
__ansible_service_broker_etcd_image_prefix: rhel7/
__ansible_service_broker_etcd_image_tag: latest
@@ -14,3 +14,6 @@ __ansible_service_broker_registry_url: "https://registry.access.redhat.com"
__ansible_service_broker_registry_user: null
__ansible_service_broker_registry_password: null
__ansible_service_broker_registry_organization: null
+__ansible_service_broker_registry_tag: v3.7
+__ansible_service_broker_registry_whitelist:
+ - '.*-apb$'
diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml
index f73f90686..5ea73568a 100644
--- a/roles/docker/tasks/main.yml
+++ b/roles/docker/tasks/main.yml
@@ -10,6 +10,15 @@
l_use_crio: "{{ openshift_use_crio | default(False) }}"
l_use_crio_only: "{{ openshift_use_crio_only | default(False) }}"
+- name: Add enterprise registry, if necessary
+ set_fact:
+ l2_docker_additional_registries: "{{ l2_docker_additional_registries + [openshift_docker_ent_reg] }}"
+ when:
+ - openshift.common.deployment_type == 'openshift-enterprise'
+ - openshift_docker_ent_reg != ''
+ - openshift_docker_ent_reg not in l2_docker_additional_registries
+ - not l_use_crio_only
+
- name: Use Package Docker if Requested
include: package_docker.yml
when:
diff --git a/roles/docker/tasks/package_docker.yml b/roles/docker/tasks/package_docker.yml
index 7ccab37a5..d6aee0513 100644
--- a/roles/docker/tasks/package_docker.yml
+++ b/roles/docker/tasks/package_docker.yml
@@ -52,14 +52,6 @@
- restart docker
when: not (os_firewall_use_firewalld | default(False)) | bool
-- name: Add enterprise registry, if necessary
- set_fact:
- l2_docker_additional_registries: "{{ l2_docker_additional_registries + [openshift_docker_ent_reg] }}"
- when:
- - openshift.common.deployment_type == 'openshift-enterprise'
- - openshift_docker_ent_reg != ''
- - openshift_docker_ent_reg not in l2_docker_additional_registries
-
- stat: path=/etc/sysconfig/docker
register: docker_check
diff --git a/roles/docker/tasks/systemcontainer_crio.yml b/roles/docker/tasks/systemcontainer_crio.yml
index a79600930..13bbd359e 100644
--- a/roles/docker/tasks/systemcontainer_crio.yml
+++ b/roles/docker/tasks/systemcontainer_crio.yml
@@ -82,36 +82,10 @@
enabled: yes
state: restarted
-
-- block:
-
- - name: Add http_proxy to /etc/atomic.conf
- lineinfile:
- dest: /etc/atomic.conf
- regexp: "^#?http_proxy[:=]{1}"
- line: "http_proxy: {{ openshift.common.http_proxy | default('') }}"
- when:
- - openshift.common.http_proxy is defined
- - openshift.common.http_proxy != ''
-
- - name: Add https_proxy to /etc/atomic.conf
- lineinfile:
- dest: /etc/atomic.conf
- regexp: "^#?https_proxy[:=]{1}"
- line: "https_proxy: {{ openshift.common.https_proxy | default('') }}"
- when:
- - openshift.common.https_proxy is defined
- - openshift.common.https_proxy != ''
-
- - name: Add no_proxy to /etc/atomic.conf
- lineinfile:
- dest: /etc/atomic.conf
- regexp: "^#?no_proxy[:=]{1}"
- line: "no_proxy: {{ openshift.common.no_proxy | default('') }}"
- when:
- - openshift.common.no_proxy is defined
- - openshift.common.no_proxy != ''
-
+- name: Ensure proxies are in the atomic.conf
+ include_role:
+ name: openshift_atomic
+ tasks_from: proxy
- block:
diff --git a/roles/docker/tasks/systemcontainer_docker.yml b/roles/docker/tasks/systemcontainer_docker.yml
index 15c6a55db..726e8ada7 100644
--- a/roles/docker/tasks/systemcontainer_docker.yml
+++ b/roles/docker/tasks/systemcontainer_docker.yml
@@ -68,38 +68,10 @@
retries: 3
delay: 30
-
-# Set http_proxy, https_proxy, and no_proxy in /etc/atomic.conf
-# regexp: the line starts with or without #, followed by the string
-# http_proxy, then either : or =
-- block:
-
- - name: Add http_proxy to /etc/atomic.conf
- lineinfile:
- dest: /etc/atomic.conf
- regexp: "^#?http_proxy[:=]{1}"
- line: "http_proxy: {{ openshift.common.http_proxy | default('') }}"
- when:
- - openshift.common.http_proxy is defined
- - openshift.common.http_proxy != ''
-
- - name: Add https_proxy to /etc/atomic.conf
- lineinfile:
- dest: /etc/atomic.conf
- regexp: "^#?https_proxy[:=]{1}"
- line: "https_proxy: {{ openshift.common.https_proxy | default('') }}"
- when:
- - openshift.common.https_proxy is defined
- - openshift.common.https_proxy != ''
-
- - name: Add no_proxy to /etc/atomic.conf
- lineinfile:
- dest: /etc/atomic.conf
- regexp: "^#?no_proxy[:=]{1}"
- line: "no_proxy: {{ openshift.common.no_proxy | default('') }}"
- when:
- - openshift.common.no_proxy is defined
- - openshift.common.no_proxy != ''
+- name: Ensure proxies are in the atomic.conf
+ include_role:
+ name: openshift_atomic
+ tasks_from: proxy
- block:
diff --git a/roles/etcd/tasks/system_container.yml b/roles/etcd/tasks/system_container.yml
index 024479fb4..9a6951920 100644
--- a/roles/etcd/tasks/system_container.yml
+++ b/roles/etcd/tasks/system_container.yml
@@ -2,6 +2,11 @@
- set_fact:
l_etcd_src_data_dir: "{{ '/var/lib/origin/openshift.local.etcd' if r_etcd_common_embedded_etcd | bool else '/var/lib/etcd/' }}"
+- name: Ensure proxies are in the atomic.conf
+ include_role:
+ name: openshift_atomic
+ tasks_from: proxy
+
- name: Pull etcd system container
command: atomic pull --storage=ostree {{ openshift.etcd.etcd_image }}
register: pull_result
diff --git a/roles/openshift_atomic/README.md b/roles/openshift_atomic/README.md
new file mode 100644
index 000000000..8c10c9991
--- /dev/null
+++ b/roles/openshift_atomic/README.md
@@ -0,0 +1,28 @@
+OpenShift Atomic
+================
+
+This role houses atomic specific tasks.
+
+Requirements
+------------
+
+Role Variables
+--------------
+
+Dependencies
+------------
+
+Example Playbook
+----------------
+
+```
+- name: Ensure atomic proxies are defined
+ hosts: localhost
+ roles:
+ - role: openshift_atomic
+```
+
+License
+-------
+
+Apache License Version 2.0
diff --git a/roles/openshift_atomic/meta/main.yml b/roles/openshift_atomic/meta/main.yml
new file mode 100644
index 000000000..ea129f514
--- /dev/null
+++ b/roles/openshift_atomic/meta/main.yml
@@ -0,0 +1,13 @@
+---
+galaxy_info:
+ author: OpenShift
+ description: Atomic related tasks
+ company: Red Hat, Inc
+ license: ASL 2.0
+ min_ansible_version: 2.2
+ platforms:
+ - name: EL
+ versions:
+ - 7
+dependencies:
+- role: lib_openshift
diff --git a/roles/openshift_atomic/tasks/proxy.yml b/roles/openshift_atomic/tasks/proxy.yml
new file mode 100644
index 000000000..dde099984
--- /dev/null
+++ b/roles/openshift_atomic/tasks/proxy.yml
@@ -0,0 +1,32 @@
+---
+# Set http_proxy, https_proxy, and no_proxy in /etc/atomic.conf
+# regexp: the line starts with or without #, followed by the string
+# http_proxy, then either : or =
+- block:
+
+ - name: Add http_proxy to /etc/atomic.conf
+ lineinfile:
+ dest: /etc/atomic.conf
+ regexp: "^#?http_proxy[:=]{1}"
+ line: "http_proxy: {{ openshift.common.http_proxy | default('') }}"
+ when:
+ - openshift.common.http_proxy is defined
+ - openshift.common.http_proxy != ''
+
+ - name: Add https_proxy to /etc/atomic.conf
+ lineinfile:
+ dest: /etc/atomic.conf
+ regexp: "^#?https_proxy[:=]{1}"
+ line: "https_proxy: {{ openshift.common.https_proxy | default('') }}"
+ when:
+ - openshift.common.https_proxy is defined
+ - openshift.common.https_proxy != ''
+
+ - name: Add no_proxy to /etc/atomic.conf
+ lineinfile:
+ dest: /etc/atomic.conf
+ regexp: "^#?no_proxy[:=]{1}"
+ line: "no_proxy: {{ openshift.common.no_proxy | default('') }}"
+ when:
+ - openshift.common.no_proxy is defined
+ - openshift.common.no_proxy != ''
diff --git a/roles/openshift_cli/tasks/main.yml b/roles/openshift_cli/tasks/main.yml
index 9e61805f9..14d8a3325 100644
--- a/roles/openshift_cli/tasks/main.yml
+++ b/roles/openshift_cli/tasks/main.yml
@@ -1,6 +1,9 @@
---
- set_fact:
- l_use_crio: "{{ openshift_use_crio | default(false) }}"
+ l_use_crio_only: "{{ openshift_use_crio_only | default(false) }}"
+ l_is_system_container_image: "{{ openshift_use_master_system_container | default(openshift_use_system_containers | default(false)) | bool }}"
+- set_fact:
+ l_use_cli_atomic_image: "{{ l_use_crio_only or l_is_system_container_image }}"
- name: Install clients
package: name={{ openshift.common.service_type }}-clients state=present
@@ -20,23 +23,23 @@
backend: "docker"
when:
- openshift.common.is_containerized | bool
- - not l_use_crio
+ - not l_use_cli_atomic_image | bool
- block:
- name: Pull CLI Image
command: >
- atomic pull --storage ostree {{ openshift.common.system_images_registry }}/{{ openshift.common.cli_image }}:{{ openshift_image_tag }}
+ atomic pull --storage ostree {{ 'docker:' if openshift.common.system_images_registry == 'docker' else openshift.common.system_images_registry + '/' }}{{ openshift.common.cli_image }}:{{ openshift_image_tag }}
register: pull_result
changed_when: "'Pulling layer' in pull_result.stdout"
- name: Copy client binaries/symlinks out of CLI image for use on the host
openshift_container_binary_sync:
- image: "{{ openshift.common.system_images_registry }}/{{ openshift.common.cli_image }}"
+ image: "{{ '' if openshift.common.system_images_registry == 'docker' else openshift.common.system_images_registry + '/' }}{{ openshift.common.cli_image }}"
tag: "{{ openshift_image_tag }}"
backend: "atomic"
when:
- openshift.common.is_containerized | bool
- - l_use_crio
+ - l_use_cli_atomic_image | bool
- name: Reload facts to pick up installed OpenShift version
openshift_facts:
diff --git a/roles/openshift_health_checker/action_plugins/openshift_health_check.py b/roles/openshift_health_checker/action_plugins/openshift_health_check.py
index 326176273..3ee3b132c 100644
--- a/roles/openshift_health_checker/action_plugins/openshift_health_check.py
+++ b/roles/openshift_health_checker/action_plugins/openshift_health_check.py
@@ -101,7 +101,8 @@ class ActionModule(ActionBase):
execute_module=self._execute_module,
tmp=tmp,
task_vars=task_vars,
- want_full_results=want_full_results
+ want_full_results=want_full_results,
+ templar=self._templar
)
return known_checks
diff --git a/roles/openshift_health_checker/openshift_checks/__init__.py b/roles/openshift_health_checker/openshift_checks/__init__.py
index ce05b44a4..b7b16e0ea 100644
--- a/roles/openshift_health_checker/openshift_checks/__init__.py
+++ b/roles/openshift_health_checker/openshift_checks/__init__.py
@@ -65,12 +65,15 @@ class OpenShiftCheck(object):
If the check can gather logs, tarballs, etc., do so when True; but no need to spend
the time if they're not wanted (won't be written to output directory).
"""
-
- def __init__(self, execute_module=None, task_vars=None, tmp=None, want_full_results=False):
+ # pylint: disable=too-many-arguments
+ def __init__(self, execute_module=None, task_vars=None, tmp=None, want_full_results=False,
+ templar=None):
# store a method for executing ansible modules from the check
self._execute_module = execute_module
# the task variables and tmpdir passed into the health checker task
self.task_vars = task_vars or {}
+ # We may need to template some task_vars
+ self._templar = templar
self.tmp = tmp
# a boolean for disabling the gathering of results (files, computations) that won't
# actually be recorded/used
diff --git a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
index 7c8ac78fe..5beb20503 100644
--- a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
@@ -61,10 +61,15 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
# for the oreg_url registry there may be credentials specified
components = self.get_var("oreg_url", default="").split('/')
self.registries["oreg"] = "" if len(components) < 3 else components[0]
+
+ # Retrieve and template registry credentials, if provided
self.skopeo_command_creds = ""
oreg_auth_user = self.get_var('oreg_auth_user', default='')
oreg_auth_password = self.get_var('oreg_auth_password', default='')
if oreg_auth_user != '' and oreg_auth_password != '':
+ if self._templar is not None:
+ oreg_auth_user = self._templar.template(oreg_auth_user)
+ oreg_auth_password = self._templar.template(oreg_auth_password)
self.skopeo_command_creds = "--creds={}:{}".format(quote(oreg_auth_user), quote(oreg_auth_password))
# record whether we could reach a registry or not (and remember results)
diff --git a/roles/openshift_logging/README.md b/roles/openshift_logging/README.md
index 69eb9283d..280d7d24c 100644
--- a/roles/openshift_logging/README.md
+++ b/roles/openshift_logging/README.md
@@ -44,23 +44,23 @@ When `openshift_logging_install_logging` is set to `False` the `openshift_loggin
- `openshift_logging_curator_run_timezone`: The timezone that Curator uses for figuring out its run time. Defaults to 'UTC'.
- `openshift_logging_curator_script_log_level`: The script log level for Curator. Defaults to 'INFO'.
- `openshift_logging_curator_log_level`: The log level for the Curator process. Defaults to 'ERROR'.
-- `openshift_logging_curator_cpu_limit`: The amount of CPU to allocate to Curator. Default is '100m'.
+- `openshift_logging_curator_cpu_request`: The minimum amount of CPU to allocate to Curator. Default is '100m'.
- `openshift_logging_curator_memory_limit`: The amount of memory to allocate to Curator. Unset if not specified.
- `openshift_logging_curator_nodeselector`: A map of labels (e.g. {"node":"infra","region":"west"} to select the nodes where the curator pod will land.
- `openshift_logging_image_pull_secret`: The name of an existing pull secret to link to the logging service accounts
- `openshift_logging_kibana_hostname`: The Kibana hostname. Defaults to 'kibana.example.com'.
-- `openshift_logging_kibana_cpu_limit`: The amount of CPU to allocate to Kibana or unset if not specified.
+- `openshift_logging_kibana_cpu_request`: The minimum amount of CPU to allocate to Kibana or unset if not specified.
- `openshift_logging_kibana_memory_limit`: The amount of memory to allocate to Kibana or unset if not specified.
- `openshift_logging_kibana_proxy_debug`: When "True", set the Kibana Proxy log level to DEBUG. Defaults to 'false'.
-- `openshift_logging_kibana_proxy_cpu_limit`: The amount of CPU to allocate to Kibana proxy or unset if not specified.
+- `openshift_logging_kibana_proxy_cpu_request`: The minimum amount of CPU to allocate to Kibana proxy or unset if not specified.
- `openshift_logging_kibana_proxy_memory_limit`: The amount of memory to allocate to Kibana proxy or unset if not specified.
- `openshift_logging_kibana_replica_count`: The number of replicas Kibana should be scaled up to. Defaults to 1.
- `openshift_logging_kibana_nodeselector`: A map of labels (e.g. {"node":"infra","region":"west"} to select the nodes where the pod will land.
- `openshift_logging_kibana_edge_term_policy`: Insecure Edge Termination Policy. Defaults to Redirect.
- `openshift_logging_fluentd_nodeselector`: The node selector that the Fluentd daemonset uses to determine where to deploy to. Defaults to '"logging-infra-fluentd": "true"'.
-- `openshift_logging_fluentd_cpu_limit`: The CPU limit for Fluentd pods. Defaults to '100m'.
+- `openshift_logging_fluentd_cpu_request`: The minimum amount of CPU to allocate for Fluentd collector pods. Defaults to '100m'.
- `openshift_logging_fluentd_memory_limit`: The memory limit for Fluentd pods. Defaults to '512Mi'.
- `openshift_logging_fluentd_use_journal`: *DEPRECATED - DO NOT USE* Fluentd will automatically detect whether or not Docker is using the journald log driver.
- `openshift_logging_fluentd_journal_read_from_head`: If empty, Fluentd will use its internal default, which is false.
@@ -80,7 +80,7 @@ When `openshift_logging_install_logging` is set to `False` the `openshift_loggin
- `openshift_logging_es_client_key`: The location of the client key Fluentd uses for openshift_logging_es_host. Defaults to '/etc/fluent/keys/key'.
- `openshift_logging_es_cluster_size`: The number of ES cluster members. Defaults to '1'.
-- `openshift_logging_es_cpu_limit`: The amount of CPU limit for the ES cluster. Unused if not set
+- `openshift_logging_es_cpu_request`: The minimum amount of CPU to allocate for an ES pod cluster member. Defaults to 1 CPU.
- `openshift_logging_es_memory_limit`: The amount of RAM that should be assigned to ES. Defaults to '8Gi'.
- `openshift_logging_es_log_appenders`: The list of rootLogger appenders for ES logs which can be: 'file', 'console'. Defaults to 'file'.
- `openshift_logging_es_pv_selector`: A key/value map added to a PVC in order to select specific PVs. Defaults to 'None'.
@@ -107,7 +107,7 @@ same as above for their non-ops counterparts, but apply to the OPS cluster insta
- `openshift_logging_es_ops_client_cert`: /etc/fluent/keys/cert
- `openshift_logging_es_ops_client_key`: /etc/fluent/keys/key
- `openshift_logging_es_ops_cluster_size`: 1
-- `openshift_logging_es_ops_cpu_limit`: The amount of CPU limit for the ES cluster. Unused if not set
+- `openshift_logging_es_ops_cpu_request`: The minimum amount of CPU to allocate for an ES ops pod cluster member. Defaults to 1 CPU.
- `openshift_logging_es_ops_memory_limit`: 8Gi
- `openshift_logging_es_ops_pvc_dynamic`: False
- `openshift_logging_es_ops_pvc_size`: ""
@@ -115,9 +115,9 @@ same as above for their non-ops counterparts, but apply to the OPS cluster insta
- `openshift_logging_es_ops_recover_after_time`: 5m
- `openshift_logging_es_ops_storage_group`: 65534
- `openshift_logging_kibana_ops_hostname`: The Operations Kibana hostname. Defaults to 'kibana-ops.example.com'.
-- `openshift_logging_kibana_ops_cpu_limit`: The amount of CPU to allocate to Kibana or unset if not specified.
+- `openshift_logging_kibana_ops_cpu_request`: The minimum amount of CPU to allocate to Kibana or unset if not specified.
- `openshift_logging_kibana_ops_memory_limit`: The amount of memory to allocate to Kibana or unset if not specified.
-- `openshift_logging_kibana_ops_proxy_cpu_limit`: The amount of CPU to allocate to Kibana proxy or unset if not specified.
+- `openshift_logging_kibana_ops_proxy_cpu_request`: The minimum amount of CPU to allocate to Kibana proxy or unset if not specified.
- `openshift_logging_kibana_ops_proxy_memory_limit`: The amount of memory to allocate to Kibana proxy or unset if not specified.
- `openshift_logging_kibana_ops_replica_count`: The number of replicas Kibana ops should be scaled up to. Defaults to 1.
@@ -176,7 +176,7 @@ Elasticsearch OPS too, if using an OPS cluster:
clients will use to connect to mux, and will be used in the TLS server cert
subject.
- `openshift_logging_mux_port`: 24284
-- `openshift_logging_mux_cpu_limit`: 100m
+- `openshift_logging_mux_cpu_request`: 100m
- `openshift_logging_mux_memory_limit`: 512Mi
- `openshift_logging_mux_default_namespaces`: Default `["mux-undefined"]` - the
first value in the list is the namespace to use for undefined projects,
diff --git a/roles/openshift_logging/defaults/main.yml b/roles/openshift_logging/defaults/main.yml
index 6e7e2557f..626732d16 100644
--- a/roles/openshift_logging/defaults/main.yml
+++ b/roles/openshift_logging/defaults/main.yml
@@ -18,20 +18,24 @@ openshift_logging_curator_run_minute: 0
openshift_logging_curator_run_timezone: UTC
openshift_logging_curator_script_log_level: INFO
openshift_logging_curator_log_level: ERROR
-openshift_logging_curator_cpu_limit: 100m
-openshift_logging_curator_memory_limit: null
+openshift_logging_curator_cpu_limit: null
+openshift_logging_curator_memory_limit: 256Mi
+openshift_logging_curator_cpu_request: 100m
openshift_logging_curator_nodeselector: {}
-openshift_logging_curator_ops_cpu_limit: 100m
-openshift_logging_curator_ops_memory_limit: null
+openshift_logging_curator_ops_cpu_limit: null
+openshift_logging_curator_ops_memory_limit: 256Mi
+openshift_logging_curator_ops_cpu_request: 100m
openshift_logging_curator_ops_nodeselector: {}
openshift_logging_kibana_hostname: "{{ 'kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}"
openshift_logging_kibana_cpu_limit: null
openshift_logging_kibana_memory_limit: 736Mi
+openshift_logging_kibana_cpu_request: 100m
openshift_logging_kibana_proxy_debug: false
openshift_logging_kibana_proxy_cpu_limit: null
openshift_logging_kibana_proxy_memory_limit: 256Mi
+openshift_logging_kibana_proxy_cpu_request: 100m
openshift_logging_kibana_replica_count: 1
openshift_logging_kibana_edge_term_policy: Redirect
@@ -53,9 +57,11 @@ openshift_logging_kibana_ca: ""
openshift_logging_kibana_ops_hostname: "{{ 'kibana-ops.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}"
openshift_logging_kibana_ops_cpu_limit: null
openshift_logging_kibana_ops_memory_limit: 736Mi
+openshift_logging_kibana_ops_cpu_request: 100m
openshift_logging_kibana_ops_proxy_debug: false
openshift_logging_kibana_ops_proxy_cpu_limit: null
openshift_logging_kibana_ops_proxy_memory_limit: 256Mi
+openshift_logging_kibana_ops_proxy_cpu_request: 100m
openshift_logging_kibana_ops_replica_count: 1
#The absolute path on the control node to the cert file to use
@@ -71,13 +77,14 @@ openshift_logging_kibana_ops_key: ""
openshift_logging_kibana_ops_ca: ""
openshift_logging_fluentd_nodeselector: {'logging-infra-fluentd': 'true'}
-openshift_logging_fluentd_cpu_limit: 100m
+openshift_logging_fluentd_cpu_limit: null
openshift_logging_fluentd_memory_limit: 512Mi
+openshift_logging_fluentd_cpu_request: 100m
openshift_logging_fluentd_journal_source: ""
openshift_logging_fluentd_journal_read_from_head: ""
openshift_logging_fluentd_hosts: ['--all']
-openshift_logging_fluentd_buffer_queue_limit: 1024
-openshift_logging_fluentd_buffer_size_limit: 1m
+openshift_logging_fluentd_buffer_queue_limit: 32
+openshift_logging_fluentd_buffer_size_limit: 8m
openshift_logging_es_host: logging-es
openshift_logging_es_port: 9200
@@ -85,7 +92,8 @@ openshift_logging_es_ca: /etc/fluent/keys/ca
openshift_logging_es_client_cert: /etc/fluent/keys/cert
openshift_logging_es_client_key: /etc/fluent/keys/key
openshift_logging_es_cluster_size: 1
-openshift_logging_es_cpu_limit: 1000m
+openshift_logging_es_cpu_limit: null
+openshift_logging_es_cpu_request: "1"
# the logging appenders for the root loggers to write ES logs. Valid values: 'file', 'console'
openshift_logging_es_log_appenders: ['file']
openshift_logging_es_memory_limit: "8Gi"
@@ -98,8 +106,6 @@ openshift_logging_es_storage_group: "65534"
openshift_logging_es_nodeselector: {}
# openshift_logging_es_config is a hash to be merged into the defaults for the elasticsearch.yaml
openshift_logging_es_config: {}
-openshift_logging_es_number_of_shards: 1
-openshift_logging_es_number_of_replicas: 0
# for exposing es to external (outside of the cluster) clients
openshift_logging_es_allow_external: False
@@ -126,8 +132,9 @@ openshift_logging_es_ops_ca: /etc/fluent/keys/ca
openshift_logging_es_ops_client_cert: /etc/fluent/keys/cert
openshift_logging_es_ops_client_key: /etc/fluent/keys/key
openshift_logging_es_ops_cluster_size: "{{ openshift_logging_elasticsearch_ops_cluster_size | default(1) }}"
-openshift_logging_es_ops_cpu_limit: 1000m
-openshift_logging_es_ops_memory_limit: "8Gi"
+openshift_logging_es_ops_cpu_limit: null
+openshift_logging_es_ops_memory_limit: 8Gi
+openshift_logging_es_ops_cpu_request: "1"
openshift_logging_es_ops_pv_selector: "{{ openshift_loggingops_storage_labels | default('') }}"
openshift_logging_es_ops_pvc_dynamic: "{{ openshift_logging_elasticsearch_ops_pvc_dynamic | default(False) }}"
openshift_logging_es_ops_pvc_size: "{{ openshift_logging_elasticsearch_ops_pvc_size | default('') }}"
@@ -160,8 +167,9 @@ openshift_logging_mux_allow_external: False
openshift_logging_use_mux: "{{ openshift_logging_mux_allow_external | default(False) }}"
openshift_logging_mux_hostname: "{{ 'mux.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}"
openshift_logging_mux_port: 24284
-openshift_logging_mux_cpu_limit: 500m
-openshift_logging_mux_memory_limit: 1Gi
+openshift_logging_mux_cpu_limit: null
+openshift_logging_mux_memory_limit: 512Mi
+openshift_logging_mux_cpu_request: 100m
# the namespace to use for undefined projects should come first, followed by any
# additional namespaces to create by default - users will typically not need to set this
openshift_logging_mux_default_namespaces: ["mux-undefined"]
diff --git a/roles/openshift_logging/filter_plugins/openshift_logging.py b/roles/openshift_logging/filter_plugins/openshift_logging.py
index 330e7e59a..959573635 100644
--- a/roles/openshift_logging/filter_plugins/openshift_logging.py
+++ b/roles/openshift_logging/filter_plugins/openshift_logging.py
@@ -17,6 +17,22 @@ def es_storage(os_logging_facts, dc_name, pvc_claim, root='elasticsearch'):
return dict(kind='emptydir')
+def walk(source, path, default, delimiter='.'):
+ '''Walk the sourch hash given the path and return the value or default if not found'''
+ if not isinstance(source, dict):
+ raise RuntimeError('The source is not a walkable dict: {} path: {}'.format(source, path))
+ keys = path.split(delimiter)
+ max_depth = len(keys)
+ cur_depth = 0
+ while cur_depth < max_depth:
+ if keys[cur_depth] in source:
+ source = source[keys[cur_depth]]
+ cur_depth = cur_depth + 1
+ else:
+ return default
+ return source
+
+
def random_word(source_alpha, length):
''' Returns a random word given the source of characters to pick from and resulting length '''
return ''.join(random.choice(source_alpha) for i in range(length))
@@ -73,5 +89,6 @@ class FilterModule(object):
'map_from_pairs': map_from_pairs,
'es_storage': es_storage,
'serviceaccount_name': serviceaccount_name,
- 'serviceaccount_namespace': serviceaccount_namespace
+ 'serviceaccount_namespace': serviceaccount_namespace,
+ 'walk': walk
}
diff --git a/roles/openshift_logging/filter_plugins/test b/roles/openshift_logging/filter_plugins/test
new file mode 100644
index 000000000..3ad956cca
--- /dev/null
+++ b/roles/openshift_logging/filter_plugins/test
@@ -0,0 +1,34 @@
+import unittest
+from openshift_logging import walk
+
+class TestFilterMethods(unittest.TestCase):
+
+
+ def test_walk_find_key(self):
+ source = {'foo': {'bar.xyz': 'myvalue'}}
+ self.assertEquals(walk(source,'foo#bar.xyz', 123, delimiter='#'), 'myvalue')
+
+
+ def test_walk_return_default(self):
+ source = {'foo': {'bar.xyz': 'myvalue'}}
+ self.assertEquals(walk(source,'foo#bar.abc', 123, delimiter='#'), 123)
+
+
+ def test_walk_limit_max_depth(self):
+ source = {'foo': {'bar.xyz': 'myvalue'}}
+ self.assertEquals(walk(source,'foo#bar.abc#dontfindme', 123, delimiter='#'), 123)
+
+ def test_complex_hash(self):
+ source = {
+ 'elasticsearch': {
+ 'configmaps': {
+ 'logging-elasticsearch': {
+ 'elasticsearch.yml': "a string value"
+ }
+ }
+ }
+ }
+ self.assertEquals(walk(source,'elasticsearch#configmaps#logging-elasticsearch#elasticsearch.yml', 123, delimiter='#'), "a string value")
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/roles/openshift_logging/library/openshift_logging_facts.py b/roles/openshift_logging/library/openshift_logging_facts.py
index f10df8da5..98d0d1c4f 100644
--- a/roles/openshift_logging/library/openshift_logging_facts.py
+++ b/roles/openshift_logging/library/openshift_logging_facts.py
@@ -207,7 +207,7 @@ class OpenshiftLoggingFacts(OCBaseCommand):
def facts_for_configmaps(self, namespace):
''' Gathers facts for configmaps in logging namespace '''
self.default_keys_for("configmaps")
- a_list = self.oc_command("get", "configmaps", namespace=namespace, add_options=["-l", LOGGING_SELECTOR])
+ a_list = self.oc_command("get", "configmaps", namespace=namespace)
if len(a_list["items"]) == 0:
return
for item in a_list["items"]:
diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml
index 21fd79c28..76627acf2 100644
--- a/roles/openshift_logging/tasks/install_logging.yaml
+++ b/roles/openshift_logging/tasks/install_logging.yaml
@@ -78,6 +78,7 @@
openshift_logging_elasticsearch_nodeselector: "{{ openshift_logging_es_nodeselector if item.0.nodeSelector | default(None) is none else item.0.nodeSelector }}"
openshift_logging_elasticsearch_storage_group: "{{ [openshift_logging_es_storage_group] if item.0.storageGroups | default([]) | length == 0 else item.0.storageGroups }}"
_es_containers: "{{item.0.containers}}"
+ _es_configmap: "{{ openshift_logging_facts | walk('elasticsearch#configmaps#logging-elasticsearch#elasticsearch.yml', '{}', delimiter='#') | from_yaml }}"
with_together:
- "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.values() }}"
@@ -133,6 +134,7 @@
openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}"
openshift_logging_elasticsearch_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}"
openshift_logging_elasticsearch_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}"
+ openshift_logging_elasticsearch_cpu_request: "{{ openshift_logging_es_ops_cpu_request }}"
openshift_logging_elasticsearch_nodeselector: "{{ openshift_logging_es_ops_nodeselector if item.0.nodeSelector | default(None) is none else item.0.nodeSelector }}"
openshift_logging_elasticsearch_storage_group: "{{ [openshift_logging_es_ops_storage_group] if item.0.storageGroups | default([]) | length == 0 else item.0.storageGroups }}"
openshift_logging_es_key: "{{ openshift_logging_es_ops_key }}"
@@ -141,7 +143,10 @@
openshift_logging_es_hostname: "{{ openshift_logging_es_ops_hostname }}"
openshift_logging_es_edge_term_policy: "{{ openshift_logging_es_ops_edge_term_policy | default('') }}"
openshift_logging_es_allow_external: "{{ openshift_logging_es_ops_allow_external }}"
+ openshift_logging_es_number_of_shards: "{{ openshift_logging_es_ops_number_of_shards | default(None) }}"
+ openshift_logging_es_number_of_replicas: "{{ openshift_logging_es_ops_number_of_replicas | default(None) }}"
_es_containers: "{{item.0.containers}}"
+ _es_configmap: "{{ openshift_logging_facts | walk('elasticsearch_ops#configmaps#logging-elasticsearch-ops#elasticsearch.yml', '{}', delimiter='#') | from_yaml }}"
with_together:
- "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.values() }}"
@@ -167,6 +172,7 @@
openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}"
openshift_logging_elasticsearch_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}"
openshift_logging_elasticsearch_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}"
+ openshift_logging_elasticsearch_cpu_request: "{{ openshift_logging_es_ops_cpu_request }}"
openshift_logging_elasticsearch_nodeselector: "{{ openshift_logging_es_ops_nodeselector }}"
openshift_logging_es_key: "{{ openshift_logging_es_ops_key }}"
openshift_logging_es_cert: "{{ openshift_logging_es_ops_cert }}"
@@ -207,11 +213,13 @@
openshift_logging_kibana_es_port: "{{ openshift_logging_es_ops_port }}"
openshift_logging_kibana_nodeselector: "{{ openshift_logging_kibana_ops_nodeselector }}"
openshift_logging_kibana_cpu_limit: "{{ openshift_logging_kibana_ops_cpu_limit }}"
+ openshift_logging_kibana_cpu_request: "{{ openshift_logging_kibana_ops_cpu_request }}"
openshift_logging_kibana_memory_limit: "{{ openshift_logging_kibana_ops_memory_limit }}"
openshift_logging_kibana_hostname: "{{ openshift_logging_kibana_ops_hostname }}"
openshift_logging_kibana_replicas: "{{ openshift_logging_kibana_ops_replica_count }}"
openshift_logging_kibana_proxy_debug: "{{ openshift_logging_kibana_ops_proxy_debug }}"
openshift_logging_kibana_proxy_cpu_limit: "{{ openshift_logging_kibana_ops_proxy_cpu_limit }}"
+ openshift_logging_kibana_proxy_cpu_request: "{{ openshift_logging_kibana_ops_proxy_cpu_request }}"
openshift_logging_kibana_proxy_memory_limit: "{{ openshift_logging_kibana_ops_proxy_memory_limit }}"
openshift_logging_kibana_cert: "{{ openshift_logging_kibana_ops_cert }}"
openshift_logging_kibana_key: "{{ openshift_logging_kibana_ops_key }}"
@@ -243,6 +251,7 @@
openshift_logging_curator_master_url: "{{ openshift_logging_master_url }}"
openshift_logging_curator_image_pull_secret: "{{ openshift_logging_image_pull_secret }}"
openshift_logging_curator_cpu_limit: "{{ openshift_logging_curator_ops_cpu_limit }}"
+ openshift_logging_curator_cpu_request: "{{ openshift_logging_curator_ops_cpu_request }}"
openshift_logging_curator_memory_limit: "{{ openshift_logging_curator_ops_memory_limit }}"
openshift_logging_curator_nodeselector: "{{ openshift_logging_curator_ops_nodeselector }}"
when:
diff --git a/roles/openshift_logging_curator/defaults/main.yml b/roles/openshift_logging_curator/defaults/main.yml
index 17807b644..9cae9f936 100644
--- a/roles/openshift_logging_curator/defaults/main.yml
+++ b/roles/openshift_logging_curator/defaults/main.yml
@@ -9,8 +9,9 @@ openshift_logging_curator_namespace: logging
### Common settings
openshift_logging_curator_nodeselector: ""
-openshift_logging_curator_cpu_limit: 100m
-openshift_logging_curator_memory_limit: null
+openshift_logging_curator_cpu_limit: null
+openshift_logging_curator_cpu_request: 100m
+openshift_logging_curator_memory_limit: 256Mi
openshift_logging_curator_es_host: "logging-es"
openshift_logging_curator_es_port: 9200
diff --git a/roles/openshift_logging_curator/tasks/main.yaml b/roles/openshift_logging_curator/tasks/main.yaml
index 6e8fab2b5..b4ddf45d9 100644
--- a/roles/openshift_logging_curator/tasks/main.yaml
+++ b/roles/openshift_logging_curator/tasks/main.yaml
@@ -90,6 +90,7 @@
es_host: "{{ openshift_logging_curator_es_host }}"
es_port: "{{ openshift_logging_curator_es_port }}"
curator_cpu_limit: "{{ openshift_logging_curator_cpu_limit }}"
+ curator_cpu_request: "{{ openshift_logging_curator_cpu_request }}"
curator_memory_limit: "{{ openshift_logging_curator_memory_limit }}"
curator_replicas: "{{ openshift_logging_curator_replicas | default (1) }}"
curator_node_selector: "{{openshift_logging_curator_nodeselector | default({})}}"
diff --git a/roles/openshift_logging_curator/templates/curator.j2 b/roles/openshift_logging_curator/templates/curator.j2
index e74918a40..e71393643 100644
--- a/roles/openshift_logging_curator/templates/curator.j2
+++ b/roles/openshift_logging_curator/templates/curator.j2
@@ -39,13 +39,26 @@ spec:
name: "curator"
image: {{image}}
imagePullPolicy: Always
+{% if (curator_memory_limit is defined and curator_memory_limit is not none and curator_memory_limit != "") or (curator_cpu_limit is defined and curator_cpu_limit is not none and curator_cpu_limit != "") or (curator_cpu_request is defined and curator_cpu_request is not none and curator_cpu_request != "") %}
resources:
+{% if (curator_memory_limit is defined and curator_memory_limit is not none and curator_memory_limit != "") or (curator_cpu_limit is defined and curator_cpu_limit is not none and curator_cpu_limit != "") %}
limits:
+{% if curator_cpu_limit is defined and curator_cpu_limit is not none and curator_cpu_limit != "" %}
cpu: "{{curator_cpu_limit}}"
-{% if curator_memory_limit is defined and curator_memory_limit is not none and curator_memory_limit != "" %}
+{% endif %}
+{% if curator_memory_limit is defined and curator_memory_limit is not none and curator_memory_limit != "" %}
memory: "{{curator_memory_limit}}"
+{% endif %}
+{% endif %}
+{% if (curator_memory_limit is defined and curator_memory_limit is not none and curator_memory_limit != "") or (curator_cpu_request is defined and curator_cpu_request is not none and curator_cpu_request != "") %}
requests:
+{% if curator_cpu_request is defined and curator_cpu_request is not none and curator_cpu_request != "" %}
+ cpu: "{{curator_cpu_request}}"
+{% endif %}
+{% if curator_memory_limit is defined and curator_memory_limit is not none and curator_memory_limit != "" %}
memory: "{{curator_memory_limit}}"
+{% endif %}
+{% endif %}
{% endif %}
env:
-
diff --git a/roles/openshift_logging_elasticsearch/defaults/main.yml b/roles/openshift_logging_elasticsearch/defaults/main.yml
index fc48b7f71..9fc6fd1d8 100644
--- a/roles/openshift_logging_elasticsearch/defaults/main.yml
+++ b/roles/openshift_logging_elasticsearch/defaults/main.yml
@@ -6,7 +6,8 @@ openshift_logging_elasticsearch_image_pull_secret: "{{ openshift_hosted_logging_
openshift_logging_elasticsearch_namespace: logging
openshift_logging_elasticsearch_nodeselector: "{{ openshift_logging_es_nodeselector | default('') }}"
-openshift_logging_elasticsearch_cpu_limit: "{{ openshift_logging_es_cpu_limit | default('1000m') }}"
+openshift_logging_elasticsearch_cpu_limit: "{{ openshift_logging_es_cpu_limit | default('') }}"
+openshift_logging_elasticsearch_cpu_request: "{{ openshift_logging_es_cpu_request | default('1000m') }}"
openshift_logging_elasticsearch_memory_limit: "{{ openshift_logging_es_memory_limit | default('1Gi') }}"
openshift_logging_elasticsearch_recover_after_time: "{{ openshift_logging_es_recover_after_time | default('5m') }}"
@@ -40,7 +41,7 @@ openshift_logging_es_pvc_prefix: "{{ openshift_hosted_logging_elasticsearch_pvc_
# config the es plugin to write kibana index based on the index mode
openshift_logging_elasticsearch_kibana_index_mode: 'unique'
-openshift_logging_elasticsearch_proxy_cpu_limit: "100m"
+openshift_logging_elasticsearch_proxy_cpu_request: "100m"
openshift_logging_elasticsearch_proxy_memory_limit: "64Mi"
openshift_logging_elasticsearch_prometheus_sa: "system:serviceaccount:{{openshift_prometheus_namespace | default('prometheus')}}:prometheus"
diff --git a/roles/openshift_logging_elasticsearch/tasks/main.yaml b/roles/openshift_logging_elasticsearch/tasks/main.yaml
index aeff2d198..7aabdc861 100644
--- a/roles/openshift_logging_elasticsearch/tasks/main.yaml
+++ b/roles/openshift_logging_elasticsearch/tasks/main.yaml
@@ -164,13 +164,17 @@
when: es_logging_contents is undefined
changed_when: no
+- set_fact:
+ __es_num_of_shards: "{{ _es_configmap | default({}) | walk('index.number_of_shards', '1') }}"
+ __es_num_of_replicas: "{{ _es_configmap | default({}) | walk('index.number_of_replicas', '0') }}"
+
- template:
src: elasticsearch.yml.j2
dest: "{{ tempdir }}/elasticsearch.yml"
vars:
allow_cluster_reader: "{{ openshift_logging_elasticsearch_ops_allow_cluster_reader | lower | default('false') }}"
- es_number_of_shards: "{{ openshift_logging_es_number_of_shards | default(1) }}"
- es_number_of_replicas: "{{ openshift_logging_es_number_of_replicas | default(0) }}"
+ es_number_of_shards: "{{ openshift_logging_es_number_of_shards | default(None) or __es_num_of_shards }}"
+ es_number_of_replicas: "{{ openshift_logging_es_number_of_replicas | default(None) or __es_num_of_replicas }}"
es_kibana_index_mode: "{{ openshift_logging_elasticsearch_kibana_index_mode | default('unique') }}"
when: es_config_contents is undefined
@@ -349,7 +353,8 @@
deploy_name: "{{ es_deploy_name }}"
image: "{{ openshift_logging_elasticsearch_image_prefix }}logging-elasticsearch:{{ openshift_logging_elasticsearch_image_version }}"
proxy_image: "{{ openshift_logging_elasticsearch_proxy_image_prefix }}oauth-proxy:{{ openshift_logging_elasticsearch_proxy_image_version }}"
- es_cpu_limit: "{{ openshift_logging_elasticsearch_cpu_limit }}"
+ es_cpu_limit: "{{ openshift_logging_elasticsearch_cpu_limit | default('') }}"
+ es_cpu_request: "{{ openshift_logging_elasticsearch_cpu_request }}"
es_memory_limit: "{{ openshift_logging_elasticsearch_memory_limit }}"
es_node_selector: "{{ openshift_logging_elasticsearch_nodeselector | default({}) }}"
es_storage_groups: "{{ openshift_logging_elasticsearch_storage_group | default([]) }}"
diff --git a/roles/openshift_logging_elasticsearch/templates/es.j2 b/roles/openshift_logging_elasticsearch/templates/es.j2
index ce3b2eb83..7966d219e 100644
--- a/roles/openshift_logging_elasticsearch/templates/es.j2
+++ b/roles/openshift_logging_elasticsearch/templates/es.j2
@@ -69,9 +69,9 @@ spec:
readOnly: true
resources:
limits:
- cpu: "{{openshift_logging_elasticsearch_proxy_cpu_limit }}"
memory: "{{openshift_logging_elasticsearch_proxy_memory_limit }}"
requests:
+ cpu: "{{openshift_logging_elasticsearch_proxy_cpu_request }}"
memory: "{{openshift_logging_elasticsearch_proxy_memory_limit }}"
-
name: "elasticsearch"
@@ -79,11 +79,12 @@ spec:
imagePullPolicy: Always
resources:
limits:
- memory: "{{es_memory_limit}}"
-{% if es_cpu_limit is defined and es_cpu_limit is not none %}
+{% if es_cpu_limit is defined and es_cpu_limit is not none and es_cpu_limit != '' %}
cpu: "{{es_cpu_limit}}"
{% endif %}
+ memory: "{{es_memory_limit}}"
requests:
+ cpu: "{{es_cpu_request}}"
memory: "{{es_memory_limit}}"
{% if es_container_security_context %}
securityContext: {{ es_container_security_context | to_yaml }}
diff --git a/roles/openshift_logging_elasticsearch/vars/openshift-enterprise.yml b/roles/openshift_logging_elasticsearch/vars/openshift-enterprise.yml
index c87d48e27..2fd960bb5 100644
--- a/roles/openshift_logging_elasticsearch/vars/openshift-enterprise.yml
+++ b/roles/openshift_logging_elasticsearch/vars/openshift-enterprise.yml
@@ -1,3 +1,3 @@
---
-__openshift_logging_elasticsearch_proxy_image_prefix: "registry.access.redhat.com/openshift3/"
+__openshift_logging_elasticsearch_proxy_image_prefix: "{{ openshift_logging_image_prefix | default('registry.access.redhat.com/openshift3/') }}"
__openshift_logging_elasticsearch_proxy_image_version: "v3.7"
diff --git a/roles/openshift_logging_eventrouter/README.md b/roles/openshift_logging_eventrouter/README.md
index da313d68b..611bdaee0 100644
--- a/roles/openshift_logging_eventrouter/README.md
+++ b/roles/openshift_logging_eventrouter/README.md
@@ -3,9 +3,9 @@ Event router
A pod forwarding kubernetes events to EFK aggregated logging stack.
-- **eventrouter** is deployed to logging project, has a service account and its own role to read events
+- **eventrouter** is deployed to default project, has a service account and its own role to read events
- **eventrouter** watches kubernetes events, marshalls them to JSON and outputs to its sink, currently only various formatting to STDOUT
-- **fluentd** picks them up and inserts to elasticsearch *.operations* index
+- **fluentd** ingests as logs from **eventrouter** container (as it would any other container), and writes them to the appropriate index for the **eventrouter**'s namespace (in the 'default' namespace, the *.operations* index is used)
- `openshift_logging_install_eventrouter`: When 'True', eventrouter will be installed. When 'False', eventrouter will be uninstalled.
@@ -15,6 +15,6 @@ Configuration variables:
- `openshift_logging_eventrouter_image_version`: The image version for the logging eventrouter. Defaults to 'latest'.
- `openshift_logging_eventrouter_sink`: Select a sink for eventrouter, supported 'stdout' and 'glog'. Defaults to 'stdout'.
- `openshift_logging_eventrouter_nodeselector`: A map of labels (e.g. {"node":"infra","region":"west"} to select the nodes where the pod will land.
-- `openshift_logging_eventrouter_cpu_limit`: The amount of CPU to allocate to eventrouter. Defaults to '100m'.
+- `openshift_logging_eventrouter_cpu_request`: The minimum amount of CPU to allocate to eventrouter. Defaults to '100m'.
- `openshift_logging_eventrouter_memory_limit`: The memory limit for eventrouter pods. Defaults to '128Mi'.
- `openshift_logging_eventrouter_namespace`: The namespace where eventrouter is deployed. Defaults to 'default'.
diff --git a/roles/openshift_logging_eventrouter/defaults/main.yaml b/roles/openshift_logging_eventrouter/defaults/main.yaml
index 34e33f75f..4c0350c98 100644
--- a/roles/openshift_logging_eventrouter/defaults/main.yaml
+++ b/roles/openshift_logging_eventrouter/defaults/main.yaml
@@ -4,6 +4,7 @@ openshift_logging_eventrouter_image_version: "{{ openshift_logging_image_version
openshift_logging_eventrouter_replicas: 1
openshift_logging_eventrouter_sink: stdout
openshift_logging_eventrouter_nodeselector: ""
-openshift_logging_eventrouter_cpu_limit: 100m
+openshift_logging_eventrouter_cpu_limit: null
+openshift_logging_eventrouter_cpu_request: 100m
openshift_logging_eventrouter_memory_limit: 128Mi
openshift_logging_eventrouter_namespace: default
diff --git a/roles/openshift_logging_eventrouter/tasks/install_eventrouter.yaml b/roles/openshift_logging_eventrouter/tasks/install_eventrouter.yaml
index 8df7435e2..cbbc6a8ec 100644
--- a/roles/openshift_logging_eventrouter/tasks/install_eventrouter.yaml
+++ b/roles/openshift_logging_eventrouter/tasks/install_eventrouter.yaml
@@ -45,7 +45,7 @@
params:
IMAGE: "{{openshift_logging_eventrouter_image_prefix}}logging-eventrouter:{{openshift_logging_eventrouter_image_version}}"
REPLICAS: "{{ openshift_logging_eventrouter_replicas }}"
- CPU: "{{ openshift_logging_eventrouter_cpu_limit }}"
+ CPU: "{{ openshift_logging_eventrouter_cpu_request }}"
MEMORY: "{{ openshift_logging_eventrouter_memory_limit }}"
NAMESPACE: "{{ openshift_logging_eventrouter_namespace }}"
SINK: "{{ openshift_logging_eventrouter_sink }}"
diff --git a/roles/openshift_logging_eventrouter/templates/eventrouter-template.j2 b/roles/openshift_logging_eventrouter/templates/eventrouter-template.j2
index ea1fd3efd..7fdf959d3 100644
--- a/roles/openshift_logging_eventrouter/templates/eventrouter-template.j2
+++ b/roles/openshift_logging_eventrouter/templates/eventrouter-template.j2
@@ -25,7 +25,7 @@ objects:
metadata:
name: logging-eventrouter
data:
- config.json: |-
+ config.json: |-
{
"sink": "${SINK}"
}
@@ -65,9 +65,9 @@ objects:
imagePullPolicy: Always
resources:
limits:
- memory: ${MEMORY}
- cpu: ${CPU}
+ memory: ${MEMORY}
requires:
+ cpu: ${CPU}
memory: ${MEMORY}
volumeMounts:
- name: config-volume
diff --git a/roles/openshift_logging_fluentd/defaults/main.yml b/roles/openshift_logging_fluentd/defaults/main.yml
index 25f7580a4..861935c99 100644
--- a/roles/openshift_logging_fluentd/defaults/main.yml
+++ b/roles/openshift_logging_fluentd/defaults/main.yml
@@ -8,7 +8,8 @@ openshift_logging_fluentd_namespace: logging
### Common settings
openshift_logging_fluentd_nodeselector: "{{ openshift_hosted_logging_fluentd_nodeselector_label | default('logging-infra-fluentd=true') | map_from_pairs }}"
-openshift_logging_fluentd_cpu_limit: 100m
+openshift_logging_fluentd_cpu_limit: null
+openshift_logging_fluentd_cpu_request: 100m
openshift_logging_fluentd_memory_limit: 512Mi
openshift_logging_fluentd_hosts: ['--all']
@@ -55,7 +56,7 @@ openshift_logging_fluentd_aggregating_passphrase: none
#fluentd_throttle_contents:
#fluentd_secureforward_contents:
-openshift_logging_fluentd_file_buffer_limit: 1Gi
+openshift_logging_fluentd_file_buffer_limit: 256Mi
# Configure fluentd to tail audit log file and filter out container engine's logs from there
# These logs are then stored in ES operation index
diff --git a/roles/openshift_logging_fluentd/tasks/main.yaml b/roles/openshift_logging_fluentd/tasks/main.yaml
index 06bb35dbc..f56810610 100644
--- a/roles/openshift_logging_fluentd/tasks/main.yaml
+++ b/roles/openshift_logging_fluentd/tasks/main.yaml
@@ -172,6 +172,9 @@
ops_port: "{{ openshift_logging_fluentd_ops_port }}"
fluentd_nodeselector_key: "{{ openshift_logging_fluentd_nodeselector.keys()[0] }}"
fluentd_nodeselector_value: "{{ openshift_logging_fluentd_nodeselector.values()[0] }}"
+ fluentd_cpu_limit: "{{ openshift_logging_fluentd_cpu_limit }}"
+ fluentd_cpu_request: "{{ openshift_logging_fluentd_cpu_request }}"
+ fluentd_memory_limit: "{{ openshift_logging_fluentd_memory_limit }}"
audit_container_engine: "{{ openshift_logging_fluentd_audit_container_engine | default(False) | bool }}"
audit_log_file: "{{ openshift_logging_fluentd_audit_file | default() }}"
audit_pos_log_file: "{{ openshift_logging_fluentd_audit_pos_file | default() }}"
diff --git a/roles/openshift_logging_fluentd/templates/fluentd.j2 b/roles/openshift_logging_fluentd/templates/fluentd.j2
index 644b70031..b07175a50 100644
--- a/roles/openshift_logging_fluentd/templates/fluentd.j2
+++ b/roles/openshift_logging_fluentd/templates/fluentd.j2
@@ -32,12 +32,27 @@ spec:
imagePullPolicy: Always
securityContext:
privileged: true
+{% if (fluentd_memory_limit is defined and fluentd_memory_limit is not none) or (fluentd_cpu_limit is defined and fluentd_cpu_limit is not none) or (fluentd_cpu_request is defined and fluentd_cpu_request is not none) %}
resources:
+{% if (fluentd_memory_limit is defined and fluentd_memory_limit is not none) or (fluentd_cpu_limit is defined and fluentd_cpu_limit is not none) %}
limits:
- cpu: {{ openshift_logging_fluentd_cpu_limit }}
- memory: {{ openshift_logging_fluentd_memory_limit }}
+{% if fluentd_cpu_limit is not none %}
+ cpu: "{{fluentd_cpu_limit}}"
+{% endif %}
+{% if fluentd_memory_limit is not none %}
+ memory: "{{fluentd_memory_limit}}"
+{% endif %}
+{% endif %}
+{% if (fluentd_memory_limit is defined and fluentd_memory_limit is not none) or (fluentd_cpu_request is defined and fluentd_cpu_request is not none) %}
requests:
- memory: {{ openshift_logging_fluentd_memory_limit }}
+{% if fluentd_cpu_request is not none %}
+ cpu: "{{fluentd_cpu_request}}"
+{% endif %}
+{% if fluentd_memory_limit is not none %}
+ memory: "{{fluentd_memory_limit}}"
+{% endif %}
+{% endif %}
+{% endif %}
volumeMounts:
- name: runlogjournal
mountPath: /run/log/journal
@@ -115,7 +130,7 @@ spec:
containerName: "{{ daemonset_container_name }}"
resource: limits.memory
- name: "FILE_BUFFER_LIMIT"
- value: "{{ openshift_logging_fluentd_file_buffer_limit | default('1Gi') }}"
+          value: "{{ openshift_logging_fluentd_file_buffer_limit | default('256Mi') }}"
{% if openshift_logging_mux_client_mode is defined and
((openshift_logging_mux_allow_external is defined and openshift_logging_mux_allow_external | bool) or
(openshift_logging_use_mux is defined and openshift_logging_use_mux | bool)) %}
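For reference, a sketch of what the reworked block above renders to with the new defaults (cpu request 100m, cpu limit null, memory limit 512Mi); the kibana and mux templates below follow the same pattern:
```
resources:
  limits:
    memory: "512Mi"
  requests:
    cpu: "100m"
    memory: "512Mi"
```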
diff --git a/roles/openshift_logging_kibana/defaults/main.yml b/roles/openshift_logging_kibana/defaults/main.yml
index ee265bb14..1366e96cd 100644
--- a/roles/openshift_logging_kibana/defaults/main.yml
+++ b/roles/openshift_logging_kibana/defaults/main.yml
@@ -9,6 +9,7 @@ openshift_logging_kibana_namespace: logging
openshift_logging_kibana_nodeselector: ""
openshift_logging_kibana_cpu_limit: null
+openshift_logging_kibana_cpu_request: 100m
openshift_logging_kibana_memory_limit: 736Mi
openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}"
@@ -28,6 +29,7 @@ openshift_logging_kibana_proxy_image_prefix: "{{ openshift_logging_image_prefix
openshift_logging_kibana_proxy_image_version: "{{ openshift_logging_image_version | default('latest') }}"
openshift_logging_kibana_proxy_debug: false
openshift_logging_kibana_proxy_cpu_limit: null
+openshift_logging_kibana_proxy_cpu_request: 100m
openshift_logging_kibana_proxy_memory_limit: 256Mi
#The absolute path on the control node to the cert file to use
diff --git a/roles/openshift_logging_kibana/tasks/main.yaml b/roles/openshift_logging_kibana/tasks/main.yaml
index e17e8c1f2..809f7a631 100644
--- a/roles/openshift_logging_kibana/tasks/main.yaml
+++ b/roles/openshift_logging_kibana/tasks/main.yaml
@@ -230,8 +230,10 @@
es_host: "{{ openshift_logging_kibana_es_host }}"
es_port: "{{ openshift_logging_kibana_es_port }}"
kibana_cpu_limit: "{{ openshift_logging_kibana_cpu_limit }}"
+ kibana_cpu_request: "{{ openshift_logging_kibana_cpu_request }}"
kibana_memory_limit: "{{ openshift_logging_kibana_memory_limit }}"
kibana_proxy_cpu_limit: "{{ openshift_logging_kibana_proxy_cpu_limit }}"
+ kibana_proxy_cpu_request: "{{ openshift_logging_kibana_proxy_cpu_request }}"
kibana_proxy_memory_limit: "{{ openshift_logging_kibana_proxy_memory_limit }}"
kibana_replicas: "{{ openshift_logging_kibana_replicas | default (1) }}"
kibana_node_selector: "{{ openshift_logging_kibana_nodeselector | default({}) }}"
diff --git a/roles/openshift_logging_kibana/templates/kibana.j2 b/roles/openshift_logging_kibana/templates/kibana.j2
index da1386d3e..329ccbde2 100644
--- a/roles/openshift_logging_kibana/templates/kibana.j2
+++ b/roles/openshift_logging_kibana/templates/kibana.j2
@@ -38,17 +38,26 @@ spec:
name: "kibana"
image: {{ image }}
imagePullPolicy: Always
-{% if (kibana_memory_limit is defined and kibana_memory_limit is not none and kibana_memory_limit != "") or (kibana_cpu_limit is defined and kibana_cpu_limit is not none and kibana_cpu_limit != "") %}
+{% if (kibana_memory_limit is defined and kibana_memory_limit is not none and kibana_memory_limit != "") or (kibana_cpu_limit is defined and kibana_cpu_limit is not none and kibana_cpu_limit != "") or (kibana_cpu_request is defined and kibana_cpu_request is not none and kibana_cpu_request != "") %}
resources:
+{% if (kibana_memory_limit is defined and kibana_memory_limit is not none and kibana_memory_limit != "") or (kibana_cpu_limit is defined and kibana_cpu_limit is not none and kibana_cpu_limit != "") %}
limits:
-{% if kibana_cpu_limit is not none and kibana_cpu_limit != "" %}
+{% if kibana_cpu_limit is not none and kibana_cpu_limit != "" %}
cpu: "{{ kibana_cpu_limit }}"
-{% endif %}
-{% if kibana_memory_limit is not none and kibana_memory_limit != "" %}
+{% endif %}
+{% if kibana_memory_limit is not none and kibana_memory_limit != "" %}
memory: "{{ kibana_memory_limit }}"
+{% endif %}
+{% endif %}
+{% if (kibana_memory_limit is defined and kibana_memory_limit is not none and kibana_memory_limit != "") or (kibana_cpu_request is defined and kibana_cpu_request is not none and kibana_cpu_request != "") %}
requests:
+{% if kibana_cpu_request is not none and kibana_cpu_request != "" %}
+ cpu: "{{ kibana_cpu_request }}"
+{% endif %}
+{% if kibana_memory_limit is not none and kibana_memory_limit != "" %}
memory: "{{ kibana_memory_limit }}"
-{% endif %}
+{% endif %}
+{% endif %}
{% endif %}
env:
- name: "ES_HOST"
@@ -76,17 +85,26 @@ spec:
name: "kibana-proxy"
image: {{ proxy_image }}
imagePullPolicy: Always
-{% if (kibana_proxy_memory_limit is defined and kibana_proxy_memory_limit is not none and kibana_proxy_memory_limit != "") or (kibana_proxy_cpu_limit is defined and kibana_proxy_cpu_limit is not none and kibana_proxy_cpu_limit != "") %}
+{% if (kibana_proxy_memory_limit is defined and kibana_proxy_memory_limit is not none and kibana_proxy_memory_limit != "") or (kibana_proxy_cpu_limit is defined and kibana_proxy_cpu_limit is not none and kibana_proxy_cpu_limit != "") or (kibana_proxy_cpu_request is defined and kibana_proxy_cpu_request is not none and kibana_proxy_cpu_request != "") %}
resources:
+{% if (kibana_proxy_memory_limit is defined and kibana_proxy_memory_limit is not none and kibana_proxy_memory_limit != "") or (kibana_proxy_cpu_limit is defined and kibana_proxy_cpu_limit is not none and kibana_proxy_cpu_limit != "") %}
limits:
-{% if kibana_proxy_cpu_limit is not none and kibana_proxy_cpu_limit != "" %}
+{% if kibana_proxy_cpu_limit is not none and kibana_proxy_cpu_limit != "" %}
cpu: "{{ kibana_proxy_cpu_limit }}"
-{% endif %}
-{% if kibana_proxy_memory_limit is not none and kibana_proxy_memory_limit != "" %}
+{% endif %}
+{% if kibana_proxy_memory_limit is not none and kibana_proxy_memory_limit != "" %}
memory: "{{ kibana_proxy_memory_limit }}"
+{% endif %}
+{% endif %}
+{% if (kibana_proxy_memory_limit is defined and kibana_proxy_memory_limit is not none and kibana_proxy_memory_limit != "") or (kibana_proxy_cpu_request is defined and kibana_proxy_cpu_request is not none and kibana_proxy_cpu_request != "") %}
requests:
+{% if kibana_proxy_cpu_request is not none and kibana_proxy_cpu_request != "" %}
+ cpu: "{{ kibana_proxy_cpu_request }}"
+{% endif %}
+{% if kibana_proxy_memory_limit is not none and kibana_proxy_memory_limit != "" %}
memory: "{{ kibana_proxy_memory_limit }}"
-{% endif %}
+{% endif %}
+{% endif %}
{% endif %}
ports:
-
diff --git a/roles/openshift_logging_mux/defaults/main.yml b/roles/openshift_logging_mux/defaults/main.yml
index 68412aec8..9de686576 100644
--- a/roles/openshift_logging_mux/defaults/main.yml
+++ b/roles/openshift_logging_mux/defaults/main.yml
@@ -9,10 +9,11 @@ openshift_logging_mux_namespace: logging
### Common settings
openshift_logging_mux_nodeselector: "{{ openshift_hosted_logging_mux_nodeselector_label | default('') | map_from_pairs }}"
-openshift_logging_mux_cpu_limit: 500m
-openshift_logging_mux_memory_limit: 2Gi
-openshift_logging_mux_buffer_queue_limit: 1024
-openshift_logging_mux_buffer_size_limit: 1m
+openshift_logging_mux_cpu_limit: null
+openshift_logging_mux_cpu_request: 100m
+openshift_logging_mux_memory_limit: 512Mi
+openshift_logging_mux_buffer_queue_limit: 32
+openshift_logging_mux_buffer_size_limit: 8m
openshift_logging_mux_replicas: 1
@@ -57,11 +58,11 @@ openshift_logging_mux_file_buffer_storage_type: "emptydir"
openshift_logging_mux_file_buffer_pvc_name: "logging-mux-pvc"
# required if the PVC does not already exist
-openshift_logging_mux_file_buffer_pvc_size: 4Gi
+openshift_logging_mux_file_buffer_pvc_size: 1Gi
openshift_logging_mux_file_buffer_pvc_dynamic: false
openshift_logging_mux_file_buffer_pvc_pv_selector: {}
openshift_logging_mux_file_buffer_pvc_access_modes: ['ReadWriteOnce']
openshift_logging_mux_file_buffer_storage_group: '65534'
openshift_logging_mux_file_buffer_pvc_prefix: "logging-mux"
-openshift_logging_mux_file_buffer_limit: 2Gi
+openshift_logging_mux_file_buffer_limit: 256Mi
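If the previous, larger mux footprint is preferred, the removed defaults can be restored per deployment; a sketch using the values removed above:
```
openshift_logging_mux_cpu_limit: 500m
openshift_logging_mux_memory_limit: 2Gi
openshift_logging_mux_buffer_queue_limit: 1024
openshift_logging_mux_buffer_size_limit: 1m
openshift_logging_mux_file_buffer_pvc_size: 4Gi
openshift_logging_mux_file_buffer_limit: 2Gi
```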
diff --git a/roles/openshift_logging_mux/tasks/main.yaml b/roles/openshift_logging_mux/tasks/main.yaml
index 2ec863afa..1b46a7ac3 100644
--- a/roles/openshift_logging_mux/tasks/main.yaml
+++ b/roles/openshift_logging_mux/tasks/main.yaml
@@ -171,6 +171,7 @@
ops_host: "{{ openshift_logging_mux_ops_host }}"
ops_port: "{{ openshift_logging_mux_ops_port }}"
mux_cpu_limit: "{{ openshift_logging_mux_cpu_limit }}"
+ mux_cpu_request: "{{ openshift_logging_mux_cpu_request }}"
mux_memory_limit: "{{ openshift_logging_mux_memory_limit }}"
mux_replicas: "{{ openshift_logging_mux_replicas | default(1) }}"
mux_node_selector: "{{ openshift_logging_mux_nodeselector | default({}) }}"
diff --git a/roles/openshift_logging_mux/templates/mux.j2 b/roles/openshift_logging_mux/templates/mux.j2
index 4cc48139f..7e88e3964 100644
--- a/roles/openshift_logging_mux/templates/mux.j2
+++ b/roles/openshift_logging_mux/templates/mux.j2
@@ -37,17 +37,26 @@ spec:
- name: "mux"
image: {{image}}
imagePullPolicy: Always
-{% if (mux_memory_limit is defined and mux_memory_limit is not none) or (mux_cpu_limit is defined and mux_cpu_limit is not none) %}
+{% if (mux_memory_limit is defined and mux_memory_limit is not none) or (mux_cpu_limit is defined and mux_cpu_limit is not none) or (mux_cpu_request is defined and mux_cpu_request is not none) %}
resources:
+{% if (mux_memory_limit is defined and mux_memory_limit is not none) or (mux_cpu_limit is defined and mux_cpu_limit is not none) %}
limits:
-{% if mux_cpu_limit is not none %}
+{% if mux_cpu_limit is not none %}
cpu: "{{mux_cpu_limit}}"
-{% endif %}
-{% if mux_memory_limit is not none %}
+{% endif %}
+{% if mux_memory_limit is not none %}
memory: "{{mux_memory_limit}}"
+{% endif %}
+{% endif %}
+{% if (mux_memory_limit is defined and mux_memory_limit is not none) or (mux_cpu_request is defined and mux_cpu_request is not none) %}
requests:
+{% if mux_cpu_request is not none %}
+ cpu: "{{mux_cpu_request}}"
+{% endif %}
+{% if mux_memory_limit is not none %}
memory: "{{mux_memory_limit}}"
-{% endif %}
+{% endif %}
+{% endif %}
{% endif %}
ports:
- containerPort: "{{ openshift_logging_mux_port }}"
diff --git a/roles/openshift_master/tasks/system_container.yml b/roles/openshift_master/tasks/system_container.yml
index 91332acfb..843352532 100644
--- a/roles/openshift_master/tasks/system_container.yml
+++ b/roles/openshift_master/tasks/system_container.yml
@@ -1,4 +1,9 @@
---
+- name: Ensure proxies are in the atomic.conf
+ include_role:
+ name: openshift_atomic
+ tasks_from: proxy
+
- name: Pre-pull master system container image
command: >
atomic pull --storage=ostree {{ 'docker:' if openshift.common.system_images_registry == 'docker' else openshift.common.system_images_registry + '/' }}{{ openshift.master.master_system_image }}:{{ openshift_image_tag }}
diff --git a/roles/openshift_master/tasks/upgrade_facts.yml b/roles/openshift_master/tasks/upgrade_facts.yml
index f6ad438aa..2252c003a 100644
--- a/roles/openshift_master/tasks/upgrade_facts.yml
+++ b/roles/openshift_master/tasks/upgrade_facts.yml
@@ -21,6 +21,10 @@
oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' in oreg_url.split('/')[0]) else '' }}"
when: oreg_host is not defined
+- set_fact:
+ oreg_auth_credentials_replace: False
+ when: oreg_auth_credentials_replace is not defined
+
- name: Set openshift_master_debug_level
set_fact:
openshift_master_debug_level: "{{ debug_level | default(2) }}"
diff --git a/roles/openshift_master/templates/atomic-openshift-master.j2 b/roles/openshift_master/templates/atomic-openshift-master.j2
index 7ec26ceb7..3f7a528a9 100644
--- a/roles/openshift_master/templates/atomic-openshift-master.j2
+++ b/roles/openshift_master/templates/atomic-openshift-master.j2
@@ -21,7 +21,7 @@ AWS_SECRET_ACCESS_KEY={{ openshift_cloudprovider_aws_secret_key }}
{% endif %}
{% if 'api_env_vars' in openshift.master or 'controllers_env_vars' in openshift.master -%}
-{% for key, value in openshift.master.api_env_vars.items() | default([]) | union(openshift.master.controllers_env_vars.items() | default([])) -%}
+{% for key, value in (openshift.master.api_env_vars | default({})).items() | union((openshift.master.controllers_env_vars | default({})).items()) -%}
{{ key }}={{ value }}
{% endfor -%}
{% endif -%}
diff --git a/roles/openshift_node/tasks/node_system_container.yml b/roles/openshift_node/tasks/node_system_container.yml
index 20d7a9539..164a79b39 100644
--- a/roles/openshift_node/tasks/node_system_container.yml
+++ b/roles/openshift_node/tasks/node_system_container.yml
@@ -1,4 +1,9 @@
---
+- name: Ensure proxies are in the atomic.conf
+ include_role:
+ name: openshift_atomic
+ tasks_from: proxy
+
- name: Pre-pull node system container image
command: >
atomic pull --storage=ostree {{ 'docker:' if openshift.common.system_images_registry == 'docker' else openshift.common.system_images_registry + '/' }}{{ openshift.node.node_system_image }}:{{ openshift_image_tag }}
diff --git a/roles/openshift_node/tasks/openvswitch_system_container.yml b/roles/openshift_node/tasks/openvswitch_system_container.yml
index e09063aa5..0f73ce454 100644
--- a/roles/openshift_node/tasks/openvswitch_system_container.yml
+++ b/roles/openshift_node/tasks/openvswitch_system_container.yml
@@ -10,6 +10,11 @@
l_service_name: "{{ openshift.docker.service_name }}"
when: not l_use_crio
+- name: Ensure proxies are in the atomic.conf
+ include_role:
+ name: openshift_atomic
+ tasks_from: proxy
+
- name: Pre-pull OpenVSwitch system container image
command: >
atomic pull --storage=ostree {{ 'docker:' if openshift.common.system_images_registry == 'docker' else openshift.common.system_images_registry + '/' }}{{ openshift.node.ovs_system_image }}:{{ openshift_image_tag }}
diff --git a/roles/openshift_node_certificates/handlers/main.yml b/roles/openshift_node_certificates/handlers/main.yml
index 4abe8bcaf..ef66bf9ca 100644
--- a/roles/openshift_node_certificates/handlers/main.yml
+++ b/roles/openshift_node_certificates/handlers/main.yml
@@ -2,9 +2,21 @@
- name: update ca trust
command: update-ca-trust
notify:
- - restart docker after updating ca trust
+ - check for container runtime after updating ca trust
-- name: restart docker after updating ca trust
+- name: check for container runtime after updating ca trust
+ command: >
+ systemctl -q is-active {{ openshift.docker.service_name }}.service
+ register: l_docker_installed
+ # An rc of 0 indicates that the container runtime service is
+ # running. We will restart it by notifying the restart handler since
+ # we have updated the system CA trust.
+ changed_when: l_docker_installed.rc == 0
+ failed_when: false
+ notify:
+ - restart container runtime after updating ca trust
+
+- name: restart container runtime after updating ca trust
systemd:
name: "{{ openshift.docker.service_name }}"
state: restarted
diff --git a/roles/openshift_node_dnsmasq/README.md b/roles/openshift_node_dnsmasq/README.md
new file mode 100644
index 000000000..4596190d7
--- /dev/null
+++ b/roles/openshift_node_dnsmasq/README.md
@@ -0,0 +1,27 @@
+OpenShift Node DNS resolver
+===========================
+
+Configure dnsmasq to act as a DNS resolver for an OpenShift node.
+
+Requirements
+------------
+
+Role Variables
+--------------
+
+From this role:
+
+| Name | Default value | Description |
+|-----------------------------------------------------|---------------|-----------------------------------------------------------------------------------|
+| openshift_node_dnsmasq_install_network_manager_hook | true | Install the NetworkManager hook that updates /etc/resolv.conf to point at the local dnsmasq instance |
+
+Dependencies
+------------
+
+* openshift_common
+* openshift_node_facts
+
+License
+-------
+
+Apache License Version 2.0
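A minimal sketch of opting out of the hook via the inventory, assuming group_vars are used; note that disabling it is treated as unsupported by the openshift_sanitize_inventory change later in this diff:
```
openshift_node_dnsmasq_install_network_manager_hook: false
# required because disabling the hook is an unsupported configuration
openshift_enable_unsupported_configurations: true
```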
diff --git a/roles/openshift_node_dnsmasq/defaults/main.yml b/roles/openshift_node_dnsmasq/defaults/main.yml
index ed97d539c..eae832fcf 100644
--- a/roles/openshift_node_dnsmasq/defaults/main.yml
+++ b/roles/openshift_node_dnsmasq/defaults/main.yml
@@ -1 +1,2 @@
---
+openshift_node_dnsmasq_install_network_manager_hook: true
diff --git a/roles/openshift_node_dnsmasq/tasks/network-manager.yml b/roles/openshift_node_dnsmasq/tasks/network-manager.yml
index dddcfc9da..e5a92a630 100644
--- a/roles/openshift_node_dnsmasq/tasks/network-manager.yml
+++ b/roles/openshift_node_dnsmasq/tasks/network-manager.yml
@@ -5,5 +5,6 @@
dest: /etc/NetworkManager/dispatcher.d/
mode: 0755
notify: restart NetworkManager
+ when: openshift_node_dnsmasq_install_network_manager_hook | default(true) | bool
- meta: flush_handlers
diff --git a/roles/openshift_prometheus/README.md b/roles/openshift_prometheus/README.md
index c5a44bffb..ce72930ed 100644
--- a/roles/openshift_prometheus/README.md
+++ b/roles/openshift_prometheus/README.md
@@ -23,8 +23,8 @@ For default values, see [`defaults/main.yaml`](defaults/main.yaml).
- `openshift_prometheus_image_<COMPONENT>`: specify image for the component
-## Storage related variables
-Each prometheus component (prometheus, alertmanager, alert-buffer, oauth-proxy) can set pv claim by setting corresponding role variable:
+## PVC-related variables
+Each Prometheus component (prometheus, alertmanager, alertbuffer) can have its PV claim configured by setting the corresponding role variables:
```
openshift_prometheus_<COMPONENT>_storage_type: <VALUE>
openshift_prometheus_<COMPONENT>_pvc_(name|size|access_modes|pv_selector): <VALUE>
@@ -37,6 +37,29 @@ openshift_prometheus_alertbuffer_pvc_size: 10G
openshift_prometheus_pvc_access_modes: [ReadWriteOnce]
```
+## NFS PV Storage variables
+Each Prometheus component (prometheus, alertmanager, alertbuffer) can be backed by an NFS PV by setting the corresponding variables:
+```
+openshift_prometheus_<COMPONENT>_storage_kind=<VALUE>
+openshift_prometheus_<COMPONENT>_storage_(access_modes|host|labels)=<VALUE>
+openshift_prometheus_<COMPONENT>_storage_volume_(name|size)=<VALUE>
+openshift_prometheus_<COMPONENT>_storage_nfs_(directory|options)=<VALUE>
+```
+For example:
+```
+openshift_prometheus_storage_kind=nfs
+openshift_prometheus_storage_access_modes=['ReadWriteOnce']
+openshift_prometheus_storage_host=nfs.example.com #for external host
+openshift_prometheus_storage_nfs_directory=/exports
+openshift_prometheus_storage_alertmanager_nfs_options='*(rw,root_squash)'
+openshift_prometheus_storage_volume_name=prometheus
+openshift_prometheus_storage_alertbuffer_volume_size=10Gi
+openshift_prometheus_storage_labels={'storage': 'prometheus'}
+```
+
+NOTE: Setting `openshift_prometheus_<COMPONENT>_storage_labels` overrides `openshift_prometheus_<COMPONENT>_pvc_pv_selector`
+
+
## Additional Alert Rules file variable
An external file with alert rules can be added by setting path to additional rules variable:
```
diff --git a/roles/openshift_sanitize_inventory/tasks/main.yml b/roles/openshift_sanitize_inventory/tasks/main.yml
index e327ee9f5..74c1a51a8 100644
--- a/roles/openshift_sanitize_inventory/tasks/main.yml
+++ b/roles/openshift_sanitize_inventory/tasks/main.yml
@@ -54,3 +54,16 @@
- include: unsupported.yml
when:
- not openshift_enable_unsupported_configurations | default(false) | bool
+
+- name: Ensure clusterid is set along with the cloudprovider
+ fail:
+ msg: >
+ Ensure that the openshift_clusterid is set and that all infrastructure has the required tags.
+
+ For dynamic provisioning when using multiple clusters in different zones, tag each node with Key=kubernetes.io/cluster/xxxx,Value=clusterid where xxxx and clusterid are unique per cluster. In versions prior to 3.6, this was Key=KubernetesCluster,Value=clusterid.
+
+ https://github.com/openshift/openshift-docs/blob/master/install_config/persistent_storage/dynamically_provisioning_pvs.adoc#available-dynamically-provisioned-plug-ins
+ when:
+ - openshift_clusterid is not defined
+ - openshift_cloudprovider_kind is defined
+ - openshift_cloudprovider_kind == 'aws'
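A minimal inventory sketch that satisfies the new check when the AWS cloud provider is enabled; the id below is an example and must match the kubernetes.io/cluster/<id> tags on the infrastructure:
```
openshift_cloudprovider_kind: aws
openshift_clusterid: example-cluster
```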
diff --git a/roles/openshift_sanitize_inventory/tasks/unsupported.yml b/roles/openshift_sanitize_inventory/tasks/unsupported.yml
index 39bf1780a..b70ab90a1 100644
--- a/roles/openshift_sanitize_inventory/tasks/unsupported.yml
+++ b/roles/openshift_sanitize_inventory/tasks/unsupported.yml
@@ -11,6 +11,14 @@
will not function. This also means that NetworkManager must be installed
enabled and responsible for management of the primary interface.
+- name: Ensure that openshift_node_dnsmasq_install_network_manager_hook is true
+ when:
+ - not openshift_node_dnsmasq_install_network_manager_hook | default(true) | bool
+ fail:
+ msg: |-
+ The NetworkManager hook is considered a critical part of the DNS
+ infrastructure.
+
- set_fact:
__using_dynamic: True
when: