Diffstat (limited to 'roles')
 roles/openshift_loadbalancer/tasks/main.yml | 2
 roles/openshift_loadbalancer/templates/haproxy.cfg.j2 | 8
 roles/openshift_loadbalancer/templates/haproxy.docker.service.j2 | 2
 roles/openshift_logging/README.md | 3
 roles/openshift_logging/tasks/generate_certs.yaml | 48
 roles/openshift_logging/tasks/generate_jks.yaml | 27
 roles/openshift_logging/tasks/install_curator.yaml | 2
 roles/openshift_logging/tasks/install_elasticsearch.yaml | 2
 roles/openshift_logging/tasks/install_kibana.yaml | 2
 roles/openshift_logging/tasks/label_node.yaml | 27
 roles/openshift_logging/templates/curator.j2 | 6
 roles/openshift_logging/templates/es.j2 | 6
 roles/openshift_logging/templates/kibana.j2 | 6
 roles/openshift_metrics/tasks/main.yaml | 7
 roles/rhel_subscribe/tasks/enterprise.yml | 4
 15 files changed, 73 insertions(+), 79 deletions(-)
diff --git a/roles/openshift_loadbalancer/tasks/main.yml b/roles/openshift_loadbalancer/tasks/main.yml
index e9bc8b4ab..68bb4ace8 100644
--- a/roles/openshift_loadbalancer/tasks/main.yml
+++ b/roles/openshift_loadbalancer/tasks/main.yml
@@ -17,7 +17,7 @@
 - name: Create the systemd unit files
   template:
     src: "haproxy.docker.service.j2"
-    dest: "{{ containerized_svc_dir }}/haproxy.service"
+    dest: "/etc/systemd/system/haproxy.service"
   when: openshift.common.is_containerized | bool
   notify: restart haproxy
diff --git a/roles/openshift_loadbalancer/templates/haproxy.cfg.j2 b/roles/openshift_loadbalancer/templates/haproxy.cfg.j2
index 79e695001..24fd635ec 100644
--- a/roles/openshift_loadbalancer/templates/haproxy.cfg.j2
+++ b/roles/openshift_loadbalancer/templates/haproxy.cfg.j2
@@ -1,16 +1,20 @@
 # Global settings
 #---------------------------------------------------------------------
 global
+    maxconn {{ openshift_loadbalancer_global_maxconn | default(20000) }}
+    log /dev/log local0 info
+{% if openshift.common.is_containerized | bool %}
+    stats socket /var/lib/haproxy/run/haproxy.sock mode 600 level admin
+{% else %}
     chroot /var/lib/haproxy
     pidfile /var/run/haproxy.pid
-    maxconn {{ openshift_loadbalancer_global_maxconn | default(20000) }}
     user haproxy
     group haproxy
     daemon
-    log /dev/log local0 info
     # turn on stats unix socket
     stats socket /var/lib/haproxy/stats
+{% endif %}
 #---------------------------------------------------------------------
 # common defaults that all the 'listen' and 'backend' sections will
diff --git a/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2 b/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2
index 624876ab0..5385df3b7 100644
--- a/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2
+++ b/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2
@@ -5,7 +5,7 @@ PartOf=docker.service
 [Service]
 ExecStartPre=-/usr/bin/docker rm -f openshift_loadbalancer
-ExecStart=/usr/bin/docker run --rm --name openshift_loadbalancer -p {{ openshift_master_api_port | default(8443) }}:{{ openshift_master_api_port | default(8443) }} -v /etc/haproxy/haproxy.cfg:/etc/haproxy/haproxy.cfg:ro --entrypoint="haproxy -f /etc/haproxy/haproxy.cfg" {{ openshift.common.router_image }}:{{ openshift_image_tag }}
+ExecStart=/usr/bin/docker run --rm --name openshift_loadbalancer -p {{ openshift_master_api_port | default(8443) }}:{{ openshift_master_api_port | default(8443) }} -v /etc/haproxy/haproxy.cfg:/etc/haproxy/haproxy.cfg:ro --entrypoint=haproxy {{ openshift.common.router_image }}:{{ openshift_image_tag }} -f /etc/haproxy/haproxy.cfg
 ExecStartPost=/usr/bin/sleep 10
 ExecStop=/usr/bin/docker stop openshift_loadbalancer
 LimitNOFILE={{ openshift_loadbalancer_limit_nofile | default(100000) }}
diff --git a/roles/openshift_logging/README.md b/roles/openshift_logging/README.md
index 9b71dc676..856cfa2b9 100644
--- a/roles/openshift_logging/README.md
+++ b/roles/openshift_logging/README.md
@@ -35,6 +35,7 @@ When both `openshift_logging_install_logging` and `openshift_logging_upgrade_log
 - `openshift_logging_curator_log_level`: The log level for the Curator process. Defaults to 'ERROR'.
 - `openshift_logging_curator_cpu_limit`: The amount of CPU to allocate to Curator. Default is '100m'.
 - `openshift_logging_curator_memory_limit`: The amount of memory to allocate to Curator. Unset if not specified.
+- `openshift_logging_curator_nodeselector`: A map of labels (e.g. {"node":"infra","region":"west"}) to select the nodes where the Curator pod will land.
 - `openshift_logging_kibana_hostname`: The Kibana hostname. Defaults to 'kibana.example.com'.
 - `openshift_logging_kibana_cpu_limit`: The amount of CPU to allocate to Kibana or unset if not specified.
@@ -43,6 +44,7 @@ When both `openshift_logging_install_logging` and `openshift_logging_upgrade_log
 - `openshift_logging_kibana_proxy_cpu_limit`: The amount of CPU to allocate to Kibana proxy or unset if not specified.
 - `openshift_logging_kibana_proxy_memory_limit`: The amount of memory to allocate to Kibana proxy or unset if not specified.
 - `openshift_logging_kibana_replica_count`: The number of replicas Kibana should be scaled up to. Defaults to 1.
+- `openshift_logging_kibana_nodeselector`: A map of labels (e.g. {"node":"infra","region":"west"}) to select the nodes where the Kibana pod will land.
 - `openshift_logging_fluentd_nodeselector`: The node selector that the Fluentd daemonset uses to determine where to deploy to. Defaults to '"logging-infra-fluentd": "true"'.
 - `openshift_logging_fluentd_cpu_limit`: The CPU limit for Fluentd pods. Defaults to '100m'.
@@ -67,6 +69,7 @@ When both `openshift_logging_install_logging` and `openshift_logging_upgrade_log
 - `openshift_logging_es_pvc_prefix`: The prefix for the generated PVCs. Defaults to 'logging-es'.
 - `openshift_logging_es_recover_after_time`: The amount of time ES will wait before it tries to recover. Defaults to '5m'.
 - `openshift_logging_es_storage_group`: The storage group used for ES. Defaults to '65534'.
+- `openshift_logging_es_nodeselector`: A map of labels (e.g. {"node":"infra","region":"west"}) to select the nodes where the Elasticsearch pod will land.
 When `openshift_logging_use_ops` is `True`, there are some additional vars. These work the
 same as above for their non-ops counterparts, but apply to the OPS cluster instance:
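
The three new `*_nodeselector` variables documented above all take a map of node labels. A minimal sketch of how they might be supplied (assuming the variables are set through group_vars; the `region: infra` label is an illustrative value, not a default):

# group_vars/OSEv3.yml -- illustrative values only
openshift_logging_curator_nodeselector:
  region: infra
openshift_logging_kibana_nodeselector:
  region: infra
openshift_logging_es_nodeselector:
  region: infra
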
diff --git a/roles/openshift_logging/tasks/generate_certs.yaml b/roles/openshift_logging/tasks/generate_certs.yaml
index 20e50482e..740e490e1 100644
--- a/roles/openshift_logging/tasks/generate_certs.yaml
+++ b/roles/openshift_logging/tasks/generate_certs.yaml
@@ -88,56 +88,12 @@
 - name: Creating necessary JKS certs
   include: generate_jks.yaml
-# check for secret/logging-kibana-proxy
-- command: >
-    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get secret/logging-kibana-proxy -n {{openshift_logging_namespace}} -o jsonpath='{.data.oauth-secret}'
-  register: kibana_secret_oauth_check
-  ignore_errors: yes
-  changed_when: no
-  check_mode: no
-
-- command: >
-    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get secret/logging-kibana-proxy -n {{openshift_logging_namespace}} -o jsonpath='{.data.session-secret}'
-  register: kibana_secret_session_check
-  ignore_errors: yes
-  changed_when: no
-  check_mode: no
-
-# check for oauthclient secret
-- command: >
-    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get oauthclient/kibana-proxy -n {{openshift_logging_namespace}} -o jsonpath='{.secret}'
-  register: oauth_secret_check
-  ignore_errors: yes
-  changed_when: no
-  check_mode: no
-
-# set or generate as needed
+# TODO: make idempotent
 - name: Generate proxy session
   set_fact: session_secret={{'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'|random_word(200)}}
   check_mode: no
-  when:
-  - kibana_secret_session_check.stdout is not defined or kibana_secret_session_check.stdout == ''
-
-- name: Generate proxy session
-  set_fact: session_secret={{kibana_secret_session_check.stdout | b64decode }}
-  check_mode: no
-  when:
-  - kibana_secret_session_check.stdout is defined
-  - kibana_secret_session_check.stdout != ''
+# TODO: make idempotent
 - name: Generate oauth client secret
   set_fact: oauth_secret={{'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'|random_word(64)}}
   check_mode: no
-  when: kibana_secret_oauth_check.stdout is not defined or kibana_secret_oauth_check.stdout == ''
-        or oauth_secret_check.stdout is not defined or oauth_secret_check.stdout == ''
-        or kibana_secret_oauth_check.stdout | b64decode != oauth_secret_check.stdout
-
-- name: Generate oauth client secret
-  set_fact: oauth_secret={{kibana_secret_oauth_check.stdout | b64decode}}
-  check_mode: no
-  when:
-  - kibana_secret_oauth_check is defined
-  - kibana_secret_oauth_check.stdout != ''
-  - oauth_secret_check.stdout is defined
-  - oauth_secret_check.stdout != ''
-  - kibana_secret_oauth_check.stdout | b64decode == oauth_secret_check.stdout
diff --git a/roles/openshift_logging/tasks/generate_jks.yaml b/roles/openshift_logging/tasks/generate_jks.yaml
index adb6c2b2d..c6e2ccbc0 100644
--- a/roles/openshift_logging/tasks/generate_jks.yaml
+++ b/roles/openshift_logging/tasks/generate_jks.yaml
@@ -27,34 +27,22 @@
   check_mode: no
 - name: Create placeholder for previously created JKS certs to prevent recreating...
-  file:
-    path: "{{local_tmp.stdout}}/elasticsearch.jks"
-    state: touch
-    mode: "u=rw,g=r,o=r"
+  local_action: file path="{{local_tmp.stdout}}/elasticsearch.jks" state=touch mode="u=rw,g=r,o=r"
   when: elasticsearch_jks.stat.exists
   changed_when: False
 - name: Create placeholder for previously created JKS certs to prevent recreating...
-  file:
-    path: "{{local_tmp.stdout}}/logging-es.jks"
-    state: touch
-    mode: "u=rw,g=r,o=r"
+  local_action: file path="{{local_tmp.stdout}}/logging-es.jks" state=touch mode="u=rw,g=r,o=r"
   when: logging_es_jks.stat.exists
   changed_when: False
 - name: Create placeholder for previously created JKS certs to prevent recreating...
-  file:
-    path: "{{local_tmp.stdout}}/system.admin.jks"
-    state: touch
-    mode: "u=rw,g=r,o=r"
+  local_action: file path="{{local_tmp.stdout}}/system.admin.jks" state=touch mode="u=rw,g=r,o=r"
   when: system_admin_jks.stat.exists
   changed_when: False
 - name: Create placeholder for previously created JKS certs to prevent recreating...
-  file:
-    path: "{{local_tmp.stdout}}/truststore.jks"
-    state: touch
-    mode: "u=rw,g=r,o=r"
+  local_action: file path="{{local_tmp.stdout}}/truststore.jks" state=touch mode="u=rw,g=r,o=r"
   when: truststore_jks.stat.exists
   changed_when: False
@@ -69,15 +57,16 @@
     - ca.serial.txt
     - ca.crl.srl
     - ca.db
+  when: not elasticsearch_jks.stat.exists or not logging_es_jks.stat.exists or not system_admin_jks.stat.exists or not truststore_jks.stat.exists
 - local_action: template src=signing.conf.j2 dest={{local_tmp.stdout}}/signing.conf
   vars:
     - top_dir: "{{local_tmp.stdout}}"
+  when: not elasticsearch_jks.stat.exists or not logging_es_jks.stat.exists or not system_admin_jks.stat.exists or not truststore_jks.stat.exists
 - name: Run JKS generation script
   local_action: script generate-jks.sh {{local_tmp.stdout}} {{openshift_logging_namespace}}
   check_mode: no
-  become: yes
   when: not elasticsearch_jks.stat.exists or not logging_es_jks.stat.exists or not system_admin_jks.stat.exists or not truststore_jks.stat.exists
 - name: Pushing locally generated JKS certs to remote host...
@@ -105,7 +94,5 @@
   when: not truststore_jks.stat.exists
 - name: Cleaning up temp dir
-  file:
-    path: "{{local_tmp.stdout}}"
-    state: absent
+  local_action: file path="{{local_tmp.stdout}}" state=absent
   changed_when: False
diff --git a/roles/openshift_logging/tasks/install_curator.yaml b/roles/openshift_logging/tasks/install_curator.yaml
index 8f2825552..fcfce4e1e 100644
--- a/roles/openshift_logging/tasks/install_curator.yaml
+++ b/roles/openshift_logging/tasks/install_curator.yaml
@@ -31,6 +31,7 @@
     curator_cpu_limit: "{{openshift_logging_curator_cpu_limit }}"
     curator_memory_limit: "{{openshift_logging_curator_memory_limit }}"
     replicas: "{{curator_replica_count.stdout | default (0)}}"
+    curator_node_selector: "{{openshift_logging_curator_nodeselector | default({}) }}"
   check_mode: no
   changed_when: no
@@ -46,6 +47,7 @@
     curator_cpu_limit: "{{openshift_logging_curator_ops_cpu_limit }}"
     curator_memory_limit: "{{openshift_logging_curator_ops_memory_limit }}"
     replicas: "{{curator_ops_replica_count.stdout | default (0)}}"
+    curator_node_selector: "{{openshift_logging_curator_ops_nodeselector | default({}) }}"
   when: openshift_logging_use_ops
   check_mode: no
   changed_when: no
diff --git a/roles/openshift_logging/tasks/install_elasticsearch.yaml b/roles/openshift_logging/tasks/install_elasticsearch.yaml
index fbba46a35..9b1c004f2 100644
--- a/roles/openshift_logging/tasks/install_elasticsearch.yaml
+++ b/roles/openshift_logging/tasks/install_elasticsearch.yaml
@@ -33,6 +33,7 @@
     volume_names: "{{es_pvc_pool | default([])}}"
     pvc_claim: "{{(volume_names | length > item.0) | ternary(volume_names[item.0], None)}}"
     deploy_name: "{{item.1}}"
+    es_node_selector: "{{openshift_logging_es_nodeselector | default({})}}"
   with_indexed_items:
     - "{{es_dc_pool | default([])}}"
   check_mode: no
@@ -98,6 +99,7 @@
     es_recover_after_nodes: "{{es_ops_recover_after_nodes}}"
     es_recover_expected_nodes: "{{es_ops_recover_expected_nodes}}"
     openshift_logging_es_recover_after_time: "{{openshift_logging_es_ops_recover_after_time}}"
+    es_node_selector: "{{openshift_logging_es_ops_nodeselector | default({})}}"
   with_indexed_items:
     - "{{es_dc_pool_ops | default([])}}"
   when:
diff --git a/roles/openshift_logging/tasks/install_kibana.yaml b/roles/openshift_logging/tasks/install_kibana.yaml
index de4b018dd..f4df7de0c 100644
--- a/roles/openshift_logging/tasks/install_kibana.yaml
+++ b/roles/openshift_logging/tasks/install_kibana.yaml
@@ -35,6 +35,7 @@
     kibana_proxy_cpu_limit: "{{openshift_logging_kibana_proxy_cpu_limit }}"
     kibana_proxy_memory_limit: "{{openshift_logging_kibana_proxy_memory_limit }}"
     replicas: "{{kibana_replica_count.stdout | default (0)}}"
+    kibana_node_selector: "{{openshift_logging_kibana_nodeselector | default({}) }}"
   check_mode: no
   changed_when: no
@@ -53,6 +54,7 @@
     kibana_proxy_cpu_limit: "{{openshift_logging_kibana_ops_proxy_cpu_limit }}"
     kibana_proxy_memory_limit: "{{openshift_logging_kibana_ops_proxy_memory_limit }}"
     replicas: "{{kibana_ops_replica_count.stdout | default (0)}}"
+    kibana_node_selector: "{{openshift_logging_kibana_ops_nodeselector | default({}) }}"
   when: openshift_logging_use_ops
   check_mode: no
   changed_when: no
diff --git a/roles/openshift_logging/tasks/label_node.yaml b/roles/openshift_logging/tasks/label_node.yaml
index aecb5d81b..bd5073381 100644
--- a/roles/openshift_logging/tasks/label_node.yaml
+++ b/roles/openshift_logging/tasks/label_node.yaml
@@ -1,11 +1,34 @@
 ---
 - command: >
     {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get node {{host}}
+    -o jsonpath='{.metadata.labels}'
+  register: node_labels
+  when: not ansible_check_mode
+  changed_when: no
+
+- command: >
+    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig label node {{host}} {{label}}={{value}}
+  register: label_result
+  failed_when: label_result.rc == 1 and 'exists' not in label_result.stderr
+  when:
+  - value is defined
+  - node_labels.stdout is defined
+  - label not in node_labels.stdout
+  - unlabel is not defined or not unlabel
+  - not ansible_check_mode
+
+- command: >
+    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get node {{host}}
     -o jsonpath='{.metadata.labels.{{ label }}}'
   register: label_value
-  failed_when: label_value.rc == 1 and 'exists' not in label_value.stderr
-  when: not ansible_check_mode
+  ignore_errors: yes
   changed_when: no
+  when:
+  - value is defined
+  - node_labels.stdout is defined
+  - label in node_labels.stdout
+  - unlabel is not defined or not unlabel
+  - not ansible_check_mode
 - command: >
     {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig label node {{host}} {{label}}={{value}} --overwrite
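
The reworked label_node.yaml above expects the caller to pass host, label, value, and optionally unlabel. A usage sketch (hypothetical invocation, not part of this change; the host list variable is assumed):

# Illustrative only: label every Fluentd host with the logging-infra-fluentd label
- include: label_node.yaml
  vars:
    host: "{{ item }}"
    label: logging-infra-fluentd
    value: "true"
  with_items: "{{ openshift_logging_fluentd_hosts | default([]) }}"
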
diff --git a/roles/openshift_logging/templates/curator.j2 b/roles/openshift_logging/templates/curator.j2
index d3b5d33a2..de6258eaa 100644
--- a/roles/openshift_logging/templates/curator.j2
+++ b/roles/openshift_logging/templates/curator.j2
@@ -28,6 +28,12 @@ spec:
     spec:
       terminationGracePeriod: 600
       serviceAccountName: aggregated-logging-curator
+{% if curator_node_selector is iterable and curator_node_selector | length > 0 %}
+      nodeSelector:
+{% for key, value in curator_node_selector.iteritems() %}
+        {{key}}: {{value}}
+{% endfor %}
+{% endif %}
       containers:
         -
           name: "curator"
diff --git a/roles/openshift_logging/templates/es.j2 b/roles/openshift_logging/templates/es.j2
index 291589690..ec84c6b76 100644
--- a/roles/openshift_logging/templates/es.j2
+++ b/roles/openshift_logging/templates/es.j2
@@ -30,6 +30,12 @@ spec:
       securityContext:
         supplementalGroups:
         - {{openshift_logging_es_storage_group}}
+{% if es_node_selector is iterable and es_node_selector | length > 0 %}
+      nodeSelector:
+{% for key, value in es_node_selector.iteritems() %}
+        {{key}}: {{value}}
+{% endfor %}
+{% endif %}
       containers:
         -
           name: "elasticsearch"
diff --git a/roles/openshift_logging/templates/kibana.j2 b/roles/openshift_logging/templates/kibana.j2
index 1ec97701a..b42f62850 100644
--- a/roles/openshift_logging/templates/kibana.j2
+++ b/roles/openshift_logging/templates/kibana.j2
@@ -27,6 +27,12 @@ spec:
         component: "{{component}}"
     spec:
       serviceAccountName: aggregated-logging-kibana
+{% if kibana_node_selector is iterable and kibana_node_selector | length > 0 %}
+      nodeSelector:
+{% for key, value in kibana_node_selector.iteritems() %}
+        {{key}}: {{value}}
+{% endfor %}
+{% endif %}
       containers:
         -
           name: "kibana"
diff --git a/roles/openshift_metrics/tasks/main.yaml b/roles/openshift_metrics/tasks/main.yaml
index c42440130..1808db5d5 100644
--- a/roles/openshift_metrics/tasks/main.yaml
+++ b/roles/openshift_metrics/tasks/main.yaml
@@ -7,6 +7,7 @@
 - name: Create temp directory for all our templates
   file: path={{mktemp.stdout}}/templates state=directory mode=0755
   changed_when: False
+  when: "{{ openshift_metrics_install_metrics | bool }}"
 - name: Copy the admin client config(s)
   command: >
@@ -15,8 +16,4 @@
   check_mode: no
   tags: metrics_init
-- include: install_metrics.yaml
-  when: openshift_metrics_install_metrics | default(false) | bool
-
-- include: uninstall_metrics.yaml
-  when: not openshift_metrics_install_metrics | default(false) | bool
+- include: "{{ (openshift_metrics_install_metrics | bool) | ternary('install_metrics.yaml','uninstall_metrics.yaml') }}"
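
The single include above relies on the ternary filter: when openshift_metrics_install_metrics evaluates true it yields 'install_metrics.yaml', otherwise 'uninstall_metrics.yaml'. A standalone sketch of the same pattern (hypothetical variable and file names):

# Illustrative only: ternary(a, b) returns a when the input expression is truthy, b otherwise
- include: "{{ (do_install | default(false) | bool) | ternary('enable.yaml', 'disable.yaml') }}"
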
diff --git a/roles/rhel_subscribe/tasks/enterprise.yml b/roles/rhel_subscribe/tasks/enterprise.yml
index 291df6822..41673ee40 100644
--- a/roles/rhel_subscribe/tasks/enterprise.yml
+++ b/roles/rhel_subscribe/tasks/enterprise.yml
@@ -7,7 +7,7 @@
   when: deployment_type == 'enterprise'
 - set_fact:
-    default_ose_version: '3.3'
+    default_ose_version: '3.4'
   when: deployment_type in ['atomic-enterprise', 'openshift-enterprise']
 - set_fact:
@@ -16,7 +16,7 @@
 - fail:
     msg: "{{ ose_version }} is not a valid version for {{ deployment_type }} deployment type"
   when: ( deployment_type == 'enterprise' and ose_version not in ['3.0'] ) or
-        ( deployment_type in ['atomic-enterprise', 'openshift-enterprise'] and ose_version not in ['3.1', '3.2', '3.3'] )
+        ( deployment_type in ['atomic-enterprise', 'openshift-enterprise'] and ose_version not in ['3.1', '3.2', '3.3', '3.4'] )
 - name: Enable RHEL repositories
   command: subscription-manager repos \