summaryrefslogtreecommitdiffstats
path: root/roles
diff options
context:
space:
mode:
Diffstat (limited to 'roles')
-rw-r--r--roles/cockpit-ui/meta/main.yml2
-rw-r--r--roles/cockpit-ui/tasks/main.yml142
-rw-r--r--roles/openshift_facts/tasks/main.yml104
-rw-r--r--roles/openshift_hosted/tasks/registry/secure.yml16
-rw-r--r--roles/openshift_hosted_templates/tasks/main.yml2
-rw-r--r--roles/openshift_logging/defaults/main.yml2
-rw-r--r--roles/openshift_logging/tasks/delete_logging.yaml1
-rw-r--r--roles/openshift_logging/tasks/label_node.yaml52
-rw-r--r--roles/openshift_logging/tasks/start_cluster.yaml114
-rw-r--r--roles/openshift_logging/tasks/stop_cluster.yaml107
-rw-r--r--roles/openshift_logging/tasks/upgrade_logging.yaml25
-rw-r--r--roles/openshift_manage_node/meta/main.yml15
-rw-r--r--roles/openshift_manage_node/tasks/main.yml66
-rw-r--r--roles/openshift_manageiq/tasks/main.yaml7
-rw-r--r--roles/openshift_metrics/defaults/main.yaml2
-rw-r--r--roles/openshift_node_upgrade/tasks/main.yml6
16 files changed, 317 insertions, 346 deletions
diff --git a/roles/cockpit-ui/meta/main.yml b/roles/cockpit-ui/meta/main.yml
index 6ad2e324a..4d619fff6 100644
--- a/roles/cockpit-ui/meta/main.yml
+++ b/roles/cockpit-ui/meta/main.yml
@@ -11,3 +11,5 @@ galaxy_info:
- 7
categories:
- cloud
+dependencies:
+- role: lib_openshift
diff --git a/roles/cockpit-ui/tasks/main.yml b/roles/cockpit-ui/tasks/main.yml
index f2ef4f161..8bd68787a 100644
--- a/roles/cockpit-ui/tasks/main.yml
+++ b/roles/cockpit-ui/tasks/main.yml
@@ -1,86 +1,58 @@
---
-- name: Create temp directory for kubeconfig
- command: mktemp -d /tmp/openshift-ansible-XXXXXX
- register: mktemp
- changed_when: False
-
-- set_fact:
- openshift_hosted_kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
-
-- name: Copy the admin client config(s)
- command: >
- cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ openshift_hosted_kubeconfig }}
- changed_when: False
-
-- name: Determine if docker-registry service exists
- command: >
- {{ openshift.common.client_binary }} get svc/docker-registry
- --config={{ openshift_hosted_kubeconfig }}
- -n default
- register: check_docker_registry_exists
- failed_when: false
- changed_when: false
-
-- name: Create passthrough route for docker-registry
- command: >
- {{ openshift.common.client_binary }} create route passthrough
- --service docker-registry
- --config={{ openshift_hosted_kubeconfig }}
- -n default
- register: create_docker_registry_route
- changed_when: "'already exists' not in create_docker_registry_route.stderr"
- failed_when: "'already exists' not in create_docker_registry_route.stderr and create_docker_registry_route.rc != 0"
- when: check_docker_registry_exists.rc == 0
-
-- name: Create passthrough route for registry-console
- command: >
- {{ openshift.common.client_binary }} create route passthrough
- --service registry-console
- --port registry-console
- --config={{ openshift_hosted_kubeconfig }}
- -n default
- register: create_registry_console_route
- changed_when: "'already exists' not in create_registry_console_route.stderr"
- failed_when: "'already exists' not in create_registry_console_route.stderr and create_registry_console_route.rc != 0"
- when: check_docker_registry_exists.rc == 0
-
-- name: Retrieve docker-registry route
- command: >
- {{ openshift.common.client_binary }} get route docker-registry
- -o jsonpath='{.spec.host}'
- --config={{ openshift_hosted_kubeconfig }}
- -n default
- register: docker_registry_route
- changed_when: false
- when: check_docker_registry_exists.rc == 0
-
-- name: Retrieve cockpit kube url
- command: >
- {{ openshift.common.client_binary }} get route registry-console
- -o jsonpath='https://{.spec.host}'
- -n default
- register: registry_console_cockpit_kube_url
- changed_when: false
- when: check_docker_registry_exists.rc == 0
-
-# TODO: Need to fix the origin and enterprise templates so that they both respect IMAGE_PREFIX
-- name: Deploy registry-console
- command: >
- {{ openshift.common.client_binary }} new-app --template=registry-console
- {% if openshift_cockpit_deployer_prefix is defined %}-p IMAGE_PREFIX="{{ openshift_cockpit_deployer_prefix }}"{% endif %}
- {% if openshift_cockpit_deployer_version is defined %}-p IMAGE_VERSION="{{ openshift_cockpit_deployer_version }}"{% endif %}
- -p OPENSHIFT_OAUTH_PROVIDER_URL="{{ openshift.master.public_api_url }}"
- -p REGISTRY_HOST="{{ docker_registry_route.stdout }}"
- -p COCKPIT_KUBE_URL="{{ registry_console_cockpit_kube_url.stdout }}"
- --config={{ openshift_hosted_kubeconfig }}
- -n default
- register: deploy_registry_console
- changed_when: "'already exists' not in deploy_registry_console.stderr"
- failed_when: "'already exists' not in deploy_registry_console.stderr and deploy_registry_console.rc != 0"
- when: check_docker_registry_exists.rc == 0
-
-- name: Delete temp directory
- file:
- name: "{{ mktemp.stdout }}"
- state: absent
- changed_when: False
+- block:
+ - name: Create passthrough route for docker-registry
+ oc_route:
+ kubeconfig: "{{ openshift_master_config_dir }}/admin.kubeconfig"
+ name: docker-registry
+ namespace: default
+ service_name: docker-registry
+ state: present
+ tls_termination: passthrough
+ register: docker_registry_route
+
+ - name: Create passthrough route for registry-console
+ oc_route:
+ kubeconfig: "{{ openshift_master_config_dir }}/admin.kubeconfig"
+ name: registry-console
+ namespace: default
+ service_name: registry-console
+ state: present
+ tls_termination: passthrough
+ register: registry_console_cockpit_kube
+
+ # XXX: Required for items still using command
+ - name: Create temp directory for kubeconfig
+ command: mktemp -d /tmp/openshift-ansible-XXXXXX
+ register: mktemp
+ changed_when: False
+
+ - set_fact:
+ openshift_hosted_kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
+
+ - name: Copy the admin client config(s)
+ command: >
+ cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ openshift_hosted_kubeconfig }}
+ changed_when: False
+
+ # TODO: Need to fix the origin and enterprise templates so that they both respect IMAGE_PREFIX
+ - name: Deploy registry-console
+ command: >
+ {{ openshift.common.client_binary }} new-app --template=registry-console
+ {% if openshift_cockpit_deployer_prefix is defined %}-p IMAGE_PREFIX="{{ openshift_cockpit_deployer_prefix }}"{% endif %}
+ {% if openshift_cockpit_deployer_version is defined %}-p IMAGE_VERSION="{{ openshift_cockpit_deployer_version }}"{% endif %}
+ -p OPENSHIFT_OAUTH_PROVIDER_URL="{{ openshift.master.public_api_url }}"
+ -p REGISTRY_HOST="{{ docker_registry_route.results.results[0].spec.host }}"
+ -p COCKPIT_KUBE_URL="https://{{ registry_console_cockpit_kube.results.results[0].spec.host }}"
+ --config={{ openshift_hosted_kubeconfig }}
+ -n default
+ register: deploy_registry_console
+ changed_when: "'already exists' not in deploy_registry_console.stderr"
+ failed_when: "'already exists' not in deploy_registry_console.stderr and deploy_registry_console.rc != 0"
+
+ - name: Delete temp directory
+ file:
+ name: "{{ mktemp.stdout }}"
+ state: absent
+ changed_when: False
+ # XXX: End required for items still using command
+ run_once: true
diff --git a/roles/openshift_facts/tasks/main.yml b/roles/openshift_facts/tasks/main.yml
index 9a1982076..11bd68207 100644
--- a/roles/openshift_facts/tasks/main.yml
+++ b/roles/openshift_facts/tasks/main.yml
@@ -1,52 +1,64 @@
---
-- name: Detecting Operating System
- stat:
- path: /run/ostree-booted
- register: ostree_booted
+- block:
+ - name: Detecting Operating System
+ stat:
+ path: /run/ostree-booted
+ register: ostree_booted
-# Locally setup containerized facts for now
-- set_fact:
- l_is_atomic: "{{ ostree_booted.stat.exists }}"
-- set_fact:
- l_is_containerized: "{{ (l_is_atomic | bool) or (containerized | default(false) | bool) }}"
- l_is_openvswitch_system_container: "{{ (use_openvswitch_system_container | default(use_system_containers) | bool) }}"
- l_is_node_system_container: "{{ (use_node_system_container | default(use_system_containers) | bool) }}"
- l_is_master_system_container: "{{ (use_master_system_container | default(use_system_containers) | bool) }}"
- l_is_etcd_system_container: "{{ (use_etcd_system_container | default(use_system_containers) | bool) }}"
+ # Locally setup containerized facts for now
+ - set_fact:
+ l_is_atomic: "{{ ostree_booted.stat.exists }}"
+ - set_fact:
+ l_is_containerized: "{{ (l_is_atomic | bool) or (containerized | default(false) | bool) }}"
+ l_is_openvswitch_system_container: "{{ (use_openvswitch_system_container | default(use_system_containers) | bool) }}"
+ l_is_node_system_container: "{{ (use_node_system_container | default(use_system_containers) | bool) }}"
+ l_is_master_system_container: "{{ (use_master_system_container | default(use_system_containers) | bool) }}"
+ l_is_etcd_system_container: "{{ (use_etcd_system_container | default(use_system_containers) | bool) }}"
-- name: Ensure various deps are installed
- package: name={{ item }} state=present
- with_items: "{{ required_packages }}"
- when: not l_is_atomic | bool
+ - name: Ensure various deps are installed
+ package: name={{ item }} state=present
+ with_items: "{{ required_packages }}"
+ when: not l_is_atomic | bool
-- name: Gather Cluster facts and set is_containerized if needed
- openshift_facts:
- role: common
- local_facts:
- debug_level: "{{ openshift_debug_level | default(2) }}"
- # TODO: Deprecate deployment_type in favor of openshift_deployment_type
- deployment_type: "{{ openshift_deployment_type | default(deployment_type) }}"
- deployment_subtype: "{{ openshift_deployment_subtype | default(None) }}"
- cluster_id: "{{ openshift_cluster_id | default('default') }}"
- hostname: "{{ openshift_hostname | default(None) }}"
- ip: "{{ openshift_ip | default(None) }}"
- is_containerized: "{{ l_is_containerized | default(None) }}"
- is_openvswitch_system_container: "{{ l_is_openvswitch_system_container | default(false) }}"
- is_node_system_container: "{{ l_is_node_system_container | default(false) }}"
- is_master_system_container: "{{ l_is_master_system_container | default(false) }}"
- is_etcd_system_container: "{{ l_is_etcd_system_container | default(false) }}"
- system_images_registry: "{{ system_images_registry | default('') }}"
- public_hostname: "{{ openshift_public_hostname | default(None) }}"
- public_ip: "{{ openshift_public_ip | default(None) }}"
- portal_net: "{{ openshift_portal_net | default(openshift_master_portal_net) | default(None) }}"
- http_proxy: "{{ openshift_http_proxy | default(None) }}"
- https_proxy: "{{ openshift_https_proxy | default(None) }}"
- no_proxy: "{{ openshift_no_proxy | default(None) }}"
- generate_no_proxy_hosts: "{{ openshift_generate_no_proxy_hosts | default(True) }}"
- no_proxy_internal_hostnames: "{{ openshift_no_proxy_internal_hostnames | default(None) }}"
- sdn_network_plugin_name: "{{ os_sdn_network_plugin_name | default(None) }}"
- use_openshift_sdn: "{{ openshift_use_openshift_sdn | default(None) }}"
+ - name: Gather Cluster facts and set is_containerized if needed
+ openshift_facts:
+ role: common
+ local_facts:
+ debug_level: "{{ openshift_debug_level | default(2) }}"
+ # TODO: Deprecate deployment_type in favor of openshift_deployment_type
+ deployment_type: "{{ openshift_deployment_type | default(deployment_type) }}"
+ deployment_subtype: "{{ openshift_deployment_subtype | default(None) }}"
+ cluster_id: "{{ openshift_cluster_id | default('default') }}"
+ hostname: "{{ openshift_hostname | default(None) }}"
+ ip: "{{ openshift_ip | default(None) }}"
+ is_containerized: "{{ l_is_containerized | default(None) }}"
+ is_openvswitch_system_container: "{{ l_is_openvswitch_system_container | default(false) }}"
+ is_node_system_container: "{{ l_is_node_system_container | default(false) }}"
+ is_master_system_container: "{{ l_is_master_system_container | default(false) }}"
+ is_etcd_system_container: "{{ l_is_etcd_system_container | default(false) }}"
+ system_images_registry: "{{ system_images_registry | default('') }}"
+ public_hostname: "{{ openshift_public_hostname | default(None) }}"
+ public_ip: "{{ openshift_public_ip | default(None) }}"
+ portal_net: "{{ openshift_portal_net | default(openshift_master_portal_net) | default(None) }}"
+ http_proxy: "{{ openshift_http_proxy | default(None) }}"
+ https_proxy: "{{ openshift_https_proxy | default(None) }}"
+ no_proxy: "{{ openshift_no_proxy | default(None) }}"
+ generate_no_proxy_hosts: "{{ openshift_generate_no_proxy_hosts | default(True) }}"
+ no_proxy_internal_hostnames: "{{ openshift_no_proxy_internal_hostnames | default(None) }}"
+ sdn_network_plugin_name: "{{ os_sdn_network_plugin_name | default(None) }}"
+ use_openshift_sdn: "{{ openshift_use_openshift_sdn | default(None) }}"
-- name: Set repoquery command
+ - name: Set repoquery command
+ set_fact:
+ repoquery_cmd: "{{ 'dnf repoquery --latest-limit 1 -d 0' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins' }}"
+
+ # This `when` allows us to skip this expensive block of tasks on
+ # subsequent calls to the `openshift_facts` role. You will notice
+ # speed-ups in proportion to the size of your cluster as this will
+ # skip all tasks on the next calls to the `openshift_facts` role.
+ when:
+ - openshift_facts_init is not defined
+
+- name: Record that openshift_facts has initialized
set_fact:
- repoquery_cmd: "{{ 'dnf repoquery --latest-limit 1 -d 0' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins' }}"
+ openshift_facts_init: true
diff --git a/roles/openshift_hosted/tasks/registry/secure.yml b/roles/openshift_hosted/tasks/registry/secure.yml
index 556da5304..8b44b94c6 100644
--- a/roles/openshift_hosted/tasks/registry/secure.yml
+++ b/roles/openshift_hosted/tasks/registry/secure.yml
@@ -1,13 +1,13 @@
---
- name: Create passthrough route for docker-registry
- command: >
- {{ openshift.common.client_binary }} create route passthrough
- --service docker-registry
- --config={{ openshift_hosted_kubeconfig }}
- -n default
- register: create_docker_registry_route
- changed_when: "'already exists' not in create_docker_registry_route.stderr"
- failed_when: "'already exists' not in create_docker_registry_route.stderr and create_docker_registry_route.rc != 0"
+ oc_route:
+ kubeconfig: "{{ openshift_hosted_kubeconfig }}"
+ name: docker-registry
+ namespace: default
+ service_name: docker-registry
+ state: present
+ tls_termination: passthrough
+ run_once: true
- name: Determine if registry certificate must be created
stat:
diff --git a/roles/openshift_hosted_templates/tasks/main.yml b/roles/openshift_hosted_templates/tasks/main.yml
index 7d176bce3..89b92dfcc 100644
--- a/roles/openshift_hosted_templates/tasks/main.yml
+++ b/roles/openshift_hosted_templates/tasks/main.yml
@@ -4,6 +4,8 @@
become: False
register: copy_hosted_templates_mktemp
run_once: True
+ # AUDIT:changed_when: not set here because this task actually
+ # creates something
- name: Create tar of OpenShift examples
local_action: command tar -C "{{ role_path }}/files/{{ content_version }}/{{ hosted_deployment_type }}" -cvf "{{ copy_hosted_templates_mktemp.stdout }}/openshift-hosted-templates.tar" .
diff --git a/roles/openshift_logging/defaults/main.yml b/roles/openshift_logging/defaults/main.yml
index bdb168921..d9eebe688 100644
--- a/roles/openshift_logging/defaults/main.yml
+++ b/roles/openshift_logging/defaults/main.yml
@@ -19,7 +19,7 @@ openshift_logging_curator_memory_limit: null
openshift_logging_curator_ops_cpu_limit: 100m
openshift_logging_curator_ops_memory_limit: null
-openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default(kibana.{{openshift.common.dns_domain}}) }}"
+openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ openshift.common.dns_domain) }}"
openshift_logging_kibana_cpu_limit: null
openshift_logging_kibana_memory_limit: null
openshift_logging_kibana_proxy_debug: false
diff --git a/roles/openshift_logging/tasks/delete_logging.yaml b/roles/openshift_logging/tasks/delete_logging.yaml
index 9621d0d1a..188ea246c 100644
--- a/roles/openshift_logging/tasks/delete_logging.yaml
+++ b/roles/openshift_logging/tasks/delete_logging.yaml
@@ -81,7 +81,6 @@
# delete our service accounts
- name: delete service accounts
oc_serviceaccount:
- kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
name: "{{ item }}"
namespace: "{{ openshift_logging_namespace }}"
state: absent
diff --git a/roles/openshift_logging/tasks/label_node.yaml b/roles/openshift_logging/tasks/label_node.yaml
deleted file mode 100644
index ebe8f1ca8..000000000
--- a/roles/openshift_logging/tasks/label_node.yaml
+++ /dev/null
@@ -1,52 +0,0 @@
----
-- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get node {{host}}
- -o jsonpath='{.metadata.labels}'
- register: node_labels
- when: not ansible_check_mode
- changed_when: no
-
-- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig label node {{host}} {{label}}={{value}}
- register: label_result
- failed_when: label_result.rc == 1 and 'exists' not in label_result.stderr
- when:
- - value is defined
- - node_labels.stdout is defined
- - label not in node_labels.stdout
- - unlabel is not defined or not unlabel
- - not ansible_check_mode
-
-- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get node {{host}}
- -o jsonpath='{.metadata.labels.{{ label }}}'
- register: label_value
- ignore_errors: yes
- changed_when: no
- when:
- - value is defined
- - node_labels.stdout is defined
- - label in node_labels.stdout
- - unlabel is not defined or not unlabel
- - not ansible_check_mode
-
-- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig label node {{host}} {{label}}={{value}} --overwrite
- register: label_result
- failed_when: label_result.rc == 1 and 'exists' not in label_result.stderr
- when:
- - value is defined
- - label_value.stdout is defined
- - label_value.stdout != value
- - unlabel is not defined or not unlabel
- - not ansible_check_mode
-
-- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig label node {{host}} {{label}}-
- register: label_result
- failed_when: label_result.rc == 1 and 'exists' not in label_result.stderr
- when:
- - unlabel is defined
- - unlabel
- - not ansible_check_mode
- - label in node_labels.stdout
diff --git a/roles/openshift_logging/tasks/start_cluster.yaml b/roles/openshift_logging/tasks/start_cluster.yaml
index 69d2b2b6b..3e97487dc 100644
--- a/roles/openshift_logging/tasks/start_cluster.yaml
+++ b/roles/openshift_logging/tasks/start_cluster.yaml
@@ -1,125 +1,133 @@
---
-- command: >
- {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get node -o name
- register: fluentd_hosts
+- name: Retrieve list of fluentd hosts
+ oc_obj:
+ state: list
+ kind: node
when: "'--all' in openshift_logging_fluentd_hosts"
- check_mode: no
- changed_when: no
+ register: fluentd_hosts
-- set_fact: openshift_logging_fluentd_hosts={{ fluentd_hosts.stdout_lines | regex_replace('node/', '') }}
+- name: Set fact openshift_logging_fluentd_hosts
+ set_fact:
+ openshift_logging_fluentd_hosts: "{{ fluentd_hosts.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
when: "'--all' in openshift_logging_fluentd_hosts"
- name: start fluentd
- include: label_node.yaml
- vars:
- host: "{{fluentd_host}}"
- label: "{{openshift_logging_fluentd_nodeselector.keys()[0]}}"
- value: "{{openshift_logging_fluentd_nodeselector.values()[0]}}"
+ oc_label:
+ name: "{{ fluentd_host }}"
+ kind: node
+ state: add
+ label: "{{ openshift_logging_fluentd_nodeselector | oo_dict_to_list_of_dict }}"
with_items: "{{ openshift_logging_fluentd_hosts }}"
loop_control:
loop_var: fluentd_host
-- command: >
- {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=es -o name -n {{openshift_logging_namespace}}
+- name: Retrieve elasticsearch
+ oc_obj:
+ state: list
+ kind: dc
+ selector: "component=es"
+ namespace: "{{openshift_logging_namespace}}"
register: es_dc
- check_mode: no
- changed_when: no
- name: start elasticsearch
oc_scale:
kind: dc
- name: "{{object.split('/')[1]}}"
+ name: "{{ object }}"
namespace: "{{openshift_logging_namespace}}"
- kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
replicas: 1
- with_items: "{{es_dc.stdout_lines}}"
+ with_items: "{{ es_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
loop_control:
loop_var: object
-- command: >
- {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=kibana -o name -n {{openshift_logging_namespace}}
+- name: Retrieve kibana
+ oc_obj:
+ state: list
+ kind: dc
+ selector: "component=kibana"
+ namespace: "{{openshift_logging_namespace}}"
register: kibana_dc
- check_mode: no
- changed_when: no
- name: start kibana
oc_scale:
kind: dc
- name: "{{object.split('/')[1]}}"
+ name: "{{ object }}"
namespace: "{{openshift_logging_namespace}}"
- kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
replicas: "{{ openshift_logging_kibana_replica_count | default (1) }}"
- with_items: "{{kibana_dc.stdout_lines}}"
+ with_items: "{{ kibana_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
loop_control:
loop_var: object
-- command: >
- {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=curator -o name -n {{openshift_logging_namespace}}
+- name: Retrieve curator
+ oc_obj:
+ state: list
+ kind: dc
+ selector: "component=curator"
+ namespace: "{{openshift_logging_namespace}}"
register: curator_dc
- check_mode: no
- changed_when: no
- name: start curator
oc_scale:
kind: dc
- name: "{{object.split('/')[1]}}"
+ name: "{{ object }}"
namespace: "{{openshift_logging_namespace}}"
- kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
replicas: 1
- with_items: "{{curator_dc.stdout_lines}}"
+ with_items: "{{ curator_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
loop_control:
loop_var: object
-- command: >
- {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=es-ops -o name -n {{openshift_logging_namespace}}
+- name: Retrieve elasticsearch-ops
+ oc_obj:
+ state: list
+ kind: dc
+ selector: "component=es-ops"
+ namespace: "{{openshift_logging_namespace}}"
register: es_dc
- check_mode: no
- changed_when: no
- name: start elasticsearch-ops
oc_scale:
kind: dc
- name: "{{object.split('/')[1]}}"
+ name: "{{ object }}"
namespace: "{{openshift_logging_namespace}}"
- kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
replicas: 1
- with_items: "{{es_dc.stdout_lines}}"
+ with_items: "{{ es_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
loop_control:
loop_var: object
when: openshift_logging_use_ops | bool
-- command: >
- {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=kibana-ops -o name -n {{openshift_logging_namespace}}
+- name: Retrieve kibana-ops
+ oc_obj:
+ state: list
+ kind: dc
+ selector: "component=kibana-ops"
+ namespace: "{{openshift_logging_namespace}}"
register: kibana_dc
- check_mode: no
- changed_when: no
- name: start kibana-ops
oc_scale:
kind: dc
- name: "{{object.split('/')[1]}}"
+ name: "{{ object }}"
namespace: "{{openshift_logging_namespace}}"
- kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
replicas: "{{ openshift_logging_kibana_ops_replica_count | default (1) }}"
- with_items: "{{kibana_dc.stdout_lines}}"
+ with_items: "{{ kibana_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
loop_control:
loop_var: object
when: openshift_logging_use_ops | bool
-- command: >
- {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=curator-ops -o name -n {{openshift_logging_namespace}}
+- name: Retrieve curator-ops
+ oc_obj:
+ state: list
+ kind: dc
+ selector: "component=curator-ops"
+ namespace: "{{openshift_logging_namespace}}"
register: curator_dc
- check_mode: no
- changed_when: no
- name: start curator-ops
oc_scale:
kind: dc
- name: "{{object.split('/')[1]}}"
+ name: "{{ object }}"
namespace: "{{openshift_logging_namespace}}"
- kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
replicas: 1
- with_items: "{{curator_dc.stdout_lines}}"
+ with_items: "{{ curator_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
loop_control:
loop_var: object
when: openshift_logging_use_ops | bool
diff --git a/roles/openshift_logging/tasks/stop_cluster.yaml b/roles/openshift_logging/tasks/stop_cluster.yaml
index 7826efabe..bae6aebbb 100644
--- a/roles/openshift_logging/tasks/stop_cluster.yaml
+++ b/roles/openshift_logging/tasks/stop_cluster.yaml
@@ -1,118 +1,133 @@
---
-- command: >
- {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get node -o name
- register: fluentd_hosts
+- name: Retrieve list of fluentd hosts
+ oc_obj:
+ state: list
+ kind: node
when: "'--all' in openshift_logging_fluentd_hosts"
- changed_when: no
+ register: fluentd_hosts
-- set_fact: openshift_logging_fluentd_hosts={{ fluentd_hosts.stdout_lines | regex_replace('node/', '') }}
+- name: Set fact openshift_logging_fluentd_hosts
+ set_fact:
+ openshift_logging_fluentd_hosts: "{{ fluentd_hosts.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
when: "'--all' in openshift_logging_fluentd_hosts"
- name: stop fluentd
- include: label_node.yaml
- vars:
- host: "{{fluentd_host}}"
- label: "{{openshift_logging_fluentd_nodeselector.keys()[0]}}"
- unlabel: True
+ oc_label:
+ name: "{{ fluentd_host }}"
+ kind: node
+ state: absent
+ label: "{{ openshift_logging_fluentd_nodeselector | oo_dict_to_list_of_dict }}"
with_items: "{{ openshift_logging_fluentd_hosts }}"
loop_control:
loop_var: fluentd_host
-- command: >
- {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=es -o name -n {{openshift_logging_namespace}}
+- name: Retrieve elasticsearch
+ oc_obj:
+ state: list
+ kind: dc
+ selector: "component=es"
+ namespace: "{{openshift_logging_namespace}}"
register: es_dc
- changed_when: no
- name: stop elasticsearch
oc_scale:
kind: dc
- name: "{{object.split('/')[1]}}"
+ name: "{{ object }}"
namespace: "{{openshift_logging_namespace}}"
- kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
replicas: 0
- with_items: "{{es_dc.stdout_lines}}"
+ with_items: "{{ es_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
loop_control:
loop_var: object
-- command: >
- {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=kibana -o name -n {{openshift_logging_namespace}}
+- name: Retrieve kibana
+ oc_obj:
+ state: list
+ kind: dc
+ selector: "component=kibana"
+ namespace: "{{openshift_logging_namespace}}"
register: kibana_dc
- changed_when: no
- name: stop kibana
oc_scale:
kind: dc
- name: "{{object.split('/')[1]}}"
+ name: "{{ object }}"
namespace: "{{openshift_logging_namespace}}"
- kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
replicas: 0
- with_items: "{{kibana_dc.stdout_lines}}"
+ with_items: "{{ kibana_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
loop_control:
loop_var: object
-- command: >
- {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=curator -o name -n {{openshift_logging_namespace}}
+- name: Retrieve curator
+ oc_obj:
+ state: list
+ kind: dc
+ selector: "component=curator"
+ namespace: "{{openshift_logging_namespace}}"
register: curator_dc
- changed_when: no
- name: stop curator
oc_scale:
kind: dc
- name: "{{object.split('/')[1]}}"
+ name: "{{ object }}"
namespace: "{{openshift_logging_namespace}}"
- kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
replicas: 0
- with_items: "{{curator_dc.stdout_lines}}"
+ with_items: "{{ curator_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
loop_control:
loop_var: object
-- command: >
- {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=es-ops -o name -n {{openshift_logging_namespace}}
+- name: Retrieve elasticsearch-ops
+ oc_obj:
+ state: list
+ kind: dc
+ selector: "component=es-ops"
+ namespace: "{{openshift_logging_namespace}}"
register: es_dc
- changed_when: no
- name: stop elasticsearch-ops
oc_scale:
kind: dc
- name: "{{object.split('/')[1]}}"
+ name: "{{ object }}"
namespace: "{{openshift_logging_namespace}}"
- kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
replicas: 0
- with_items: "{{es_dc.stdout_lines}}"
+ with_items: "{{ es_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
loop_control:
loop_var: object
when: openshift_logging_use_ops | bool
-- command: >
- {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=kibana-ops -o name -n {{openshift_logging_namespace}}
+- name: Retrieve kibana-ops
+ oc_obj:
+ state: list
+ kind: dc
+ selector: "component=kibana-ops"
+ namespace: "{{openshift_logging_namespace}}"
register: kibana_dc
- changed_when: no
- name: stop kibana-ops
oc_scale:
kind: dc
- name: "{{object.split('/')[1]}}"
+ name: "{{ object }}"
namespace: "{{openshift_logging_namespace}}"
- kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
replicas: 0
- with_items: "{{kibana_dc.stdout_lines}}"
+ with_items: "{{ kibana_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
loop_control:
loop_var: object
when: openshift_logging_use_ops | bool
-- command: >
- {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=curator-ops -o name -n {{openshift_logging_namespace}}
+- name: Retrieve curator-ops
+ oc_obj:
+ state: list
+ kind: dc
+ selector: "component=curator-ops"
+ namespace: "{{openshift_logging_namespace}}"
register: curator_dc
- changed_when: no
- name: stop curator-ops
oc_scale:
kind: dc
- name: "{{object.split('/')[1]}}"
+ name: "{{ object }}"
namespace: "{{openshift_logging_namespace}}"
- kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
replicas: 0
- with_items: "{{curator_dc.stdout_lines}}"
+ with_items: "{{ curator_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
loop_control:
loop_var: object
when: openshift_logging_use_ops | bool
diff --git a/roles/openshift_logging/tasks/upgrade_logging.yaml b/roles/openshift_logging/tasks/upgrade_logging.yaml
index 0dc31932c..0421cdf58 100644
--- a/roles/openshift_logging/tasks/upgrade_logging.yaml
+++ b/roles/openshift_logging/tasks/upgrade_logging.yaml
@@ -8,29 +8,34 @@
start_cluster: False
# start ES so that we can run migrate script
-- command: >
- {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=es -o name -n {{openshift_logging_namespace}}
+- name: Retrieve elasticsearch
+ oc_obj:
+ state: list
+ kind: dc
+ selector: "component=es"
+ namespace: "{{openshift_logging_namespace}}"
register: es_dc
- check_mode: no
- name: start elasticsearch
oc_scale:
kind: dc
- name: "{{object.split('/')[1]}}"
+ name: "{{ object }}"
namespace: "{{openshift_logging_namespace}}"
replicas: 1
- with_items: "{{es_dc.stdout_lines}}"
+ with_items: "{{ es_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
loop_control:
loop_var: object
-- command: >
- {{ openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get pods -n {{openshift_logging_namespace}} -l component=es -o jsonpath='{.items[?(@.status.phase == "Running")].metadata.name}'
+- name: Wait for elasticsearch pods to be running
+  oc_obj:
+    state: list
+    kind: pods
+    selector: "component=es"
+    namespace: "{{openshift_logging_namespace}}"
register: running_pod
- until: running_pod.stdout != ''
+ until: running_pod.results.results[0]['items'] | selectattr('status.phase', 'equalto', 'Running') | list | length > 0
retries: 30
delay: 10
- changed_when: no
- check_mode: no
- name: Run upgrade script
script: es_migration.sh {{openshift.common.config_base}}/logging/ca.crt {{openshift.common.config_base}}/logging/system.admin.key {{openshift.common.config_base}}/logging/system.admin.crt {{openshift_logging_es_host}} {{openshift_logging_es_port}} {{openshift_logging_namespace}}
diff --git a/roles/openshift_manage_node/meta/main.yml b/roles/openshift_manage_node/meta/main.yml
new file mode 100644
index 000000000..d90cd28cf
--- /dev/null
+++ b/roles/openshift_manage_node/meta/main.yml
@@ -0,0 +1,15 @@
+---
+galaxy_info:
+ author: OpenShift Red Hat
+ description: OpenShift Manage Node
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 2.2
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+dependencies:
+- role: lib_openshift
diff --git a/roles/openshift_manage_node/tasks/main.yml b/roles/openshift_manage_node/tasks/main.yml
index c06758833..9a883feed 100644
--- a/roles/openshift_manage_node/tasks/main.yml
+++ b/roles/openshift_manage_node/tasks/main.yml
@@ -1,23 +1,4 @@
---
-- name: Create temp directory for kubeconfig
- command: mktemp -d /tmp/openshift-ansible-XXXXXX
- register: mktemp
- changed_when: False
- delegate_to: "{{ openshift_master_host }}"
- run_once: true
-
-- set_fact:
- openshift_manage_node_kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
- delegate_to: "{{ openshift_master_host }}"
- run_once: true
-
-- name: Copy the admin client config(s)
- command: >
- cp {{ openshift.common.config_base }}/master/admin.kubeconfig {{ openshift_manage_node_kubeconfig }}
- changed_when: False
- delegate_to: "{{ openshift_master_host }}"
- run_once: true
-
# Necessary because when you're on a node that's also a master the master will be
# restarted after the node restarts docker and it will take up to 60 seconds for
# systemd to start the master again
@@ -46,38 +27,37 @@
run_once: true
- name: Wait for Node Registration
- command: >
- {{ hostvars[openshift_master_host].openshift.common.client_binary }} get node {{ openshift.node.nodename }}
- --config={{ openshift_manage_node_kubeconfig }}
- -n default
- register: omd_get_node
- until: omd_get_node.rc == 0
+ oc_obj:
+ name: "{{ openshift.node.nodename }}"
+ kind: node
+ state: list
+ register: get_node
+ until: "'metadata' in get_node.results.results[0]"
retries: 50
delay: 5
- changed_when: false
when: "'nodename' in openshift.node"
delegate_to: "{{ openshift_master_host }}"
- name: Set node schedulability
- command: >
- {{ hostvars[openshift_master_host].openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename }} --schedulable={{ 'true' if openshift.node.schedulable | bool else 'false' }}
- --config={{ openshift_manage_node_kubeconfig }}
- -n default
+ oadm_manage_node:
+ node: "{{ openshift.node.nodename | lower }}"
+ schedulable: "{{ 'true' if openshift.node.schedulable | bool else 'false' }}"
+ retries: 10
+ delay: 5
+ register: node_schedulable
+ until: node_schedulable|succeeded
when: "'nodename' in openshift.node"
delegate_to: "{{ openshift_master_host }}"
- name: Label nodes
- command: >
- {{ hostvars[openshift_master_host].openshift.common.client_binary }} label --overwrite node {{ openshift.node.nodename }} {{ openshift.node.labels | oo_combine_dict }}
- --config={{ openshift_manage_node_kubeconfig }}
- -n default
- when: "'nodename' in openshift.node and 'labels' in openshift.node and openshift.node.labels != {}"
- delegate_to: "{{ openshift_master_host }}"
-
-- name: Delete temp directory
- file:
- name: "{{ mktemp.stdout }}"
- state: absent
- changed_when: False
+ oc_label:
+ name: "{{ openshift.node.nodename }}"
+ kind: node
+ state: add
+ labels: "{{ openshift.node.labels | oo_dict_to_list_of_dict }}"
+ namespace: default
+ when:
+ - "'nodename' in openshift.node"
+ - "'labels' in openshift.node"
+ - openshift.node.labels != {}
delegate_to: "{{ openshift_master_host }}"
- run_once: true
diff --git a/roles/openshift_manageiq/tasks/main.yaml b/roles/openshift_manageiq/tasks/main.yaml
index e58947fd2..f202486a5 100644
--- a/roles/openshift_manageiq/tasks/main.yaml
+++ b/roles/openshift_manageiq/tasks/main.yaml
@@ -47,6 +47,9 @@
register: oshawkular_create_cluster_role
failed_when: "'already exists' not in oshawkular_create_cluster_role.stderr and oshawkular_create_cluster_role.rc != 0"
changed_when: oshawkular_create_cluster_role.rc == 0
+ # AUDIT:changed_when_note: Checking the return code is insufficient
+  # here. We really need to verify whether the role even exists before
+ # we run this task.
- name: Configure role/user permissions
command: >
@@ -56,6 +59,10 @@
register: osmiq_perm_task
failed_when: "'already exists' not in osmiq_perm_task.stderr and osmiq_perm_task.rc != 0"
changed_when: osmiq_perm_task.rc == 0
+ # AUDIT:changed_when_note: Checking the return code is insufficient
+ # here. We really need to compare the current role/user permissions
+ # with their expected state. I think we may have a module for this?
+
- name: Configure 3_2 role/user permissions
command: >
diff --git a/roles/openshift_metrics/defaults/main.yaml b/roles/openshift_metrics/defaults/main.yaml
index 83843f126..edaa7d0df 100644
--- a/roles/openshift_metrics/defaults/main.yaml
+++ b/roles/openshift_metrics/defaults/main.yaml
@@ -32,7 +32,7 @@ openshift_metrics_heapster_requests_memory: 0.9375G
openshift_metrics_heapster_requests_cpu: null
openshift_metrics_heapster_nodeselector: ""
-openshift_metrics_hostname: "hawkular-metrics.{{openshift_master_default_subdomain}}"
+openshift_metrics_hawkular_hostname: "hawkular-metrics.{{openshift_master_default_subdomain}}"
openshift_metrics_duration: 7
openshift_metrics_resolution: 30s
diff --git a/roles/openshift_node_upgrade/tasks/main.yml b/roles/openshift_node_upgrade/tasks/main.yml
index b1d5f0e0f..609ca2a6e 100644
--- a/roles/openshift_node_upgrade/tasks/main.yml
+++ b/roles/openshift_node_upgrade/tasks/main.yml
@@ -75,3 +75,9 @@
# so containerized services should restart quickly as well.
retries: 24
delay: 5
+ # AUDIT:changed_when: `false` because we are only inspecting the
+ # state of the node, we aren't changing anything (we changed node
+ # service state in the previous task). You could say we shouldn't
+ # override this because something will be changing (the state of a
+ # service), but that should be part of the last task.
+ changed_when: false