-rw-r--r--  .tito/packages/openshift-ansible  2
-rw-r--r--  openshift-ansible.spec  60
-rw-r--r--  playbooks/common/openshift-cluster/cockpit-ui.yml  6
-rw-r--r--  playbooks/common/openshift-cluster/create_persistent_volumes.yml  9
-rw-r--r--  playbooks/common/openshift-cluster/openshift_default_storage_class.yml  6
-rw-r--r--  playbooks/common/openshift-cluster/openshift_hosted.yml  73
-rw-r--r--  playbooks/common/openshift-cluster/openshift_hosted_create_projects.yml  7
-rw-r--r--  playbooks/common/openshift-cluster/openshift_hosted_registry.yml  13
-rw-r--r--  playbooks/common/openshift-cluster/openshift_hosted_router.yml  13
-rw-r--r--  playbooks/common/openshift-cluster/openshift_metrics.yml  5
-rw-r--r--  playbooks/common/openshift-cluster/openshift_prometheus.yml  9
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_etcd3_backend.yml  22
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml  4
-rw-r--r--  roles/cockpit-ui/defaults/main.yml  3
-rw-r--r--  roles/cockpit-ui/tasks/main.yml  4
-rw-r--r--  roles/docker/defaults/main.yml  14
-rw-r--r--  roles/docker/tasks/package_docker.yml  18
-rw-r--r--  roles/docker/tasks/systemcontainer_crio.yml  2
-rw-r--r--  roles/docker/templates/registries.conf  46
-rwxr-xr-x  roles/nuage_master/templates/nuage-master-config-daemonset.j2  6
-rwxr-xr-x  roles/nuage_master/templates/nuage-node-config-daemonset.j2  15
-rw-r--r--  roles/nuage_node/vars/main.yaml  2
-rw-r--r--  roles/openshift_hosted/defaults/main.yml  72
-rw-r--r--  roles/openshift_hosted/meta/main.yml  1
-rw-r--r--  roles/openshift_hosted/tasks/create_projects.yml  14
-rw-r--r--  roles/openshift_hosted/tasks/firewall.yml (renamed from roles/openshift_hosted/tasks/router/firewall.yml)  10
-rw-r--r--  roles/openshift_hosted/tasks/main.yml  20
-rw-r--r--  roles/openshift_hosted/tasks/registry.yml (renamed from roles/openshift_hosted/tasks/registry/registry.yml)  65
-rw-r--r--  roles/openshift_hosted/tasks/registry/firewall.yml  40
-rw-r--r--  roles/openshift_hosted/tasks/router.yml (renamed from roles/openshift_hosted/tasks/router/router.yml)  48
-rw-r--r--  roles/openshift_hosted/tasks/secure.yml (renamed from roles/openshift_hosted/tasks/registry/secure.yml)  4
-rw-r--r--  roles/openshift_hosted/tasks/secure/passthrough.yml (renamed from roles/openshift_hosted/tasks/registry/secure/passthrough.yml)  0
-rw-r--r--  roles/openshift_hosted/tasks/secure/reencrypt.yml (renamed from roles/openshift_hosted/tasks/registry/secure/reencrypt.yml)  0
-rw-r--r--  roles/openshift_hosted/tasks/storage/glusterfs.yml (renamed from roles/openshift_hosted/tasks/registry/storage/glusterfs.yml)  0
-rw-r--r--  roles/openshift_hosted/tasks/storage/object_storage.yml (renamed from roles/openshift_hosted/tasks/registry/storage/object_storage.yml)  0
l---------  roles/openshift_hosted/tasks/storage/registry_config.j2 (renamed from roles/openshift_hosted/tasks/registry/storage/registry_config.j2)  0
-rw-r--r--  roles/openshift_hosted/tasks/storage/s3.yml (renamed from roles/openshift_hosted/tasks/registry/storage/s3.yml)  2
-rw-r--r--  roles/openshift_hosted/tasks/wait_for_pod.yml  36
-rw-r--r--  roles/openshift_hosted/templates/registry_config.j2  4
-rw-r--r--  roles/openshift_hosted/vars/main.yml  11
-rw-r--r--  roles/openshift_logging/README.md  1
-rw-r--r--  roles/openshift_logging/defaults/main.yml  1
-rw-r--r--  roles/openshift_logging_fluentd/defaults/main.yml  2
-rw-r--r--  roles/openshift_logging_fluentd/tasks/main.yaml  3
-rw-r--r--  roles/openshift_logging_fluentd/templates/fluentd.j2  2
-rw-r--r--  roles/openshift_node/templates/openshift.docker.node.dep.service  2
-rw-r--r--  roles/openshift_node/templates/openshift.docker.node.service  1
-rw-r--r--  roles/openshift_node_upgrade/templates/openshift.docker.node.dep.service  2
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml  8
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml  50
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml  27
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml  2
-rw-r--r--  roles/openshift_version/tasks/set_version_containerized.yml  11
53 files changed, 460 insertions, 318 deletions
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index b2155c30f..e2756ea0e 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.7.0-0.127.0 ./
+3.7.0-0.129.0 ./
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index b5673cda1..91b223a80 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -10,7 +10,7 @@
Name: openshift-ansible
Version: 3.7.0
-Release: 0.127.0%{?dist}
+Release: 0.129.0%{?dist}
Summary: Openshift and Atomic Enterprise Ansible
License: ASL 2.0
URL: https://github.com/openshift/openshift-ansible
@@ -280,6 +280,64 @@ Atomic OpenShift Utilities includes
%changelog
+* Tue Sep 26 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.129.0
+- Refactor openshift_hosted plays and role (mgugino@redhat.com)
+- Remove logging ES_COPY feature (jcantril@redhat.com)
+
+* Tue Sep 26 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.128.0
+- check if the storage backend is set to etcd3 before upgrading to 3.7
+ (jchaloup@redhat.com)
+- crio: detect the correct version of the images (gscrivan@redhat.com)
+- crio: set the correct image name with OSE (gscrivan@redhat.com)
+- resolve #5428: python-dbus not found (ltheisen@mitre.org)
+- Updating default behavior for installing metrics and logging. Separating out
+ uninstall to own variable (ewolinet@redhat.com)
+- Add booleans to prevent unwanted install of nuage roles. (mgugino@redhat.com)
+- Set master facts prior to adding new etcd client urls to master config.
+ (abutcher@redhat.com)
+- Remove debugging statements and pause module (sdodson@redhat.com)
+- Fix registry_auth logic for upgrades (mgugino@redhat.com)
+- crio: skip installation on lbs and nfs nodes (gscrivan@redhat.com)
+- Remove override default.py callback plugin (rteague@redhat.com)
+- consolidate etcd_migrate role (jchaloup@redhat.com)
+- Add python3-PyYAML for Fedora installs (mgugino@redhat.com)
+- Do a full stop/start when etcd certificates had expired.
+ (abutcher@redhat.com)
+- Move additional/block/insecure registries to /etc/containers/registries.conf
+ (mgugino@redhat.com)
+- Improve CA playbook restart logic and skip restarts when related services had
+ previously expired certificates. (abutcher@redhat.com)
+- health checks: add diagnostics check (lmeyer@redhat.com)
+- Remove unused openshift_hosted_logging role (mgugino@redhat.com)
+- consolidate etcd_upgrade role (jchaloup@redhat.com)
+- disable excluders after all pre-checks (jchaloup@redhat.com)
+- Fixed AnsibleUnsafeText by converting to int (edu@redhat.com)
+- Ensure that hostname is lowercase (sdodson@redhat.com)
+- Fix deprecated subscription-manager command
+ (bliemli@users.noreply.github.com)
+- Returning actual results of yedit query. Empty list was returning empty
+ dict. (kwoodson@redhat.com)
+- Default openshift_pkg_version to full version-release during upgrades
+ (sdodson@redhat.com)
+- Creating structure to warn for use of deprecated variables and set them in a
+ single location before they are no longer honored (ewolinet@redhat.com)
+- Remove default value for oreg_url (mgugino@redhat.com)
+- Creating initial tsb role to consume and apply templates provided for tsb
+ (ewolinet@redhat.com)
+- Set network facts using first master's config during scaleup.
+ (abutcher@redhat.com)
+- Use 3.7 RPM repo (ahaile@redhat.com)
+- Changes for Nuage atomic ansible install
+ (rohan.s.parulekar@nuagenetworks.net)
+- Add 3.7 scheduler predicates (jsafrane@redhat.com)
+- Consolidate etcd certs roles (jchaloup@redhat.com)
+- GlusterFS can now be run more than once. Ability to add devices to nodes
+ (ttindell@isenpai.com)
+- Ensure valid search on resolv.conf (mateus.caruccio@getupcloud.com)
+- move (and rename) get_dns_ip filter into openshift_node_facts
+ (jdiaz@redhat.com)
+- cri-o: Allow full image override (smilner@redhat.com)
+
* Thu Sep 21 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.127.0
- Updating to always configure api aggregation with installation
(ewolinet@redhat.com)
diff --git a/playbooks/common/openshift-cluster/cockpit-ui.yml b/playbooks/common/openshift-cluster/cockpit-ui.yml
new file mode 100644
index 000000000..5ddafdb07
--- /dev/null
+++ b/playbooks/common/openshift-cluster/cockpit-ui.yml
@@ -0,0 +1,6 @@
+---
+- name: Create Hosted Resources - cockpit-ui
+ hosts: oo_first_master
+ roles:
+ - role: cockpit-ui
+ when: ( openshift.common.version_gte_3_3_or_1_3 | bool ) and ( openshift_hosted_manage_registry | default(true) | bool ) and not (openshift.docker.hosted_registry_insecure | default(false) | bool)
diff --git a/playbooks/common/openshift-cluster/create_persistent_volumes.yml b/playbooks/common/openshift-cluster/create_persistent_volumes.yml
new file mode 100644
index 000000000..8a60a30b8
--- /dev/null
+++ b/playbooks/common/openshift-cluster/create_persistent_volumes.yml
@@ -0,0 +1,9 @@
+---
+- name: Create Hosted Resources - persistent volumes
+ hosts: oo_first_master
+ vars:
+ persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups) }}"
+ persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims }}"
+ roles:
+ - role: openshift_persistent_volumes
+ when: persistent_volumes | length > 0 or persistent_volume_claims | length > 0
diff --git a/playbooks/common/openshift-cluster/openshift_default_storage_class.yml b/playbooks/common/openshift-cluster/openshift_default_storage_class.yml
new file mode 100644
index 000000000..4b4f19690
--- /dev/null
+++ b/playbooks/common/openshift-cluster/openshift_default_storage_class.yml
@@ -0,0 +1,6 @@
+---
+- name: Create Hosted Resources - openshift_default_storage_class
+ hosts: oo_first_master
+ roles:
+ - role: openshift_default_storage_class
+ when: openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce')
diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml
index 0e970f376..2cb404abe 100644
--- a/playbooks/common/openshift-cluster/openshift_hosted.yml
+++ b/playbooks/common/openshift-cluster/openshift_hosted.yml
@@ -1,57 +1,18 @@
---
-- name: Create persistent volumes
- hosts: oo_first_master
- tags:
- - hosted
- vars:
- persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups) }}"
- persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims }}"
- roles:
- - role: openshift_persistent_volumes
- when: persistent_volumes | length > 0 or persistent_volume_claims | length > 0
-
-- name: Create Hosted Resources
- hosts: oo_first_master
- tags:
- - hosted
- pre_tasks:
- - set_fact:
- openshift_hosted_router_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}"
- openshift_hosted_registry_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}"
- when: "'master' in hostvars[groups.oo_first_master.0].openshift and 'registry_url' in hostvars[groups.oo_first_master.0].openshift.master"
-
- roles:
- - role: openshift_default_storage_class
- when: openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce')
- - role: openshift_hosted
- - role: openshift_metrics
- when: openshift_metrics_install_metrics | default(false) | bool
- - role: openshift_logging
- when: openshift_logging_install_logging | default(false) | bool
-
- - role: cockpit-ui
- when: ( openshift.common.version_gte_3_3_or_1_3 | bool ) and ( openshift_hosted_manage_registry | default(true) | bool ) and not (openshift.docker.hosted_registry_insecure | default(false) | bool)
-
- - role: openshift_prometheus
- when: openshift_hosted_prometheus_deploy | default(false) | bool
-
-- name: Update master-config for publicLoggingURL
- hosts: oo_masters_to_config:!oo_first_master
- tags:
- - hosted
- pre_tasks:
- - set_fact:
- openshift_metrics_hawkular_hostname: "{{ g_metrics_hostname | default('hawkular-metrics.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}"
- tasks:
-
- - block:
- - include_role:
- name: openshift_logging
- tasks_from: update_master_config
- when: openshift_logging_install_logging | default(false) | bool
-
- - block:
- - include_role:
- name: openshift_metrics
- tasks_from: update_master_config
- when: openshift_metrics_install_metrics | default(false) | bool
+- include: create_persistent_volumes.yml
+
+- include: openshift_default_storage_class.yml
+
+- include: openshift_hosted_create_projects.yml
+
+- include: openshift_hosted_router.yml
+
+- include: openshift_hosted_registry.yml
+
+- include: openshift_metrics.yml
+
+- include: openshift_logging.yml
+
+- include: cockpit-ui.yml
+
+- include: openshift_prometheus.yml
diff --git a/playbooks/common/openshift-cluster/openshift_hosted_create_projects.yml b/playbooks/common/openshift-cluster/openshift_hosted_create_projects.yml
new file mode 100644
index 000000000..d5ca5185c
--- /dev/null
+++ b/playbooks/common/openshift-cluster/openshift_hosted_create_projects.yml
@@ -0,0 +1,7 @@
+---
+- name: Create Hosted Resources - openshift projects
+ hosts: oo_first_master
+ tasks:
+ - include_role:
+ name: openshift_hosted
+ tasks_from: create_projects.yml
diff --git a/playbooks/common/openshift-cluster/openshift_hosted_registry.yml b/playbooks/common/openshift-cluster/openshift_hosted_registry.yml
new file mode 100644
index 000000000..2a91a827c
--- /dev/null
+++ b/playbooks/common/openshift-cluster/openshift_hosted_registry.yml
@@ -0,0 +1,13 @@
+---
+- name: Create Hosted Resources - registry
+ hosts: oo_first_master
+ tasks:
+ - set_fact:
+ openshift_hosted_registry_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}"
+ when: "'master' in hostvars[groups.oo_first_master.0].openshift and 'registry_url' in hostvars[groups.oo_first_master.0].openshift.master"
+ - include_role:
+ name: openshift_hosted
+ tasks_from: registry.yml
+ when:
+ - openshift_hosted_manage_registry | default(True) | bool
+ - openshift_hosted_registry_registryurl is defined
diff --git a/playbooks/common/openshift-cluster/openshift_hosted_router.yml b/playbooks/common/openshift-cluster/openshift_hosted_router.yml
new file mode 100644
index 000000000..bcb5a34a4
--- /dev/null
+++ b/playbooks/common/openshift-cluster/openshift_hosted_router.yml
@@ -0,0 +1,13 @@
+---
+- name: Create Hosted Resources - router
+ hosts: oo_first_master
+ tasks:
+ - set_fact:
+ openshift_hosted_router_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}"
+ when: "'master' in hostvars[groups.oo_first_master.0].openshift and 'registry_url' in hostvars[groups.oo_first_master.0].openshift.master"
+ - include_role:
+ name: openshift_hosted
+ tasks_from: router.yml
+ when:
+ - openshift_hosted_manage_router | default(True) | bool
+ - openshift_hosted_router_registryurl is defined
diff --git a/playbooks/common/openshift-cluster/openshift_metrics.yml b/playbooks/common/openshift-cluster/openshift_metrics.yml
index 1dc180c26..9cc98fe1e 100644
--- a/playbooks/common/openshift-cluster/openshift_metrics.yml
+++ b/playbooks/common/openshift-cluster/openshift_metrics.yml
@@ -2,7 +2,9 @@
- name: OpenShift Metrics
hosts: oo_first_master
roles:
- - openshift_metrics
+ - role: openshift_metrics
+ when: openshift_metrics_install_metrics | default(false) | bool
+
- name: OpenShift Metrics
hosts: oo_masters:!oo_first_master
@@ -12,3 +14,4 @@
include_role:
name: openshift_metrics
tasks_from: update_master_config.yaml
+ when: openshift_metrics_install_metrics | default(false) | bool
diff --git a/playbooks/common/openshift-cluster/openshift_prometheus.yml b/playbooks/common/openshift-cluster/openshift_prometheus.yml
index a979c0c00..ed89d3bde 100644
--- a/playbooks/common/openshift-cluster/openshift_prometheus.yml
+++ b/playbooks/common/openshift-cluster/openshift_prometheus.yml
@@ -1,9 +1,6 @@
---
-- include: std_include.yml
-
-- name: OpenShift Prometheus
+- name: Create Hosted Resources - openshift_prometheus
hosts: oo_first_master
roles:
- - openshift_prometheus
- vars:
- openshift_prometheus_state: present
+ - role: openshift_prometheus
+ when: openshift_hosted_prometheus_deploy | default(False) | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_etcd3_backend.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_etcd3_backend.yml
new file mode 100644
index 000000000..f75ae3b15
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_etcd3_backend.yml
@@ -0,0 +1,22 @@
+---
+- name: Verify all masters have the etcd3 storage backend set
+ hosts: oo_masters_to_config
+ gather_facts: no
+ roles:
+ - lib_utils
+ tasks:
+ - name: Read master storage backend setting
+ yedit:
+ state: list
+ src: /etc/origin/master/master-config.yaml
+ key: kubernetesMasterConfig.apiServerArguments.storage-backend
+ register: _storage_backend
+
+ - fail:
+ msg: "Storage backend in /etc/origin/master/master-config.yaml must be set to 'etcd3' before the upgrade can continue"
+ when:
+ # assuming the master-config.yml is properly configured, i.e. the value is a list
+ - _storage_backend.result | default([], true) | length == 0 or _storage_backend.result[0] != "etcd3"
+
+ - debug:
+ msg: "Storage backend is set to etcd3"
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
index 3549cf6c3..6cd3bd3e5 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
@@ -29,6 +29,10 @@
tags:
- pre_upgrade
+- include: ../pre/verify_etcd3_backend.yml
+ tags:
+ - pre_upgrade
+
- name: Update repos on control plane hosts
hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
tags:
diff --git a/roles/cockpit-ui/defaults/main.yml b/roles/cockpit-ui/defaults/main.yml
new file mode 100644
index 000000000..b1696f1b8
--- /dev/null
+++ b/roles/cockpit-ui/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+openshift_config_base: "/etc/origin"
+openshift_master_config_dir: "{{ openshift.common.config_base | default(openshift_config_base) }}/master"
diff --git a/roles/cockpit-ui/tasks/main.yml b/roles/cockpit-ui/tasks/main.yml
index 0114498f8..244e2cc41 100644
--- a/roles/cockpit-ui/tasks/main.yml
+++ b/roles/cockpit-ui/tasks/main.yml
@@ -50,7 +50,9 @@
-n default
register: deploy_registry_console
changed_when: "'already exists' not in deploy_registry_console.stderr"
- failed_when: "'already exists' not in deploy_registry_console.stderr and deploy_registry_console.rc != 0"
+ failed_when:
+ - "'already exists' not in deploy_registry_console.stderr"
+ - "deploy_registry_console.rc != 0"
- name: Delete temp directory
file:
diff --git a/roles/docker/defaults/main.yml b/roles/docker/defaults/main.yml
index 81f3ee9e4..274fd8603 100644
--- a/roles/docker/defaults/main.yml
+++ b/roles/docker/defaults/main.yml
@@ -4,3 +4,17 @@ docker_cli_auth_config_path: '/root/.docker'
# oreg_url is defined by user input.
oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' in oreg_url.split('/')[0]) else '' }}"
oreg_auth_credentials_replace: False
+
+openshift_docker_additional_registries: []
+openshift_docker_blocked_registries: []
+openshift_docker_insecure_registries: []
+
+# The l2_docker_* variables convert csv strings to lists, if
+# necessary. These variables should be used in place of their respective
+# openshift_docker_* counterparts to ensure the properly formatted lists are
+# utilized.
+l2_docker_additional_registries: "{% if openshift_docker_additional_registries is string %}{% if openshift_docker_additional_registries == '' %}[]{% elif ',' in openshift_docker_additional_registries %}{{ openshift_docker_additional_registries.split(',') | list }}{% else %}{{ [ openshift_docker_additional_registries ] }}{% endif %}{% else %}{{ openshift_docker_additional_registries }}{% endif %}"
+l2_docker_blocked_registries: "{% if openshift_docker_blocked_registries is string %}{% if openshift_docker_blocked_registries == '' %}[]{% elif ',' in openshift_docker_blocked_registries %}{{ openshift_docker_blocked_registries.split(',') | list }}{% else %}{{ [ openshift_docker_blocked_registries ] }}{% endif %}{% else %}{{ openshift_docker_blocked_registries }}{% endif %}"
+l2_docker_insecure_registries: "{% if openshift_docker_insecure_registries is string %}{% if openshift_docker_insecure_registries == '' %}[]{% elif ',' in openshift_docker_insecure_registries %}{{ openshift_docker_insecure_registries.split(',') | list }}{% else %}{{ [ openshift_docker_insecure_registries ] }}{% endif %}{% else %}{{ openshift_docker_insecure_registries }}{% endif %}"
+
+containers_registries_conf_path: /etc/containers/registries.conf
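As a sketch of the normalization these l2_docker_* expressions perform, hypothetical inventory values would resolve as follows (example hostnames only):

    # CSV strings, single strings, and empty strings are coerced to lists;
    # values that are already lists pass through unchanged.
    openshift_docker_additional_registries: "a.example.com,b.example.com"  # -> ['a.example.com', 'b.example.com']
    openshift_docker_blocked_registries: "b.example.com"                   # -> ['b.example.com']
    openshift_docker_insecure_registries: ""                               # -> []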
diff --git a/roles/docker/tasks/package_docker.yml b/roles/docker/tasks/package_docker.yml
index 16aea5067..0c5621259 100644
--- a/roles/docker/tasks/package_docker.yml
+++ b/roles/docker/tasks/package_docker.yml
@@ -53,22 +53,22 @@
- stat: path=/etc/sysconfig/docker
register: docker_check
-- name: Set registry params
+- name: Comment old registry params in /etc/sysconfig/docker
lineinfile:
dest: /etc/sysconfig/docker
regexp: '^{{ item.reg_conf_var }}=.*$'
- line: "{{ item.reg_conf_var }}='{{ item.reg_fact_val | oo_prepend_strings_in_list(item.reg_flag ~ ' ') | join(' ') }}'"
- when: item.reg_fact_val != '' and docker_check.stat.isreg is defined and docker_check.stat.isreg
+ line: "#{{ item.reg_conf_var }}=''# Moved to {{ containers_registries_conf_path }}"
with_items:
- reg_conf_var: ADD_REGISTRY
- reg_fact_val: "{{ docker_additional_registries | default(None, true)}}"
- reg_flag: --add-registry
- reg_conf_var: BLOCK_REGISTRY
- reg_fact_val: "{{ docker_blocked_registries| default(None, true) }}"
- reg_flag: --block-registry
- reg_conf_var: INSECURE_REGISTRY
- reg_fact_val: "{{ docker_insecure_registries| default(None, true) }}"
- reg_flag: --insecure-registry
+ notify:
+ - restart docker
+
+- name: Place additional/blocked/insecure registries in /etc/containers/registries.conf
+ template:
+ dest: "{{ containers_registries_conf_path }}"
+ src: registries.conf
notify:
- restart docker
diff --git a/roles/docker/tasks/systemcontainer_crio.yml b/roles/docker/tasks/systemcontainer_crio.yml
index e6fc2db06..5b02b72be 100644
--- a/roles/docker/tasks/systemcontainer_crio.yml
+++ b/roles/docker/tasks/systemcontainer_crio.yml
@@ -104,7 +104,7 @@
- name: Use RHEL based image when distribution is Red Hat
set_fact:
- l_crio_image_prepend: "registry.access.redhat.com"
+ l_crio_image_prepend: "registry.access.redhat.com/openshift3"
l_crio_image_name: "cri-o"
when: ansible_distribution == "RedHat"
diff --git a/roles/docker/templates/registries.conf b/roles/docker/templates/registries.conf
new file mode 100644
index 000000000..c55dbd84f
--- /dev/null
+++ b/roles/docker/templates/registries.conf
@@ -0,0 +1,46 @@
+# {{ ansible_managed }}
+# This is a system-wide configuration file used to
+# keep track of registries for various container backends.
+# It adheres to YAML format and does not support recursive
+# lists of registries.
+
+# The default location for this configuration file is /etc/containers/registries.conf.
+
+# The only valid categories are: 'registries', 'insecure_registries',
+# and 'block_registries'.
+
+
+#registries:
+# - registry.access.redhat.com
+
+{% if l2_docker_additional_registries %}
+registries:
+{% for reg in l2_docker_additional_registries %}
+ - {{ reg }}
+{% endfor %}
+{% endif %}
+
+# If you need to access insecure registries, uncomment the section below
+# and add the registry's fully-qualified name. An insecure registry is one
+# that does not have a valid SSL certificate or only does HTTP.
+#insecure_registries:
+# -
+
+{% if l2_docker_insecure_registries %}
+insecure_registries:
+{% for reg in l2_docker_insecure_registries %}
+ - {{ reg }}
+{% endfor %}
+{% endif %}
+
+# If you need to block pull access from a registry, uncomment the section below
+# and add the registry's fully-qualified name.
+#block_registries:
+# -
+
+{% if l2_docker_blocked_registries %}
+block_registries:
+{% for reg in l2_docker_blocked_registries %}
+ - {{ reg }}
+{% endfor %}
+{% endif %}
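For illustration, with l2_docker_additional_registries resolving to ['registry.access.redhat.com'] and l2_docker_insecure_registries to ['172.30.0.0/16'] (example values, not defaults set by this diff), the template above would render a registries.conf containing:

    registries:
      - registry.access.redhat.com

    insecure_registries:
      - 172.30.0.0/16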
diff --git a/roles/nuage_master/templates/nuage-master-config-daemonset.j2 b/roles/nuage_master/templates/nuage-master-config-daemonset.j2
index 612d689c2..7be5d6743 100755
--- a/roles/nuage_master/templates/nuage-master-config-daemonset.j2
+++ b/roles/nuage_master/templates/nuage-master-config-daemonset.j2
@@ -62,16 +62,14 @@ spec:
selector:
matchLabels:
k8s-app: nuage-master-config
+ updateStrategy:
+ type: RollingUpdate
template:
metadata:
labels:
k8s-app: nuage-master-config
spec:
hostNetwork: true
- tolerations:
- - key: node-role.kubernetes.io/master
- effect: NoSchedule
- operator: Exists
nodeSelector:
install-monitor: "true"
containers:
diff --git a/roles/nuage_master/templates/nuage-node-config-daemonset.j2 b/roles/nuage_master/templates/nuage-node-config-daemonset.j2
index 02e9a1563..6a1267d94 100755
--- a/roles/nuage_master/templates/nuage-node-config-daemonset.j2
+++ b/roles/nuage_master/templates/nuage-node-config-daemonset.j2
@@ -23,7 +23,7 @@ data:
# IP address and port number of master API server
masterApiServer: {{ api_server_url }}
# REST server URL
- nuageMonRestServer: {{ nuage_mon_rest_server_url }}
+ nuageMonRestServer: https://{{ openshift_master_cluster_hostname }}:{{ nuage_mon_rest_server_port }}
# Bridge name for the docker bridge
dockerBridgeName: docker0
# Certificate for connecting to the openshift monitor REST api
@@ -32,11 +32,6 @@ data:
nuageMonClientKey: {{ nuage_node_config_dsets_mount_dir }}/vsp-openshift/nuageMonClient.key
# CA certificate for verifying the master's rest server
nuageMonServerCA: {{ nuage_node_config_dsets_mount_dir }}/vsp-openshift/nuageMonCA.crt
- # Nuage vport mtu size
- interfaceMTU: {{ nuage_vport_mtu }}
- # Logging level for the plugin
- # allowed options are: "dbg", "info", "warn", "err", "emer", "off"
- logLevel: 3
# This will generate the required Nuage CNI yaml configuration
cni_yaml_config: |
@@ -72,10 +67,6 @@ spec:
k8s-app: nuage-cni-ds
spec:
hostNetwork: true
- tolerations:
- - key: node-role.kubernetes.io/master
- effect: NoSchedule
- operator: Exists
containers:
# This container installs Nuage CNI binaries
# and CNI network config file on each node.
@@ -157,10 +148,6 @@ spec:
k8s-app: nuage-vrs-ds
spec:
hostNetwork: true
- tolerations:
- - key: node-role.kubernetes.io/master
- effect: NoSchedule
- operator: Exists
containers:
# This container installs Nuage VRS running as a
# container on each worker node
diff --git a/roles/nuage_node/vars/main.yaml b/roles/nuage_node/vars/main.yaml
index d8bfca62a..fdf01b7c2 100644
--- a/roles/nuage_node/vars/main.yaml
+++ b/roles/nuage_node/vars/main.yaml
@@ -24,4 +24,4 @@ cni_bin_dir: "/opt/cni/bin/"
nuage_plugin_crt_dir: /usr/share/vsp-openshift
openshift_atomic_node_config_file: /etc/sysconfig/{{ openshift.common.service_type }}-node
-nuage_atomic_docker_additional_mounts: "DOCKER_ADDTL_BIND_MOUNTS=-v /var/usr/share/vsp-openshift:/var/usr/share/vsp-openshift -v /etc/default:/etc/default -v /var/run:/var/run -v /opt/cni/bin:/opt/cni/bin -v /etc/cni/net.d:/etc/cni/net.d"
+nuage_atomic_docker_additional_mounts: "NUAGE_ADDTL_BIND_MOUNTS=-v /var/usr/share/vsp-openshift:/var/usr/share/vsp-openshift -v /etc/default:/etc/default -v /var/run:/var/run -v /opt/cni/bin:/opt/cni/bin -v /etc/cni/net.d:/etc/cni/net.d"
diff --git a/roles/openshift_hosted/defaults/main.yml b/roles/openshift_hosted/defaults/main.yml
index 712a2a591..c234c3740 100644
--- a/roles/openshift_hosted/defaults/main.yml
+++ b/roles/openshift_hosted/defaults/main.yml
@@ -1,14 +1,33 @@
---
+##########
+# Common #
+##########
+openshift_hosted_infra_selector: "region=infra"
+r_openshift_hosted_use_calico_default: "{{ openshift_use_calico | default(False) }}"
+r_openshift_hosted_use_calico: "{{ r_openshift_hosted_use_calico_default }}"
+
+openshift_default_projects:
+ default:
+ default_node_selector: ''
+ logging:
+ default_node_selector: ''
+ openshift-infra:
+ default_node_selector: ''
+
+# openshift_additional_projects shares the same format as openshift_default_projects
+openshift_additional_projects: {}
+
+openshift_config_base: "/etc/origin"
+openshift_master_config_dir: "{{ openshift.common.config_base | default(openshift_config_base) }}/master"
+openshift_cluster_domain: 'cluster.local'
+
+##########
+# Router #
+##########
r_openshift_hosted_router_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
r_openshift_hosted_router_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
-r_openshift_hosted_registry_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
-r_openshift_hosted_registry_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
-
openshift_hosted_router_wait: "{{ not (openshift_master_bootstrap_enabled | default(False)) }}"
-openshift_hosted_registry_wait: "{{ not (openshift_master_bootstrap_enabled | default(False)) }}"
-
-registry_volume_claim: 'registry-claim'
openshift_hosted_router_edits:
- key: spec.strategy.rollingParams.intervalSeconds
@@ -36,20 +55,49 @@ openshift_hosted_routers:
certificate: "{{ openshift_hosted_router_certificate | default({}) }}"
openshift_hosted_router_certificate: {}
-openshift_hosted_registry_cert_expire_days: 730
openshift_hosted_router_create_certificate: True
r_openshift_hosted_router_os_firewall_deny: []
r_openshift_hosted_router_os_firewall_allow: []
+############
+# Registry #
+############
+
+r_openshift_hosted_registry_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
+r_openshift_hosted_registry_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
+
+openshift_hosted_registry_name: docker-registry
+openshift_hosted_registry_wait: "{{ not (openshift_master_bootstrap_enabled | default(False)) }}"
+registry_volume_claim: 'registry-claim'
+openshift_hosted_registry_cert_expire_days: 730
+
r_openshift_hosted_registry_os_firewall_deny: []
r_openshift_hosted_registry_os_firewall_allow:
- service: Docker Registry Port
port: 5000/tcp
cond: "{{ r_openshift_hosted_use_calico }}"
-# NOTE
-# r_openshift_hosted_use_calico_default may be defined external to this role.
-# openshift_use_calico, if defined, may affect other roles or play behavior.
-r_openshift_hosted_use_calico_default: "{{ openshift_use_calico | default(False) }}"
-r_openshift_hosted_use_calico: "{{ r_openshift_hosted_use_calico_default }}"
+openshift_hosted_registry_serviceaccount: registry
+openshift_hosted_registry_volumes: []
+openshift_hosted_registry_env_vars: {}
+
+# These edits are being specified only to prevent 'changed' on rerun
+openshift_hosted_registry_edits:
+- key: spec.strategy.rollingParams
+ value:
+ intervalSeconds: 1
+ maxSurge: "25%"
+ maxUnavailable: "25%"
+ timeoutSeconds: 600
+ updatePeriodSeconds: 1
+ action: put
+
+openshift_hosted_registry_force:
+- False
+
+openshift_push_via_dns: False
+
+# NOTE: setting openshift_docker_hosted_registry_insecure may affect other roles
+openshift_hosted_docker_registry_insecure_default: "{{ openshift_docker_hosted_registry_insecure | default(False) }}"
+openshift_hosted_docker_registry_insecure: "{{ openshift_hosted_docker_registry_insecure_default }}"
diff --git a/roles/openshift_hosted/meta/main.yml b/roles/openshift_hosted/meta/main.yml
index 28fd396d6..1d70ef7eb 100644
--- a/roles/openshift_hosted/meta/main.yml
+++ b/roles/openshift_hosted/meta/main.yml
@@ -12,7 +12,6 @@ galaxy_info:
categories:
- cloud
dependencies:
-- role: openshift_cli
- role: openshift_hosted_facts
- role: lib_openshift
- role: lib_os_firewall
diff --git a/roles/openshift_hosted/tasks/create_projects.yml b/roles/openshift_hosted/tasks/create_projects.yml
new file mode 100644
index 000000000..1b25d0c64
--- /dev/null
+++ b/roles/openshift_hosted/tasks/create_projects.yml
@@ -0,0 +1,14 @@
+---
+- name: Create default projects
+ oc_project:
+ name: "{{ item.key }}"
+ node_selector:
+ - "{{ item.value.default_node_selector }}"
+ with_dict: "{{ openshift_default_projects }}"
+
+- name: Create additional projects
+ oc_project:
+ name: "{{ item.key }}"
+ node_selector:
+ - "{{ item.value.default_node_selector }}"
+ with_dict: "{{ openshift_additional_projects }}"
diff --git a/roles/openshift_hosted/tasks/router/firewall.yml b/roles/openshift_hosted/tasks/firewall.yml
index ff90f3372..1eb2c92c8 100644
--- a/roles/openshift_hosted/tasks/router/firewall.yml
+++ b/roles/openshift_hosted/tasks/firewall.yml
@@ -8,7 +8,7 @@
protocol: "{{ item.port.split('/')[1] }}"
port: "{{ item.port.split('/')[0] }}"
when: item.cond | default(True)
- with_items: "{{ r_openshift_hosted_router_os_firewall_allow }}"
+ with_items: "{{ l_openshift_hosted_fw_allow }}"
- name: Remove iptables rules
os_firewall_manage_iptables:
@@ -17,9 +17,9 @@
protocol: "{{ item.port.split('/')[1] }}"
port: "{{ item.port.split('/')[0] }}"
when: item.cond | default(True)
- with_items: "{{ r_openshift_hosted_router_os_firewall_deny }}"
+ with_items: "{{ l_openshift_hosted_fw_deny }}"
-- when: r_openshift_hosted_router_firewall_enabled | bool and r_openshift_hosted_router_use_firewalld | bool
+- when: l_openshift_hosted_firewall_enabled | bool and l_openshift_hosted_use_firewalld | bool
block:
- name: Add firewalld allow rules
firewalld:
@@ -28,7 +28,7 @@
immediate: true
state: enabled
when: item.cond | default(True)
- with_items: "{{ r_openshift_hosted_router_os_firewall_allow }}"
+ with_items: "{{ l_openshift_hosted_fw_allow }}"
- name: Remove firewalld allow rules
firewalld:
@@ -37,4 +37,4 @@
immediate: true
state: disabled
when: item.cond | default(True)
- with_items: "{{ r_openshift_hosted_router_os_firewall_deny }}"
+ with_items: "{{ l_openshift_hosted_fw_deny }}"
diff --git a/roles/openshift_hosted/tasks/main.yml b/roles/openshift_hosted/tasks/main.yml
index 6efe2f63c..d306adf42 100644
--- a/roles/openshift_hosted/tasks/main.yml
+++ b/roles/openshift_hosted/tasks/main.yml
@@ -1,13 +1,9 @@
---
-- name: Create projects
- oc_project:
- name: "{{ item.key }}"
- node_selector:
- - "{{ item.value.default_node_selector }}"
- with_dict: "{{ openshift_projects }}"
-
-- include: router/router.yml
- when: openshift_hosted_manage_router | default(true) | bool
-
-- include: registry/registry.yml
- when: openshift_hosted_manage_registry | default(true) | bool
+# This role is intended to be used with include_role.
+# include_role:
+# name: openshift_hosted
+# tasks_from: "{{ item }}"
+# with_items:
+# - create_projects.yml
+# - router.yml
+# - registry.yml
diff --git a/roles/openshift_hosted/tasks/registry/registry.yml b/roles/openshift_hosted/tasks/registry.yml
index 48f53aef8..f1aa9c5a8 100644
--- a/roles/openshift_hosted/tasks/registry/registry.yml
+++ b/roles/openshift_hosted/tasks/registry.yml
@@ -1,7 +1,11 @@
---
- name: setup firewall
include: firewall.yml
- static: yes
+ vars:
+ l_openshift_hosted_firewall_enabled: "{{ r_openshift_hosted_registry_firewall_enabled }}"
+ l_openshift_hosted_use_firewalld: "{{ r_openshift_hosted_registry_use_firewalld }}"
+ l_openshift_hosted_fw_allow: "{{ r_openshift_hosted_registry_os_firewall_allow }}"
+ l_openshift_hosted_fw_deny: "{{ r_openshift_hosted_registry_os_firewall_deny }}"
- when: openshift.hosted.registry.replicas | default(none) is none
block:
@@ -36,30 +40,14 @@
- name: set openshift_hosted facts
set_fact:
openshift_hosted_registry_replicas: "{{ openshift.hosted.registry.replicas | default(l_default_replicas) }}"
- openshift_hosted_registry_name: docker-registry
- openshift_hosted_registry_serviceaccount: registry
openshift_hosted_registry_namespace: "{{ openshift.hosted.registry.namespace | default('default') }}"
openshift_hosted_registry_selector: "{{ openshift.hosted.registry.selector }}"
openshift_hosted_registry_images: "{{ openshift.hosted.registry.registryurl | default('openshift3/ose-${component}:${version}')}}"
- openshift_hosted_registry_volumes: []
- openshift_hosted_registry_env_vars: {}
- openshift_hosted_registry_edits:
- # These edits are being specified only to prevent 'changed' on rerun
- - key: spec.strategy.rollingParams
- value:
- intervalSeconds: 1
- maxSurge: "25%"
- maxUnavailable: "25%"
- timeoutSeconds: 600
- updatePeriodSeconds: 1
- action: put
- openshift_hosted_registry_force:
- - False
- name: Update registry environment variables when pushing via dns
set_fact:
openshift_hosted_registry_env_vars: "{{ openshift_hosted_registry_env_vars | combine({'OPENSHIFT_DEFAULT_REGISTRY':'docker-registry.default.svc:5000'}) }}"
- when: openshift_push_via_dns | default(false) | bool
+ when: openshift_push_via_dns | bool
- name: Update registry proxy settings for dc/docker-registry
set_fact:
@@ -137,36 +125,17 @@
edits: "{{ openshift_hosted_registry_edits }}"
force: "{{ True|bool in openshift_hosted_registry_force }}"
-- when: openshift_hosted_registry_wait | bool
- block:
- - name: Ensure OpenShift registry correctly rolls out (best-effort today)
- command: |
- oc rollout status deploymentconfig {{ openshift_hosted_registry_name }} \
- --namespace {{ openshift_hosted_registry_namespace }} \
- --config {{ openshift.common.config_base }}/master/admin.kubeconfig
- async: 600
- poll: 15
- failed_when: false
-
- - name: Determine the latest version of the OpenShift registry deployment
- command: |
- {{ openshift.common.client_binary }} get deploymentconfig {{ openshift_hosted_registry_name }} \
- --namespace {{ openshift_hosted_registry_namespace }} \
- --config {{ openshift.common.config_base }}/master/admin.kubeconfig \
- -o jsonpath='{ .status.latestVersion }'
- register: openshift_hosted_registry_latest_version
-
- - name: Sanity-check that the OpenShift registry rolled out correctly
- command: |
- {{ openshift.common.client_binary }} get replicationcontroller {{ openshift_hosted_registry_name }}-{{ openshift_hosted_registry_latest_version.stdout }} \
- --namespace {{ openshift_hosted_registry_namespace }} \
- --config {{ openshift.common.config_base }}/master/admin.kubeconfig \
- -o jsonpath='{ .metadata.annotations.openshift\.io/deployment\.phase }'
- register: openshift_hosted_registry_rc_phase
- until: "'Running' not in openshift_hosted_registry_rc_phase.stdout"
- delay: 15
- retries: 40
- failed_when: "'Failed' in openshift_hosted_registry_rc_phase.stdout"
+- name: setup registry list
+ set_fact:
+ r_openshift_hosted_registry_list:
+ - name: "{{ openshift_hosted_registry_name }}"
+ namespace: "{{ openshift_hosted_registry_namespace }}"
+
+- name: Wait for pod (Registry)
+ include: wait_for_pod.yml
+ vars:
+ l_openshift_hosted_wait_for_pod: "{{ openshift_hosted_registry_wait }}"
+ l_openshift_hosted_wfp_items: "{{ r_openshift_hosted_registry_list }}"
- include: storage/glusterfs.yml
when:
diff --git a/roles/openshift_hosted/tasks/registry/firewall.yml b/roles/openshift_hosted/tasks/registry/firewall.yml
deleted file mode 100644
index 775b7d6d7..000000000
--- a/roles/openshift_hosted/tasks/registry/firewall.yml
+++ /dev/null
@@ -1,40 +0,0 @@
----
-- when: r_openshift_hosted_registry_firewall_enabled | bool and not r_openshift_hosted_registry_use_firewalld | bool
- block:
- - name: Add iptables allow rules
- os_firewall_manage_iptables:
- name: "{{ item.service }}"
- action: add
- protocol: "{{ item.port.split('/')[1] }}"
- port: "{{ item.port.split('/')[0] }}"
- when: item.cond | default(True)
- with_items: "{{ r_openshift_hosted_registry_os_firewall_allow }}"
-
- - name: Remove iptables rules
- os_firewall_manage_iptables:
- name: "{{ item.service }}"
- action: remove
- protocol: "{{ item.port.split('/')[1] }}"
- port: "{{ item.port.split('/')[0] }}"
- when: item.cond | default(True)
- with_items: "{{ r_openshift_hosted_registry_os_firewall_deny }}"
-
-- when: r_openshift_hosted_registry_firewall_enabled | bool and r_openshift_hosted_registry_use_firewalld | bool
- block:
- - name: Add firewalld allow rules
- firewalld:
- port: "{{ item.port }}"
- permanent: true
- immediate: true
- state: enabled
- when: item.cond | default(True)
- with_items: "{{ r_openshift_hosted_registry_os_firewall_allow }}"
-
- - name: Remove firewalld allow rules
- firewalld:
- port: "{{ item.port }}"
- permanent: true
- immediate: true
- state: disabled
- when: item.cond | default(True)
- with_items: "{{ r_openshift_hosted_registry_os_firewall_deny }}"
diff --git a/roles/openshift_hosted/tasks/router/router.yml b/roles/openshift_hosted/tasks/router.yml
index 2a42b5a7c..2aeecc943 100644
--- a/roles/openshift_hosted/tasks/router/router.yml
+++ b/roles/openshift_hosted/tasks/router.yml
@@ -1,7 +1,11 @@
---
- name: setup firewall
include: firewall.yml
- static: yes
+ vars:
+ l_openshift_hosted_firewall_enabled: "{{ r_openshift_hosted_router_firewall_enabled }}"
+ l_openshift_hosted_use_firewalld: "{{ r_openshift_hosted_router_use_firewalld }}"
+ l_openshift_hosted_fw_allow: "{{ r_openshift_hosted_router_os_firewall_allow }}"
+ l_openshift_hosted_fw_deny: "{{ r_openshift_hosted_router_os_firewall_deny }}"
- name: Retrieve list of openshift nodes matching router selector
oc_obj:
@@ -82,7 +86,7 @@
replicas: "{{ item.replicas }}"
namespace: "{{ item.namespace | default('default') }}"
# This option is not yet implemented
- # force_subdomain: "{{ openshift.hosted.router.force_subdomain | default(none) }}"
+ # force_subdomain: "{{ openshift_hosted_router_force_subdomain | default(none) }}"
service_account: "{{ item.serviceaccount | default('router') }}"
selector: "{{ item.selector | default(none) }}"
images: "{{ item.images | default(omit) }}"
@@ -94,38 +98,8 @@
stats_port: "{{ item.stats_port }}"
with_items: "{{ openshift_hosted_routers }}"
-- when: openshift_hosted_router_wait | bool
- block:
- - name: Ensure OpenShift router correctly rolls out (best-effort today)
- command: |
- {{ openshift.common.client_binary }} rollout status deploymentconfig {{ item.name }} \
- --namespace {{ item.namespace | default('default') }} \
- --config {{ openshift.common.config_base }}/master/admin.kubeconfig
- async: 600
- poll: 15
- with_items: "{{ openshift_hosted_routers }}"
- failed_when: false
-
- - name: Determine the latest version of the OpenShift router deployment
- command: |
- {{ openshift.common.client_binary }} get deploymentconfig {{ item.name }} \
- --namespace {{ item.namespace }} \
- --config {{ openshift.common.config_base }}/master/admin.kubeconfig \
- -o jsonpath='{ .status.latestVersion }'
- register: openshift_hosted_routers_latest_version
- with_items: "{{ openshift_hosted_routers }}"
-
- - name: Poll for OpenShift router deployment success
- command: |
- {{ openshift.common.client_binary }} get replicationcontroller {{ item.0.name }}-{{ item.1.stdout }} \
- --namespace {{ item.0.namespace }} \
- --config {{ openshift.common.config_base }}/master/admin.kubeconfig \
- -o jsonpath='{ .metadata.annotations.openshift\.io/deployment\.phase }'
- register: openshift_hosted_router_rc_phase
- until: "'Running' not in openshift_hosted_router_rc_phase.stdout"
- delay: 15
- retries: 40
- failed_when: "'Failed' in openshift_hosted_router_rc_phase.stdout"
- with_together:
- - "{{ openshift_hosted_routers }}"
- - "{{ openshift_hosted_routers_latest_version.results }}"
+- name: Wait for pod (Routers)
+ include: wait_for_pod.yml
+ vars:
+ l_openshift_hosted_wait_for_pod: "{{ openshift_hosted_router_wait }}"
+ l_openshift_hosted_wfp_items: "{{ openshift_hosted_routers }}"
diff --git a/roles/openshift_hosted/tasks/registry/secure.yml b/roles/openshift_hosted/tasks/secure.yml
index 434b679df..0da8ac8a7 100644
--- a/roles/openshift_hosted/tasks/registry/secure.yml
+++ b/roles/openshift_hosted/tasks/secure.yml
@@ -38,11 +38,11 @@
- "{{ docker_registry_service.results.clusterip }}"
- "{{ docker_registry_route.results[0].spec.host }}"
- "{{ openshift_hosted_registry_name }}.default.svc"
- - "{{ openshift_hosted_registry_name }}.default.svc.{{ openshift.common.dns_domain }}"
+ - "{{ openshift_hosted_registry_name }}.default.svc.{{ openshift_cluster_domain }}"
- "{{ openshift_hosted_registry_routehost }}"
cert: "{{ docker_registry_cert_path }}"
key: "{{ docker_registry_key_path }}"
- expire_days: "{{ openshift_hosted_registry_cert_expire_days if openshift_version | oo_version_gte_3_5_or_1_5(openshift.common.deployment_type) | bool else omit }}"
+ expire_days: "{{ openshift_hosted_registry_cert_expire_days if openshift_version | oo_version_gte_3_5_or_1_5(openshift_deployment_type) | bool else omit }}"
register: registry_self_cert
when: docker_registry_self_signed
diff --git a/roles/openshift_hosted/tasks/registry/secure/passthrough.yml b/roles/openshift_hosted/tasks/secure/passthrough.yml
index 5b44fda10..5b44fda10 100644
--- a/roles/openshift_hosted/tasks/registry/secure/passthrough.yml
+++ b/roles/openshift_hosted/tasks/secure/passthrough.yml
diff --git a/roles/openshift_hosted/tasks/registry/secure/reencrypt.yml b/roles/openshift_hosted/tasks/secure/reencrypt.yml
index 48e5b0fba..48e5b0fba 100644
--- a/roles/openshift_hosted/tasks/registry/secure/reencrypt.yml
+++ b/roles/openshift_hosted/tasks/secure/reencrypt.yml
diff --git a/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml b/roles/openshift_hosted/tasks/storage/glusterfs.yml
index c2954fde1..c2954fde1 100644
--- a/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml
+++ b/roles/openshift_hosted/tasks/storage/glusterfs.yml
diff --git a/roles/openshift_hosted/tasks/registry/storage/object_storage.yml b/roles/openshift_hosted/tasks/storage/object_storage.yml
index 8553a8098..8553a8098 100644
--- a/roles/openshift_hosted/tasks/registry/storage/object_storage.yml
+++ b/roles/openshift_hosted/tasks/storage/object_storage.yml
diff --git a/roles/openshift_hosted/tasks/registry/storage/registry_config.j2 b/roles/openshift_hosted/tasks/storage/registry_config.j2
index f3e82ad4f..f3e82ad4f 120000
--- a/roles/openshift_hosted/tasks/registry/storage/registry_config.j2
+++ b/roles/openshift_hosted/tasks/storage/registry_config.j2
diff --git a/roles/openshift_hosted/tasks/registry/storage/s3.yml b/roles/openshift_hosted/tasks/storage/s3.yml
index 318969885..8e905d905 100644
--- a/roles/openshift_hosted/tasks/registry/storage/s3.yml
+++ b/roles/openshift_hosted/tasks/storage/s3.yml
@@ -3,7 +3,7 @@
assert:
that:
- openshift.hosted.registry.storage.s3.bucket | default(none) is not none
- - openshift.hosted.registry.storage.s3.region | default(none) is not none
+ - openshift.hosted.registry.storage.s3.bucket | default(none) is not none
msg: |
When using S3 storage, the following variables are required:
openshift_hosted_registry_storage_s3_bucket
diff --git a/roles/openshift_hosted/tasks/wait_for_pod.yml b/roles/openshift_hosted/tasks/wait_for_pod.yml
new file mode 100644
index 000000000..056c79334
--- /dev/null
+++ b/roles/openshift_hosted/tasks/wait_for_pod.yml
@@ -0,0 +1,36 @@
+---
+- when: l_openshift_hosted_wait_for_pod | default(False) | bool
+ block:
+ - name: Ensure OpenShift pod correctly rolls out (best-effort today)
+ command: |
+ {{ openshift.common.client_binary }} rollout status deploymentconfig {{ item.name }} \
+ --namespace {{ item.namespace | default('default') }} \
+ --config {{ openshift_master_config_dir }}/admin.kubeconfig
+ async: 600
+ poll: 15
+ with_items: "{{ l_openshift_hosted_wfp_items }}"
+ failed_when: false
+
+ - name: Determine the latest version of the OpenShift pod deployment
+ command: |
+ {{ openshift.common.client_binary }} get deploymentconfig {{ item.name }} \
+ --namespace {{ item.namespace }} \
+ --config {{ openshift_master_config_dir }}/admin.kubeconfig \
+ -o jsonpath='{ .status.latestVersion }'
+ register: l_openshift_hosted_wfp_latest_version
+ with_items: "{{ l_openshift_hosted_wfp_items }}"
+
+ - name: Poll for OpenShift pod deployment success
+ command: |
+ {{ openshift.common.client_binary }} get replicationcontroller {{ item.0.name }}-{{ item.1.stdout }} \
+ --namespace {{ item.0.namespace }} \
+ --config {{ openshift_master_config_dir }}/admin.kubeconfig \
+ -o jsonpath='{ .metadata.annotations.openshift\.io/deployment\.phase }'
+ register: openshift_hosted_wfp_rc_phase
+ until: "'Running' not in openshift_hosted_wfp_rc_phase.stdout"
+ delay: 15
+ retries: 40
+ failed_when: "'Failed' in openshift_hosted_wfp_rc_phase.stdout"
+ with_together:
+ - "{{ l_openshift_hosted_wfp_items }}"
+ - "{{ l_openshift_hosted_wfp_latest_version.results }}"
diff --git a/roles/openshift_hosted/templates/registry_config.j2 b/roles/openshift_hosted/templates/registry_config.j2
index 61da452de..eae8b328e 100644
--- a/roles/openshift_hosted/templates/registry_config.j2
+++ b/roles/openshift_hosted/templates/registry_config.j2
@@ -70,10 +70,8 @@ auth:
openshift:
realm: openshift
middleware:
-{% if openshift.common.version_gte_3_3_or_1_3 | bool %}
registry:
- name: openshift
-{% endif %}
repository:
- name: openshift
options:
@@ -87,7 +85,7 @@ middleware:
baseurl: {{ openshift_hosted_registry_storage_s3_cloudfront_baseurl }}
privatekey: /etc/origin/cloudfront.pem
keypairid: {{ openshift_hosted_registry_storage_s3_cloudfront_keypairid }}
-{% elif openshift.common.version_gte_3_3_or_1_3 | bool %}
+{% else %}
storage:
- name: openshift
{% endif -%}
diff --git a/roles/openshift_hosted/vars/main.yml b/roles/openshift_hosted/vars/main.yml
index 0821d0e7e..0e756d9e1 100644
--- a/roles/openshift_hosted/vars/main.yml
+++ b/roles/openshift_hosted/vars/main.yml
@@ -1,13 +1,2 @@
---
-openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
registry_config_secret_name: registry-config
-
-openshift_default_projects:
- default:
- default_node_selector: ''
- logging:
- default_node_selector: ''
- openshift-infra:
- default_node_selector: ''
-
-openshift_projects: "{{ openshift_additional_projects | default({}) | oo_merge_dicts(openshift_default_projects) }}"
diff --git a/roles/openshift_logging/README.md b/roles/openshift_logging/README.md
index f283261c4..de3d19858 100644
--- a/roles/openshift_logging/README.md
+++ b/roles/openshift_logging/README.md
@@ -62,7 +62,6 @@ When `openshift_logging_install_logging` is set to `False` the `openshift_loggin
- `openshift_logging_fluentd_nodeselector`: The node selector that the Fluentd daemonset uses to determine where to deploy to. Defaults to '"logging-infra-fluentd": "true"'.
- `openshift_logging_fluentd_cpu_limit`: The CPU limit for Fluentd pods. Defaults to '100m'.
- `openshift_logging_fluentd_memory_limit`: The memory limit for Fluentd pods. Defaults to '512Mi'.
-- `openshift_logging_fluentd_es_copy`: Whether or not to use the ES_COPY feature for Fluentd (DEPRECATED). Defaults to 'False'.
- `openshift_logging_fluentd_use_journal`: *DEPRECATED - DO NOT USE* Fluentd will automatically detect whether or not Docker is using the journald log driver.
- `openshift_logging_fluentd_journal_read_from_head`: If empty, Fluentd will use its internal default, which is false.
- `openshift_logging_fluentd_hosts`: List of nodes that should be labeled for Fluentd to be deployed to. Defaults to ['--all'].
diff --git a/roles/openshift_logging/defaults/main.yml b/roles/openshift_logging/defaults/main.yml
index 6699e2062..db4262fed 100644
--- a/roles/openshift_logging/defaults/main.yml
+++ b/roles/openshift_logging/defaults/main.yml
@@ -74,7 +74,6 @@ openshift_logging_kibana_ops_ca: ""
openshift_logging_fluentd_nodeselector: {'logging-infra-fluentd': 'true'}
openshift_logging_fluentd_cpu_limit: 100m
openshift_logging_fluentd_memory_limit: 512Mi
-openshift_logging_fluentd_es_copy: false
openshift_logging_fluentd_journal_source: ""
openshift_logging_fluentd_journal_read_from_head: ""
openshift_logging_fluentd_hosts: ['--all']
diff --git a/roles/openshift_logging_fluentd/defaults/main.yml b/roles/openshift_logging_fluentd/defaults/main.yml
index 30d3d854a..82326bdd1 100644
--- a/roles/openshift_logging_fluentd/defaults/main.yml
+++ b/roles/openshift_logging_fluentd/defaults/main.yml
@@ -50,8 +50,6 @@ openshift_logging_fluentd_aggregating_key_path: none
openshift_logging_fluentd_aggregating_passphrase: none
### Deprecating in 3.6
-openshift_logging_fluentd_es_copy: false
-
# following can be uncommented to provide values for configmaps -- take care when providing file contents as it may cause your cluster to not operate correctly
#fluentd_config_contents:
#fluentd_throttle_contents:
diff --git a/roles/openshift_logging_fluentd/tasks/main.yaml b/roles/openshift_logging_fluentd/tasks/main.yaml
index 74b4d7db4..37960afd1 100644
--- a/roles/openshift_logging_fluentd/tasks/main.yaml
+++ b/roles/openshift_logging_fluentd/tasks/main.yaml
@@ -1,5 +1,8 @@
---
- fail:
+ msg: The ES_COPY feature is no longer supported. Please remove the variable from your inventory
+ when: openshift_logging_fluentd_es_copy is defined
+- fail:
msg: Only one Fluentd nodeselector key pair should be provided
when: openshift_logging_fluentd_nodeselector.keys() | count > 1
diff --git a/roles/openshift_logging_fluentd/templates/fluentd.j2 b/roles/openshift_logging_fluentd/templates/fluentd.j2
index a4afb6618..1c0d1089f 100644
--- a/roles/openshift_logging_fluentd/templates/fluentd.j2
+++ b/roles/openshift_logging_fluentd/templates/fluentd.j2
@@ -94,8 +94,6 @@ spec:
value: "{{ openshift_logging_fluentd_ops_client_key }}"
- name: "OPS_CA"
value: "{{ openshift_logging_fluentd_ops_ca }}"
- - name: "ES_COPY"
- value: "false"
- name: "JOURNAL_SOURCE"
value: "{{ openshift_logging_fluentd_journal_source | default('') }}"
- name: "JOURNAL_READ_FROM_HEAD"
diff --git a/roles/openshift_node/templates/openshift.docker.node.dep.service b/roles/openshift_node/templates/openshift.docker.node.dep.service
index 8734e7443..fa7238849 100644
--- a/roles/openshift_node/templates/openshift.docker.node.dep.service
+++ b/roles/openshift_node/templates/openshift.docker.node.dep.service
@@ -6,6 +6,6 @@ Before={{ openshift.common.service_type }}-node.service
{% if openshift_use_crio|default(false) %}Wants=cri-o.service{% endif %}
[Service]
-ExecStart=/bin/bash -c "if [[ -f /usr/bin/docker-current ]]; then echo \"DOCKER_ADDTL_BIND_MOUNTS=--volume=/usr/bin/docker-current:/usr/bin/docker-current:ro --volume=/etc/sysconfig/docker:/etc/sysconfig/docker:ro\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; else echo \"#DOCKER_ADDTL_BIND_MOUNTS=\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; fi"
+ExecStart=/bin/bash -c "if [[ -f /usr/bin/docker-current ]]; then echo \"DOCKER_ADDTL_BIND_MOUNTS=--volume=/usr/bin/docker-current:/usr/bin/docker-current:ro --volume=/etc/sysconfig/docker:/etc/sysconfig/docker:ro --volume=/etc/containers/registries:/etc/containers/registries:ro\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; else echo \"#DOCKER_ADDTL_BIND_MOUNTS=\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; fi"
ExecStop=
SyslogIdentifier={{ openshift.common.service_type }}-node-dep
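On a host where /usr/bin/docker-current exists, the updated ExecStart writes a single DOCKER_ADDTL_BIND_MOUNTS line to the node-dep sysconfig file; with the mount added by this diff it amounts to:

    DOCKER_ADDTL_BIND_MOUNTS=--volume=/usr/bin/docker-current:/usr/bin/docker-current:ro --volume=/etc/sysconfig/docker:/etc/sysconfig/docker:ro --volume=/etc/containers/registries:/etc/containers/registries:ro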
diff --git a/roles/openshift_node/templates/openshift.docker.node.service b/roles/openshift_node/templates/openshift.docker.node.service
index 4ab10b95f..310d8b29d 100644
--- a/roles/openshift_node/templates/openshift.docker.node.service
+++ b/roles/openshift_node/templates/openshift.docker.node.service
@@ -34,6 +34,7 @@ ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node \
-v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch \
-v /etc/origin/sdn:/etc/openshift-sdn -v /var/lib/cni:/var/lib/cni \
-v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log \
+ {% if openshift_use_nuage | default(false) -%} $NUAGE_ADDTL_BIND_MOUNTS {% endif -%} \
-v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS -v /etc/pki:/etc/pki:ro \
{% if l_bind_docker_reg_auth %} -v {{ oreg_auth_credentials_path }}:/root/.docker:ro{% endif %}\
{{ openshift.node.node_image }}:${IMAGE_VERSION}
diff --git a/roles/openshift_node_upgrade/templates/openshift.docker.node.dep.service b/roles/openshift_node_upgrade/templates/openshift.docker.node.dep.service
index 4c47f8c0d..aae35719c 100644
--- a/roles/openshift_node_upgrade/templates/openshift.docker.node.dep.service
+++ b/roles/openshift_node_upgrade/templates/openshift.docker.node.dep.service
@@ -6,6 +6,6 @@ Before={{ openshift.common.service_type }}-node.service
[Service]
-ExecStart=/bin/bash -c "if [[ -f /usr/bin/docker-current ]]; then echo \"DOCKER_ADDTL_BIND_MOUNTS=--volume=/usr/bin/docker-current:/usr/bin/docker-current:ro --volume=/etc/sysconfig/docker:/etc/sysconfig/docker:ro\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; else echo \"#DOCKER_ADDTL_BIND_MOUNTS=\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; fi"
+ExecStart=/bin/bash -c "if [[ -f /usr/bin/docker-current ]]; then echo \"DOCKER_ADDTL_BIND_MOUNTS=--volume=/usr/bin/docker-current:/usr/bin/docker-current:ro --volume=/etc/sysconfig/docker:/etc/sysconfig/docker:ro --volume=/etc/containers/registries:/etc/containers/registries:ro\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; else echo \"#DOCKER_ADDTL_BIND_MOUNTS=\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; fi"
ExecStop=
SyslogIdentifier={{ openshift.common.service_type }}-node-dep
diff --git a/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml
index 9ebb0d5ec..7b705c2d4 100644
--- a/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml
+++ b/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml
@@ -85,8 +85,6 @@ objects:
volumeMounts:
- name: db
mountPath: /var/lib/heketi
- - name: topology
- mountPath: ${TOPOLOGY_PATH}
- name: config
mountPath: /etc/heketi
readinessProbe:
@@ -103,9 +101,6 @@ objects:
port: 8080
volumes:
- name: db
- - name: topology
- secret:
- secretName: heketi-${CLUSTER_NAME}-topology-secret
- name: config
secret:
secretName: heketi-${CLUSTER_NAME}-config-secret
@@ -138,6 +133,3 @@ parameters:
displayName: GlusterFS cluster name
description: A unique name to identify this heketi service, useful for running multiple heketi instances
value: glusterfs
-- name: TOPOLOGY_PATH
- displayName: heketi topology file location
- required: True
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
index bc0dde17d..3f6dab78b 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
@@ -51,8 +51,8 @@
kind: pod
state: list
selector: "glusterfs=deploy-heketi-{{ glusterfs_name }}-pod"
- register: heketi_pod
- until: "heketi_pod.results.results[0]['items'] | count == 0"
+ register: deploy_heketi_pod
+ until: "deploy_heketi_pod.results.results[0]['items'] | count == 0"
delay: 10
retries: "{{ (glusterfs_timeout | int / 10) | int }}"
when: glusterfs_heketi_wipe
@@ -103,7 +103,7 @@
state: list
kind: pod
selector: "glusterfs=deploy-heketi-{{ glusterfs_name }}-pod"
- register: heketi_pod
+ register: deploy_heketi_pod
when: glusterfs_heketi_is_native
- name: Check if need to deploy deploy-heketi
@@ -111,9 +111,9 @@
glusterfs_heketi_deploy_is_missing: False
when:
- "glusterfs_heketi_is_native"
- - "heketi_pod.results.results[0]['items'] | count > 0"
+ - "deploy_heketi_pod.results.results[0]['items'] | count > 0"
# deploy-heketi is not missing when there are one or more pods with matching labels whose 'Ready' status is True
- - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0"
+ - "deploy_heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0"
- name: Check for existing heketi pod
oc_obj:
@@ -147,6 +147,21 @@
when:
- glusterfs_heketi_is_native
+- name: Get heketi admin secret
+ oc_secret:
+ state: list
+ namespace: "{{ glusterfs_namespace }}"
+ name: "heketi-{{ glusterfs_name }}-admin-secret"
+ decode: True
+ register: glusterfs_heketi_admin_secret
+
+- name: Set heketi admin key
+ set_fact:
+ glusterfs_heketi_admin_key: "{{ glusterfs_heketi_admin_secret.results.decoded.key }}"
+ when:
+ - glusterfs_is_native
+ - glusterfs_heketi_admin_secret.results.results[0]
+
- name: Generate heketi admin key
set_fact:
glusterfs_heketi_admin_key: "{{ 32 | oo_generate_secret }}"
@@ -190,14 +205,37 @@
- glusterfs_heketi_deploy_is_missing
- glusterfs_heketi_is_missing
+- name: Wait for deploy-heketi pod
+ oc_obj:
+ namespace: "{{ glusterfs_namespace }}"
+ kind: pod
+ state: list
+ selector: "glusterfs=deploy-heketi-{{ glusterfs_name }}-pod"
+ register: deploy_heketi_pod
+ until:
+ - "deploy_heketi_pod.results.results[0]['items'] | count > 0"
+ # Pod's 'Ready' status must be True
+ - "deploy_heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"
+ delay: 10
+ retries: "{{ (glusterfs_timeout | int / 10) | int }}"
+ when:
+ - glusterfs_heketi_is_native
+ - not glusterfs_heketi_deploy_is_missing
+ - glusterfs_heketi_is_missing
+
- name: Set heketi-cli command
set_fact:
- glusterfs_heketi_client: "{% if glusterfs_heketi_is_native %}{{ openshift.common.client_binary }} rsh --namespace={{ glusterfs_namespace }} {{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} {% endif %}{{ glusterfs_heketi_cli }} -s http://{% if glusterfs_heketi_is_native %}localhost:8080{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %} --user admin {% if glusterfs_heketi_admin_key is defined %}--secret '{{ glusterfs_heketi_admin_key }}'{% endif %}"
+ glusterfs_heketi_client: "{% if glusterfs_heketi_is_native %}{{ openshift.common.client_binary }} rsh --namespace={{ glusterfs_namespace }} {%if not glusterfs_heketi_is_missing %}{{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }}{% else %}{{ deploy_heketi_pod.results.results[0]['items'][0]['metadata']['name'] }}{% endif %} {% endif %}{{ glusterfs_heketi_cli }} -s http://{% if glusterfs_heketi_is_native %}localhost:8080{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %} --user admin {% if glusterfs_heketi_admin_key is defined %}--secret '{{ glusterfs_heketi_admin_key }}'{% endif %}"
- name: Verify heketi service
command: "{{ glusterfs_heketi_client }} cluster list"
changed_when: False
+- name: Place heketi topology on heketi Pod
+ shell: "{{ openshift.common.client_binary }} exec --namespace={{ glusterfs_namespace }} -i {%if not glusterfs_heketi_is_missing %}{{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }}{% else %}{{ deploy_heketi_pod.results.results[0]['items'][0]['metadata']['name'] }}{% endif %} -- bash -c 'mkdir -p {{ mktemp.stdout }} && cat > {{ mktemp.stdout }}/topology.json' < {{ mktemp.stdout }}/topology.json"
+ when:
+ - glusterfs_heketi_is_native
+
- name: Load heketi topology
command: "{{ glusterfs_heketi_client }} topology load --json={{ mktemp.stdout }}/topology.json 2>&1"
register: topology_load
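
The "Place heketi topology on heketi Pod" task replaces the old approach of mounting topology.json into the pod as a secret: the file generated in the controller's temporary directory is streamed into the (deploy-)heketi pod over oc exec, so the subsequent topology load can read it from the same path inside the pod. With illustrative values only (namespace glusterfs, pod deploy-heketi-storage-1-abcde, temporary directory /tmp/openshift-glusterfs-XXXXXX, heketi-cli as the client binary, HEKETI_ADMIN_KEY standing in for the generated admin key), the two steps amount to roughly:

    # Sketch with illustrative names; the role derives these from glusterfs_* facts and mktemp.
    TMPDIR=/tmp/openshift-glusterfs-XXXXXX
    # 1. Feed the locally generated topology.json into the pod at the same path.
    oc exec --namespace=glusterfs -i deploy-heketi-storage-1-abcde -- \
        bash -c "mkdir -p $TMPDIR && cat > $TMPDIR/topology.json" < $TMPDIR/topology.json
    # 2. Load it through the heketi client command assembled above.
    oc rsh --namespace=glusterfs deploy-heketi-storage-1-abcde \
        heketi-cli -s http://localhost:8080 --user admin --secret "$HEKETI_ADMIN_KEY" \
        topology load --json=$TMPDIR/topology.json

Whether these commands target the deploy-heketi pod or an already-running heketi pod depends on glusterfs_heketi_is_missing, exactly as the Jinja conditionals above select between deploy_heketi_pod and heketi_pod.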
diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml
index 3ba1eb2d2..73396c9af 100644
--- a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml
+++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml
@@ -6,16 +6,6 @@
with_items:
- "deploy-heketi-template.yml"
-- name: Create heketi topology secret
- oc_secret:
- namespace: "{{ glusterfs_namespace }}"
- state: present
- name: "heketi-{{ glusterfs_name }}-topology-secret"
- force: True
- files:
- - name: topology.json
- path: "{{ mktemp.stdout }}/topology.json"
-
- name: Create deploy-heketi template
oc_obj:
namespace: "{{ glusterfs_namespace }}"
@@ -39,18 +29,7 @@
HEKETI_EXECUTOR: "{{ glusterfs_heketi_executor }}"
HEKETI_KUBE_NAMESPACE: "{{ glusterfs_namespace }}"
CLUSTER_NAME: "{{ glusterfs_name }}"
- TOPOLOGY_PATH: "{{ mktemp.stdout }}"
-- name: Wait for deploy-heketi pod
- oc_obj:
- namespace: "{{ glusterfs_namespace }}"
- kind: pod
- state: list
- selector: "glusterfs=deploy-heketi-{{ glusterfs_name }}-pod"
- register: heketi_pod
- until:
- - "heketi_pod.results.results[0]['items'] | count > 0"
- # Pod's 'Ready' status must be True
- - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"
- delay: 10
- retries: "{{ (glusterfs_timeout | int / 10) | int }}"
+- name: Set heketi Deployed fact
+ set_fact:
+ glusterfs_heketi_deploy_is_missing: False
diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
index afc04a537..54a6dd7c3 100644
--- a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
+++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
@@ -4,7 +4,7 @@
register: setup_storage
- name: Copy heketi-storage list
- shell: "{{ openshift.common.client_binary }} rsh --namespace={{ glusterfs_namespace }} {{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} cat /tmp/heketi-storage.json > {{ mktemp.stdout }}/heketi-storage.json"
+ shell: "{{ openshift.common.client_binary }} rsh --namespace={{ glusterfs_namespace }} {{ deploy_heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} cat /tmp/heketi-storage.json > {{ mktemp.stdout }}/heketi-storage.json"
# This is used in the subsequent task
- name: Copy the admin client config
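
Here the data flows the other way: the output of a command run inside the deploy-heketi pod is redirected into a file on the controller host. With the same illustrative names as in the sketch above, the renamed task runs roughly:

    # Sketch with illustrative names: copy the generated heketi-storage list off the deploy-heketi pod.
    oc rsh --namespace=glusterfs deploy-heketi-storage-1-abcde \
        cat /tmp/heketi-storage.json > /tmp/openshift-glusterfs-XXXXXX/heketi-storage.json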
diff --git a/roles/openshift_version/tasks/set_version_containerized.yml b/roles/openshift_version/tasks/set_version_containerized.yml
index a2a579e9d..b727eb74d 100644
--- a/roles/openshift_version/tasks/set_version_containerized.yml
+++ b/roles/openshift_version/tasks/set_version_containerized.yml
@@ -1,6 +1,6 @@
---
- set_fact:
- l_use_crio: "{{ openshift_use_crio | default(false) }}"
+ l_use_crio_only: "{{ openshift_use_crio_only | default(false) }}"
- name: Set containerized version to configure if openshift_image_tag specified
set_fact:
@@ -22,7 +22,9 @@
command: >
docker run --rm {{ openshift.common.cli_image }}:latest version
register: cli_image_version
- when: openshift_version is not defined
+ when:
+ - openshift_version is not defined
+ - not l_use_crio_only
# Origin latest = pre-release version (i.e. v1.3.0-alpha.1-321-gb095e3a)
- set_fact:
@@ -31,6 +33,7 @@
- openshift_version is not defined
- openshift.common.deployment_type == 'origin'
- cli_image_version.stdout_lines[0].split('-') | length > 1
+ - not l_use_crio_only
- set_fact:
openshift_version: "{{ cli_image_version.stdout_lines[0].split(' ')[1].split('-')[0][1:] }}"
@@ -45,14 +48,14 @@
when:
- openshift_version is defined
- openshift_version.split('.') | length == 2
- - not l_use_crio
+ - not l_use_crio_only
- set_fact:
openshift_version: "{{ cli_image_version.stdout_lines[0].split(' ')[1].split('-')[0:2][1:] | join('-') if openshift.common.deployment_type == 'origin' else cli_image_version.stdout_lines[0].split(' ')[1].split('-')[0][1:] }}"
when:
- openshift_version is defined
- openshift_version.split('.') | length == 2
- - not l_use_crio
+ - not l_use_crio_only
# TODO: figure out a way to check for the openshift_version when using CRI-O.
# We should do that using the images in the ostree storage so we don't have