Diffstat (limited to 'roles')
-rw-r--r--  roles/calico/handlers/main.yml | 4
-rw-r--r--  roles/calico/templates/calico.service.j2 | 4
-rw-r--r--  roles/contiv/tasks/netplugin.yml | 2
-rw-r--r--  roles/contiv/templates/aci-gw.service | 2
-rw-r--r--  roles/dns/templates/named.service.j2 | 8
-rw-r--r--  roles/docker/README.md | 9
-rw-r--r--  roles/docker/handlers/main.yml | 2
-rw-r--r--  roles/docker/meta/main.yml | 1
-rw-r--r--  roles/docker/tasks/main.yml | 122
-rw-r--r--  roles/docker/tasks/package_docker.yml | 116
-rw-r--r--  roles/docker/tasks/systemcontainer_docker.yml | 144
-rw-r--r--  roles/docker/templates/daemon.json | 21
-rw-r--r--  roles/docker/templates/systemcontainercustom.conf.j2 | 17
-rw-r--r--  roles/docker/vars/main.yml | 3
-rw-r--r--  roles/etcd/defaults/main.yaml | 1
-rw-r--r--  roles/etcd/files/etcdctl.sh | 11
-rw-r--r--  roles/etcd/meta/main.yml | 1
-rw-r--r--  roles/etcd/tasks/main.yml | 116
-rw-r--r--  roles/etcd/templates/etcd.docker.service | 12
-rw-r--r--  roles/etcd_common/README.md | 37
-rw-r--r--  roles/etcd_common/defaults/main.yml | 3
-rw-r--r--  roles/etcd_common/tasks/etcdctl.yml (renamed from roles/etcd/tasks/etcdctl.yml) | 6
-rw-r--r--  roles/etcd_common/templates/etcdctl.sh.j2 | 12
-rw-r--r--  roles/etcd_server_certificates/meta/main.yml | 2
-rw-r--r--  roles/flannel/handlers/main.yml | 4
-rw-r--r--  roles/lib_openshift/library/oc_adm_ca_server_cert.py | 4
-rw-r--r--  roles/lib_openshift/library/oc_adm_manage_node.py | 4
-rw-r--r--  roles/lib_openshift/library/oc_adm_policy_group.py | 4
-rw-r--r--  roles/lib_openshift/library/oc_adm_policy_user.py | 4
-rw-r--r--  roles/lib_openshift/library/oc_adm_registry.py | 45
-rw-r--r--  roles/lib_openshift/library/oc_adm_router.py | 4
-rw-r--r--  roles/lib_openshift/library/oc_clusterrole.py | 4
-rw-r--r--  roles/lib_openshift/library/oc_configmap.py | 4
-rw-r--r--  roles/lib_openshift/library/oc_edit.py | 4
-rw-r--r--  roles/lib_openshift/library/oc_env.py | 4
-rw-r--r--  roles/lib_openshift/library/oc_group.py | 4
-rw-r--r--  roles/lib_openshift/library/oc_image.py | 4
-rw-r--r--  roles/lib_openshift/library/oc_label.py | 4
-rw-r--r--  roles/lib_openshift/library/oc_obj.py | 6
-rw-r--r--  roles/lib_openshift/library/oc_objectvalidator.py | 4
-rw-r--r--  roles/lib_openshift/library/oc_process.py | 4
-rw-r--r--  roles/lib_openshift/library/oc_project.py | 4
-rw-r--r--  roles/lib_openshift/library/oc_pvc.py | 4
-rw-r--r--  roles/lib_openshift/library/oc_route.py | 4
-rw-r--r--  roles/lib_openshift/library/oc_scale.py | 4
-rw-r--r--  roles/lib_openshift/library/oc_secret.py | 4
-rw-r--r--  roles/lib_openshift/library/oc_service.py | 4
-rw-r--r--  roles/lib_openshift/library/oc_serviceaccount.py | 4
-rw-r--r--  roles/lib_openshift/library/oc_serviceaccount_secret.py | 4
-rw-r--r--  roles/lib_openshift/library/oc_user.py | 4
-rw-r--r--  roles/lib_openshift/library/oc_version.py | 4
-rw-r--r--  roles/lib_openshift/library/oc_volume.py | 16
-rw-r--r--  roles/lib_openshift/src/class/oc_adm_registry.py | 41
-rw-r--r--  roles/lib_openshift/src/class/oc_obj.py | 2
-rw-r--r--  roles/lib_openshift/src/doc/volume | 12
-rw-r--r--  roles/lib_openshift/src/lib/base.py | 2
-rw-r--r--  roles/lib_openshift/src/test/integration/filter_plugins/filters.py | 1
-rwxr-xr-x  roles/lib_openshift/src/test/integration/oc_label.yml | 2
-rwxr-xr-x  roles/lib_openshift/src/test/integration/oc_user.yml | 2
-rwxr-xr-x  roles/lib_openshift/src/test/unit/test_oc_adm_registry.py | 2
-rw-r--r--  roles/lib_utils/library/repoquery.py | 30
-rw-r--r--  roles/lib_utils/library/yedit.py | 3
-rw-r--r--  roles/lib_utils/src/ansible/repoquery.py | 1
-rw-r--r--  roles/lib_utils/src/class/repoquery.py | 28
-rw-r--r--  roles/lib_utils/src/class/yedit.py | 2
-rw-r--r--  roles/lib_utils/src/lib/import.py | 1
-rwxr-xr-x  roles/lib_utils/src/test/unit/test_repoquery.py | 1
-rw-r--r--  roles/openshift_ca/tasks/main.yml | 2
-rw-r--r--  roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py | 3
-rw-r--r--  roles/openshift_certificate_expiry/library/openshift_cert_expiry.py | 4
-rw-r--r--  roles/openshift_certificate_expiry/tasks/main.yml | 6
-rw-r--r--  roles/openshift_certificate_expiry/test/test_fakeopensslclasses.py | 3
-rw-r--r--  roles/openshift_cli/library/openshift_container_binary_sync.py | 2
-rw-r--r--  roles/openshift_cloud_provider/tasks/openstack.yml | 2
-rw-r--r--  roles/openshift_docker_facts/tasks/main.yml | 1
-rw-r--r--  roles/openshift_etcd_ca/tasks/main.yml | 1
-rw-r--r--  roles/openshift_excluder/README.md | 13
-rw-r--r--  roles/openshift_excluder/meta/main.yml | 1
-rw-r--r--  roles/openshift_excluder/tasks/verify_excluder.yml | 35
-rw-r--r--  roles/openshift_excluder/tasks/verify_upgrade.yml | 15
-rw-r--r--  roles/openshift_expand_partition/tasks/main.yml | 2
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py | 20
-rw-r--r--  roles/openshift_health_checker/callback_plugins/zz_failure_summary.py | 1
-rwxr-xr-x  roles/openshift_health_checker/library/aos_version.py | 1
-rwxr-xr-x  roles/openshift_health_checker/library/check_yum_update.py | 1
-rw-r--r--  roles/openshift_hosted/README.md | 8
-rw-r--r--  roles/openshift_hosted/tasks/registry/registry.yml | 4
-rw-r--r--  roles/openshift_hosted/tasks/registry/storage/glusterfs.yml | 43
-rw-r--r--  roles/openshift_hosted_logging/tasks/deploy_logging.yaml | 12
-rw-r--r--  roles/openshift_hosted_metrics/tasks/install.yml | 2
-rw-r--r--  roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml | 4
-rw-r--r--  roles/openshift_loadbalancer/templates/haproxy.docker.service.j2 | 8
-rw-r--r--  roles/openshift_logging/README.md | 2
-rw-r--r--  roles/openshift_logging/defaults/main.yml | 16
-rw-r--r--  roles/openshift_logging/handlers/main.yml | 9
-rw-r--r--  roles/openshift_logging/tasks/generate_configmaps.yaml | 2
-rw-r--r--  roles/openshift_logging/tasks/generate_routes.yaml | 6
-rw-r--r--  roles/openshift_logging/tasks/install_elasticsearch.yaml | 34
-rw-r--r--  roles/openshift_logging/tasks/install_fluentd.yaml | 4
-rw-r--r--  roles/openshift_logging/tasks/install_mux.yaml | 4
-rw-r--r--  roles/openshift_logging/tasks/install_support.yaml | 41
-rw-r--r--  roles/openshift_logging/tasks/main.yaml | 2
-rw-r--r--  roles/openshift_logging/tasks/oc_apply.yaml | 4
-rw-r--r--  roles/openshift_logging/tasks/set_es_storage.yaml | 8
-rw-r--r--  roles/openshift_logging/tasks/start_cluster.yaml | 7
-rw-r--r--  roles/openshift_logging/tasks/stop_cluster.yaml | 2
-rw-r--r--  roles/openshift_logging/tasks/update_master_config.yaml | 7
-rw-r--r--  roles/openshift_logging/templates/fluentd.j2 | 6
-rw-r--r--  roles/openshift_logging/templates/kibana.j2 | 35
-rw-r--r--  roles/openshift_master/tasks/main.yml | 2
-rw-r--r--  roles/openshift_master/tasks/systemd_units.yml | 10
-rw-r--r--  roles/openshift_master/templates/atomic-openshift-master.service | 23
-rw-r--r--  roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 | 8
-rw-r--r--  roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 | 8
-rw-r--r--  roles/openshift_master/templates/master_docker/master.docker.service.j2 | 8
-rw-r--r--  roles/openshift_master/templates/origin-master.service | 23
-rw-r--r--  roles/openshift_master_certificates/tasks/main.yml | 8
-rw-r--r--  roles/openshift_master_facts/filter_plugins/openshift_master.py | 1
-rw-r--r--  roles/openshift_master_facts/tasks/main.yml | 4
-rw-r--r--  roles/openshift_metrics/README.md | 2
-rw-r--r--  roles/openshift_metrics/handlers/main.yml | 9
-rw-r--r--  roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml | 16
-rw-r--r--  roles/openshift_metrics/tasks/install_cassandra.yaml | 2
-rw-r--r--  roles/openshift_metrics/tasks/install_heapster.yaml | 2
-rw-r--r--  roles/openshift_metrics/tasks/install_metrics.yaml | 6
-rw-r--r--  roles/openshift_metrics/tasks/main.yaml | 2
-rw-r--r--  roles/openshift_metrics/tasks/start_metrics.yaml | 4
-rw-r--r--  roles/openshift_metrics/tasks/stop_metrics.yaml | 4
-rw-r--r--  roles/openshift_metrics/tasks/uninstall_metrics.yaml | 4
-rw-r--r--  roles/openshift_metrics/tasks/update_master_config.yaml | 7
-rw-r--r--  roles/openshift_node/defaults/main.yml | 2
-rw-r--r--  roles/openshift_node/tasks/main.yml | 2
-rw-r--r--  roles/openshift_node/tasks/systemd_units.yml | 8
-rw-r--r--  roles/openshift_node/templates/atomic-openshift-node.service | 22
-rw-r--r--  roles/openshift_node/templates/openshift.docker.node.dep.service | 4
-rw-r--r--  roles/openshift_node/templates/openshift.docker.node.service | 8
-rw-r--r--  roles/openshift_node/templates/openvswitch.docker.service | 8
-rw-r--r--  roles/openshift_node/templates/origin-node.service | 21
-rw-r--r--  roles/openshift_node_certificates/handlers/main.yml | 2
-rw-r--r--  roles/openshift_node_upgrade/tasks/restart.yml | 4
-rw-r--r--  roles/openshift_node_upgrade/templates/openshift.docker.node.dep.service | 4
-rw-r--r--  roles/openshift_node_upgrade/templates/openshift.docker.node.service | 8
-rw-r--r--  roles/openshift_node_upgrade/templates/openvswitch.docker.service | 8
-rw-r--r--  roles/openshift_provisioners/tasks/install_efs.yaml | 2
-rw-r--r--  roles/openshift_repos/tasks/main.yaml | 17
-rw-r--r--  roles/openshift_storage_glusterfs/README.md | 36
-rw-r--r--  roles/openshift_storage_glusterfs/defaults/main.yml | 21
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml | 166
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml | 22
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml | 42
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml | 35
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml | 16
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml | 38
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/main.yml | 170
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-endpoints.yml.j2 | 2
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.6/topology.json.j2 | 2
-rw-r--r--  roles/openshift_version/meta/main.yml | 1
-rw-r--r--  roles/openshift_version/tasks/main.yml | 20
-rw-r--r--  roles/openshift_version/tasks/set_version_rpm.yml | 50
-rw-r--r--  roles/os_firewall/README.md | 2
-rw-r--r--  roles/os_firewall/defaults/main.yml | 2
-rwxr-xr-x  roles/os_firewall/library/os_firewall_manage_iptables.py | 1
-rw-r--r--  roles/os_firewall/tasks/firewall/firewalld.yml | 2
-rw-r--r--  roles/os_firewall/tasks/firewall/iptables.yml | 2
164 files changed, 1485 insertions, 754 deletions
diff --git a/roles/calico/handlers/main.yml b/roles/calico/handlers/main.yml
index 65d75cf00..53cecfcc3 100644
--- a/roles/calico/handlers/main.yml
+++ b/roles/calico/handlers/main.yml
@@ -5,4 +5,6 @@
- name: restart docker
become: yes
- systemd: name=docker state=restarted
+ systemd:
+ name: "{{ openshift.docker.service_name }}"
+ state: restarted
diff --git a/roles/calico/templates/calico.service.j2 b/roles/calico/templates/calico.service.j2
index b882a5597..7a1236392 100644
--- a/roles/calico/templates/calico.service.j2
+++ b/roles/calico/templates/calico.service.j2
@@ -1,7 +1,7 @@
[Unit]
Description=calico
-After=docker.service
-Requires=docker.service
+After={{ openshift.docker.service_name }}.service
+Requires={{ openshift.docker.service_name }}.service
[Service]
Restart=always
diff --git a/roles/contiv/tasks/netplugin.yml b/roles/contiv/tasks/netplugin.yml
index 97b9762df..0847c92bc 100644
--- a/roles/contiv/tasks/netplugin.yml
+++ b/roles/contiv/tasks/netplugin.yml
@@ -105,7 +105,7 @@
- name: Docker | Restart docker
service:
- name: docker
+ name: "{{ openshift.docker.service_name }}"
state: restarted
when: docker_updated|changed
diff --git a/roles/contiv/templates/aci-gw.service b/roles/contiv/templates/aci-gw.service
index 8e4b66fbe..4506d2231 100644
--- a/roles/contiv/templates/aci-gw.service
+++ b/roles/contiv/templates/aci-gw.service
@@ -1,6 +1,6 @@
[Unit]
Description=Contiv ACI gw
-After=auditd.service systemd-user-sessions.service time-sync.target docker.service
+After=auditd.service systemd-user-sessions.service time-sync.target {{ openshift.docker.service_name }}.service
[Service]
ExecStart={{ bin_dir }}/aci_gw.sh start
diff --git a/roles/dns/templates/named.service.j2 b/roles/dns/templates/named.service.j2
index 566739f25..6e0a7a640 100644
--- a/roles/dns/templates/named.service.j2
+++ b/roles/dns/templates/named.service.j2
@@ -1,7 +1,7 @@
[Unit]
-Requires=docker.service
-After=docker.service
-PartOf=docker.service
+Requires={{ openshift.docker.service_name }}.service
+After={{ openshift.docker.service_name }}.service
+PartOf={{ openshift.docker.service_name }}.service
[Service]
Type=simple
@@ -12,4 +12,4 @@ ExecStart=/usr/bin/docker run --name bind -p 53:53/udp -v /var/log:/var/log -v /
ExecStop=/usr/bin/docker stop bind
[Install]
-WantedBy=docker.service
+WantedBy={{ openshift.docker.service_name }}.service
diff --git a/roles/docker/README.md b/roles/docker/README.md
index ea06fd41a..4a9f21f22 100644
--- a/roles/docker/README.md
+++ b/roles/docker/README.md
@@ -1,7 +1,9 @@
Docker
=========
-Ensures docker package is installed, and optionally raises timeout for systemd-udevd.service to 5 minutes.
+Ensures docker package or system container is installed, and optionally raises timeout for systemd-udevd.service to 5 minutes.
+
+daemon.json items may be found at https://docs.docker.com/engine/reference/commandline/dockerd/#daemon-configuration-file
Requirements
------------
@@ -11,8 +13,10 @@ Ansible 2.2
Role Variables
--------------
-udevw_udevd_dir: location of systemd config for systemd-udevd.service
+docker_conf_dir: location of the Docker configuration directory
+docker_systemd_dir: location of the systemd directory for Docker
docker_udev_workaround: raises udevd timeout to 5 minutes (https://bugzilla.redhat.com/show_bug.cgi?id=1272446)
+udevw_udevd_dir: location of systemd config for systemd-udevd.service
Dependencies
------------
@@ -26,6 +30,7 @@ Example Playbook
roles:
- role: docker
docker_udev_workaround: "true"
+ docker_use_system_container: False
License
-------
diff --git a/roles/docker/handlers/main.yml b/roles/docker/handlers/main.yml
index 9ccb306fc..7f91afb37 100644
--- a/roles/docker/handlers/main.yml
+++ b/roles/docker/handlers/main.yml
@@ -2,7 +2,7 @@
- name: restart docker
systemd:
- name: docker
+ name: "{{ openshift.docker.service_name }}"
state: restarted
when: not docker_service_status_changed | default(false) | bool
diff --git a/roles/docker/meta/main.yml b/roles/docker/meta/main.yml
index ad28cece9..cd4083572 100644
--- a/roles/docker/meta/main.yml
+++ b/roles/docker/meta/main.yml
@@ -11,3 +11,4 @@ galaxy_info:
- 7
dependencies:
- role: os_firewall
+- role: lib_openshift
diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml
index c34700aeb..0c2b16acf 100644
--- a/roles/docker/tasks/main.yml
+++ b/roles/docker/tasks/main.yml
@@ -1,119 +1,17 @@
---
-- name: Get current installed Docker version
- command: "{{ repoquery_cmd }} --installed --qf '%{version}' docker"
- when: not openshift.common.is_atomic | bool
- register: curr_docker_version
- changed_when: false
-
-- name: Error out if Docker pre-installed but too old
- fail:
- msg: "Docker {{ curr_docker_version.stdout }} is installed, but >= 1.9.1 is required."
- when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and curr_docker_version.stdout | version_compare('1.9.1', '<') and not docker_version is defined
-
-- name: Error out if requested Docker is too old
- fail:
- msg: "Docker {{ docker_version }} requested, but >= 1.9.1 is required."
- when: docker_version is defined and docker_version | version_compare('1.9.1', '<')
-
-# If a docker_version was requested, sanity check that we can install or upgrade to it, and
-# no downgrade is required.
-- name: Fail if Docker version requested but downgrade is required
- fail:
- msg: "Docker {{ curr_docker_version.stdout }} is installed, but version {{ docker_version }} was requested."
- when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and docker_version is defined and curr_docker_version.stdout | version_compare(docker_version, '>')
-
-# This involves an extremely slow migration process, users should instead run the
-# Docker 1.10 upgrade playbook to accomplish this.
-- name: Error out if attempting to upgrade Docker across the 1.10 boundary
- fail:
- msg: "Cannot upgrade Docker to >= 1.10, please upgrade or remove Docker manually, or use the Docker upgrade playbook if OpenShift is already installed."
- when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and curr_docker_version.stdout | version_compare('1.10', '<') and docker_version is defined and docker_version | version_compare('1.10', '>=')
-
-# Make sure Docker is installed, but does not update a running version.
-# Docker upgrades are handled by a separate playbook.
-- name: Install Docker
- package: name=docker{{ '-' + docker_version if docker_version is defined else '' }} state=present
- when: not openshift.common.is_atomic | bool
-
-- block:
- # Extend the default Docker service unit file when using iptables-services
- - name: Ensure docker.service.d directory exists
- file:
- path: "{{ docker_systemd_dir }}"
- state: directory
-
- - name: Configure Docker service unit file
- template:
- dest: "{{ docker_systemd_dir }}/custom.conf"
- src: custom.conf.j2
- when: not os_firewall_use_firewalld | default(True) | bool
+# These tasks dispatch to the proper set of docker tasks based on the
+# inventory:openshift_docker_use_system_container variable
- include: udev_workaround.yml
when: docker_udev_workaround | default(False) | bool
-- stat: path=/etc/sysconfig/docker
- register: docker_check
-
-- name: Set registry params
- lineinfile:
- dest: /etc/sysconfig/docker
- regexp: '^{{ item.reg_conf_var }}=.*$'
- line: "{{ item.reg_conf_var }}='{{ item.reg_fact_val | oo_prepend_strings_in_list(item.reg_flag ~ ' ') | join(' ') }}'"
- when: item.reg_fact_val != '' and docker_check.stat.isreg is defined and docker_check.stat.isreg
- with_items:
- - reg_conf_var: ADD_REGISTRY
- reg_fact_val: "{{ docker_additional_registries | default(None, true)}}"
- reg_flag: --add-registry
- - reg_conf_var: BLOCK_REGISTRY
- reg_fact_val: "{{ docker_blocked_registries| default(None, true) }}"
- reg_flag: --block-registry
- - reg_conf_var: INSECURE_REGISTRY
- reg_fact_val: "{{ docker_insecure_registries| default(None, true) }}"
- reg_flag: --insecure-registry
- notify:
- - restart docker
-
-- name: Set Proxy Settings
- lineinfile:
- dest: /etc/sysconfig/docker
- regexp: '^{{ item.reg_conf_var }}=.*$'
- line: "{{ item.reg_conf_var }}='{{ item.reg_fact_val }}'"
- state: "{{ 'present' if item.reg_fact_val != '' else 'absent'}}"
- with_items:
- - reg_conf_var: HTTP_PROXY
- reg_fact_val: "{{ docker_http_proxy | default('') }}"
- - reg_conf_var: HTTPS_PROXY
- reg_fact_val: "{{ docker_https_proxy | default('') }}"
- - reg_conf_var: NO_PROXY
- reg_fact_val: "{{ docker_no_proxy | default('') }}"
- notify:
- - restart docker
- when:
- - docker_check.stat.isreg is defined and docker_check.stat.isreg and '"http_proxy" in openshift.common or "https_proxy" in openshift.common'
-
-- name: Set various Docker options
- lineinfile:
- dest: /etc/sysconfig/docker
- regexp: '^OPTIONS=.*$'
- line: "OPTIONS='\
- {% if ansible_selinux.status | default(None) == '''enabled''' and docker_selinux_enabled | default(true) %} --selinux-enabled {% endif %}\
- {% if docker_log_driver is defined %} --log-driver {{ docker_log_driver }}{% endif %}\
- {% if docker_log_options is defined %} {{ docker_log_options | oo_split() | oo_prepend_strings_in_list('--log-opt ') | join(' ')}}{% endif %}\
- {% if docker_options is defined %} {{ docker_options }}{% endif %}\
- {% if docker_disable_push_dockerhub is defined %} --confirm-def-push={{ docker_disable_push_dockerhub | bool }}{% endif %}'"
- when: docker_check.stat.isreg is defined and docker_check.stat.isreg
- notify:
- - restart docker
-
-- name: Start the Docker service
- systemd:
- name: docker
- enabled: yes
- state: started
- daemon_reload: yes
- register: start_result
-
- set_fact:
- docker_service_status_changed: start_result | changed
+ l_use_system_container: "{{ openshift.docker.use_system_container | default(False) }}"
+
+- name: Use Package Docker if Requested
+ include: package_docker.yml
+ when: not l_use_system_container
-- meta: flush_handlers
+- name: Use System Container Docker if Requested
+ include: systemcontainer_docker.yml
+ when: l_use_system_container
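
The dispatch above keys off `l_use_system_container`, which is populated from the `openshift_docker_use_system_container` inventory variable named in the comment at the top of the file. A minimal sketch of an inventory opting into the system-container flow (file placement and spelling assumed from that comment, not verified against the installer docs):

```yaml
# group_vars/OSEv3.yml -- hypothetical inventory snippet
openshift_docker_use_system_container: true
# Leave openshift_docker_options unset in this mode; systemcontainer_docker.yml
# refuses to run when it is provided.
```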
diff --git a/roles/docker/tasks/package_docker.yml b/roles/docker/tasks/package_docker.yml
new file mode 100644
index 000000000..e101730d2
--- /dev/null
+++ b/roles/docker/tasks/package_docker.yml
@@ -0,0 +1,116 @@
+---
+- name: Get current installed Docker version
+ command: "{{ repoquery_cmd }} --installed --qf '%{version}' docker"
+ when: not openshift.common.is_atomic | bool
+ register: curr_docker_version
+ changed_when: false
+
+- name: Error out if Docker pre-installed but too old
+ fail:
+ msg: "Docker {{ curr_docker_version.stdout }} is installed, but >= 1.9.1 is required."
+ when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and curr_docker_version.stdout | version_compare('1.9.1', '<') and not docker_version is defined
+
+- name: Error out if requested Docker is too old
+ fail:
+ msg: "Docker {{ docker_version }} requested, but >= 1.9.1 is required."
+ when: docker_version is defined and docker_version | version_compare('1.9.1', '<')
+
+# If a docker_version was requested, sanity check that we can install or upgrade to it, and
+# no downgrade is required.
+- name: Fail if Docker version requested but downgrade is required
+ fail:
+ msg: "Docker {{ curr_docker_version.stdout }} is installed, but version {{ docker_version }} was requested."
+ when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and docker_version is defined and curr_docker_version.stdout | version_compare(docker_version, '>')
+
+# This involves an extremely slow migration process, users should instead run the
+# Docker 1.10 upgrade playbook to accomplish this.
+- name: Error out if attempting to upgrade Docker across the 1.10 boundary
+ fail:
+ msg: "Cannot upgrade Docker to >= 1.10, please upgrade or remove Docker manually, or use the Docker upgrade playbook if OpenShift is already installed."
+ when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and curr_docker_version.stdout | version_compare('1.10', '<') and docker_version is defined and docker_version | version_compare('1.10', '>=')
+
+# Make sure Docker is installed, but does not update a running version.
+# Docker upgrades are handled by a separate playbook.
+- name: Install Docker
+ package: name=docker{{ '-' + docker_version if docker_version is defined else '' }} state=present
+ when: not openshift.common.is_atomic | bool
+
+- block:
+ # Extend the default Docker service unit file when using iptables-services
+ - name: Ensure docker.service.d directory exists
+ file:
+ path: "{{ docker_systemd_dir }}"
+ state: directory
+
+ - name: Configure Docker service unit file
+ template:
+ dest: "{{ docker_systemd_dir }}/custom.conf"
+ src: custom.conf.j2
+ when: not os_firewall_use_firewalld | default(False) | bool
+
+- stat: path=/etc/sysconfig/docker
+ register: docker_check
+
+- name: Set registry params
+ lineinfile:
+ dest: /etc/sysconfig/docker
+ regexp: '^{{ item.reg_conf_var }}=.*$'
+ line: "{{ item.reg_conf_var }}='{{ item.reg_fact_val | oo_prepend_strings_in_list(item.reg_flag ~ ' ') | join(' ') }}'"
+ when: item.reg_fact_val != '' and docker_check.stat.isreg is defined and docker_check.stat.isreg
+ with_items:
+ - reg_conf_var: ADD_REGISTRY
+ reg_fact_val: "{{ docker_additional_registries | default(None, true)}}"
+ reg_flag: --add-registry
+ - reg_conf_var: BLOCK_REGISTRY
+ reg_fact_val: "{{ docker_blocked_registries| default(None, true) }}"
+ reg_flag: --block-registry
+ - reg_conf_var: INSECURE_REGISTRY
+ reg_fact_val: "{{ docker_insecure_registries| default(None, true) }}"
+ reg_flag: --insecure-registry
+ notify:
+ - restart docker
+
+- name: Set Proxy Settings
+ lineinfile:
+ dest: /etc/sysconfig/docker
+ regexp: '^{{ item.reg_conf_var }}=.*$'
+ line: "{{ item.reg_conf_var }}='{{ item.reg_fact_val }}'"
+ state: "{{ 'present' if item.reg_fact_val != '' else 'absent'}}"
+ with_items:
+ - reg_conf_var: HTTP_PROXY
+ reg_fact_val: "{{ docker_http_proxy | default('') }}"
+ - reg_conf_var: HTTPS_PROXY
+ reg_fact_val: "{{ docker_https_proxy | default('') }}"
+ - reg_conf_var: NO_PROXY
+ reg_fact_val: "{{ docker_no_proxy | default('') }}"
+ notify:
+ - restart docker
+ when:
+ - docker_check.stat.isreg is defined and docker_check.stat.isreg and '"http_proxy" in openshift.common or "https_proxy" in openshift.common'
+
+- name: Set various Docker options
+ lineinfile:
+ dest: /etc/sysconfig/docker
+ regexp: '^OPTIONS=.*$'
+ line: "OPTIONS='\
+ {% if ansible_selinux.status | default(None) == '''enabled''' and docker_selinux_enabled | default(true) %} --selinux-enabled {% endif %}\
+ {% if docker_log_driver is defined %} --log-driver {{ docker_log_driver }}{% endif %}\
+ {% if docker_log_options is defined %} {{ docker_log_options | oo_split() | oo_prepend_strings_in_list('--log-opt ') | join(' ')}}{% endif %}\
+ {% if docker_options is defined %} {{ docker_options }}{% endif %}\
+ {% if docker_disable_push_dockerhub is defined %} --confirm-def-push={{ docker_disable_push_dockerhub | bool }}{% endif %}'"
+ when: docker_check.stat.isreg is defined and docker_check.stat.isreg
+ notify:
+ - restart docker
+
+- name: Start the Docker service
+ systemd:
+ name: docker
+ enabled: yes
+ state: started
+ daemon_reload: yes
+ register: start_result
+
+- set_fact:
+ docker_service_status_changed: start_result | changed
+
+- meta: flush_handlers
diff --git a/roles/docker/tasks/systemcontainer_docker.yml b/roles/docker/tasks/systemcontainer_docker.yml
new file mode 100644
index 000000000..a461c479a
--- /dev/null
+++ b/roles/docker/tasks/systemcontainer_docker.yml
@@ -0,0 +1,144 @@
+---
+# If docker_options are provided we should fail. We should not install docker and ignore
+# the user's configuration. NOTE: docker_options == inventory:openshift_docker_options
+- name: Fail quickly if openshift_docker_options are set
+ assert:
+ that:
+ - docker_options is defined
+ - docker_options != ""
+ msg: |
+ Docker via System Container does not allow for the use of the openshift_docker_options
+ variable. If you want to use openshift_docker_options you will need to use the
+ traditional docker package install. Otherwise, comment out openshift_docker_options
+ in your inventory file.
+
+# Used to pull and install the system container
+- name: Ensure atomic is installed
+ package:
+ name: atomic
+ state: present
+ when: not openshift.common.is_atomic | bool
+
+# At the time of writing the atomic command requires runc for its own use. This
+# task is here in the event that the atomic package ever removes the dependency.
+- name: Ensure runc is installed
+ package:
+ name: runc
+ state: present
+ when: not openshift.common.is_atomic | bool
+
+# If we are on atomic, set http_proxy and https_proxy in /etc/atomic.conf
+- block:
+
+ - name: Add http_proxy to /etc/atomic.conf
+ lineinfile:
+ path: /etc/atomic.conf
+ line: "http_proxy={{ openshift.common.http_proxy | default('') }}"
+ when:
+ - openshift.common.http_proxy is defined
+ - openshift.common.http_proxy != ''
+
+ - name: Add https_proxy to /etc/atomic.conf
+ lineinfile:
+ path: /etc/atomic.conf
+ line: "https_proxy={{ openshift.common.https_proxy | default('') }}"
+ when:
+ - openshift.common.https_proxy is defined
+ - openshift.common.https_proxy != ''
+
+ when: openshift.common.is_atomic | bool
+
+
+- block:
+
+ - name: Set to default prepend
+ set_fact:
+ l_docker_image_prepend: "gscrivano"
+
+ - name: Use Red Hat Registry for image when distribution is Red Hat
+ set_fact:
+ l_docker_image_prepend: "registry.access.redhat.com/openshift3"
+ when: ansible_distribution == 'RedHat'
+
+ - name: Use Fedora Registry for image when distribution is Fedora
+ set_fact:
+ l_docker_image_prepend: "registry.fedoraproject.org"
+ when: ansible_distribution == 'Fedora'
+
+ # For https://github.com/openshift/openshift-ansible/pull/4049#discussion_r114478504
+ - name: Use a testing registry if requested
+ set_fact:
+ l_docker_image_prepend: "{{ openshift_docker_systemcontainer_image_registry_override }}"
+ when:
+ - openshift_docker_systemcontainer_image_registry_override is defined
+ - openshift_docker_systemcontainer_image_registry_override != ""
+
+ - name: Set the full image name
+ set_fact:
+ l_docker_image: "{{ l_docker_image_prepend }}/{{ openshift.docker.service_name }}:latest"
+
+- name: Pre-pull Container Engine System Container image
+ command: "atomic pull --storage ostree {{ l_docker_image }}"
+ changed_when: false
+
+# Make sure docker is disabled Errors are ignored as docker may not
+# be installed.
+- name: Disable Docker
+ systemd:
+ name: docker
+ enabled: no
+ state: stopped
+ daemon_reload: yes
+ ignore_errors: True
+
+- name: Ensure docker.service.d directory exists
+ file:
+ path: "{{ docker_systemd_dir }}"
+ state: directory
+
+- name: Ensure /etc/docker directory exists
+ file:
+ path: "{{ docker_conf_dir }}"
+ state: directory
+
+- name: Install Container Engine System Container
+ oc_atomic_container:
+ name: "{{ openshift.docker.service_name }}"
+ image: "{{ l_docker_image }}"
+ state: latest
+ values:
+ - "system-package=no"
+
+- name: Configure Container Engine Service File
+ template:
+ dest: "{{ docker_systemd_dir }}/custom.conf"
+ src: systemcontainercustom.conf.j2
+
+# Set local versions of facts that must be in json format for daemon.json
+# NOTE: When jinja2.9+ is used the daemon.json file can move to using tojson
+- set_fact:
+ l_docker_insecure_registries: "{{ docker_insecure_registries | default([]) | to_json }}"
+ l_docker_log_options: "{{ docker_log_options | default({}) | to_json }}"
+ l_docker_additional_registries: "{{ docker_additional_registries | default([]) | to_json }}"
+ l_docker_blocked_registries: "{{ docker_blocked_registries | default([]) | to_json }}"
+ l_docker_selinux_enabled: "{{ docker_selinux_enabled | default(true) | to_json }}"
+
+# Configure container-engine using the daemon.json file
+- name: Configure Container Engine
+ template:
+ dest: "{{ docker_conf_dir }}/daemon.json"
+ src: daemon.json
+
+# Enable and start the container-engine service
+- name: Start the Container Engine service
+ systemd:
+ name: "{{ openshift.docker.service_name }}"
+ enabled: yes
+ state: started
+ daemon_reload: yes
+ register: start_result
+
+- set_fact:
+ docker_service_status_changed: start_result | changed
+
+- meta: flush_handlers
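
The system-container flow above defaults the image prefix per distribution (gscrivano, registry.access.redhat.com/openshift3, or registry.fedoraproject.org) and lets a test registry override it. A hedged example of such an override (registry hostname is illustrative):

```yaml
# group_vars/OSEv3.yml -- hypothetical test-registry override
openshift_docker_systemcontainer_image_registry_override: "registry.example.com:5000/testing"
# The role then builds l_docker_image from this prefix and pulls it with
# `atomic pull --storage ostree` before installing it via oc_atomic_container.
```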
diff --git a/roles/docker/templates/daemon.json b/roles/docker/templates/daemon.json
new file mode 100644
index 000000000..c607e6afe
--- /dev/null
+++ b/roles/docker/templates/daemon.json
@@ -0,0 +1,21 @@
+{
+ "authorization-plugins": ["rhel-push-plugin"],
+ "default-runtime": "oci",
+ "containerd": "/run/containerd.sock",
+ "disable-legacy-registry": false,
+ "exec-opts": ["native.cgroupdriver=systemd"],
+ "insecure-registries": {{ l_docker_insecure_registries }},
+{% if docker_log_driver is defined %}
+ "log-driver": "{{ docker_log_driver }}",
+{%- endif %}
+ "log-opts": {{ l_docker_log_options }},
+ "runtimes": {
+ "oci": {
+ "path": "/usr/libexec/docker/docker-runc-current"
+ }
+ },
+ "selinux-enabled": {{ l_docker_selinux_enabled | lower }},
+ "add-registry": {{ l_docker_additional_registries }},
+ "block-registry": {{ l_docker_blocked_registries }},
+ "userland-proxy-path": "/usr/libexec/docker/docker-proxy-current"
+}
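
daemon.json is rendered from the `l_docker_*` facts set in systemcontainer_docker.yml, which in turn default from the same `docker_*` registry and logging variables used by the package install path. A sketch of inventory values that would flow into the template (the `openshift_docker_*` inventory spellings are assumed; only the `docker_*` facts appear in the tasks above):

```yaml
# group_vars/OSEv3.yml -- hypothetical registry/logging settings
openshift_docker_insecure_registries:
  - "172.30.0.0/16"
openshift_docker_additional_registries:
  - "registry.example.com:5000"
openshift_docker_log_driver: journald
```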
diff --git a/roles/docker/templates/systemcontainercustom.conf.j2 b/roles/docker/templates/systemcontainercustom.conf.j2
new file mode 100644
index 000000000..1faad506a
--- /dev/null
+++ b/roles/docker/templates/systemcontainercustom.conf.j2
@@ -0,0 +1,17 @@
+# {{ ansible_managed }}
+
+[Service]
+{%- if "http_proxy" in openshift.common %}
+ENVIRONMENT=HTTP_PROXY={{ docker_http_proxy }}
+{%- endif -%}
+{%- if "https_proxy" in openshift.common %}
+ENVIRONMENT=HTTPS_PROXY={{ docker_http_proxy }}
+{%- endif -%}
+{%- if "no_proxy" in openshift.common %}
+ENVIRONMENT=NO_PROXY={{ docker_no_proxy }}
+{%- endif %}
+{%- if os_firewall_use_firewalld|default(false) %}
+[Unit]
+Wants=iptables.service
+After=iptables.service
+{%- endif %}
diff --git a/roles/docker/vars/main.yml b/roles/docker/vars/main.yml
index 5237ed8f2..0082ded1e 100644
--- a/roles/docker/vars/main.yml
+++ b/roles/docker/vars/main.yml
@@ -1,3 +1,4 @@
---
-udevw_udevd_dir: /etc/systemd/system/systemd-udevd.service.d
docker_systemd_dir: /etc/systemd/system/docker.service.d
+docker_conf_dir: /etc/docker/
+udevw_udevd_dir: /etc/systemd/system/systemd-udevd.service.d
diff --git a/roles/etcd/defaults/main.yaml b/roles/etcd/defaults/main.yaml
index 29153f4df..e45f53219 100644
--- a/roles/etcd/defaults/main.yaml
+++ b/roles/etcd/defaults/main.yaml
@@ -13,5 +13,4 @@ etcd_listen_peer_urls: "{{ etcd_peer_url_scheme }}://{{ etcd_ip }}:{{ etcd_peer_
etcd_advertise_client_urls: "{{ etcd_url_scheme }}://{{ etcd_ip }}:{{ etcd_client_port }}"
etcd_listen_client_urls: "{{ etcd_url_scheme }}://{{ etcd_ip }}:{{ etcd_client_port }}"
-etcd_data_dir: /var/lib/etcd/
etcd_systemd_dir: "/etc/systemd/system/{{ etcd_service }}.service.d"
diff --git a/roles/etcd/files/etcdctl.sh b/roles/etcd/files/etcdctl.sh
deleted file mode 100644
index 0e324a8a9..000000000
--- a/roles/etcd/files/etcdctl.sh
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/bin/bash
-# Sets up handy aliases for etcd, need etcdctl2 and etcdctl3 because
-# command flags are different between the two. Should work on stand
-# alone etcd hosts and master + etcd hosts too because we use the peer keys.
-etcdctl2() {
- /usr/bin/etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://`hostname`:2379 ${@}
-}
-
-etcdctl3() {
- ETCDCTL_API=3 /usr/bin/etcdctl --cert /etc/etcd/peer.crt --key /etc/etcd/peer.key --cacert /etc/etcd/ca.crt --endpoints https://`hostname`:2379 ${@}
-}
diff --git a/roles/etcd/meta/main.yml b/roles/etcd/meta/main.yml
index e0c70a181..689c07a84 100644
--- a/roles/etcd/meta/main.yml
+++ b/roles/etcd/meta/main.yml
@@ -24,3 +24,4 @@ dependencies:
- service: etcd peering
port: "{{ etcd_peer_port }}/tcp"
- role: etcd_server_certificates
+- role: etcd_common
diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml
index c09da3b61..fa2f44609 100644
--- a/roles/etcd/tasks/main.yml
+++ b/roles/etcd/tasks/main.yml
@@ -10,51 +10,45 @@
package: name=etcd{{ '-' + etcd_version if etcd_version is defined else '' }} state=present
when: not etcd_is_containerized | bool
-- name: Pull etcd container
- command: docker pull {{ openshift.etcd.etcd_image }}
- register: pull_result
- changed_when: "'Downloaded newer image' in pull_result.stdout"
+- block:
+ - name: Pull etcd container
+ command: docker pull {{ openshift.etcd.etcd_image }}
+ register: pull_result
+ changed_when: "'Downloaded newer image' in pull_result.stdout"
+
+ - name: Install etcd container service file
+ template:
+ dest: "/etc/systemd/system/etcd_container.service"
+ src: etcd.docker.service
when:
- etcd_is_containerized | bool
- not openshift.common.is_etcd_system_container | bool
-- name: Install etcd container service file
- template:
- dest: "/etc/systemd/system/etcd_container.service"
- src: etcd.docker.service
- when:
- - etcd_is_containerized | bool
- - not openshift.common.is_etcd_system_container | bool
-
-
# Start secondary etcd instance for third party integrations
# TODO: Determine an alternative to using thirdparty variable
-
-- name: Create configuration directory
- file:
- path: "{{ etcd_conf_dir }}"
- state: directory
- mode: 0700
- when: etcd_is_thirdparty | bool
+- block:
+ - name: Create configuration directory
+ file:
+ path: "{{ etcd_conf_dir }}"
+ state: directory
+ mode: 0700
# TODO: retest with symlink to confirm it does or does not function
-- name: Copy service file for etcd instance
- copy:
- src: /usr/lib/systemd/system/etcd.service
- dest: "/etc/systemd/system/{{ etcd_service }}.service"
- remote_src: True
- when: etcd_is_thirdparty | bool
-
-- name: Create third party etcd service.d directory exists
- file:
- path: "{{ etcd_systemd_dir }}"
- state: directory
- when: etcd_is_thirdparty | bool
-
-- name: Configure third part etcd service unit file
- template:
- dest: "{{ etcd_systemd_dir }}/custom.conf"
- src: custom.conf.j2
+ - name: Copy service file for etcd instance
+ copy:
+ src: /usr/lib/systemd/system/etcd.service
+ dest: "/etc/systemd/system/{{ etcd_service }}.service"
+ remote_src: True
+
+ - name: Create third party etcd service.d directory exists
+ file:
+ path: "{{ etcd_systemd_dir }}"
+ state: directory
+
+ - name: Configure third part etcd service unit file
+ template:
+ dest: "{{ etcd_systemd_dir }}/custom.conf"
+ src: custom.conf.j2
when: etcd_is_thirdparty
# TODO: this task may not be needed with Validate permissions
@@ -80,28 +74,28 @@
command: systemctl daemon-reload
when: etcd_is_thirdparty | bool
-- name: Disable system etcd when containerized
- systemd:
- name: etcd
- state: stopped
- enabled: no
- masked: yes
- daemon_reload: yes
- when:
- - etcd_is_containerized | bool
- - not openshift.common.is_etcd_system_container | bool
- register: task_result
- failed_when: "task_result|failed and 'could not' not in task_result.msg|lower"
-
-- name: Install etcd container service file
- template:
- dest: "/etc/systemd/system/etcd_container.service"
- src: etcd.docker.service
- when: etcd_is_containerized | bool and not openshift.common.is_etcd_system_container | bool
-
-- name: Install Etcd system container
- include: system_container.yml
- when: etcd_is_containerized | bool and openshift.common.is_etcd_system_container | bool
+- block:
+ - name: Disable system etcd when containerized
+ systemd:
+ name: etcd
+ state: stopped
+ enabled: no
+ masked: yes
+ daemon_reload: yes
+ when: not openshift.common.is_etcd_system_container | bool
+ register: task_result
+ failed_when: task_result|failed and 'could not' not in task_result.msg|lower
+
+ - name: Install etcd container service file
+ template:
+ dest: "/etc/systemd/system/etcd_container.service"
+ src: etcd.docker.service
+ when: not openshift.common.is_etcd_system_container | bool
+
+ - name: Install Etcd system container
+ include: system_container.yml
+ when: openshift.common.is_etcd_system_container | bool
+ when: etcd_is_containerized | bool
- name: Validate permissions on the config dir
file:
@@ -126,7 +120,9 @@
enabled: yes
register: start_result
-- include: etcdctl.yml
+- include_role:
+ name: etcd_common
+ tasks_from: etcdctl.yml
when: openshift_etcd_etcdctl_profile | default(true) | bool
- name: Set fact etcd_service_status_changed
diff --git a/roles/etcd/templates/etcd.docker.service b/roles/etcd/templates/etcd.docker.service
index ae059b549..adeca7a91 100644
--- a/roles/etcd/templates/etcd.docker.service
+++ b/roles/etcd/templates/etcd.docker.service
@@ -1,17 +1,17 @@
[Unit]
Description=The Etcd Server container
-After=docker.service
-Requires=docker.service
-PartOf=docker.service
+After={{ openshift.docker.service_name }}.service
+Requires={{ openshift.docker.service_name }}.service
+PartOf={{ openshift.docker.service_name }}.service
[Service]
-EnvironmentFile=/etc/etcd/etcd.conf
+EnvironmentFile={{ etcd_conf_file }}
ExecStartPre=-/usr/bin/docker rm -f {{ etcd_service }}
-ExecStart=/usr/bin/docker run --name {{ etcd_service }} --rm -v /var/lib/etcd:/var/lib/etcd:z -v /etc/etcd:/etc/etcd:ro --env-file=/etc/etcd/etcd.conf --net=host --entrypoint=/usr/bin/etcd {{ openshift.etcd.etcd_image }}
+ExecStart=/usr/bin/docker run --name {{ etcd_service }} --rm -v {{ etcd_data_dir }}:{{ etcd_data_dir }}:z -v {{ etcd_conf_dir }}:{{ etcd_conf_dir }}:ro --env-file={{ etcd_conf_file }} --net=host --entrypoint=/usr/bin/etcd {{ openshift.etcd.etcd_image }}
ExecStop=/usr/bin/docker stop {{ etcd_service }}
SyslogIdentifier=etcd_container
Restart=always
RestartSec=5s
[Install]
-WantedBy=docker.service
+WantedBy={{ openshift.docker.service_name }}.service
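
The unit file now reads `etcd_conf_file`, `etcd_conf_dir`, and `etcd_data_dir` from etcd_common instead of hard-coding /etc/etcd and /var/lib/etcd. A sketch of the values the template assumes, inferred from the paths the replaced lines hard-coded (only `etcd_data_dir` appears verbatim in the etcd_common change below):

```yaml
# etcd_common variables consumed by etcd.docker.service (values inferred)
etcd_data_dir: /var/lib/etcd/
etcd_conf_dir: /etc/etcd
etcd_conf_file: "{{ etcd_conf_dir }}/etcd.conf"
```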
diff --git a/roles/etcd_common/README.md b/roles/etcd_common/README.md
index 131a01490..d1c3a6602 100644
--- a/roles/etcd_common/README.md
+++ b/roles/etcd_common/README.md
@@ -1,17 +1,21 @@
etcd_common
========================
-TODO
+Common resources for dependent etcd roles. E.g. default variables for:
+* config directories
+* certificates
+* ports
+* other settings
-Requirements
-------------
-
-TODO
+Or `delegated_serial_command` ansible module for executing a command on a remote node. E.g.
-Role Variables
---------------
+```yaml
+- delegated_serial_command:
+ command: /usr/bin/make_database.sh arg1 arg2
+ creates: /path/to/database
+```
-TODO
+Or etcdctl.yml playbook for installation of `etcdctl` aliases on a node (see example).
Dependencies
------------
@@ -21,7 +25,22 @@ openshift-repos
Example Playbook
----------------
-TODO
+**Drop etcdctl aliases**
+
+```yaml
+- include_role:
+ name: etcd_common
+ tasks_from: etcdctl
+```
+
+**Get access to common variables**
+
+```yaml
+# meta.yml of etcd
+...
+dependencies:
+- { role: etcd_common }
+```
License
-------
diff --git a/roles/etcd_common/defaults/main.yml b/roles/etcd_common/defaults/main.yml
index c5efb0a0c..d12e6a07f 100644
--- a/roles/etcd_common/defaults/main.yml
+++ b/roles/etcd_common/defaults/main.yml
@@ -35,3 +35,6 @@ etcd_ip: "{{ ansible_default_ipv4.address }}"
etcd_is_atomic: False
etcd_is_containerized: False
etcd_is_thirdparty: False
+
+# etcd dir vars
+etcd_data_dir: /var/lib/etcd/
diff --git a/roles/etcd/tasks/etcdctl.yml b/roles/etcd_common/tasks/etcdctl.yml
index 649ad23c1..6cb456677 100644
--- a/roles/etcd/tasks/etcdctl.yml
+++ b/roles/etcd_common/tasks/etcdctl.yml
@@ -4,9 +4,9 @@
when: not openshift.common.is_atomic | bool
- name: Configure etcd profile.d alises
- copy:
- src: etcdctl.sh
- dest: /etc/profile.d/etcdctl.sh
+ template:
+ dest: "/etc/profile.d/etcdctl.sh"
+ src: etcdctl.sh.j2
mode: 0755
owner: root
group: root
diff --git a/roles/etcd_common/templates/etcdctl.sh.j2 b/roles/etcd_common/templates/etcdctl.sh.j2
new file mode 100644
index 000000000..ac7d9c72f
--- /dev/null
+++ b/roles/etcd_common/templates/etcdctl.sh.j2
@@ -0,0 +1,12 @@
+#!/bin/bash
+# Sets up handy aliases for etcd, need etcdctl2 and etcdctl3 because
+# command flags are different between the two. Should work on stand
+# alone etcd hosts and master + etcd hosts too because we use the peer keys.
+etcdctl2() {
+ /usr/bin/etcdctl --cert-file {{ etcd_peer_cert_file }} --key-file {{ etcd_peer_key_file }} --ca-file {{ etcd_peer_ca_file }} -C https://`hostname`:2379 ${@}
+
+}
+
+etcdctl3() {
+ ETCDCTL_API=3 /usr/bin/etcdctl --cert {{ etcd_peer_cert_file }} --key {{ etcd_peer_key_file }} --cacert {{ etcd_peer_ca_file }} --endpoints https://`hostname`:2379 ${@}
+}
diff --git a/roles/etcd_server_certificates/meta/main.yml b/roles/etcd_server_certificates/meta/main.yml
index 98c913dba..b453f2bd8 100644
--- a/roles/etcd_server_certificates/meta/main.yml
+++ b/roles/etcd_server_certificates/meta/main.yml
@@ -13,4 +13,4 @@ galaxy_info:
- cloud
- system
dependencies:
-- role: openshift_etcd_ca
+- role: etcd_ca
diff --git a/roles/flannel/handlers/main.yml b/roles/flannel/handlers/main.yml
index 94d1d18fb..c60c2115a 100644
--- a/roles/flannel/handlers/main.yml
+++ b/roles/flannel/handlers/main.yml
@@ -5,4 +5,6 @@
- name: restart docker
become: yes
- systemd: name=docker state=restarted
+ systemd:
+ name: "{{ openshift.docker.service_name }}"
+ state: restarted
diff --git a/roles/lib_openshift/library/oc_adm_ca_server_cert.py b/roles/lib_openshift/library/oc_adm_ca_server_cert.py
index 8a311cd0f..03d3e17c4 100644
--- a/roles/lib_openshift/library/oc_adm_ca_server_cert.py
+++ b/roles/lib_openshift/library/oc_adm_ca_server_cert.py
@@ -166,7 +166,7 @@ class YeditException(Exception): # pragma: no cover
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -1080,7 +1080,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
diff --git a/roles/lib_openshift/library/oc_adm_manage_node.py b/roles/lib_openshift/library/oc_adm_manage_node.py
index 0930faadb..7493b5c3d 100644
--- a/roles/lib_openshift/library/oc_adm_manage_node.py
+++ b/roles/lib_openshift/library/oc_adm_manage_node.py
@@ -152,7 +152,7 @@ class YeditException(Exception): # pragma: no cover
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -1066,7 +1066,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
diff --git a/roles/lib_openshift/library/oc_adm_policy_group.py b/roles/lib_openshift/library/oc_adm_policy_group.py
index 6a7be65d0..5e72f5954 100644
--- a/roles/lib_openshift/library/oc_adm_policy_group.py
+++ b/roles/lib_openshift/library/oc_adm_policy_group.py
@@ -138,7 +138,7 @@ class YeditException(Exception): # pragma: no cover
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -1052,7 +1052,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
diff --git a/roles/lib_openshift/library/oc_adm_policy_user.py b/roles/lib_openshift/library/oc_adm_policy_user.py
index 44923ecd2..371a3953b 100644
--- a/roles/lib_openshift/library/oc_adm_policy_user.py
+++ b/roles/lib_openshift/library/oc_adm_policy_user.py
@@ -138,7 +138,7 @@ class YeditException(Exception): # pragma: no cover
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -1052,7 +1052,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
diff --git a/roles/lib_openshift/library/oc_adm_registry.py b/roles/lib_openshift/library/oc_adm_registry.py
index 0604f48bb..7240521c6 100644
--- a/roles/lib_openshift/library/oc_adm_registry.py
+++ b/roles/lib_openshift/library/oc_adm_registry.py
@@ -256,7 +256,7 @@ class YeditException(Exception): # pragma: no cover
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -1170,7 +1170,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
@@ -2538,25 +2538,34 @@ class Registry(OpenShiftCLI):
def run_ansible(params, check_mode):
'''run idempotent ansible code'''
+ registry_options = {'images': {'value': params['images'], 'include': True},
+ 'latest_images': {'value': params['latest_images'], 'include': True},
+ 'labels': {'value': params['labels'], 'include': True},
+ 'ports': {'value': ','.join(params['ports']), 'include': True},
+ 'replicas': {'value': params['replicas'], 'include': True},
+ 'selector': {'value': params['selector'], 'include': True},
+ 'service_account': {'value': params['service_account'], 'include': True},
+ 'mount_host': {'value': params['mount_host'], 'include': True},
+ 'env_vars': {'value': params['env_vars'], 'include': False},
+ 'volume_mounts': {'value': params['volume_mounts'], 'include': False},
+ 'edits': {'value': params['edits'], 'include': False},
+ 'tls_key': {'value': params['tls_key'], 'include': True},
+ 'tls_certificate': {'value': params['tls_certificate'], 'include': True},
+ }
+
+ # Do not always pass the daemonset and enforce-quota parameters because they are not understood
+ # by old versions of oc.
+ # Default value is false. So, it's safe to not pass an explicit false value to oc versions which
+ # understand these parameters.
+ if params['daemonset']:
+ registry_options['daemonset'] = {'value': params['daemonset'], 'include': True}
+ if params['enforce_quota']:
+ registry_options['enforce_quota'] = {'value': params['enforce_quota'], 'include': True}
+
rconfig = RegistryConfig(params['name'],
params['namespace'],
params['kubeconfig'],
- {'images': {'value': params['images'], 'include': True},
- 'latest_images': {'value': params['latest_images'], 'include': True},
- 'labels': {'value': params['labels'], 'include': True},
- 'ports': {'value': ','.join(params['ports']), 'include': True},
- 'replicas': {'value': params['replicas'], 'include': True},
- 'selector': {'value': params['selector'], 'include': True},
- 'service_account': {'value': params['service_account'], 'include': True},
- 'mount_host': {'value': params['mount_host'], 'include': True},
- 'env_vars': {'value': params['env_vars'], 'include': False},
- 'volume_mounts': {'value': params['volume_mounts'], 'include': False},
- 'edits': {'value': params['edits'], 'include': False},
- 'enforce_quota': {'value': params['enforce_quota'], 'include': True},
- 'daemonset': {'value': params['daemonset'], 'include': True},
- 'tls_key': {'value': params['tls_key'], 'include': True},
- 'tls_certificate': {'value': params['tls_certificate'], 'include': True},
- })
+ registry_options)
ocregistry = Registry(rconfig, params['debug'])
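
With this change `--daemonset` and `--enforce-quota` are only passed to `oc adm registry` when they are actually set, so older `oc` clients that do not understand those flags keep working. A hedged sketch of invoking the module from a playbook (parameter names are the ones handled in run_ansible above; values are illustrative):

```yaml
# Hypothetical oc_adm_registry task; daemonset/enforce_quota are omitted here,
# so no extra flag is sent to oc unless they are explicitly set to true.
- name: Deploy the integrated registry
  oc_adm_registry:
    name: docker-registry
    namespace: default
    kubeconfig: /etc/origin/master/admin.kubeconfig
    replicas: 1
    selector: region=infra
    service_account: registry
```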
diff --git a/roles/lib_openshift/library/oc_adm_router.py b/roles/lib_openshift/library/oc_adm_router.py
index bdcf94a58..a54c62cd4 100644
--- a/roles/lib_openshift/library/oc_adm_router.py
+++ b/roles/lib_openshift/library/oc_adm_router.py
@@ -281,7 +281,7 @@ class YeditException(Exception): # pragma: no cover
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -1195,7 +1195,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
diff --git a/roles/lib_openshift/library/oc_clusterrole.py b/roles/lib_openshift/library/oc_clusterrole.py
index af48ce636..78c72ef26 100644
--- a/roles/lib_openshift/library/oc_clusterrole.py
+++ b/roles/lib_openshift/library/oc_clusterrole.py
@@ -130,7 +130,7 @@ class YeditException(Exception): # pragma: no cover
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -1044,7 +1044,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
diff --git a/roles/lib_openshift/library/oc_configmap.py b/roles/lib_openshift/library/oc_configmap.py
index 385ed888b..c88f56fc6 100644
--- a/roles/lib_openshift/library/oc_configmap.py
+++ b/roles/lib_openshift/library/oc_configmap.py
@@ -136,7 +136,7 @@ class YeditException(Exception): # pragma: no cover
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -1050,7 +1050,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
diff --git a/roles/lib_openshift/library/oc_edit.py b/roles/lib_openshift/library/oc_edit.py
index 649de547e..17e3f7dde 100644
--- a/roles/lib_openshift/library/oc_edit.py
+++ b/roles/lib_openshift/library/oc_edit.py
@@ -180,7 +180,7 @@ class YeditException(Exception): # pragma: no cover
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -1094,7 +1094,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
diff --git a/roles/lib_openshift/library/oc_env.py b/roles/lib_openshift/library/oc_env.py
index 74bf63353..18ab97bc0 100644
--- a/roles/lib_openshift/library/oc_env.py
+++ b/roles/lib_openshift/library/oc_env.py
@@ -147,7 +147,7 @@ class YeditException(Exception): # pragma: no cover
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -1061,7 +1061,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
diff --git a/roles/lib_openshift/library/oc_group.py b/roles/lib_openshift/library/oc_group.py
index 2dd3d28ec..88c6ef209 100644
--- a/roles/lib_openshift/library/oc_group.py
+++ b/roles/lib_openshift/library/oc_group.py
@@ -120,7 +120,7 @@ class YeditException(Exception): # pragma: no cover
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -1034,7 +1034,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
diff --git a/roles/lib_openshift/library/oc_image.py b/roles/lib_openshift/library/oc_image.py
index bb7f97689..45860cbe5 100644
--- a/roles/lib_openshift/library/oc_image.py
+++ b/roles/lib_openshift/library/oc_image.py
@@ -139,7 +139,7 @@ class YeditException(Exception): # pragma: no cover
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -1053,7 +1053,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
diff --git a/roles/lib_openshift/library/oc_label.py b/roles/lib_openshift/library/oc_label.py
index ec9abcda7..65923a698 100644
--- a/roles/lib_openshift/library/oc_label.py
+++ b/roles/lib_openshift/library/oc_label.py
@@ -156,7 +156,7 @@ class YeditException(Exception): # pragma: no cover
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -1070,7 +1070,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
diff --git a/roles/lib_openshift/library/oc_obj.py b/roles/lib_openshift/library/oc_obj.py
index 3abd50a2e..1d75a21b9 100644
--- a/roles/lib_openshift/library/oc_obj.py
+++ b/roles/lib_openshift/library/oc_obj.py
@@ -159,7 +159,7 @@ class YeditException(Exception): # pragma: no cover
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -1073,7 +1073,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
@@ -1548,7 +1548,7 @@ class OCObject(OpenShiftCLI):
if state == 'absent':
# verify it's not in our results
if (params['name'] is not None or params['selector'] is not None) and \
- (len(api_rval['results']) == 0 or len(api_rval['results'][0].getattr('items', [])) == 0):
+ (len(api_rval['results']) == 0 or len(api_rval['results'][0].get('items', [])) == 0):
return {'changed': False, 'state': state}
if check_mode:
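The getattr -> get change above fixes a genuine bug: the parsed oc result is a plain dict, which has no getattr() method. A tiny sketch with a hypothetical result shape:

    result = {'kind': 'List', 'items': []}      # assumed shape of one parsed oc result
    print(result.get('items', []))              # -> []
    # result.getattr('items', []) would raise AttributeError: dicts expose .get(), not .getattr()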
diff --git a/roles/lib_openshift/library/oc_objectvalidator.py b/roles/lib_openshift/library/oc_objectvalidator.py
index 0b9e734af..72add01f4 100644
--- a/roles/lib_openshift/library/oc_objectvalidator.py
+++ b/roles/lib_openshift/library/oc_objectvalidator.py
@@ -91,7 +91,7 @@ class YeditException(Exception): # pragma: no cover
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -1005,7 +1005,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
diff --git a/roles/lib_openshift/library/oc_process.py b/roles/lib_openshift/library/oc_process.py
index de5426c51..8e1ffe90f 100644
--- a/roles/lib_openshift/library/oc_process.py
+++ b/roles/lib_openshift/library/oc_process.py
@@ -148,7 +148,7 @@ class YeditException(Exception): # pragma: no cover
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -1062,7 +1062,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
diff --git a/roles/lib_openshift/library/oc_project.py b/roles/lib_openshift/library/oc_project.py
index 02cd810ce..a06852fd8 100644
--- a/roles/lib_openshift/library/oc_project.py
+++ b/roles/lib_openshift/library/oc_project.py
@@ -145,7 +145,7 @@ class YeditException(Exception): # pragma: no cover
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -1059,7 +1059,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
diff --git a/roles/lib_openshift/library/oc_pvc.py b/roles/lib_openshift/library/oc_pvc.py
index a9103ebf6..79673452d 100644
--- a/roles/lib_openshift/library/oc_pvc.py
+++ b/roles/lib_openshift/library/oc_pvc.py
@@ -140,7 +140,7 @@ class YeditException(Exception): # pragma: no cover
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -1054,7 +1054,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
diff --git a/roles/lib_openshift/library/oc_route.py b/roles/lib_openshift/library/oc_route.py
index f005adffc..ad705a6c5 100644
--- a/roles/lib_openshift/library/oc_route.py
+++ b/roles/lib_openshift/library/oc_route.py
@@ -190,7 +190,7 @@ class YeditException(Exception): # pragma: no cover
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -1104,7 +1104,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
diff --git a/roles/lib_openshift/library/oc_scale.py b/roles/lib_openshift/library/oc_scale.py
index 9dcb38216..291ac8b19 100644
--- a/roles/lib_openshift/library/oc_scale.py
+++ b/roles/lib_openshift/library/oc_scale.py
@@ -134,7 +134,7 @@ class YeditException(Exception): # pragma: no cover
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -1048,7 +1048,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
diff --git a/roles/lib_openshift/library/oc_secret.py b/roles/lib_openshift/library/oc_secret.py
index 2ac0abcec..df28df2bc 100644
--- a/roles/lib_openshift/library/oc_secret.py
+++ b/roles/lib_openshift/library/oc_secret.py
@@ -180,7 +180,7 @@ class YeditException(Exception): # pragma: no cover
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -1094,7 +1094,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
diff --git a/roles/lib_openshift/library/oc_service.py b/roles/lib_openshift/library/oc_service.py
index 0af695e08..e98f83cc3 100644
--- a/roles/lib_openshift/library/oc_service.py
+++ b/roles/lib_openshift/library/oc_service.py
@@ -186,7 +186,7 @@ class YeditException(Exception): # pragma: no cover
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -1100,7 +1100,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
diff --git a/roles/lib_openshift/library/oc_serviceaccount.py b/roles/lib_openshift/library/oc_serviceaccount.py
index ba8a1fdac..f00e9e4f6 100644
--- a/roles/lib_openshift/library/oc_serviceaccount.py
+++ b/roles/lib_openshift/library/oc_serviceaccount.py
@@ -132,7 +132,7 @@ class YeditException(Exception): # pragma: no cover
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -1046,7 +1046,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
diff --git a/roles/lib_openshift/library/oc_serviceaccount_secret.py b/roles/lib_openshift/library/oc_serviceaccount_secret.py
index 5bff7621c..6691495a6 100644
--- a/roles/lib_openshift/library/oc_serviceaccount_secret.py
+++ b/roles/lib_openshift/library/oc_serviceaccount_secret.py
@@ -132,7 +132,7 @@ class YeditException(Exception): # pragma: no cover
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -1046,7 +1046,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
diff --git a/roles/lib_openshift/library/oc_user.py b/roles/lib_openshift/library/oc_user.py
index 450a30f57..72f2fbf03 100644
--- a/roles/lib_openshift/library/oc_user.py
+++ b/roles/lib_openshift/library/oc_user.py
@@ -192,7 +192,7 @@ class YeditException(Exception): # pragma: no cover
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -1106,7 +1106,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
diff --git a/roles/lib_openshift/library/oc_version.py b/roles/lib_openshift/library/oc_version.py
index 0937df5a1..bc3340a94 100644
--- a/roles/lib_openshift/library/oc_version.py
+++ b/roles/lib_openshift/library/oc_version.py
@@ -104,7 +104,7 @@ class YeditException(Exception): # pragma: no cover
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -1018,7 +1018,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
diff --git a/roles/lib_openshift/library/oc_volume.py b/roles/lib_openshift/library/oc_volume.py
index d0e7e77e1..9dec0a6d4 100644
--- a/roles/lib_openshift/library/oc_volume.py
+++ b/roles/lib_openshift/library/oc_volume.py
@@ -80,6 +80,18 @@ options:
required: false
default: False
aliases: []
+ name:
+ description:
+ - Name of the object that is being queried.
+ required: false
+ default: None
+ aliases: []
+ vol_name:
+ description:
+ - Name of the volume that is being queried.
+ required: false
+ default: None
+ aliases: []
namespace:
description:
- The name of the namespace where the object lives
@@ -169,7 +181,7 @@ class YeditException(Exception): # pragma: no cover
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
@@ -1083,7 +1095,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
diff --git a/roles/lib_openshift/src/class/oc_adm_registry.py b/roles/lib_openshift/src/class/oc_adm_registry.py
index 720b44cdc..3c130fe28 100644
--- a/roles/lib_openshift/src/class/oc_adm_registry.py
+++ b/roles/lib_openshift/src/class/oc_adm_registry.py
@@ -331,25 +331,34 @@ class Registry(OpenShiftCLI):
def run_ansible(params, check_mode):
'''run idempotent ansible code'''
+ registry_options = {'images': {'value': params['images'], 'include': True},
+ 'latest_images': {'value': params['latest_images'], 'include': True},
+ 'labels': {'value': params['labels'], 'include': True},
+ 'ports': {'value': ','.join(params['ports']), 'include': True},
+ 'replicas': {'value': params['replicas'], 'include': True},
+ 'selector': {'value': params['selector'], 'include': True},
+ 'service_account': {'value': params['service_account'], 'include': True},
+ 'mount_host': {'value': params['mount_host'], 'include': True},
+ 'env_vars': {'value': params['env_vars'], 'include': False},
+ 'volume_mounts': {'value': params['volume_mounts'], 'include': False},
+ 'edits': {'value': params['edits'], 'include': False},
+ 'tls_key': {'value': params['tls_key'], 'include': True},
+ 'tls_certificate': {'value': params['tls_certificate'], 'include': True},
+ }
+
+ # Do not always pass the daemonset and enforce-quota parameters because they are not understood
+ # by old versions of oc.
+ # Their default value is false, so it is safe to omit an explicit false value even for oc
+ # versions that do understand these parameters.
+ if params['daemonset']:
+ registry_options['daemonset'] = {'value': params['daemonset'], 'include': True}
+ if params['enforce_quota']:
+ registry_options['enforce_quota'] = {'value': params['enforce_quota'], 'include': True}
+
rconfig = RegistryConfig(params['name'],
params['namespace'],
params['kubeconfig'],
- {'images': {'value': params['images'], 'include': True},
- 'latest_images': {'value': params['latest_images'], 'include': True},
- 'labels': {'value': params['labels'], 'include': True},
- 'ports': {'value': ','.join(params['ports']), 'include': True},
- 'replicas': {'value': params['replicas'], 'include': True},
- 'selector': {'value': params['selector'], 'include': True},
- 'service_account': {'value': params['service_account'], 'include': True},
- 'mount_host': {'value': params['mount_host'], 'include': True},
- 'env_vars': {'value': params['env_vars'], 'include': False},
- 'volume_mounts': {'value': params['volume_mounts'], 'include': False},
- 'edits': {'value': params['edits'], 'include': False},
- 'enforce_quota': {'value': params['enforce_quota'], 'include': True},
- 'daemonset': {'value': params['daemonset'], 'include': True},
- 'tls_key': {'value': params['tls_key'], 'include': True},
- 'tls_certificate': {'value': params['tls_certificate'], 'include': True},
- })
+ registry_options)
ocregistry = Registry(rconfig, params['debug'])
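A standalone sketch of the pattern introduced above (names are illustrative, not the module's API): build the always-safe options first and append a flag only when it is actually requested, so an older oc client is never handed a switch it does not understand.

    def build_registry_options(params):
        # options every supported oc version understands
        options = {'replicas': {'value': params['replicas'], 'include': True}}
        # flags added only when requested; their default (false) needs no explicit value
        if params.get('daemonset'):
            options['daemonset'] = {'value': params['daemonset'], 'include': True}
        if params.get('enforce_quota'):
            options['enforce_quota'] = {'value': params['enforce_quota'], 'include': True}
        return options

    print(build_registry_options({'replicas': 1, 'daemonset': False, 'enforce_quota': False}))
    # -> {'replicas': {'value': 1, 'include': True}}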
diff --git a/roles/lib_openshift/src/class/oc_obj.py b/roles/lib_openshift/src/class/oc_obj.py
index 89ee2f5a0..6f0da3d5c 100644
--- a/roles/lib_openshift/src/class/oc_obj.py
+++ b/roles/lib_openshift/src/class/oc_obj.py
@@ -117,7 +117,7 @@ class OCObject(OpenShiftCLI):
if state == 'absent':
# verify it's not in our results
if (params['name'] is not None or params['selector'] is not None) and \
- (len(api_rval['results']) == 0 or len(api_rval['results'][0].getattr('items', [])) == 0):
+ (len(api_rval['results']) == 0 or len(api_rval['results'][0].get('items', [])) == 0):
return {'changed': False, 'state': state}
if check_mode:
diff --git a/roles/lib_openshift/src/doc/volume b/roles/lib_openshift/src/doc/volume
index 1d04afeef..43ff78c9f 100644
--- a/roles/lib_openshift/src/doc/volume
+++ b/roles/lib_openshift/src/doc/volume
@@ -29,6 +29,18 @@ options:
required: false
default: False
aliases: []
+ name:
+ description:
+ - Name of the object that is being queried.
+ required: false
+ default: None
+ aliases: []
+ vol_name:
+ description:
+ - Name of the volume that is being queried.
+ required: false
+ default: None
+ aliases: []
namespace:
description:
- The name of the namespace where the object lives
diff --git a/roles/lib_openshift/src/lib/base.py b/roles/lib_openshift/src/lib/base.py
index fc1b6f1ec..2bf795e25 100644
--- a/roles/lib_openshift/src/lib/base.py
+++ b/roles/lib_openshift/src/lib/base.py
@@ -256,7 +256,7 @@ class OpenShiftCLI(object):
stdout, stderr = proc.communicate(input_data)
- return proc.returncode, stdout.decode(), stderr.decode()
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
diff --git a/roles/lib_openshift/src/test/integration/filter_plugins/filters.py b/roles/lib_openshift/src/test/integration/filter_plugins/filters.py
index 6990a11a8..f350bd25d 100644
--- a/roles/lib_openshift/src/test/integration/filter_plugins/filters.py
+++ b/roles/lib_openshift/src/test/integration/filter_plugins/filters.py
@@ -1,6 +1,5 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
-# vim: expandtab:tabstop=4:shiftwidth=4
'''
Custom filters for use in testing
'''
diff --git a/roles/lib_openshift/src/test/integration/oc_label.yml b/roles/lib_openshift/src/test/integration/oc_label.yml
index b4e721407..22cf687c5 100755
--- a/roles/lib_openshift/src/test/integration/oc_label.yml
+++ b/roles/lib_openshift/src/test/integration/oc_label.yml
@@ -15,7 +15,7 @@
- name: ensure needed vars are defined
fail:
msg: "{{ item }} not defined"
- when: "{{ item }} is not defined"
+ when: item is not defined
with_items:
- cli_master_test # ansible inventory instance to run playbook against
diff --git a/roles/lib_openshift/src/test/integration/oc_user.yml b/roles/lib_openshift/src/test/integration/oc_user.yml
index ad1f9d188..9b4290052 100755
--- a/roles/lib_openshift/src/test/integration/oc_user.yml
+++ b/roles/lib_openshift/src/test/integration/oc_user.yml
@@ -14,7 +14,7 @@
- name: ensure needed vars are defined
fail:
msg: "{{ item }} no defined"
- when: "{{ item}} is not defined"
+ when: item is not defined
with_items:
- cli_master_test # ansible inventory instance to run playbook against
diff --git a/roles/lib_openshift/src/test/unit/test_oc_adm_registry.py b/roles/lib_openshift/src/test/unit/test_oc_adm_registry.py
index 30e13ce4b..97cf86170 100755
--- a/roles/lib_openshift/src/test/unit/test_oc_adm_registry.py
+++ b/roles/lib_openshift/src/test/unit/test_oc_adm_registry.py
@@ -254,7 +254,7 @@ class RegistryTest(unittest.TestCase):
mock_cmd.assert_has_calls([
mock.call(['oc', 'get', 'dc', 'docker-registry', '-o', 'json', '-n', 'default'], None),
mock.call(['oc', 'get', 'svc', 'docker-registry', '-o', 'json', '-n', 'default'], None),
- mock.call(['oc', 'adm', 'registry', '--daemonset=False', '--enforce-quota=False',
+ mock.call(['oc', 'adm', 'registry',
'--ports=5000', '--replicas=1', '--selector=type=infra',
'--service-account=registry', '--dry-run=True', '-o', 'json', '-n', 'default'], None),
mock.call(['oc', 'create', '-f', mock.ANY, '-n', 'default'], None),
diff --git a/roles/lib_utils/library/repoquery.py b/roles/lib_utils/library/repoquery.py
index ee98470b0..cf33e48d5 100644
--- a/roles/lib_utils/library/repoquery.py
+++ b/roles/lib_utils/library/repoquery.py
@@ -34,6 +34,7 @@ import json # noqa: F401
import os # noqa: F401
import re # noqa: F401
import shutil # noqa: F401
+import tempfile # noqa: F401
try:
import ruamel.yaml as yaml # noqa: F401
@@ -421,15 +422,16 @@ class RepoqueryCLI(object):
class Repoquery(RepoqueryCLI):
''' Class to wrap the repoquery
'''
- # pylint: disable=too-many-arguments
+ # pylint: disable=too-many-arguments,too-many-instance-attributes
def __init__(self, name, query_type, show_duplicates,
- match_version, verbose):
+ match_version, ignore_excluders, verbose):
''' Constructor for YumList '''
super(Repoquery, self).__init__(None)
self.name = name
self.query_type = query_type
self.show_duplicates = show_duplicates
self.match_version = match_version
+ self.ignore_excluders = ignore_excluders
self.verbose = verbose
if self.match_version:
@@ -437,6 +439,8 @@ class Repoquery(RepoqueryCLI):
self.query_format = "%{version}|%{release}|%{arch}|%{repo}|%{version}-%{release}"
+ self.tmp_file = None
+
def build_cmd(self):
''' build the repoquery cmd options '''
@@ -448,6 +452,9 @@ class Repoquery(RepoqueryCLI):
if self.show_duplicates:
repo_cmd.append('--show-duplicates')
+ if self.ignore_excluders:
+ repo_cmd.append('--config=' + self.tmp_file.name)
+
repo_cmd.append(self.name)
return repo_cmd
@@ -519,6 +526,20 @@ class Repoquery(RepoqueryCLI):
def repoquery(self):
'''perform a repoquery '''
+ if self.ignore_excluders:
+ # Duplicate yum.conf and reset the exclude= line to an empty string
+ # to clear the list of excluded packages
+ self.tmp_file = tempfile.NamedTemporaryFile()
+
+ with open("/etc/yum.conf", "r") as file_handler:
+ yum_conf_lines = file_handler.readlines()
+
+ yum_conf_lines = ["exclude=" if l.startswith("exclude=") else l for l in yum_conf_lines]
+
+ with open(self.tmp_file.name, "w") as file_handler:
+ file_handler.writelines(yum_conf_lines)
+ file_handler.flush()
+
repoquery_cmd = self.build_cmd()
rval = self._repoquery_cmd(repoquery_cmd, True, 'raw')
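A self-contained sketch of the technique used above, with sample yum.conf content standing in for /etc/yum.conf: copy the configuration with every exclude= line blanked into a temporary file and point repoquery at it via --config.

    import tempfile

    sample_yum_conf = "[main]\ngpgcheck=1\nexclude=atomic-openshift* docker*\n"   # illustrative content
    lines = ["exclude=\n" if line.startswith("exclude=") else line
             for line in sample_yum_conf.splitlines(True)]

    tmp = tempfile.NamedTemporaryFile(mode="w", suffix=".conf")
    tmp.writelines(lines)
    tmp.flush()
    print("--config=" + tmp.name)   # what the module appends to the repoquery command
    tmp.close()                     # closing removes the temporary copy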
@@ -541,6 +562,9 @@ class Repoquery(RepoqueryCLI):
else:
rval['package_found'] = False
+ if self.ignore_excluders:
+ self.tmp_file.close()
+
return rval
@staticmethod
@@ -552,6 +576,7 @@ class Repoquery(RepoqueryCLI):
params['query_type'],
params['show_duplicates'],
params['match_version'],
+ params['ignore_excluders'],
params['verbose'],
)
@@ -592,6 +617,7 @@ def main():
verbose=dict(default=False, required=False, type='bool'),
show_duplicates=dict(default=False, required=False, type='bool'),
match_version=dict(default=None, required=False, type='str'),
+ ignore_excluders=dict(default=False, required=False, type='bool'),
),
supports_check_mode=False,
required_if=[('show_duplicates', True, ['name'])],
diff --git a/roles/lib_utils/library/yedit.py b/roles/lib_utils/library/yedit.py
index 9adaeeb52..baf72fe47 100644
--- a/roles/lib_utils/library/yedit.py
+++ b/roles/lib_utils/library/yedit.py
@@ -34,6 +34,7 @@ import json # noqa: F401
import os # noqa: F401
import re # noqa: F401
import shutil # noqa: F401
+import tempfile # noqa: F401
try:
import ruamel.yaml as yaml # noqa: F401
@@ -212,7 +213,7 @@ class YeditException(Exception):
class Yedit(object):
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
diff --git a/roles/lib_utils/src/ansible/repoquery.py b/roles/lib_utils/src/ansible/repoquery.py
index cb4efa6c1..40773b1c1 100644
--- a/roles/lib_utils/src/ansible/repoquery.py
+++ b/roles/lib_utils/src/ansible/repoquery.py
@@ -18,6 +18,7 @@ def main():
verbose=dict(default=False, required=False, type='bool'),
show_duplicates=dict(default=False, required=False, type='bool'),
match_version=dict(default=None, required=False, type='str'),
+ ignore_excluders=dict(default=False, required=False, type='bool'),
),
supports_check_mode=False,
required_if=[('show_duplicates', True, ['name'])],
diff --git a/roles/lib_utils/src/class/repoquery.py b/roles/lib_utils/src/class/repoquery.py
index 82adcada5..28e3a3e89 100644
--- a/roles/lib_utils/src/class/repoquery.py
+++ b/roles/lib_utils/src/class/repoquery.py
@@ -5,15 +5,16 @@
class Repoquery(RepoqueryCLI):
''' Class to wrap the repoquery
'''
- # pylint: disable=too-many-arguments
+ # pylint: disable=too-many-arguments,too-many-instance-attributes
def __init__(self, name, query_type, show_duplicates,
- match_version, verbose):
+ match_version, ignore_excluders, verbose):
''' Constructor for YumList '''
super(Repoquery, self).__init__(None)
self.name = name
self.query_type = query_type
self.show_duplicates = show_duplicates
self.match_version = match_version
+ self.ignore_excluders = ignore_excluders
self.verbose = verbose
if self.match_version:
@@ -21,6 +22,8 @@ class Repoquery(RepoqueryCLI):
self.query_format = "%{version}|%{release}|%{arch}|%{repo}|%{version}-%{release}"
+ self.tmp_file = None
+
def build_cmd(self):
''' build the repoquery cmd options '''
@@ -32,6 +35,9 @@ class Repoquery(RepoqueryCLI):
if self.show_duplicates:
repo_cmd.append('--show-duplicates')
+ if self.ignore_excluders:
+ repo_cmd.append('--config=' + self.tmp_file.name)
+
repo_cmd.append(self.name)
return repo_cmd
@@ -103,6 +109,20 @@ class Repoquery(RepoqueryCLI):
def repoquery(self):
'''perform a repoquery '''
+ if self.ignore_excluders:
+ # Duplicate yum.conf and reset the exclude= line to an empty string
+ # to clear the list of excluded packages
+ self.tmp_file = tempfile.NamedTemporaryFile()
+
+ with open("/etc/yum.conf", "r") as file_handler:
+ yum_conf_lines = file_handler.readlines()
+
+ yum_conf_lines = ["exclude=" if l.startswith("exclude=") else l for l in yum_conf_lines]
+
+ with open(self.tmp_file.name, "w") as file_handler:
+ file_handler.writelines(yum_conf_lines)
+ file_handler.flush()
+
repoquery_cmd = self.build_cmd()
rval = self._repoquery_cmd(repoquery_cmd, True, 'raw')
@@ -125,6 +145,9 @@ class Repoquery(RepoqueryCLI):
else:
rval['package_found'] = False
+ if self.ignore_excluders:
+ self.tmp_file.close()
+
return rval
@staticmethod
@@ -136,6 +159,7 @@ class Repoquery(RepoqueryCLI):
params['query_type'],
params['show_duplicates'],
params['match_version'],
+ params['ignore_excluders'],
params['verbose'],
)
diff --git a/roles/lib_utils/src/class/yedit.py b/roles/lib_utils/src/class/yedit.py
index e0a27012f..957c35a06 100644
--- a/roles/lib_utils/src/class/yedit.py
+++ b/roles/lib_utils/src/class/yedit.py
@@ -11,7 +11,7 @@ class YeditException(Exception):
class Yedit(object):
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
diff --git a/roles/lib_utils/src/lib/import.py b/roles/lib_utils/src/lib/import.py
index b0ab7c92c..567f8c9e0 100644
--- a/roles/lib_utils/src/lib/import.py
+++ b/roles/lib_utils/src/lib/import.py
@@ -9,6 +9,7 @@ import json # noqa: F401
import os # noqa: F401
import re # noqa: F401
import shutil # noqa: F401
+import tempfile # noqa: F401
try:
import ruamel.yaml as yaml # noqa: F401
diff --git a/roles/lib_utils/src/test/unit/test_repoquery.py b/roles/lib_utils/src/test/unit/test_repoquery.py
index e39d9d83f..9991ecd14 100755
--- a/roles/lib_utils/src/test/unit/test_repoquery.py
+++ b/roles/lib_utils/src/test/unit/test_repoquery.py
@@ -37,6 +37,7 @@ class RepoQueryTest(unittest.TestCase):
'verbose': False,
'show_duplicates': False,
'match_version': None,
+ 'ignore_excluders': False,
}
valid_stderr = '''Repo rhel-7-server-extras-rpms forced skip_if_unavailable=True due to: /etc/pki/entitlement/3268107132875399464-key.pem
diff --git a/roles/openshift_ca/tasks/main.yml b/roles/openshift_ca/tasks/main.yml
index 3b17d9ed6..c7b906949 100644
--- a/roles/openshift_ca/tasks/main.yml
+++ b/roles/openshift_ca/tasks/main.yml
@@ -95,7 +95,7 @@
{% for legacy_ca_certificate in g_master_legacy_ca_result.files | default([]) | oo_collect('path') %}
--certificate-authority {{ legacy_ca_certificate }}
{% endfor %}
- --hostnames={{ openshift.common.all_hostnames | join(',') }}
+ --hostnames={{ hostvars[openshift_ca_host].openshift.common.all_hostnames | join(',') }}
--master={{ openshift.master.api_url }}
--public-master={{ openshift.master.public_api_url }}
--cert-dir={{ openshift_ca_config_dir }}
diff --git a/roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py b/roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py
index 5f102e960..a2bc9ecdb 100644
--- a/roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py
+++ b/roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py
@@ -1,6 +1,5 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
-# vim: expandtab:tabstop=4:shiftwidth=4
"""
Custom filters for use in openshift-ansible
"""
@@ -35,7 +34,7 @@ Example playbook usage:
become: no
run_once: yes
delegate_to: localhost
- when: "{{ openshift_certificate_expiry_save_json_results|bool }}"
+ when: openshift_certificate_expiry_save_json_results|bool
copy:
content: "{{ hostvars|oo_cert_expiry_results_to_json() }}"
dest: "{{ openshift_certificate_expiry_json_results_path }}"
diff --git a/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py b/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py
index c204b5341..0242f5b43 100644
--- a/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py
+++ b/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py
@@ -135,7 +135,7 @@ platforms missing the Python OpenSSL library.
continue
elif l.startswith('Subject:'):
- # O=system:nodes, CN=system:node:m01.example.com
+ # O = system:nodes, CN = system:node:m01.example.com
self.subject = FakeOpenSSLCertificateSubjects(l.partition(': ')[-1])
def get_serial_number(self):
@@ -202,7 +202,7 @@ object"""
"""
self.subjects = []
for s in subject_string.split(', '):
- name, _, value = s.partition('=')
+ name, _, value = s.partition(' = ')
self.subjects.append((name, value))
def get_components(self):
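The partition(' = ') change pairs with the -nameopt oneline switch added to the test fixture further down: in that output format openssl prints each subject component with spaces around '='. A small sketch using the sample subject from the comment above:

    subject = "O = system:nodes, CN = system:node:m01.example.com"
    components = [s.partition(' = ') for s in subject.split(', ')]
    print([(name, value) for name, _sep, value in components])
    # -> [('O', 'system:nodes'), ('CN', 'system:node:m01.example.com')]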
diff --git a/roles/openshift_certificate_expiry/tasks/main.yml b/roles/openshift_certificate_expiry/tasks/main.yml
index 139d5de6e..b5234bd1e 100644
--- a/roles/openshift_certificate_expiry/tasks/main.yml
+++ b/roles/openshift_certificate_expiry/tasks/main.yml
@@ -13,12 +13,12 @@
src: cert-expiry-table.html.j2
dest: "{{ openshift_certificate_expiry_html_report_path }}"
delegate_to: localhost
- when: "{{ openshift_certificate_expiry_generate_html_report|bool }}"
+ when: openshift_certificate_expiry_generate_html_report|bool
- name: Generate the result JSON string
run_once: yes
set_fact: json_result_string="{{ hostvars|oo_cert_expiry_results_to_json(play_hosts) }}"
- when: "{{ openshift_certificate_expiry_save_json_results|bool }}"
+ when: openshift_certificate_expiry_save_json_results|bool
- name: Generate results JSON file
become: no
@@ -27,4 +27,4 @@
src: save_json_results.j2
dest: "{{ openshift_certificate_expiry_json_results_path }}"
delegate_to: localhost
- when: "{{ openshift_certificate_expiry_save_json_results|bool }}"
+ when: openshift_certificate_expiry_save_json_results|bool
diff --git a/roles/openshift_certificate_expiry/test/test_fakeopensslclasses.py b/roles/openshift_certificate_expiry/test/test_fakeopensslclasses.py
index ccdd48fa8..8a521a765 100644
--- a/roles/openshift_certificate_expiry/test/test_fakeopensslclasses.py
+++ b/roles/openshift_certificate_expiry/test/test_fakeopensslclasses.py
@@ -17,7 +17,8 @@ from openshift_cert_expiry import FakeOpenSSLCertificate # noqa: E402
@pytest.fixture(scope='module')
def fake_valid_cert(valid_cert):
- cmd = ['openssl', 'x509', '-in', str(valid_cert['cert_file']), '-text']
+ cmd = ['openssl', 'x509', '-in', str(valid_cert['cert_file']), '-text',
+ '-nameopt', 'oneline']
cert = subprocess.check_output(cmd)
return FakeOpenSSLCertificate(cert.decode('utf8'))
diff --git a/roles/openshift_cli/library/openshift_container_binary_sync.py b/roles/openshift_cli/library/openshift_container_binary_sync.py
index 4ed3e1f01..57ac16602 100644
--- a/roles/openshift_cli/library/openshift_container_binary_sync.py
+++ b/roles/openshift_cli/library/openshift_container_binary_sync.py
@@ -1,8 +1,6 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
-# vim: expandtab:tabstop=4:shiftwidth=4
# pylint: disable=missing-docstring,invalid-name
-#
import random
import tempfile
diff --git a/roles/openshift_cloud_provider/tasks/openstack.yml b/roles/openshift_cloud_provider/tasks/openstack.yml
index f22dd4520..5788e6d74 100644
--- a/roles/openshift_cloud_provider/tasks/openstack.yml
+++ b/roles/openshift_cloud_provider/tasks/openstack.yml
@@ -7,4 +7,4 @@
template:
dest: "{{ openshift.common.config_base }}/cloudprovider/openstack.conf"
src: openstack.conf.j2
- when: "openshift_cloudprovider_openstack_auth_url is defined and openshift_cloudprovider_openstack_username is defined and openshift_cloudprovider_openstack_password is defined and (openshift_cloudprovider_openstack_tenant_id is defined or openshift_cloudprovider_openstack_tenant_name is defined)"
+ when: openshift_cloudprovider_openstack_auth_url is defined and openshift_cloudprovider_openstack_username is defined and openshift_cloudprovider_openstack_password is defined and (openshift_cloudprovider_openstack_tenant_id is defined or openshift_cloudprovider_openstack_tenant_name is defined)
diff --git a/roles/openshift_docker_facts/tasks/main.yml b/roles/openshift_docker_facts/tasks/main.yml
index 049ceffe0..350512452 100644
--- a/roles/openshift_docker_facts/tasks/main.yml
+++ b/roles/openshift_docker_facts/tasks/main.yml
@@ -16,6 +16,7 @@
disable_push_dockerhub: "{{ openshift_disable_push_dockerhub | default(None) }}"
hosted_registry_insecure: "{{ openshift_docker_hosted_registry_insecure | default(openshift.docker.hosted_registry_insecure | default(False)) }}"
hosted_registry_network: "{{ openshift_docker_hosted_registry_network | default(None) }}"
+ use_system_container: "{{ openshift_docker_use_system_container | default(False) }}"
- set_fact:
docker_additional_registries: "{{ openshift.docker.additional_registries
diff --git a/roles/openshift_etcd_ca/tasks/main.yml b/roles/openshift_etcd_ca/tasks/main.yml
deleted file mode 100644
index ed97d539c..000000000
--- a/roles/openshift_etcd_ca/tasks/main.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/roles/openshift_excluder/README.md b/roles/openshift_excluder/README.md
index e048bd107..df45c28bf 100644
--- a/roles/openshift_excluder/README.md
+++ b/roles/openshift_excluder/README.md
@@ -25,16 +25,19 @@ None
Dependencies
------------
+- openshift_facts
+- openshift_repos
+- lib_utils
Tasks to include
----------------
-- exclude: enable excluders (assuming excluders are installed)
-- unexclude: disable excluders (assuming excluders are installed)
+- exclude: enable excluders
+- unexclude: disable excluders
- install: install excluders (installation is followed by excluder enabling)
-- enable: enable excluders (optionally with installation step)
-- disabled: disable excluders (optionally with installation and status step, the status check that can override which excluder gets enabled/disabled)
-- status: determine status of excluders
+- enable: enable excluders (install excluder(s) if not installed)
+- disabled: disable excluders (install excluder(s) if not installed)
+
Example Playbook
----------------
diff --git a/roles/openshift_excluder/meta/main.yml b/roles/openshift_excluder/meta/main.yml
index 4d1c1efca..c6081cdb2 100644
--- a/roles/openshift_excluder/meta/main.yml
+++ b/roles/openshift_excluder/meta/main.yml
@@ -14,3 +14,4 @@ galaxy_info:
dependencies:
- { role: openshift_facts }
- { role: openshift_repos }
+- { role: lib_utils }
diff --git a/roles/openshift_excluder/tasks/verify_excluder.yml b/roles/openshift_excluder/tasks/verify_excluder.yml
new file mode 100644
index 000000000..aebdb8c58
--- /dev/null
+++ b/roles/openshift_excluder/tasks/verify_excluder.yml
@@ -0,0 +1,35 @@
+---
+# input variables:
+# - repoquery_cmd
+# - excluder
+# - openshift_upgrade_target
+- block:
+ - name: Get available excluder version
+ repoquery:
+ name: "{{ excluder }}"
+ ignore_excluders: true
+ register: excluder_out
+
+ - fail:
+ msg: "Package {{ excluder }} not found"
+ when: not excluder_out.results.package_found
+
+ - set_fact:
+ excluder_version: "{{ excluder_out.results.versions.available_versions.0 }}"
+
+ - name: "{{ excluder }} version detected"
+ debug:
+ msg: "{{ excluder }}: {{ excluder_version }}"
+
+ - name: Printing upgrade target version
+ debug:
+ msg: "{{ openshift_upgrade_target }}"
+
+ - name: Check that the available {{ excluder }} version is at most the upgrade target version
+ fail:
+ msg: "Available {{ excluder }} version {{ excluder_version }} is higher than the upgrade target version"
+ when:
+ - "{{ excluder_version != '' }}"
+ - "{{ excluder_version.split('.')[0:2] | join('.') | version_compare(openshift_upgrade_target.split('.')[0:2] | join('.'), '>', strict=True) }}"
+ when:
+ - not openshift.common.is_atomic | bool
diff --git a/roles/openshift_excluder/tasks/verify_upgrade.yml b/roles/openshift_excluder/tasks/verify_upgrade.yml
new file mode 100644
index 000000000..6ea2130ac
--- /dev/null
+++ b/roles/openshift_excluder/tasks/verify_upgrade.yml
@@ -0,0 +1,15 @@
+---
+# input variables
+# - repoquery_cmd
+# - openshift_upgrade_target
+- include: init.yml
+
+- include: verify_excluder.yml
+ vars:
+ excluder: "{{ openshift.common.service_type }}-docker-excluder"
+ when: docker_excluder_on
+
+- include: verify_excluder.yml
+ vars:
+ excluder: "{{ openshift.common.service_type }}-excluder"
+ when: openshift_excluder_on
diff --git a/roles/openshift_expand_partition/tasks/main.yml b/roles/openshift_expand_partition/tasks/main.yml
index 00603f4fa..4cb5418c6 100644
--- a/roles/openshift_expand_partition/tasks/main.yml
+++ b/roles/openshift_expand_partition/tasks/main.yml
@@ -6,7 +6,7 @@
- name: Determine if growpart is installed
command: "rpm -q cloud-utils-growpart"
register: has_growpart
- failed_when: "has_growpart.cr != 0 and 'package cloud-utils-growpart is not installed' not in has_growpart.stdout"
+ failed_when: has_growpart.cr != 0 and 'package cloud-utils-growpart is not installed' not in has_growpart.stdout
changed_when: false
when: openshift.common.is_containerized | bool
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index ca0279426..914e46c05 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -1,7 +1,6 @@
#!/usr/bin/python
# pylint: disable=too-many-lines
# -*- coding: utf-8 -*-
-# vim: expandtab:tabstop=4:shiftwidth=4
# Reason: Disable pylint too-many-lines because we don't want to split up this file.
# Status: Permanently disabled to keep this module as self-contained as possible.
@@ -1792,6 +1791,12 @@ def set_container_facts_if_unset(facts):
deployer_image = 'openshift/origin-deployer'
facts['common']['is_atomic'] = os.path.isfile('/run/ostree-booted')
+ # If openshift_docker_use_system_container is set and is True ....
+ if 'use_system_container' in list(facts['docker'].keys()):
+ if facts['docker']['use_system_container']:
+ # ... set the service name to container-engine
+ facts['docker']['service_name'] = 'container-engine'
+
if 'is_containerized' not in facts['common']:
facts['common']['is_containerized'] = facts['common']['is_atomic']
if 'cli_image' not in facts['common']:
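Illustrative only (toy facts dict, not the module's full structure): how the added branch resolves the docker unit name that templates such as haproxy.docker.service.j2 consume later in this patch.

    facts = {'docker': {'service_name': 'docker', 'use_system_container': True}}
    if facts['docker'].get('use_system_container'):
        facts['docker']['service_name'] = 'container-engine'
    print(facts['docker']['service_name'])   # -> container-engine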
@@ -1911,14 +1916,16 @@ class OpenShiftFacts(object):
)
self.role = role
+ # Collect system facts and preface each fact with 'ansible_'.
try:
- # ansible-2.1
# pylint: disable=too-many-function-args,invalid-name
self.system_facts = ansible_facts(module, ['hardware', 'network', 'virtual', 'facter']) # noqa: F405
+ additional_facts = {}
for (k, v) in self.system_facts.items():
- self.system_facts["ansible_%s" % k.replace('-', '_')] = v
+ additional_facts["ansible_%s" % k.replace('-', '_')] = v
+ self.system_facts.update(additional_facts)
except UnboundLocalError:
- # ansible-2.2
+ # ansible-2.2,2.3
self.system_facts = get_all_facts(module)['ansible_facts'] # noqa: F405
self.facts = self.generate_facts(local_facts,
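A minimal sketch of why the prefixed keys are now collected in a side dict first (toy data, not real Ansible facts): adding keys to system_facts while iterating over its items() view raises RuntimeError on Python 3, whereas updating after the loop is safe.

    system_facts = {'hostname': 'node1', 'os_family': 'RedHat'}
    additional_facts = {}
    for key, value in system_facts.items():
        additional_facts['ansible_%s' % key.replace('-', '_')] = value
    system_facts.update(additional_facts)
    print(sorted(system_facts))
    # -> ['ansible_hostname', 'ansible_os_family', 'hostname', 'os_family']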
@@ -2072,6 +2079,7 @@ class OpenShiftFacts(object):
hosted_registry_insecure = get_hosted_registry_insecure()
if hosted_registry_insecure is not None:
docker['hosted_registry_insecure'] = hosted_registry_insecure
+ docker['service_name'] = 'docker'
defaults['docker'] = docker
if 'clock' in roles:
@@ -2159,7 +2167,9 @@ class OpenShiftFacts(object):
glusterfs=dict(
endpoints='glusterfs-registry-endpoints',
path='glusterfs-registry-volume',
- readOnly=False),
+ readOnly=False,
+ swap=False,
+ swapcopy=True),
host=None,
access=dict(
modes=['ReadWriteMany']
diff --git a/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py b/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py
index 208e81048..7bce7f107 100644
--- a/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py
+++ b/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py
@@ -1,4 +1,3 @@
-# vim: expandtab:tabstop=4:shiftwidth=4
'''
Ansible callback plugin.
'''
diff --git a/roles/openshift_health_checker/library/aos_version.py b/roles/openshift_health_checker/library/aos_version.py
index a46589443..4460ec324 100755
--- a/roles/openshift_health_checker/library/aos_version.py
+++ b/roles/openshift_health_checker/library/aos_version.py
@@ -1,5 +1,4 @@
#!/usr/bin/python
-# vim: expandtab:tabstop=4:shiftwidth=4
'''
Ansible module for yum-based systems determining if multiple releases
of an OpenShift package are available, and if the release requested
diff --git a/roles/openshift_health_checker/library/check_yum_update.py b/roles/openshift_health_checker/library/check_yum_update.py
index 630ebc848..433795b67 100755
--- a/roles/openshift_health_checker/library/check_yum_update.py
+++ b/roles/openshift_health_checker/library/check_yum_update.py
@@ -1,5 +1,4 @@
#!/usr/bin/python
-# vim: expandtab:tabstop=4:shiftwidth=4
'''
Ansible module to test whether a yum update or install will succeed,
without actually performing it or running yum.
diff --git a/roles/openshift_hosted/README.md b/roles/openshift_hosted/README.md
index 6d576df71..3e5d7f860 100644
--- a/roles/openshift_hosted/README.md
+++ b/roles/openshift_hosted/README.md
@@ -28,6 +28,14 @@ From this role:
| openshift_hosted_registry_selector | region=infra | Node selector used when creating registry. The OpenShift registry will only be deployed to nodes matching this selector. |
| openshift_hosted_registry_cert_expire_days | `730` (2 years) | Validity of the certificates in days. Works only with OpenShift version 1.5 (3.5) and later. |
+If you specify `openshift_hosted_registry_kind=glusterfs`, the following
+variables also control configuration behavior:
+
+| Name | Default value | Description |
+|----------------------------------------------|---------------|------------------------------------------------------------------------------|
+| openshift_hosted_registry_glusterfs_swap | False | Whether to swap an existing registry's storage volume for a GlusterFS volume |
+| openshift_hosted_registry_glusterfs_swapcopy | True | If swapping, also copy the current contents of the registry volume |
+
Dependencies
------------
diff --git a/roles/openshift_hosted/tasks/registry/registry.yml b/roles/openshift_hosted/tasks/registry/registry.yml
index 6e691c26f..751489958 100644
--- a/roles/openshift_hosted/tasks/registry/registry.yml
+++ b/roles/openshift_hosted/tasks/registry/registry.yml
@@ -61,7 +61,7 @@
name: "{{ openshift_hosted_registry_serviceaccount }}"
namespace: "{{ openshift_hosted_registry_namespace }}"
-- name: Grant the registry serivce account access to the appropriate scc
+- name: Grant the registry service account access to the appropriate scc
oc_adm_policy_user:
user: "system:serviceaccount:{{ openshift_hosted_registry_namespace }}:{{ openshift_hosted_registry_serviceaccount }}"
namespace: "{{ openshift_hosted_registry_namespace }}"
@@ -126,4 +126,4 @@
- include: storage/glusterfs.yml
when:
- - openshift.hosted.registry.storage.kind | default(none) == 'glusterfs'
+ - openshift.hosted.registry.storage.kind | default(none) == 'glusterfs' or openshift.hosted.registry.storage.glusterfs.swap
diff --git a/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml b/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml
index b18b24266..e6bb196b8 100644
--- a/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml
+++ b/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml
@@ -1,10 +1,18 @@
---
+- name: Get registry DeploymentConfig
+ oc_obj:
+ namespace: "{{ openshift_hosted_registry_namespace }}"
+ state: list
+ kind: dc
+ name: "{{ openshift_hosted_registry_name }}"
+ register: registry_dc
+
- name: Wait for registry pods
oc_obj:
namespace: "{{ openshift_hosted_registry_namespace }}"
state: list
kind: pod
- selector: "{{ openshift_hosted_registry_name }}={{ openshift_hosted_registry_namespace }}"
+ selector: "{% for label, value in registry_dc.results.results[0].spec.selector.iteritems() %}{{ label }}={{ value }}{% if not loop.last %},{% endif %}{% endfor %}"
register: registry_pods
until:
- "registry_pods.results.results[0]['items'] | count > 0"
@@ -38,6 +46,39 @@
mode: "2775"
recurse: True
+- block:
+ - name: Activate registry maintenance mode
+ oc_env:
+ namespace: "{{ openshift_hosted_registry_namespace }}"
+ name: "{{ openshift_hosted_registry_name }}"
+ env_vars:
+ - REGISTRY_STORAGE_MAINTENANCE_READONLY_ENABLED: 'true'
+
+ - name: Get first registry pod name
+ set_fact:
+ registry_pod_name: "{{ registry_pods.results.results[0]['items'][0].metadata.name }}"
+
+ - name: Copy current registry contents to new GlusterFS volume
+ command: "oc rsync {{ registry_pod_name }}:/registry/ {{ mktemp.stdout }}/"
+ when: openshift.hosted.registry.storage.glusterfs.swapcopy
+
+ - name: Swap new GlusterFS registry volume
+ oc_volume:
+ namespace: "{{ openshift_hosted_registry_namespace }}"
+ name: "{{ openshift_hosted_registry_name }}"
+ vol_name: registry-storage
+ mount_type: pvc
+ claim_name: "{{ openshift.hosted.registry.storage.volume.name }}-glusterfs-claim"
+
+ - name: Deactivate registry maintenance mode
+ oc_env:
+ namespace: "{{ openshift_hosted_registry_namespace }}"
+ name: "{{ openshift_hosted_registry_name }}"
+ state: absent
+ env_vars:
+ - REGISTRY_STORAGE_MAINTENANCE_READONLY_ENABLED: 'true'
+ when: openshift.hosted.registry.storage.glusterfs.swap
+
- name: Unmount registry volume
mount:
state: unmounted
diff --git a/roles/openshift_hosted_logging/tasks/deploy_logging.yaml b/roles/openshift_hosted_logging/tasks/deploy_logging.yaml
index afd82766f..78b624109 100644
--- a/roles/openshift_hosted_logging/tasks/deploy_logging.yaml
+++ b/roles/openshift_hosted_logging/tasks/deploy_logging.yaml
@@ -36,7 +36,7 @@
command: >
{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig secrets new logging-deployer {{ openshift_hosted_logging_secret_vars | default('nothing=/dev/null') }}
register: secret_output
- failed_when: "secret_output.rc == 1 and 'exists' not in secret_output.stderr"
+ failed_when: secret_output.rc == 1 and 'exists' not in secret_output.stderr
- name: "Create templates for logging accounts and the deployer"
command: >
@@ -60,21 +60,21 @@
{{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig
policy add-cluster-role-to-user oauth-editor system:serviceaccount:logging:logging-deployer
register: permiss_output
- failed_when: "permiss_output.rc == 1 and 'exists' not in permiss_output.stderr"
+ failed_when: permiss_output.rc == 1 and 'exists' not in permiss_output.stderr
- name: "Set permissions for fluentd"
command: >
{{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig
policy add-scc-to-user privileged system:serviceaccount:logging:aggregated-logging-fluentd
register: fluentd_output
- failed_when: "fluentd_output.rc == 1 and 'exists' not in fluentd_output.stderr"
+ failed_when: fluentd_output.rc == 1 and 'exists' not in fluentd_output.stderr
- name: "Set additional permissions for fluentd"
command: >
{{ openshift.common.client_binary }} adm policy --config={{ mktemp.stdout }}/admin.kubeconfig
add-cluster-role-to-user cluster-reader system:serviceaccount:logging:aggregated-logging-fluentd
register: fluentd2_output
- failed_when: "fluentd2_output.rc == 1 and 'exists' not in fluentd2_output.stderr"
+ failed_when: fluentd2_output.rc == 1 and 'exists' not in fluentd2_output.stderr
- name: "Add rolebinding-reader to aggregated-logging-elasticsearch"
command: >
@@ -82,13 +82,13 @@
policy add-cluster-role-to-user rolebinding-reader \
system:serviceaccount:logging:aggregated-logging-elasticsearch
register: rolebinding_reader_output
- failed_when: "rolebinding_reader_output == 1 and 'exists' not in rolebinding_reader_output.stderr"
+ failed_when: rolebinding_reader_output == 1 and 'exists' not in rolebinding_reader_output.stderr
- name: "Create ConfigMap for deployer parameters"
command: >
{{ openshift.common.client_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig create configmap logging-deployer {{ deployer_cmap_params }}
register: deployer_configmap_output
- failed_when: "deployer_configmap_output.rc == 1 and 'exists' not in deployer_configmap_output.stderr"
+ failed_when: deployer_configmap_output.rc == 1 and 'exists' not in deployer_configmap_output.stderr
- name: "Process the deployer template"
shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-app logging-deployer-template {{ oc_new_app_values }}"
diff --git a/roles/openshift_hosted_metrics/tasks/install.yml b/roles/openshift_hosted_metrics/tasks/install.yml
index 6a442cefc..15dd1bd54 100644
--- a/roles/openshift_hosted_metrics/tasks/install.yml
+++ b/roles/openshift_hosted_metrics/tasks/install.yml
@@ -81,7 +81,7 @@
secrets new metrics-deployer nothing=/dev/null
register: metrics_deployer_secret
changed_when: metrics_deployer_secret.rc == 0
- failed_when: "metrics_deployer_secret.rc == 1 and 'already exists' not in metrics_deployer_secret.stderr"
+ failed_when: metrics_deployer_secret.rc == 1 and 'already exists' not in metrics_deployer_secret.stderr
# TODO: extend this to allow user passed in certs or generating cert with
# OpenShift CA
diff --git a/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml
index 28feac4e6..8fe02444e 100644
--- a/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml
@@ -103,9 +103,9 @@ parameters:
- description: 'Specify "registry/repository" prefix for container image; e.g. for "registry.access.redhat.com/openshift3/registry-console:latest", set prefix "registry.access.redhat.com/openshift3/"'
name: IMAGE_PREFIX
value: "registry.access.redhat.com/openshift3/"
- - description: 'Specify image version; e.g. for "registry.access.redhat.com/openshift3/registry-console:3.5", set version "3.5"'
+ - description: 'Specify image version; e.g. for "registry.access.redhat.com/openshift3/registry-console:3.6", set version "3.6"'
name: IMAGE_VERSION
- value: "3.5"
+ value: "3.6"
- description: "The public URL for the Openshift OAuth Provider, e.g. https://openshift.example.com:8443"
name: OPENSHIFT_OAUTH_PROVIDER_URL
required: true
diff --git a/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2 b/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2
index 5385df3b7..72182fcdd 100644
--- a/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2
+++ b/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2
@@ -1,7 +1,7 @@
[Unit]
-After=docker.service
-Requires=docker.service
-PartOf=docker.service
+After={{ openshift.docker.service_name }}.service
+Requires={{ openshift.docker.service_name }}.service
+PartOf={{ openshift.docker.service_name }}.service
[Service]
ExecStartPre=-/usr/bin/docker rm -f openshift_loadbalancer
@@ -14,4 +14,4 @@ Restart=always
RestartSec=5s
[Install]
-WantedBy=docker.service
+WantedBy={{ openshift.docker.service_name }}.service
diff --git a/roles/openshift_logging/README.md b/roles/openshift_logging/README.md
index 42f4fc72e..cba0f2de8 100644
--- a/roles/openshift_logging/README.md
+++ b/roles/openshift_logging/README.md
@@ -91,8 +91,6 @@ same as above for their non-ops counterparts, but apply to the OPS cluster insta
- `openshift_logging_es_ops_pvc_prefix`: logging-es-ops
- `openshift_logging_es_ops_recover_after_time`: 5m
- `openshift_logging_es_ops_storage_group`: 65534
-- `openshift_logging_es_ops_number_of_shards`: The number of primary shards for every new index created in ES. Defaults to '1'.
-- `openshift_logging_es_ops_number_of_replicas`: The number of replica shards per primary shard for every new index. Defaults to '0'.
- `openshift_logging_kibana_ops_hostname`: The Operations Kibana hostname. Defaults to 'kibana-ops.example.com'.
- `openshift_logging_kibana_ops_cpu_limit`: The amount of CPU to allocate to Kibana or unset if not specified.
- `openshift_logging_kibana_ops_memory_limit`: The amount of memory to allocate to Kibana or unset if not specified.
diff --git a/roles/openshift_logging/defaults/main.yml b/roles/openshift_logging/defaults/main.yml
index 5ee8d1e2a..f43336dc4 100644
--- a/roles/openshift_logging/defaults/main.yml
+++ b/roles/openshift_logging/defaults/main.yml
@@ -3,6 +3,10 @@ openshift_logging_use_ops: "{{ openshift_hosted_logging_enable_ops_cluster | def
openshift_logging_master_url: "https://kubernetes.default.svc.{{ openshift.common.dns_domain }}"
openshift_logging_master_public_url: "{{ openshift_hosted_logging_master_public_url | default('https://' + openshift.common.public_hostname + ':' ~ (openshift_master_api_port | default('8443', true))) }}"
openshift_logging_namespace: logging
+openshift_logging_nodeselector: null
+openshift_logging_labels: {}
+openshift_logging_label_key: ""
+openshift_logging_label_value: ""
openshift_logging_install_logging: True
openshift_logging_image_pull_secret: "{{ openshift_hosted_logging_image_pull_secret | default('') }}"
@@ -22,10 +26,10 @@ openshift_logging_curator_ops_nodeselector: "{{ openshift_hosted_logging_curator
openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}"
openshift_logging_kibana_cpu_limit: null
-openshift_logging_kibana_memory_limit: null
+openshift_logging_kibana_memory_limit: 736Mi
openshift_logging_kibana_proxy_debug: false
openshift_logging_kibana_proxy_cpu_limit: null
-openshift_logging_kibana_proxy_memory_limit: null
+openshift_logging_kibana_proxy_memory_limit: 96Mi
openshift_logging_kibana_replica_count: 1
openshift_logging_kibana_edge_term_policy: Redirect
@@ -46,10 +50,10 @@ openshift_logging_kibana_ca: ""
openshift_logging_kibana_ops_hostname: "{{ openshift_hosted_logging_ops_hostname | default('kibana-ops.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}"
openshift_logging_kibana_ops_cpu_limit: null
-openshift_logging_kibana_ops_memory_limit: null
+openshift_logging_kibana_ops_memory_limit: 736Mi
openshift_logging_kibana_ops_proxy_debug: false
openshift_logging_kibana_ops_proxy_cpu_limit: null
-openshift_logging_kibana_ops_proxy_memory_limit: null
+openshift_logging_kibana_ops_proxy_memory_limit: 96Mi
openshift_logging_kibana_ops_replica_count: 1
#The absolute path on the control node to the cert file to use
@@ -68,7 +72,7 @@ openshift_logging_fluentd_nodeselector: "{{ openshift_hosted_logging_fluentd_nod
openshift_logging_fluentd_cpu_limit: 100m
openshift_logging_fluentd_memory_limit: 512Mi
openshift_logging_fluentd_es_copy: false
-openshift_logging_fluentd_use_journal: "{{ openshift_hosted_logging_use_journal | default('') }}"
+openshift_logging_fluentd_use_journal: "{{ openshift_hosted_logging_use_journal if openshift_hosted_logging_use_journal is defined else (docker_log_driver == 'journald') | ternary(True, False) if docker_log_driver is defined else (openshift.docker.log_driver == 'journald') | ternary(True, False) if openshift.docker.log_driver is defined else openshift.docker.options | search('--log-driver=journald') if openshift.docker.options is defined else default(omit) }}"
openshift_logging_fluentd_journal_source: "{{ openshift_hosted_logging_journal_source | default('') }}"
openshift_logging_fluentd_journal_read_from_head: "{{ openshift_hosted_logging_journal_read_from_head | default('') }}"
openshift_logging_fluentd_hosts: ['--all']
@@ -113,8 +117,6 @@ openshift_logging_es_ops_pvc_prefix: "{{ openshift_hosted_logging_elasticsearch_
openshift_logging_es_ops_recover_after_time: 5m
openshift_logging_es_ops_storage_group: "{{ openshift_hosted_logging_elasticsearch_storage_group | default('65534') }}"
openshift_logging_es_ops_nodeselector: "{{ openshift_hosted_logging_elasticsearch_ops_nodeselector | default('') | map_from_pairs }}"
-openshift_logging_es_ops_number_of_shards: 1
-openshift_logging_es_ops_number_of_replicas: 0
# storage related defaults
openshift_logging_storage_access_modes: "{{ openshift_hosted_logging_storage_access_modes | default(['ReadWriteOnce']) }}"
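Editor's note: the new `openshift_logging_fluentd_use_journal` default above is a single chained expression; its intent is roughly the precedence sketched below (the inventory value shown is hypothetical):
```
# Precedence of the chained default, highest first:
#   1. openshift_hosted_logging_use_journal, when set in the inventory
#   2. docker_log_driver == 'journald', when docker_log_driver is defined
#   3. openshift.docker.log_driver == 'journald', when that fact exists
#   4. whether openshift.docker.options contains --log-driver=journald
#   5. otherwise the variable is effectively omitted
# Example inventory setting that makes Fluentd read from the systemd journal:
docker_log_driver: journald
# openshift_hosted_logging_use_journal: true   # would override the detection above
```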
diff --git a/roles/openshift_logging/handlers/main.yml b/roles/openshift_logging/handlers/main.yml
index ffb812271..69c5a1663 100644
--- a/roles/openshift_logging/handlers/main.yml
+++ b/roles/openshift_logging/handlers/main.yml
@@ -4,6 +4,15 @@
when: (openshift.master.ha is not defined or not openshift.master.ha | bool) and (not (master_service_status_changed | default(false) | bool))
notify: Verify API Server
+- name: restart master api
+ systemd: name={{ openshift.common.service_type }}-master-api state=restarted
+ when: (openshift.master.ha is defined and openshift.master.ha | bool) and (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
+ notify: Verify API Server
+
+- name: restart master controllers
+ systemd: name={{ openshift.common.service_type }}-master-controllers state=restarted
+ when: (openshift.master.ha is defined and openshift.master.ha | bool) and (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
+
- name: Verify API Server
# Using curl here since the uri module requires python-httplib2 and
# wait_for port doesn't provide health information.
diff --git a/roles/openshift_logging/tasks/generate_configmaps.yaml b/roles/openshift_logging/tasks/generate_configmaps.yaml
index 44bd0058a..b047eb35a 100644
--- a/roles/openshift_logging/tasks/generate_configmaps.yaml
+++ b/roles/openshift_logging/tasks/generate_configmaps.yaml
@@ -21,6 +21,8 @@
dest="{{local_tmp.stdout}}/elasticsearch-gen-template.yml"
vars:
- allow_cluster_reader: "{{openshift_logging_es_ops_allow_cluster_reader | lower | default('false')}}"
+ - es_number_of_shards: "{{ openshift_logging_es_number_of_shards | default(1) }}"
+ - es_number_of_replicas: "{{ openshift_logging_es_number_of_replicas | default(0) }}"
when: es_config_contents is undefined
changed_when: no
diff --git a/roles/openshift_logging/tasks/generate_routes.yaml b/roles/openshift_logging/tasks/generate_routes.yaml
index e77da7a24..f76bb3a0a 100644
--- a/roles/openshift_logging/tasks/generate_routes.yaml
+++ b/roles/openshift_logging/tasks/generate_routes.yaml
@@ -1,14 +1,14 @@
---
- set_fact: kibana_key={{ lookup('file', openshift_logging_kibana_key) | b64encode }}
- when: "{{ openshift_logging_kibana_key | trim | length > 0 }}"
+ when: openshift_logging_kibana_key | trim | length > 0
changed_when: false
- set_fact: kibana_cert={{ lookup('file', openshift_logging_kibana_cert)| b64encode }}
- when: "{{openshift_logging_kibana_cert | trim | length > 0}}"
+ when: openshift_logging_kibana_cert | trim | length > 0
changed_when: false
- set_fact: kibana_ca={{ lookup('file', openshift_logging_kibana_ca)| b64encode }}
- when: "{{openshift_logging_kibana_ca | trim | length > 0}}"
+ when: openshift_logging_kibana_ca | trim | length > 0
changed_when: false
- set_fact: kibana_ca={{key_pairs | entry_from_named_pair('ca_file') }}
diff --git a/roles/openshift_logging/tasks/install_elasticsearch.yaml b/roles/openshift_logging/tasks/install_elasticsearch.yaml
index b80f37892..a981e7f7f 100644
--- a/roles/openshift_logging/tasks/install_elasticsearch.yaml
+++ b/roles/openshift_logging/tasks/install_elasticsearch.yaml
@@ -3,7 +3,10 @@
set_fact: openshift_logging_current_es_size={{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | length }}
- set_fact: openshift_logging_es_pvc_prefix="logging-es"
- when: "not openshift_logging_es_pvc_prefix or openshift_logging_es_pvc_prefix == ''"
+ when: not openshift_logging_es_pvc_prefix or openshift_logging_es_pvc_prefix == ''
+
+- set_fact: es_indices={{ es_indices | default([]) + [item | int - 1] }}
+ with_sequence: count={{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | count }}
### evaluate if the PVC attached to the dc currently matches the provided vars
## if it does then we reuse that pvc in the DC
@@ -12,19 +15,19 @@
es_component: es
es_name: "{{ deployment.0 }}"
es_spec: "{{ deployment.1 }}"
+ es_pvc_count: "{{ deployment.2 | int }}"
es_node_selector: "{{ openshift_logging_es_nodeselector | default({}) }}"
- es_pvc_names: "{{ openshift_logging_facts.elasticsearch.pvcs.keys() }}"
+ es_pvc_names_count: "{{ openshift_logging_facts.elasticsearch.pvcs.keys() | count }}"
es_pvc_size: "{{ openshift_logging_es_pvc_size }}"
es_pvc_prefix: "{{ openshift_logging_es_pvc_prefix }}"
es_pvc_dynamic: "{{ openshift_logging_es_pvc_dynamic | bool }}"
es_pv_selector: "{{ openshift_logging_es_pv_selector }}"
es_cpu_limit: "{{ openshift_logging_es_cpu_limit }}"
es_memory_limit: "{{ openshift_logging_es_memory_limit }}"
- es_number_of_shards: "{{ openshift_logging_es_number_of_shards }}"
- es_number_of_replicas: "{{ openshift_logging_es_number_of_replicas }}"
with_together:
- "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() }}"
- "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.values() }}"
+ - "{{ es_indices | default([]) }}"
loop_control:
loop_var: deployment
## if it does not then we should create one that does and attach it
@@ -35,16 +38,15 @@
es_component: es
es_name: "logging-es-{{'abcdefghijklmnopqrstuvwxyz0123456789'|random_word(8)}}"
es_spec: "{}"
+ es_pvc_count: "{{ item | int - 1 }}"
es_node_selector: "{{ openshift_logging_es_nodeselector | default({}) }}"
- es_pvc_names: "{{ openshift_logging_facts.elasticsearch.pvcs.keys() }}"
+ es_pvc_names_count: "{{ [openshift_logging_facts.elasticsearch.pvcs.keys() | count, openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | count] | max }}"
es_pvc_size: "{{ openshift_logging_es_pvc_size }}"
es_pvc_prefix: "{{ openshift_logging_es_pvc_prefix }}"
es_pvc_dynamic: "{{ openshift_logging_es_pvc_dynamic | bool }}"
es_pv_selector: "{{ openshift_logging_es_pv_selector }}"
es_cpu_limit: "{{ openshift_logging_es_cpu_limit }}"
es_memory_limit: "{{ openshift_logging_es_memory_limit }}"
- es_number_of_shards: "{{ openshift_logging_es_number_of_shards }}"
- es_number_of_replicas: "{{ openshift_logging_es_number_of_replicas }}"
with_sequence: count={{ openshift_logging_es_cluster_size | int - openshift_logging_facts.elasticsearch.deploymentconfigs | count }}
# --------- Tasks for Operation clusters ---------
@@ -65,26 +67,31 @@
check_mode: no
- set_fact: openshift_logging_es_ops_pvc_prefix="logging-es-ops"
- when: "not openshift_logging_es_ops_pvc_prefix or openshift_logging_es_ops_pvc_prefix == ''"
+ when: not openshift_logging_es_ops_pvc_prefix or openshift_logging_es_ops_pvc_prefix == ''
+
+- set_fact: es_ops_indices={{ es_ops_indices | default([]) + [item | int - 1] }}
+ with_sequence: count={{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | count }}
+ when:
+ - openshift_logging_use_ops | bool
- include: set_es_storage.yaml
vars:
es_component: es-ops
es_name: "{{ deployment.0 }}"
es_spec: "{{ deployment.1 }}"
+ es_pvc_count: "{{ deployment.2 | int }}"
es_node_selector: "{{ openshift_logging_es_ops_nodeselector | default({}) }}"
- es_pvc_names: "{{ openshift_logging_facts.elasticsearch_ops.pvcs.keys() }}"
+ es_pvc_names_count: "{{ openshift_logging_facts.elasticsearch_ops.pvcs.keys() | count }}"
es_pvc_size: "{{ openshift_logging_es_ops_pvc_size }}"
es_pvc_prefix: "{{ openshift_logging_es_ops_pvc_prefix }}"
es_pvc_dynamic: "{{ openshift_logging_es_ops_pvc_dynamic | bool }}"
es_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}"
es_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}"
es_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}"
- es_number_of_shards: "{{ openshift_logging_es_ops_number_of_shards }}"
- es_number_of_replicas: "{{ openshift_logging_es_ops_number_of_replicas }}"
with_together:
- "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() }}"
- "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.values() }}"
+ - "{{ es_ops_indices | default([]) }}"
loop_control:
loop_var: deployment
when:
@@ -97,16 +104,15 @@
es_component: es-ops
es_name: "logging-es-ops-{{'abcdefghijklmnopqrstuvwxyz0123456789'|random_word(8)}}"
es_spec: "{}"
+ es_pvc_count: "{{ item | int - 1 }}"
es_node_selector: "{{ openshift_logging_es_ops_nodeselector | default({}) }}"
- es_pvc_names: "{{ openshift_logging_facts.elasticsearch_ops.pvcs.keys() }}"
+ es_pvc_names_count: "{{ [openshift_logging_facts.elasticsearch_ops.pvcs.keys() | count, openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | count] | max }}"
es_pvc_size: "{{ openshift_logging_es_ops_pvc_size }}"
es_pvc_prefix: "{{ openshift_logging_es_ops_pvc_prefix }}"
es_pvc_dynamic: "{{ openshift_logging_es_ops_pvc_dynamic | bool }}"
es_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}"
es_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}"
es_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}"
- es_number_of_shards: "{{ openshift_logging_es_ops_number_of_shards }}"
- es_number_of_replicas: "{{ openshift_logging_es_ops_number_of_replicas }}"
with_sequence: count={{ openshift_logging_es_ops_cluster_size | int - openshift_logging_facts.elasticsearch_ops.deploymentconfigs | count }}
when:
- openshift_logging_use_ops | bool
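Editor's note: the `es_indices`/`es_ops_indices` facts above exist so that each deployment config in the `with_together` loop carries its own zero-based index. A small sketch, assuming two existing Elasticsearch deployment configs:
```
# with_sequence count=2 yields the strings "1" and "2"; shifting each by one
# builds es_indices == [0, 1], which is then zipped against the deploymentconfigs.
- set_fact: es_indices={{ es_indices | default([]) + [item | int - 1] }}
  with_sequence: count=2
```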
diff --git a/roles/openshift_logging/tasks/install_fluentd.yaml b/roles/openshift_logging/tasks/install_fluentd.yaml
index 35273829c..6bc405819 100644
--- a/roles/openshift_logging/tasks/install_fluentd.yaml
+++ b/roles/openshift_logging/tasks/install_fluentd.yaml
@@ -32,7 +32,7 @@
{{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy
add-scc-to-user privileged system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd
register: fluentd_output
- failed_when: "fluentd_output.rc == 1 and 'exists' not in fluentd_output.stderr"
+ failed_when: fluentd_output.rc == 1 and 'exists' not in fluentd_output.stderr
check_mode: no
when: fluentd_privileged.stdout.find("system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd") == -1
@@ -49,6 +49,6 @@
{{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy
add-cluster-role-to-user cluster-reader system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd
register: fluentd2_output
- failed_when: "fluentd2_output.rc == 1 and 'exists' not in fluentd2_output.stderr"
+ failed_when: fluentd2_output.rc == 1 and 'exists' not in fluentd2_output.stderr
check_mode: no
when: fluentd_cluster_reader.stdout.find("system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd") == -1
diff --git a/roles/openshift_logging/tasks/install_mux.yaml b/roles/openshift_logging/tasks/install_mux.yaml
index 296da626f..91eeb95a1 100644
--- a/roles/openshift_logging/tasks/install_mux.yaml
+++ b/roles/openshift_logging/tasks/install_mux.yaml
@@ -45,7 +45,7 @@
{{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy
add-scc-to-user hostmount-anyuid system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd
register: mux_output
- failed_when: "mux_output.rc == 1 and 'exists' not in mux_output.stderr"
+ failed_when: mux_output.rc == 1 and 'exists' not in mux_output.stderr
check_mode: no
when: mux_hostmount_anyuid.stdout.find("system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd") == -1
@@ -62,6 +62,6 @@
{{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy
add-cluster-role-to-user cluster-reader system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd
register: mux2_output
- failed_when: "mux2_output.rc == 1 and 'exists' not in mux2_output.stderr"
+ failed_when: mux2_output.rc == 1 and 'exists' not in mux2_output.stderr
check_mode: no
when: mux_cluster_reader.stdout.find("system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd") == -1
diff --git a/roles/openshift_logging/tasks/install_support.yaml b/roles/openshift_logging/tasks/install_support.yaml
index da0bbb627..877ce3149 100644
--- a/roles/openshift_logging/tasks/install_support.yaml
+++ b/roles/openshift_logging/tasks/install_support.yaml
@@ -1,17 +1,36 @@
---
# This is the base configuration for installing the other components
-- name: Check for logging project already exists
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get project {{openshift_logging_namespace}} --no-headers
- register: logging_project_result
- ignore_errors: yes
- when: not ansible_check_mode
- changed_when: no
+- name: Set logging project
+ oc_project:
+ state: present
+ name: "{{ openshift_logging_namespace }}"
+ node_selector: "{{ openshift_logging_nodeselector | default(null) }}"
+
+- name: Labelling logging project
+ oc_label:
+ state: present
+ kind: namespace
+ name: "{{ openshift_logging_namespace }}"
+ labels:
+ - key: "{{ item.key }}"
+ value: "{{ item.value }}"
+ with_dict: "{{ openshift_logging_labels | default({}) }}"
+ when:
+ - openshift_logging_labels is defined
+ - openshift_logging_labels is dict
-- name: "Create logging project"
- command: >
- {{ openshift.common.admin_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-project {{openshift_logging_namespace}}
- when: not ansible_check_mode and "not found" in logging_project_result.stderr
+- name: Labelling logging project
+ oc_label:
+ state: present
+ kind: namespace
+ name: "{{ openshift_logging_namespace }}"
+ labels:
+ - key: "{{ openshift_logging_label_key }}"
+ value: "{{ openshift_logging_label_value }}"
+ when:
+ - openshift_logging_label_key is defined
+ - openshift_logging_label_key != ""
+ - openshift_logging_label_value is defined
- name: Create logging cert directory
file: path={{openshift.common.config_base}}/logging state=directory mode=0755
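Editor's note: project creation above now goes through `oc_project`/`oc_label` and is driven by the new defaults (`openshift_logging_nodeselector`, `openshift_logging_labels`, and the single key/value pair). A hypothetical inventory sketch that exercises them:
```
openshift_logging_namespace: logging
openshift_logging_nodeselector: "region=infra"   # becomes the project node selector
openshift_logging_labels:                        # each pair is applied as a namespace label
  team: ops
  environment: production
# Alternatively, a single label via the key/value pair variables:
# openshift_logging_label_key: team
# openshift_logging_label_value: ops
```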
diff --git a/roles/openshift_logging/tasks/main.yaml b/roles/openshift_logging/tasks/main.yaml
index c7f4a2f93..387da618d 100644
--- a/roles/openshift_logging/tasks/main.yaml
+++ b/roles/openshift_logging/tasks/main.yaml
@@ -1,7 +1,7 @@
---
- fail:
msg: Only one Fluentd nodeselector key pair should be provided
- when: "{{ openshift_logging_fluentd_nodeselector.keys() | count }} > 1"
+ when: openshift_logging_fluentd_nodeselector.keys() | count > 1
- name: Set default image variables based on deployment_type
include_vars: "{{ item }}"
diff --git a/roles/openshift_logging/tasks/oc_apply.yaml b/roles/openshift_logging/tasks/oc_apply.yaml
index c4db7d033..a0ed56ebd 100644
--- a/roles/openshift_logging/tasks/oc_apply.yaml
+++ b/roles/openshift_logging/tasks/oc_apply.yaml
@@ -6,7 +6,7 @@
namespace: "{{ namespace }}"
files:
- "{{ file_name }}"
- when: file_content.kind != "Service"
+ when: file_content.kind not in ["Service", "Route"]
## still need to do this for services until the template logic is replaced by oc_*
- block:
@@ -49,4 +49,4 @@
failed_when: "'error' in generation_apply.stderr"
changed_when: generation_apply.rc == 0
when: "'field is immutable' in generation_apply.stderr"
- when: file_content.kind == "Service"
+ when: file_content.kind in ["Service", "Route"]
diff --git a/roles/openshift_logging/tasks/set_es_storage.yaml b/roles/openshift_logging/tasks/set_es_storage.yaml
index 198b1d04d..4afe4e641 100644
--- a/roles/openshift_logging/tasks/set_es_storage.yaml
+++ b/roles/openshift_logging/tasks/set_es_storage.yaml
@@ -36,7 +36,7 @@
- name: Generating PersistentVolumeClaims
template: src=pvc.j2 dest={{mktemp.stdout}}/templates/logging-{{obj_name}}-pvc.yaml
vars:
- obj_name: "{{ es_pvc_prefix }}-{{ es_pvc_names | count }}"
+ obj_name: "{{ es_pvc_prefix }}-{{ es_pvc_names_count | int + es_pvc_count | int }}"
size: "{{ es_pvc_size }}"
access_modes: "{{ openshift_logging_storage_access_modes }}"
pv_selector: "{{ es_pv_selector }}"
@@ -47,7 +47,7 @@
- name: Generating PersistentVolumeClaims - Dynamic
template: src=pvc.j2 dest={{mktemp.stdout}}/templates/logging-{{obj_name}}-pvc.yaml
vars:
- obj_name: "{{ es_pvc_prefix }}-{{ es_pvc_names | count }}"
+ obj_name: "{{ es_pvc_prefix }}-{{ es_pvc_names_count | int + es_pvc_count | int }}"
annotations:
volume.alpha.kubernetes.io/storage-class: "dynamic"
size: "{{ es_pvc_size }}"
@@ -57,7 +57,7 @@
check_mode: no
changed_when: no
- - set_fact: es_storage_claim="{{ es_pvc_prefix }}-{{ es_pvc_names | count }}"
+ - set_fact: es_storage_claim="{{ es_pvc_prefix }}-{{ es_pvc_names_count | int + es_pvc_count | int }}"
when:
- es_pvc_size | search('^\d.*')
@@ -76,7 +76,5 @@
es_memory_limit: "{{ es_memory_limit }}"
es_node_selector: "{{ es_node_selector }}"
es_storage: "{{ openshift_logging_facts | es_storage( es_name, es_storage_claim ) }}"
- es_number_of_shards: "{{ es_number_of_shards }}"
- es_number_of_replicas: "{{ es_number_of_replicas }}"
check_mode: no
changed_when: no
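Editor's note: the PVC name above is now derived from two counters rather than from the raw PVC name list. A worked example with assumed values — prefix `logging-es`, two claims already present, deployment index 1 — yields `logging-es-3`, so freshly generated claims never collide with existing ones:
```
# Assumed values for illustration; prints "logging-es-3".
- debug:
    msg: "{{ es_pvc_prefix }}-{{ es_pvc_names_count | int + es_pvc_count | int }}"
  vars:
    es_pvc_prefix: logging-es
    es_pvc_names_count: 2
    es_pvc_count: 1
```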
diff --git a/roles/openshift_logging/tasks/start_cluster.yaml b/roles/openshift_logging/tasks/start_cluster.yaml
index 1042b3daa..c1592b830 100644
--- a/roles/openshift_logging/tasks/start_cluster.yaml
+++ b/roles/openshift_logging/tasks/start_cluster.yaml
@@ -36,10 +36,13 @@
name: "{{ object }}"
namespace: "{{openshift_logging_namespace}}"
replicas: "{{ openshift_logging_mux_replica_count | default (1) }}"
- with_items: "{{ mux_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
+ with_items: "{{ mux_dc.results.results[0]['items'] | map(attribute='metadata.name') | list if 'results' in mux_dc else [] }}"
loop_control:
loop_var: object
- when: openshift_logging_use_mux
+ when:
+ - mux_dc.results is defined
+ - mux_dc.results.results is defined
+ - openshift_logging_use_mux
- name: Retrieve elasticsearch
oc_obj:
diff --git a/roles/openshift_logging/tasks/stop_cluster.yaml b/roles/openshift_logging/tasks/stop_cluster.yaml
index d20c57cc1..f4b419d84 100644
--- a/roles/openshift_logging/tasks/stop_cluster.yaml
+++ b/roles/openshift_logging/tasks/stop_cluster.yaml
@@ -36,7 +36,7 @@
name: "{{ object }}"
namespace: "{{openshift_logging_namespace}}"
replicas: 0
- with_items: "{{ mux_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
+ with_items: "{{ mux_dc.results.results[0]['items'] | map(attribute='metadata.name') | list if 'results' in mux_dc else [] }}"
loop_control:
loop_var: object
when: openshift_logging_use_mux
diff --git a/roles/openshift_logging/tasks/update_master_config.yaml b/roles/openshift_logging/tasks/update_master_config.yaml
index cef835668..10f522b61 100644
--- a/roles/openshift_logging/tasks/update_master_config.yaml
+++ b/roles/openshift_logging/tasks/update_master_config.yaml
@@ -4,6 +4,9 @@
dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
yaml_key: assetConfig.loggingPublicURL
yaml_value: "https://{{ openshift_logging_kibana_hostname }}"
- notify: restart master
+ notify:
+ - restart master
+ - restart master api
+ - restart master controllers
tags:
- - update_master_config
+ - update_master_config
diff --git a/roles/openshift_logging/templates/fluentd.j2 b/roles/openshift_logging/templates/fluentd.j2
index d13691259..5c93d823e 100644
--- a/roles/openshift_logging/templates/fluentd.j2
+++ b/roles/openshift_logging/templates/fluentd.j2
@@ -59,6 +59,9 @@ spec:
- name: dockercfg
mountPath: /etc/sysconfig/docker
readOnly: true
+ - name: dockerdaemoncfg
+ mountPath: /etc/docker
+ readOnly: true
{% if openshift_logging_use_mux_client | bool %}
- name: muxcerts
mountPath: /etc/fluent/muxkeys
@@ -154,6 +157,9 @@ spec:
- name: dockercfg
hostPath:
path: /etc/sysconfig/docker
+ - name: dockerdaemoncfg
+ hostPath:
+ path: /etc/docker
{% if openshift_logging_use_mux_client | bool %}
- name: muxcerts
secret:
diff --git a/roles/openshift_logging/templates/kibana.j2 b/roles/openshift_logging/templates/kibana.j2
index e6ecf82ff..25fab9ac4 100644
--- a/roles/openshift_logging/templates/kibana.j2
+++ b/roles/openshift_logging/templates/kibana.j2
@@ -44,15 +44,19 @@ spec:
{% if kibana_cpu_limit is not none %}
cpu: "{{kibana_cpu_limit}}"
{% endif %}
-{% if kibana_memory_limit is not none %}
- memory: "{{kibana_memory_limit}}"
-{% endif %}
+ memory: "{{kibana_memory_limit | default('736Mi') }}"
{% endif %}
env:
- name: "ES_HOST"
value: "{{es_host}}"
- name: "ES_PORT"
value: "{{es_port}}"
+ -
+ name: "KIBANA_MEMORY_LIMIT"
+ valueFrom:
+ resourceFieldRef:
+ containerName: kibana
+ resource: limits.memory
volumeMounts:
- name: kibana
mountPath: /etc/kibana/keys
@@ -67,9 +71,7 @@ spec:
{% if kibana_proxy_cpu_limit is not none %}
cpu: "{{kibana_proxy_cpu_limit}}"
{% endif %}
-{% if kibana_proxy_memory_limit is not none %}
- memory: "{{kibana_proxy_memory_limit}}"
-{% endif %}
+ memory: "{{kibana_proxy_memory_limit | default('96Mi') }}"
{% endif %}
ports:
-
@@ -103,6 +105,27 @@ spec:
-
name: "OAP_DEBUG"
value: "{{openshift_logging_kibana_proxy_debug}}"
+ -
+ name: "OAP_OAUTH_SECRET_FILE"
+ value: "/secret/oauth-secret"
+ -
+ name: "OAP_SERVER_CERT_FILE"
+ value: "/secret/server-cert"
+ -
+ name: "OAP_SERVER_KEY_FILE"
+ value: "/secret/server-key"
+ -
+ name: "OAP_SERVER_TLS_FILE"
+ value: "/secret/server-tls.json"
+ -
+ name: "OAP_SESSION_SECRET_FILE"
+ value: "/secret/session-secret"
+ -
+ name: "OCP_AUTH_PROXY_MEMORY_LIMIT"
+ valueFrom:
+ resourceFieldRef:
+ containerName: kibana-proxy
+ resource: limits.memory
volumeMounts:
- name: kibana-proxy
mountPath: /secret
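Editor's note: the new `resourceFieldRef` entries above use the Kubernetes Downward API to expose each container's own memory limit to the process inside it. A generic container-spec fragment showing the same pattern (image and limit values are placeholders):
```
containers:
  - name: kibana
    image: example/kibana:latest          # placeholder image
    resources:
      limits:
        memory: 736Mi
    env:
      - name: KIBANA_MEMORY_LIMIT         # surfaced so the process can size itself
        valueFrom:
          resourceFieldRef:
            containerName: kibana
            resource: limits.memory
```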
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index 98e0da1a2..5522fef26 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -194,7 +194,7 @@
state: stopped
when: openshift_master_ha | bool
register: task_result
- failed_when: "task_result|failed and 'could not' not in task_result.msg|lower"
+ failed_when: task_result|failed and 'could not' not in task_result.msg|lower
- set_fact:
master_service_status_changed: "{{ start_result | changed }}"
diff --git a/roles/openshift_master/tasks/systemd_units.yml b/roles/openshift_master/tasks/systemd_units.yml
index 506c8b129..cfa860edf 100644
--- a/roles/openshift_master/tasks/systemd_units.yml
+++ b/roles/openshift_master/tasks/systemd_units.yml
@@ -32,6 +32,15 @@
- not openshift.common.is_master_system_container | bool
register: create_master_unit_file
+- name: Install Master service file
+ template:
+ dest: "/etc/systemd/system/{{ openshift.common.service_type }}-master.service"
+ src: "{{ openshift.common.service_type }}-master.service"
+ register: create_master_unit_file
+ when:
+ - not openshift.common.is_containerized | bool
+ - (openshift.master.ha is not defined or not openshift.master.ha) | bool
+
- command: systemctl daemon-reload
when: create_master_unit_file | changed
@@ -90,6 +99,7 @@
dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-api
line: "{{ item }}"
with_items: "{{ master_api_aws.stdout_lines | default([]) }}"
+ no_log: True
- name: Preserve Master Controllers Proxy Config options
command: grep PROXY /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
diff --git a/roles/openshift_master/templates/atomic-openshift-master.service b/roles/openshift_master/templates/atomic-openshift-master.service
new file mode 100644
index 000000000..02af4dd16
--- /dev/null
+++ b/roles/openshift_master/templates/atomic-openshift-master.service
@@ -0,0 +1,23 @@
+[Unit]
+Description=Atomic OpenShift Master
+Documentation=https://github.com/openshift/origin
+After=network-online.target
+After=etcd.service
+Before=atomic-openshift-node.service
+Requires=network-online.target
+
+[Service]
+Type=notify
+EnvironmentFile=/etc/sysconfig/atomic-openshift-master
+Environment=GOTRACEBACK=crash
+ExecStart=/usr/bin/openshift start master --config=${CONFIG_FILE} $OPTIONS
+LimitNOFILE=131072
+LimitCORE=infinity
+WorkingDirectory=/var/lib/origin/
+SyslogIdentifier=atomic-openshift-master
+Restart=always
+RestartSec=5s
+
+[Install]
+WantedBy=multi-user.target
+WantedBy=atomic-openshift-node.service
diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
index 155abd970..897ee7285 100644
--- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
+++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
@@ -4,9 +4,9 @@ Documentation=https://github.com/openshift/origin
After=etcd_container.service
Wants=etcd_container.service
Before={{ openshift.common.service_type }}-node.service
-After=docker.service
-PartOf=docker.service
-Requires=docker.service
+After={{ openshift.docker.service_name }}.service
+PartOf={{ openshift.docker.service_name }}.service
+Requires={{ openshift.docker.service_name }}.service
[Service]
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-api
@@ -23,5 +23,5 @@ Restart=always
RestartSec=5s
[Install]
-WantedBy=docker.service
+WantedBy={{ openshift.docker.service_name }}.service
WantedBy={{ openshift.common.service_type }}-node.service
diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
index 088e8db43..451f3436a 100644
--- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
+++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
@@ -3,9 +3,9 @@ Description=Atomic OpenShift Master Controllers
Documentation=https://github.com/openshift/origin
Wants={{ openshift.common.service_type }}-master-api.service
After={{ openshift.common.service_type }}-master-api.service
-After=docker.service
-Requires=docker.service
-PartOf=docker.service
+After={{ openshift.docker.service_name }}.service
+Requires={{ openshift.docker.service_name }}.service
+PartOf={{ openshift.docker.service_name }}.service
[Service]
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
@@ -22,4 +22,4 @@ Restart=always
RestartSec=5s
[Install]
-WantedBy=docker.service
+WantedBy={{ openshift.docker.service_name }}.service
diff --git a/roles/openshift_master/templates/master_docker/master.docker.service.j2 b/roles/openshift_master/templates/master_docker/master.docker.service.j2
index 13381cd1a..7f40cb042 100644
--- a/roles/openshift_master/templates/master_docker/master.docker.service.j2
+++ b/roles/openshift_master/templates/master_docker/master.docker.service.j2
@@ -1,7 +1,7 @@
[Unit]
-After=docker.service
-Requires=docker.service
-PartOf=docker.service
+After={{ openshift.docker.service_name }}.service
+Requires={{ openshift.docker.service_name }}.service
+PartOf={{ openshift.docker.service_name }}.service
After=etcd_container.service
Wants=etcd_container.service
@@ -15,4 +15,4 @@ Restart=always
RestartSec=5s
[Install]
-WantedBy=docker.service
+WantedBy={{ openshift.docker.service_name }}.service
diff --git a/roles/openshift_master/templates/origin-master.service b/roles/openshift_master/templates/origin-master.service
new file mode 100644
index 000000000..cf79dda02
--- /dev/null
+++ b/roles/openshift_master/templates/origin-master.service
@@ -0,0 +1,23 @@
+[Unit]
+Description=Origin Master Service
+Documentation=https://github.com/openshift/origin
+After=network-online.target
+After=etcd.service
+Before=origin-node.service
+Requires=network-online.target
+
+[Service]
+Type=notify
+EnvironmentFile=/etc/sysconfig/origin-master
+Environment=GOTRACEBACK=crash
+ExecStart=/usr/bin/openshift start master --config=${CONFIG_FILE} $OPTIONS
+LimitNOFILE=131072
+LimitCORE=infinity
+WorkingDirectory=/var/lib/origin/
+SyslogIdentifier=origin-master
+Restart=always
+RestartSec=5s
+
+[Install]
+WantedBy=multi-user.target
+WantedBy=origin-node.service
diff --git a/roles/openshift_master_certificates/tasks/main.yml b/roles/openshift_master_certificates/tasks/main.yml
index d4c9a96ca..2617efaf1 100644
--- a/roles/openshift_master_certificates/tasks/main.yml
+++ b/roles/openshift_master_certificates/tasks/main.yml
@@ -64,10 +64,10 @@
--signer-key={{ openshift_ca_key }}
--signer-serial={{ openshift_ca_serial }}
--overwrite=false
+ when: item != openshift_ca_host
with_items: "{{ hostvars
| oo_select_keys(groups['oo_masters_to_config'])
- | oo_collect(attribute='inventory_hostname', filters={'master_certs_missing':True})
- | difference([openshift_ca_host])}}"
+ | oo_collect(attribute='inventory_hostname', filters={'master_certs_missing':True}) }}"
delegate_to: "{{ openshift_ca_host }}"
run_once: true
@@ -94,8 +94,8 @@
creates: "{{ openshift_generated_configs_dir }}/master-{{ hostvars[item].openshift.common.hostname }}/openshift-master.kubeconfig"
with_items: "{{ hostvars
| oo_select_keys(groups['oo_masters_to_config'])
- | oo_collect(attribute='inventory_hostname', filters={'master_certs_missing':True})
- | difference([openshift_ca_host])}}"
+ | oo_collect(attribute='inventory_hostname', filters={'master_certs_missing':True}) }}"
+ when: item != openshift_ca_host
delegate_to: "{{ openshift_ca_host }}"
run_once: true
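Editor's note: the hunks above drop the `difference([openshift_ca_host])` list filter and instead skip the CA host inside the loop. A minimal sketch of that pattern with hypothetical hostnames:
```
- debug:
    msg: "would sign certificates for {{ item }}"
  when: item != openshift_ca_host
  with_items:
    - master1.example.com
    - master2.example.com
    - "{{ openshift_ca_host }}"   # skipped by the when above
```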
diff --git a/roles/openshift_master_facts/filter_plugins/openshift_master.py b/roles/openshift_master_facts/filter_plugins/openshift_master.py
index e570392ff..65f85066e 100644
--- a/roles/openshift_master_facts/filter_plugins/openshift_master.py
+++ b/roles/openshift_master_facts/filter_plugins/openshift_master.py
@@ -1,6 +1,5 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
-# vim: expandtab:tabstop=4:shiftwidth=4
'''
Custom filters for use in openshift-master
'''
diff --git a/roles/openshift_master_facts/tasks/main.yml b/roles/openshift_master_facts/tasks/main.yml
index 6f8f09b22..f048e0aef 100644
--- a/roles/openshift_master_facts/tasks/main.yml
+++ b/roles/openshift_master_facts/tasks/main.yml
@@ -128,10 +128,10 @@
- name: Test if scheduler config is readable
fail:
msg: "Unknown scheduler config apiVersion {{ openshift_master_scheduler_config.apiVersion }}"
- when: "{{ openshift_master_scheduler_current_config.apiVersion | default(None) != 'v1' }}"
+ when: openshift_master_scheduler_current_config.apiVersion | default(None) != 'v1'
- name: Set current scheduler predicates and priorities
set_fact:
openshift_master_scheduler_current_predicates: "{{ openshift_master_scheduler_current_config.predicates }}"
openshift_master_scheduler_current_priorities: "{{ openshift_master_scheduler_current_config.priorities }}"
- when: "{{ scheduler_config_stat.stat.exists }}"
+ when: scheduler_config_stat.stat.exists
diff --git a/roles/openshift_metrics/README.md b/roles/openshift_metrics/README.md
index f4c61a75e..84503217b 100644
--- a/roles/openshift_metrics/README.md
+++ b/roles/openshift_metrics/README.md
@@ -76,7 +76,7 @@ openshift_metrics_<COMPONENT>_(limits|requests)_(memory|cpu): <VALUE>
```
e.g
```
-openshift_metrics_cassandra_limits_memory: 1G
+openshift_metrics_cassandra_limits_memory: 1Gi
openshift_metrics_hawkular_requests_cpu: 100
```
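Editor's note: the README correction above switches the memory example to a binary-suffix quantity (`Gi`), matching how Kubernetes interprets memory limits. Hypothetical inventory values following the documented `openshift_metrics_<COMPONENT>_(limits|requests)_(memory|cpu)` pattern:
```
openshift_metrics_cassandra_limits_memory: 1Gi      # Gi (binary), not G (decimal)
openshift_metrics_cassandra_requests_memory: 512Mi
```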
diff --git a/roles/openshift_metrics/handlers/main.yml b/roles/openshift_metrics/handlers/main.yml
index ffb812271..69c5a1663 100644
--- a/roles/openshift_metrics/handlers/main.yml
+++ b/roles/openshift_metrics/handlers/main.yml
@@ -4,6 +4,15 @@
when: (openshift.master.ha is not defined or not openshift.master.ha | bool) and (not (master_service_status_changed | default(false) | bool))
notify: Verify API Server
+- name: restart master api
+ systemd: name={{ openshift.common.service_type }}-master-api state=restarted
+ when: (openshift.master.ha is defined and openshift.master.ha | bool) and (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
+ notify: Verify API Server
+
+- name: restart master controllers
+ systemd: name={{ openshift.common.service_type }}-master-controllers state=restarted
+ when: (openshift.master.ha is defined and openshift.master.ha | bool) and (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
+
- name: Verify API Server
# Using curl here since the uri module requires python-httplib2 and
# wait_for port doesn't provide health information.
diff --git a/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml b/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml
index 07b7eca33..fb4fe2f03 100644
--- a/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml
+++ b/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml
@@ -14,20 +14,22 @@
changed_when: no
- name: generate password for hawkular metrics
- local_action: copy dest="{{ local_tmp.stdout}}/{{ item }}.pwd" content="{{ 15 | oo_random_word }}"
+ local_action: copy dest="{{ local_tmp.stdout }}/{{ item }}.pwd" content="{{ 15 | oo_random_word }}"
with_items:
- hawkular-metrics
+- local_action: slurp src="{{ local_tmp.stdout }}/hawkular-metrics.pwd"
+ register: hawkular_metrics_pwd
+ no_log: true
+
- name: generate htpasswd file for hawkular metrics
- local_action: >
- shell htpasswd -ci
- '{{ local_tmp.stdout }}/hawkular-metrics.htpasswd' hawkular
- < '{{ local_tmp.stdout }}/hawkular-metrics.pwd'
+ local_action: htpasswd path="{{ local_tmp.stdout }}/hawkular-metrics.htpasswd" name=hawkular password="{{ hawkular_metrics_pwd.content | b64decode }}"
+ no_log: true
- name: copy local generated passwords to target
copy:
- src: "{{local_tmp.stdout}}/{{item}}"
- dest: "{{mktemp.stdout}}/{{item}}"
+ src: "{{ local_tmp.stdout }}/{{ item }}"
+ dest: "{{ mktemp.stdout }}/{{ item }}"
with_items:
- hawkular-metrics.pwd
- hawkular-metrics.htpasswd
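Editor's note: the password handling above replaces a shelled-out `htpasswd -ci` with the `slurp` and `htpasswd` modules, keeping the generated secret out of the logs via `no_log`. A sketch of the same pattern with hypothetical paths (the `htpasswd` module needs passlib on the control host):
```
# slurp returns the file contents base64-encoded, hence the b64decode below.
- local_action: slurp src=/tmp/example/hawkular-metrics.pwd
  register: hawkular_metrics_pwd
  no_log: true

- local_action: htpasswd path=/tmp/example/hawkular-metrics.htpasswd name=hawkular password="{{ hawkular_metrics_pwd.content | b64decode }}"
  no_log: true
```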
diff --git a/roles/openshift_metrics/tasks/install_cassandra.yaml b/roles/openshift_metrics/tasks/install_cassandra.yaml
index a467c1a51..3b4e8560f 100644
--- a/roles/openshift_metrics/tasks/install_cassandra.yaml
+++ b/roles/openshift_metrics/tasks/install_cassandra.yaml
@@ -23,7 +23,7 @@
changed_when: false
- set_fact: openshift_metrics_cassandra_pvc_prefix="hawkular-metrics"
- when: "not openshift_metrics_cassandra_pvc_prefix or openshift_metrics_cassandra_pvc_prefix == ''"
+ when: not openshift_metrics_cassandra_pvc_prefix or openshift_metrics_cassandra_pvc_prefix == ''
- name: generate hawkular-cassandra persistent volume claims
template:
diff --git a/roles/openshift_metrics/tasks/install_heapster.yaml b/roles/openshift_metrics/tasks/install_heapster.yaml
index d13b96be1..0eb852d91 100644
--- a/roles/openshift_metrics/tasks/install_heapster.yaml
+++ b/roles/openshift_metrics/tasks/install_heapster.yaml
@@ -22,7 +22,7 @@
with_items:
- hawkular-metrics-certs
- hawkular-metrics-account
- when: "not {{ openshift_metrics_heapster_standalone | bool }}"
+ when: not openshift_metrics_heapster_standalone | bool
- name: Generating serviceaccount for heapster
template: src=serviceaccount.j2 dest={{mktemp.stdout}}/templates/metrics-{{obj_name}}-sa.yaml
diff --git a/roles/openshift_metrics/tasks/install_metrics.yaml b/roles/openshift_metrics/tasks/install_metrics.yaml
index ffe6f63a2..74eb56713 100644
--- a/roles/openshift_metrics/tasks/install_metrics.yaml
+++ b/roles/openshift_metrics/tasks/install_metrics.yaml
@@ -10,11 +10,11 @@
- cassandra
loop_control:
loop_var: include_file
- when: "not {{ openshift_metrics_heapster_standalone | bool }}"
+ when: not openshift_metrics_heapster_standalone | bool
- name: Install Heapster Standalone
include: install_heapster.yaml
- when: "{{ openshift_metrics_heapster_standalone | bool }}"
+ when: openshift_metrics_heapster_standalone | bool
- find: paths={{ mktemp.stdout }}/templates patterns=*.yaml
register: object_def_files
@@ -48,7 +48,7 @@
- name: Scaling down cluster to recognize changes
include: stop_metrics.yaml
- when: "{{ existing_metrics_rc.stdout_lines | length > 0 }}"
+ when: existing_metrics_rc.stdout_lines | length > 0
- name: Scaling up cluster
include: start_metrics.yaml
diff --git a/roles/openshift_metrics/tasks/main.yaml b/roles/openshift_metrics/tasks/main.yaml
index c8d222c60..e8b7bea5c 100644
--- a/roles/openshift_metrics/tasks/main.yaml
+++ b/roles/openshift_metrics/tasks/main.yaml
@@ -19,7 +19,7 @@
- name: Create temp directory for all our templates
file: path={{mktemp.stdout}}/templates state=directory mode=0755
changed_when: False
- when: "{{ openshift_metrics_install_metrics | bool }}"
+ when: openshift_metrics_install_metrics | bool
- name: Create temp directory local on control node
local_action: command mktemp -d
diff --git a/roles/openshift_metrics/tasks/start_metrics.yaml b/roles/openshift_metrics/tasks/start_metrics.yaml
index b5a1c8f06..2037e8dc3 100644
--- a/roles/openshift_metrics/tasks/start_metrics.yaml
+++ b/roles/openshift_metrics/tasks/start_metrics.yaml
@@ -20,7 +20,7 @@
loop_control:
loop_var: object
when: metrics_cassandra_rc is defined
- changed_when: "{{metrics_cassandra_rc | length > 0 }}"
+ changed_when: metrics_cassandra_rc | length > 0
- command: >
{{openshift.common.client_binary}}
@@ -42,7 +42,7 @@
with_items: "{{metrics_metrics_rc.stdout_lines}}"
loop_control:
loop_var: object
- changed_when: "{{metrics_metrics_rc | length > 0 }}"
+ changed_when: metrics_metrics_rc | length > 0
- command: >
{{openshift.common.client_binary}}
diff --git a/roles/openshift_metrics/tasks/stop_metrics.yaml b/roles/openshift_metrics/tasks/stop_metrics.yaml
index f69bb0f11..9a2ce9267 100644
--- a/roles/openshift_metrics/tasks/stop_metrics.yaml
+++ b/roles/openshift_metrics/tasks/stop_metrics.yaml
@@ -41,7 +41,7 @@
with_items: "{{metrics_hawkular_rc.stdout_lines}}"
loop_control:
loop_var: object
- changed_when: "{{metrics_hawkular_rc | length > 0 }}"
+ changed_when: metrics_hawkular_rc | length > 0
- command: >
{{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig
@@ -63,4 +63,4 @@
loop_control:
loop_var: object
when: metrics_cassandra_rc is defined
- changed_when: "{{metrics_cassandra_rc | length > 0 }}"
+ changed_when: metrics_cassandra_rc | length > 0
diff --git a/roles/openshift_metrics/tasks/uninstall_metrics.yaml b/roles/openshift_metrics/tasks/uninstall_metrics.yaml
index 8a6be6237..9a5d52eb6 100644
--- a/roles/openshift_metrics/tasks/uninstall_metrics.yaml
+++ b/roles/openshift_metrics/tasks/uninstall_metrics.yaml
@@ -8,7 +8,7 @@
delete --ignore-not-found --selector=metrics-infra
all,sa,secrets,templates,routes,pvc,rolebindings,clusterrolebindings
register: delete_metrics
- changed_when: "delete_metrics.stdout != 'No resources found'"
+ changed_when: delete_metrics.stdout != 'No resources found'
- name: remove rolebindings
command: >
@@ -16,4 +16,4 @@
delete --ignore-not-found
rolebinding/hawkular-view
clusterrolebinding/heapster-cluster-reader
- changed_when: "delete_metrics.stdout != 'No resources found'"
+ changed_when: delete_metrics.stdout != 'No resources found'
diff --git a/roles/openshift_metrics/tasks/update_master_config.yaml b/roles/openshift_metrics/tasks/update_master_config.yaml
index 20fc45fd4..be1e3c3a0 100644
--- a/roles/openshift_metrics/tasks/update_master_config.yaml
+++ b/roles/openshift_metrics/tasks/update_master_config.yaml
@@ -4,6 +4,9 @@
dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
yaml_key: assetConfig.metricsPublicURL
yaml_value: "https://{{ openshift_metrics_hawkular_hostname}}/hawkular/metrics"
- notify: restart master
+ notify:
+ - restart master
+ - restart master api
+ - restart master controllers
tags:
- - update_master_config
+ - update_master_config
diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml
index bd95f8526..bf66ef1d6 100644
--- a/roles/openshift_node/defaults/main.yml
+++ b/roles/openshift_node/defaults/main.yml
@@ -8,4 +8,4 @@ os_firewall_allow:
port: 443/tcp
- service: OpenShift OVS sdn
port: 4789/udp
- when: openshift.node.use_openshift_sdn | bool
+ when: openshift.common.use_openshift_sdn | bool
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index d7fea2f32..656874f56 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -147,7 +147,7 @@
- regex: '^AWS_SECRET_ACCESS_KEY='
line: "AWS_SECRET_ACCESS_KEY={{ openshift_cloudprovider_aws_secret_key | default('') }}"
no_log: True
- when: "openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined"
+ when: openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined
notify:
- restart node
diff --git a/roles/openshift_node/tasks/systemd_units.yml b/roles/openshift_node/tasks/systemd_units.yml
index 52482d09b..a0fbf7dfc 100644
--- a/roles/openshift_node/tasks/systemd_units.yml
+++ b/roles/openshift_node/tasks/systemd_units.yml
@@ -25,6 +25,13 @@
- openshift.common.is_containerized | bool
- not openshift.common.is_node_system_container | bool
+- name: Install Node service file
+ template:
+ dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service"
+ src: "{{ openshift.common.service_type }}-node.service"
+ register: install_node_result
+ when: not openshift.common.is_containerized | bool
+
- name: Create the openvswitch service env file
template:
src: openvswitch.sysconfig.j2
@@ -115,6 +122,5 @@
- name: Reload systemd units
command: systemctl daemon-reload
- when: (openshift.common.is_containerized | bool and (install_node_result | changed or install_ovs_sysconfig | changed or install_node_dep_result | changed)) or install_oom_fix_result | changed
notify:
- restart node
diff --git a/roles/openshift_node/templates/atomic-openshift-node.service b/roles/openshift_node/templates/atomic-openshift-node.service
new file mode 100644
index 000000000..80232094a
--- /dev/null
+++ b/roles/openshift_node/templates/atomic-openshift-node.service
@@ -0,0 +1,22 @@
+[Unit]
+Description=Atomic OpenShift Node
+After={{ openshift.docker.service_name }}.service
+After=openvswitch.service
+Wants={{ openshift.docker.service_name }}.service
+Documentation=https://github.com/openshift/origin
+
+[Service]
+Type=notify
+EnvironmentFile=/etc/sysconfig/atomic-openshift-node
+Environment=GOTRACEBACK=crash
+ExecStart=/usr/bin/openshift start node --config=${CONFIG_FILE} $OPTIONS
+LimitNOFILE=65536
+LimitCORE=infinity
+WorkingDirectory=/var/lib/origin/
+SyslogIdentifier=atomic-openshift-node
+Restart=always
+RestartSec=5s
+OOMScoreAdjust=-999
+
+[Install]
+WantedBy=multi-user.target
diff --git a/roles/openshift_node/templates/openshift.docker.node.dep.service b/roles/openshift_node/templates/openshift.docker.node.dep.service
index 0fb34cffd..4c47f8c0d 100644
--- a/roles/openshift_node/templates/openshift.docker.node.dep.service
+++ b/roles/openshift_node/templates/openshift.docker.node.dep.service
@@ -1,6 +1,6 @@
[Unit]
-Requires=docker.service
-After=docker.service
+Requires={{ openshift.docker.service_name }}.service
+After={{ openshift.docker.service_name }}.service
PartOf={{ openshift.common.service_type }}-node.service
Before={{ openshift.common.service_type }}-node.service
diff --git a/roles/openshift_node/templates/openshift.docker.node.service b/roles/openshift_node/templates/openshift.docker.node.service
index c42bdb7c3..06782cb8b 100644
--- a/roles/openshift_node/templates/openshift.docker.node.service
+++ b/roles/openshift_node/templates/openshift.docker.node.service
@@ -1,9 +1,9 @@
[Unit]
After={{ openshift.common.service_type }}-master.service
-After=docker.service
+After={{ openshift.docker.service_name }}.service
After=openvswitch.service
-PartOf=docker.service
-Requires=docker.service
+PartOf={{ openshift.docker.service_name }}.service
+Requires={{ openshift.docker.service_name }}.service
{% if openshift.common.use_openshift_sdn %}
Requires=openvswitch.service
After=ovsdb-server.service
@@ -25,4 +25,4 @@ Restart=always
RestartSec=5s
[Install]
-WantedBy=docker.service
+WantedBy={{ openshift.docker.service_name }}.service
diff --git a/roles/openshift_node/templates/openvswitch.docker.service b/roles/openshift_node/templates/openvswitch.docker.service
index 1e1f8967d..34aaaabd6 100644
--- a/roles/openshift_node/templates/openvswitch.docker.service
+++ b/roles/openshift_node/templates/openvswitch.docker.service
@@ -1,7 +1,7 @@
[Unit]
-After=docker.service
-Requires=docker.service
-PartOf=docker.service
+After={{ openshift.docker.service_name }}.service
+Requires={{ openshift.docker.service_name }}.service
+PartOf={{ openshift.docker.service_name }}.service
[Service]
EnvironmentFile=/etc/sysconfig/openvswitch
@@ -14,4 +14,4 @@ Restart=always
RestartSec=5s
[Install]
-WantedBy=docker.service
+WantedBy={{ openshift.docker.service_name }}.service
diff --git a/roles/openshift_node/templates/origin-node.service b/roles/openshift_node/templates/origin-node.service
new file mode 100644
index 000000000..8047301e6
--- /dev/null
+++ b/roles/openshift_node/templates/origin-node.service
@@ -0,0 +1,21 @@
+[Unit]
+Description=Origin Node
+After={{ openshift.docker.service_name }}.service
+Wants={{ openshift.docker.service_name }}.service
+Documentation=https://github.com/openshift/origin
+
+[Service]
+Type=notify
+EnvironmentFile=/etc/sysconfig/origin-node
+Environment=GOTRACEBACK=crash
+ExecStart=/usr/bin/openshift start node --config=${CONFIG_FILE} $OPTIONS
+LimitNOFILE=65536
+LimitCORE=infinity
+WorkingDirectory=/var/lib/origin/
+SyslogIdentifier=origin-node
+Restart=always
+RestartSec=5s
+OOMScoreAdjust=-999
+
+[Install]
+WantedBy=multi-user.target
diff --git a/roles/openshift_node_certificates/handlers/main.yml b/roles/openshift_node_certificates/handlers/main.yml
index 1aa826c09..502f80434 100644
--- a/roles/openshift_node_certificates/handlers/main.yml
+++ b/roles/openshift_node_certificates/handlers/main.yml
@@ -6,6 +6,6 @@
- name: restart docker after updating ca trust
systemd:
- name: docker
+ name: "{{ openshift.docker.service_name }}"
state: restarted
when: not openshift_certificates_redeploy | default(false) | bool
diff --git a/roles/openshift_node_upgrade/tasks/restart.yml b/roles/openshift_node_upgrade/tasks/restart.yml
index a9fab74e1..e576228ba 100644
--- a/roles/openshift_node_upgrade/tasks/restart.yml
+++ b/roles/openshift_node_upgrade/tasks/restart.yml
@@ -6,7 +6,9 @@
# - openshift.master.api_port
- name: Restart docker
- service: name=docker state=restarted
+ service:
+ name: "{{ openshift.docker.service_name }}"
+ state: restarted
- name: Update docker facts
openshift_facts:
diff --git a/roles/openshift_node_upgrade/templates/openshift.docker.node.dep.service b/roles/openshift_node_upgrade/templates/openshift.docker.node.dep.service
index 0fb34cffd..4c47f8c0d 100644
--- a/roles/openshift_node_upgrade/templates/openshift.docker.node.dep.service
+++ b/roles/openshift_node_upgrade/templates/openshift.docker.node.dep.service
@@ -1,6 +1,6 @@
[Unit]
-Requires=docker.service
-After=docker.service
+Requires={{ openshift.docker.service_name }}.service
+After={{ openshift.docker.service_name }}.service
PartOf={{ openshift.common.service_type }}-node.service
Before={{ openshift.common.service_type }}-node.service
diff --git a/roles/openshift_node_upgrade/templates/openshift.docker.node.service b/roles/openshift_node_upgrade/templates/openshift.docker.node.service
index 0ff398152..a9b393652 100644
--- a/roles/openshift_node_upgrade/templates/openshift.docker.node.service
+++ b/roles/openshift_node_upgrade/templates/openshift.docker.node.service
@@ -1,9 +1,9 @@
[Unit]
After={{ openshift.common.service_type }}-master.service
-After=docker.service
+After={{ openshift.docker.service_name }}.service
After=openvswitch.service
-PartOf=docker.service
-Requires=docker.service
+PartOf={{ openshift.docker.service_name }}.service
+Requires={{ openshift.docker.service_name }}.service
{% if openshift.common.use_openshift_sdn %}
Requires=openvswitch.service
{% endif %}
@@ -23,4 +23,4 @@ Restart=always
RestartSec=5s
[Install]
-WantedBy=docker.service
+WantedBy={{ openshift.docker.service_name }}.service
diff --git a/roles/openshift_node_upgrade/templates/openvswitch.docker.service b/roles/openshift_node_upgrade/templates/openvswitch.docker.service
index 1e1f8967d..34aaaabd6 100644
--- a/roles/openshift_node_upgrade/templates/openvswitch.docker.service
+++ b/roles/openshift_node_upgrade/templates/openvswitch.docker.service
@@ -1,7 +1,7 @@
[Unit]
-After=docker.service
-Requires=docker.service
-PartOf=docker.service
+After={{ openshift.docker.service_name }}.service
+Requires={{ openshift.docker.service_name }}.service
+PartOf={{ openshift.docker.service_name }}.service
[Service]
EnvironmentFile=/etc/sysconfig/openvswitch
@@ -14,4 +14,4 @@ Restart=always
RestartSec=5s
[Install]
-WantedBy=docker.service
+WantedBy={{ openshift.docker.service_name }}.service
diff --git a/roles/openshift_provisioners/tasks/install_efs.yaml b/roles/openshift_provisioners/tasks/install_efs.yaml
index 57279c665..b53b6afa1 100644
--- a/roles/openshift_provisioners/tasks/install_efs.yaml
+++ b/roles/openshift_provisioners/tasks/install_efs.yaml
@@ -65,6 +65,6 @@
{{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy
add-scc-to-user anyuid system:serviceaccount:{{openshift_provisioners_project}}:provisioners-efs
register: efs_output
- failed_when: "efs_output.rc == 1 and 'exists' not in efs_output.stderr"
+ failed_when: efs_output.rc == 1 and 'exists' not in efs_output.stderr
check_mode: no
when: efs_anyuid.stdout.find("system:serviceaccount:{{openshift_provisioners_project}}:provisioners-efs") == -1
diff --git a/roles/openshift_repos/tasks/main.yaml b/roles/openshift_repos/tasks/main.yaml
index 84a0905cc..9a9436fcb 100644
--- a/roles/openshift_repos/tasks/main.yaml
+++ b/roles/openshift_repos/tasks/main.yaml
@@ -40,4 +40,21 @@
- openshift_deployment_type == 'origin'
- openshift_enable_origin_repo | default(true) | bool
+ # Singleton block
+ - when: r_osr_first_run | default(true)
+ block:
+ - name: Ensure clean repo cache in the event repos have been changed manually
+ debug:
+ msg: "First run of openshift_repos"
+ changed_when: true
+ notify: refresh cache
+
+ - name: Set fact r_osr_first_run false
+ set_fact:
+ r_osr_first_run: false
+
+ # Force running ALL handlers now, because we expect repo cache to be cleared
+ # if changes have been made.
+ - meta: flush_handlers
+
when: not ostree_booted.stat.exists
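Editor's note: the new block above is a first-run guard — it records a change once, flushes the notified handler immediately, then flips the guard so later invocations skip the block. A generic sketch of the same pattern (the `refresh cache` handler name comes from the role; the guard variable here is hypothetical):
```
- when: example_first_run | default(true)
  block:
    - name: Signal that the repo cache must be rebuilt
      debug:
        msg: "first run"
      changed_when: true
      notify: refresh cache

    - name: Disarm the guard for subsequent runs
      set_fact:
        example_first_run: false

    # Run the notified handler now instead of at the end of the play.
    - meta: flush_handlers
```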
diff --git a/roles/openshift_storage_glusterfs/README.md b/roles/openshift_storage_glusterfs/README.md
index cf0fb94c9..7b310dbf8 100644
--- a/roles/openshift_storage_glusterfs/README.md
+++ b/roles/openshift_storage_glusterfs/README.md
@@ -8,10 +8,24 @@ Requirements
* Ansible 2.2
+Host Groups
+-----------
+
+The following group is expected to be populated for this role to run:
+
+* `[glusterfs]`
+
+Additionally, the following group may be specified either in addition to or
+instead of the above group to deploy a GlusterFS cluster for use by a natively
+hosted Docker registry:
+
+* `[glusterfs_registry]`
+
Role Variables
--------------
-From this role:
+This role has the following variables that control the integration of a
+GlusterFS cluster into a new or existing OpenShift cluster:
| Name | Default value | |
|--------------------------------------------------|-------------------------|-----------------------------------------|
@@ -31,6 +45,25 @@ From this role:
| openshift_storage_glusterfs_heketi_url | Undefined | URL for the heketi REST API, dynamically determined in native mode
| openshift_storage_glusterfs_heketi_wipe | False | Destroy any existing heketi resources, defaults to the value of `openshift_storage_glusterfs_wipe`
+Each role variable also has a corresponding variable to optionally configure a
+separate GlusterFS cluster for use as storage for an integrated Docker
+registry. These variables start with the prefix
+`openshift_storage_glusterfs_registry_` and, for the most part, default to the
+values in their corresponding non-registry variables. The following variables
+are an exception:
+
+| Name | Default value | |
+|---------------------------------------------------|-----------------------|-----------------------------------------|
+| openshift_storage_glusterfs_registry_namespace | registry namespace | Default is to use the hosted registry's namespace, otherwise 'default'
+| openshift_storage_glusterfs_registry_nodeselector | 'storagenode=registry'| This allows for the logical separation of the registry GlusterFS cluster from any regular-use GlusterFS clusters
+
+Additionally, this role's behavior responds to the following registry-specific
+variable:
+
+| Name | Default value | Description |
+|----------------------------------------------|---------------|------------------------------------------------------------------------------|
+| openshift_hosted_registry_glusterfs_swap | False | Whether to swap an existing registry's storage volume for a GlusterFS volume |
+
Dependencies
------------
@@ -47,6 +80,7 @@ Example Playbook
hosts: oo_first_master
roles:
- role: openshift_storage_glusterfs
+ when: groups.oo_glusterfs_to_config | default([]) | count > 0
```
License
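The Host Groups section above names the `[glusterfs]` and `[glusterfs_registry]` groups, and the deploy tasks later in this patch read `glusterfs_devices` (and optionally `glusterfs_ip`) from each host's variables. A hypothetical inventory sketch under those assumptions; host names, addresses and device paths are illustrative only:

```
# Conventional INI inventory sketch
[glusterfs]
node1.example.com glusterfs_devices='[ "/dev/sdb" ]'
node2.example.com glusterfs_devices='[ "/dev/sdb" ]'
node3.example.com glusterfs_devices='[ "/dev/sdb" ]'

[glusterfs_registry]
node4.example.com glusterfs_ip=192.0.2.14 glusterfs_devices='[ "/dev/sdc" ]'
node5.example.com glusterfs_ip=192.0.2.15 glusterfs_devices='[ "/dev/sdc" ]'
node6.example.com glusterfs_ip=192.0.2.16 glusterfs_devices='[ "/dev/sdc" ]'
```

Presumably the calling playbooks map these groups onto `g_glusterfs_hosts` and `g_glusterfs_registry_hosts`, which the new glusterfs_config.yml and glusterfs_registry.yml task files later in this patch consume.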
diff --git a/roles/openshift_storage_glusterfs/defaults/main.yml b/roles/openshift_storage_glusterfs/defaults/main.yml
index ade850747..ebe9ca30b 100644
--- a/roles/openshift_storage_glusterfs/defaults/main.yml
+++ b/roles/openshift_storage_glusterfs/defaults/main.yml
@@ -2,7 +2,7 @@
openshift_storage_glusterfs_timeout: 300
openshift_storage_glusterfs_namespace: 'default'
openshift_storage_glusterfs_is_native: True
-openshift_storage_glusterfs_nodeselector: "{{ openshift_storage_glusterfs_nodeselector_label | default('storagenode=glusterfs') | map_from_pairs }}"
+openshift_storage_glusterfs_nodeselector: 'storagenode=glusterfs'
openshift_storage_glusterfs_image: "{{ 'rhgs3/rhgs-server-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'gluster/gluster-centos' | quote }}"
openshift_storage_glusterfs_version: 'latest'
openshift_storage_glusterfs_wipe: False
@@ -15,3 +15,22 @@ openshift_storage_glusterfs_heketi_admin_key: ''
openshift_storage_glusterfs_heketi_user_key: ''
openshift_storage_glusterfs_heketi_topology_load: True
openshift_storage_glusterfs_heketi_wipe: "{{ openshift_storage_glusterfs_wipe }}"
+openshift_storage_glusterfs_heketi_url: "{{ omit }}"
+
+openshift_storage_glusterfs_registry_timeout: "{{ openshift_storage_glusterfs_timeout }}"
+openshift_storage_glusterfs_registry_namespace: "{{ openshift.hosted.registry.namespace | default('default') }}"
+openshift_storage_glusterfs_registry_is_native: "{{ openshift_storage_glusterfs_is_native }}"
+openshift_storage_glusterfs_registry_nodeselector: 'storagenode=registry'
+openshift_storage_glusterfs_registry_image: "{{ openshift_storage_glusterfs_image }}"
+openshift_storage_glusterfs_registry_version: "{{ openshift_storage_glusterfs_version }}"
+openshift_storage_glusterfs_registry_wipe: "{{ openshift_storage_glusterfs_wipe }}"
+openshift_storage_glusterfs_registry_heketi_is_native: "{{ openshift_storage_glusterfs_heketi_is_native }}"
+openshift_storage_glusterfs_registry_heketi_is_missing: "{{ openshift_storage_glusterfs_heketi_is_missing }}"
+openshift_storage_glusterfs_registry_heketi_deploy_is_missing: "{{ openshift_storage_glusterfs_heketi_deploy_is_missing }}"
+openshift_storage_glusterfs_registry_heketi_image: "{{ openshift_storage_glusterfs_heketi_image }}"
+openshift_storage_glusterfs_registry_heketi_version: "{{ openshift_storage_glusterfs_heketi_version }}"
+openshift_storage_glusterfs_registry_heketi_admin_key: "{{ openshift_storage_glusterfs_heketi_admin_key }}"
+openshift_storage_glusterfs_registry_heketi_user_key: "{{ openshift_storage_glusterfs_heketi_user_key }}"
+openshift_storage_glusterfs_registry_heketi_topology_load: "{{ openshift_storage_glusterfs_heketi_topology_load }}"
+openshift_storage_glusterfs_registry_heketi_wipe: "{{ openshift_storage_glusterfs_heketi_wipe }}"
+openshift_storage_glusterfs_registry_heketi_url: "{{ openshift_storage_glusterfs_heketi_url | default(omit) }}"
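Because every `openshift_storage_glusterfs_registry_*` default above simply mirrors its non-registry counterpart, an inventory only needs to set the values that should differ between the two clusters. A small hypothetical vars sketch; the namespace value is illustrative, and `openshift_hosted_registry_glusterfs_swap` is the registry-swap switch documented in the README above:

```
# Hypothetical group_vars entries -- override only what differs
openshift_storage_glusterfs_wipe: True                  # inherited by the registry cluster via the defaults above
openshift_storage_glusterfs_timeout: 600                # likewise inherited unless overridden
openshift_storage_glusterfs_registry_namespace: registry-storage
openshift_hosted_registry_glusterfs_swap: True          # swap existing registry storage for a GlusterFS volume
```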
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
new file mode 100644
index 000000000..fa5fa2cb0
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
@@ -0,0 +1,166 @@
+---
+- name: Verify target namespace exists
+ oc_project:
+ state: present
+ name: "{{ glusterfs_namespace }}"
+ when: glusterfs_is_native or glusterfs_heketi_is_native
+
+- include: glusterfs_deploy.yml
+ when: glusterfs_is_native
+
+- name: Make sure heketi-client is installed
+ package: name=heketi-client state=present
+
+- name: Delete pre-existing heketi resources
+ oc_obj:
+ namespace: "{{ glusterfs_namespace }}"
+ kind: "{{ item.kind }}"
+ name: "{{ item.name | default(omit) }}"
+ selector: "{{ item.selector | default(omit) }}"
+ state: absent
+ with_items:
+ - kind: "template,route,service,dc,jobs,secret"
+ selector: "deploy-heketi"
+ - kind: "template,route,service,dc"
+ name: "heketi"
+ - kind: "svc,ep"
+ name: "heketi-storage-endpoints"
+ - kind: "sa"
+ name: "heketi-service-account"
+ failed_when: False
+ when: glusterfs_heketi_wipe
+
+- name: Wait for deploy-heketi pods to terminate
+ oc_obj:
+ namespace: "{{ glusterfs_namespace }}"
+ kind: pod
+ state: list
+ selector: "glusterfs=deploy-heketi-pod"
+ register: heketi_pod
+ until: "heketi_pod.results.results[0]['items'] | count == 0"
+ delay: 10
+ retries: "{{ (glusterfs_timeout / 10) | int }}"
+ when: glusterfs_heketi_wipe
+
+- name: Wait for heketi pods to terminate
+ oc_obj:
+ namespace: "{{ glusterfs_namespace }}"
+ kind: pod
+ state: list
+ selector: "glusterfs=heketi-pod"
+ register: heketi_pod
+ until: "heketi_pod.results.results[0]['items'] | count == 0"
+ delay: 10
+ retries: "{{ (glusterfs_timeout / 10) | int }}"
+ when: glusterfs_heketi_wipe
+
+- name: Create heketi service account
+ oc_serviceaccount:
+ namespace: "{{ glusterfs_namespace }}"
+ name: heketi-service-account
+ state: present
+ when: glusterfs_heketi_is_native
+
+- name: Add heketi service account to privileged SCC
+ oc_adm_policy_user:
+ user: "system:serviceaccount:{{ glusterfs_namespace }}:heketi-service-account"
+ resource_kind: scc
+ resource_name: privileged
+ state: present
+ when: glusterfs_heketi_is_native
+
+- name: Allow heketi service account to view/edit pods
+ oc_adm_policy_user:
+ user: "system:serviceaccount:{{ glusterfs_namespace }}:heketi-service-account"
+ resource_kind: role
+ resource_name: edit
+ state: present
+ when: glusterfs_heketi_is_native
+
+- name: Check for existing deploy-heketi pod
+ oc_obj:
+ namespace: "{{ glusterfs_namespace }}"
+ state: list
+ kind: pod
+ selector: "glusterfs=deploy-heketi-pod,deploy-heketi=support"
+ register: heketi_pod
+ when: glusterfs_heketi_is_native
+
+- name: Check whether deploy-heketi needs to be deployed
+ set_fact:
+ glusterfs_heketi_deploy_is_missing: False
+ when:
+ - "glusterfs_heketi_is_native"
+ - "heketi_pod.results.results[0]['items'] | count > 0"
+ # deploy-heketi is not missing when there are one or more pods with matching labels whose 'Ready' status is True
+ - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0"
+
+- name: Check for existing heketi pod
+ oc_obj:
+ namespace: "{{ glusterfs_namespace }}"
+ state: list
+ kind: pod
+ selector: "glusterfs=heketi-pod"
+ register: heketi_pod
+ when: glusterfs_heketi_is_native
+
+- name: Check whether heketi needs to be deployed
+ set_fact:
+ glusterfs_heketi_is_missing: False
+ when:
+ - "glusterfs_heketi_is_native"
+ - "heketi_pod.results.results[0]['items'] | count > 0"
+ # heketi is not missing when there are one or more pods with matching labels whose 'Ready' status is True
+ - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0"
+
+- include: heketi_deploy_part1.yml
+ when:
+ - glusterfs_heketi_is_native
+ - glusterfs_heketi_deploy_is_missing
+ - glusterfs_heketi_is_missing
+
+- name: Determine heketi URL
+ oc_obj:
+ namespace: "{{ glusterfs_namespace }}"
+ state: list
+ kind: ep
+ selector: "glusterfs in (deploy-heketi-service, heketi-service)"
+ register: heketi_url
+ until:
+ - "heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip != ''"
+ - "heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port != ''"
+ delay: 10
+ retries: "{{ (glusterfs_timeout / 10) | int }}"
+ when:
+ - glusterfs_heketi_is_native
+ - glusterfs_heketi_url is undefined
+
+- name: Set heketi URL
+ set_fact:
+ glusterfs_heketi_url: "{{ heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip }}:{{ heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port }}"
+ when:
+ - glusterfs_heketi_is_native
+ - glusterfs_heketi_url is undefined
+
+- name: Verify heketi service
+ command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' cluster list"
+ changed_when: False
+
+- name: Generate topology file
+ template:
+ src: "{{ openshift.common.examples_content_version }}/topology.json.j2"
+ dest: "{{ mktemp.stdout }}/topology.json"
+ when:
+ - glusterfs_heketi_topology_load
+
+- name: Load heketi topology
+ command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' topology load --json={{ mktemp.stdout }}/topology.json 2>&1"
+ register: topology_load
+ failed_when: "topology_load.rc != 0 or 'Unable' in topology_load.stdout"
+ when:
+ - glusterfs_heketi_topology_load
+
+- include: heketi_deploy_part2.yml
+ when:
+ - glusterfs_heketi_is_native
+ - glusterfs_heketi_is_missing
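Several tasks in this new file (and in the heketi/deploy task files that follow) decide readiness with the same filter chain: collect `status.conditions` from each pod, keep the `status` of the `Ready` condition, cast to booleans and count the truthy ones. A sketch of that chain run against hand-written sample data, assuming the repository's `oo_collect` filter plugin is on the filter path and behaves as its use in these tasks implies:

```
# Sketch: evaluate the readiness chain against a fabricated pod list whose
# structure only mimics the fields the chain actually touches.
- hosts: localhost
  gather_facts: false
  vars:
    sample_items:
      - status:
          conditions:
            - { type: Ready, status: "True" }
      - status:
          conditions:
            - { type: Ready, status: "False" }
  tasks:
    - debug:
        # Expected to print 1: only the first sample pod reports Ready == "True"
        msg: "{{ sample_items | oo_collect(attribute='status.conditions')
                 | oo_collect(attribute='status', filters={'type': 'Ready'})
                 | map('bool') | select | list | count }}"
```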
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
new file mode 100644
index 000000000..451990240
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
@@ -0,0 +1,22 @@
+---
+- set_fact:
+ glusterfs_timeout: "{{ openshift_storage_glusterfs_timeout }}"
+ glusterfs_namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ glusterfs_is_native: "{{ openshift_storage_glusterfs_is_native }}"
+ glusterfs_nodeselector: "{{ openshift_storage_glusterfs_nodeselector | map_from_pairs }}"
+ glusterfs_image: "{{ openshift_storage_glusterfs_image }}"
+ glusterfs_version: "{{ openshift_storage_glusterfs_version }}"
+ glusterfs_wipe: "{{ openshift_storage_glusterfs_wipe }}"
+ glusterfs_heketi_is_native: "{{ openshift_storage_glusterfs_heketi_is_native }}"
+ glusterfs_heketi_is_missing: "{{ openshift_storage_glusterfs_heketi_is_missing }}"
+ glusterfs_heketi_deploy_is_missing: "{{ openshift_storage_glusterfs_heketi_deploy_is_missing }}"
+ glusterfs_heketi_image: "{{ openshift_storage_glusterfs_heketi_image }}"
+ glusterfs_heketi_version: "{{ openshift_storage_glusterfs_heketi_version }}"
+ glusterfs_heketi_admin_key: "{{ openshift_storage_glusterfs_heketi_admin_key }}"
+ glusterfs_heketi_user_key: "{{ openshift_storage_glusterfs_heketi_user_key }}"
+ glusterfs_heketi_topology_load: "{{ openshift_storage_glusterfs_heketi_topology_load }}"
+ glusterfs_heketi_wipe: "{{ openshift_storage_glusterfs_heketi_wipe }}"
+ glusterfs_heketi_url: "{{ openshift_storage_glusterfs_heketi_url }}"
+ glusterfs_nodes: "{{ g_glusterfs_hosts }}"
+
+- include: glusterfs_common.yml
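glusterfs_config.yml (like glusterfs_registry.yml below) pushes the nodeselector string through `map_from_pairs` so the shared tasks can treat it as a dict, which is what the one-key assertion in glusterfs_deploy.yml checks. A sketch under the assumption that `map_from_pairs` turns a `key=value` selector string into a one-entry dict, as its use in this patch implies; it also assumes the repository's filter plugins are available to the play:

```
- hosts: localhost
  gather_facts: false
  tasks:
    - set_fact:
        # 'storagenode=glusterfs' is the default defined earlier in this patch
        glusterfs_nodeselector: "{{ 'storagenode=glusterfs' | map_from_pairs }}"
    - debug:
        # Expected (under the stated assumption): a dict with exactly one key
        msg: "{{ glusterfs_nodeselector }} has {{ glusterfs_nodeselector.keys() | count }} key(s)"
```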
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
index 2b35e5137..579112349 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
@@ -1,44 +1,44 @@
---
- assert:
- that: "openshift_storage_glusterfs_nodeselector.keys() | count == 1"
+ that: "glusterfs_nodeselector.keys() | count == 1"
msg: Only one GlusterFS nodeselector key pair should be provided
- assert:
- that: "groups.oo_glusterfs_to_config | count >= 3"
+ that: "glusterfs_nodes | count >= 3"
msg: There must be at least three GlusterFS nodes specified
- name: Delete pre-existing GlusterFS resources
oc_obj:
- namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ namespace: "{{ glusterfs_namespace }}"
kind: "template,daemonset"
name: glusterfs
state: absent
- when: openshift_storage_glusterfs_wipe
+ when: glusterfs_wipe
- name: Unlabel any existing GlusterFS nodes
oc_label:
name: "{{ item }}"
kind: node
state: absent
- labels: "{{ openshift_storage_glusterfs_nodeselector | oo_dict_to_list_of_dict }}"
+ labels: "{{ glusterfs_nodeselector | oo_dict_to_list_of_dict }}"
with_items: "{{ groups.all }}"
- when: openshift_storage_glusterfs_wipe
+ when: glusterfs_wipe
- name: Delete pre-existing GlusterFS config
file:
path: /var/lib/glusterd
state: absent
delegate_to: "{{ item }}"
- with_items: "{{ groups.oo_glusterfs_to_config | default([]) }}"
- when: openshift_storage_glusterfs_wipe
+ with_items: "{{ glusterfs_nodes | default([]) }}"
+ when: glusterfs_wipe
- name: Get GlusterFS storage devices state
command: "pvdisplay -C --noheadings -o pv_name,vg_name {% for device in hostvars[item].glusterfs_devices %}{{ device }} {% endfor %}"
register: devices_info
delegate_to: "{{ item }}"
- with_items: "{{ groups.oo_glusterfs_to_config | default([]) }}"
+ with_items: "{{ glusterfs_nodes | default([]) }}"
failed_when: False
- when: openshift_storage_glusterfs_wipe
+ when: glusterfs_wipe
# Runs "vgremove -fy <vg>; pvremove -fy <pv>" for every device found to be a physical volume.
- name: Clear GlusterFS storage device contents
@@ -46,12 +46,12 @@
delegate_to: "{{ item.item }}"
with_items: "{{ devices_info.results }}"
when:
- - openshift_storage_glusterfs_wipe
+ - glusterfs_wipe
- item.stdout_lines | count > 0
- name: Add service accounts to privileged SCC
oc_adm_policy_user:
- user: "system:serviceaccount:{{ openshift_storage_glusterfs_namespace }}:{{ item }}"
+ user: "system:serviceaccount:{{ glusterfs_namespace }}:{{ item }}"
resource_kind: scc
resource_name: privileged
state: present
@@ -64,8 +64,8 @@
name: "{{ glusterfs_host }}"
kind: node
state: add
- labels: "{{ openshift_storage_glusterfs_nodeselector | oo_dict_to_list_of_dict }}"
- with_items: "{{ groups.oo_glusterfs_to_config | default([]) }}"
+ labels: "{{ glusterfs_nodeselector | oo_dict_to_list_of_dict }}"
+ with_items: "{{ glusterfs_nodes | default([]) }}"
loop_control:
loop_var: glusterfs_host
@@ -76,7 +76,7 @@
- name: Create GlusterFS template
oc_obj:
- namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ namespace: "{{ glusterfs_namespace }}"
kind: template
name: glusterfs
state: present
@@ -85,16 +85,16 @@
- name: Deploy GlusterFS pods
oc_process:
- namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ namespace: "{{ glusterfs_namespace }}"
template_name: "glusterfs"
create: True
params:
- IMAGE_NAME: "{{ openshift_storage_glusterfs_image }}"
- IMAGE_VERSION: "{{ openshift_storage_glusterfs_version }}"
+ IMAGE_NAME: "{{ glusterfs_image }}"
+ IMAGE_VERSION: "{{ glusterfs_version }}"
- name: Wait for GlusterFS pods
oc_obj:
- namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ namespace: "{{ glusterfs_namespace }}"
kind: pod
state: list
selector: "glusterfs-node=pod"
@@ -102,6 +102,6 @@
until:
- "glusterfs_pods.results.results[0]['items'] | count > 0"
# There must be as many pods with 'Ready' status True as there are nodes expecting those pods
- - "glusterfs_pods.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == groups.oo_glusterfs_to_config | count"
+ - "glusterfs_pods.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == glusterfs_nodes | count"
delay: 10
- retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}"
+ retries: "{{ (glusterfs_timeout / 10) | int }}"
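The (un)labeling tasks above hand the nodeselector dict to `oc_label` through `oo_dict_to_list_of_dict`. A sketch of that wiring in isolation; the node name is hypothetical, and the assumption that the filter expands a dict into a list of single key/value entries is inferred from its use here, not confirmed by this hunk:

```
- name: Label a GlusterFS node (sketch)
  oc_label:
    name: node1.example.com          # hypothetical node name
    kind: node
    state: add
    # Assumed expansion: {'storagenode': 'glusterfs'} ->
    #   [{'key': 'storagenode', 'value': 'glusterfs'}]
    labels: "{{ {'storagenode': 'glusterfs'} | oo_dict_to_list_of_dict }}"
```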
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
index 9f092d5d5..392f4b65b 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
@@ -1,7 +1,30 @@
---
+- set_fact:
+ glusterfs_timeout: "{{ openshift_storage_glusterfs_registry_timeout }}"
+ glusterfs_namespace: "{{ openshift_storage_glusterfs_registry_namespace }}"
+ glusterfs_is_native: "{{ openshift_storage_glusterfs_registry_is_native }}"
+ glusterfs_nodeselector: "{{ openshift_storage_glusterfs_registry_nodeselector | map_from_pairs }}"
+ glusterfs_image: "{{ openshift_storage_glusterfs_registry_image }}"
+ glusterfs_version: "{{ openshift_storage_glusterfs_registry_version }}"
+ glusterfs_wipe: "{{ openshift_storage_glusterfs_registry_wipe }}"
+ glusterfs_heketi_is_native: "{{ openshift_storage_glusterfs_registry_heketi_is_native }}"
+ glusterfs_heketi_is_missing: "{{ openshift_storage_glusterfs_registry_heketi_is_missing }}"
+ glusterfs_heketi_deploy_is_missing: "{{ openshift_storage_glusterfs_registry_heketi_deploy_is_missing }}"
+ glusterfs_heketi_image: "{{ openshift_storage_glusterfs_registry_heketi_image }}"
+ glusterfs_heketi_version: "{{ openshift_storage_glusterfs_registry_heketi_version }}"
+ glusterfs_heketi_admin_key: "{{ openshift_storage_glusterfs_registry_heketi_admin_key }}"
+ glusterfs_heketi_user_key: "{{ openshift_storage_glusterfs_registry_heketi_user_key }}"
+ glusterfs_heketi_topology_load: "{{ openshift_storage_glusterfs_registry_heketi_topology_load }}"
+ glusterfs_heketi_wipe: "{{ openshift_storage_glusterfs_registry_heketi_wipe }}"
+ glusterfs_heketi_url: "{{ openshift_storage_glusterfs_registry_heketi_url }}"
+ glusterfs_nodes: "{{ g_glusterfs_registry_hosts }}"
+
+- include: glusterfs_common.yml
+ when: g_glusterfs_registry_hosts != g_glusterfs_hosts
+
- name: Delete pre-existing GlusterFS registry resources
oc_obj:
- namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ namespace: "{{ glusterfs_namespace }}"
kind: "{{ item.kind }}"
name: "{{ item.name | default(omit) }}"
selector: "{{ item.selector | default(omit) }}"
@@ -23,7 +46,7 @@
- name: Create GlusterFS registry endpoints
oc_obj:
- namespace: "{{ openshift.hosted.registry.namespace | default('default') }}"
+ namespace: "{{ glusterfs_namespace }}"
state: present
kind: endpoints
name: glusterfs-registry-endpoints
@@ -32,7 +55,7 @@
- name: Create GlusterFS registry service
oc_obj:
- namespace: "{{ openshift.hosted.registry.namespace | default('default') }}"
+ namespace: "{{ glusterfs_namespace }}"
state: present
kind: service
name: glusterfs-registry-endpoints
@@ -40,9 +63,9 @@
- "{{ mktemp.stdout }}/glusterfs-registry-service.yml"
- name: Check if GlusterFS registry volume exists
- command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' volume list"
+ command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' volume list"
register: registry_volume
- name: Create GlusterFS registry volume
- command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' volume create --size={{ openshift.hosted.registry.storage.volume.size | replace('Gi','') }} --name={{ openshift.hosted.registry.storage.glusterfs.path }}"
- when: "'{{ openshift.hosted.registry.storage.glusterfs.path }}' not in registry_volume.stdout"
+ command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' volume create --size={{ openshift.hosted.registry.storage.volume.size | replace('Gi','') }} --name={{ openshift.hosted.registry.storage.glusterfs.path }}"
+ when: "openshift.hosted.registry.storage.glusterfs.path not in registry_volume.stdout"
diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml
index 76ae1db75..c14fcfb15 100644
--- a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml
+++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml
@@ -8,7 +8,7 @@
- name: Create deploy-heketi resources
oc_obj:
- namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ namespace: "{{ glusterfs_namespace }}"
kind: template
name: deploy-heketi
state: present
@@ -17,18 +17,18 @@
- name: Deploy deploy-heketi pod
oc_process:
- namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ namespace: "{{ glusterfs_namespace }}"
template_name: "deploy-heketi"
create: True
params:
- IMAGE_NAME: "{{ openshift_storage_glusterfs_heketi_image }}"
- IMAGE_VERSION: "{{ openshift_storage_glusterfs_heketi_version }}"
- HEKETI_USER_KEY: "{{ openshift_storage_glusterfs_heketi_user_key }}"
- HEKETI_ADMIN_KEY: "{{ openshift_storage_glusterfs_heketi_admin_key }}"
+ IMAGE_NAME: "{{ glusterfs_heketi_image }}"
+ IMAGE_VERSION: "{{ glusterfs_heketi_version }}"
+ HEKETI_USER_KEY: "{{ glusterfs_heketi_user_key }}"
+ HEKETI_ADMIN_KEY: "{{ glusterfs_heketi_admin_key }}"
- name: Wait for deploy-heketi pod
oc_obj:
- namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ namespace: "{{ glusterfs_namespace }}"
kind: pod
state: list
selector: "glusterfs=deploy-heketi-pod,deploy-heketi=support"
@@ -38,4 +38,4 @@
# Pod's 'Ready' status must be True
- "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"
delay: 10
- retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}"
+ retries: "{{ (glusterfs_timeout / 10) | int }}"
diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
index 84b85e95d..64410a9ab 100644
--- a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
+++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
@@ -1,6 +1,6 @@
---
- name: Create heketi DB volume
- command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' setup-openshift-heketi-storage --listfile {{ mktemp.stdout }}/heketi-storage.json"
+ command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' setup-openshift-heketi-storage --listfile {{ mktemp.stdout }}/heketi-storage.json"
register: setup_storage
failed_when: False
@@ -13,12 +13,12 @@
# Need `command` here because heketi-storage.json contains multiple objects.
- name: Copy heketi DB to GlusterFS volume
- command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f {{ mktemp.stdout }}/heketi-storage.json -n {{ openshift_storage_glusterfs_namespace }}"
- when: "setup_storage.rc == 0"
+ command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f {{ mktemp.stdout }}/heketi-storage.json -n {{ glusterfs_namespace }}"
+ when: setup_storage.rc == 0
- name: Wait for copy job to finish
oc_obj:
- namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ namespace: "{{ glusterfs_namespace }}"
kind: job
state: list
name: "heketi-storage-copy-job"
@@ -28,17 +28,17 @@
# Pod's 'Complete' status must be True
- "heketi_job.results.results | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Complete'}) | map('bool') | select | list | count == 1"
delay: 10
- retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}"
+ retries: "{{ (glusterfs_timeout / 10) | int }}"
failed_when:
- "'results' in heketi_job.results"
- "heketi_job.results.results | count > 0"
# Fail when pod's 'Failed' status is True
- "heketi_job.results.results | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Failed'}) | map('bool') | select | list | count == 1"
- when: "setup_storage.rc == 0"
+ when: setup_storage.rc == 0
- name: Delete deploy resources
oc_obj:
- namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ namespace: "{{ glusterfs_namespace }}"
kind: "{{ item.kind }}"
name: "{{ item.name | default(omit) }}"
selector: "{{ item.selector | default(omit) }}"
@@ -55,7 +55,7 @@
- name: Create heketi resources
oc_obj:
- namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ namespace: "{{ glusterfs_namespace }}"
kind: template
name: heketi
state: present
@@ -64,18 +64,18 @@
- name: Deploy heketi pod
oc_process:
- namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ namespace: "{{ glusterfs_namespace }}"
template_name: "heketi"
create: True
params:
- IMAGE_NAME: "{{ openshift_storage_glusterfs_heketi_image }}"
- IMAGE_VERSION: "{{ openshift_storage_glusterfs_heketi_version }}"
- HEKETI_USER_KEY: "{{ openshift_storage_glusterfs_heketi_user_key }}"
- HEKETI_ADMIN_KEY: "{{ openshift_storage_glusterfs_heketi_admin_key }}"
+ IMAGE_NAME: "{{ glusterfs_heketi_image }}"
+ IMAGE_VERSION: "{{ glusterfs_heketi_version }}"
+ HEKETI_USER_KEY: "{{ glusterfs_heketi_user_key }}"
+ HEKETI_ADMIN_KEY: "{{ glusterfs_heketi_admin_key }}"
- name: Wait for heketi pod
oc_obj:
- namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ namespace: "{{ glusterfs_namespace }}"
kind: pod
state: list
selector: "glusterfs=heketi-pod"
@@ -85,11 +85,11 @@
# Pod's 'Ready' status must be True
- "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"
delay: 10
- retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}"
+ retries: "{{ (glusterfs_timeout / 10) | int }}"
- name: Determine heketi URL
oc_obj:
- namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ namespace: "{{ glusterfs_namespace }}"
state: list
kind: ep
selector: "glusterfs=heketi-service"
@@ -98,12 +98,12 @@
- "heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip != ''"
- "heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port != ''"
delay: 10
- retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}"
+ retries: "{{ (glusterfs_timeout / 10) | int }}"
- name: Set heketi URL
set_fact:
- openshift_storage_glusterfs_heketi_url: "{{ heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip }}:{{ heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port }}"
+ glusterfs_heketi_url: "{{ heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip }}:{{ heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port }}"
- name: Verify heketi service
- command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' cluster list"
+ command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' cluster list"
changed_when: False
diff --git a/roles/openshift_storage_glusterfs/tasks/main.yml b/roles/openshift_storage_glusterfs/tasks/main.yml
index 265a3cc6e..ebd8db453 100644
--- a/roles/openshift_storage_glusterfs/tasks/main.yml
+++ b/roles/openshift_storage_glusterfs/tasks/main.yml
@@ -5,174 +5,14 @@
changed_when: False
check_mode: no
-- name: Verify target namespace exists
- oc_project:
- state: present
- name: "{{ openshift_storage_glusterfs_namespace }}"
- when: openshift_storage_glusterfs_is_native or openshift_storage_glusterfs_heketi_is_native
-
-- include: glusterfs_deploy.yml
- when: openshift_storage_glusterfs_is_native
-
-- name: Make sure heketi-client is installed
- package: name=heketi-client state=present
-
-- name: Delete pre-existing heketi resources
- oc_obj:
- namespace: "{{ openshift_storage_glusterfs_namespace }}"
- kind: "{{ item.kind }}"
- name: "{{ item.name | default(omit) }}"
- selector: "{{ item.selector | default(omit) }}"
- state: absent
- with_items:
- - kind: "template,route,service,jobs,dc,secret"
- selector: "deploy-heketi"
- - kind: "template,route,dc,service"
- name: "heketi"
- - kind: "svc,ep"
- name: "heketi-storage-endpoints"
- - kind: "sa"
- name: "heketi-service-account"
- failed_when: False
- when: openshift_storage_glusterfs_heketi_wipe
-
-- name: Wait for deploy-heketi pods to terminate
- oc_obj:
- namespace: "{{ openshift_storage_glusterfs_namespace }}"
- kind: pod
- state: list
- selector: "glusterfs=deploy-heketi-pod"
- register: heketi_pod
- until: "heketi_pod.results.results[0]['items'] | count == 0"
- delay: 10
- retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}"
- when: openshift_storage_glusterfs_heketi_wipe
-
-- name: Wait for heketi pods to terminate
- oc_obj:
- namespace: "{{ openshift_storage_glusterfs_namespace }}"
- kind: pod
- state: list
- selector: "glusterfs=heketi-pod"
- register: heketi_pod
- until: "heketi_pod.results.results[0]['items'] | count == 0"
- delay: 10
- retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}"
- when: openshift_storage_glusterfs_heketi_wipe
-
-- name: Create heketi service account
- oc_serviceaccount:
- namespace: "{{ openshift_storage_glusterfs_namespace }}"
- name: heketi-service-account
- state: present
- when: openshift_storage_glusterfs_heketi_is_native
-
-- name: Add heketi service account to privileged SCC
- oc_adm_policy_user:
- user: "system:serviceaccount:{{ openshift_storage_glusterfs_namespace }}:heketi-service-account"
- resource_kind: scc
- resource_name: privileged
- state: present
- when: openshift_storage_glusterfs_heketi_is_native
-
-- name: Allow heketi service account to view/edit pods
- oc_adm_policy_user:
- user: "system:serviceaccount:{{ openshift_storage_glusterfs_namespace }}:heketi-service-account"
- resource_kind: role
- resource_name: edit
- state: present
- when: openshift_storage_glusterfs_heketi_is_native
-
-- name: Check for existing deploy-heketi pod
- oc_obj:
- namespace: "{{ openshift_storage_glusterfs_namespace }}"
- state: list
- kind: pod
- selector: "glusterfs=deploy-heketi-pod,deploy-heketi=support"
- register: heketi_pod
- when: openshift_storage_glusterfs_heketi_is_native
-
-- name: Check if need to deploy deploy-heketi
- set_fact:
- openshift_storage_glusterfs_heketi_deploy_is_missing: False
- when:
- - "openshift_storage_glusterfs_heketi_is_native"
- - "heketi_pod.results.results[0]['items'] | count > 0"
- # deploy-heketi is not missing when there are one or more pods with matching labels whose 'Ready' status is True
- - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0"
-
-- name: Check for existing heketi pod
- oc_obj:
- namespace: "{{ openshift_storage_glusterfs_namespace }}"
- state: list
- kind: pod
- selector: "glusterfs=heketi-pod"
- register: heketi_pod
- when: openshift_storage_glusterfs_heketi_is_native
-
-- name: Check if need to deploy heketi
- set_fact:
- openshift_storage_glusterfs_heketi_is_missing: False
+- include: glusterfs_config.yml
when:
- - "openshift_storage_glusterfs_heketi_is_native"
- - "heketi_pod.results.results[0]['items'] | count > 0"
- # heketi is not missing when there are one or more pods with matching labels whose 'Ready' status is True
- - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0"
-
-- include: heketi_deploy_part1.yml
- when:
- - openshift_storage_glusterfs_heketi_is_native
- - openshift_storage_glusterfs_heketi_deploy_is_missing
- - openshift_storage_glusterfs_heketi_is_missing
-
-- name: Determine heketi URL
- oc_obj:
- namespace: "{{ openshift_storage_glusterfs_namespace }}"
- state: list
- kind: ep
- selector: "glusterfs in (deploy-heketi-service, heketi-service)"
- register: heketi_url
- until:
- - "heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip != ''"
- - "heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port != ''"
- delay: 10
- retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}"
- when:
- - openshift_storage_glusterfs_heketi_is_native
- - openshift_storage_glusterfs_heketi_url is undefined
-
-- name: Set heketi URL
- set_fact:
- openshift_storage_glusterfs_heketi_url: "{{ heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip }}:{{ heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port }}"
- when:
- - openshift_storage_glusterfs_heketi_is_native
- - openshift_storage_glusterfs_heketi_url is undefined
-
-- name: Verify heketi service
- command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' cluster list"
- changed_when: False
-
-- name: Generate topology file
- template:
- src: "{{ openshift.common.examples_content_version }}/topology.json.j2"
- dest: "{{ mktemp.stdout }}/topology.json"
- when:
- - openshift_storage_glusterfs_is_native
- - openshift_storage_glusterfs_heketi_topology_load
-
-- name: Load heketi topology
- command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' topology load --json={{ mktemp.stdout }}/topology.json 2>&1"
- register: topology_load
- failed_when: "topology_load.rc != 0 or 'Unable' in topology_load.stdout"
- when:
- - openshift_storage_glusterfs_is_native
- - openshift_storage_glusterfs_heketi_topology_load
-
-- include: heketi_deploy_part2.yml
- when: openshift_storage_glusterfs_heketi_is_native and openshift_storage_glusterfs_heketi_is_missing
+ - g_glusterfs_hosts | default([]) | count > 0
- include: glusterfs_registry.yml
- when: "openshift.hosted.registry.storage.kind == 'glusterfs'"
+ when:
+ - g_glusterfs_registry_hosts | default([]) | count > 0
+ - "openshift.hosted.registry.storage.kind == 'glusterfs' or openshift.hosted.registry.glusterfs.swap"
- name: Delete temp directory
file:
diff --git a/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-endpoints.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-endpoints.yml.j2
index d72d085c9..605627ab5 100644
--- a/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-endpoints.yml.j2
+++ b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-endpoints.yml.j2
@@ -4,7 +4,7 @@ metadata:
name: glusterfs-registry-endpoints
subsets:
- addresses:
-{% for node in groups.oo_glusterfs_to_config %}
+{% for node in glusterfs_nodes %}
- ip: {{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }}
{% endfor %}
ports:
diff --git a/roles/openshift_storage_glusterfs/templates/v3.6/topology.json.j2 b/roles/openshift_storage_glusterfs/templates/v3.6/topology.json.j2
index eb5b4544f..33d8f9b36 100644
--- a/roles/openshift_storage_glusterfs/templates/v3.6/topology.json.j2
+++ b/roles/openshift_storage_glusterfs/templates/v3.6/topology.json.j2
@@ -1,7 +1,7 @@
{
"clusters": [
{%- set clusters = {} -%}
-{%- for node in groups.oo_glusterfs_to_config -%}
+{%- for node in glusterfs_nodes -%}
{%- set cluster = hostvars[node].glusterfs_cluster if 'glusterfs_cluster' in node else '1' -%}
{%- if cluster in clusters -%}
{%- set _dummy = clusters[cluster].append(node) -%}
diff --git a/roles/openshift_version/meta/main.yml b/roles/openshift_version/meta/main.yml
index 37c80c29e..ca896addd 100644
--- a/roles/openshift_version/meta/main.yml
+++ b/roles/openshift_version/meta/main.yml
@@ -16,3 +16,4 @@ dependencies:
- role: openshift_docker_facts
- role: docker
when: openshift.common.is_containerized | default(False) | bool and not skip_docker_role | default(False) | bool
+- role: lib_utils
diff --git a/roles/openshift_version/tasks/main.yml b/roles/openshift_version/tasks/main.yml
index fa9b20e92..2e9b4cad3 100644
--- a/roles/openshift_version/tasks/main.yml
+++ b/roles/openshift_version/tasks/main.yml
@@ -86,8 +86,24 @@
include: set_version_rpm.yml
when: not is_containerized | bool
-- name: Set openshift_version for containerized installation
- include: set_version_containerized.yml
+- block:
+ - name: Set openshift_version for containerized installation
+ include: set_version_containerized.yml
+ - name: Get available {{ openshift.common.service_type}} version
+ repoquery:
+ name: "{{ openshift.common.service_type}}"
+ ignore_excluders: true
+ register: rpm_results
+ - fail:
+ msg: "Package {{ openshift.common.service_type}} not found"
+ when: not rpm_results.results.package_found
+ - set_fact:
+ openshift_rpm_version: "{{ rpm_results.results.versions.available_versions.0 | default('0.0', True) }}"
+ - name: Fail if rpm version and docker image version are different
+ fail:
+ msg: "OCP rpm version {{ openshift_rpm_version }} is different from OCP image version {{ openshift_version }}"
+ # The comparison assumes both versions use the same string representation
+ when: openshift_rpm_version != openshift_version
when: is_containerized | bool
# Warn if the user has provided an openshift_image_tag but is not doing a containerized install
diff --git a/roles/openshift_version/tasks/set_version_rpm.yml b/roles/openshift_version/tasks/set_version_rpm.yml
index c7604af1a..c40777bf1 100644
--- a/roles/openshift_version/tasks/set_version_rpm.yml
+++ b/roles/openshift_version/tasks/set_version_rpm.yml
@@ -7,42 +7,18 @@
- openshift_pkg_version is defined
- openshift_version is not defined
-# if {{ openshift.common.service_type}}-excluder is enabled,
-# the repoquery for {{ openshift.common.service_type}} will not work.
-# Thus, create a temporary yum,conf file where exclude= is set to an empty list
-- name: Create temporary yum.conf file
- command: mktemp -d /tmp/yum.conf.XXXXXX
- register: yum_conf_temp_file_result
+- block:
+ - name: Get available {{ openshift.common.service_type}} version
+ repoquery:
+ name: "{{ openshift.common.service_type}}"
+ ignore_excluders: true
+ register: rpm_results
-- set_fact:
- yum_conf_temp_file: "{{yum_conf_temp_file_result.stdout}}/yum.conf"
+ - fail:
+ msg: "Package {{ openshift.common.service_type}} not found"
+ when: not rpm_results.results.package_found
-- name: Copy yum.conf into the temporary file
- copy:
- src: /etc/yum.conf
- dest: "{{ yum_conf_temp_file }}"
- remote_src: True
-
-- name: Clear the exclude= list in the temporary yum.conf
- lineinfile:
- # since ansible 2.3 s/dest/path
- dest: "{{ yum_conf_temp_file }}"
- regexp: '^exclude='
- line: 'exclude='
-
-- name: Gather common package version
- command: >
- {{ repoquery_cmd }} --config "{{ yum_conf_temp_file }}" --qf '%{version}' "{{ openshift.common.service_type}}"
- register: common_version
- failed_when: false
- changed_when: false
- when: openshift_version is not defined
-
-- name: Delete the temporary yum.conf
- file:
- path: "{{ yum_conf_temp_file_result.stdout }}"
- state: absent
-
-- set_fact:
- openshift_version: "{{ common_version.stdout | default('0.0', True) }}"
- when: openshift_version is not defined
+ - set_fact:
+ openshift_version: "{{ rpm_results.results.versions.available_versions.0 | default('0.0', True) }}"
+ when:
+ - openshift_version is not defined
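Both version-detection hunks above replace the temporary-yum.conf workaround with the repository's `repoquery` module and its `ignore_excluders` option, then read the same two result fields. A minimal standalone sketch of that lookup; the package name is a placeholder for `{{ openshift.common.service_type }}` and the fact name is illustrative:

```
- name: Look up the available package version (sketch)
  repoquery:
    name: origin                     # placeholder for {{ openshift.common.service_type }}
    ignore_excluders: true
  register: rpm_results

- fail:
    msg: "Package origin not found"
  when: not rpm_results.results.package_found

- set_fact:
    detected_version: "{{ rpm_results.results.versions.available_versions.0 | default('0.0', True) }}"
```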
diff --git a/roles/os_firewall/README.md b/roles/os_firewall/README.md
index 43db3cc74..e7ef544f4 100644
--- a/roles/os_firewall/README.md
+++ b/roles/os_firewall/README.md
@@ -17,7 +17,7 @@ Role Variables
| Name | Default | |
|---------------------------|---------|----------------------------------------|
-| os_firewall_use_firewalld | True | If false, use iptables |
+| os_firewall_use_firewalld | False | If false, use iptables |
| os_firewall_allow | [] | List of service,port mappings to allow |
| os_firewall_deny | [] | List of service, port mappings to deny |
diff --git a/roles/os_firewall/defaults/main.yml b/roles/os_firewall/defaults/main.yml
index 4c544122f..01859e5fc 100644
--- a/roles/os_firewall/defaults/main.yml
+++ b/roles/os_firewall/defaults/main.yml
@@ -2,6 +2,6 @@
os_firewall_enabled: True
# firewalld is not supported on Atomic Host
# https://bugzilla.redhat.com/show_bug.cgi?id=1403331
-os_firewall_use_firewalld: "{{ False if openshift.common.is_atomic | bool else True }}"
+os_firewall_use_firewalld: "{{ False }}"
os_firewall_allow: []
os_firewall_deny: []
diff --git a/roles/os_firewall/library/os_firewall_manage_iptables.py b/roles/os_firewall/library/os_firewall_manage_iptables.py
index 8d4878fa7..aeee3ede8 100755
--- a/roles/os_firewall/library/os_firewall_manage_iptables.py
+++ b/roles/os_firewall/library/os_firewall_manage_iptables.py
@@ -1,6 +1,5 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
-# vim: expandtab:tabstop=4:shiftwidth=4
# pylint: disable=fixme, missing-docstring
import subprocess
diff --git a/roles/os_firewall/tasks/firewall/firewalld.yml b/roles/os_firewall/tasks/firewall/firewalld.yml
index 4b2979887..509655b0c 100644
--- a/roles/os_firewall/tasks/firewall/firewalld.yml
+++ b/roles/os_firewall/tasks/firewall/firewalld.yml
@@ -14,7 +14,7 @@
- iptables
- ip6tables
register: task_result
- failed_when: "task_result|failed and 'could not' not in task_result.msg|lower"
+ failed_when: task_result|failed and 'could not' not in task_result.msg|lower
- name: Wait 10 seconds after disabling iptables
pause:
diff --git a/roles/os_firewall/tasks/firewall/iptables.yml b/roles/os_firewall/tasks/firewall/iptables.yml
index 38ea2477c..55f2fc471 100644
--- a/roles/os_firewall/tasks/firewall/iptables.yml
+++ b/roles/os_firewall/tasks/firewall/iptables.yml
@@ -7,7 +7,7 @@
enabled: no
masked: yes
register: task_result
- failed_when: "task_result|failed and 'could not' not in task_result.msg|lower"
+ failed_when: task_result|failed and 'could not' not in task_result.msg|lower
- name: Wait 10 seconds after disabling firewalld
pause: