-rw-r--r--  .papr.inventory  |   2
-rw-r--r--  .tito/packages/openshift-ansible  |   2
-rw-r--r--  images/installer/Dockerfile  |   2
-rw-r--r--  images/installer/Dockerfile.rhel7  |   2
-rw-r--r--  inventory/hosts.example  |  10
-rw-r--r--  inventory/hosts.glusterfs.external.example  |   3
-rw-r--r--  inventory/hosts.glusterfs.mixed.example  |   3
-rw-r--r--  inventory/hosts.glusterfs.native.example  |   3
-rw-r--r--  inventory/hosts.glusterfs.registry-only.example  |   3
-rw-r--r--  inventory/hosts.glusterfs.storage-and-registry.example  |   3
-rw-r--r--  openshift-ansible.spec  |  52
-rw-r--r--  playbooks/adhoc/openshift_hosted_logging_efk.yaml  |  16
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/post_control_plane.yml  |  16
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/config.yml  |   2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml  |   2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml  |   1
-rw-r--r--  playbooks/openshift-hosted/deploy_registry.yml  |   4
-rw-r--r--  playbooks/openshift-hosted/deploy_router.yml  |   4
-rw-r--r--  playbooks/openshift-hosted/private/openshift_default_storage_class.yml  |   4
-rw-r--r--  playbooks/openshift-logging/private/config.yml  |   1
-rw-r--r--  playbooks/openshift-master/private/additional_config.yml  |   1
-rw-r--r--  playbooks/openshift-master/private/config.yml  |   3
-rw-r--r--  playbooks/openshift-master/private/restart.yml  |   9
-rw-r--r--  playbooks/openshift-master/private/scaleup.yml  |   1
-rw-r--r--  playbooks/openshift-metrics/private/config.yml  |   1
-rw-r--r--  playbooks/openshift-node/private/restart.yml  |   1
-rw-r--r--  playbooks/openshift-node/redeploy-certificates.yml  |   2
-rw-r--r--  roles/lib_openshift/library/oc_adm_ca_server_cert.py  |   2
-rw-r--r--  roles/lib_openshift/library/oc_adm_csr.py  |   2
-rw-r--r--  roles/lib_openshift/library/oc_adm_manage_node.py  |   2
-rw-r--r--  roles/lib_openshift/library/oc_adm_policy_group.py  |   2
-rw-r--r--  roles/lib_openshift/library/oc_adm_policy_user.py  |   2
-rw-r--r--  roles/lib_openshift/library/oc_adm_registry.py  |   2
-rw-r--r--  roles/lib_openshift/library/oc_adm_router.py  |   2
-rw-r--r--  roles/lib_openshift/library/oc_clusterrole.py  |   2
-rw-r--r--  roles/lib_openshift/library/oc_configmap.py  |   2
-rw-r--r--  roles/lib_openshift/library/oc_edit.py  |   2
-rw-r--r--  roles/lib_openshift/library/oc_env.py  |   2
-rw-r--r--  roles/lib_openshift/library/oc_group.py  |   2
-rw-r--r--  roles/lib_openshift/library/oc_image.py  |   2
-rw-r--r--  roles/lib_openshift/library/oc_label.py  |   2
-rw-r--r--  roles/lib_openshift/library/oc_obj.py  |   2
-rw-r--r--  roles/lib_openshift/library/oc_objectvalidator.py  |   2
-rw-r--r--  roles/lib_openshift/library/oc_process.py  |   2
-rw-r--r--  roles/lib_openshift/library/oc_project.py  |   2
-rw-r--r--  roles/lib_openshift/library/oc_pvc.py  |   2
-rw-r--r--  roles/lib_openshift/library/oc_route.py  |   2
-rw-r--r--  roles/lib_openshift/library/oc_scale.py  |   2
-rw-r--r--  roles/lib_openshift/library/oc_secret.py  |   2
-rw-r--r--  roles/lib_openshift/library/oc_service.py  |   2
-rw-r--r--  roles/lib_openshift/library/oc_serviceaccount.py  |   2
-rw-r--r--  roles/lib_openshift/library/oc_serviceaccount_secret.py  |   2
-rw-r--r--  roles/lib_openshift/library/oc_storageclass.py  |   2
-rw-r--r--  roles/lib_openshift/library/oc_user.py  |   2
-rw-r--r--  roles/lib_openshift/library/oc_version.py  |   2
-rw-r--r--  roles/lib_openshift/library/oc_volume.py  |   2
-rw-r--r--  roles/lib_openshift/src/lib/base.py  |   2
-rw-r--r--  roles/nuage_master/handlers/main.yaml  |   8
-rw-r--r--  roles/openshift_aws/defaults/main.yml  |   4
-rw-r--r--  roles/openshift_cloud_provider/defaults/main.yml  |   4
-rw-r--r--  roles/openshift_cloud_provider/tasks/gce.yml  |  18
-rw-r--r--  roles/openshift_default_storage_class/defaults/main.yml  |   9
-rw-r--r--  roles/openshift_logging/README.md  |   1
-rw-r--r--  roles/openshift_logging/tasks/delete_logging.yaml  |   4
-rw-r--r--  roles/openshift_logging/tasks/install_logging.yaml  |   4
-rw-r--r--  roles/openshift_logging_elasticsearch/tasks/main.yaml  |  13
-rw-r--r--  roles/openshift_logging_elasticsearch/templates/es.j2  |   1
-rw-r--r--  roles/openshift_logging_elasticsearch/templates/passwd.j2  |   2
-rw-r--r--  roles/openshift_logging_kibana/defaults/main.yml  |   3
-rw-r--r--  roles/openshift_logging_kibana/tasks/main.yaml  |   1
-rw-r--r--  roles/openshift_logging_kibana/templates/kibana.j2  |   4
-rw-r--r--  roles/openshift_manage_node/defaults/main.yml  |   4
-rw-r--r--  roles/openshift_manage_node/tasks/config.yml  |   4
-rw-r--r--  roles/openshift_master/tasks/main.yml  |  15
-rw-r--r--  roles/openshift_master/tasks/push_via_dns.yml  |  13
-rw-r--r--  roles/openshift_master/tasks/restart.yml  |  17
-rw-r--r--  roles/openshift_master/tasks/systemd_units.yml  |   5
-rw-r--r--  roles/openshift_metrics/tasks/generate_cassandra_pvcs.yaml  |  46
-rw-r--r--  roles/openshift_metrics/tasks/install_cassandra.yaml  |  35
-rw-r--r--  roles/openshift_metrics/tasks/install_metrics.yaml  |   4
-rw-r--r--  roles/openshift_metrics/tasks/uninstall_metrics.yaml  |   4
-rw-r--r--  roles/openshift_node/defaults/main.yml  |   1
-rw-r--r--  roles/openshift_node/tasks/storage_plugins/iscsi.yml  |  28
-rw-r--r--  roles/openshift_node/templates/multipath.conf.j2  |  15
-rw-r--r--  roles/openshift_sanitize_inventory/tasks/main.yml  |  14
-rw-r--r--  roles/openshift_storage_glusterfs/README.md  | 106
-rw-r--r--  roles/openshift_storage_glusterfs/defaults/main.yml  |  10
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml  |  26
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml  |   2
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml  |   2
-rw-r--r--  roles/openshift_storage_glusterfs/templates/glusterfs.conf  |   5
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.7/gluster-block-storageclass.yml.j2  |  19
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.8/gluster-block-storageclass.yml.j2  |  19
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.9/gluster-block-storageclass.yml.j2  |  19
94 files changed, 498 insertions(+), 204 deletions(-)
diff --git a/.papr.inventory b/.papr.inventory
index c678e76aa..80ad81efa 100644
--- a/.papr.inventory
+++ b/.papr.inventory
@@ -22,6 +22,6 @@ ocp-master
ocp-master
[nodes]
-ocp-master openshift_schedulable=false
+ocp-master openshift_schedulable=true
ocp-node1 openshift_node_labels="{'region':'infra'}"
ocp-node2 openshift_node_labels="{'region':'infra'}"
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index 6300e1179..6b945f491 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.9.0-0.34.0 ./
+3.9.0-0.36.0 ./
diff --git a/images/installer/Dockerfile b/images/installer/Dockerfile
index 22a0d06a0..c9ec8ba41 100644
--- a/images/installer/Dockerfile
+++ b/images/installer/Dockerfile
@@ -10,7 +10,7 @@ COPY images/installer/origin-extra-root /
# install ansible and deps
RUN INSTALL_PKGS="python-lxml python-dns pyOpenSSL python2-cryptography openssl java-1.8.0-openjdk-headless python2-passlib httpd-tools openssh-clients origin-clients" \
&& yum install -y --setopt=tsflags=nodocs $INSTALL_PKGS \
- && EPEL_PKGS="ansible python2-boto python2-boto3 google-cloud-sdk-183.0.0 which" \
+ && EPEL_PKGS="ansible python2-boto python2-boto3 python2-crypto google-cloud-sdk-183.0.0 which" \
&& yum install -y epel-release \
&& yum install -y --setopt=tsflags=nodocs $EPEL_PKGS \
&& EPEL_TESTING_PKGS="python2-libcloud" \
diff --git a/images/installer/Dockerfile.rhel7 b/images/installer/Dockerfile.rhel7
index 3b05c1aa6..5da950744 100644
--- a/images/installer/Dockerfile.rhel7
+++ b/images/installer/Dockerfile.rhel7
@@ -5,7 +5,7 @@ MAINTAINER OpenShift Team <dev@lists.openshift.redhat.com>
USER root
# Playbooks, roles, and their dependencies are installed from packages.
-RUN INSTALL_PKGS="atomic-openshift-utils atomic-openshift-clients python-boto python2-boto3 openssl java-1.8.0-openjdk-headless httpd-tools google-cloud-sdk" \
+RUN INSTALL_PKGS="atomic-openshift-utils atomic-openshift-clients python-boto python2-boto3 python2-crypto openssl java-1.8.0-openjdk-headless httpd-tools google-cloud-sdk" \
&& yum repolist > /dev/null \
&& yum-config-manager --enable rhel-7-server-ose-3.7-rpms \
&& yum-config-manager --enable rhel-7-server-rh-common-rpms \
diff --git a/inventory/hosts.example b/inventory/hosts.example
index f9f331880..82c588100 100644
--- a/inventory/hosts.example
+++ b/inventory/hosts.example
@@ -325,7 +325,10 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# or to one or all of the masters defined in the inventory if no load
# balancer is present.
#openshift_master_cluster_hostname=openshift-ansible.test.example.com
-#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com
+
+# If an external load balancer is used public hostname should resolve to
+# external load balancer address
+#openshift_master_cluster_public_hostname=openshift-ansible.public.example.com
# Configure controller arguments
#osm_controller_args={'resource-quota-sync-period': ['10s']}
@@ -1114,10 +1117,9 @@ ose3-etcd[1:3]-ansible.test.example.com
ose3-lb-ansible.test.example.com containerized=false
# NOTE: Currently we require that masters be part of the SDN which requires that they also be nodes
-# However, in order to ensure that your masters are not burdened with running pods you should
-# make them unschedulable by adding openshift_schedulable=False any node that's also a master.
[nodes]
-ose3-master[1:3]-ansible.test.example.com
+# masters should be schedulable to run web console pods
+ose3-master[1:3]-ansible.test.example.com openshift_schedulable=True
ose3-node[1:2]-ansible.test.example.com openshift_node_labels="{'region': 'primary', 'zone': 'default'}"
[nfs]
diff --git a/inventory/hosts.glusterfs.external.example b/inventory/hosts.glusterfs.external.example
index bf2557cf0..e718e3280 100644
--- a/inventory/hosts.glusterfs.external.example
+++ b/inventory/hosts.glusterfs.external.example
@@ -35,7 +35,8 @@ openshift_storage_glusterfs_heketi_url=172.0.0.1
master
[nodes]
-master openshift_schedulable=False
+# masters should be schedulable to run web console pods
+master openshift_schedulable=True
node0 openshift_schedulable=True
node1 openshift_schedulable=True
node2 openshift_schedulable=True
diff --git a/inventory/hosts.glusterfs.mixed.example b/inventory/hosts.glusterfs.mixed.example
index 8a20a037e..b2fc00c58 100644
--- a/inventory/hosts.glusterfs.mixed.example
+++ b/inventory/hosts.glusterfs.mixed.example
@@ -38,7 +38,8 @@ openshift_storage_glusterfs_heketi_ssh_keyfile=/root/id_rsa
master
[nodes]
-master openshift_schedulable=False
+# masters should be schedulable to run web console pods
+master openshift_schedulable=True
node0 openshift_schedulable=True
node1 openshift_schedulable=True
node2 openshift_schedulable=True
diff --git a/inventory/hosts.glusterfs.native.example b/inventory/hosts.glusterfs.native.example
index 59acf1194..e5f2453ff 100644
--- a/inventory/hosts.glusterfs.native.example
+++ b/inventory/hosts.glusterfs.native.example
@@ -28,7 +28,8 @@ openshift_deployment_type=origin
master
[nodes]
-master openshift_schedulable=False
+# masters should be schedulable to run web console pods
+master openshift_schedulable=True
# A hosted registry, by default, will only be deployed on nodes labeled
# "region=infra".
node0 openshift_schedulable=True
diff --git a/inventory/hosts.glusterfs.registry-only.example b/inventory/hosts.glusterfs.registry-only.example
index 6f33e9f6d..dadb2c93e 100644
--- a/inventory/hosts.glusterfs.registry-only.example
+++ b/inventory/hosts.glusterfs.registry-only.example
@@ -34,7 +34,8 @@ openshift_hosted_registry_storage_kind=glusterfs
master
[nodes]
-master openshift_schedulable=False
+# masters should be schedulable to run web console pods
+master openshift_schedulable=True
# A hosted registry, by default, will only be deployed on nodes labeled
# "region=infra".
node0 openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
diff --git a/inventory/hosts.glusterfs.storage-and-registry.example b/inventory/hosts.glusterfs.storage-and-registry.example
index 1f3a4282a..184cb600b 100644
--- a/inventory/hosts.glusterfs.storage-and-registry.example
+++ b/inventory/hosts.glusterfs.storage-and-registry.example
@@ -35,7 +35,8 @@ openshift_hosted_registry_storage_kind=glusterfs
master
[nodes]
-master openshift_schedulable=False
+# masters should be schedulable to run web console pods
+master openshift_schedulable=True
# It is recommended to not use a single cluster for both general and registry
# storage, so two three-node clusters will be required.
node0 openshift_schedulable=True
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index ab00e9d0f..6fbea5260 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -10,7 +10,7 @@
Name: openshift-ansible
Version: 3.9.0
-Release: 0.34.0%{?dist}
+Release: 0.36.0%{?dist}
Summary: Openshift and Atomic Enterprise Ansible
License: ASL 2.0
URL: https://github.com/openshift/openshift-ansible
@@ -28,6 +28,7 @@ Requires: java-1.8.0-openjdk-headless
Requires: httpd-tools
Requires: libselinux-python
Requires: python-passlib
+Requires: python2-crypto
%description
Openshift and Atomic Enterprise Ansible
@@ -200,6 +201,55 @@ Atomic OpenShift Utilities includes
%changelog
+* Fri Feb 02 2018 Justin Pierce <jupierce@redhat.com> 3.9.0-0.36.0
+- Add missing tasks file (sdodson@redhat.com)
+- Upgrade to migrate to using push to DNS for registries. (kwoodson@redhat.com)
+- Adding defaults for the gcp variables to fix an undefined ansible exception.
+ (kwoodson@redhat.com)
+- Fix vsphere sanitization (sdodson@redhat.com)
+- Set a default for required vsphere variable (sdodson@redhat.com)
+- Add python2-crypto package (ccoleman@redhat.com)
+- hosts.example: clarify usage of openshift_master_cluster_public_hostname
+ (vrutkovs@redhat.com)
+- Conditionally create pvcs for metrics depending on whether or not it already
+ exists (ewolinet@redhat.com)
+- Update hosts examples with a note about scheduling on masters
+ (vrutkovs@redhat.com)
+- Fixing file write issue. (kwoodson@redhat.com)
+- Only perform console configmap ops when >= 3.9 (sdodson@redhat.com)
+- Remove playbooks/adhoc/openshift_hosted_logging_efk.yaml (sdodson@redhat.com)
+- upgrades: use openshift_version as a regexp when checking
+ openshift.common.version (vrutkovs@redhat.com)
+- Don't update master-config.yaml with logging/metrics urls >= 3.9
+ (sdodson@redhat.com)
+- Make master schedulable (vrutkovs@redhat.com)
+- Re-add openshift_aws_elb_cert_arn. (abutcher@redhat.com)
+- Ignore openshift_pkg_version during 3.8 upgrade (rteague@redhat.com)
+- bug 1537857. Fix retrieving prometheus metrics (jcantril@redhat.com)
+- Remove master_ha bool checks (mgugino@redhat.com)
+- Don't restart docker when re-deploying node certificates (sdodson@redhat.com)
+- vsphere storage default add (davis.phillips@gmail.com)
+
+* Wed Jan 31 2018 Justin Pierce <jupierce@redhat.com> 3.9.0-0.35.0
+- add glusterblock support for ansible (m.judeikis@gmail.com)
+- Add a bare minimum localhost hosts file (sdodson@redhat.com)
+- copy etcd client certificates for nuage openshift monitor
+ (siva_teja.areti@nokia.com)
+- fix hostvars parameter name (tzumainn@redhat.com)
+- remove mountpoint parameter (tzumainn@redhat.com)
+- flake cleanup (tzumainn@redhat.com)
+- code simplification and lint cleanup (tzumainn@redhat.com)
+- Symlink kubectl to oc instead of openshift (mfojtik@redhat.com)
+- Rework provisioners vars to support different prefix/version for Origin/OSE
+ (vrutkovs@redhat.com)
+- add cinder mountpoint to inventory (tzumainn@redhat.com)
+- allow setting of kibana env vars (jcantril@redhat.com)
+- No longer compare with legacy hosted var (ewolinet@redhat.com)
+- Preserving ES dc storage type unless overridden by inventory variable
+ (ewolinet@redhat.com)
+- Fix: e2e tests failing due to :1936/metrics unaccessible.
+ (jmencak@redhat.com)
+
* Tue Jan 30 2018 Justin Pierce <jupierce@redhat.com> 3.9.0-0.34.0
- docker_creds: decode docker_config for py3 only if its a string
(vrutkovs@redhat.com)
diff --git a/playbooks/adhoc/openshift_hosted_logging_efk.yaml b/playbooks/adhoc/openshift_hosted_logging_efk.yaml
deleted file mode 100644
index faeb332ad..000000000
--- a/playbooks/adhoc/openshift_hosted_logging_efk.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-- hosts: masters[0]
- roles:
- - role: openshift_logging
- openshift_hosted_logging_cleanup: no
-
-- name: Update master-config for publicLoggingURL
- hosts: masters:!masters[0]
- pre_tasks:
- - set_fact:
- openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ openshift_master_default_subdomain }}"
- tasks:
- - import_role:
- name: openshift_logging
- tasks_from: update_master_config
- when: openshift_hosted_logging_deploy | default(false) | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
index de612da21..f44ab3580 100644
--- a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
@@ -113,6 +113,22 @@
registry_url: "{{ openshift.master.registry_url }}"
openshift_hosted_templates_import_command: replace
+ post_tasks:
+ # we need to migrate customers to the new pattern of pushing to the registry via dns
+ # Step 1: verify the certificates have the docker registry service name
+ - shell: >
+ echo -n | openssl s_client -showcerts -servername docker-registry.default.svc -connect docker-registry.default.svc:5000 | openssl x509 -text | grep -A1 'X509v3 Subject Alternative Name:' | grep -Pq 'DNS:docker-registry\.default\.svc(,|$)'
+ register: cert_output
+
+ # Step 2: Set a fact to be used to determine if we should run the redeploy of registry certs
+ - name: set a fact to include the registry certs playbook if needed
+ set_fact:
+ openshift_hosted_rollout_certs_and_registry: "{{ cert_output.rc == 0 }}"
+
+# Run the redeploy certs based upon the certificates
+- when: hostvars[groups.oo_first_master.0].openshift_hosted_rollout_certs_and_registry
+ import_playbook: ../../../openshift-hosted/redeploy-registry-certificates.yml
+
# Check for warnings to be printed at the end of the upgrade:
- name: Clean up and display warnings
hosts: oo_masters_to_config
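A rough sketch (assuming shell access to the first master and the openssl CLI) of the same certificate check the new post_tasks above perform; grep -Pq exits 0 when the registry serving certificate already lists the docker-registry.default.svc name:

import subprocess

# Mirror of the shell step registered as cert_output above; exit status 0
# means the SAN is present on the registry's serving certificate.
check = (
    "echo -n | openssl s_client -showcerts "
    "-servername docker-registry.default.svc "
    "-connect docker-registry.default.svc:5000 "
    "| openssl x509 -text "
    "| grep -A1 'X509v3 Subject Alternative Name:' "
    "| grep -Pq 'DNS:docker-registry\\.default\\.svc(,|$)'"
)
san_present = subprocess.call(check, shell=True) == 0
print("registry certificate carries the service SAN:", san_present)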
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/config.yml b/playbooks/common/openshift-cluster/upgrades/pre/config.yml
index 2b27f8dd0..edc541ef9 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/config.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/config.yml
@@ -60,7 +60,7 @@
- fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
when:
- l_upgrade_nodes_only | default(False) | bool
- - openshift.common.version != openshift_version
+ - not openshift.common.version | match(openshift_version)
# If we're only upgrading nodes, skip this.
- import_playbook: ../../../../openshift-master/private/validate_restart.yml
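The move from strict equality to the match test matters when openshift_version is a short prefix such as '3.9' while openshift.common.version reports the full '3.9.0'. A minimal sketch of the underlying behaviour with illustrative values (Ansible's match is anchored at the start of the string, like re.match):

import re

installed = "3.9.0"  # illustrative stand-in for openshift.common.version
requested = "3.9"    # illustrative stand-in for openshift_version

print(installed != requested)                # True: the old condition would trip the fail task
print(bool(re.match(requested, installed)))  # True: the new condition accepts the prefix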
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index fdcdb17ff..1dcd9a207 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -72,8 +72,6 @@
# support for optional hooks to be defined.
- name: Upgrade master
hosts: oo_masters_to_config
- vars:
- openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
serial: 1
roles:
- openshift_facts
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
index d7441290d..b3bc121d7 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
@@ -64,6 +64,7 @@
- import_playbook: ../upgrade_control_plane.yml
vars:
openshift_release: '3.8'
+ openshift_pkg_version: ''
when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version | version_compare('3.8','<')
## 3.8 upgrade complete we should now be able to upgrade to 3.9
diff --git a/playbooks/openshift-hosted/deploy_registry.yml b/playbooks/openshift-hosted/deploy_registry.yml
new file mode 100644
index 000000000..2453329dd
--- /dev/null
+++ b/playbooks/openshift-hosted/deploy_registry.yml
@@ -0,0 +1,4 @@
+---
+- import_playbook: ../init/main.yml
+
+- import_playbook: private/openshift_hosted_registry.yml
diff --git a/playbooks/openshift-hosted/deploy_router.yml b/playbooks/openshift-hosted/deploy_router.yml
new file mode 100644
index 000000000..e832eeeea
--- /dev/null
+++ b/playbooks/openshift-hosted/deploy_router.yml
@@ -0,0 +1,4 @@
+---
+- import_playbook: ../init/main.yml
+
+- import_playbook: private/openshift_hosted_router.yml
diff --git a/playbooks/openshift-hosted/private/openshift_default_storage_class.yml b/playbooks/openshift-hosted/private/openshift_default_storage_class.yml
index 62fe0dd60..c59ebcead 100644
--- a/playbooks/openshift-hosted/private/openshift_default_storage_class.yml
+++ b/playbooks/openshift-hosted/private/openshift_default_storage_class.yml
@@ -3,4 +3,6 @@
hosts: oo_first_master
roles:
- role: openshift_default_storage_class
- when: openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce' or openshift_cloudprovider_kind == 'openstack')
+ when:
+ - openshift_cloudprovider_kind is defined
+ - openshift_cloudprovider_kind in ['aws','gce','openstack','vsphere']
diff --git a/playbooks/openshift-logging/private/config.yml b/playbooks/openshift-logging/private/config.yml
index d6b26647c..07aa8bfde 100644
--- a/playbooks/openshift-logging/private/config.yml
+++ b/playbooks/openshift-logging/private/config.yml
@@ -24,6 +24,7 @@
- import_role:
name: openshift_logging
tasks_from: update_master_config
+ when: not openshift.common.version_gte_3_9
- name: Logging Install Checkpoint End
hosts: all
diff --git a/playbooks/openshift-master/private/additional_config.yml b/playbooks/openshift-master/private/additional_config.yml
index 85be0e600..ca514ed26 100644
--- a/playbooks/openshift-master/private/additional_config.yml
+++ b/playbooks/openshift-master/private/additional_config.yml
@@ -16,7 +16,6 @@
vars:
cockpit_plugins: "{{ osm_cockpit_plugins | default(['cockpit-kubernetes']) }}"
etcd_urls: "{{ openshift.master.etcd_urls }}"
- openshift_master_ha: "{{ groups.oo_masters | length > 1 }}"
omc_cluster_hosts: "{{ groups.oo_masters | join(' ')}}"
roles:
- role: openshift_project_request_template
diff --git a/playbooks/openshift-master/private/config.yml b/playbooks/openshift-master/private/config.yml
index 153ea9993..d2fc2eed8 100644
--- a/playbooks/openshift-master/private/config.yml
+++ b/playbooks/openshift-master/private/config.yml
@@ -78,7 +78,6 @@
console_url: "{{ openshift_master_console_url | default(None) }}"
console_use_ssl: "{{ openshift_master_console_use_ssl | default(None) }}"
public_console_url: "{{ openshift_master_public_console_url | default(None) }}"
- ha: "{{ openshift_master_ha | default(groups.oo_masters | length > 1) }}"
master_count: "{{ openshift_master_count | default(groups.oo_masters | length) }}"
- name: Inspect state of first master config settings
@@ -166,7 +165,6 @@
hosts: oo_masters_to_config
any_errors_fatal: true
vars:
- openshift_master_ha: "{{ openshift.master.ha }}"
openshift_master_count: "{{ openshift.master.master_count }}"
openshift_master_session_auth_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_auth_secrets }}"
openshift_master_session_encryption_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_encryption_secrets }}"
@@ -186,6 +184,7 @@
- role: openshift_buildoverrides
- role: nickhammond.logrotate
- role: openshift_master
+ openshift_master_ha: "{{ (groups.oo_masters | length > 1) | bool }}"
openshift_master_hosts: "{{ groups.oo_masters_to_config }}"
r_openshift_master_clean_install: "{{ hostvars[groups.oo_first_master.0].l_clean_install }}"
r_openshift_master_etcd3_storage: "{{ hostvars[groups.oo_first_master.0].l_etcd3_enabled }}"
diff --git a/playbooks/openshift-master/private/restart.yml b/playbooks/openshift-master/private/restart.yml
index 5cb284935..17d90533c 100644
--- a/playbooks/openshift-master/private/restart.yml
+++ b/playbooks/openshift-master/private/restart.yml
@@ -3,16 +3,13 @@
- name: Restart masters
hosts: oo_masters_to_config
- vars:
- openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
serial: 1
- handlers:
- - import_tasks: ../../../roles/openshift_master/handlers/main.yml
roles:
- openshift_facts
post_tasks:
- include_tasks: tasks/restart_hosts.yml
when: openshift_rolling_restart_mode | default('services') == 'system'
-
- - include_tasks: tasks/restart_services.yml
+ - import_role:
+ name: openshift_master
+ tasks_from: restart.yml
when: openshift_rolling_restart_mode | default('services') == 'services'
diff --git a/playbooks/openshift-master/private/scaleup.yml b/playbooks/openshift-master/private/scaleup.yml
index 007b23ea3..20ebf70d3 100644
--- a/playbooks/openshift-master/private/scaleup.yml
+++ b/playbooks/openshift-master/private/scaleup.yml
@@ -8,7 +8,6 @@
- openshift_facts:
role: master
local_facts:
- ha: "{{ openshift_master_ha | default(groups.oo_masters | length > 1) }}"
master_count: "{{ openshift_master_count | default(groups.oo_masters | length) }}"
- name: Update master count
modify_yaml:
diff --git a/playbooks/openshift-metrics/private/config.yml b/playbooks/openshift-metrics/private/config.yml
index 1e237e3f0..889ea77b1 100644
--- a/playbooks/openshift-metrics/private/config.yml
+++ b/playbooks/openshift-metrics/private/config.yml
@@ -25,6 +25,7 @@
import_role:
name: openshift_metrics
tasks_from: update_master_config.yaml
+ when: not openshift.common.version_gte_3_9
- name: Metrics Install Checkpoint End
hosts: all
diff --git a/playbooks/openshift-node/private/restart.yml b/playbooks/openshift-node/private/restart.yml
index 7249ced70..7371bd7ac 100644
--- a/playbooks/openshift-node/private/restart.yml
+++ b/playbooks/openshift-node/private/restart.yml
@@ -16,6 +16,7 @@
until: not (l_docker_restart_docker_in_node_result is failed)
retries: 3
delay: 30
+ when: openshift_node_restart_docker_required | default(True)
- name: Restart containerized services
service:
diff --git a/playbooks/openshift-node/redeploy-certificates.yml b/playbooks/openshift-node/redeploy-certificates.yml
index 8b7272485..cdf816fbf 100644
--- a/playbooks/openshift-node/redeploy-certificates.yml
+++ b/playbooks/openshift-node/redeploy-certificates.yml
@@ -4,3 +4,5 @@
- import_playbook: private/redeploy-certificates.yml
- import_playbook: private/restart.yml
+ vars:
+ openshift_node_restart_docker_required: False
diff --git a/roles/lib_openshift/library/oc_adm_ca_server_cert.py b/roles/lib_openshift/library/oc_adm_ca_server_cert.py
index 05b2763d5..bfed58011 100644
--- a/roles/lib_openshift/library/oc_adm_ca_server_cert.py
+++ b/roles/lib_openshift/library/oc_adm_ca_server_cert.py
@@ -1138,7 +1138,7 @@ class Utils(object): # pragma: no cover
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
- sfd.write(contents)
+ sfd.write(str(contents))
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_adm_csr.py b/roles/lib_openshift/library/oc_adm_csr.py
index 324f52689..c78e379d5 100644
--- a/roles/lib_openshift/library/oc_adm_csr.py
+++ b/roles/lib_openshift/library/oc_adm_csr.py
@@ -1116,7 +1116,7 @@ class Utils(object): # pragma: no cover
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
- sfd.write(contents)
+ sfd.write(str(contents))
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_adm_manage_node.py b/roles/lib_openshift/library/oc_adm_manage_node.py
index 152f270ab..b1b2cb5b5 100644
--- a/roles/lib_openshift/library/oc_adm_manage_node.py
+++ b/roles/lib_openshift/library/oc_adm_manage_node.py
@@ -1124,7 +1124,7 @@ class Utils(object): # pragma: no cover
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
- sfd.write(contents)
+ sfd.write(str(contents))
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_adm_policy_group.py b/roles/lib_openshift/library/oc_adm_policy_group.py
index 3082f5890..2773201d7 100644
--- a/roles/lib_openshift/library/oc_adm_policy_group.py
+++ b/roles/lib_openshift/library/oc_adm_policy_group.py
@@ -1110,7 +1110,7 @@ class Utils(object): # pragma: no cover
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
- sfd.write(contents)
+ sfd.write(str(contents))
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_adm_policy_user.py b/roles/lib_openshift/library/oc_adm_policy_user.py
index 92515889b..25cbed8b7 100644
--- a/roles/lib_openshift/library/oc_adm_policy_user.py
+++ b/roles/lib_openshift/library/oc_adm_policy_user.py
@@ -1124,7 +1124,7 @@ class Utils(object): # pragma: no cover
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
- sfd.write(contents)
+ sfd.write(str(contents))
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_adm_registry.py b/roles/lib_openshift/library/oc_adm_registry.py
index fe565987c..e26214316 100644
--- a/roles/lib_openshift/library/oc_adm_registry.py
+++ b/roles/lib_openshift/library/oc_adm_registry.py
@@ -1228,7 +1228,7 @@ class Utils(object): # pragma: no cover
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
- sfd.write(contents)
+ sfd.write(str(contents))
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_adm_router.py b/roles/lib_openshift/library/oc_adm_router.py
index 44de29592..62fca19e5 100644
--- a/roles/lib_openshift/library/oc_adm_router.py
+++ b/roles/lib_openshift/library/oc_adm_router.py
@@ -1253,7 +1253,7 @@ class Utils(object): # pragma: no cover
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
- sfd.write(contents)
+ sfd.write(str(contents))
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_clusterrole.py b/roles/lib_openshift/library/oc_clusterrole.py
index 9761b4b4e..0c4bfa01f 100644
--- a/roles/lib_openshift/library/oc_clusterrole.py
+++ b/roles/lib_openshift/library/oc_clusterrole.py
@@ -1102,7 +1102,7 @@ class Utils(object): # pragma: no cover
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
- sfd.write(contents)
+ sfd.write(str(contents))
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_configmap.py b/roles/lib_openshift/library/oc_configmap.py
index 047edffbb..36e6111eb 100644
--- a/roles/lib_openshift/library/oc_configmap.py
+++ b/roles/lib_openshift/library/oc_configmap.py
@@ -1108,7 +1108,7 @@ class Utils(object): # pragma: no cover
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
- sfd.write(contents)
+ sfd.write(str(contents))
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_edit.py b/roles/lib_openshift/library/oc_edit.py
index 0cea07256..ab4f153c7 100644
--- a/roles/lib_openshift/library/oc_edit.py
+++ b/roles/lib_openshift/library/oc_edit.py
@@ -1152,7 +1152,7 @@ class Utils(object): # pragma: no cover
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
- sfd.write(contents)
+ sfd.write(str(contents))
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_env.py b/roles/lib_openshift/library/oc_env.py
index 1f52fba40..f334ddaa4 100644
--- a/roles/lib_openshift/library/oc_env.py
+++ b/roles/lib_openshift/library/oc_env.py
@@ -1119,7 +1119,7 @@ class Utils(object): # pragma: no cover
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
- sfd.write(contents)
+ sfd.write(str(contents))
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_group.py b/roles/lib_openshift/library/oc_group.py
index 72023eaf7..7e9078339 100644
--- a/roles/lib_openshift/library/oc_group.py
+++ b/roles/lib_openshift/library/oc_group.py
@@ -1092,7 +1092,7 @@ class Utils(object): # pragma: no cover
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
- sfd.write(contents)
+ sfd.write(str(contents))
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_image.py b/roles/lib_openshift/library/oc_image.py
index 94b08d9ce..e71e2eb5c 100644
--- a/roles/lib_openshift/library/oc_image.py
+++ b/roles/lib_openshift/library/oc_image.py
@@ -1111,7 +1111,7 @@ class Utils(object): # pragma: no cover
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
- sfd.write(contents)
+ sfd.write(str(contents))
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_label.py b/roles/lib_openshift/library/oc_label.py
index ad837fdb5..ac3279ef8 100644
--- a/roles/lib_openshift/library/oc_label.py
+++ b/roles/lib_openshift/library/oc_label.py
@@ -1128,7 +1128,7 @@ class Utils(object): # pragma: no cover
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
- sfd.write(contents)
+ sfd.write(str(contents))
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_obj.py b/roles/lib_openshift/library/oc_obj.py
index 892546e56..ca53c4c97 100644
--- a/roles/lib_openshift/library/oc_obj.py
+++ b/roles/lib_openshift/library/oc_obj.py
@@ -1131,7 +1131,7 @@ class Utils(object): # pragma: no cover
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
- sfd.write(contents)
+ sfd.write(str(contents))
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_objectvalidator.py b/roles/lib_openshift/library/oc_objectvalidator.py
index 38df585f0..877c78d93 100644
--- a/roles/lib_openshift/library/oc_objectvalidator.py
+++ b/roles/lib_openshift/library/oc_objectvalidator.py
@@ -1063,7 +1063,7 @@ class Utils(object): # pragma: no cover
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
- sfd.write(contents)
+ sfd.write(str(contents))
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_process.py b/roles/lib_openshift/library/oc_process.py
index 70632f86d..507170424 100644
--- a/roles/lib_openshift/library/oc_process.py
+++ b/roles/lib_openshift/library/oc_process.py
@@ -1120,7 +1120,7 @@ class Utils(object): # pragma: no cover
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
- sfd.write(contents)
+ sfd.write(str(contents))
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_project.py b/roles/lib_openshift/library/oc_project.py
index 4eee748d7..347e879ca 100644
--- a/roles/lib_openshift/library/oc_project.py
+++ b/roles/lib_openshift/library/oc_project.py
@@ -1117,7 +1117,7 @@ class Utils(object): # pragma: no cover
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
- sfd.write(contents)
+ sfd.write(str(contents))
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_pvc.py b/roles/lib_openshift/library/oc_pvc.py
index 2e73a7645..93c96b817 100644
--- a/roles/lib_openshift/library/oc_pvc.py
+++ b/roles/lib_openshift/library/oc_pvc.py
@@ -1124,7 +1124,7 @@ class Utils(object): # pragma: no cover
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
- sfd.write(contents)
+ sfd.write(str(contents))
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_route.py b/roles/lib_openshift/library/oc_route.py
index e003770d8..3369cf134 100644
--- a/roles/lib_openshift/library/oc_route.py
+++ b/roles/lib_openshift/library/oc_route.py
@@ -1168,7 +1168,7 @@ class Utils(object): # pragma: no cover
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
- sfd.write(contents)
+ sfd.write(str(contents))
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_scale.py b/roles/lib_openshift/library/oc_scale.py
index c142f1f43..1b6202a26 100644
--- a/roles/lib_openshift/library/oc_scale.py
+++ b/roles/lib_openshift/library/oc_scale.py
@@ -1106,7 +1106,7 @@ class Utils(object): # pragma: no cover
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
- sfd.write(contents)
+ sfd.write(str(contents))
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_secret.py b/roles/lib_openshift/library/oc_secret.py
index 62bda33ad..732299e48 100644
--- a/roles/lib_openshift/library/oc_secret.py
+++ b/roles/lib_openshift/library/oc_secret.py
@@ -1164,7 +1164,7 @@ class Utils(object): # pragma: no cover
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
- sfd.write(contents)
+ sfd.write(str(contents))
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_service.py b/roles/lib_openshift/library/oc_service.py
index c541e1bbd..a6cf764ff 100644
--- a/roles/lib_openshift/library/oc_service.py
+++ b/roles/lib_openshift/library/oc_service.py
@@ -1171,7 +1171,7 @@ class Utils(object): # pragma: no cover
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
- sfd.write(contents)
+ sfd.write(str(contents))
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_serviceaccount.py b/roles/lib_openshift/library/oc_serviceaccount.py
index 646a39224..90d514292 100644
--- a/roles/lib_openshift/library/oc_serviceaccount.py
+++ b/roles/lib_openshift/library/oc_serviceaccount.py
@@ -1104,7 +1104,7 @@ class Utils(object): # pragma: no cover
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
- sfd.write(contents)
+ sfd.write(str(contents))
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_serviceaccount_secret.py b/roles/lib_openshift/library/oc_serviceaccount_secret.py
index 99a8e8f3d..0d9acac0e 100644
--- a/roles/lib_openshift/library/oc_serviceaccount_secret.py
+++ b/roles/lib_openshift/library/oc_serviceaccount_secret.py
@@ -1104,7 +1104,7 @@ class Utils(object): # pragma: no cover
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
- sfd.write(contents)
+ sfd.write(str(contents))
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_storageclass.py b/roles/lib_openshift/library/oc_storageclass.py
index 7e7d0fa60..6fb5a94e9 100644
--- a/roles/lib_openshift/library/oc_storageclass.py
+++ b/roles/lib_openshift/library/oc_storageclass.py
@@ -1122,7 +1122,7 @@ class Utils(object): # pragma: no cover
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
- sfd.write(contents)
+ sfd.write(str(contents))
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_user.py b/roles/lib_openshift/library/oc_user.py
index 7bbe38819..feb69348b 100644
--- a/roles/lib_openshift/library/oc_user.py
+++ b/roles/lib_openshift/library/oc_user.py
@@ -1164,7 +1164,7 @@ class Utils(object): # pragma: no cover
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
- sfd.write(contents)
+ sfd.write(str(contents))
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_version.py b/roles/lib_openshift/library/oc_version.py
index 63adbd6ac..0f024c048 100644
--- a/roles/lib_openshift/library/oc_version.py
+++ b/roles/lib_openshift/library/oc_version.py
@@ -1076,7 +1076,7 @@ class Utils(object): # pragma: no cover
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
- sfd.write(contents)
+ sfd.write(str(contents))
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_volume.py b/roles/lib_openshift/library/oc_volume.py
index 3c07f8d4b..6f409f979 100644
--- a/roles/lib_openshift/library/oc_volume.py
+++ b/roles/lib_openshift/library/oc_volume.py
@@ -1153,7 +1153,7 @@ class Utils(object): # pragma: no cover
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
- sfd.write(contents)
+ sfd.write(str(contents))
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/src/lib/base.py b/roles/lib_openshift/src/lib/base.py
index 1fb32164e..9a4ce3509 100644
--- a/roles/lib_openshift/src/lib/base.py
+++ b/roles/lib_openshift/src/lib/base.py
@@ -314,7 +314,7 @@ class Utils(object):
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
- sfd.write(contents)
+ sfd.write(str(contents))
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
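The same one-line change is repeated in every generated module above because they all embed this Utils class from src/lib/base.py. A small sketch of the failure mode the str() wrapper avoids (the temporary path is illustrative):

# file.write() raises TypeError when handed non-string content such as a
# parsed YAML document; str(contents) keeps the write working in both cases.
for contents in ["kind: ConfigMap\n", {"kind": "ConfigMap", "data": {"a": "1"}}]:
    with open("/tmp/oc_utils_write_demo.yaml", "w") as sfd:
        sfd.write(str(contents))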
diff --git a/roles/nuage_master/handlers/main.yaml b/roles/nuage_master/handlers/main.yaml
index 7b55dda56..c0411d641 100644
--- a/roles/nuage_master/handlers/main.yaml
+++ b/roles/nuage_master/handlers/main.yaml
@@ -1,9 +1,7 @@
---
- name: restart master api
systemd: name={{ openshift_service_type }}-master-api state=restarted
- when: >
- (openshift_master_ha | bool) and
- (not master_api_service_status_changed | default(false))
+ when: (not master_api_service_status_changed | default(false))
# TODO: need to fix up ignore_errors here
# We retry the controllers because the API may not be 100% initialized yet.
@@ -13,7 +11,5 @@
delay: 5
register: result
until: result.rc == 0
- when: >
- (openshift_master_ha | bool) and
- (not master_controllers_service_status_changed | default(false))
+ when: (not master_controllers_service_status_changed | default(false))
ignore_errors: yes
diff --git a/roles/openshift_aws/defaults/main.yml b/roles/openshift_aws/defaults/main.yml
index 178e0849c..c8d385db5 100644
--- a/roles/openshift_aws/defaults/main.yml
+++ b/roles/openshift_aws/defaults/main.yml
@@ -44,6 +44,8 @@ openshift_aws_s3_bucket_name: "{{ openshift_aws_clusterid }}-docker-registry"
openshift_aws_elb_basename: "{{ openshift_aws_clusterid }}"
+openshift_aws_elb_cert_arn: ''
+
openshift_aws_elb_dict:
master:
external:
@@ -65,7 +67,7 @@ openshift_aws_elb_dict:
load_balancer_port: "{{ openshift_master_api_port | default(8443) }}"
instance_protocol: ssl
instance_port: "{{ openshift_master_api_port | default(8443) }}"
- ssl_certificate_id: ''
+ ssl_certificate_id: "{{ openshift_aws_elb_cert_arn }}"
name: "{{ openshift_aws_elb_basename }}-master-external"
tags: "{{ openshift_aws_kube_tags }}"
internal:
diff --git a/roles/openshift_cloud_provider/defaults/main.yml b/roles/openshift_cloud_provider/defaults/main.yml
new file mode 100644
index 000000000..37cbf5603
--- /dev/null
+++ b/roles/openshift_cloud_provider/defaults/main.yml
@@ -0,0 +1,4 @@
+---
+openshift_gcp_project: ''
+openshift_gcp_prefix: ''
+openshift_gcp_network_name: "{{ openshift_gcp_prefix }}network"
diff --git a/roles/openshift_cloud_provider/tasks/gce.yml b/roles/openshift_cloud_provider/tasks/gce.yml
index 395bd304c..9e1c31b1d 100644
--- a/roles/openshift_cloud_provider/tasks/gce.yml
+++ b/roles/openshift_cloud_provider/tasks/gce.yml
@@ -1,4 +1,12 @@
---
+- name: check variables are passed
+ fail:
+ msg: "Ensure correct variables are defined for gcp. {{ item }}"
+ when: item == ''
+ with_items:
+ - "{{ openshift_gcp_project }}"
+ - "{{ openshift_gcp_prefix }}"
+
# Work around ini_file create option in 2.2 which defaults to no
- name: Create cloud config file
file:
@@ -16,8 +24,8 @@
option: "{{ item.key }}"
value: "{{ item.value }}"
with_items:
- - { key: 'project-id', value: '{{ openshift_gcp_project }}' }
- - { key: 'network-name', value: '{{ openshift_gcp_network_name }}' }
- - { key: 'node-tags', value: '{{ openshift_gcp_prefix }}ocp' }
- - { key: 'node-instance-prefix', value: '{{ openshift_gcp_prefix }}' }
- - { key: 'multizone', value: 'false' }
+ - { key: 'project-id', value: '{{ openshift_gcp_project }}' }
+ - { key: 'network-name', value: '{{ openshift_gcp_network_name }}' }
+ - { key: 'node-tags', value: '{{ openshift_gcp_prefix }}ocp' }
+ - { key: 'node-instance-prefix', value: '{{ openshift_gcp_prefix }}' }
+ - { key: 'multizone', value: 'false' }
diff --git a/roles/openshift_default_storage_class/defaults/main.yml b/roles/openshift_default_storage_class/defaults/main.yml
index 014c06641..687d60171 100644
--- a/roles/openshift_default_storage_class/defaults/main.yml
+++ b/roles/openshift_default_storage_class/defaults/main.yml
@@ -1,4 +1,7 @@
---
+# Must not be blank if you're using vsphere
+openshift_cloudprovider_vsphere_datacenter: ''
+
openshift_storageclass_defaults:
aws:
provisioner: aws-ebs
@@ -19,6 +22,12 @@ openshift_storageclass_defaults:
parameters:
fstype: xfs
+ vsphere:
+ provisioner: vsphere-volume
+ name: standard
+ parameters:
+ datastore: "{{ openshift_cloudprovider_vsphere_datacenter }}"
+
openshift_storageclass_default: "true"
openshift_storageclass_name: "{{ openshift_storageclass_defaults[openshift_cloudprovider_kind]['name'] }}"
openshift_storageclass_provisioner: "{{ openshift_storageclass_defaults[openshift_cloudprovider_kind]['provisioner'] }}"
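With the vsphere block added, the lookups at the bottom of this file resolve the per-provider defaults by openshift_cloudprovider_kind; a minimal sketch of that resolution, with values copied from the defaults above:

# openshift_storageclass_name / _provisioner / _parameters are plain dictionary
# lookups keyed on the cloud provider kind, so the new vsphere entry is all the
# default-storage-class play needs for that provider.
storageclass_defaults = {
    "vsphere": {
        "provisioner": "vsphere-volume",
        "name": "standard",
        "parameters": {"datastore": ""},  # filled from openshift_cloudprovider_vsphere_datacenter
    },
}
kind = "vsphere"  # stand-in for openshift_cloudprovider_kind
print(storageclass_defaults[kind]["name"], storageclass_defaults[kind]["provisioner"])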
diff --git a/roles/openshift_logging/README.md b/roles/openshift_logging/README.md
index a192bd67e..c438236a4 100644
--- a/roles/openshift_logging/README.md
+++ b/roles/openshift_logging/README.md
@@ -58,6 +58,7 @@ When `openshift_logging_install_logging` is set to `False` the `openshift_loggin
- `openshift_logging_kibana_replica_count`: The number of replicas Kibana should be scaled up to. Defaults to 1.
- `openshift_logging_kibana_nodeselector`: A map of labels (e.g. {"node":"infra","region":"west"} to select the nodes where the pod will land.
- `openshift_logging_kibana_edge_term_policy`: Insecure Edge Termination Policy. Defaults to Redirect.
+- `openshift_logging_kibana_env_vars`: A map of environment variables to add to the kibana deployment config (e.g. {"ELASTICSEARCH_REQUESTTIMEOUT":"30000"})
- `openshift_logging_fluentd_nodeselector`: The node selector that the Fluentd daemonset uses to determine where to deploy to. Defaults to '"logging-infra-fluentd": "true"'.
- `openshift_logging_fluentd_cpu_request`: The minimum amount of CPU to allocate for Fluentd collector pods. Defaults to '100m'.
diff --git a/roles/openshift_logging/tasks/delete_logging.yaml b/roles/openshift_logging/tasks/delete_logging.yaml
index ced7397b5..6be47b1f8 100644
--- a/roles/openshift_logging/tasks/delete_logging.yaml
+++ b/roles/openshift_logging/tasks/delete_logging.yaml
@@ -140,4 +140,6 @@
console_config_edits:
- key: clusterInfo#loggingPublicURL
value: ""
- when: openshift_web_console_install | default(true) | bool
+ when:
+ - openshift_web_console_install | default(true) | bool
+ - openshift.common.version_gte_3_9
diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml
index 1b4bdb11f..c905502ac 100644
--- a/roles/openshift_logging/tasks/install_logging.yaml
+++ b/roles/openshift_logging/tasks/install_logging.yaml
@@ -335,4 +335,6 @@
console_config_edits:
- key: clusterInfo#loggingPublicURL
value: "https://{{ openshift_logging_kibana_hostname }}"
- when: openshift_web_console_install | default(true) | bool
+ when:
+ - openshift_web_console_install | default(true) | bool
+ - openshift.common.version_gte_3_9
diff --git a/roles/openshift_logging_elasticsearch/tasks/main.yaml b/roles/openshift_logging_elasticsearch/tasks/main.yaml
index ff5ad1045..b731d93a0 100644
--- a/roles/openshift_logging_elasticsearch/tasks/main.yaml
+++ b/roles/openshift_logging_elasticsearch/tasks/main.yaml
@@ -137,6 +137,16 @@
- "prometheus_out.stderr | length > 0"
- "'already exists' not in prometheus_out.stderr"
+- set_fact:
+ _logging_metrics_proxy_passwd: "{{ 16 | lib_utils_oo_random_word | b64encode }}"
+
+- template:
+ src: passwd.j2
+ dest: "{{mktemp.stdout}}/passwd.yml"
+ vars:
+ logging_user_name: "{{ openshift_logging_elasticsearch_prometheus_sa }}"
+ logging_user_passwd: "{{ _logging_metrics_proxy_passwd }}"
+
# View role and binding
- name: Generate logging-elasticsearch-view-role
template:
@@ -255,6 +265,8 @@
path: "{{ generated_certs_dir }}/ca.crt"
- name: admin.jks
path: "{{ generated_certs_dir }}/system.admin.jks"
+ - name: passwd.yml
+ path: "{{mktemp.stdout}}/passwd.yml"
# services
- name: Set logging-{{ es_component }}-cluster service
@@ -391,6 +403,7 @@
es_container_security_context: "{{ _es_containers.elasticsearch.securityContext if _es_containers is defined and 'elasticsearch' in _es_containers and 'securityContext' in _es_containers.elasticsearch else None }}"
deploy_type: "{{ openshift_logging_elasticsearch_deployment_type }}"
es_replicas: 1
+ basic_auth_passwd: "{{ _logging_metrics_proxy_passwd | b64decode }}"
- name: Set ES dc
oc_obj:
diff --git a/roles/openshift_logging_elasticsearch/templates/es.j2 b/roles/openshift_logging_elasticsearch/templates/es.j2
index 4b189f255..b1d6a4489 100644
--- a/roles/openshift_logging_elasticsearch/templates/es.j2
+++ b/roles/openshift_logging_elasticsearch/templates/es.j2
@@ -51,6 +51,7 @@ spec:
- -client-id={{openshift_logging_elasticsearch_prometheus_sa}}
- -client-secret-file=/var/run/secrets/kubernetes.io/serviceaccount/token
- -cookie-secret={{ 16 | lib_utils_oo_random_word | b64encode }}
+ - -basic-auth-password={{ basic_auth_passwd }}
- -upstream=https://localhost:9200
- '-openshift-sar={"namespace": "{{ openshift_logging_elasticsearch_namespace}}", "verb": "view", "resource": "prometheus", "group": "metrics.openshift.io"}'
- '-openshift-delegate-urls={"/": {"resource": "prometheus", "verb": "view", "group": "metrics.openshift.io", "namespace": "{{ openshift_logging_elasticsearch_namespace}}"}}'
diff --git a/roles/openshift_logging_elasticsearch/templates/passwd.j2 b/roles/openshift_logging_elasticsearch/templates/passwd.j2
new file mode 100644
index 000000000..a22151eef
--- /dev/null
+++ b/roles/openshift_logging_elasticsearch/templates/passwd.j2
@@ -0,0 +1,2 @@
+"{{logging_user_name}}":
+ passwd: "{{logging_user_passwd}}"
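Taken together with the elasticsearch tasks and es.j2 hunks above, the flow is: generate a random word, store it base64-encoded in passwd.yml, and hand the decoded value to the oauth proxy container. A rough sketch, approximating lib_utils_oo_random_word with the secrets module and using an illustrative service-account name:

import base64
import secrets
import string

# Approximation of "16 | lib_utils_oo_random_word | b64encode" from the tasks file.
word = "".join(secrets.choice(string.ascii_letters + string.digits) for _ in range(16))
encoded = base64.b64encode(word.encode()).decode()

# passwd.j2 stores the encoded value for the prometheus user...
print('"{0}":\n  passwd: "{1}"'.format("prometheus-metrics-sa", encoded))
# ...while es.j2 passes the decoded value as -basic-auth-password to the proxy.
print("-basic-auth-password=" + base64.b64decode(encoded).decode())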
diff --git a/roles/openshift_logging_kibana/defaults/main.yml b/roles/openshift_logging_kibana/defaults/main.yml
index 899193838..b69cbacae 100644
--- a/roles/openshift_logging_kibana/defaults/main.yml
+++ b/roles/openshift_logging_kibana/defaults/main.yml
@@ -18,6 +18,9 @@ openshift_logging_kibana_es_port: 9200
openshift_logging_kibana_replicas: 1
openshift_logging_kibana_edge_term_policy: Redirect
+# map of env. var to add to the kibana deploymentconfig
+openshift_logging_kibana_env_vars: {}
+
# this is used to determine if this is an operations deployment or a non-ops deployment
# simply used for naming purposes
openshift_logging_kibana_ops_deployment: false
diff --git a/roles/openshift_logging_kibana/tasks/main.yaml b/roles/openshift_logging_kibana/tasks/main.yaml
index 3c3bd902e..c67235c62 100644
--- a/roles/openshift_logging_kibana/tasks/main.yaml
+++ b/roles/openshift_logging_kibana/tasks/main.yaml
@@ -251,6 +251,7 @@
kibana_proxy_memory_limit: "{{ openshift_logging_kibana_proxy_memory_limit }}"
kibana_replicas: "{{ openshift_logging_kibana_replicas | default (1) }}"
kibana_node_selector: "{{ openshift_logging_kibana_nodeselector | default({}) }}"
+ kibana_env_vars: "{{ openshift_logging_kibana_env_vars | default({}) }}"
- name: Set Kibana DC
oc_obj:
diff --git a/roles/openshift_logging_kibana/templates/kibana.j2 b/roles/openshift_logging_kibana/templates/kibana.j2
index 57d216373..ed05b8458 100644
--- a/roles/openshift_logging_kibana/templates/kibana.j2
+++ b/roles/openshift_logging_kibana/templates/kibana.j2
@@ -70,6 +70,10 @@ spec:
resourceFieldRef:
containerName: kibana
resource: limits.memory
+{% for key, value in kibana_env_vars.items() %}
+ - name: "{{ key }}"
+ value: "{{ value }}"
+{% endfor %}
volumeMounts:
- name: kibana
mountPath: /etc/kibana/keys
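A quick sketch of how that new loop expands, reusing the ELASTICSEARCH_REQUESTTIMEOUT example from the README entry earlier in this diff (requires the jinja2 package):

from jinja2 import Template

snippet = Template(
    "{% for key, value in kibana_env_vars.items() %}"
    '        - name: "{{ key }}"\n'
    '          value: "{{ value }}"\n'
    "{% endfor %}"
)
print(snippet.render(kibana_env_vars={"ELASTICSEARCH_REQUESTTIMEOUT": "30000"}))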
diff --git a/roles/openshift_manage_node/defaults/main.yml b/roles/openshift_manage_node/defaults/main.yml
index 00e04b9f2..b7a89a723 100644
--- a/roles/openshift_manage_node/defaults/main.yml
+++ b/roles/openshift_manage_node/defaults/main.yml
@@ -1,9 +1,5 @@
---
# openshift_manage_node_is_master is set at the play level.
openshift_manage_node_is_master: False
-
-# Default is to be schedulable except for master nodes.
-l_openshift_manage_schedulable: "{{ openshift_schedulable | default(not openshift_manage_node_is_master) }}"
-
openshift_master_node_labels:
node-role.kubernetes.io/master: 'true'
diff --git a/roles/openshift_manage_node/tasks/config.yml b/roles/openshift_manage_node/tasks/config.yml
index 4f00351b5..e5753d185 100644
--- a/roles/openshift_manage_node/tasks/config.yml
+++ b/roles/openshift_manage_node/tasks/config.yml
@@ -2,7 +2,7 @@
- name: Set node schedulability
oc_adm_manage_node:
node: "{{ openshift.node.nodename | lower }}"
- schedulable: "{{ 'true' if l_openshift_manage_schedulable | bool else 'false' }}"
+ schedulable: "{{ 'true' if openshift_schedulable | default(true) | bool else 'false' }}"
retries: 10
delay: 5
register: node_schedulable
@@ -23,5 +23,5 @@
delegate_to: "{{ openshift_master_host }}"
vars:
l_node_labels: "{{ openshift_node_labels | default({}) }}"
- l_master_labels: "{{ ('oo_masters_to_config' in group_names) | ternary(openshift_master_node_labels, {}) }}"
+ l_master_labels: "{{ openshift_manage_node_is_master | ternary(openshift_master_node_labels, {}) }}"
l_all_labels: "{{ l_node_labels | combine(l_master_labels) }}"
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index b12a6b346..680e4a4ff 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -137,17 +137,8 @@
- item.clientCA | default('') != ''
with_items: "{{ openshift.master.identity_providers }}"
-# This is an ugly hack to verify settings are in a file without modifying them with lineinfile.
-# The template file will stomp any other settings made.
-- block:
- - name: check whether our docker-registry setting exists in the env file
- command: "awk '/^OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000/' /etc/sysconfig/{{ openshift_service_type }}-master"
- failed_when: false
- changed_when: false
- register: l_already_set
-
- - set_fact:
- openshift_push_via_dns: "{{ openshift.common.version_gte_3_6 or (l_already_set.stdout is defined and l_already_set.stdout is match('OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000')) }}"
+- name: Include push_via_dns.yml
+ include_tasks: push_via_dns.yml
- name: Set fact of all etcd host IPs
openshift_facts:
@@ -227,7 +218,7 @@
- pause:
seconds: 15
when:
- - openshift.master.ha | bool
+ - openshift_master_ha | bool
- name: Start and enable master api all masters
systemd:
diff --git a/roles/openshift_master/tasks/push_via_dns.yml b/roles/openshift_master/tasks/push_via_dns.yml
new file mode 100644
index 000000000..c5876130a
--- /dev/null
+++ b/roles/openshift_master/tasks/push_via_dns.yml
@@ -0,0 +1,13 @@
+---
+# This is an ugly hack to verify settings are in a file without modifying them with lineinfile.
+# The template file will stomp any other settings made.
+- when: openshift_push_via_dns is not defined
+ block:
+ - name: check whether our docker-registry setting exists in the env file
+ shell: "awk '/^OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000/' /etc/sysconfig/{{ openshift_service_type }}-master*"
+ failed_when: false
+ changed_when: false
+ register: l_already_set
+
+ - set_fact:
+ openshift_push_via_dns: "{{ openshift.common.version_gte_3_6 or (l_already_set.stdout is defined and l_already_set.stdout is match('OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000')) }}"
diff --git a/roles/openshift_master/tasks/restart.yml b/roles/openshift_master/tasks/restart.yml
index 715347101..f7697067a 100644
--- a/roles/openshift_master/tasks/restart.yml
+++ b/roles/openshift_master/tasks/restart.yml
@@ -3,7 +3,6 @@
service:
name: "{{ openshift_service_type }}-master-api"
state: restarted
- when: openshift_master_ha | bool
- name: Wait for master API to come back online
wait_for:
host: "{{ openshift.common.hostname }}"
@@ -11,12 +10,10 @@
delay: 10
port: "{{ openshift.master.api_port }}"
timeout: 600
- when: openshift_master_ha | bool
-- name: Restart master controllers
- service:
- name: "{{ openshift_service_type }}-master-controllers"
- state: restarted
- # Ignore errrors since it is possible that type != simple for
- # pre-3.1.1 installations.
- ignore_errors: true
- when: openshift_master_ha | bool
+# We retry the controllers because the API may not be 100% initialized yet.
+- name: Restart master controllers
+ command: "systemctl restart {{ openshift_service_type }}-master-controllers"
+ retries: 3
+ delay: 5
+ register: result
+ until: result.rc == 0
diff --git a/roles/openshift_master/tasks/systemd_units.yml b/roles/openshift_master/tasks/systemd_units.yml
index 870ab7c57..aeff64983 100644
--- a/roles/openshift_master/tasks/systemd_units.yml
+++ b/roles/openshift_master/tasks/systemd_units.yml
@@ -1,6 +1,8 @@
---
# systemd_units.yml is included both in the openshift_master role and in the upgrade
# playbooks.
+- name: include push_via_dns.yml tasks
+ include_tasks: push_via_dns.yml
- name: Set HA Service Info for containerized installs
set_fact:
@@ -9,7 +11,8 @@
when:
- openshift_is_containerized | bool
-- include_tasks: registry_auth.yml
+- name: include registry_auth tasks
+ include_tasks: registry_auth.yml
- name: Disable the legacy master service if it exists
systemd:
diff --git a/roles/openshift_metrics/tasks/generate_cassandra_pvcs.yaml b/roles/openshift_metrics/tasks/generate_cassandra_pvcs.yaml
new file mode 100644
index 000000000..6aa48f9c3
--- /dev/null
+++ b/roles/openshift_metrics/tasks/generate_cassandra_pvcs.yaml
@@ -0,0 +1,46 @@
+---
+- name: Check to see if PVC already exists
+ oc_obj:
+ state: list
+ kind: pvc
+ name: "{{ openshift_metrics_cassandra_pvc_prefix }}-{{ metrics_pvc_index }}"
+ namespace: "{{ openshift_metrics_project }}"
+ register: _metrics_pvc
+
+# _metrics_pvc.results.results | length > 0 returns a false positive,
+# so we check for the presence of 'stderr' to determine whether the object exists;
+# the return code (RC) is 0 whether or not the object exists
+- when:
+ - _metrics_pvc.results.stderr is defined
+ block:
+ - name: generate hawkular-cassandra persistent volume claims
+ template:
+ src: pvc.j2
+ dest: "{{ mktemp.stdout }}/templates/hawkular-cassandra-pvc{{ metrics_pvc_index }}.yaml"
+ vars:
+ obj_name: "{{ openshift_metrics_cassandra_pvc_prefix }}-{{ metrics_pvc_index }}"
+ labels:
+ metrics-infra: hawkular-cassandra
+ access_modes: "{{ openshift_metrics_cassandra_pvc_access | list }}"
+ size: "{{ openshift_metrics_cassandra_pvc_size }}"
+ pv_selector: "{{ openshift_metrics_cassandra_pv_selector }}"
+ storage_class_name: "{{ openshift_metrics_cassanda_pvc_storage_class_name | default('', true) }}"
+ when:
+ - openshift_metrics_cassandra_storage_type != 'emptydir'
+ - openshift_metrics_cassandra_storage_type != 'dynamic'
+ changed_when: false
+
+ - name: generate hawkular-cassandra persistent volume claims (dynamic)
+ template:
+ src: pvc.j2
+ dest: "{{ mktemp.stdout }}/templates/hawkular-cassandra-pvc{{ metrics_pvc_index }}.yaml"
+ vars:
+ obj_name: "{{ openshift_metrics_cassandra_pvc_prefix }}-{{ metrics_pvc_index }}"
+ labels:
+ metrics-infra: hawkular-cassandra
+ access_modes: "{{ openshift_metrics_cassandra_pvc_access | list }}"
+ size: "{{ openshift_metrics_cassandra_pvc_size }}"
+ pv_selector: "{{ openshift_metrics_cassandra_pv_selector }}"
+ storage_class_name: "{{ openshift_metrics_cassanda_pvc_storage_class_name | default('', true) }}"
+ when: openshift_metrics_cassandra_storage_type == 'dynamic'
+ changed_when: false
diff --git a/roles/openshift_metrics/tasks/install_cassandra.yaml b/roles/openshift_metrics/tasks/install_cassandra.yaml
index 9026cc897..158e596ec 100644
--- a/roles/openshift_metrics/tasks/install_cassandra.yaml
+++ b/roles/openshift_metrics/tasks/install_cassandra.yaml
@@ -25,36 +25,7 @@
- set_fact: openshift_metrics_cassandra_pvc_prefix="hawkular-metrics"
when: "not openshift_metrics_cassandra_pvc_prefix or openshift_metrics_cassandra_pvc_prefix == ''"
-- name: generate hawkular-cassandra persistent volume claims
- template:
- src: pvc.j2
- dest: "{{ mktemp.stdout }}/templates/hawkular-cassandra-pvc{{ item }}.yaml"
- vars:
- obj_name: "{{ openshift_metrics_cassandra_pvc_prefix }}-{{ item }}"
- labels:
- metrics-infra: hawkular-cassandra
- access_modes: "{{ openshift_metrics_cassandra_pvc_access | list }}"
- size: "{{ openshift_metrics_cassandra_pvc_size }}"
- pv_selector: "{{ openshift_metrics_cassandra_pv_selector }}"
- storage_class_name: "{{ openshift_metrics_cassanda_pvc_storage_class_name | default('', true) }}"
- with_sequence: count={{ openshift_metrics_cassandra_replicas }}
- when:
- - openshift_metrics_cassandra_storage_type != 'emptydir'
- - openshift_metrics_cassandra_storage_type != 'dynamic'
- changed_when: false
-
-- name: generate hawkular-cassandra persistent volume claims (dynamic)
- template:
- src: pvc.j2
- dest: "{{ mktemp.stdout }}/templates/hawkular-cassandra-pvc{{ item }}.yaml"
- vars:
- obj_name: "{{ openshift_metrics_cassandra_pvc_prefix }}-{{ item }}"
- labels:
- metrics-infra: hawkular-cassandra
- access_modes: "{{ openshift_metrics_cassandra_pvc_access | list }}"
- size: "{{ openshift_metrics_cassandra_pvc_size }}"
- pv_selector: "{{ openshift_metrics_cassandra_pv_selector }}"
- storage_class_name: "{{ openshift_metrics_cassanda_pvc_storage_class_name | default('', true) }}"
+- include_tasks: generate_cassandra_pvcs.yaml
with_sequence: count={{ openshift_metrics_cassandra_replicas }}
- when: openshift_metrics_cassandra_storage_type == 'dynamic'
- changed_when: false
+ loop_control:
+ loop_var: metrics_pvc_index
diff --git a/roles/openshift_metrics/tasks/install_metrics.yaml b/roles/openshift_metrics/tasks/install_metrics.yaml
index 6b6c21d71..f05c8968d 100644
--- a/roles/openshift_metrics/tasks/install_metrics.yaml
+++ b/roles/openshift_metrics/tasks/install_metrics.yaml
@@ -79,7 +79,9 @@
console_config_edits:
- key: clusterInfo#metricsPublicURL
value: "https://{{ openshift_metrics_hawkular_hostname}}/hawkular/metrics"
- when: openshift_web_console_install | default(true) | bool
+ when:
+ - openshift_web_console_install | default(true) | bool
+ - openshift.common.version_gte_3_9
- command: >
{{openshift_client_binary}}
diff --git a/roles/openshift_metrics/tasks/uninstall_metrics.yaml b/roles/openshift_metrics/tasks/uninstall_metrics.yaml
index 1664e9975..ed849916d 100644
--- a/roles/openshift_metrics/tasks/uninstall_metrics.yaml
+++ b/roles/openshift_metrics/tasks/uninstall_metrics.yaml
@@ -28,4 +28,6 @@
console_config_edits:
- key: clusterInfo#metricsPublicURL
value: ""
- when: openshift_web_console_install | default(true) | bool
+ when:
+ - openshift_web_console_install | default(true) | bool
+ - openshift.common.version_gte_3_9
diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml
index 0fe4c2035..9f887891b 100644
--- a/roles/openshift_node/defaults/main.yml
+++ b/roles/openshift_node/defaults/main.yml
@@ -137,6 +137,7 @@ default_r_openshift_node_image_prep_packages:
- yum-utils
# gluster
- glusterfs-fuse
+- device-mapper-multipath
# nfs
- nfs-utils
- flannel
diff --git a/roles/openshift_node/tasks/storage_plugins/iscsi.yml b/roles/openshift_node/tasks/storage_plugins/iscsi.yml
index a8048c42f..72415f9a6 100644
--- a/roles/openshift_node/tasks/storage_plugins/iscsi.yml
+++ b/roles/openshift_node/tasks/storage_plugins/iscsi.yml
@@ -1,6 +1,32 @@
---
- name: Install iSCSI storage plugin dependencies
- package: name=iscsi-initiator-utils state=present
+ package:
+ name: "{{ item }}"
+ state: present
when: not openshift_is_atomic | bool
register: result
until: result is succeeded
+ with_items:
+ - iscsi-initiator-utils
+ - device-mapper-multipath
+
+- name: Start and enable multipathd and rpcbind
+ systemd:
+ name: "{{ item }}"
+ state: started
+ enabled: True
+ with_items:
+ - multipathd
+ - rpcbind
+
+- name: Template multipath configuration
+ template:
+ dest: "/etc/multipath.conf"
+ src: multipath.conf.j2
+ backup: true
+ when: not openshift_is_atomic | bool
+
+#enable multipath
+- name: Enable multipath
+ command: "mpathconf --enable"
+ when: not openshift_is_atomic | bool
diff --git a/roles/openshift_node/templates/multipath.conf.j2 b/roles/openshift_node/templates/multipath.conf.j2
new file mode 100644
index 000000000..8a0abc2c1
--- /dev/null
+++ b/roles/openshift_node/templates/multipath.conf.j2
@@ -0,0 +1,15 @@
+# LIO iSCSI
+# TODO: Add env variables for tweaking
+devices {
+ device {
+ vendor "LIO-ORG"
+ user_friendly_names "yes"
+ path_grouping_policy "failover"
+ path_selector "round-robin 0"
+ failback immediate
+ path_checker "tur"
+ prio "const"
+ no_path_retry 120
+ rr_weight "uniform"
+ }
+}
diff --git a/roles/openshift_sanitize_inventory/tasks/main.yml b/roles/openshift_sanitize_inventory/tasks/main.yml
index 62d460272..08dfd8284 100644
--- a/roles/openshift_sanitize_inventory/tasks/main.yml
+++ b/roles/openshift_sanitize_inventory/tasks/main.yml
@@ -61,3 +61,17 @@
when:
- template_service_broker_remove | default(false) | bool
- template_service_broker_install | default(true) | bool
+
+- name: Ensure that all required vSphere configuration variables are set
+ fail:
+ msg: >
+ When the vSphere cloud provider is configured, you must define all of these variables:
+ openshift_cloudprovider_vsphere_username, openshift_cloudprovider_vsphere_password,
+ openshift_cloudprovider_vsphere_host, openshift_cloudprovider_vsphere_datacenter,
+ openshift_cloudprovider_vsphere_datastore
+ when:
+ - openshift_cloudprovider_kind is defined
+ - openshift_cloudprovider_kind == 'vsphere'
+ - ( openshift_cloudprovider_vsphere_username is undefined or openshift_cloudprovider_vsphere_password is undefined or
+ openshift_cloudprovider_vsphere_host is undefined or openshift_cloudprovider_vsphere_datacenter is undefined or
+ openshift_cloudprovider_vsphere_datastore is undefined )
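For illustration only, the new check expects inventories that enable the vSphere cloud provider to define all five variables, e.g. in a group_vars/OSEv3.yml file (every value below is a placeholder; only the variable names come from the check above):

    openshift_cloudprovider_kind: vsphere
    openshift_cloudprovider_vsphere_username: "administrator@vsphere.local"   # placeholder
    openshift_cloudprovider_vsphere_password: "changeme"                      # placeholder
    openshift_cloudprovider_vsphere_host: "vcenter.example.com"               # placeholder
    openshift_cloudprovider_vsphere_datacenter: "dc1"                         # placeholder
    openshift_cloudprovider_vsphere_datastore: "datastore1"                   # placeholder

If openshift_cloudprovider_kind is 'vsphere' and any of the five variables is undefined, the play now fails early with the message above.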
diff --git a/roles/openshift_storage_glusterfs/README.md b/roles/openshift_storage_glusterfs/README.md
index f7bd58db3..70a89b0ba 100644
--- a/roles/openshift_storage_glusterfs/README.md
+++ b/roles/openshift_storage_glusterfs/README.md
@@ -73,49 +73,51 @@ Role Variables
This role has the following variables that control the integration of a
GlusterFS cluster into a new or existing OpenShift cluster:
-| Name | Default value | Description |
-|--------------------------------------------------|-------------------------|-----------------------------------------|
-| openshift_storage_glusterfs_timeout | 300 | Seconds to wait for pods to become ready
-| openshift_storage_glusterfs_namespace | 'glusterfs' | Namespace/project in which to create GlusterFS resources
-| openshift_storage_glusterfs_is_native | True | GlusterFS should be containerized
-| openshift_storage_glusterfs_name | 'storage' | A name to identify the GlusterFS cluster, which will be used in resource names
-| openshift_storage_glusterfs_nodeselector | 'glusterfs=storage-host'| Selector to determine which nodes will host GlusterFS pods in native mode. **NOTE:** The label value is taken from the cluster name
-| openshift_storage_glusterfs_use_default_selector | False | Whether to use a default node selector for the GlusterFS namespace/project. If False, the namespace/project will have no restricting node selector. If True, uses pre-existing or default (e.g. osm_default_node_selector) node selectors. **NOTE:** If True, nodes which will host GlusterFS pods must already have the additional labels.
-| openshift_storage_glusterfs_storageclass | True | Automatically create a StorageClass for each GlusterFS cluster
-| openshift_storage_glusterfs_storageclass_default | False | Sets the StorageClass for each GlusterFS cluster as default
-| openshift_storage_glusterfs_image | 'gluster/gluster-centos'| Container image to use for GlusterFS pods, enterprise default is 'rhgs3/rhgs-server-rhel7'
-| openshift_storage_glusterfs_version | 'latest' | Container image version to use for GlusterFS pods
-| openshift_storage_glusterfs_block_deploy | True | Deploy glusterblock provisioner service
-| openshift_storage_glusterfs_block_image | 'gluster/glusterblock-provisioner'| Container image to use for glusterblock-provisioner pod, enterprise default is 'rhgs3/rhgs-gluster-block-prov-rhel7'
-| openshift_storage_glusterfs_block_version | 'latest' | Container image version to use for glusterblock-provisioner pod
-| openshift_storage_glusterfs_block_host_vol_create| True | Automatically create GlusterFS volumes to host glusterblock volumes. **NOTE:** If this is False, block-hosting volumes will need to be manually created before glusterblock volumes can be provisioned
-| openshift_storage_glusterfs_block_host_vol_size | 100 | Size, in GB, of GlusterFS volumes that will be automatically create to host glusterblock volumes if not enough space is available for a glusterblock volume create request. **NOTE:** This value is effectively an upper limit on the size of glusterblock volumes unless you manually create larger GlusterFS block-hosting volumes
-| openshift_storage_glusterfs_block_host_vol_max | 15 | Max number of GlusterFS volumes to host glusterblock volumes
-| openshift_storage_glusterfs_s3_deploy | True | Deploy gluster-s3 service
-| openshift_storage_glusterfs_s3_image | 'gluster/gluster-object'| Container image to use for gluster-s3 pod, enterprise default is 'rhgs3/rhgs-gluster-s3-server-rhel7'
-| openshift_storage_glusterfs_s3_version | 'latest' | Container image version to use for gluster=s3 pod
-| openshift_storage_glusterfs_s3_account | Undefined | S3 account name for the S3 service, required for S3 service deployment
-| openshift_storage_glusterfs_s3_user | Undefined | S3 user name for the S3 service, required for S3 service deployment
-| openshift_storage_glusterfs_s3_password | Undefined | S3 user password for the S3 service, required for S3 service deployment
-| openshift_storage_glusterfs_s3_pvc | Dynamic | Name of the GlusterFS-backed PVC which will be used for S3 object data storage, generated from the cluster name and S3 account by default
-| openshift_storage_glusterfs_s3_pvc_size | "2Gi" | Size, in Gi, of the GlusterFS-backed PVC which will be used for S3 object data storage
-| openshift_storage_glusterfs_s3_meta_pvc | Dynamic | Name of the GlusterFS-backed PVC which will be used for S3 object metadata storage, generated from the cluster name and S3 account by default
-| openshift_storage_glusterfs_s3_meta_pvc_size | "1Gi" | Size, in Gi, of the GlusterFS-backed PVC which will be used for S3 object metadata storage
-| openshift_storage_glusterfs_wipe | False | Destroy any existing GlusterFS resources and wipe storage devices. **WARNING: THIS WILL DESTROY ANY DATA ON THOSE DEVICES.**
-| openshift_storage_glusterfs_heketi_is_native | True | heketi should be containerized
-| openshift_storage_glusterfs_heketi_cli | 'heketi-cli' | Command/Path to invoke the heketi-cli tool **NOTE:** Change this only for **non-native heketi** if heketi-cli is not in the global `$PATH` of the machine running openshift-ansible
-| openshift_storage_glusterfs_heketi_image | 'heketi/heketi' | Container image to use for heketi pods, enterprise default is 'rhgs3/rhgs-volmanager-rhel7'
-| openshift_storage_glusterfs_heketi_version | 'latest' | Container image version to use for heketi pods
-| openshift_storage_glusterfs_heketi_admin_key | auto-generated | String to use as secret key for performing heketi commands as admin
-| openshift_storage_glusterfs_heketi_user_key | auto-generated | String to use as secret key for performing heketi commands as user that can only view or modify volumes
-| openshift_storage_glusterfs_heketi_topology_load | True | Load the GlusterFS topology information into heketi
-| openshift_storage_glusterfs_heketi_url | Undefined | When heketi is native, this sets the hostname portion of the final heketi route URL. When heketi is external, this is the FQDN or IP address to the heketi service.
-| openshift_storage_glusterfs_heketi_port | 8080 | TCP port for external heketi service **NOTE:** This has no effect in native mode
-| openshift_storage_glusterfs_heketi_executor | 'kubernetes' | Selects how a native heketi service will manage GlusterFS nodes: 'kubernetes' for native nodes, 'ssh' for external nodes
-| openshift_storage_glusterfs_heketi_ssh_port | 22 | SSH port for external GlusterFS nodes via native heketi
-| openshift_storage_glusterfs_heketi_ssh_user | 'root' | SSH user for external GlusterFS nodes via native heketi
-| openshift_storage_glusterfs_heketi_ssh_sudo | False | Whether to sudo (if non-root user) for SSH to external GlusterFS nodes via native heketi
-| openshift_storage_glusterfs_heketi_ssh_keyfile | Undefined | Path to a private key file for use with SSH connections to external GlusterFS nodes via native heketi **NOTE:** This must be an absolute path
+| Name | Default value | Description |
+|--------------------------------------------------------|-------------------------|-----------------------------------------|
+| openshift_storage_glusterfs_timeout | 300 | Seconds to wait for pods to become ready
+| openshift_storage_glusterfs_namespace | 'glusterfs' | Namespace/project in which to create GlusterFS resources
+| openshift_storage_glusterfs_is_native | True | GlusterFS should be containerized
+| openshift_storage_glusterfs_name | 'storage' | A name to identify the GlusterFS cluster, which will be used in resource names
+| openshift_storage_glusterfs_nodeselector | 'glusterfs=storage-host'| Selector to determine which nodes will host GlusterFS pods in native mode. **NOTE:** The label value is taken from the cluster name
+| openshift_storage_glusterfs_use_default_selector | False | Whether to use a default node selector for the GlusterFS namespace/project. If False, the namespace/project will have no restricting node selector. If True, uses pre-existing or default (e.g. osm_default_node_selector) node selectors. **NOTE:** If True, nodes which will host GlusterFS pods must already have the additional labels.
+| openshift_storage_glusterfs_storageclass | True | Automatically create a StorageClass for each GlusterFS cluster
+| openshift_storage_glusterfs_storageclass_default | False | Sets the StorageClass for each GlusterFS cluster as default
+| openshift_storage_glusterfs_image | 'gluster/gluster-centos'| Container image to use for GlusterFS pods, enterprise default is 'rhgs3/rhgs-server-rhel7'
+| openshift_storage_glusterfs_version | 'latest' | Container image version to use for GlusterFS pods
+| openshift_storage_glusterfs_block_deploy | True | Deploy glusterblock provisioner service
+| openshift_storage_glusterfs_block_image | 'gluster/glusterblock-provisioner'| Container image to use for glusterblock-provisioner pod, enterprise default is 'rhgs3/rhgs-gluster-block-prov-rhel7'
+| openshift_storage_glusterfs_block_version | 'latest' | Container image version to use for glusterblock-provisioner pod
+| openshift_storage_glusterfs_block_host_vol_create | True | Automatically create GlusterFS volumes to host glusterblock volumes. **NOTE:** If this is False, block-hosting volumes will need to be manually created before glusterblock volumes can be provisioned
+| openshift_storage_glusterfs_block_host_vol_size | 100 | Size, in GB, of GlusterFS volumes that will be automatically created to host glusterblock volumes if not enough space is available for a glusterblock volume create request. **NOTE:** This value is effectively an upper limit on the size of glusterblock volumes unless you manually create larger GlusterFS block-hosting volumes
+| openshift_storage_glusterfs_block_host_vol_max | 15 | Max number of GlusterFS volumes to host glusterblock volumes
+| openshift_storage_glusterfs_block_storageclass | False | Automatically create a StorageClass for each Gluster Block cluster
+| openshift_storage_glusterfs_block_storageclass_default | False | Sets the StorageClass for each Gluster Block cluster as default
+| openshift_storage_glusterfs_s3_deploy | True | Deploy gluster-s3 service
+| openshift_storage_glusterfs_s3_image | 'gluster/gluster-object'| Container image to use for gluster-s3 pod, enterprise default is 'rhgs3/rhgs-gluster-s3-server-rhel7'
+| openshift_storage_glusterfs_s3_version | 'latest' | Container image version to use for gluster-s3 pod
+| openshift_storage_glusterfs_s3_account | Undefined | S3 account name for the S3 service, required for S3 service deployment
+| openshift_storage_glusterfs_s3_user | Undefined | S3 user name for the S3 service, required for S3 service deployment
+| openshift_storage_glusterfs_s3_password | Undefined | S3 user password for the S3 service, required for S3 service deployment
+| openshift_storage_glusterfs_s3_pvc | Dynamic | Name of the GlusterFS-backed PVC which will be used for S3 object data storage, generated from the cluster name and S3 account by default
+| openshift_storage_glusterfs_s3_pvc_size | "2Gi" | Size, in Gi, of the GlusterFS-backed PVC which will be used for S3 object data storage
+| openshift_storage_glusterfs_s3_meta_pvc | Dynamic | Name of the GlusterFS-backed PVC which will be used for S3 object metadata storage, generated from the cluster name and S3 account by default
+| openshift_storage_glusterfs_s3_meta_pvc_size | "1Gi" | Size, in Gi, of the GlusterFS-backed PVC which will be used for S3 object metadata storage
+| openshift_storage_glusterfs_wipe | False | Destroy any existing GlusterFS resources and wipe storage devices. **WARNING: THIS WILL DESTROY ANY DATA ON THOSE DEVICES.**
+| openshift_storage_glusterfs_heketi_is_native | True | heketi should be containerized
+| openshift_storage_glusterfs_heketi_cli | 'heketi-cli' | Command/Path to invoke the heketi-cli tool **NOTE:** Change this only for **non-native heketi** if heketi-cli is not in the global `$PATH` of the machine running openshift-ansible
+| openshift_storage_glusterfs_heketi_image | 'heketi/heketi' | Container image to use for heketi pods, enterprise default is 'rhgs3/rhgs-volmanager-rhel7'
+| openshift_storage_glusterfs_heketi_version | 'latest' | Container image version to use for heketi pods
+| openshift_storage_glusterfs_heketi_admin_key | auto-generated | String to use as secret key for performing heketi commands as admin
+| openshift_storage_glusterfs_heketi_user_key | auto-generated | String to use as secret key for performing heketi commands as user that can only view or modify volumes
+| openshift_storage_glusterfs_heketi_topology_load | True | Load the GlusterFS topology information into heketi
+| openshift_storage_glusterfs_heketi_url | Undefined | When heketi is native, this sets the hostname portion of the final heketi route URL. When heketi is external, this is the FQDN or IP address to the heketi service.
+| openshift_storage_glusterfs_heketi_port | 8080 | TCP port for external heketi service **NOTE:** This has no effect in native mode
+| openshift_storage_glusterfs_heketi_executor | 'kubernetes' | Selects how a native heketi service will manage GlusterFS nodes: 'kubernetes' for native nodes, 'ssh' for external nodes
+| openshift_storage_glusterfs_heketi_ssh_port | 22 | SSH port for external GlusterFS nodes via native heketi
+| openshift_storage_glusterfs_heketi_ssh_user | 'root' | SSH user for external GlusterFS nodes via native heketi
+| openshift_storage_glusterfs_heketi_ssh_sudo | False | Whether to sudo (if non-root user) for SSH to external GlusterFS nodes via native heketi
+| openshift_storage_glusterfs_heketi_ssh_keyfile | Undefined | Path to a private key file for use with SSH connections to external GlusterFS nodes via native heketi **NOTE:** This must be an absolute path
| openshift_storage_glusterfs_heketi_fstab | '/var/lib/heketi/fstab' | When heketi is native, sets the path to the fstab file on the GlusterFS nodes to update on LVM volume mounts, changes to '/etc/fstab/' when the heketi executor is 'ssh' **NOTE:** This should not need to be changed
| openshift_storage_glusterfs_heketi_wipe | False | Destroy any existing heketi resources, defaults to the value of `openshift_storage_glusterfs_wipe`
@@ -126,14 +128,16 @@ registry. These variables start with the prefix
values in their corresponding non-registry variables. The following variables
are an exception:
-| Name | Default value | Description |
-|-----------------------------------------------------------|-----------------------|-----------------------------------------|
-| openshift_storage_glusterfs_registry_namespace | registry namespace | Default is to use the hosted registry's namespace, otherwise 'glusterfs'
-| openshift_storage_glusterfs_registry_name | 'registry' | This allows for the logical separation of the registry GlusterFS cluster from other GlusterFS clusters
-| openshift_storage_glusterfs_registry_storageclass | False | It is recommended to not create a StorageClass for GlusterFS clusters serving registry storage, so as to avoid performance penalties
-| openshift_storage_glusterfs_registry_storageclass_default | False | Sets the StorageClass for each GlusterFS cluster as default
-| openshift_storage_glusterfs_registry_heketi_admin_key | auto-generated | Separate from the above
-| openshift_storage_glusterfs_registry_heketi_user_key | auto-generated | Separate from the above
+| Name | Default value | Description |
+|-----------------------------------------------------------------|-----------------------|-----------------------------------------|
+| openshift_storage_glusterfs_registry_namespace | registry namespace | Default is to use the hosted registry's namespace, otherwise 'glusterfs'
+| openshift_storage_glusterfs_registry_name | 'registry' | This allows for the logical separation of the registry GlusterFS cluster from other GlusterFS clusters
+| openshift_storage_glusterfs_registry_storageclass | False | It is recommended to not create a StorageClass for GlusterFS clusters serving registry storage, so as to avoid performance penalties
+| openshift_storage_glusterfs_registry_storageclass_default | False | Sets the StorageClass for each GlusterFS cluster as default
+| openshift_storage_glusterfs_registry_block_storageclass | False | It is recommended to not create a StorageClass for Gluster Block clusters serving registry storage, so as to avoid performance penalties
+| openshift_storage_glusterfs_registry_block_storageclass_default | False | Sets the StorageClass for each Gluster Block cluster as default
+| openshift_storage_glusterfs_registry_heketi_admin_key | auto-generated | Separate from the above
+| openshift_storage_glusterfs_registry_heketi_user_key | auto-generated | Separate from the above
Additionally, this role's behavior responds to several registry-specific variables in the [openshift_hosted role](../openshift_hosted/README.md):
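As an illustration of the new glusterblock options documented above (the True/False choices here are examples, not defaults), a cluster that wants a gluster-block StorageClass for the main GlusterFS cluster but not for the registry cluster could set, e.g. in group_vars/OSEv3.yml:

    # variable names come from the tables above; values are illustrative
    openshift_storage_glusterfs_block_deploy: True
    openshift_storage_glusterfs_block_storageclass: True
    openshift_storage_glusterfs_block_storageclass_default: False
    openshift_storage_glusterfs_registry_block_storageclass: False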
diff --git a/roles/openshift_storage_glusterfs/defaults/main.yml b/roles/openshift_storage_glusterfs/defaults/main.yml
index 4cbe262d2..7e751cc7a 100644
--- a/roles/openshift_storage_glusterfs/defaults/main.yml
+++ b/roles/openshift_storage_glusterfs/defaults/main.yml
@@ -14,6 +14,8 @@ openshift_storage_glusterfs_block_version: 'latest'
openshift_storage_glusterfs_block_host_vol_create: True
openshift_storage_glusterfs_block_host_vol_size: 100
openshift_storage_glusterfs_block_host_vol_max: 15
+openshift_storage_glusterfs_block_storageclass: False
+openshift_storage_glusterfs_block_storageclass_default: False
openshift_storage_glusterfs_s3_deploy: True
openshift_storage_glusterfs_s3_image: "{{ 'rhgs3/rhgs-gluster-s3-server-rhel7' | quote if openshift_deployment_type == 'openshift-enterprise' else 'gluster/gluster-object' | quote }}"
openshift_storage_glusterfs_s3_version: 'latest'
@@ -61,6 +63,8 @@ openshift_storage_glusterfs_registry_block_version: "{{ openshift_storage_gluste
openshift_storage_glusterfs_registry_block_host_vol_create: "{{ openshift_storage_glusterfs_block_host_vol_create }}"
openshift_storage_glusterfs_registry_block_host_vol_size: "{{ openshift_storage_glusterfs_block_host_vol_size }}"
openshift_storage_glusterfs_registry_block_host_vol_max: "{{ openshift_storage_glusterfs_block_host_vol_max }}"
+openshift_storage_glusterfs_registry_block_storageclass: False
+openshift_storage_glusterfs_registry_block_storageclass_default: False
openshift_storage_glusterfs_registry_s3_deploy: "{{ openshift_storage_glusterfs_s3_deploy }}"
openshift_storage_glusterfs_registry_s3_image: "{{ openshift_storage_glusterfs_s3_image }}"
openshift_storage_glusterfs_registry_s3_version: "{{ openshift_storage_glusterfs_s3_version }}"
@@ -103,3 +107,9 @@ r_openshift_storage_glusterfs_os_firewall_allow:
port: "24008/tcp"
- service: glusterfs_bricks
port: "49152-49251/tcp"
+- service: glusterblockd
+ port: "24010/tcp"
+- service: iscsi-targets
+ port: "3260/tcp"
+- service: rpcbind
+ port: "111/tcp"
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
index 001578406..a5fdae803 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
@@ -315,5 +315,31 @@
- include_tasks: glusterblock_deploy.yml
when: glusterfs_block_deploy
+- block:
+ - name: Create heketi block secret
+ oc_secret:
+ namespace: "{{ glusterfs_namespace }}"
+ state: present
+ name: "heketi-{{ glusterfs_name }}-admin-secret-block"
+ type: "gluster.org/glusterblock"
+ force: True
+ contents:
+ - path: key
+ data: "{{ glusterfs_heketi_admin_key }}"
+ when: glusterfs_heketi_admin_key is defined
+ - name: Generate Gluster Block StorageClass file
+ template:
+ src: "{{ openshift.common.examples_content_version }}/gluster-block-storageclass.yml.j2"
+ dest: "{{ mktemp.stdout }}/gluster-block-storageclass.yml"
+
+ - name: Create Gluster Block StorageClass
+ oc_obj:
+ state: present
+ kind: storageclass
+ name: "glusterfs-{{ glusterfs_name }}-block"
+ files:
+ - "{{ mktemp.stdout }}/gluster-block-storageclass.yml"
+ when: glusterfs_block_storageclass
+
- include_tasks: gluster_s3_deploy.yml
when: glusterfs_s3_deploy
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
index a374df0ce..92de1b64d 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
@@ -17,6 +17,8 @@
glusterfs_block_host_vol_create: "{{ openshift_storage_glusterfs_block_host_vol_create }}"
glusterfs_block_host_vol_size: "{{ openshift_storage_glusterfs_block_host_vol_size }}"
glusterfs_block_host_vol_max: "{{ openshift_storage_glusterfs_block_host_vol_max }}"
+ glusterfs_block_storageclass: "{{ openshift_storage_glusterfs_block_storageclass | bool }}"
+ glusterfs_block_storageclass_default: "{{ openshift_storage_glusterfs_block_storageclass_default | bool }}"
glusterfs_s3_deploy: "{{ openshift_storage_glusterfs_s3_deploy | bool }}"
glusterfs_s3_image: "{{ openshift_storage_glusterfs_s3_image }}"
glusterfs_s3_version: "{{ openshift_storage_glusterfs_s3_version }}"
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
index 544a6f491..befacb04f 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
@@ -17,6 +17,8 @@
glusterfs_block_host_vol_create: "{{ openshift_storage_glusterfs_registry_block_host_vol_create }}"
glusterfs_block_host_vol_size: "{{ openshift_storage_glusterfs_registry_block_host_vol_size }}"
glusterfs_block_host_vol_max: "{{ openshift_storage_glusterfs_registry_block_host_vol_max }}"
+ glusterfs_block_storageclass: "{{ openshift_storage_glusterfs_registry_block_storageclass | bool }}"
+ glusterfs_block_storageclass_default: "{{ openshift_storage_glusterfs_registry_block_storageclass_default | bool }}"
glusterfs_s3_deploy: "{{ openshift_storage_glusterfs_registry_s3_deploy | bool }}"
glusterfs_s3_image: "{{ openshift_storage_glusterfs_registry_s3_image }}"
glusterfs_s3_version: "{{ openshift_storage_glusterfs_registry_s3_version }}"
diff --git a/roles/openshift_storage_glusterfs/templates/glusterfs.conf b/roles/openshift_storage_glusterfs/templates/glusterfs.conf
index dd4d6e6f7..bcc02e217 100644
--- a/roles/openshift_storage_glusterfs/templates/glusterfs.conf
+++ b/roles/openshift_storage_glusterfs/templates/glusterfs.conf
@@ -1,4 +1,7 @@
#{{ ansible_managed }}
dm_thin_pool
dm_snapshot
-dm_mirror
\ No newline at end of file
+dm_mirror
+#glusterblock
+dm_multipath
+target_core_user
diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/gluster-block-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/gluster-block-storageclass.yml.j2
new file mode 100644
index 000000000..02ed8fa8d
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.7/gluster-block-storageclass.yml.j2
@@ -0,0 +1,19 @@
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+ name: glusterfs-{{ glusterfs_name }}-block
+{% if glusterfs_block_storageclass_default is defined and glusterfs_block_storageclass_default %}
+ annotations:
+ storageclass.kubernetes.io/is-default-class: "true"
+{% endif %}
+provisioner: gluster.org/glusterblock
+parameters:
+ resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}"
+ restuser: "admin"
+ chapauthenabled: "true"
+ hacount: "3"
+{% if glusterfs_heketi_admin_key is defined %}
+ restsecretnamespace: "{{ glusterfs_namespace }}"
+ restsecretname: "heketi-{{ glusterfs_name }}-admin-secret-block"
+{%- endif -%}
diff --git a/roles/openshift_storage_glusterfs/templates/v3.8/gluster-block-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.8/gluster-block-storageclass.yml.j2
new file mode 100644
index 000000000..02ed8fa8d
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.8/gluster-block-storageclass.yml.j2
@@ -0,0 +1,19 @@
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+ name: glusterfs-{{ glusterfs_name }}-block
+{% if glusterfs_block_storageclass_default is defined and glusterfs_block_storageclass_default %}
+ annotations:
+ storageclass.kubernetes.io/is-default-class: "true"
+{% endif %}
+provisioner: gluster.org/glusterblock
+parameters:
+ resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}"
+ restuser: "admin"
+ chapauthenabled: "true"
+ hacount: "3"
+{% if glusterfs_heketi_admin_key is defined %}
+ restsecretnamespace: "{{ glusterfs_namespace }}"
+ restsecretname: "heketi-{{ glusterfs_name }}-admin-secret-block"
+{%- endif -%}
diff --git a/roles/openshift_storage_glusterfs/templates/v3.9/gluster-block-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.9/gluster-block-storageclass.yml.j2
new file mode 100644
index 000000000..02ed8fa8d
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.9/gluster-block-storageclass.yml.j2
@@ -0,0 +1,19 @@
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+ name: glusterfs-{{ glusterfs_name }}-block
+{% if glusterfs_block_storageclass_default is defined and glusterfs_block_storageclass_default %}
+ annotations:
+ storageclass.kubernetes.io/is-default-class: "true"
+{% endif %}
+provisioner: gluster.org/glusterblock
+parameters:
+ resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}"
+ restuser: "admin"
+ chapauthenabled: "true"
+ hacount: "3"
+{% if glusterfs_heketi_admin_key is defined %}
+ restsecretnamespace: "{{ glusterfs_namespace }}"
+ restsecretname: "heketi-{{ glusterfs_name }}-admin-secret-block"
+{%- endif -%}
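For orientation, a sketch of what this template would render to, assuming glusterfs_name=storage, glusterfs_namespace=glusterfs, native heketi with a route of heketi-storage.example.com (hostname made up), glusterfs_heketi_admin_key defined, and the default-class flag enabled:

    apiVersion: storage.k8s.io/v1
    kind: StorageClass
    metadata:
      name: glusterfs-storage-block
      annotations:
        storageclass.kubernetes.io/is-default-class: "true"
    provisioner: gluster.org/glusterblock
    parameters:
      resturl: "http://heketi-storage.example.com"
      restuser: "admin"
      chapauthenabled: "true"
      hacount: "3"
      restsecretnamespace: "glusterfs"
      restsecretname: "heketi-storage-admin-secret-block"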