Diffstat (limited to 'roles')
-rw-r--r--  roles/cockpit-ui/tasks/main.yml | 1
-rw-r--r--  roles/etcd/defaults/main.yaml | 6
-rw-r--r--  roles/etcd/tasks/main.yml | 2
-rw-r--r--  roles/etcd/tasks/system_container.yml | 4
-rw-r--r--  roles/etcd/templates/etcd.docker.service | 2
-rw-r--r--  roles/installer_checkpoint/callback_plugins/installer_checkpoint.py | 2
-rw-r--r--  roles/kuryr/README.md | 5
-rw-r--r--  roles/kuryr/templates/configmap.yaml.j2 | 8
-rw-r--r--  roles/openshift_certificate_expiry/README.md | 48
-rw-r--r--  roles/openshift_etcd_facts/tasks/main.yml | 4
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py | 7
-rw-r--r--  roles/openshift_health_checker/openshift_checks/docker_image_availability.py | 16
-rw-r--r--  roles/openshift_health_checker/test/docker_image_availability_test.py | 4
-rw-r--r--  roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml | 9
-rw-r--r--  roles/openshift_hosted_templates/files/v3.6/origin/registry-console.yaml | 7
-rw-r--r--  roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml | 9
-rw-r--r--  roles/openshift_hosted_templates/files/v3.7/origin/registry-console.yaml | 7
-rw-r--r--  roles/openshift_hosted_templates/files/v3.8/enterprise/registry-console.yaml | 9
-rw-r--r--  roles/openshift_hosted_templates/files/v3.8/origin/registry-console.yaml | 7
-rw-r--r--  roles/openshift_logging/README.md | 1
-rw-r--r--  roles/openshift_logging/tasks/install_logging.yaml | 4
-rw-r--r--  roles/openshift_logging_elasticsearch/defaults/main.yml | 1
-rw-r--r--  roles/openshift_logging_elasticsearch/tasks/main.yaml | 2
-rw-r--r--  roles/openshift_logging_fluentd/files/secure-forward.conf | 2
-rw-r--r--  roles/openshift_logging_mux/files/secure-forward.conf | 2
-rw-r--r--  roles/openshift_node/meta/main.yml | 6
-rw-r--r--  roles/openshift_node/tasks/config/install-node-docker-service-file.yml (renamed from roles/openshift_node_upgrade/tasks/config/install-node-docker-service-file.yml) | 0
-rw-r--r--  roles/openshift_node/tasks/docker/upgrade.yml (renamed from roles/openshift_node_upgrade/tasks/docker/upgrade.yml) | 0
-rw-r--r--  roles/openshift_node/tasks/upgrade.yml (renamed from roles/openshift_node_upgrade/tasks/main.yml) | 6
-rw-r--r--  roles/openshift_node/tasks/upgrade/containerized_node_upgrade.yml (renamed from roles/openshift_node_upgrade/tasks/containerized_node_upgrade.yml) | 2
-rw-r--r--  roles/openshift_node/tasks/upgrade/restart.yml (renamed from roles/openshift_node_upgrade/tasks/restart.yml) | 0
-rw-r--r--  roles/openshift_node/tasks/upgrade/rpm_upgrade.yml (renamed from roles/openshift_node_upgrade/tasks/rpm_upgrade.yml) | 0
-rw-r--r--  roles/openshift_node_group/defaults/main.yml | 2
-rw-r--r--  roles/openshift_node_group/templates/node-config.yaml.j2 | 2
-rw-r--r--  roles/openshift_node_upgrade/README.md | 111
-rw-r--r--  roles/openshift_node_upgrade/defaults/main.yml | 15
-rw-r--r--  roles/openshift_node_upgrade/files/nuke_images.sh | 25
-rw-r--r--  roles/openshift_node_upgrade/handlers/main.yml | 36
-rw-r--r--  roles/openshift_node_upgrade/meta/main.yml | 13
-rw-r--r--  roles/openshift_node_upgrade/tasks/config/configure-node-settings.yml | 16
-rw-r--r--  roles/openshift_node_upgrade/tasks/config/configure-proxy-settings.yml | 17
-rw-r--r--  roles/openshift_node_upgrade/tasks/config/install-node-deps-docker-service-file.yml | 8
-rw-r--r--  roles/openshift_node_upgrade/tasks/config/install-ovs-docker-service-file.yml | 8
-rw-r--r--  roles/openshift_node_upgrade/tasks/config/install-ovs-service-env-file.yml | 8
-rw-r--r--  roles/openshift_node_upgrade/tasks/config/workaround-bz1331590-ovs-oom-fix.yml | 13
-rw-r--r--  roles/openshift_node_upgrade/tasks/registry_auth.yml | 46
-rw-r--r--  roles/openshift_node_upgrade/tasks/systemd_units.yml | 37
-rw-r--r--  roles/openshift_node_upgrade/templates/node.service.j2 | 31
-rw-r--r--  roles/openshift_node_upgrade/templates/openshift.docker.node.dep.service | 11
-rw-r--r--  roles/openshift_node_upgrade/templates/openshift.docker.node.service | 50
-rw-r--r--  roles/openshift_node_upgrade/templates/openvswitch-avoid-oom.conf | 3
-rw-r--r--  roles/openshift_node_upgrade/templates/openvswitch.docker.service | 17
-rw-r--r--  roles/openshift_node_upgrade/templates/openvswitch.sysconfig.j2 | 1
-rw-r--r--  roles/openshift_storage_glusterfs/README.md | 4
-rw-r--r--  roles/openshift_storage_glusterfs/defaults/main.yml | 8
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.7/glusterblock-provisioner.yml | 2
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.8/deploy-heketi-template.yml | 133
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.8/gluster-s3-pvcs-template.yml | 67
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.8/gluster-s3-template.yml | 140
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.8/glusterblock-provisioner.yml | 105
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.8/glusterfs-template.yml | 154
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.8/heketi-template.yml | 136
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterblock_deploy.yml | 8
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml | 4
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml | 2
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml | 4
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.7/heketi.json.j2 | 8
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-registry-endpoints.yml.j2 | 12
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-registry-service.yml.j2 | 10
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-storageclass.yml.j2 | 13
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.8/heketi-endpoints.yml.j2 | 12
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.8/heketi-service.yml.j2 | 10
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.8/heketi.json.j2 | 42
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.8/topology.json.j2 | 49
74 files changed, 1030 insertions, 546 deletions
diff --git a/roles/cockpit-ui/tasks/main.yml b/roles/cockpit-ui/tasks/main.yml
index 09f4259a2..f60912033 100644
--- a/roles/cockpit-ui/tasks/main.yml
+++ b/roles/cockpit-ui/tasks/main.yml
@@ -41,6 +41,7 @@
command: >
{{ openshift.common.client_binary }} new-app --template=registry-console
{% if openshift_cockpit_deployer_prefix is defined %}-p IMAGE_PREFIX="{{ openshift_cockpit_deployer_prefix }}"{% endif %}
+ {% if openshift_cockpit_deployer_basename is defined %}-p IMAGE_BASENAME="{{ openshift_cockpit_deployer_basename }}"{% endif %}
{% if openshift_cockpit_deployer_version is defined %}-p IMAGE_VERSION="{{ openshift_cockpit_deployer_version }}"{% endif %}
-p OPENSHIFT_OAUTH_PROVIDER_URL="{{ openshift.master.public_api_url }}"
-p REGISTRY_HOST="{{ docker_registry_route.results[0].spec.host }}"
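For reference, a hypothetical inventory snippet that exercises the new parameter (values here are illustrative, not defaults; the resulting `-p` flags follow from the task above):

```yaml
# Illustrative inventory values only -- consumed by the new-app task above.
openshift_cockpit_deployer_prefix: "registry.example.com/cockpit/"
openshift_cockpit_deployer_basename: "kubernetes"   # new variable in this change
openshift_cockpit_deployer_version: "latest"
# => new-app receives -p IMAGE_PREFIX="registry.example.com/cockpit/"
#    -p IMAGE_BASENAME="kubernetes" -p IMAGE_VERSION="latest"
```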
diff --git a/roles/etcd/defaults/main.yaml b/roles/etcd/defaults/main.yaml
index 4b734d4ed..9a3652a2b 100644
--- a/roles/etcd/defaults/main.yaml
+++ b/roles/etcd/defaults/main.yaml
@@ -6,6 +6,12 @@ r_etcd_common_backup_sufix_name: ''
r_etcd_common_etcd_runtime: "docker"
r_etcd_common_embedded_etcd: false
+osm_etcd_image: 'registry.access.redhat.com/rhel7/etcd'
+etcd_image_dict:
+ origin: "registry.fedoraproject.org/f26/etcd"
+ openshift-enterprise: "{{ osm_etcd_image }}"
+etcd_image: "{{ etcd_image_dict[openshift_deployment_type | default('origin')] }}"
+
# etcd run on a host => use etcdctl command directly
# etcd run as a docker container => use docker exec
# etcd run as a runc container => use runc exec
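A minimal sketch of how the new default resolves per deployment type (an illustrative debug task, not part of the patch):

```yaml
- debug:
    msg: "{{ etcd_image }}"
# openshift_deployment_type=origin               => registry.fedoraproject.org/f26/etcd
# openshift_deployment_type=openshift-enterprise => registry.access.redhat.com/rhel7/etcd
#   (the enterprise entry resolves through osm_etcd_image, which stays overridable)
```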
diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml
index 3e69af314..fabe66b91 100644
--- a/roles/etcd/tasks/main.yml
+++ b/roles/etcd/tasks/main.yml
@@ -20,7 +20,7 @@
- block:
- name: Pull etcd container
- command: docker pull {{ openshift.etcd.etcd_image }}
+ command: docker pull {{ etcd_image }}
register: pull_result
changed_when: "'Downloaded newer image' in pull_result.stdout"
diff --git a/roles/etcd/tasks/system_container.yml b/roles/etcd/tasks/system_container.yml
index f71d9b551..82ac4fc84 100644
--- a/roles/etcd/tasks/system_container.yml
+++ b/roles/etcd/tasks/system_container.yml
@@ -5,7 +5,7 @@
tasks_from: proxy
- name: Pull etcd system container
- command: atomic pull --storage=ostree {{ openshift.etcd.etcd_image }}
+ command: atomic pull --storage=ostree {{ etcd_image }}
register: pull_result
changed_when: "'Pulling layer' in pull_result.stdout"
@@ -57,7 +57,7 @@
- name: Install or Update Etcd system container package
oc_atomic_container:
name: etcd
- image: "{{ openshift.etcd.etcd_image }}"
+ image: "{{ etcd_image }}"
state: latest
values:
- ETCD_DATA_DIR=/var/lib/etcd
diff --git a/roles/etcd/templates/etcd.docker.service b/roles/etcd/templates/etcd.docker.service
index adeca7a91..99ae37319 100644
--- a/roles/etcd/templates/etcd.docker.service
+++ b/roles/etcd/templates/etcd.docker.service
@@ -7,7 +7,7 @@ PartOf={{ openshift.docker.service_name }}.service
[Service]
EnvironmentFile={{ etcd_conf_file }}
ExecStartPre=-/usr/bin/docker rm -f {{ etcd_service }}
-ExecStart=/usr/bin/docker run --name {{ etcd_service }} --rm -v {{ etcd_data_dir }}:{{ etcd_data_dir }}:z -v {{ etcd_conf_dir }}:{{ etcd_conf_dir }}:ro --env-file={{ etcd_conf_file }} --net=host --entrypoint=/usr/bin/etcd {{ openshift.etcd.etcd_image }}
+ExecStart=/usr/bin/docker run --name {{ etcd_service }} --rm -v {{ etcd_data_dir }}:{{ etcd_data_dir }}:z -v {{ etcd_conf_dir }}:{{ etcd_conf_dir }}:ro --env-file={{ etcd_conf_file }} --net=host --entrypoint=/usr/bin/etcd {{ etcd_image }}
ExecStop=/usr/bin/docker stop {{ etcd_service }}
SyslogIdentifier=etcd_container
Restart=always
diff --git a/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py b/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py
index 723e43e8d..14fdca400 100644
--- a/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py
+++ b/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py
@@ -46,7 +46,7 @@ class CallbackModule(CallbackBase):
},
'installer_phase_health': {
'title': 'Health Check',
- 'playbook': 'playbooks/byo/openshift-checks/pre-install.yml'
+ 'playbook': 'playbooks/openshift-checks/pre-install.yml'
},
'installer_phase_etcd': {
'title': 'etcd Install',
diff --git a/roles/kuryr/README.md b/roles/kuryr/README.md
index 7b618f902..269788a11 100644
--- a/roles/kuryr/README.md
+++ b/roles/kuryr/README.md
@@ -31,6 +31,11 @@ pods. This allows to have interconnectivity between pods and OpenStack VMs.
* ``kuryr_openstack_pod_service_id=service_subnet_uuid``
* ``kuryr_openstack_pod_project_id=pod_project_uuid``
* ``kuryr_openstack_worker_nodes_subnet_id=worker_nodes_subnet_uuid``
+* ``kuryr_openstack_enable_pools=True``
+* ``kuryr_openstack_pool_max=0``
+* ``kuryr_openstack_pool_min=1``
+* ``kuryr_openstack_pool_batch=5``
+* ``kuryr_openstack_pool_update_frequency=20``
## Kuryr resources
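Taken together, a hypothetical `group_vars` sketch enabling the ports pool feature with the values shown above:

```yaml
# Sketch only; values mirror the documented examples above.
kuryr_openstack_enable_pools: True
kuryr_openstack_pool_max: 0
kuryr_openstack_pool_min: 1
kuryr_openstack_pool_batch: 5
kuryr_openstack_pool_update_frequency: 20
```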
diff --git a/roles/kuryr/templates/configmap.yaml.j2 b/roles/kuryr/templates/configmap.yaml.j2
index e874d6c25..6bf6c1db2 100644
--- a/roles/kuryr/templates/configmap.yaml.j2
+++ b/roles/kuryr/templates/configmap.yaml.j2
@@ -161,6 +161,14 @@ data:
# The driver that provides VIFs for Kubernetes Pods. (string value)
pod_vif_driver = nested-vlan
+ # The driver that manages VIFs pools for Kubernetes Pods (string value)
+ vif_pool_driver = {{ kuryr_openstack_enable_pools | default(False) | ternary('nested', 'noop') }}
+
+ [vif_pool]
+ ports_pool_max = {{ kuryr_openstack_pool_max | default(0) }}
+ ports_pool_min = {{ kuryr_openstack_pool_min | default(1) }}
+ ports_pool_batch = {{ kuryr_openstack_pool_batch | default(5) }}
+ ports_pool_update_frequency = {{ kuryr_openstack_pool_update_frequency | default(20) }}
[neutron]
# Configuration options for OpenStack Neutron
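How the added ternary resolves (a sketch; a debug task you could run against the same variables):

```yaml
- debug:
    msg: "vif_pool_driver = {{ kuryr_openstack_enable_pools | default(False) | ternary('nested', 'noop') }}"
# unset or False => "vif_pool_driver = noop"
# True           => "vif_pool_driver = nested"
```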
diff --git a/roles/openshift_certificate_expiry/README.md b/roles/openshift_certificate_expiry/README.md
index f19a421cb..48338ca1b 100644
--- a/roles/openshift_certificate_expiry/README.md
+++ b/roles/openshift_certificate_expiry/README.md
@@ -54,7 +54,7 @@ included in this role, or you can [read on below for more examples](#more-exampl
to help you craft you own.
```
-$ ansible-playbook -v -i HOSTS playbooks/byo/openshift-checks/certificate_expiry/easy-mode.yaml
+$ ansible-playbook -v -i HOSTS playbooks/openshift-checks/certificate_expiry/easy-mode.yaml
```
Using the `easy-mode.yaml` playbook will produce:
@@ -65,7 +65,7 @@ Using the `easy-mode.yaml` playbook will produce:
> **Note:** If you are running from an RPM install use
-> `/usr/share/ansible/openshift-ansible/playbooks/byo/openshift-checks/certificate_expiry/easy-mode.yaml`
+> `/usr/share/ansible/openshift-ansible/playbooks/openshift-checks/certificate_expiry/easy-mode.yaml`
> instead
## Run from a container
@@ -80,7 +80,7 @@ There are several [examples](../../examples/README.md) in the `examples` directo
## More Example Playbooks
> **Note:** These Playbooks are available to run directly out of the
-> [/playbooks/byo/openshift-checks/certificate_expiry/](../../playbooks/byo/openshift-checks/certificate_expiry/) directory.
+> [/playbooks/openshift-checks/certificate_expiry/](../../playbooks/openshift-checks/certificate_expiry/) directory.
### Default behavior
@@ -99,14 +99,14 @@ This playbook just invokes the certificate expiration check role with default op
**From git:**
```
-$ ansible-playbook -v -i HOSTS playbooks/byo/openshift-checks/certificate_expiry/default.yaml
+$ ansible-playbook -v -i HOSTS playbooks/openshift-checks/certificate_expiry/default.yaml
```
**From openshift-ansible-playbooks rpm:**
```
-$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/byo/openshift-checks/certificate_expiry/default.yaml
+$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/openshift-checks/certificate_expiry/default.yaml
```
-> [View This Playbook](../../playbooks/byo/openshift-checks/certificate_expiry/default.yaml)
+> [View This Playbook](../../playbooks/openshift-checks/certificate_expiry/default.yaml)
### Easy mode
@@ -130,14 +130,14 @@ certificates (healthy or not) are included in the results:
**From git:**
```
-$ ansible-playbook -v -i HOSTS playbooks/byo/openshift-checks/certificate_expiry/easy-mode.yaml
+$ ansible-playbook -v -i HOSTS playbooks/openshift-checks/certificate_expiry/easy-mode.yaml
```
**From openshift-ansible-playbooks rpm:**
```
-$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/byo/openshift-checks/certificate_expiry/easy-mode.yaml
+$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/openshift-checks/certificate_expiry/easy-mode.yaml
```
-> [View This Playbook](../../playbooks/byo/openshift-checks/certificate_expiry/easy-mode.yaml)
+> [View This Playbook](../../playbooks/openshift-checks/certificate_expiry/easy-mode.yaml)
### Easy mode and upload reports to masters
@@ -193,14 +193,14 @@ options via environment variables:
**From git:**
```
-$ ansible-playbook -v -i HOSTS playbooks/byo/openshift-checks/certificate_expiry/easy-mode-upload.yaml
+$ ansible-playbook -v -i HOSTS playbooks/openshift-checks/certificate_expiry/easy-mode-upload.yaml
```
**From openshift-ansible-playbooks rpm:**
```
-$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/byo/openshift-checks/certificate_expiry/easy-mode-upload.yaml
+$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/openshift-checks/certificate_expiry/easy-mode-upload.yaml
```
-> [View This Playbook](../../playbooks/byo/openshift-checks/certificate_expiry/easy-mode-upload.yaml)
+> [View This Playbook](../../playbooks/openshift-checks/certificate_expiry/easy-mode-upload.yaml)
### Generate HTML and JSON artifacts in their default paths
@@ -219,14 +219,14 @@ $ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/by
**From git:**
```
-$ ansible-playbook -v -i HOSTS playbooks/byo/openshift-checks/certificate_expiry/html_and_json_default_paths.yaml
+$ ansible-playbook -v -i HOSTS playbooks/openshift-checks/certificate_expiry/html_and_json_default_paths.yaml
```
**From openshift-ansible-playbooks rpm:**
```
-$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/byo/openshift-checks/certificate_expiry/html_and_json_default_paths.yaml
+$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/openshift-checks/certificate_expiry/html_and_json_default_paths.yaml
```
-> [View This Playbook](../../playbooks/byo/openshift-checks/certificate_expiry/html_and_json_default_paths.yaml)
+> [View This Playbook](../../playbooks/openshift-checks/certificate_expiry/html_and_json_default_paths.yaml)
### Generate HTML and JSON reports in a custom path
@@ -250,14 +250,14 @@ This example customizes the report generation path to point to a specific path (
**From git:**
```
-$ ansible-playbook -v -i HOSTS playbooks/byo/openshift-checks/certificate_expiry/html_and_json_timestamp.yaml
+$ ansible-playbook -v -i HOSTS playbooks/openshift-checks/certificate_expiry/html_and_json_timestamp.yaml
```
**From openshift-ansible-playbooks rpm:**
```
-$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/byo/openshift-checks/certificate_expiry/html_and_json_timestamp.yaml
+$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/openshift-checks/certificate_expiry/html_and_json_timestamp.yaml
```
-> [View This Playbook](../../playbooks/byo/openshift-checks/certificate_expiry/html_and_json_timestamp.yaml)
+> [View This Playbook](../../playbooks/openshift-checks/certificate_expiry/html_and_json_timestamp.yaml)
### Long warning window
@@ -278,14 +278,14 @@ the module out):
**From git:**
```
-$ ansible-playbook -v -i HOSTS playbooks/byo/openshift-checks/certificate_expiry/longer_warning_period.yaml
+$ ansible-playbook -v -i HOSTS playbooks/openshift-checks/certificate_expiry/longer_warning_period.yaml
```
**From openshift-ansible-playbooks rpm:**
```
-$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/byo/openshift-checks/certificate_expiry/longer_warning_period.yaml
+$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/openshift-checks/certificate_expiry/longer_warning_period.yaml
```
-> [View This Playbook](../../playbooks/byo/openshift-checks/certificate_expiry/longer_warning_period.yaml)
+> [View This Playbook](../../playbooks/openshift-checks/certificate_expiry/longer_warning_period.yaml)
### Long warning window and JSON report
@@ -307,14 +307,14 @@ the module out) and save the results as a JSON file:
**From git:**
```
-$ ansible-playbook -v -i HOSTS playbooks/byo/openshift-checks/certificate_expiry/longer-warning-period-json-results.yaml
+$ ansible-playbook -v -i HOSTS playbooks/openshift-checks/certificate_expiry/longer-warning-period-json-results.yaml
```
**From openshift-ansible-playbooks rpm:**
```
-$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/byo/openshift-checks/certificate_expiry/longer-warning-period-json-results.yaml
+$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/openshift-checks/certificate_expiry/longer-warning-period-json-results.yaml
```
-> [View This Playbook](../../playbooks/byo/openshift-checks/certificate_expiry/longer-warning-period-json-results.yaml)
+> [View This Playbook](../../playbooks/openshift-checks/certificate_expiry/longer-warning-period-json-results.yaml)
diff --git a/roles/openshift_etcd_facts/tasks/main.yml b/roles/openshift_etcd_facts/tasks/main.yml
index 22fb39006..ed97d539c 100644
--- a/roles/openshift_etcd_facts/tasks/main.yml
+++ b/roles/openshift_etcd_facts/tasks/main.yml
@@ -1,5 +1 @@
---
-- openshift_facts:
- role: etcd
- local_facts:
- etcd_image: "{{ osm_etcd_image | default(None) }}"
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 99ebb7e36..f94e0e097 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -1563,7 +1563,8 @@ def set_builddefaults_facts(facts):
# Scaffold out the full expected datastructure
facts['master']['admission_plugin_config'] = {'BuildDefaults': {'configuration': {'env': {}}}}
facts['master']['admission_plugin_config'].update(builddefaults['config'])
- delete_empty_keys(facts['master']['admission_plugin_config']['BuildDefaults']['configuration']['env'])
+ if 'env' in facts['master']['admission_plugin_config']['BuildDefaults']['configuration']:
+ delete_empty_keys(facts['master']['admission_plugin_config']['BuildDefaults']['configuration']['env'])
return facts
@@ -1630,7 +1631,6 @@ def set_container_facts_if_unset(facts):
cli_image = master_image
node_image = 'openshift3/node'
ovs_image = 'openshift3/openvswitch'
- etcd_image = 'registry.access.redhat.com/rhel7/etcd'
pod_image = 'openshift3/ose-pod'
router_image = 'openshift3/ose-haproxy-router'
registry_image = 'openshift3/ose-docker-registry'
@@ -1640,7 +1640,6 @@ def set_container_facts_if_unset(facts):
cli_image = master_image
node_image = 'openshift/node'
ovs_image = 'openshift/openvswitch'
- etcd_image = 'registry.access.redhat.com/rhel7/etcd'
pod_image = 'openshift/origin-pod'
router_image = 'openshift/origin-haproxy-router'
registry_image = 'openshift/origin-docker-registry'
@@ -1667,8 +1666,6 @@ def set_container_facts_if_unset(facts):
facts['common']['registry_image'] = registry_image
if 'deployer_image' not in facts['common']:
facts['common']['deployer_image'] = deployer_image
- if 'etcd' in facts and 'etcd_image' not in facts['etcd']:
- facts['etcd']['etcd_image'] = etcd_image
if 'master' in facts and 'master_image' not in facts['master']:
facts['master']['master_image'] = master_image
facts['master']['master_system_image'] = master_image
diff --git a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
index 587c6f85c..4f91f6bb3 100644
--- a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
@@ -12,15 +12,15 @@ DEPLOYMENT_IMAGE_INFO = {
"origin": {
"namespace": "openshift",
"name": "origin",
- "registry_console_template": "${prefix}kubernetes:${version}",
"registry_console_prefix": "cockpit/",
+ "registry_console_basename": "kubernetes",
"registry_console_default_version": "latest",
},
"openshift-enterprise": {
"namespace": "openshift3",
"name": "ose",
- "registry_console_template": "${prefix}registry-console:${version}",
- "registry_console_prefix": "registry.access.redhat.com/openshift3/",
+ "registry_console_prefix": "openshift3/",
+ "registry_console_basename": "registry-console",
"registry_console_default_version": "${short_version}",
},
}
@@ -156,7 +156,8 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
if 'oo_nodes_to_config' in host_groups:
for suffix in NODE_IMAGE_SUFFIXES:
required.add(image_url.replace("${component}", suffix).replace("${version}", image_tag))
- required.add(self._registry_console_image(image_tag, image_info))
+ if self.get_var("osm_use_cockpit", default=True, convert=bool):
+ required.add(self._registry_console_image(image_tag, image_info))
# images for containerized components
if self.get_var("openshift", "common", "is_containerized"):
@@ -180,6 +181,10 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
"openshift_cockpit_deployer_prefix",
default=image_info["registry_console_prefix"],
)
+ basename = self.get_var(
+ "openshift_cockpit_deployer_basename",
+ default=image_info["registry_console_basename"],
+ )
# enterprise template just uses v3.6, v3.7, etc
match = re.match(r'v\d+\.\d+', image_tag)
@@ -187,8 +192,7 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
version = image_info["registry_console_default_version"].replace("${short_version}", short_version)
version = self.get_var("openshift_cockpit_deployer_version", default=version)
- template = image_info["registry_console_template"]
- return template.replace('${prefix}', prefix).replace('${version}', version)
+ return prefix + basename + ':' + version
def local_images(self, images):
"""Filter a list of images and return those available locally."""
diff --git a/roles/openshift_health_checker/test/docker_image_availability_test.py b/roles/openshift_health_checker/test/docker_image_availability_test.py
index 484aa72e0..ec46c3b4b 100644
--- a/roles/openshift_health_checker/test/docker_image_availability_test.py
+++ b/roles/openshift_health_checker/test/docker_image_availability_test.py
@@ -217,7 +217,7 @@ def test_registry_availability(image, registries, connection_test_failed, skopeo
'foo.io/openshift3/ose-docker-registry:f13ac45',
'foo.io/openshift3/ose-haproxy-router:f13ac45',
# registry-console is not constructed/versioned the same as the others.
- 'registry.access.redhat.com/openshift3/registry-console:vtest',
+ 'openshift3/registry-console:vtest',
# containerized images aren't built from oreg_url
'openshift3/node:vtest',
'openshift3/openvswitch:vtest',
@@ -261,7 +261,7 @@ def test_required_images(deployment_type, is_containerized, groups, oreg_url, ex
openshift_deployment_type="openshift-enterprise",
openshift_image_tag="vtest",
),
- "registry.access.redhat.com/openshift3/registry-console:vtest",
+ "openshift3/registry-console:vtest",
), (
dict(
openshift_deployment_type="openshift-enterprise",
diff --git a/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml
index f821efd6b..cc3159a32 100644
--- a/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml
@@ -27,7 +27,7 @@ objects:
spec:
containers:
- name: registry-console
- image: ${IMAGE_PREFIX}registry-console:${IMAGE_VERSION}
+ image: ${IMAGE_PREFIX}${IMAGE_BASENAME}:${IMAGE_VERSION}
ports:
- containerPort: 9090
protocol: TCP
@@ -89,7 +89,7 @@ objects:
- annotations: null
from:
kind: DockerImage
- name: ${IMAGE_PREFIX}registry-console:${IMAGE_VERSION}
+ name: ${IMAGE_PREFIX}${IMAGE_BASENAME}:${IMAGE_VERSION}
name: ${IMAGE_VERSION}
- kind: OAuthClient
apiVersion: v1
@@ -102,7 +102,10 @@ objects:
parameters:
- description: 'Specify "registry/repository" prefix for container image; e.g. for "registry.access.redhat.com/openshift3/registry-console:latest", set prefix "registry.access.redhat.com/openshift3/"'
name: IMAGE_PREFIX
- value: "registry.access.redhat.com/openshift3/"
+ value: "openshift3/"
+ - description: 'Specify component name for container image; e.g. for "registry.access.redhat.com/openshift3/registry-console:latest", use base name "registry-console"'
+ name: IMAGE_BASENAME
+ value: "registry-console"
- description: 'Specify image version; e.g. for "registry.access.redhat.com/openshift3/registry-console:v3.6", set version "v3.6"'
name: IMAGE_VERSION
value: "v3.6"
diff --git a/roles/openshift_hosted_templates/files/v3.6/origin/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.6/origin/registry-console.yaml
index a78146ca4..a75340eb7 100644
--- a/roles/openshift_hosted_templates/files/v3.6/origin/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v3.6/origin/registry-console.yaml
@@ -27,7 +27,7 @@ objects:
spec:
containers:
- name: registry-console
- image: ${IMAGE_PREFIX}kubernetes:${IMAGE_VERSION}
+ image: ${IMAGE_PREFIX}${IMAGE_BASENAME}:${IMAGE_VERSION}
ports:
- containerPort: 9090
protocol: TCP
@@ -89,7 +89,7 @@ objects:
- annotations: null
from:
kind: DockerImage
- name: ${IMAGE_PREFIX}kubernetes:${IMAGE_VERSION}
+ name: ${IMAGE_PREFIX}${IMAGE_BASENAME}:${IMAGE_VERSION}
name: ${IMAGE_VERSION}
- kind: OAuthClient
apiVersion: v1
@@ -103,6 +103,9 @@ parameters:
- description: 'Specify "registry/namespace" prefix for container image; e.g. for "registry.example.com/cockpit/kubernetes:latest", set prefix "registry.example.com/cockpit/"'
name: IMAGE_PREFIX
value: "cockpit/"
+ - description: 'Specify component name for container image; e.g. for "registry.example.com/cockpit/kubernetes:latest", use base name "kubernetes"'
+ name: IMAGE_BASENAME
+ value: "kubernetes"
- description: 'Specify image version; e.g. for "cockpit/kubernetes:latest", set version "latest"'
name: IMAGE_VERSION
value: latest
diff --git a/roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml
index 019d836fe..9f2e6125d 100644
--- a/roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml
@@ -27,7 +27,7 @@ objects:
spec:
containers:
- name: registry-console
- image: ${IMAGE_PREFIX}registry-console:${IMAGE_VERSION}
+ image: ${IMAGE_PREFIX}${IMAGE_BASENAME}:${IMAGE_VERSION}
ports:
- containerPort: 9090
protocol: TCP
@@ -89,7 +89,7 @@ objects:
- annotations: null
from:
kind: DockerImage
- name: ${IMAGE_PREFIX}registry-console:${IMAGE_VERSION}
+ name: ${IMAGE_PREFIX}${IMAGE_BASENAME}:${IMAGE_VERSION}
name: ${IMAGE_VERSION}
- kind: OAuthClient
apiVersion: v1
@@ -102,7 +102,10 @@ objects:
parameters:
- description: 'Specify "registry/repository" prefix for container image; e.g. for "registry.access.redhat.com/openshift3/registry-console:latest", set prefix "registry.access.redhat.com/openshift3/"'
name: IMAGE_PREFIX
- value: "registry.access.redhat.com/openshift3/"
+ value: "openshift3/"
+ - description: 'Specify component name for container image; e.g. for "registry.access.redhat.com/openshift3/registry-console:latest", use base name "registry-console"'
+ name: IMAGE_BASENAME
+ value: "registry-console"
- description: 'Specify image version; e.g. for "registry.access.redhat.com/openshift3/registry-console:v3.7", set version "v3.7"'
name: IMAGE_VERSION
value: "v3.7"
diff --git a/roles/openshift_hosted_templates/files/v3.7/origin/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.7/origin/registry-console.yaml
index a78146ca4..a75340eb7 100644
--- a/roles/openshift_hosted_templates/files/v3.7/origin/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v3.7/origin/registry-console.yaml
@@ -27,7 +27,7 @@ objects:
spec:
containers:
- name: registry-console
- image: ${IMAGE_PREFIX}kubernetes:${IMAGE_VERSION}
+ image: ${IMAGE_PREFIX}${IMAGE_BASENAME}:${IMAGE_VERSION}
ports:
- containerPort: 9090
protocol: TCP
@@ -89,7 +89,7 @@ objects:
- annotations: null
from:
kind: DockerImage
- name: ${IMAGE_PREFIX}kubernetes:${IMAGE_VERSION}
+ name: ${IMAGE_PREFIX}${IMAGE_BASENAME}:${IMAGE_VERSION}
name: ${IMAGE_VERSION}
- kind: OAuthClient
apiVersion: v1
@@ -103,6 +103,9 @@ parameters:
- description: 'Specify "registry/namespace" prefix for container image; e.g. for "registry.example.com/cockpit/kubernetes:latest", set prefix "registry.example.com/cockpit/"'
name: IMAGE_PREFIX
value: "cockpit/"
+ - description: 'Specify component name for container image; e.g. for "registry.example.com/cockpit/kubernetes:latest", use base name "kubernetes"'
+ name: IMAGE_BASENAME
+ value: "kubernetes"
- description: 'Specify image version; e.g. for "cockpit/kubernetes:latest", set version "latest"'
name: IMAGE_VERSION
value: latest
diff --git a/roles/openshift_hosted_templates/files/v3.8/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.8/enterprise/registry-console.yaml
index 5acbb02b3..f04ce06d3 100644
--- a/roles/openshift_hosted_templates/files/v3.8/enterprise/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v3.8/enterprise/registry-console.yaml
@@ -27,7 +27,7 @@ objects:
spec:
containers:
- name: registry-console
- image: ${IMAGE_PREFIX}registry-console:${IMAGE_VERSION}
+ image: ${IMAGE_PREFIX}${IMAGE_BASENAME}:${IMAGE_VERSION}
ports:
- containerPort: 9090
protocol: TCP
@@ -89,7 +89,7 @@ objects:
- annotations: null
from:
kind: DockerImage
- name: ${IMAGE_PREFIX}registry-console:${IMAGE_VERSION}
+ name: ${IMAGE_PREFIX}${IMAGE_BASENAME}:${IMAGE_VERSION}
name: ${IMAGE_VERSION}
- kind: OAuthClient
apiVersion: v1
@@ -102,7 +102,10 @@ objects:
parameters:
- description: 'Specify "registry/repository" prefix for container image; e.g. for "registry.access.redhat.com/openshift3/registry-console:latest", set prefix "registry.access.redhat.com/openshift3/"'
name: IMAGE_PREFIX
- value: "registry.access.redhat.com/openshift3/"
+ value: "openshift3/"
+ - description: 'Specify component name for container image; e.g. for "registry.access.redhat.com/openshift3/registry-console:latest", use base name "registry-console"'
+ name: IMAGE_BASENAME
+ value: "registry-console"
- description: 'Specify image version; e.g. for "registry.access.redhat.com/openshift3/registry-console:v3.8", set version "v3.8"'
name: IMAGE_VERSION
value: "v3.8"
diff --git a/roles/openshift_hosted_templates/files/v3.8/origin/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.8/origin/registry-console.yaml
index a78146ca4..a75340eb7 100644
--- a/roles/openshift_hosted_templates/files/v3.8/origin/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v3.8/origin/registry-console.yaml
@@ -27,7 +27,7 @@ objects:
spec:
containers:
- name: registry-console
- image: ${IMAGE_PREFIX}kubernetes:${IMAGE_VERSION}
+ image: ${IMAGE_PREFIX}${IMAGE_BASENAME}:${IMAGE_VERSION}
ports:
- containerPort: 9090
protocol: TCP
@@ -89,7 +89,7 @@ objects:
- annotations: null
from:
kind: DockerImage
- name: ${IMAGE_PREFIX}kubernetes:${IMAGE_VERSION}
+ name: ${IMAGE_PREFIX}${IMAGE_BASENAME}:${IMAGE_VERSION}
name: ${IMAGE_VERSION}
- kind: OAuthClient
apiVersion: v1
@@ -103,6 +103,9 @@ parameters:
- description: 'Specify "registry/namespace" prefix for container image; e.g. for "registry.example.com/cockpit/kubernetes:latest", set prefix "registry.example.com/cockpit/"'
name: IMAGE_PREFIX
value: "cockpit/"
+ - description: 'Specify component name for container image; e.g. for "registry.example.com/cockpit/kubernetes:latest", use base name "kubernetes"'
+ name: IMAGE_BASENAME
+ value: "kubernetes"
- description: 'Specify image version; e.g. for "cockpit/kubernetes:latest", set version "latest"'
name: IMAGE_VERSION
value: latest
diff --git a/roles/openshift_logging/README.md b/roles/openshift_logging/README.md
index 6c5bb8693..27cfc17d6 100644
--- a/roles/openshift_logging/README.md
+++ b/roles/openshift_logging/README.md
@@ -84,6 +84,7 @@ When `openshift_logging_install_logging` is set to `False` the `openshift_loggin
- `openshift_logging_es_memory_limit`: The amount of RAM that should be assigned to ES. Defaults to '8Gi'.
- `openshift_logging_es_log_appenders`: The list of rootLogger appenders for ES logs which can be: 'file', 'console'. Defaults to 'file'.
- `openshift_logging_es_pv_selector`: A key/value map added to a PVC in order to select specific PVs. Defaults to 'None'.
+- `openshift_logging_es_pvc_storage_class_name`: The name of the storage class to use for a static PVC. Defaults to ''.
- `openshift_logging_es_pvc_dynamic`: Whether or not to add the dynamic PVC annotation for any generated PVCs. Defaults to 'False'.
- `openshift_logging_es_pvc_size`: The requested size for the ES PVCs, when not provided the role will not generate any PVCs. Defaults to '""'.
- `openshift_logging_es_pvc_prefix`: The prefix for the generated PVCs. Defaults to 'logging-es'.
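A hypothetical inventory snippet pinning the ES PVCs to a named StorageClass (the class name is an assumption for illustration):

```yaml
openshift_logging_install_logging: True
openshift_logging_es_pvc_size: "10Gi"                              # role generates PVCs only when set
openshift_logging_es_pvc_storage_class_name: "glusterfs-storage"   # hypothetical StorageClass name
```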
diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml
index 89e583771..2fefdc894 100644
--- a/roles/openshift_logging/tasks/install_logging.yaml
+++ b/roles/openshift_logging/tasks/install_logging.yaml
@@ -84,6 +84,7 @@
openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type }}"
openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_pv_selector }}"
+ openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_pvc_storage_class_name }}"
openshift_logging_elasticsearch_nodeselector: "{{ openshift_logging_es_nodeselector if outer_item.0.nodeSelector | default(None) is none else outer_item.0.nodeSelector }}"
openshift_logging_elasticsearch_storage_group: "{{ [openshift_logging_es_storage_group] if outer_item.0.storageGroups | default([]) | length == 0 else outer_item.0.storageGroups }}"
_es_containers: "{{ outer_item.0.containers}}"
@@ -110,6 +111,7 @@
openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type }}"
openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_pv_selector }}"
+ openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_pvc_storage_class_name }}"
with_sequence: count={{ openshift_logging_es_cluster_size | int - openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | count }}
loop_control:
@@ -146,6 +148,7 @@
openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_ops_pvc_size }}"
openshift_logging_elasticsearch_pvc_dynamic: "{{ openshift_logging_es_ops_pvc_dynamic }}"
openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}"
+ openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_ops_pvc_storage_class_name }}"
openshift_logging_elasticsearch_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}"
openshift_logging_elasticsearch_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}"
openshift_logging_elasticsearch_cpu_request: "{{ openshift_logging_es_ops_cpu_request }}"
@@ -187,6 +190,7 @@
openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_ops_pvc_size }}"
openshift_logging_elasticsearch_pvc_dynamic: "{{ openshift_logging_es_ops_pvc_dynamic }}"
openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}"
+ openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_ops_pvc_storage_class_name }}"
openshift_logging_elasticsearch_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}"
openshift_logging_elasticsearch_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}"
openshift_logging_elasticsearch_cpu_request: "{{ openshift_logging_es_ops_cpu_request }}"
diff --git a/roles/openshift_logging_elasticsearch/defaults/main.yml b/roles/openshift_logging_elasticsearch/defaults/main.yml
index bec4432c3..0ea913224 100644
--- a/roles/openshift_logging_elasticsearch/defaults/main.yml
+++ b/roles/openshift_logging_elasticsearch/defaults/main.yml
@@ -31,6 +31,7 @@ openshift_logging_elasticsearch_pvc_name: ""
openshift_logging_elasticsearch_pvc_size: ""
openshift_logging_elasticsearch_pvc_dynamic: false
openshift_logging_elasticsearch_pvc_pv_selector: {}
+openshift_logging_elasticsearch_pvc_storage_class_name: ""
openshift_logging_elasticsearch_pvc_access_modes: ['ReadWriteOnce']
openshift_logging_elasticsearch_storage_group: ['65534']
diff --git a/roles/openshift_logging_elasticsearch/tasks/main.yaml b/roles/openshift_logging_elasticsearch/tasks/main.yaml
index 2bd02af60..770892d52 100644
--- a/roles/openshift_logging_elasticsearch/tasks/main.yaml
+++ b/roles/openshift_logging_elasticsearch/tasks/main.yaml
@@ -270,7 +270,7 @@
port: 443
targetPort: 4443
selector:
- component: "{{ es_component }}-prometheus"
+ component: "{{ es_component }}"
provider: openshift
- oc_edit:
diff --git a/roles/openshift_logging_fluentd/files/secure-forward.conf b/roles/openshift_logging_fluentd/files/secure-forward.conf
index f4483df79..87410c1c5 100644
--- a/roles/openshift_logging_fluentd/files/secure-forward.conf
+++ b/roles/openshift_logging_fluentd/files/secure-forward.conf
@@ -1,3 +1,4 @@
+# <store>
# @type secure_forward
# self_hostname ${HOSTNAME}
@@ -22,3 +23,4 @@
# specify hostlabel for FQDN verification if ipaddress is used for host
# hostlabel server.fqdn.example.com
# </server>
+# </store>
diff --git a/roles/openshift_logging_mux/files/secure-forward.conf b/roles/openshift_logging_mux/files/secure-forward.conf
index f4483df79..87410c1c5 100644
--- a/roles/openshift_logging_mux/files/secure-forward.conf
+++ b/roles/openshift_logging_mux/files/secure-forward.conf
@@ -1,3 +1,4 @@
+# <store>
# @type secure_forward
# self_hostname ${HOSTNAME}
@@ -22,3 +23,4 @@
# specify hostlabel for FQDN verification if ipaddress is used for host
# hostlabel server.fqdn.example.com
# </server>
+# </store>
diff --git a/roles/openshift_node/meta/main.yml b/roles/openshift_node/meta/main.yml
index 5bc7b9869..c32aa1600 100644
--- a/roles/openshift_node/meta/main.yml
+++ b/roles/openshift_node/meta/main.yml
@@ -13,9 +13,15 @@ galaxy_info:
- cloud
dependencies:
- role: openshift_node_facts
+ when: not (openshift_node_upgrade_in_progress | default(False))
- role: lib_openshift
- role: lib_os_firewall
+ when: not (openshift_node_upgrade_in_progress | default(False))
- role: openshift_clock
+ when: not (openshift_node_upgrade_in_progress | default(False))
- role: openshift_docker
- role: openshift_cloud_provider
+ when: not (openshift_node_upgrade_in_progress | default(False))
- role: openshift_node_dnsmasq
+- role: lib_utils
+ when: openshift_node_upgrade_in_progress | default(False)
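A sketch of how an upgrade play might set the gate (play layout assumed; the flag name comes from the conditions above):

```yaml
# Hypothetical upgrade play: with the flag set, the install-time dependencies
# (openshift_node_facts, lib_os_firewall, openshift_clock, openshift_cloud_provider)
# are skipped and lib_utils is pulled in instead.
- hosts: oo_nodes_to_upgrade
  serial: 1
  roles:
    - role: openshift_node
      openshift_node_upgrade_in_progress: True
```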
diff --git a/roles/openshift_node_upgrade/tasks/config/install-node-docker-service-file.yml b/roles/openshift_node/tasks/config/install-node-docker-service-file.yml
index f92ff79b5..f92ff79b5 100644
--- a/roles/openshift_node_upgrade/tasks/config/install-node-docker-service-file.yml
+++ b/roles/openshift_node/tasks/config/install-node-docker-service-file.yml
diff --git a/roles/openshift_node_upgrade/tasks/docker/upgrade.yml b/roles/openshift_node/tasks/docker/upgrade.yml
index ebe87d6fd..ebe87d6fd 100644
--- a/roles/openshift_node_upgrade/tasks/docker/upgrade.yml
+++ b/roles/openshift_node/tasks/docker/upgrade.yml
diff --git a/roles/openshift_node_upgrade/tasks/main.yml b/roles/openshift_node/tasks/upgrade.yml
index 66c1fcc38..2bca1e974 100644
--- a/roles/openshift_node_upgrade/tasks/main.yml
+++ b/roles/openshift_node/tasks/upgrade.yml
@@ -59,7 +59,7 @@
- include: "{{ node_config_hook }}"
when: node_config_hook is defined
-- include: rpm_upgrade.yml
+- include: upgrade/rpm_upgrade.yml
vars:
component: "node"
openshift_version: "{{ openshift_pkg_version | default('') }}"
@@ -70,7 +70,7 @@
path: "/etc/systemd/system/docker.service.d/docker-sdn-ovs.conf"
state: absent
-- include: containerized_node_upgrade.yml
+- include: upgrade/containerized_node_upgrade.yml
when: openshift.common.is_containerized | bool
- name: Ensure containerized services stopped before Docker restart
@@ -165,7 +165,7 @@
value: "/etc/origin/node/resolv.conf"
# Restart all services
-- include: restart.yml
+- include: upgrade/restart.yml
- name: Wait for node to be ready
oc_obj:
diff --git a/roles/openshift_node_upgrade/tasks/containerized_node_upgrade.yml b/roles/openshift_node/tasks/upgrade/containerized_node_upgrade.yml
index 07b0ac715..96b94d8b6 100644
--- a/roles/openshift_node_upgrade/tasks/containerized_node_upgrade.yml
+++ b/roles/openshift_node/tasks/upgrade/containerized_node_upgrade.yml
@@ -6,7 +6,7 @@
skip_node_svc_handlers: True
- name: Update systemd units
- include: systemd_units.yml
+ include: ../systemd_units.yml
# This is a no-op because of skip_node_svc_handlers, but lets us trigger it before end of
# play when the node has already been marked schedulable again. (this would look strange
diff --git a/roles/openshift_node_upgrade/tasks/restart.yml b/roles/openshift_node/tasks/upgrade/restart.yml
index a4fa51172..a4fa51172 100644
--- a/roles/openshift_node_upgrade/tasks/restart.yml
+++ b/roles/openshift_node/tasks/upgrade/restart.yml
diff --git a/roles/openshift_node_upgrade/tasks/rpm_upgrade.yml b/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml
index a998acf21..a998acf21 100644
--- a/roles/openshift_node_upgrade/tasks/rpm_upgrade.yml
+++ b/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml
diff --git a/roles/openshift_node_group/defaults/main.yml b/roles/openshift_node_group/defaults/main.yml
index d398a7fdc..7c81409a5 100644
--- a/roles/openshift_node_group/defaults/main.yml
+++ b/roles/openshift_node_group/defaults/main.yml
@@ -23,4 +23,4 @@ openshift_node_group_network_plugin_default: "{{ os_sdn_network_plugin_name | de
openshift_node_group_network_plugin: "{{ openshift_node_group_network_plugin_default }}"
openshift_node_group_node_data_dir_default: "{{ openshift_data_dir | default('/var/lib/origin') }}"
openshift_node_group_node_data_dir: "{{ openshift_node_group_node_data_dir_default }}"
-openshift_node_group_network_mtu: "{{ openshift_node_sdn_mtu | default(8951) }}"
+openshift_node_group_network_mtu: "{{ openshift_node_sdn_mtu | default(8951) | int }}"
diff --git a/roles/openshift_node_group/templates/node-config.yaml.j2 b/roles/openshift_node_group/templates/node-config.yaml.j2
index 5e22dc6d2..3fd16247c 100644
--- a/roles/openshift_node_group/templates/node-config.yaml.j2
+++ b/roles/openshift_node_group/templates/node-config.yaml.j2
@@ -33,7 +33,7 @@ masterClientConnectionOverrides:
qps: 20
masterKubeConfig: node.kubeconfig
networkConfig:
- mtu: "{{ openshift_node_group_network_mtu }}"
+ mtu: {{ openshift_node_group_network_mtu }}
networkPluginName: {{ openshift_node_group_network_plugin }}
nodeIP: ""
podManifestConfig: null
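With the `| int` cast in the role default and the quotes dropped in the template, the rendered fragment should come out as a YAML integer rather than a string (a sketch assuming the stock SDN plugin and the default MTU from the role above):

```yaml
networkConfig:
  mtu: 8951
  networkPluginName: redhat/openshift-ovs-subnet
```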
diff --git a/roles/openshift_node_upgrade/README.md b/roles/openshift_node_upgrade/README.md
deleted file mode 100644
index 73b98ad90..000000000
--- a/roles/openshift_node_upgrade/README.md
+++ /dev/null
@@ -1,111 +0,0 @@
-OpenShift Node upgrade
-=========
-
-Role responsible for a single node upgrade.
-It is expected a node is functioning and a part of an OpenShift cluster.
-
-Requirements
-------------
-
-TODO
-
-Role Variables
---------------
-From this role:
-
-| Name | Default value | |
-|--------------------------------|-----------------------|--------------------------------------------------------|
-| deployment_type | | Inventory var |
-| docker_upgrade_nuke_images | | Optional inventory var |
-| docker_version | | Optional inventory var |
-| l_docker_upgrade | | |
-| node_config_hook | | |
-| openshift.docker.gte_1_10 | | |
-| openshift_image_tag | | Set by openshift_version role |
-| openshift_pkg_version | | Set by openshift_version role |
-| openshift_release | | Set by openshift_version role |
-| skip_docker_restart | | |
-| openshift_cloudprovider_kind | | |
-
-From openshift.common:
-
-| Name | Default Value | |
-|------------------------------------|---------------------|---------------------|
-| openshift.common.config_base |---------------------|---------------------|
-| openshift.common.hostname |---------------------|---------------------|
-| openshift.common.http_proxy |---------------------|---------------------|
-| openshift.common.is_atomic |---------------------|---------------------|
-| openshift.common.is_containerized |---------------------|---------------------|
-| openshift.common.portal_net |---------------------|---------------------|
-| openshift.common.service_type |---------------------|---------------------|
-
-From openshift.master:
-
-| Name | Default Value | |
-|------------------------------------|---------------------|---------------------|
-| openshift.master.api_port |---------------------|---------------------|
-
-From openshift.node:
-
-| Name | Default Value | |
-|------------------------------------|---------------------|---------------------|
-| openshift.node.node_image |---------------------|---------------------|
-| openshift.node.ovs_image |---------------------|---------------------|
-
-
-Dependencies
-------------
-
-
-TODO
-
-Example Playbook
-----------------
-
-Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
-
-```
----
-- name: Upgrade nodes
- hosts: oo_nodes_to_upgrade
- serial: 1
- any_errors_fatal: true
-
- pre_tasks:
- - name: Mark unschedulable
- command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=false
- delegate_to: "{{ groups.oo_first_master.0 }}"
-
- - name: Drain Node for Kubelet upgrade
- command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --force --delete-local-data --ignore-daemonsets
- delegate_to: "{{ groups.oo_first_master.0 }}"
- register: l_docker_upgrade_drain_result
- until: not l_docker_upgrade_drain_result | failed
- retries: 60
- delay: 60
-
-
- roles:
- - openshift_facts
- - docker
- - openshift_node_dnsmasq
- - openshift_node_upgrade
-
- post_tasks:
- - name: Set node schedulability
- command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=true
- delegate_to: "{{ groups.oo_first_master.0 }}"
-```
-
-License
--------
-
-Apache License, Version 2.0
-
-Author Information
-------------------
-
-TODO
diff --git a/roles/openshift_node_upgrade/defaults/main.yml b/roles/openshift_node_upgrade/defaults/main.yml
deleted file mode 100644
index 1da434e6f..000000000
--- a/roles/openshift_node_upgrade/defaults/main.yml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-openshift_node_debug_level: "{{ debug_level | default(2) }}"
-
-openshift_use_openshift_sdn: True
-os_sdn_network_plugin_name: "redhat/openshift-ovs-subnet"
-
-openshift_node_data_dir_default: "{{ openshift_data_dir | default('/var/lib/origin') }}"
-openshift_node_data_dir: "{{ openshift_node_data_dir_default }}"
-
-# oreg_url is defined by user input
-oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' in oreg_url.split('/')[0]) else '' }}"
-oreg_auth_credentials_path: "{{ openshift_node_data_dir }}/.docker"
-oreg_auth_credentials_replace: False
-l_bind_docker_reg_auth: False
-openshift_docker_alternative_creds: "{{ (openshift_docker_use_system_container | default(False)) or (openshift_use_crio_only | default(False)) }}"
diff --git a/roles/openshift_node_upgrade/files/nuke_images.sh b/roles/openshift_node_upgrade/files/nuke_images.sh
deleted file mode 100644
index 8635eab0d..000000000
--- a/roles/openshift_node_upgrade/files/nuke_images.sh
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/bin/bash
-
-# Stop any running containers
-running_container_ids=`docker ps -q`
-if test -n "$running_container_ids"
-then
- docker stop $running_container_ids
-fi
-
-# Delete all containers
-container_ids=`docker ps -a -q`
-if test -n "$container_ids"
-then
- docker rm -f -v $container_ids
-fi
-
-# Delete all images (forcefully)
-image_ids=`docker images -aq`
-if test -n "$image_ids"
-then
- # Some layers are deleted recursively and are no longer present
- # when docker goes to remove them:
- docker rmi -f `docker images -aq` || true
-fi
-
diff --git a/roles/openshift_node_upgrade/handlers/main.yml b/roles/openshift_node_upgrade/handlers/main.yml
deleted file mode 100644
index 90d80855e..000000000
--- a/roles/openshift_node_upgrade/handlers/main.yml
+++ /dev/null
@@ -1,36 +0,0 @@
----
-- name: restart openvswitch
- systemd:
- name: openvswitch
- state: restarted
- when:
- - not skip_node_svc_handlers | default(False) | bool
- - not (ovs_service_status_changed | default(false) | bool)
- - openshift_use_openshift_sdn | bool
- register: l_openshift_node_upgrade_stop_openvswitch_result
- until: not l_openshift_node_upgrade_stop_openvswitch_result | failed
- retries: 3
- delay: 30
- notify:
- - restart openvswitch pause
-
-- name: restart openvswitch pause
- pause: seconds=15
- when: (not skip_node_svc_handlers | default(False) | bool) and openshift.common.is_containerized | bool
-
-- name: restart node
- systemd:
- name: "{{ openshift.common.service_type }}-node"
- state: restarted
- register: l_openshift_node_upgrade_restart_node_result
- until: not l_openshift_node_upgrade_restart_node_result | failed
- retries: 3
- delay: 30
- when:
- - (not skip_node_svc_handlers | default(False) | bool)
- - not (node_service_status_changed | default(false) | bool)
-
-# TODO(jchaloup): once it is verified the systemd module works as expected
-# switch to it: http://docs.ansible.com/ansible/latest/systemd_module.html
-- name: reload systemd units
- command: systemctl daemon-reload
diff --git a/roles/openshift_node_upgrade/meta/main.yml b/roles/openshift_node_upgrade/meta/main.yml
deleted file mode 100644
index a810b01dc..000000000
--- a/roles/openshift_node_upgrade/meta/main.yml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-galaxy_info:
- author: your name
- description: OpenShift Node upgrade
- company: Red Hat, Inc.
- license: Apache License, Version 2.0
- min_ansible_version: 2.1
- platforms:
- - name: EL
- versions:
- - 7
-dependencies:
-- role: lib_utils
diff --git a/roles/openshift_node_upgrade/tasks/config/configure-node-settings.yml b/roles/openshift_node_upgrade/tasks/config/configure-node-settings.yml
deleted file mode 100644
index 527580481..000000000
--- a/roles/openshift_node_upgrade/tasks/config/configure-node-settings.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-- name: Configure Node settings
- lineinfile:
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-node
- regexp: "{{ item.regex }}"
- line: "{{ item.line }}"
- create: true
- with_items:
- - regex: '^OPTIONS='
- line: "OPTIONS=--loglevel={{ openshift_node_debug_level }}"
- - regex: '^CONFIG_FILE='
- line: "CONFIG_FILE={{ openshift.common.config_base }}/node/node-config.yaml"
- - regex: '^IMAGE_VERSION='
- line: "IMAGE_VERSION={{ openshift_image_tag }}"
- notify:
- - restart node
diff --git a/roles/openshift_node_upgrade/tasks/config/configure-proxy-settings.yml b/roles/openshift_node_upgrade/tasks/config/configure-proxy-settings.yml
deleted file mode 100644
index d60794305..000000000
--- a/roles/openshift_node_upgrade/tasks/config/configure-proxy-settings.yml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-- name: Configure Proxy Settings
- lineinfile:
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-node
- regexp: "{{ item.regex }}"
- line: "{{ item.line }}"
- create: true
- with_items:
- - regex: '^HTTP_PROXY='
- line: "HTTP_PROXY={{ openshift.common.http_proxy | default('') }}"
- - regex: '^HTTPS_PROXY='
- line: "HTTPS_PROXY={{ openshift.common.https_proxy | default('') }}"
- - regex: '^NO_PROXY='
- line: "NO_PROXY={{ openshift.common.no_proxy | default([]) }},{{ openshift.common.portal_net }},{{ hostvars[groups.oo_first_master.0].openshift.master.sdn_cluster_network_cidr }}"
- when: ('http_proxy' in openshift.common and openshift.common.http_proxy != '')
- notify:
- - restart node
diff --git a/roles/openshift_node_upgrade/tasks/config/install-node-deps-docker-service-file.yml b/roles/openshift_node_upgrade/tasks/config/install-node-deps-docker-service-file.yml
deleted file mode 100644
index ee91a88ab..000000000
--- a/roles/openshift_node_upgrade/tasks/config/install-node-deps-docker-service-file.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-- name: Install Node dependencies docker service file
- template:
- dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node-dep.service"
- src: openshift.docker.node.dep.service
- notify:
- - reload systemd units
- - restart node
diff --git a/roles/openshift_node_upgrade/tasks/config/install-ovs-docker-service-file.yml b/roles/openshift_node_upgrade/tasks/config/install-ovs-docker-service-file.yml
deleted file mode 100644
index c2c5ea1d4..000000000
--- a/roles/openshift_node_upgrade/tasks/config/install-ovs-docker-service-file.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-- name: Install OpenvSwitch docker service file
- template:
- dest: "/etc/systemd/system/openvswitch.service"
- src: openvswitch.docker.service
- notify:
- - reload systemd units
- - restart openvswitch
diff --git a/roles/openshift_node_upgrade/tasks/config/install-ovs-service-env-file.yml b/roles/openshift_node_upgrade/tasks/config/install-ovs-service-env-file.yml
deleted file mode 100644
index 1d75a3355..000000000
--- a/roles/openshift_node_upgrade/tasks/config/install-ovs-service-env-file.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-- name: Create the openvswitch service env file
- template:
- src: openvswitch.sysconfig.j2
- dest: /etc/sysconfig/openvswitch
- notify:
- - reload systemd units
- - restart openvswitch
diff --git a/roles/openshift_node_upgrade/tasks/config/workaround-bz1331590-ovs-oom-fix.yml b/roles/openshift_node_upgrade/tasks/config/workaround-bz1331590-ovs-oom-fix.yml
deleted file mode 100644
index 5df1abc79..000000000
--- a/roles/openshift_node_upgrade/tasks/config/workaround-bz1331590-ovs-oom-fix.yml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-# May be a temporary workaround.
-# https://bugzilla.redhat.com/show_bug.cgi?id=1331590
-- name: Create OpenvSwitch service.d directory
- file: path=/etc/systemd/system/openvswitch.service.d/ state=directory
-
-- name: Install OpenvSwitch service OOM fix
- template:
- dest: "/etc/systemd/system/openvswitch.service.d/01-avoid-oom.conf"
- src: openvswitch-avoid-oom.conf
- notify:
- - reload systemd units
- - restart openvswitch
diff --git a/roles/openshift_node_upgrade/tasks/registry_auth.yml b/roles/openshift_node_upgrade/tasks/registry_auth.yml
deleted file mode 100644
index f5428867a..000000000
--- a/roles/openshift_node_upgrade/tasks/registry_auth.yml
+++ /dev/null
@@ -1,46 +0,0 @@
----
-- name: Check for credentials file for registry auth
- stat:
- path: "{{ oreg_auth_credentials_path }}"
- when: oreg_auth_user is defined
- register: node_oreg_auth_credentials_stat
-
-- name: Create credentials for registry auth
- command: "docker --config={{ oreg_auth_credentials_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}"
- when:
- - not (openshift_docker_alternative_creds | default(False))
- - oreg_auth_user is defined
- - (not node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
- register: node_oreg_auth_credentials_create
- retries: 3
- delay: 5
- until: node_oreg_auth_credentials_create.rc == 0
- notify:
- - restart node
-
-# docker_creds is a custom module from lib_utils
-# 'docker login' requires a docker.service running on the local host; this is an
-# alternative implementation for non-docker hosts. This implementation does not
-# check the registry to determine whether or not the credentials will work.
-- name: Create credentials for registry auth (alternative)
- docker_creds:
- path: "{{ oreg_auth_credentials_path }}"
- registry: "{{ oreg_host }}"
- username: "{{ oreg_auth_user }}"
- password: "{{ oreg_auth_password }}"
- when:
- - openshift_docker_alternative_creds | bool
- - oreg_auth_user is defined
- - (not node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
- register: node_oreg_auth_credentials_create
- notify:
- - restart node
-
-# Container images may need the registry credentials
-- name: Setup ro mount of /root/.docker for containerized hosts
- set_fact:
- l_bind_docker_reg_auth: True
- when:
- - openshift.common.is_containerized | bool
- - oreg_auth_user is defined
- - (node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace or node_oreg_auth_credentials_create.changed) | bool
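
A usage sketch of the inventory variables these tasks consume (only the variable names come from the tasks above; every value here is a placeholder):

    oreg_host=registry.example.com
    oreg_auth_user=produser
    oreg_auth_password=secret
    oreg_auth_credentials_path=/var/lib/origin/.docker
    openshift_docker_alternative_creds=False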
diff --git a/roles/openshift_node_upgrade/tasks/systemd_units.yml b/roles/openshift_node_upgrade/tasks/systemd_units.yml
deleted file mode 100644
index 226f5290c..000000000
--- a/roles/openshift_node_upgrade/tasks/systemd_units.yml
+++ /dev/null
@@ -1,37 +0,0 @@
----
-# input variables
-# - openshift.node.node_image
-# - openshift_image_tag
-# - openshift.common.is_containerized
-# - openshift.node.ovs_image
-# - openshift_use_openshift_sdn
-# - openshift.common.service_type
-# - openshift_node_debug_level
-# - openshift.common.config_base
-# - openshift.common.http_proxy
-# - openshift.common.portal_net
-# - openshift.common
-# - openshift.common.http_proxy
-# notify:
-# - restart openvswitch
-# - restart node
-
-# This file is included both in the openshift_node role and in the upgrade
-# playbooks.
-- include: config/install-node-deps-docker-service-file.yml
- when: openshift.common.is_containerized | bool
-
-- include: config/install-node-docker-service-file.yml
- when: openshift.common.is_containerized | bool
-
-- include: config/install-ovs-service-env-file.yml
- when: openshift.common.is_containerized | bool
-
-- include: config/workaround-bz1331590-ovs-oom-fix.yml
- when: openshift_use_openshift_sdn | bool
-
-- include: config/install-ovs-docker-service-file.yml
- when: openshift.common.is_containerized | bool and openshift_use_openshift_sdn | bool
-
-- include: config/configure-node-settings.yml
-- include: config/configure-proxy-settings.yml
diff --git a/roles/openshift_node_upgrade/templates/node.service.j2 b/roles/openshift_node_upgrade/templates/node.service.j2
deleted file mode 100644
index e12a52c15..000000000
--- a/roles/openshift_node_upgrade/templates/node.service.j2
+++ /dev/null
@@ -1,31 +0,0 @@
-[Unit]
-Description=OpenShift Node
-After={{ openshift.docker.service_name }}.service
-Wants=openvswitch.service
-After=ovsdb-server.service
-After=ovs-vswitchd.service
-Wants={{ openshift.docker.service_name }}.service
-Documentation=https://github.com/openshift/origin
-Requires=dnsmasq.service
-After=dnsmasq.service
-
-[Service]
-Type=notify
-EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node
-Environment=GOTRACEBACK=crash
-ExecStartPre=/usr/bin/cp /etc/origin/node/node-dnsmasq.conf /etc/dnsmasq.d/
-ExecStartPre=/usr/bin/dbus-send --system --dest=uk.org.thekelleys.dnsmasq /uk/org/thekelleys/dnsmasq uk.org.thekelleys.SetDomainServers array:string:/in-addr.arpa/127.0.0.1,/{{ openshift.common.dns_domain }}/127.0.0.1
-ExecStopPost=/usr/bin/rm /etc/dnsmasq.d/node-dnsmasq.conf
-ExecStopPost=/usr/bin/dbus-send --system --dest=uk.org.thekelleys.dnsmasq /uk/org/thekelleys/dnsmasq uk.org.thekelleys.SetDomainServers array:string:
-ExecStart=/usr/bin/openshift start node --config=${CONFIG_FILE} $OPTIONS
-LimitNOFILE=65536
-LimitCORE=infinity
-WorkingDirectory=/var/lib/origin/
-SyslogIdentifier={{ openshift.common.service_type }}-node
-Restart=always
-RestartSec=5s
-TimeoutStartSec=300
-OOMScoreAdjust=-999
-
-[Install]
-WantedBy=multi-user.target
diff --git a/roles/openshift_node_upgrade/templates/openshift.docker.node.dep.service b/roles/openshift_node_upgrade/templates/openshift.docker.node.dep.service
deleted file mode 100644
index aae35719c..000000000
--- a/roles/openshift_node_upgrade/templates/openshift.docker.node.dep.service
+++ /dev/null
@@ -1,11 +0,0 @@
-[Unit]
-Requires={{ openshift.docker.service_name }}.service
-After={{ openshift.docker.service_name }}.service
-PartOf={{ openshift.common.service_type }}-node.service
-Before={{ openshift.common.service_type }}-node.service
-
-
-[Service]
-ExecStart=/bin/bash -c "if [[ -f /usr/bin/docker-current ]]; then echo \"DOCKER_ADDTL_BIND_MOUNTS=--volume=/usr/bin/docker-current:/usr/bin/docker-current:ro --volume=/etc/sysconfig/docker:/etc/sysconfig/docker:ro --volume=/etc/containers/registries:/etc/containers/registries:ro\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; else echo \"#DOCKER_ADDTL_BIND_MOUNTS=\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; fi"
-ExecStop=
-SyslogIdentifier={{ openshift.common.service_type }}-node-dep
diff --git a/roles/openshift_node_upgrade/templates/openshift.docker.node.service b/roles/openshift_node_upgrade/templates/openshift.docker.node.service
deleted file mode 100644
index 07d1ebc3c..000000000
--- a/roles/openshift_node_upgrade/templates/openshift.docker.node.service
+++ /dev/null
@@ -1,50 +0,0 @@
-[Unit]
-After={{ openshift.common.service_type }}-master.service
-After={{ openshift.docker.service_name }}.service
-After=openvswitch.service
-PartOf={{ openshift.docker.service_name }}.service
-Requires={{ openshift.docker.service_name }}.service
-{% if openshift_use_openshift_sdn %}
-Wants=openvswitch.service
-PartOf=openvswitch.service
-After=ovsdb-server.service
-After=ovs-vswitchd.service
-{% endif %}
-Wants={{ openshift.common.service_type }}-master.service
-Requires={{ openshift.common.service_type }}-node-dep.service
-After={{ openshift.common.service_type }}-node-dep.service
-Requires=dnsmasq.service
-After=dnsmasq.service
-
-[Service]
-EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node
-EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node-dep
-ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type }}-node
-ExecStartPre=/usr/bin/cp /etc/origin/node/node-dnsmasq.conf /etc/dnsmasq.d/
-ExecStartPre=/usr/bin/dbus-send --system --dest=uk.org.thekelleys.dnsmasq /uk/org/thekelleys/dnsmasq uk.org.thekelleys.SetDomainServers array:string:/in-addr.arpa/127.0.0.1,/{{ openshift.common.dns_domain }}/127.0.0.1
-ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node \
- --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node \
- -v /:/rootfs:ro,rslave -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} \
- -e HOST=/rootfs -e HOST_ETC=/host-etc \
- -v {{ openshift_node_data_dir }}:{{ openshift_node_data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} \
- -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node \
- {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} \
- -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro \
- -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw \
- -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker \
- -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch \
- -v /etc/origin/sdn:/etc/openshift-sdn -v /var/lib/cni:/var/lib/cni \
- -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log \
- -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS -v /etc/pki:/etc/pki:ro \
- {% if l_bind_docker_reg_auth %} -v {{ oreg_auth_credentials_path }}:/root/.docker:ro{% endif %}\
- {{ openshift.node.node_image }}:${IMAGE_VERSION}
-ExecStartPost=/usr/bin/sleep 10
-ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-node
-ExecStopPost=/usr/bin/rm /etc/dnsmasq.d/node-dnsmasq.conf
-ExecStopPost=/usr/bin/dbus-send --system --dest=uk.org.thekelleys.dnsmasq /uk/org/thekelleys/dnsmasq uk.org.thekelleys.SetDomainServers array:string:
-SyslogIdentifier={{ openshift.common.service_type }}-node
-Restart=always
-RestartSec=5s
-
-[Install]
-WantedBy={{ openshift.docker.service_name }}.service
diff --git a/roles/openshift_node_upgrade/templates/openvswitch-avoid-oom.conf b/roles/openshift_node_upgrade/templates/openvswitch-avoid-oom.conf
deleted file mode 100644
index 3229bc56b..000000000
--- a/roles/openshift_node_upgrade/templates/openvswitch-avoid-oom.conf
+++ /dev/null
@@ -1,3 +0,0 @@
-# Avoid the OOM killer for openvswitch and its children:
-[Service]
-OOMScoreAdjust=-1000
diff --git a/roles/openshift_node_upgrade/templates/openvswitch.docker.service b/roles/openshift_node_upgrade/templates/openvswitch.docker.service
deleted file mode 100644
index 34aaaabd6..000000000
--- a/roles/openshift_node_upgrade/templates/openvswitch.docker.service
+++ /dev/null
@@ -1,17 +0,0 @@
-[Unit]
-After={{ openshift.docker.service_name }}.service
-Requires={{ openshift.docker.service_name }}.service
-PartOf={{ openshift.docker.service_name }}.service
-
-[Service]
-EnvironmentFile=/etc/sysconfig/openvswitch
-ExecStartPre=-/usr/bin/docker rm -f openvswitch
-ExecStart=/usr/bin/docker run --name openvswitch --rm --privileged --net=host --pid=host -v /lib/modules:/lib/modules -v /run:/run -v /sys:/sys:ro -v /etc/origin/openvswitch:/etc/openvswitch {{ openshift.node.ovs_image }}:${IMAGE_VERSION}
-ExecStartPost=/usr/bin/sleep 5
-ExecStop=/usr/bin/docker stop openvswitch
-SyslogIdentifier=openvswitch
-Restart=always
-RestartSec=5s
-
-[Install]
-WantedBy={{ openshift.docker.service_name }}.service
diff --git a/roles/openshift_node_upgrade/templates/openvswitch.sysconfig.j2 b/roles/openshift_node_upgrade/templates/openvswitch.sysconfig.j2
deleted file mode 100644
index da7c3742a..000000000
--- a/roles/openshift_node_upgrade/templates/openvswitch.sysconfig.j2
+++ /dev/null
@@ -1 +0,0 @@
-IMAGE_VERSION={{ openshift_image_tag }}
diff --git a/roles/openshift_storage_glusterfs/README.md b/roles/openshift_storage_glusterfs/README.md
index 03c157313..54adcf78d 100644
--- a/roles/openshift_storage_glusterfs/README.md
+++ b/roles/openshift_storage_glusterfs/README.md
@@ -87,7 +87,9 @@ GlusterFS cluster into a new or existing OpenShift cluster:
| openshift_storage_glusterfs_block_deploy | True | Deploy glusterblock provisioner service
| openshift_storage_glusterfs_block_image | 'gluster/glusterblock-provisioner'| Container image to use for glusterblock-provisioner pod, enterprise default is 'rhgs3/rhgs-gluster-block-prov-rhel7'
| openshift_storage_glusterfs_block_version | 'latest' | Container image version to use for glusterblock-provisioner pod
-| openshift_storage_glusterfs_block_max_host_vol | 15 | Max number of GlusterFS volumes to host glusterblock volumes
+| openshift_storage_glusterfs_block_host_vol_create| True | Automatically create GlusterFS volumes to host glusterblock volumes. **NOTE:** If this is False, block-hosting volumes will need to be manually created before glusterblock volumes can be provisioned
+| openshift_storage_glusterfs_block_host_vol_size | 100 | Size, in GB, of GlusterFS volumes that will be automatically created to host glusterblock volumes when not enough space is available for a glusterblock volume creation request. **NOTE:** This value is effectively an upper limit on the size of glusterblock volumes unless you manually create larger GlusterFS block-hosting volumes
+| openshift_storage_glusterfs_block_host_vol_max | 15 | Max number of GlusterFS volumes to host glusterblock volumes
| openshift_storage_glusterfs_s3_deploy | True | Deploy gluster-s3 service
| openshift_storage_glusterfs_s3_image | 'gluster/gluster-object'| Container image to use for gluster-s3 pod, enterprise default is 'rhgs3/rhgs-gluster-s3-server-rhel7'
| openshift_storage_glusterfs_s3_version | 'latest' | Container image version to use for gluster-s3 pod
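
A minimal inventory snippet exercising the renamed variables, shown here with the role defaults:

    [OSEv3:vars]
    openshift_storage_glusterfs_block_host_vol_create=True
    openshift_storage_glusterfs_block_host_vol_size=100
    openshift_storage_glusterfs_block_host_vol_max=15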
diff --git a/roles/openshift_storage_glusterfs/defaults/main.yml b/roles/openshift_storage_glusterfs/defaults/main.yml
index c3db36d37..814d6ff28 100644
--- a/roles/openshift_storage_glusterfs/defaults/main.yml
+++ b/roles/openshift_storage_glusterfs/defaults/main.yml
@@ -10,7 +10,9 @@ openshift_storage_glusterfs_version: 'latest'
openshift_storage_glusterfs_block_deploy: True
openshift_storage_glusterfs_block_image: "{{ 'rhgs3/rhgs-gluster-block-prov-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'gluster/glusterblock-provisioner' | quote }}"
openshift_storage_glusterfs_block_version: 'latest'
-openshift_storage_glusterfs_block_max_host_vol: 15
+openshift_storage_glusterfs_block_host_vol_create: True
+openshift_storage_glusterfs_block_host_vol_size: 100
+openshift_storage_glusterfs_block_host_vol_max: 15
openshift_storage_glusterfs_s3_deploy: True
openshift_storage_glusterfs_s3_image: "{{ 'rhgs3/rhgs-gluster-s3-server-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'gluster/gluster-object' | quote }}"
openshift_storage_glusterfs_s3_version: 'latest'
@@ -54,7 +56,9 @@ openshift_storage_glusterfs_registry_version: "{{ openshift_storage_glusterfs_ve
openshift_storage_glusterfs_registry_block_deploy: "{{ openshift_storage_glusterfs_block_deploy }}"
openshift_storage_glusterfs_registry_block_image: "{{ openshift_storage_glusterfs_block_image }}"
openshift_storage_glusterfs_registry_block_version: "{{ openshift_storage_glusterfs_block_version }}"
-openshift_storage_glusterfs_registry_block_max_host_vol: "{{ openshift_storage_glusterfs_block_max_host_vol }}"
+openshift_storage_glusterfs_registry_block_host_vol_create: "{{ openshift_storage_glusterfs_block_host_vol_create }}"
+openshift_storage_glusterfs_registry_block_host_vol_size: "{{ openshift_storage_glusterfs_block_host_vol_size }}"
+openshift_storage_glusterfs_registry_block_host_vol_max: "{{ openshift_storage_glusterfs_block_host_vol_max }}"
openshift_storage_glusterfs_registry_s3_deploy: "{{ openshift_storage_glusterfs_s3_deploy }}"
openshift_storage_glusterfs_registry_s3_image: "{{ openshift_storage_glusterfs_s3_image }}"
openshift_storage_glusterfs_registry_s3_version: "{{ openshift_storage_glusterfs_s3_version }}"
diff --git a/roles/openshift_storage_glusterfs/files/v3.7/glusterblock-provisioner.yml b/roles/openshift_storage_glusterfs/files/v3.7/glusterblock-provisioner.yml
index 2cc69644c..9c1409dee 100644
--- a/roles/openshift_storage_glusterfs/files/v3.7/glusterblock-provisioner.yml
+++ b/roles/openshift_storage_glusterfs/files/v3.7/glusterblock-provisioner.yml
@@ -2,7 +2,7 @@
kind: Template
apiVersion: v1
metadata:
- name: glusterblock
+ name: glusterblock-provisioner
labels:
glusterfs: block-template
glusterblock: template
diff --git a/roles/openshift_storage_glusterfs/files/v3.8/deploy-heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.8/deploy-heketi-template.yml
new file mode 100644
index 000000000..34af652c2
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.8/deploy-heketi-template.yml
@@ -0,0 +1,133 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: deploy-heketi
+ labels:
+ glusterfs: heketi-template
+ deploy-heketi: support
+ annotations:
+ description: Bootstrap Heketi installation
+ tags: glusterfs,heketi,installation
+objects:
+- kind: Service
+ apiVersion: v1
+ metadata:
+ name: deploy-heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-service
+ deploy-heketi: support
+ annotations:
+ description: Exposes Heketi service
+ spec:
+ ports:
+ - name: deploy-heketi-${CLUSTER_NAME}
+ port: 8080
+ targetPort: 8080
+ selector:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-pod
+- kind: Route
+ apiVersion: v1
+ metadata:
+ name: ${HEKETI_ROUTE}
+ labels:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-route
+ deploy-heketi: support
+ spec:
+ to:
+ kind: Service
+ name: deploy-heketi-${CLUSTER_NAME}
+- kind: DeploymentConfig
+ apiVersion: v1
+ metadata:
+ name: deploy-heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-dc
+ deploy-heketi: support
+ annotations:
+ description: Defines how to deploy Heketi
+ spec:
+ replicas: 1
+ selector:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-pod
+ triggers:
+ - type: ConfigChange
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: deploy-heketi
+ labels:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-pod
+ deploy-heketi: support
+ spec:
+ serviceAccountName: heketi-${CLUSTER_NAME}-service-account
+ containers:
+ - name: heketi
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ env:
+ - name: HEKETI_USER_KEY
+ value: ${HEKETI_USER_KEY}
+ - name: HEKETI_ADMIN_KEY
+ value: ${HEKETI_ADMIN_KEY}
+ - name: HEKETI_EXECUTOR
+ value: ${HEKETI_EXECUTOR}
+ - name: HEKETI_FSTAB
+ value: ${HEKETI_FSTAB}
+ - name: HEKETI_SNAPSHOT_LIMIT
+ value: '14'
+ - name: HEKETI_KUBE_GLUSTER_DAEMONSET
+ value: '1'
+ ports:
+ - containerPort: 8080
+ volumeMounts:
+ - name: db
+ mountPath: /var/lib/heketi
+ - name: config
+ mountPath: /etc/heketi
+ readinessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 3
+ httpGet:
+ path: /hello
+ port: 8080
+ livenessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 30
+ httpGet:
+ path: /hello
+ port: 8080
+ volumes:
+ - name: db
+ - name: config
+ secret:
+ secretName: heketi-${CLUSTER_NAME}-config-secret
+parameters:
+- name: HEKETI_USER_KEY
+ displayName: Heketi User Secret
+ description: Set secret for those creating volumes as type _user_
+- name: HEKETI_ADMIN_KEY
+ displayName: Heketi Administrator Secret
+ description: Set secret for administration of the Heketi service as user _admin_
+- name: HEKETI_EXECUTOR
+ displayName: heketi executor type
+ description: Set the executor type, kubernetes or ssh
+ value: kubernetes
+- name: HEKETI_FSTAB
+ displayName: heketi fstab path
+ description: Set the fstab path, file that is populated with bricks that heketi creates
+ value: /var/lib/heketi/fstab
+- name: HEKETI_ROUTE
+ displayName: heketi route name
+ description: Set the hostname for the route URL
+ value: "heketi-glusterfs"
+- name: IMAGE_NAME
+ displayName: heketi container image name
+ required: True
+- name: IMAGE_VERSION
+ displayName: heketi container image version
+ required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify this heketi service, useful for running multiple heketi instances
+ value: glusterfs
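
As a usage sketch, once loaded this template could be instantiated with oc process; IMAGE_NAME and IMAGE_VERSION are the only required parameters, and the image shown is a placeholder:

    oc process deploy-heketi \
      -p IMAGE_NAME=heketi/heketi \
      -p IMAGE_VERSION=latest \
      -p CLUSTER_NAME=storage | oc create -f -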
diff --git a/roles/openshift_storage_glusterfs/files/v3.8/gluster-s3-pvcs-template.yml b/roles/openshift_storage_glusterfs/files/v3.8/gluster-s3-pvcs-template.yml
new file mode 100644
index 000000000..064b51473
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.8/gluster-s3-pvcs-template.yml
@@ -0,0 +1,67 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: gluster-s3-pvcs
+ labels:
+ glusterfs: s3-pvcs-template
+ gluster-s3: pvcs-template
+ annotations:
+ description: Gluster S3 service template
+ tags: glusterfs,heketi,gluster-s3
+objects:
+- kind: PersistentVolumeClaim
+ apiVersion: v1
+ metadata:
+ name: "${PVC}"
+ labels:
+ glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-storage
+ gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-pvc
+ annotations:
+ volume.beta.kubernetes.io/storage-class: "glusterfs-${CLUSTER_NAME}"
+ spec:
+ accessModes:
+ - ReadWriteMany
+ resources:
+ requests:
+ storage: "${PVC_SIZE}"
+- kind: PersistentVolumeClaim
+ apiVersion: v1
+ metadata:
+ name: "${META_PVC}"
+ labels:
+ glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-storage
+ gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-meta-pvc
+ annotations:
+ volume.beta.kubernetes.io/storage-class: "glusterfs-${CLUSTER_NAME}"
+ spec:
+ accessModes:
+ - ReadWriteMany
+ resources:
+ requests:
+ storage: "${META_PVC_SIZE}"
+parameters:
+- name: S3_ACCOUNT
+ displayName: S3 Account Name
+ description: S3 storage account which will provide storage on GlusterFS volumes
+ required: true
+- name: PVC
+ displayName: Primary GlusterFS-backed PVC
+ description: GlusterFS-backed PVC for object storage
+ required: true
+- name: PVC_SIZE
+ displayName: Primary GlusterFS-backed PVC capacity
+ description: Capacity for GlusterFS-backed PVC for object storage
+ value: 2Gi
+- name: META_PVC
+ displayName: Metadata GlusterFS-backed PVC
+ description: GlusterFS-backed PVC for object storage metadata
+ required: true
+- name: META_PVC_SIZE
+ displayName: Metadata GlusterFS-backed PVC capacity
+ description: Capacity for GlusterFS-backed PVC for object storage metadata
+ value: 1Gi
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances
+ value: storage
diff --git a/roles/openshift_storage_glusterfs/files/v3.8/gluster-s3-template.yml b/roles/openshift_storage_glusterfs/files/v3.8/gluster-s3-template.yml
new file mode 100644
index 000000000..896a1b226
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.8/gluster-s3-template.yml
@@ -0,0 +1,136 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: gluster-s3
+ labels:
+ glusterfs: s3-template
+ gluster-s3: template
+ annotations:
+ description: Gluster S3 service template
+ tags: glusterfs,heketi,gluster-s3
+objects:
+- kind: Service
+ apiVersion: v1
+ metadata:
+ name: gluster-s3-${CLUSTER_NAME}-${S3_ACCOUNT}-service
+ labels:
+ glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-service
+ gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-service
+ spec:
+ ports:
+ - protocol: TCP
+ port: 8080
+ targetPort: 8080
+ selector:
+ glusterfs: s3-pod
+ type: ClusterIP
+ sessionAffinity: None
+ status:
+ loadBalancer: {}
+- kind: Route
+ apiVersion: v1
+ metadata:
+ name: gluster-s3-${CLUSTER_NAME}-${S3_ACCOUNT}-route
+ labels:
+ glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-route
+ gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-route
+ spec:
+ to:
+ kind: Service
+ name: gluster-s3-${CLUSTER_NAME}-${S3_ACCOUNT}-service
+- kind: DeploymentConfig
+ apiVersion: v1
+ metadata:
+ name: gluster-s3-${CLUSTER_NAME}-${S3_ACCOUNT}-dc
+ labels:
+ glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-dc
+ gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-dc
+ annotations:
+ openshift.io/scc: privileged
+ description: Defines how to deploy gluster s3 object storage
+ spec:
+ replicas: 1
+ selector:
+ glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-pod
+ template:
+ metadata:
+ name: gluster-${CLUSTER_NAME}-${S3_ACCOUNT}-s3
+ labels:
+ glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-pod
+ gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-pod
+ spec:
+ containers:
+ - name: gluster-s3
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ imagePullPolicy: IfNotPresent
+ ports:
+ - name: gluster
+ containerPort: 8080
+ protocol: TCP
+ env:
+ - name: S3_ACCOUNT
+ value: "${S3_ACCOUNT}"
+ - name: S3_USER
+ value: "${S3_USER}"
+ - name: S3_PASSWORD
+ value: "${S3_PASSWORD}"
+ resources: {}
+ volumeMounts:
+ - name: gluster-vol1
+ mountPath: "/mnt/gluster-object/${S3_ACCOUNT}"
+ - name: gluster-vol2
+ mountPath: "/mnt/gluster-object/gsmetadata"
+ - name: glusterfs-cgroup
+ readOnly: true
+ mountPath: "/sys/fs/cgroup"
+ terminationMessagePath: "/dev/termination-log"
+ securityContext:
+ privileged: true
+ volumes:
+ - name: glusterfs-cgroup
+ hostPath:
+ path: "/sys/fs/cgroup"
+ - name: gluster-vol1
+ persistentVolumeClaim:
+ claimName: ${PVC}
+ - name: gluster-vol2
+ persistentVolumeClaim:
+ claimName: ${META_PVC}
+ restartPolicy: Always
+ terminationGracePeriodSeconds: 30
+ dnsPolicy: ClusterFirst
+ serviceAccountName: default
+ serviceAccount: default
+ securityContext: {}
+parameters:
+- name: IMAGE_NAME
+  displayName: gluster-s3 container image name
+ required: True
+- name: IMAGE_VERSION
+  displayName: gluster-s3 container image version
+ required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances
+ value: storage
+- name: S3_ACCOUNT
+ displayName: S3 Account Name
+ description: S3 storage account which will provide storage on GlusterFS volumes
+ required: true
+- name: S3_USER
+ displayName: S3 User
+ description: S3 user who can access the S3 storage account
+ required: true
+- name: S3_PASSWORD
+ displayName: S3 User Password
+ description: Password for the S3 user
+ required: true
+- name: PVC
+ displayName: Primary GlusterFS-backed PVC
+ description: GlusterFS-backed PVC for object storage
+ value: gluster-s3-claim
+- name: META_PVC
+ displayName: Metadata GlusterFS-backed PVC
+ description: GlusterFS-backed PVC for object storage metadata
+ value: gluster-s3-meta-claim
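
Similarly, a hypothetical instantiation of the gluster-s3 template supplies the required account parameters along with the image (all values illustrative):

    oc process gluster-s3 \
      -p IMAGE_NAME=gluster/gluster-object \
      -p IMAGE_VERSION=latest \
      -p S3_ACCOUNT=demo -p S3_USER=demo-user -p S3_PASSWORD=secret \
      | oc create -f -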
diff --git a/roles/openshift_storage_glusterfs/files/v3.8/glusterblock-provisioner.yml b/roles/openshift_storage_glusterfs/files/v3.8/glusterblock-provisioner.yml
new file mode 100644
index 000000000..9c1409dee
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.8/glusterblock-provisioner.yml
@@ -0,0 +1,104 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: glusterblock-provisioner
+ labels:
+ glusterfs: block-template
+ glusterblock: template
+ annotations:
+ description: glusterblock provisioner template
+ tags: glusterfs
+objects:
+- kind: ClusterRole
+ apiVersion: v1
+ metadata:
+ name: glusterblock-provisioner-runner
+ labels:
+ glusterfs: block-provisioner-runner-clusterrole
+ glusterblock: provisioner-runner-clusterrole
+ rules:
+ - apiGroups: [""]
+ resources: ["persistentvolumes"]
+ verbs: ["get", "list", "watch", "create", "delete"]
+ - apiGroups: [""]
+ resources: ["persistentvolumeclaims"]
+ verbs: ["get", "list", "watch", "update"]
+ - apiGroups: ["storage.k8s.io"]
+ resources: ["storageclasses"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: [""]
+ resources: ["events"]
+ verbs: ["list", "watch", "create", "update", "patch"]
+ - apiGroups: [""]
+ resources: ["services"]
+ verbs: ["get"]
+ - apiGroups: [""]
+ resources: ["secrets"]
+ verbs: ["get", "create", "delete"]
+ - apiGroups: [""]
+ resources: ["routes"]
+ verbs: ["get", "list"]
+- apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: glusterblock-${CLUSTER_NAME}-provisioner
+ labels:
+ glusterfs: block-${CLUSTER_NAME}-provisioner-sa
+ glusterblock: ${CLUSTER_NAME}-provisioner-sa
+- apiVersion: v1
+ kind: ClusterRoleBinding
+ metadata:
+ name: glusterblock-${CLUSTER_NAME}-provisioner
+ roleRef:
+ name: glusterblock-provisioner-runner
+ subjects:
+ - kind: ServiceAccount
+ name: glusterblock-${CLUSTER_NAME}-provisioner
+ namespace: ${NAMESPACE}
+- kind: DeploymentConfig
+ apiVersion: v1
+ metadata:
+ name: glusterblock-${CLUSTER_NAME}-provisioner-dc
+ labels:
+ glusterfs: block-${CLUSTER_NAME}-provisioner-dc
+ glusterblock: ${CLUSTER_NAME}-provisioner-dc
+ annotations:
+ description: Defines how to deploy the glusterblock provisioner pod.
+ spec:
+ replicas: 1
+ selector:
+ glusterfs: block-${CLUSTER_NAME}-provisioner-pod
+ triggers:
+ - type: ConfigChange
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: glusterblock-provisioner
+ labels:
+ glusterfs: block-${CLUSTER_NAME}-provisioner-pod
+ spec:
+ serviceAccountName: glusterblock-${CLUSTER_NAME}-provisioner
+ containers:
+ - name: glusterblock-provisioner
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: PROVISIONER_NAME
+ value: gluster.org/glusterblock
+parameters:
+- name: IMAGE_NAME
+ displayName: glusterblock provisioner container image name
+ required: True
+- name: IMAGE_VERSION
+ displayName: glusterblock provisioner container image version
+ required: True
+- name: NAMESPACE
+ displayName: glusterblock provisioner namespace
+ description: The namespace in which these resources are being created
+ required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances
+ value: storage
diff --git a/roles/openshift_storage_glusterfs/files/v3.8/glusterfs-template.yml b/roles/openshift_storage_glusterfs/files/v3.8/glusterfs-template.yml
new file mode 100644
index 000000000..09850a2c2
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.8/glusterfs-template.yml
@@ -0,0 +1,154 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: glusterfs
+ labels:
+ glusterfs: template
+ annotations:
+ description: GlusterFS DaemonSet template
+ tags: glusterfs
+objects:
+- kind: DaemonSet
+ apiVersion: extensions/v1beta1
+ metadata:
+ name: glusterfs-${CLUSTER_NAME}
+ labels:
+ glusterfs: ${CLUSTER_NAME}-daemonset
+ annotations:
+ description: GlusterFS DaemonSet
+ tags: glusterfs
+ spec:
+ selector:
+ matchLabels:
+ glusterfs: ${CLUSTER_NAME}-pod
+ template:
+ metadata:
+ name: glusterfs-${CLUSTER_NAME}
+ labels:
+ glusterfs: ${CLUSTER_NAME}-pod
+ glusterfs-node: pod
+ spec:
+ nodeSelector: "${{NODE_LABELS}}"
+ hostNetwork: true
+ containers:
+ - name: glusterfs
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: GB_GLFS_LRU_COUNT
+ value: "${GB_GLFS_LRU_COUNT}"
+ - name: TCMU_LOGDIR
+ value: "${TCMU_LOGDIR}"
+ resources:
+ requests:
+ memory: 100Mi
+ cpu: 100m
+ volumeMounts:
+ - name: glusterfs-heketi
+ mountPath: "/var/lib/heketi"
+ - name: glusterfs-run
+ mountPath: "/run"
+ - name: glusterfs-lvm
+ mountPath: "/run/lvm"
+ - name: glusterfs-etc
+ mountPath: "/etc/glusterfs"
+ - name: glusterfs-logs
+ mountPath: "/var/log/glusterfs"
+ - name: glusterfs-config
+ mountPath: "/var/lib/glusterd"
+ - name: glusterfs-dev
+ mountPath: "/dev"
+ - name: glusterfs-misc
+ mountPath: "/var/lib/misc/glusterfsd"
+ - name: glusterfs-cgroup
+ mountPath: "/sys/fs/cgroup"
+ readOnly: true
+ - name: glusterfs-ssl
+ mountPath: "/etc/ssl"
+ readOnly: true
+ securityContext:
+ capabilities: {}
+ privileged: true
+ readinessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 40
+ exec:
+ command:
+ - "/bin/bash"
+ - "-c"
+ - systemctl status glusterd.service
+ periodSeconds: 25
+ successThreshold: 1
+ failureThreshold: 15
+ livenessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 40
+ exec:
+ command:
+ - "/bin/bash"
+ - "-c"
+ - systemctl status glusterd.service
+ periodSeconds: 25
+ successThreshold: 1
+ failureThreshold: 15
+ terminationMessagePath: "/dev/termination-log"
+ volumes:
+ - name: glusterfs-heketi
+ hostPath:
+ path: "/var/lib/heketi"
+ - name: glusterfs-run
+ emptyDir: {}
+ - name: glusterfs-lvm
+ hostPath:
+ path: "/run/lvm"
+ - name: glusterfs-etc
+ hostPath:
+ path: "/etc/glusterfs"
+ - name: glusterfs-logs
+ hostPath:
+ path: "/var/log/glusterfs"
+ - name: glusterfs-config
+ hostPath:
+ path: "/var/lib/glusterd"
+ - name: glusterfs-dev
+ hostPath:
+ path: "/dev"
+ - name: glusterfs-misc
+ hostPath:
+ path: "/var/lib/misc/glusterfsd"
+ - name: glusterfs-cgroup
+ hostPath:
+ path: "/sys/fs/cgroup"
+ - name: glusterfs-ssl
+ hostPath:
+ path: "/etc/ssl"
+ restartPolicy: Always
+ terminationGracePeriodSeconds: 30
+ dnsPolicy: ClusterFirst
+ securityContext: {}
+parameters:
+- name: NODE_LABELS
+ displayName: Daemonset Node Labels
+  description: Labels which define the daemonset node selector. Must contain at least one label of the format 'glusterfs=<CLUSTER_NAME>-host'
+ value: '{ "glusterfs": "storage-host" }'
+- name: IMAGE_NAME
+ displayName: GlusterFS container image name
+ required: True
+- name: IMAGE_VERSION
+ displayName: GlusterFS container image version
+ required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances
+ value: storage
+- name: GB_GLFS_LRU_COUNT
+ displayName: Maximum number of block hosting volumes
+  description: Maximum number of block hosting volumes.
+ value: "15"
+ required: true
+- name: TCMU_LOGDIR
+ displayName: Tcmu runner log directory
+  description: Sets the tcmu-runner log directory
+ value: "/var/log/glusterfs/gluster-block"
+ required: true
diff --git a/roles/openshift_storage_glusterfs/files/v3.8/heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.8/heketi-template.yml
new file mode 100644
index 000000000..28cdb2982
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.8/heketi-template.yml
@@ -0,0 +1,136 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: heketi
+ labels:
+ glusterfs: heketi-template
+ annotations:
+ description: Heketi service deployment template
+ tags: glusterfs,heketi
+objects:
+- kind: Service
+ apiVersion: v1
+ metadata:
+ name: heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: heketi-${CLUSTER_NAME}-service
+ heketi: ${CLUSTER_NAME}-service
+ annotations:
+ description: Exposes Heketi service
+ spec:
+ ports:
+ - name: heketi
+ port: 8080
+ targetPort: 8080
+ selector:
+ glusterfs: heketi-${CLUSTER_NAME}-pod
+- kind: Route
+ apiVersion: v1
+ metadata:
+ name: ${HEKETI_ROUTE}
+ labels:
+ glusterfs: heketi-${CLUSTER_NAME}-route
+ heketi: ${CLUSTER_NAME}-route
+ spec:
+ to:
+ kind: Service
+ name: heketi-${CLUSTER_NAME}
+- kind: DeploymentConfig
+ apiVersion: v1
+ metadata:
+ name: heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: heketi-${CLUSTER_NAME}-dc
+ heketi: ${CLUSTER_NAME}-dc
+ annotations:
+ description: Defines how to deploy Heketi
+ spec:
+ replicas: 1
+ selector:
+ glusterfs: heketi-${CLUSTER_NAME}-pod
+ triggers:
+ - type: ConfigChange
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: heketi-${CLUSTER_NAME}-pod
+ heketi: ${CLUSTER_NAME}-pod
+ spec:
+ serviceAccountName: heketi-${CLUSTER_NAME}-service-account
+ containers:
+ - name: heketi
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: HEKETI_USER_KEY
+ value: ${HEKETI_USER_KEY}
+ - name: HEKETI_ADMIN_KEY
+ value: ${HEKETI_ADMIN_KEY}
+ - name: HEKETI_EXECUTOR
+ value: ${HEKETI_EXECUTOR}
+ - name: HEKETI_FSTAB
+ value: ${HEKETI_FSTAB}
+ - name: HEKETI_SNAPSHOT_LIMIT
+ value: '14'
+ - name: HEKETI_KUBE_GLUSTER_DAEMONSET
+ value: '1'
+ ports:
+ - containerPort: 8080
+ volumeMounts:
+ - name: db
+ mountPath: /var/lib/heketi
+ - name: config
+ mountPath: /etc/heketi
+ readinessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 3
+ httpGet:
+ path: /hello
+ port: 8080
+ livenessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 30
+ httpGet:
+ path: /hello
+ port: 8080
+ volumes:
+ - name: db
+ glusterfs:
+ endpoints: heketi-db-${CLUSTER_NAME}-endpoints
+ path: heketidbstorage
+ - name: config
+ secret:
+ secretName: heketi-${CLUSTER_NAME}-config-secret
+parameters:
+- name: HEKETI_USER_KEY
+ displayName: Heketi User Secret
+ description: Set secret for those creating volumes as type _user_
+- name: HEKETI_ADMIN_KEY
+ displayName: Heketi Administrator Secret
+ description: Set secret for administration of the Heketi service as user _admin_
+- name: HEKETI_EXECUTOR
+ displayName: heketi executor type
+ description: Set the executor type, kubernetes or ssh
+ value: kubernetes
+- name: HEKETI_FSTAB
+ displayName: heketi fstab path
+ description: Set the fstab path, file that is populated with bricks that heketi creates
+ value: /var/lib/heketi/fstab
+- name: HEKETI_ROUTE
+ displayName: heketi route name
+ description: Set the hostname for the route URL
+ value: "heketi-glusterfs"
+- name: IMAGE_NAME
+ displayName: heketi container image name
+ required: True
+- name: IMAGE_VERSION
+ displayName: heketi container image version
+ required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify this heketi service, useful for running multiple heketi instances
+ value: glusterfs
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterblock_deploy.yml b/roles/openshift_storage_glusterfs/tasks/glusterblock_deploy.yml
index bba1de654..d6be8c726 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterblock_deploy.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterblock_deploy.yml
@@ -29,21 +29,21 @@
src: "{{ openshift.common.examples_content_version }}/{{ item }}"
dest: "{{ mktemp.stdout }}/{{ item }}"
with_items:
- - "glusterblock-template.yml"
+ - "glusterblock-provisioner.yml"
- name: Create glusterblock provisioner template
oc_obj:
namespace: "{{ glusterfs_namespace }}"
kind: template
- name: "glusterblock"
+ name: "glusterblock-provisioner"
state: present
files:
- - "{{ mktemp.stdout }}/glusterblock-template.yml"
+ - "{{ mktemp.stdout }}/glusterblock-provisioner.yml"
- name: Deploy glusterblock provisioner
oc_process:
namespace: "{{ glusterfs_namespace }}"
- template_name: "glusterblock"
+ template_name: "glusterblock-provisioner"
create: True
params:
IMAGE_NAME: "{{ glusterfs_block_image }}"
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
index e2d740f35..1ede0ae94 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
@@ -12,7 +12,9 @@
glusterfs_block_deploy: "{{ openshift_storage_glusterfs_block_deploy | bool }}"
glusterfs_block_image: "{{ openshift_storage_glusterfs_block_image }}"
glusterfs_block_version: "{{ openshift_storage_glusterfs_block_version }}"
- glusterfs_block_max_host_vol: "{{ openshift_storage_glusterfs_block_max_host_vol }}"
+ glusterfs_block_host_vol_create: "{{ openshift_storage_glusterfs_block_host_vol_create }}"
+ glusterfs_block_host_vol_size: "{{ openshift_storage_glusterfs_block_host_vol_size }}"
+ glusterfs_block_host_vol_max: "{{ openshift_storage_glusterfs_block_host_vol_max }}"
glusterfs_s3_deploy: "{{ openshift_storage_glusterfs_s3_deploy | bool }}"
glusterfs_s3_image: "{{ openshift_storage_glusterfs_s3_image }}"
glusterfs_s3_version: "{{ openshift_storage_glusterfs_s3_version }}"
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
index f98d4c62f..ef37762f9 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
@@ -87,7 +87,7 @@
IMAGE_VERSION: "{{ glusterfs_version }}"
NODE_LABELS: "{{ glusterfs_nodeselector }}"
CLUSTER_NAME: "{{ glusterfs_name }}"
- GB_GLFS_LRU_COUNT: "{{ glusterfs_block_max_host_vol }}"
+ GB_GLFS_LRU_COUNT: "{{ glusterfs_block_host_vol_max }}"
- name: Wait for GlusterFS pods
oc_obj:
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
index baac52179..1fa42efa7 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
@@ -12,7 +12,9 @@
glusterfs_block_deploy: "{{ openshift_storage_glusterfs_registry_block_deploy | bool }}"
glusterfs_block_image: "{{ openshift_storage_glusterfs_registry_block_image }}"
glusterfs_block_version: "{{ openshift_storage_glusterfs_registry_block_version }}"
- glusterfs_block_max_host_vol: "{{ openshift_storage_glusterfs_registry_block_max_host_vol }}"
+ glusterfs_block_host_vol_create: "{{ openshift_storage_glusterfs_registry_block_host_vol_create }}"
+ glusterfs_block_host_vol_size: "{{ openshift_storage_glusterfs_registry_block_host_vol_size }}"
+ glusterfs_block_host_vol_max: "{{ openshift_storage_glusterfs_registry_block_host_vol_max }}"
glusterfs_s3_deploy: "{{ openshift_storage_glusterfs_registry_s3_deploy | bool }}"
glusterfs_s3_image: "{{ openshift_storage_glusterfs_registry_s3_image }}"
glusterfs_s3_version: "{{ openshift_storage_glusterfs_registry_s3_version }}"
diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/heketi.json.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/heketi.json.j2
index 579b11bb7..565e9be98 100644
--- a/roles/openshift_storage_glusterfs/templates/v3.7/heketi.json.j2
+++ b/roles/openshift_storage_glusterfs/templates/v3.7/heketi.json.j2
@@ -31,6 +31,12 @@
"port" : "{{ glusterfs_heketi_ssh_port }}",
"user" : "{{ glusterfs_heketi_ssh_user }}",
"sudo" : {{ glusterfs_heketi_ssh_sudo | lower }}
- }
+ },
+
+ "_auto_create_block_hosting_volume": "Creates Block Hosting volumes automatically if not found or exsisting volume exhausted",
+ "auto_create_block_hosting_volume": {{ glusterfs_block_host_vol_create | lower }},
+
+ "_block_hosting_volume_size": "New block hosting volume will be created in size mentioned, This is considered only if auto-create is enabled.",
+ "block_hosting_volume_size": {{ glusterfs_block_host_vol_size }}
}
}
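
With the role defaults above (glusterfs_block_host_vol_create: True, glusterfs_block_host_vol_size: 100), the two new keys render as:

    "auto_create_block_hosting_volume": true,
    "block_hosting_volume_size": 100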
diff --git a/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-registry-endpoints.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-registry-endpoints.yml.j2
new file mode 100644
index 000000000..11c9195bb
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-registry-endpoints.yml.j2
@@ -0,0 +1,12 @@
+---
+apiVersion: v1
+kind: Endpoints
+metadata:
+ name: glusterfs-{{ glusterfs_name }}-endpoints
+subsets:
+- addresses:
+{% for node in glusterfs_nodes %}
+ - ip: {{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }}
+{% endfor %}
+ ports:
+ - port: 1
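
Rendered for a hypothetical cluster named "registry" with two nodes (IPs illustrative), this template yields:

    ---
    apiVersion: v1
    kind: Endpoints
    metadata:
      name: glusterfs-registry-endpoints
    subsets:
    - addresses:
      - ip: 192.0.2.10
      - ip: 192.0.2.11
      ports:
      - port: 1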
diff --git a/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-registry-service.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-registry-service.yml.j2
new file mode 100644
index 000000000..3f869d2b7
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-registry-service.yml.j2
@@ -0,0 +1,10 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: glusterfs-{{ glusterfs_name }}-endpoints
+spec:
+ ports:
+ - port: 1
+status:
+ loadBalancer: {}
diff --git a/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-storageclass.yml.j2
new file mode 100644
index 000000000..095fb780f
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-storageclass.yml.j2
@@ -0,0 +1,13 @@
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+ name: glusterfs-{{ glusterfs_name }}
+provisioner: kubernetes.io/glusterfs
+parameters:
+ resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}"
+ restuser: "admin"
+{% if glusterfs_heketi_admin_key is defined %}
+ secretNamespace: "{{ glusterfs_namespace }}"
+ secretName: "heketi-{{ glusterfs_name }}-admin-secret"
+{%- endif -%}
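
For a native heketi deployment with an admin key defined, assuming glusterfs_name is "storage" and the namespace is "glusterfs" (all illustrative), the rendered StorageClass would look like:

    ---
    apiVersion: storage.k8s.io/v1
    kind: StorageClass
    metadata:
      name: glusterfs-storage
    provisioner: kubernetes.io/glusterfs
    parameters:
      resturl: "http://heketi-storage.apps.example.com"
      restuser: "admin"
      secretNamespace: "glusterfs"
      secretName: "heketi-storage-admin-secret"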
diff --git a/roles/openshift_storage_glusterfs/templates/v3.8/heketi-endpoints.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.8/heketi-endpoints.yml.j2
new file mode 100644
index 000000000..99cbdf748
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.8/heketi-endpoints.yml.j2
@@ -0,0 +1,12 @@
+---
+apiVersion: v1
+kind: Endpoints
+metadata:
+ name: heketi-db-{{ glusterfs_name }}-endpoints
+subsets:
+- addresses:
+{% for node in glusterfs_nodes %}
+ - ip: {{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }}
+{% endfor %}
+ ports:
+ - port: 1
diff --git a/roles/openshift_storage_glusterfs/templates/v3.8/heketi-service.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.8/heketi-service.yml.j2
new file mode 100644
index 000000000..dcb896441
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.8/heketi-service.yml.j2
@@ -0,0 +1,10 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: heketi-db-{{ glusterfs_name }}-endpoints
+spec:
+ ports:
+ - port: 1
+status:
+ loadBalancer: {}
diff --git a/roles/openshift_storage_glusterfs/templates/v3.8/heketi.json.j2 b/roles/openshift_storage_glusterfs/templates/v3.8/heketi.json.j2
new file mode 100644
index 000000000..565e9be98
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.8/heketi.json.j2
@@ -0,0 +1,42 @@
+{
+ "_port_comment": "Heketi Server Port Number",
+ "port" : "8080",
+
+ "_use_auth": "Enable JWT authorization. Please enable for deployment",
+ "use_auth" : false,
+
+ "_jwt" : "Private keys for access",
+ "jwt" : {
+ "_admin" : "Admin has access to all APIs",
+ "admin" : {
+ "key" : "My Secret"
+ },
+ "_user" : "User only has access to /volumes endpoint",
+ "user" : {
+ "key" : "My Secret"
+ }
+ },
+
+ "_glusterfs_comment": "GlusterFS Configuration",
+ "glusterfs" : {
+
+ "_executor_comment": "Execute plugin. Possible choices: mock, kubernetes, ssh",
+ "executor" : "{{ glusterfs_heketi_executor }}",
+
+ "_db_comment": "Database file name",
+ "db" : "/var/lib/heketi/heketi.db",
+
+ "sshexec" : {
+ "keyfile" : "/etc/heketi/private_key",
+ "port" : "{{ glusterfs_heketi_ssh_port }}",
+ "user" : "{{ glusterfs_heketi_ssh_user }}",
+ "sudo" : {{ glusterfs_heketi_ssh_sudo | lower }}
+ },
+
+ "_auto_create_block_hosting_volume": "Creates Block Hosting volumes automatically if not found or exsisting volume exhausted",
+ "auto_create_block_hosting_volume": {{ glusterfs_block_host_vol_create | lower }},
+
+ "_block_hosting_volume_size": "New block hosting volume will be created in size mentioned, This is considered only if auto-create is enabled.",
+ "block_hosting_volume_size": {{ glusterfs_block_host_vol_size }}
+ }
+}
diff --git a/roles/openshift_storage_glusterfs/templates/v3.8/topology.json.j2 b/roles/openshift_storage_glusterfs/templates/v3.8/topology.json.j2
new file mode 100644
index 000000000..d6c28f6dd
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.8/topology.json.j2
@@ -0,0 +1,49 @@
+{
+ "clusters": [
+{%- set clusters = {} -%}
+{%- for node in glusterfs_nodes -%}
+  {%- set cluster = hostvars[node].glusterfs_cluster if 'glusterfs_cluster' in hostvars[node] else '1' -%}
+ {%- if cluster in clusters -%}
+ {%- set _dummy = clusters[cluster].append(node) -%}
+ {%- else -%}
+ {%- set _dummy = clusters.update({cluster: [ node, ]}) -%}
+ {%- endif -%}
+{%- endfor -%}
+{%- for cluster in clusters -%}
+ {
+ "nodes": [
+{%- for node in clusters[cluster] -%}
+ {
+ "node": {
+ "hostnames": {
+ "manage": [
+{%- if 'glusterfs_hostname' in hostvars[node] -%}
+ "{{ hostvars[node].glusterfs_hostname }}"
+{%- elif 'openshift' in hostvars[node] -%}
+ "{{ hostvars[node].openshift.node.nodename }}"
+{%- else -%}
+ "{{ node }}"
+{%- endif -%}
+ ],
+ "storage": [
+{%- if 'glusterfs_ip' in hostvars[node] -%}
+ "{{ hostvars[node].glusterfs_ip }}"
+{%- else -%}
+ "{{ hostvars[node].openshift.common.ip }}"
+{%- endif -%}
+ ]
+ },
+ "zone": {{ hostvars[node].glusterfs_zone | default(1) }}
+ },
+ "devices": [
+{%- for device in hostvars[node].glusterfs_devices -%}
+ "{{ device }}"{% if not loop.last %},{% endif %}
+{%- endfor -%}
+ ]
+ }{% if not loop.last %},{% endif %}
+{%- endfor -%}
+ ]
+ }{% if not loop.last %},{% endif %}
+{%- endfor -%}
+ ]
+}
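
Rendered for a single hypothetical node with one device and the default zone, the template produces output along these lines (hostname, IP, and device are illustrative):

    {
      "clusters": [
        {
          "nodes": [
            {
              "node": {
                "hostnames": {
                  "manage": [ "node1.example.com" ],
                  "storage": [ "192.0.2.10" ]
                },
                "zone": 1
              },
              "devices": [ "/dev/sdb" ]
            }
          ]
        }
      ]
    }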