Diffstat (limited to 'roles')
-rw-r--r--  roles/docker/tasks/main.yml | 8
-rw-r--r--  roles/docker/tasks/package_docker.yml | 2
-rw-r--r--  roles/docker/tasks/systemcontainer_crio.yml | 4
-rw-r--r--  roles/docker/tasks/systemcontainer_docker.yml | 2
-rw-r--r--  roles/etcd/defaults/main.yaml | 10
-rw-r--r--  roles/etcd/tasks/main.yml | 10
-rw-r--r--  roles/etcd/tasks/restart.yml | 21
-rw-r--r--  roles/etcd/tasks/system_container.yml | 4
-rw-r--r--  roles/etcd/tasks/version_detect.yml | 55
-rw-r--r--  roles/etcd/templates/etcd.docker.service | 2
-rw-r--r--  roles/installer_checkpoint/README.md | 4
-rw-r--r--  roles/installer_checkpoint/callback_plugins/installer_checkpoint.py | 10
-rw-r--r--  roles/openshift_etcd_facts/tasks/main.yml | 4
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py | 7
-rw-r--r--  roles/openshift_logging_eventrouter/templates/eventrouter-template.j2 | 2
-rw-r--r--  roles/openshift_logging_mux/templates/mux.j2 | 2
-rw-r--r--  roles/openshift_node/defaults/main.yml | 2
-rw-r--r--  roles/openshift_node/tasks/install.yml | 2
-rw-r--r--  roles/openshift_node/tasks/systemd_units.yml | 4
-rw-r--r--  roles/openshift_prometheus/README.md | 7
-rw-r--r--  roles/openshift_prometheus/defaults/main.yaml | 3
-rw-r--r--  roles/openshift_prometheus/templates/prometheus.j2 | 5
-rw-r--r--  roles/openshift_repos/tasks/main.yaml | 2
-rw-r--r--  roles/openshift_sanitize_inventory/tasks/deprecations.yml | 2
-rw-r--r--  roles/openshift_sanitize_inventory/tasks/main.yml | 4
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.8/deploy-heketi-template.yml | 133
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.8/gluster-s3-pvcs-template.yml | 67
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.8/gluster-s3-template.yml | 140
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.8/glusterblock-provisioner.yml | 105
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.8/glusterfs-template.yml | 154
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.8/heketi-template.yml | 136
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-registry-endpoints.yml.j2 | 12
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-registry-service.yml.j2 | 10
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-storageclass.yml.j2 | 13
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.8/heketi-endpoints.yml.j2 | 12
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.8/heketi-service.yml.j2 | 10
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.8/heketi.json.j2 | 42
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.8/topology.json.j2 | 49
-rw-r--r--  roles/openshift_version/tasks/main.yml | 4
39 files changed, 1019 insertions(+), 46 deletions(-)
diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml
index 69ee62790..55052b0a3 100644
--- a/roles/docker/tasks/main.yml
+++ b/roles/docker/tasks/main.yml
@@ -2,7 +2,7 @@
# These tasks dispatch to the proper set of docker tasks based on the
# inventory:openshift_docker_use_system_container variable
-- include: udev_workaround.yml
+- include_tasks: udev_workaround.yml
when: docker_udev_workaround | default(False) | bool
- set_fact:
@@ -20,7 +20,7 @@
- not l_use_crio_only
- name: Use Package Docker if Requested
- include: package_docker.yml
+ include_tasks: package_docker.yml
when:
- not l_use_system_container
- not l_use_crio_only
@@ -35,13 +35,13 @@
changed_when: false
- name: Use System Container Docker if Requested
- include: systemcontainer_docker.yml
+ include_tasks: systemcontainer_docker.yml
when:
- l_use_system_container
- not l_use_crio_only
- name: Add CRI-O usage Requested
- include: systemcontainer_crio.yml
+ include_tasks: systemcontainer_crio.yml
when:
- l_use_crio
- openshift_docker_is_node_or_master | bool
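The dispatch above keys off inventory toggles. A minimal sketch of the relevant settings, assuming standard variable names (`openshift_use_crio` is inferred from `l_use_crio` and may be wired differently in the inventory):

```
# group_vars/OSEv3.yml -- hypothetical toggle values for the dispatch above
docker_udev_workaround: true                   # runs udev_workaround.yml
openshift_docker_use_system_container: true    # selects systemcontainer_docker.yml
openshift_use_crio: false                      # assumed name; selects systemcontainer_crio.yml when true
```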
diff --git a/roles/docker/tasks/package_docker.yml b/roles/docker/tasks/package_docker.yml
index 8121163a6..e6c3fe4d7 100644
--- a/roles/docker/tasks/package_docker.yml
+++ b/roles/docker/tasks/package_docker.yml
@@ -157,4 +157,4 @@
- meta: flush_handlers
# This needs to run after docker is restarted to account for proxy settings.
-- include: registry_auth.yml
+- include_tasks: registry_auth.yml
diff --git a/roles/docker/tasks/systemcontainer_crio.yml b/roles/docker/tasks/systemcontainer_crio.yml
index 3fe10454d..3e5bdf32c 100644
--- a/roles/docker/tasks/systemcontainer_crio.yml
+++ b/roles/docker/tasks/systemcontainer_crio.yml
@@ -162,7 +162,7 @@
state: directory
- name: setup firewall for CRI-O
- include: crio_firewall.yml
+ include_tasks: crio_firewall.yml
static: yes
- name: Configure the CNI network
@@ -182,6 +182,6 @@
# If we are using crio only, docker.service might not be available for
# 'docker login'
-- include: registry_auth.yml
+- include_tasks: registry_auth.yml
vars:
openshift_docker_alternative_creds: "{{ l_use_crio_only }}"
diff --git a/roles/docker/tasks/systemcontainer_docker.yml b/roles/docker/tasks/systemcontainer_docker.yml
index 84220fa66..f69acb9a5 100644
--- a/roles/docker/tasks/systemcontainer_docker.yml
+++ b/roles/docker/tasks/systemcontainer_docker.yml
@@ -177,6 +177,6 @@
# Since docker is running as a system container, docker login will fail to create
# credentials. Use alternate method if requiring authenticated registries.
-- include: registry_auth.yml
+- include_tasks: registry_auth.yml
vars:
openshift_docker_alternative_creds: True
diff --git a/roles/etcd/defaults/main.yaml b/roles/etcd/defaults/main.yaml
index 4b734d4ed..a069e4d87 100644
--- a/roles/etcd/defaults/main.yaml
+++ b/roles/etcd/defaults/main.yaml
@@ -2,10 +2,18 @@
r_etcd_common_backup_tag: ''
r_etcd_common_backup_sufix_name: ''
+l_is_etcd_system_container: "{{ (openshift_use_etcd_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
+
# runc, docker, host
-r_etcd_common_etcd_runtime: "docker"
+r_etcd_common_etcd_runtime: "{{ 'runc' if l_is_etcd_system_container else 'docker' if l_is_containerized else 'host' }}"
r_etcd_common_embedded_etcd: false
+osm_etcd_image: 'registry.access.redhat.com/rhel7/etcd'
+etcd_image_dict:
+ origin: "registry.fedoraproject.org/f26/etcd"
+ openshift-enterprise: "{{ osm_etcd_image }}"
+etcd_image: "{{ etcd_image_dict[openshift_deployment_type | default('origin')] }}"
+
# etcd run on a host => use etcdctl command directly
# etcd run as a docker container => use docker exec
# etcd run as a runc container => use runc exec
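The new default resolves the runtime in one expression: `runc` for system containers, `docker` for other containerized installs, `host` otherwise. A sketch of how the comment above maps the runtime to an etcdctl invocation (the fact name `r_etcd_common_etcdctl_command` is an assumption):

```
# Hypothetical fact derived from r_etcd_common_etcd_runtime
- set_fact:
    r_etcd_common_etcdctl_command: >-
      {{ 'etcdctl' if r_etcd_common_etcd_runtime == 'host'
         else 'docker exec etcd_container etcdctl' if r_etcd_common_etcd_runtime == 'docker'
         else 'runc exec etcd etcdctl' }}
```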
diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml
index 3e69af314..78ec2cedb 100644
--- a/roles/etcd/tasks/main.yml
+++ b/roles/etcd/tasks/main.yml
@@ -20,7 +20,7 @@
- block:
- name: Pull etcd container
- command: docker pull {{ openshift.etcd.etcd_image }}
+ command: docker pull {{ etcd_image }}
register: pull_result
changed_when: "'Downloaded newer image' in pull_result.stdout"
@@ -30,7 +30,7 @@
src: etcd.docker.service
when:
- etcd_is_containerized | bool
- - not openshift.common.is_etcd_system_container | bool
+ - not l_is_etcd_system_container | bool
# Start secondary etcd instance for third party integrations
# TODO: Determine an alternative to using thirdparty variable
@@ -90,7 +90,7 @@
enabled: no
masked: yes
daemon_reload: yes
- when: not openshift.common.is_etcd_system_container | bool
+ when: not l_is_etcd_system_container | bool
register: task_result
failed_when: task_result|failed and 'could not' not in task_result.msg|lower
@@ -98,11 +98,11 @@
template:
dest: "/etc/systemd/system/etcd_container.service"
src: etcd.docker.service
- when: not openshift.common.is_etcd_system_container | bool
+ when: not l_is_etcd_system_container | bool
- name: Install Etcd system container
include: system_container.yml
- when: openshift.common.is_etcd_system_container | bool
+ when: l_is_etcd_system_container | bool
when: etcd_is_containerized | bool
- name: Validate permissions on the config dir
diff --git a/roles/etcd/tasks/restart.yml b/roles/etcd/tasks/restart.yml
new file mode 100644
index 000000000..d4a016eec
--- /dev/null
+++ b/roles/etcd/tasks/restart.yml
@@ -0,0 +1,21 @@
+---
+
+- name: restart etcd
+ service:
+ name: "{{ etcd_service }}"
+ state: restarted
+ when:
+ - not g_etcd_certificates_expired | default(false) | bool
+
+- name: stop etcd
+ service:
+ name: "{{ etcd_service }}"
+ state: stopped
+ when:
+ - g_etcd_certificates_expired | default(false) | bool
+
+- name: start etcd
+ service:
+ name: "{{ etcd_service }}"
+ state: started
+ when:
+ - g_etcd_certificates_expired | default(false) | bool
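The split lets callers force a full stop/start cycle instead of a plain restart when certificates were found expired. A minimal invocation sketch (the `include_role` form is an assumption; the playbooks may wire this differently):

```
- include_role:
    name: etcd
    tasks_from: restart
  vars:
    g_etcd_certificates_expired: true   # forces stop + start instead of restart
```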
diff --git a/roles/etcd/tasks/system_container.yml b/roles/etcd/tasks/system_container.yml
index f71d9b551..82ac4fc84 100644
--- a/roles/etcd/tasks/system_container.yml
+++ b/roles/etcd/tasks/system_container.yml
@@ -5,7 +5,7 @@
tasks_from: proxy
- name: Pull etcd system container
- command: atomic pull --storage=ostree {{ openshift.etcd.etcd_image }}
+ command: atomic pull --storage=ostree {{ etcd_image }}
register: pull_result
changed_when: "'Pulling layer' in pull_result.stdout"
@@ -57,7 +57,7 @@
- name: Install or Update Etcd system container package
oc_atomic_container:
name: etcd
- image: "{{ openshift.etcd.etcd_image }}"
+ image: "{{ etcd_image }}"
state: latest
values:
- ETCD_DATA_DIR=/var/lib/etcd
diff --git a/roles/etcd/tasks/version_detect.yml b/roles/etcd/tasks/version_detect.yml
new file mode 100644
index 000000000..fe1e418d8
--- /dev/null
+++ b/roles/etcd/tasks/version_detect.yml
@@ -0,0 +1,55 @@
+---
+- block:
+ - name: Record RPM based etcd version
+ command: rpm -qa --qf '%{version}' etcd\*
+ args:
+ warn: no
+ register: etcd_rpm_version
+ failed_when: false
+ # AUDIT:changed_when: `false` because we are only inspecting
+ # state, not manipulating anything
+ changed_when: false
+ - debug:
+ msg: "Etcd rpm version {{ etcd_rpm_version.stdout }} detected"
+ when:
+ - not openshift.common.is_containerized | bool
+
+- block:
+ - name: Record containerized etcd version (docker)
+ command: docker exec etcd_container rpm -qa --qf '%{version}' etcd\*
+ register: etcd_container_version_docker
+ failed_when: false
+ # AUDIT:changed_when: `false` because we are only inspecting
+ # state, not manipulating anything
+ changed_when: false
+ when:
+ - not l_is_etcd_system_container | bool
+
+ # Since a registered variable is set even if the when condition
+ # is false, we need to set etcd_container_version separately
+ - set_fact:
+ etcd_container_version: "{{ etcd_container_version_docker.stdout }}"
+ when:
+ - not l_is_etcd_system_container | bool
+
+ - name: Record containerized etcd version (runc)
+ command: runc exec etcd rpm -qa --qf '%{version}' etcd\*
+ register: etcd_container_version_runc
+ failed_when: false
+ # AUDIT:changed_when: `false` because we are only inspecting
+ # state, not manipulating anything
+ changed_when: false
+ when:
+ - l_is_etcd_system_container | bool
+
+ # Since a registered variable is set even if the when condition
+ # is false, we need to set etcd_container_version separately
+ - set_fact:
+ etcd_container_version: "{{ etcd_container_version_runc.stdout }}"
+ when:
+ - l_is_etcd_system_container | bool
+
+ - debug:
+ msg: "Etcd containerized version {{ etcd_container_version }} detected"
+ when:
+ - openshift.common.is_containerized | bool
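Whichever branch runs, the result lands in `etcd_rpm_version` or `etcd_container_version`. A hypothetical consumer:

```
# Hypothetical usage of the detected etcd version
- include_role:
    name: etcd
    tasks_from: version_detect

- debug:
    msg: "Detected etcd {{ etcd_container_version | default(etcd_rpm_version.stdout | default('unknown')) }}"
```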
diff --git a/roles/etcd/templates/etcd.docker.service b/roles/etcd/templates/etcd.docker.service
index adeca7a91..99ae37319 100644
--- a/roles/etcd/templates/etcd.docker.service
+++ b/roles/etcd/templates/etcd.docker.service
@@ -7,7 +7,7 @@ PartOf={{ openshift.docker.service_name }}.service
[Service]
EnvironmentFile={{ etcd_conf_file }}
ExecStartPre=-/usr/bin/docker rm -f {{ etcd_service }}
-ExecStart=/usr/bin/docker run --name {{ etcd_service }} --rm -v {{ etcd_data_dir }}:{{ etcd_data_dir }}:z -v {{ etcd_conf_dir }}:{{ etcd_conf_dir }}:ro --env-file={{ etcd_conf_file }} --net=host --entrypoint=/usr/bin/etcd {{ openshift.etcd.etcd_image }}
+ExecStart=/usr/bin/docker run --name {{ etcd_service }} --rm -v {{ etcd_data_dir }}:{{ etcd_data_dir }}:z -v {{ etcd_conf_dir }}:{{ etcd_conf_dir }}:ro --env-file={{ etcd_conf_file }} --net=host --entrypoint=/usr/bin/etcd {{ etcd_image }}
ExecStop=/usr/bin/docker stop {{ etcd_service }}
SyslogIdentifier=etcd_container
Restart=always
diff --git a/roles/installer_checkpoint/README.md b/roles/installer_checkpoint/README.md
index f8588c4bf..6426cd545 100644
--- a/roles/installer_checkpoint/README.md
+++ b/roles/installer_checkpoint/README.md
@@ -64,11 +64,11 @@ phase are stored in the `phase_attributes` variable.
},
'installer_phase_etcd': {
'title': 'etcd Install',
- 'playbook': 'playbooks/byo/openshift-etcd/config.yml'
+ 'playbook': 'playbooks/openshift-etcd/config.yml'
},
'installer_phase_nfs': {
'title': 'NFS Install',
- 'playbook': 'playbooks/byo/openshift-nfs/config.yml'
+ 'playbook': 'playbooks/openshift-nfs/config.yml'
},
#...
}
diff --git a/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py b/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py
index d8bdea343..205719215 100644
--- a/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py
+++ b/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py
@@ -50,15 +50,15 @@ class CallbackModule(CallbackBase):
},
'installer_phase_etcd': {
'title': 'etcd Install',
- 'playbook': 'playbooks/byo/openshift-etcd/config.yml'
+ 'playbook': 'playbooks/openshift-etcd/config.yml'
},
'installer_phase_nfs': {
'title': 'NFS Install',
- 'playbook': 'playbooks/byo/openshift-nfs/config.yml'
+ 'playbook': 'playbooks/openshift-nfs/config.yml'
},
'installer_phase_loadbalancer': {
'title': 'Load balancer Install',
- 'playbook': 'playbooks/byo/openshift-loadbalancer/config.yml'
+ 'playbook': 'playbooks/openshift-loadbalancer/config.yml'
},
'installer_phase_master': {
'title': 'Master Install',
@@ -70,7 +70,7 @@ class CallbackModule(CallbackBase):
},
'installer_phase_node': {
'title': 'Node Install',
- 'playbook': 'playbooks/byo/openshift-node/config.yml'
+ 'playbook': 'playbooks/openshift-node/config.yml'
},
'installer_phase_glusterfs': {
'title': 'GlusterFS Install',
@@ -82,7 +82,7 @@ class CallbackModule(CallbackBase):
},
'installer_phase_metrics': {
'title': 'Metrics Install',
- 'playbook': 'playbooks/byo/openshift-cluster/openshift-metrics.yml'
+ 'playbook': 'playbooks/openshift-metrics/config.yml'
},
'installer_phase_logging': {
'title': 'Logging Install',
diff --git a/roles/openshift_etcd_facts/tasks/main.yml b/roles/openshift_etcd_facts/tasks/main.yml
index 22fb39006..ed97d539c 100644
--- a/roles/openshift_etcd_facts/tasks/main.yml
+++ b/roles/openshift_etcd_facts/tasks/main.yml
@@ -1,5 +1 @@
---
-- openshift_facts:
- role: etcd
- local_facts:
- etcd_image: "{{ osm_etcd_image | default(None) }}"
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 99ebb7e36..f94e0e097 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -1563,7 +1563,8 @@ def set_builddefaults_facts(facts):
# Scaffold out the full expected datastructure
facts['master']['admission_plugin_config'] = {'BuildDefaults': {'configuration': {'env': {}}}}
facts['master']['admission_plugin_config'].update(builddefaults['config'])
- delete_empty_keys(facts['master']['admission_plugin_config']['BuildDefaults']['configuration']['env'])
+ if 'env' in facts['master']['admission_plugin_config']['BuildDefaults']['configuration']:
+ delete_empty_keys(facts['master']['admission_plugin_config']['BuildDefaults']['configuration']['env'])
return facts
@@ -1630,7 +1631,6 @@ def set_container_facts_if_unset(facts):
cli_image = master_image
node_image = 'openshift3/node'
ovs_image = 'openshift3/openvswitch'
- etcd_image = 'registry.access.redhat.com/rhel7/etcd'
pod_image = 'openshift3/ose-pod'
router_image = 'openshift3/ose-haproxy-router'
registry_image = 'openshift3/ose-docker-registry'
@@ -1640,7 +1640,6 @@ def set_container_facts_if_unset(facts):
cli_image = master_image
node_image = 'openshift/node'
ovs_image = 'openshift/openvswitch'
- etcd_image = 'registry.access.redhat.com/rhel7/etcd'
pod_image = 'openshift/origin-pod'
router_image = 'openshift/origin-haproxy-router'
registry_image = 'openshift/origin-docker-registry'
@@ -1667,8 +1666,6 @@ def set_container_facts_if_unset(facts):
facts['common']['registry_image'] = registry_image
if 'deployer_image' not in facts['common']:
facts['common']['deployer_image'] = deployer_image
- if 'etcd' in facts and 'etcd_image' not in facts['etcd']:
- facts['etcd']['etcd_image'] = etcd_image
if 'master' in facts and 'master_image' not in facts['master']:
facts['master']['master_image'] = master_image
facts['master']['master_system_image'] = master_image
diff --git a/roles/openshift_logging_eventrouter/templates/eventrouter-template.j2 b/roles/openshift_logging_eventrouter/templates/eventrouter-template.j2
index 5a4f7f762..8529b61d5 100644
--- a/roles/openshift_logging_eventrouter/templates/eventrouter-template.j2
+++ b/roles/openshift_logging_eventrouter/templates/eventrouter-template.j2
@@ -42,7 +42,7 @@ objects:
component: eventrouter
logging-infra: eventrouter
provider: openshift
- replicas: ${REPLICAS}
+ replicas: "${{ '{{' }}REPLICAS{{ '}}' }}"
template:
metadata:
labels:
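Because this file is itself rendered by Jinja before being handed to OpenShift, the literal `${{REPLICAS}}` (the template syntax for substituting a non-string parameter) has to be produced by escaping the braces. A sketch of the round trip:

```
{# Jinja source, as in the change above: #}
replicas: "${{ '{{' }}REPLICAS{{ '}}' }}"
{# After Jinja rendering, the OpenShift template contains:            #}
{#   replicas: "${{REPLICAS}}"  -- substituted as an integer by oc    #}
```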
diff --git a/roles/openshift_logging_mux/templates/mux.j2 b/roles/openshift_logging_mux/templates/mux.j2
index cfb13d59b..79e449b73 100644
--- a/roles/openshift_logging_mux/templates/mux.j2
+++ b/roles/openshift_logging_mux/templates/mux.j2
@@ -59,7 +59,7 @@ spec:
{% endif %}
{% endif %}
ports:
- - containerPort: "{{ openshift_logging_mux_port }}"
+ - containerPort: {{ openshift_logging_mux_port }}
name: mux-forward
volumeMounts:
- name: config
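Dropping the quotes matters because `containerPort` is typed as an integer in the Kubernetes API; the quoted form renders a string and fails validation. Rendered output, assuming `openshift_logging_mux_port` is 24284:

```
ports:
- containerPort: 24284      # integer -- accepted
# containerPort: "24284"    # string -- rejected by API validation
```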
diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml
index 89d154ad7..816338fa1 100644
--- a/roles/openshift_node/defaults/main.yml
+++ b/roles/openshift_node/defaults/main.yml
@@ -4,6 +4,8 @@ openshift_node_debug_level: "{{ debug_level | default(2) }}"
r_openshift_node_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
r_openshift_node_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
+l_is_node_system_container: "{{ (openshift_use_node_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
+
openshift_deployment_type: "{{ openshift_deployment_type | default('origin') }}"
openshift_service_type: "{{ 'origin' if openshift_deployment_type == 'origin' else 'atomic-openshift' }}"
diff --git a/roles/openshift_node/tasks/install.yml b/roles/openshift_node/tasks/install.yml
index 6b7e40491..9a91e2fb6 100644
--- a/roles/openshift_node/tasks/install.yml
+++ b/roles/openshift_node/tasks/install.yml
@@ -20,7 +20,7 @@
- when:
- openshift.common.is_containerized | bool
- - not openshift.common.is_node_system_container | bool
+ - not l_is_node_system_container | bool
block:
- name: Pre-pull node image when containerized
command: >
diff --git a/roles/openshift_node/tasks/systemd_units.yml b/roles/openshift_node/tasks/systemd_units.yml
index 9c182ade6..9b4c24dfe 100644
--- a/roles/openshift_node/tasks/systemd_units.yml
+++ b/roles/openshift_node/tasks/systemd_units.yml
@@ -3,7 +3,7 @@
template:
dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service"
src: "{{ openshift.common.is_containerized | bool | ternary('openshift.docker.node.service', 'node.service.j2') }}"
- when: not openshift.common.is_node_system_container | bool
+ when: not l_is_node_system_container | bool
notify:
- reload systemd units
- restart node
@@ -19,7 +19,7 @@
- name: Install Node system container
include: node_system_container.yml
when:
- - openshift.common.is_node_system_container | bool
+ - l_is_node_system_container | bool
- name: Install OpenvSwitch system containers
include: openvswitch_system_container.yml
diff --git a/roles/openshift_prometheus/README.md b/roles/openshift_prometheus/README.md
index 92f74928c..5bf6e7d77 100644
--- a/roles/openshift_prometheus/README.md
+++ b/roles/openshift_prometheus/README.md
@@ -23,6 +23,13 @@ For default values, see [`defaults/main.yaml`](defaults/main.yaml).
- `openshift_prometheus_<COMPONENT>_image_version`: specify image version for the component
+- `openshift_prometheus_args`: Modify or add arguments for the Prometheus application
+
+e.g.
+```
+openshift_prometheus_args=['--storage.tsdb.retention=6h', '--storage.tsdb.min-block-duration=5s', '--storage.tsdb.max-block-duration=6m']
+```
+
## PVC related variables
Each prometheus component (prometheus, alertmanager, alertbuffer) can set pv claim by setting corresponding role variable:
```
diff --git a/roles/openshift_prometheus/defaults/main.yaml b/roles/openshift_prometheus/defaults/main.yaml
index 4e2cea0b9..1b4a12cac 100644
--- a/roles/openshift_prometheus/defaults/main.yaml
+++ b/roles/openshift_prometheus/defaults/main.yaml
@@ -9,6 +9,9 @@ openshift_prometheus_node_selector: {"region":"infra"}
# additional prometheus rules file
openshift_prometheus_additional_rules_file: null
+# Prometheus application arguments
+openshift_prometheus_args: ['--storage.tsdb.retention=6h', '--storage.tsdb.min-block-duration=2m']
+
# storage
# One of ['emptydir', 'pvc']
openshift_prometheus_storage_type: "emptydir"
diff --git a/roles/openshift_prometheus/templates/prometheus.j2 b/roles/openshift_prometheus/templates/prometheus.j2
index 456db3a57..e73a94eee 100644
--- a/roles/openshift_prometheus/templates/prometheus.j2
+++ b/roles/openshift_prometheus/templates/prometheus.j2
@@ -75,8 +75,9 @@ spec:
- name: prometheus
args:
- - --storage.tsdb.retention=6h
- - --storage.tsdb.min-block-duration=2m
+{% for arg in openshift_prometheus_args %}
+ - {{ arg }}
+{% endfor %}
- --config.file=/etc/prometheus/prometheus.yml
- --web.listen-address=localhost:9090
image: "{{ l_openshift_prometheus_image_prefix }}prometheus:{{ l_openshift_prometheus_image_version }}"
diff --git a/roles/openshift_repos/tasks/main.yaml b/roles/openshift_repos/tasks/main.yaml
index 95ba9fe4c..552a22a0f 100644
--- a/roles/openshift_repos/tasks/main.yaml
+++ b/roles/openshift_repos/tasks/main.yaml
@@ -35,7 +35,7 @@
- when: r_openshift_repos_has_run is not defined
block:
- - include: centos_repos.yml
+ - include_tasks: centos_repos.yml
when:
- ansible_os_family == "RedHat"
- ansible_distribution != "Fedora"
diff --git a/roles/openshift_sanitize_inventory/tasks/deprecations.yml b/roles/openshift_sanitize_inventory/tasks/deprecations.yml
index 94d3acffc..795b8ee60 100644
--- a/roles/openshift_sanitize_inventory/tasks/deprecations.yml
+++ b/roles/openshift_sanitize_inventory/tasks/deprecations.yml
@@ -16,6 +16,6 @@
# for with_fileglob Ansible resolves the path relative to the roles/<rolename>/files directory
- name: Assign deprecated variables to correct counterparts
- include: "{{ item }}"
+ include_tasks: "{{ item }}"
with_fileglob:
- "../tasks/__deprecations_*.yml"
diff --git a/roles/openshift_sanitize_inventory/tasks/main.yml b/roles/openshift_sanitize_inventory/tasks/main.yml
index 70b236033..77428272c 100644
--- a/roles/openshift_sanitize_inventory/tasks/main.yml
+++ b/roles/openshift_sanitize_inventory/tasks/main.yml
@@ -1,7 +1,7 @@
---
# We should print out deprecations prior to any failures so that if a play does fail for other reasons
# the user would also be aware of any deprecated variables they should note to adjust
-- include: deprecations.yml
+- include_tasks: deprecations.yml
- name: Abort when conflicting deployment type variables are set
when:
@@ -53,7 +53,7 @@
openshift_release is "{{ openshift_release }}" which is not a valid version string.
Please set it to a version string like "3.4".
-- include: unsupported.yml
+- include_tasks: unsupported.yml
when:
- not openshift_enable_unsupported_configurations | default(false) | bool
diff --git a/roles/openshift_storage_glusterfs/files/v3.8/deploy-heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.8/deploy-heketi-template.yml
new file mode 100644
index 000000000..34af652c2
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.8/deploy-heketi-template.yml
@@ -0,0 +1,133 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: deploy-heketi
+ labels:
+ glusterfs: heketi-template
+ deploy-heketi: support
+ annotations:
+ description: Bootstrap Heketi installation
+ tags: glusterfs,heketi,installation
+objects:
+- kind: Service
+ apiVersion: v1
+ metadata:
+ name: deploy-heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-service
+ deploy-heketi: support
+ annotations:
+ description: Exposes Heketi service
+ spec:
+ ports:
+ - name: deploy-heketi-${CLUSTER_NAME}
+ port: 8080
+ targetPort: 8080
+ selector:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-pod
+- kind: Route
+ apiVersion: v1
+ metadata:
+ name: ${HEKETI_ROUTE}
+ labels:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-route
+ deploy-heketi: support
+ spec:
+ to:
+ kind: Service
+ name: deploy-heketi-${CLUSTER_NAME}
+- kind: DeploymentConfig
+ apiVersion: v1
+ metadata:
+ name: deploy-heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-dc
+ deploy-heketi: support
+ annotations:
+ description: Defines how to deploy Heketi
+ spec:
+ replicas: 1
+ selector:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-pod
+ triggers:
+ - type: ConfigChange
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: deploy-heketi
+ labels:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-pod
+ deploy-heketi: support
+ spec:
+ serviceAccountName: heketi-${CLUSTER_NAME}-service-account
+ containers:
+ - name: heketi
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ env:
+ - name: HEKETI_USER_KEY
+ value: ${HEKETI_USER_KEY}
+ - name: HEKETI_ADMIN_KEY
+ value: ${HEKETI_ADMIN_KEY}
+ - name: HEKETI_EXECUTOR
+ value: ${HEKETI_EXECUTOR}
+ - name: HEKETI_FSTAB
+ value: ${HEKETI_FSTAB}
+ - name: HEKETI_SNAPSHOT_LIMIT
+ value: '14'
+ - name: HEKETI_KUBE_GLUSTER_DAEMONSET
+ value: '1'
+ ports:
+ - containerPort: 8080
+ volumeMounts:
+ - name: db
+ mountPath: /var/lib/heketi
+ - name: config
+ mountPath: /etc/heketi
+ readinessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 3
+ httpGet:
+ path: /hello
+ port: 8080
+ livenessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 30
+ httpGet:
+ path: /hello
+ port: 8080
+ volumes:
+ - name: db
+ - name: config
+ secret:
+ secretName: heketi-${CLUSTER_NAME}-config-secret
+parameters:
+- name: HEKETI_USER_KEY
+ displayName: Heketi User Secret
+ description: Set secret for those creating volumes as type _user_
+- name: HEKETI_ADMIN_KEY
+ displayName: Heketi Administrator Secret
+ description: Set secret for administration of the Heketi service as user _admin_
+- name: HEKETI_EXECUTOR
+ displayName: heketi executor type
+ description: Set the executor type, kubernetes or ssh
+ value: kubernetes
+- name: HEKETI_FSTAB
+ displayName: heketi fstab path
+ description: Set the fstab path, file that is populated with bricks that heketi creates
+ value: /var/lib/heketi/fstab
+- name: HEKETI_ROUTE
+ displayName: heketi route name
+ description: Set the hostname for the route URL
+ value: "heketi-glusterfs"
+- name: IMAGE_NAME
+ displayName: heketi container image name
+ required: True
+- name: IMAGE_VERSION
+ displayName: heketi container image version
+ required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify this heketi service, useful for running multiple heketi instances
+ value: glusterfs
diff --git a/roles/openshift_storage_glusterfs/files/v3.8/gluster-s3-pvcs-template.yml b/roles/openshift_storage_glusterfs/files/v3.8/gluster-s3-pvcs-template.yml
new file mode 100644
index 000000000..064b51473
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.8/gluster-s3-pvcs-template.yml
@@ -0,0 +1,67 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: gluster-s3-pvcs
+ labels:
+ glusterfs: s3-pvcs-template
+ gluster-s3: pvcs-template
+ annotations:
+ description: Gluster S3 service template
+ tags: glusterfs,heketi,gluster-s3
+objects:
+- kind: PersistentVolumeClaim
+ apiVersion: v1
+ metadata:
+ name: "${PVC}"
+ labels:
+ glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-storage
+ gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-pvc
+ annotations:
+ volume.beta.kubernetes.io/storage-class: "glusterfs-${CLUSTER_NAME}"
+ spec:
+ accessModes:
+ - ReadWriteMany
+ resources:
+ requests:
+ storage: "${PVC_SIZE}"
+- kind: PersistentVolumeClaim
+ apiVersion: v1
+ metadata:
+ name: "${META_PVC}"
+ labels:
+ glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-storage
+ gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-meta-pvc
+ annotations:
+ volume.beta.kubernetes.io/storage-class: "glusterfs-${CLUSTER_NAME}"
+ spec:
+ accessModes:
+ - ReadWriteMany
+ resources:
+ requests:
+ storage: "${META_PVC_SIZE}"
+parameters:
+- name: S3_ACCOUNT
+ displayName: S3 Account Name
+ description: S3 storage account which will provide storage on GlusterFS volumes
+ required: true
+- name: PVC
+ displayName: Primary GlusterFS-backed PVC
+ description: GlusterFS-backed PVC for object storage
+ required: true
+- name: PVC_SIZE
+ displayName: Primary GlusterFS-backed PVC capacity
+ description: Capacity for GlusterFS-backed PVC for object storage
+ value: 2Gi
+- name: META_PVC
+ displayName: Metadata GlusterFS-backed PVC
+ description: GlusterFS-backed PVC for object storage metadata
+ required: true
+- name: META_PVC_SIZE
+ displayName: Metadata GlusterFS-backed PVC capacity
+ description: Capacity for GlusterFS-backed PVC for object storage metadata
+ value: 1Gi
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances
+ value: storage
diff --git a/roles/openshift_storage_glusterfs/files/v3.8/gluster-s3-template.yml b/roles/openshift_storage_glusterfs/files/v3.8/gluster-s3-template.yml
new file mode 100644
index 000000000..896a1b226
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.8/gluster-s3-template.yml
@@ -0,0 +1,140 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: gluster-s3
+ labels:
+ glusterfs: s3-template
+ gluster-s3: template
+ annotations:
+ description: Gluster S3 service template
+ tags: glusterfs,heketi,gluster-s3
+objects:
+- kind: Service
+ apiVersion: v1
+ metadata:
+ name: gluster-s3-${CLUSTER_NAME}-${S3_ACCOUNT}-service
+ labels:
+ glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-service
+ gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-service
+ spec:
+ ports:
+ - protocol: TCP
+ port: 8080
+ targetPort: 8080
+ selector:
+ glusterfs: s3-pod
+ type: ClusterIP
+ sessionAffinity: None
+ status:
+ loadBalancer: {}
+- kind: Route
+ apiVersion: v1
+ metadata:
+ name: gluster-s3-${CLUSTER_NAME}-${S3_ACCOUNT}-route
+ labels:
+ glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-route
+ gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-route
+ spec:
+ to:
+ kind: Service
+ name: gluster-s3-${CLUSTER_NAME}-${S3_ACCOUNT}-service
+- kind: DeploymentConfig
+ apiVersion: v1
+ metadata:
+ name: gluster-s3-${CLUSTER_NAME}-${S3_ACCOUNT}-dc
+ labels:
+ glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-dc
+ gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-dc
+ annotations:
+ openshift.io/scc: privileged
+ description: Defines how to deploy gluster s3 object storage
+ spec:
+ replicas: 1
+ selector:
+ glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-pod
+ template:
+ metadata:
+ name: gluster-${CLUSTER_NAME}-${S3_ACCOUNT}-s3
+ labels:
+ glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-pod
+ gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-pod
+ spec:
+ containers:
+ - name: gluster-s3
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ imagePullPolicy: IfNotPresent
+ ports:
+ - name: gluster
+ containerPort: 8080
+ protocol: TCP
+ env:
+ - name: S3_ACCOUNT
+ value: "${S3_ACCOUNT}"
+ - name: S3_USER
+ value: "${S3_USER}"
+ - name: S3_PASSWORD
+ value: "${S3_PASSWORD}"
+ resources: {}
+ volumeMounts:
+ - name: gluster-vol1
+ mountPath: "/mnt/gluster-object/${S3_ACCOUNT}"
+ - name: gluster-vol2
+ mountPath: "/mnt/gluster-object/gsmetadata"
+ - name: glusterfs-cgroup
+ readOnly: true
+ mountPath: "/sys/fs/cgroup"
+ terminationMessagePath: "/dev/termination-log"
+ securityContext:
+ privileged: true
+ volumes:
+ - name: glusterfs-cgroup
+ hostPath:
+ path: "/sys/fs/cgroup"
+ - name: gluster-vol1
+ persistentVolumeClaim:
+ claimName: ${PVC}
+ - name: gluster-vol2
+ persistentVolumeClaim:
+ claimName: ${META_PVC}
+ restartPolicy: Always
+ terminationGracePeriodSeconds: 30
+ dnsPolicy: ClusterFirst
+ serviceAccountName: default
+ serviceAccount: default
+ securityContext: {}
+parameters:
+- name: IMAGE_NAME
+ displayName: glusterblock provisioner container image name
+ required: True
+- name: IMAGE_VERSION
+ displayName: glusterblock provisioner container image version
+ required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances
+ value: storage
+- name: S3_ACCOUNT
+ displayName: S3 Account Name
+ description: S3 storage account which will provide storage on GlusterFS volumes
+ required: true
+- name: S3_USER
+ displayName: S3 User
+ description: S3 user who can access the S3 storage account
+ required: true
+- name: S3_PASSWORD
+ displayName: S3 User Password
+ description: Password for the S3 user
+ required: true
+- name: PVC
+ displayName: Primary GlusterFS-backed PVC
+ description: GlusterFS-backed PVC for object storage
+ value: gluster-s3-claim
+- name: META_PVC
+ displayName: Metadata GlusterFS-backed PVC
+ description: GlusterFS-backed PVC for object storage metadata
+ value: gluster-s3-meta-claim
diff --git a/roles/openshift_storage_glusterfs/files/v3.8/glusterblock-provisioner.yml b/roles/openshift_storage_glusterfs/files/v3.8/glusterblock-provisioner.yml
new file mode 100644
index 000000000..9c1409dee
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.8/glusterblock-provisioner.yml
@@ -0,0 +1,105 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: glusterblock-provisioner
+ labels:
+ glusterfs: block-template
+ glusterblock: template
+ annotations:
+ description: glusterblock provisioner template
+ tags: glusterfs
+objects:
+- kind: ClusterRole
+ apiVersion: v1
+ metadata:
+ name: glusterblock-provisioner-runner
+ labels:
+ glusterfs: block-provisioner-runner-clusterrole
+ glusterblock: provisioner-runner-clusterrole
+ rules:
+ - apiGroups: [""]
+ resources: ["persistentvolumes"]
+ verbs: ["get", "list", "watch", "create", "delete"]
+ - apiGroups: [""]
+ resources: ["persistentvolumeclaims"]
+ verbs: ["get", "list", "watch", "update"]
+ - apiGroups: ["storage.k8s.io"]
+ resources: ["storageclasses"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: [""]
+ resources: ["events"]
+ verbs: ["list", "watch", "create", "update", "patch"]
+ - apiGroups: [""]
+ resources: ["services"]
+ verbs: ["get"]
+ - apiGroups: [""]
+ resources: ["secrets"]
+ verbs: ["get", "create", "delete"]
+ - apiGroups: [""]
+ resources: ["routes"]
+ verbs: ["get", "list"]
+- apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: glusterblock-${CLUSTER_NAME}-provisioner
+ labels:
+ glusterfs: block-${CLUSTER_NAME}-provisioner-sa
+ glusterblock: ${CLUSTER_NAME}-provisioner-sa
+- apiVersion: v1
+ kind: ClusterRoleBinding
+ metadata:
+ name: glusterblock-${CLUSTER_NAME}-provisioner
+ roleRef:
+ name: glusterblock-provisioner-runner
+ subjects:
+ - kind: ServiceAccount
+ name: glusterblock-${CLUSTER_NAME}-provisioner
+ namespace: ${NAMESPACE}
+- kind: DeploymentConfig
+ apiVersion: v1
+ metadata:
+ name: glusterblock-${CLUSTER_NAME}-provisioner-dc
+ labels:
+ glusterfs: block-${CLUSTER_NAME}-provisioner-dc
+ glusterblock: ${CLUSTER_NAME}-provisioner-dc
+ annotations:
+ description: Defines how to deploy the glusterblock provisioner pod.
+ spec:
+ replicas: 1
+ selector:
+ glusterfs: block-${CLUSTER_NAME}-provisioner-pod
+ triggers:
+ - type: ConfigChange
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: glusterblock-provisioner
+ labels:
+ glusterfs: block-${CLUSTER_NAME}-provisioner-pod
+ spec:
+ serviceAccountName: glusterblock-${CLUSTER_NAME}-provisioner
+ containers:
+ - name: glusterblock-provisioner
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: PROVISIONER_NAME
+ value: gluster.org/glusterblock
+parameters:
+- name: IMAGE_NAME
+ displayName: glusterblock provisioner container image name
+ required: True
+- name: IMAGE_VERSION
+ displayName: glusterblock provisioner container image version
+ required: True
+- name: NAMESPACE
+ displayName: glusterblock provisioner namespace
+ description: The namespace in which these resources are being created
+ required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances
+ value: storage
diff --git a/roles/openshift_storage_glusterfs/files/v3.8/glusterfs-template.yml b/roles/openshift_storage_glusterfs/files/v3.8/glusterfs-template.yml
new file mode 100644
index 000000000..09850a2c2
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.8/glusterfs-template.yml
@@ -0,0 +1,154 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: glusterfs
+ labels:
+ glusterfs: template
+ annotations:
+ description: GlusterFS DaemonSet template
+ tags: glusterfs
+objects:
+- kind: DaemonSet
+ apiVersion: extensions/v1beta1
+ metadata:
+ name: glusterfs-${CLUSTER_NAME}
+ labels:
+ glusterfs: ${CLUSTER_NAME}-daemonset
+ annotations:
+ description: GlusterFS DaemonSet
+ tags: glusterfs
+ spec:
+ selector:
+ matchLabels:
+ glusterfs: ${CLUSTER_NAME}-pod
+ template:
+ metadata:
+ name: glusterfs-${CLUSTER_NAME}
+ labels:
+ glusterfs: ${CLUSTER_NAME}-pod
+ glusterfs-node: pod
+ spec:
+ nodeSelector: "${{NODE_LABELS}}"
+ hostNetwork: true
+ containers:
+ - name: glusterfs
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: GB_GLFS_LRU_COUNT
+ value: "${GB_GLFS_LRU_COUNT}"
+ - name: TCMU_LOGDIR
+ value: "${TCMU_LOGDIR}"
+ resources:
+ requests:
+ memory: 100Mi
+ cpu: 100m
+ volumeMounts:
+ - name: glusterfs-heketi
+ mountPath: "/var/lib/heketi"
+ - name: glusterfs-run
+ mountPath: "/run"
+ - name: glusterfs-lvm
+ mountPath: "/run/lvm"
+ - name: glusterfs-etc
+ mountPath: "/etc/glusterfs"
+ - name: glusterfs-logs
+ mountPath: "/var/log/glusterfs"
+ - name: glusterfs-config
+ mountPath: "/var/lib/glusterd"
+ - name: glusterfs-dev
+ mountPath: "/dev"
+ - name: glusterfs-misc
+ mountPath: "/var/lib/misc/glusterfsd"
+ - name: glusterfs-cgroup
+ mountPath: "/sys/fs/cgroup"
+ readOnly: true
+ - name: glusterfs-ssl
+ mountPath: "/etc/ssl"
+ readOnly: true
+ securityContext:
+ capabilities: {}
+ privileged: true
+ readinessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 40
+ exec:
+ command:
+ - "/bin/bash"
+ - "-c"
+ - systemctl status glusterd.service
+ periodSeconds: 25
+ successThreshold: 1
+ failureThreshold: 15
+ livenessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 40
+ exec:
+ command:
+ - "/bin/bash"
+ - "-c"
+ - systemctl status glusterd.service
+ periodSeconds: 25
+ successThreshold: 1
+ failureThreshold: 15
+ terminationMessagePath: "/dev/termination-log"
+ volumes:
+ - name: glusterfs-heketi
+ hostPath:
+ path: "/var/lib/heketi"
+ - name: glusterfs-run
+ emptyDir: {}
+ - name: glusterfs-lvm
+ hostPath:
+ path: "/run/lvm"
+ - name: glusterfs-etc
+ hostPath:
+ path: "/etc/glusterfs"
+ - name: glusterfs-logs
+ hostPath:
+ path: "/var/log/glusterfs"
+ - name: glusterfs-config
+ hostPath:
+ path: "/var/lib/glusterd"
+ - name: glusterfs-dev
+ hostPath:
+ path: "/dev"
+ - name: glusterfs-misc
+ hostPath:
+ path: "/var/lib/misc/glusterfsd"
+ - name: glusterfs-cgroup
+ hostPath:
+ path: "/sys/fs/cgroup"
+ - name: glusterfs-ssl
+ hostPath:
+ path: "/etc/ssl"
+ restartPolicy: Always
+ terminationGracePeriodSeconds: 30
+ dnsPolicy: ClusterFirst
+ securityContext: {}
+parameters:
+- name: NODE_LABELS
+ displayName: Daemonset Node Labels
+ description: Labels which define the daemonset node selector. Must contain at least one label of the format 'glusterfs=<CLUSTER_NAME>-host'
+ value: '{ "glusterfs": "storage-host" }'
+- name: IMAGE_NAME
+ displayName: GlusterFS container image name
+ required: True
+- name: IMAGE_VERSION
+ displayName: GlusterFS container image version
+ required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances
+ value: storage
+- name: GB_GLFS_LRU_COUNT
+ displayName: Maximum number of block hosting volumes
+ description: Sets the maximum number of block hosting volumes.
+ value: "15"
+ required: true
+- name: TCMU_LOGDIR
+ displayName: Tcmu runner log directory
+ description: Sets the tcmu-runner log directory.
+ value: "/var/log/glusterfs/gluster-block"
+ required: true
diff --git a/roles/openshift_storage_glusterfs/files/v3.8/heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.8/heketi-template.yml
new file mode 100644
index 000000000..28cdb2982
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.8/heketi-template.yml
@@ -0,0 +1,136 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: heketi
+ labels:
+ glusterfs: heketi-template
+ annotations:
+ description: Heketi service deployment template
+ tags: glusterfs,heketi
+objects:
+- kind: Service
+ apiVersion: v1
+ metadata:
+ name: heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: heketi-${CLUSTER_NAME}-service
+ heketi: ${CLUSTER_NAME}-service
+ annotations:
+ description: Exposes Heketi service
+ spec:
+ ports:
+ - name: heketi
+ port: 8080
+ targetPort: 8080
+ selector:
+ glusterfs: heketi-${CLUSTER_NAME}-pod
+- kind: Route
+ apiVersion: v1
+ metadata:
+ name: ${HEKETI_ROUTE}
+ labels:
+ glusterfs: heketi-${CLUSTER_NAME}-route
+ heketi: ${CLUSTER_NAME}-route
+ spec:
+ to:
+ kind: Service
+ name: heketi-${CLUSTER_NAME}
+- kind: DeploymentConfig
+ apiVersion: v1
+ metadata:
+ name: heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: heketi-${CLUSTER_NAME}-dc
+ heketi: ${CLUSTER_NAME}-dc
+ annotations:
+ description: Defines how to deploy Heketi
+ spec:
+ replicas: 1
+ selector:
+ glusterfs: heketi-${CLUSTER_NAME}-pod
+ triggers:
+ - type: ConfigChange
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: heketi-${CLUSTER_NAME}-pod
+ heketi: ${CLUSTER_NAME}-pod
+ spec:
+ serviceAccountName: heketi-${CLUSTER_NAME}-service-account
+ containers:
+ - name: heketi
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: HEKETI_USER_KEY
+ value: ${HEKETI_USER_KEY}
+ - name: HEKETI_ADMIN_KEY
+ value: ${HEKETI_ADMIN_KEY}
+ - name: HEKETI_EXECUTOR
+ value: ${HEKETI_EXECUTOR}
+ - name: HEKETI_FSTAB
+ value: ${HEKETI_FSTAB}
+ - name: HEKETI_SNAPSHOT_LIMIT
+ value: '14'
+ - name: HEKETI_KUBE_GLUSTER_DAEMONSET
+ value: '1'
+ ports:
+ - containerPort: 8080
+ volumeMounts:
+ - name: db
+ mountPath: /var/lib/heketi
+ - name: config
+ mountPath: /etc/heketi
+ readinessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 3
+ httpGet:
+ path: /hello
+ port: 8080
+ livenessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 30
+ httpGet:
+ path: /hello
+ port: 8080
+ volumes:
+ - name: db
+ glusterfs:
+ endpoints: heketi-db-${CLUSTER_NAME}-endpoints
+ path: heketidbstorage
+ - name: config
+ secret:
+ secretName: heketi-${CLUSTER_NAME}-config-secret
+parameters:
+- name: HEKETI_USER_KEY
+ displayName: Heketi User Secret
+ description: Set secret for those creating volumes as type _user_
+- name: HEKETI_ADMIN_KEY
+ displayName: Heketi Administrator Secret
+ description: Set secret for administration of the Heketi service as user _admin_
+- name: HEKETI_EXECUTOR
+ displayName: heketi executor type
+ description: Set the executor type, kubernetes or ssh
+ value: kubernetes
+- name: HEKETI_FSTAB
+ displayName: heketi fstab path
+ description: Set the fstab path, file that is populated with bricks that heketi creates
+ value: /var/lib/heketi/fstab
+- name: HEKETI_ROUTE
+ displayName: heketi route name
+ description: Set the hostname for the route URL
+ value: "heketi-glusterfs"
+- name: IMAGE_NAME
+ displayName: heketi container image name
+ required: True
+- name: IMAGE_VERSION
+ displayName: heketi container image version
+ required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify this heketi service, useful for running multiple heketi instances
+ value: glusterfs
diff --git a/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-registry-endpoints.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-registry-endpoints.yml.j2
new file mode 100644
index 000000000..11c9195bb
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-registry-endpoints.yml.j2
@@ -0,0 +1,12 @@
+---
+apiVersion: v1
+kind: Endpoints
+metadata:
+ name: glusterfs-{{ glusterfs_name }}-endpoints
+subsets:
+- addresses:
+{% for node in glusterfs_nodes %}
+ - ip: {{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }}
+{% endfor %}
+ ports:
+ - port: 1
diff --git a/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-registry-service.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-registry-service.yml.j2
new file mode 100644
index 000000000..3f869d2b7
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-registry-service.yml.j2
@@ -0,0 +1,10 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: glusterfs-{{ glusterfs_name }}-endpoints
+spec:
+ ports:
+ - port: 1
+status:
+ loadBalancer: {}
diff --git a/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-storageclass.yml.j2
new file mode 100644
index 000000000..095fb780f
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-storageclass.yml.j2
@@ -0,0 +1,13 @@
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+ name: glusterfs-{{ glusterfs_name }}
+provisioner: kubernetes.io/glusterfs
+parameters:
+ resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}"
+ restuser: "admin"
+{% if glusterfs_heketi_admin_key is defined %}
+ secretNamespace: "{{ glusterfs_namespace }}"
+ secretName: "heketi-{{ glusterfs_name }}-admin-secret"
+{%- endif -%}
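For a native heketi deployment with an admin key set, the template renders roughly the following (the cluster name, namespace, and route hostname are hypothetical):

```
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: glusterfs-storage
provisioner: kubernetes.io/glusterfs
parameters:
  resturl: "http://heketi-storage.example.com"
  restuser: "admin"
  secretNamespace: "glusterfs"
  secretName: "heketi-storage-admin-secret"
```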
diff --git a/roles/openshift_storage_glusterfs/templates/v3.8/heketi-endpoints.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.8/heketi-endpoints.yml.j2
new file mode 100644
index 000000000..99cbdf748
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.8/heketi-endpoints.yml.j2
@@ -0,0 +1,12 @@
+---
+apiVersion: v1
+kind: Endpoints
+metadata:
+ name: heketi-db-{{ glusterfs_name }}-endpoints
+subsets:
+- addresses:
+{% for node in glusterfs_nodes %}
+ - ip: {{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }}
+{% endfor %}
+ ports:
+ - port: 1
diff --git a/roles/openshift_storage_glusterfs/templates/v3.8/heketi-service.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.8/heketi-service.yml.j2
new file mode 100644
index 000000000..dcb896441
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.8/heketi-service.yml.j2
@@ -0,0 +1,10 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: heketi-db-{{ glusterfs_name }}-endpoints
+spec:
+ ports:
+ - port: 1
+status:
+ loadBalancer: {}
diff --git a/roles/openshift_storage_glusterfs/templates/v3.8/heketi.json.j2 b/roles/openshift_storage_glusterfs/templates/v3.8/heketi.json.j2
new file mode 100644
index 000000000..565e9be98
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.8/heketi.json.j2
@@ -0,0 +1,42 @@
+{
+ "_port_comment": "Heketi Server Port Number",
+ "port" : "8080",
+
+ "_use_auth": "Enable JWT authorization. Please enable for deployment",
+ "use_auth" : false,
+
+ "_jwt" : "Private keys for access",
+ "jwt" : {
+ "_admin" : "Admin has access to all APIs",
+ "admin" : {
+ "key" : "My Secret"
+ },
+ "_user" : "User only has access to /volumes endpoint",
+ "user" : {
+ "key" : "My Secret"
+ }
+ },
+
+ "_glusterfs_comment": "GlusterFS Configuration",
+ "glusterfs" : {
+
+ "_executor_comment": "Execute plugin. Possible choices: mock, kubernetes, ssh",
+ "executor" : "{{ glusterfs_heketi_executor }}",
+
+ "_db_comment": "Database file name",
+ "db" : "/var/lib/heketi/heketi.db",
+
+ "sshexec" : {
+ "keyfile" : "/etc/heketi/private_key",
+ "port" : "{{ glusterfs_heketi_ssh_port }}",
+ "user" : "{{ glusterfs_heketi_ssh_user }}",
+ "sudo" : {{ glusterfs_heketi_ssh_sudo | lower }}
+ },
+
+ "_auto_create_block_hosting_volume": "Creates Block Hosting volumes automatically if not found or the existing volume is exhausted",
+ "auto_create_block_hosting_volume": {{ glusterfs_block_host_vol_create | lower }},
+
+ "_block_hosting_volume_size": "A new block hosting volume will be created with the size mentioned. This is considered only if auto-create is enabled.",
+ "block_hosting_volume_size": {{ glusterfs_block_host_vol_size }}
+ }
+}
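The template draws on a handful of role variables; a sketch of plausible values (the concrete defaults live in the role and may differ):

```
# Hypothetical values for the variables referenced in heketi.json.j2
glusterfs_heketi_executor: kubernetes
glusterfs_heketi_ssh_port: 22
glusterfs_heketi_ssh_user: root
glusterfs_heketi_ssh_sudo: false
glusterfs_block_host_vol_create: true
glusterfs_block_host_vol_size: 100   # GB
```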
diff --git a/roles/openshift_storage_glusterfs/templates/v3.8/topology.json.j2 b/roles/openshift_storage_glusterfs/templates/v3.8/topology.json.j2
new file mode 100644
index 000000000..d6c28f6dd
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.8/topology.json.j2
@@ -0,0 +1,49 @@
+{
+ "clusters": [
+{%- set clusters = {} -%}
+{%- for node in glusterfs_nodes -%}
+ {%- set cluster = hostvars[node].glusterfs_cluster if 'glusterfs_cluster' in hostvars[node] else '1' -%}
+ {%- if cluster in clusters -%}
+ {%- set _dummy = clusters[cluster].append(node) -%}
+ {%- else -%}
+ {%- set _dummy = clusters.update({cluster: [ node, ]}) -%}
+ {%- endif -%}
+{%- endfor -%}
+{%- for cluster in clusters -%}
+ {
+ "nodes": [
+{%- for node in clusters[cluster] -%}
+ {
+ "node": {
+ "hostnames": {
+ "manage": [
+{%- if 'glusterfs_hostname' in hostvars[node] -%}
+ "{{ hostvars[node].glusterfs_hostname }}"
+{%- elif 'openshift' in hostvars[node] -%}
+ "{{ hostvars[node].openshift.node.nodename }}"
+{%- else -%}
+ "{{ node }}"
+{%- endif -%}
+ ],
+ "storage": [
+{%- if 'glusterfs_ip' in hostvars[node] -%}
+ "{{ hostvars[node].glusterfs_ip }}"
+{%- else -%}
+ "{{ hostvars[node].openshift.common.ip }}"
+{%- endif -%}
+ ]
+ },
+ "zone": {{ hostvars[node].glusterfs_zone | default(1) }}
+ },
+ "devices": [
+{%- for device in hostvars[node].glusterfs_devices -%}
+ "{{ device }}"{% if not loop.last %},{% endif %}
+{%- endfor -%}
+ ]
+ }{% if not loop.last %},{% endif %}
+{%- endfor -%}
+ ]
+ }{% if not loop.last %},{% endif %}
+{%- endfor -%}
+ ]
+}
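The template groups nodes by `glusterfs_cluster` and emits one entry per device. Hypothetical host variables that would feed it:

```
# host_vars for a GlusterFS node (values are illustrative)
glusterfs_ip: 192.0.2.11
glusterfs_zone: 1
glusterfs_cluster: '1'
glusterfs_devices:
- /dev/sdb
- /dev/sdc
```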
diff --git a/roles/openshift_version/tasks/main.yml b/roles/openshift_version/tasks/main.yml
index 1c8b9046c..4f9158ade 100644
--- a/roles/openshift_version/tasks/main.yml
+++ b/roles/openshift_version/tasks/main.yml
@@ -93,11 +93,11 @@
- inventory_hostname in groups['oo_masters_to_config'] or inventory_hostname in groups['oo_nodes_to_config']
block:
- name: Set openshift_version for rpm installation
- include: set_version_rpm.yml
+ include_tasks: set_version_rpm.yml
when: not is_containerized | bool
- name: Set openshift_version for containerized installation
- include: set_version_containerized.yml
+ include_tasks: set_version_containerized.yml
when: is_containerized | bool
- block: