author     Suren A. Chilingaryan <csa@suren.me>    2018-03-18 22:59:31 +0100
committer  Suren A. Chilingaryan <csa@suren.me>    2018-03-18 22:59:31 +0100
commit     47f350bc3aa85a8bd406d95faf084df2abf74ae9 (patch)
tree       72ad1e91bac46d3457f89781dc90f0d6c1c074d5 /roles
parent     006f333828db373435daa15483d2ab753048f62a (diff)
Second revision: includes hostpath mounts, gluster block storage, kaas apps, etc.
Diffstat (limited to 'roles')
-rwxr-xr-x  roles/ands_backup/templates/backup.sh.j2            |  16
-rw-r--r--  roles/ands_facts/tasks/main.yml                      |   5
-rw-r--r--  roles/ands_facts/tasks/network.yml                   |   1
-rw-r--r--  roles/ands_facts/tasks/storage.yml                   |   4
-rw-r--r--  roles/ands_kaas/defaults/main.yml                    |   7
-rw-r--r--  roles/ands_kaas/tasks/do_apps.yml                    |  16
-rw-r--r--  roles/ands_kaas/tasks/do_project.yml                 |  20
-rw-r--r--  roles/ands_kaas/tasks/do_storage.yml                 |  43
-rw-r--r--  roles/ands_kaas/tasks/file.yml                       |   9
-rw-r--r--  roles/ands_kaas/tasks/main.yml                       |   4
-rw-r--r--  roles/ands_kaas/tasks/project.yml                    |  32
-rw-r--r--  roles/ands_kaas/tasks/template.yml                   |   9
-rw-r--r--  roles/ands_kaas/tasks/templates.yml                  |   5
-rw-r--r--  roles/ands_kaas/tasks/volume.yml                     |  16
-rw-r--r--  roles/ands_kaas/templates/00-block-volumes.yml.j2    |  48
-rw-r--r--  roles/ands_kaas/templates/00-gfs-volumes.yml.j2      |  11
-rw-r--r--  roles/ands_kaas/templates/50-kaas-pods.yml.j2        | 166
-rw-r--r--  roles/ands_storage/tasks/detect_device.yml           |   3
-rw-r--r--  roles/ands_storage/tasks/hostmount.yml               |   5
-rw-r--r--  roles/ands_storage/tasks/main.yml                    |   7
-rw-r--r--  roles/glusterfs/defaults/main.yml                    |   1
-rw-r--r--  roles/glusterfs/files/glusterblock-link.service      |   8
-rw-r--r--  roles/glusterfs/tasks/cfg/vols3.yml                  |   2
-rw-r--r--  roles/glusterfs/tasks/common.yml                     |   7
-rw-r--r--  roles/glusterfs/tasks/create_block.yml               |  18
-rw-r--r--  roles/glusterfs/tasks/create_domain.yml              |   9
-rw-r--r--  roles/glusterfs/tasks/create_volume.yml              |   1
-rw-r--r--  roles/glusterfs/tasks/data/vols2.yml                 |   2
-rw-r--r--  roles/glusterfs/tasks/data/vols3.yml                 |   2
-rw-r--r--  roles/glusterfs/tasks/db/vols3.yml                   |   2
-rw-r--r--  roles/glusterfs/tasks/la/vols3.yml                   |   2
-rw-r--r--  roles/glusterfs/tasks/setup-openshift-server.yml     |  16
-rw-r--r--  roles/ofed/files/rdma_limits.conf                    |   4
-rw-r--r--  roles/ofed/tasks/main.yml                            |   8
-rw-r--r--  roles/openshift_resource/tasks/template.yml          |  11
35 files changed, 458 insertions, 62 deletions
diff --git a/roles/ands_backup/templates/backup.sh.j2 b/roles/ands_backup/templates/backup.sh.j2
index 74fff85..c362957 100755
--- a/roles/ands_backup/templates/backup.sh.j2
+++ b/roles/ands_backup/templates/backup.sh.j2
@@ -15,9 +15,13 @@ etcdctl3 () {
ETCDCTL_API=3 /usr/bin/etcdctl --cert /etc/etcd/peer.crt --key /etc/etcd/peer.key --cacert /etc/etcd/ca.crt --endpoints "https://${hostname}:2379" ${@}
}
-
check=$(df | awk '{ print $6 }' | grep -P "^${volume_path}$")
-[ $? -ne 0 -o -z "$check" ] && { echo "The volume $volume_path is not mounted. Skipping..." ; exit 1 ; }
+if [ $? -ne 0 -o -z "$check" ]; then
+ echo "Mounting $volume_path"
+ mount "$volume_path"
+ check=$(df | awk '{ print $6 }' | grep -P "^${volume_path}$")
+ [ $? -ne 0 -o -z "$check" ] && { echo "The volume $volume_path is not mounted. Skipping..." ; exit 1 ; }
+fi
[ -d "$backup_path" ] && { echo "Something wrong, path $backup_path already exists..." ; exit 1 ; }
@@ -31,7 +35,13 @@ etcdctl3 --endpoints="192.168.213.1:2379" snapshot save "$backup_path/etcd/snaps
# heketi
mkdir -p "$backup_path/heketi" || { echo "Can't create ${backup_path}/heketi" ; exit 1 ; }
-heketi-cli -s http://heketi-storage.glusterfs.svc.cluster.local:8080 --user admin --secret "$(oc get secret heketi-storage-admin-secret -n glusterfs -o jsonpath='{.data.key}' | base64 -d)" topology info --json > "$backup_path/heketi/topology.json"
+heketi-cli -s http://heketi-storage.glusterfs.svc.cluster.local:8080 --user admin --secret "$(oc get secret heketi-storage-admin-secret -n glusterfs -o jsonpath='{.data.key}' | base64 -d)" topology info > "$backup_path/heketi/heketi_topology.json"
+heketi-cli -s http://heketi-storage.glusterfs.svc.cluster.local:8080 --user admin --secret "$(oc get secret heketi-storage-admin-secret -n glusterfs -o jsonpath='{.data.key}' | base64 -d)" db dump > "$backup_path/heketi/heketi_db.json"
+lvs > "$backup_path/heketi/lvs.txt" 2>/dev/null
+lvm fullreport --reportformat json > "$backup_path/heketi/lvm.json" 2>/dev/null
+gluster --xml volume info > "$backup_path/heketi/gluster-info.xml"
+gluster --xml volume status > "$backup_path/heketi/gluster-status.xml"
+gluster volume status > "$backup_path/heketi/gluster.txt"
{% endif %}
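
The hunk above now tries to mount the backup volume before giving up. A minimal standalone sketch of the same "mount if missing, then re-check" idiom (plain shell, not part of the commit; the helper name and path are illustrative, and mount(8) is assumed to find the volume in /etc/fstab):

ensure_mounted() {
    local path="$1"
    # already a mount point? (same df/awk/grep test as in the template)
    df | awk '{ print $6 }' | grep -qx "$path" && return 0
    mount "$path" || return 1                 # relies on an fstab entry for $path
    df | awk '{ print $6 }' | grep -qx "$path"
}

ensure_mounted /mnt/ands/backup || { echo "The volume is not mounted. Skipping..." ; exit 1 ; }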
diff --git a/roles/ands_facts/tasks/main.yml b/roles/ands_facts/tasks/main.yml
index bd23e13..ce5dd23 100644
--- a/roles/ands_facts/tasks/main.yml
+++ b/roles/ands_facts/tasks/main.yml
@@ -1,4 +1,9 @@
---
+# We need all mount points ready
+- name: "Run mount -a"
+ command: mount -a
+ changed_when: false
+
# Here we set 'openshift_hostname', 'openshift_ip' and other variables
- name: "Configuring network facts"
include_tasks: "network.yml"
diff --git a/roles/ands_facts/tasks/network.yml b/roles/ands_facts/tasks/network.yml
index 1acafc1..64ca15a 100644
--- a/roles/ands_facts/tasks/network.yml
+++ b/roles/ands_facts/tasks/network.yml
@@ -20,6 +20,7 @@
ands_openshift_public_hostname: "{{ ands_openshift_public_hostname | default(ands_openshift_default_hostname) }}"
ands_storage_cidr: "{{ ands_storage_network | default(ands_openshift_network) | ipaddr(ands_host_id) }}"
ands_storage_ip: "{{ ands_storage_network | default(ands_openshift_network) | ipaddr(ands_host_id) | ipaddr('address') }}"
+ ands_hostname_public: "ands_public{{ ands_host_id }}"
ands_hostname_storage: "ands_storage{{ ands_host_id }}"
ands_hostname_openshift: "ands_openshift{{ ands_host_id }}"
ands_openshift_set_hostname: "{{ ands_openshift_set_hostname }}"
diff --git a/roles/ands_facts/tasks/storage.yml b/roles/ands_facts/tasks/storage.yml
index 888ad70..b902a81 100644
--- a/roles/ands_facts/tasks/storage.yml
+++ b/roles/ands_facts/tasks/storage.yml
@@ -5,7 +5,9 @@
ands_configure_heketi: "{{ ands_configure_heketi }}"
- name: Detect Heketi
- set_fact: ands_storage_domains="{{ ands_storage_domains | union([ands_heketi_domain]) }}"
+ set_fact:
+ ands_storage_domains: "{{ ands_storage_domains | union([ands_heketi_domain]) }}"
+ ands_block_volumes: "{{ ands_block_volumes }}"
when:
- ands_configure_heketi
- ands_heketi_domain is defined
diff --git a/roles/ands_kaas/defaults/main.yml b/roles/ands_kaas/defaults/main.yml
index b2bfaf5..9a827ea 100644
--- a/roles/ands_kaas/defaults/main.yml
+++ b/roles/ands_kaas/defaults/main.yml
@@ -4,7 +4,9 @@ kaas_projects: "{{ ands_openshift_projects.keys() }}"
kaas_template_root: "{{ ands_paths.provision }}/kaas/"
kaas_glusterfs_endpoints: gfs
+kaas_storage_domains: "{{ ands_storage_domains | default({}) | union(ands_local_storage_domains | default({})) }}"
kaas_openshift_volumes: "{{ ands_openshift_volumes | default({}) }}"
+kaas_block_volumes: "{{ ands_block_volumes | default({}) }}"
kaas_openshift_files: "{{ ands_openshift_files | default([]) }}"
kaas_openshift_uids: "{{ ands_openshift_uids | default({}) }}"
@@ -17,3 +19,8 @@ kaas_default_file_owner: root
kaas_default_file_group: root
kaas_pod_history_limit: 1
+
+
+kaas_openshift_api_versions:
+ DeploymentConfig: 'v1'
+ StatefulSet: 'apps/v1beta1'
diff --git a/roles/ands_kaas/tasks/do_apps.yml b/roles/ands_kaas/tasks/do_apps.yml
new file mode 100644
index 0000000..6738b7f
--- /dev/null
+++ b/roles/ands_kaas/tasks/do_apps.yml
@@ -0,0 +1,16 @@
+- name: "Process KaaS apps"
+ include_tasks: "template.yml"
+ run_once: true
+ with_items: "{{ kaas_project_apps }}"
+ loop_control:
+ loop_var: appname
+ when:
+ - app.provision | default(true)
+ - (ands_configure_app == ands_none) or (app.name == ands_configure_app)
+ vars:
+ app: "{{ kaas_project_config[appname] }}"
+ name: "{{ app.name | default((app.pods.keys() | list)[0]) }}"
+ instantiate: "{{ app.instantiate | default(false) }}"
+ load: "{{ app.load | default(false) }}"
+ pods: "{{ app.pods }}"
+ tmpl_name: "50-kaas-pods.yml.j2"
diff --git a/roles/ands_kaas/tasks/do_project.yml b/roles/ands_kaas/tasks/do_project.yml
index 5cafe25..f5b3276 100644
--- a/roles/ands_kaas/tasks/do_project.yml
+++ b/roles/ands_kaas/tasks/do_project.yml
@@ -10,11 +10,16 @@
loop_control:
loop_var: osv
vars:
- query: "[*].volumes.{{osv.value.volume}}.mount"
- mntpath: "{{ (ands_storage_domains | json_query(query)) }}"
+ vt_query: "[*].volumes.{{osv.value.volume}}.type"
+ voltype: "{{ (kaas_storage_domains | json_query(vt_query)) }}"
+ mp_query: "[*].volumes.{{osv.value.volume}}.mount"
+ mntpath: "{{ (kaas_storage_domains | json_query(mp_query)) }}"
+ rp_query: "[*].volumes.{{osv.value.volume}}.path"
+ realpath: "{{ (kaas_storage_domains | json_query(rp_query)) }}"
osvpath: "{{ osv.value.path | default('') }}"
prefix: "{{ ( osvpath[:1] == '/' ) | ternary('', '/' ~ kaas_project ~ '/') }}"
path: "{{ mntpath[0] ~ prefix ~ osvpath }}"
+ hostpath: "{{ realpath[0] is defined | ternary((realpath[0] | default('')) ~ prefix ~ osvpath, '') }}"
name: "{{osv.key}}"
volume: "{{osv.value}}"
when: ( mntpath | length ) > 0
@@ -35,8 +40,17 @@
loop_control:
loop_var: file
vars:
+ osv: "{{ kaas_project_volumes[file.osv] }}"
+ vt_query: "[*].volumes.{{osv.volume}}.type"
+ voltype: "{{ (kaas_storage_domains | json_query(vt_query)) }}"
+ mp_query: "[*].volumes.{{osv.volume}}.mount"
+ mntpath: "{{ (kaas_storage_domains | json_query(mp_query)) }}"
+ rp_query: "[*].volumes.{{osv.volume}}.path"
+ realpath: "{{ (kaas_storage_domains | json_query(rp_query)) }}"
pvar: "kaas_{{ file.osv }}_path"
path: "{{ hostvars[inventory_hostname][pvar] }}/{{ file.path }}"
+ hvar: "kaas_{{ file.osv }}_hostpath"
+ hostpath: "{{ hostvars[inventory_hostname][hvar] }}/{{ file.path }}"
when: file.osv in kaas_project_volumes
- name: Load OpenSSL keys
@@ -60,3 +74,5 @@
when:
- kaas_project_config.oc is undefined
+- name: Install Applications
+ include_tasks: do_apps.yml
diff --git a/roles/ands_kaas/tasks/do_storage.yml b/roles/ands_kaas/tasks/do_storage.yml
new file mode 100644
index 0000000..ee118fd
--- /dev/null
+++ b/roles/ands_kaas/tasks/do_storage.yml
@@ -0,0 +1,43 @@
+- name: Configure KaaS volumes
+ include_tasks: volume.yml
+ with_dict: "{{ kaas_project_volumes }}"
+ loop_control:
+ loop_var: osv
+ vars:
+ vt_query: "[*].volumes.{{osv.value.volume}}.type"
+ voltype: "{{ (kaas_storage_domains | json_query(vt_query)) }}"
+ mp_query: "[*].volumes.{{osv.value.volume}}.mount"
+ mntpath: "{{ (kaas_storage_domains | json_query(mp_query)) }}"
+ rp_query: "[*].volumes.{{osv.value.volume}}.path"
+ realpath: "{{ (kaas_storage_domains | json_query(rp_query)) }}"
+ osvpath: "{{ osv.value.path | default('') }}"
+ prefix: "{{ ( osvpath[:1] == '/' ) | ternary('', '/' ~ kaas_project ~ '/') }}"
+ path: "{{ mntpath[0] ~ prefix ~ osvpath }}"
+ hostpath: "{{ realpath[0] is defined | ternary((realpath[0] | default('')) ~ prefix ~ osvpath, '') }}"
+ name: "{{osv.key}}"
+ volume: "{{osv.value}}"
+ when:
+ - ( mntpath | length ) > 0
+ - (osv.type | default("host")) in [ "host" ]
+
+
+- name: Configure KaaS files
+ include_tasks: file.yml
+ with_items: "{{ kaas_project_config.files | default(kaas_openshift_files) | default([]) }}"
+ loop_control:
+ loop_var: file
+ vars:
+ osv: "{{ kaas_project_volumes[file.osv] }}"
+ vt_query: "[*].volumes.{{osv.volume}}.type"
+ voltype: "{{ (kaas_storage_domains | json_query(vt_query)) }}"
+ mp_query: "[*].volumes.{{osv.volume}}.mount"
+ mntpath: "{{ (kaas_storage_domains | json_query(mp_query)) }}"
+ rp_query: "[*].volumes.{{osv.volume}}.path"
+ realpath: "{{ (kaas_storage_domains | json_query(rp_query)) }}"
+ pvar: "kaas_{{ file.osv }}_path"
+ path: "{{ hostvars[inventory_hostname][pvar] }}/{{ file.path }}"
+ hvar: "kaas_{{ file.osv }}_hostpath"
+ hostpath: "{{ hostvars[inventory_hostname][hvar] }}/{{ file.path }}"
+ when:
+ - file.osv in kaas_project_volumes
+ - (osv.type | default("host")) in [ "host" ]
diff --git a/roles/ands_kaas/tasks/file.yml b/roles/ands_kaas/tasks/file.yml
index 488823b..393fe08 100644
--- a/roles/ands_kaas/tasks/file.yml
+++ b/roles/ands_kaas/tasks/file.yml
@@ -21,3 +21,12 @@
owner: "{{ owner }}"
group: "{{ group }}"
state: "{{ file.state | default('directory') }}"
+
+
+- name: "Setting selinux context in {{ path }}"
+ sefcontext: target="{{ hostpath }}" setype="svirt_sandbox_file_t" state="present" reload="yes"
+ when: voltype[0] == "host"
+
+- name: "Apply selinux context in {{ path }}"
+ shell: restorecon "{{ hostpath }}"
+ when: voltype[0] == "host"
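
The two tasks added above register a permanent SELinux file context for the host path and then apply it. A hedged shell equivalent of what they do on the node (the path is illustrative; the Ansible sefcontext module wraps semanage fcontext, which requires policycoreutils-python on CentOS 7):

semanage fcontext -a -t svirt_sandbox_file_t '/mnt/ands/hostmount/myproject/data'
restorecon -v '/mnt/ands/hostmount/myproject/data'
ls -Zd '/mnt/ands/hostmount/myproject/data'    # should now report svirt_sandbox_file_t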
diff --git a/roles/ands_kaas/tasks/main.yml b/roles/ands_kaas/tasks/main.yml
index 85110cb..f1cff02 100644
--- a/roles/ands_kaas/tasks/main.yml
+++ b/roles/ands_kaas/tasks/main.yml
@@ -2,11 +2,11 @@
- name: Provision OpenShift resources & configurations
# include_tasks: only_templates.yml
include_tasks: project.yml
- run_once: true
-# delegate_to: "{{ groups.masters[0] }}"
+ run_once: "{{ do_subrole in [ 'project', 'apps' ] }}"
with_items: "{{ (kaas_single_project is defined) | ternary([kaas_single_project], kaas_projects) }}"
loop_control:
loop_var: kaas_project
vars:
+ do_subrole: "{{ subrole | default('project') }}"
kaas_template_path: "{{ kaas_template_root }}/{{ kaas_project }}"
kaas_project_path: "{{playbook_dir}}/projects/{{ kaas_project }}"
diff --git a/roles/ands_kaas/tasks/project.yml b/roles/ands_kaas/tasks/project.yml
index b8574cf..ecb2035 100644
--- a/roles/ands_kaas/tasks/project.yml
+++ b/roles/ands_kaas/tasks/project.yml
@@ -18,6 +18,33 @@
var_name: "var_{{kaas_project}}_config"
when: hostvars[inventory_hostname][var_name] is not defined
+
+- name: Get information about block volumes
+ delegate_to: "{{ groups.masters[0] }}"
+ shell: gluster-block info {{ item.value.volume }}/{{ item.key }} | grep -oP '^GBID:\s*\K.*'
+ register: iqn_info
+ with_dict: "{{ kaas_block_volumes }}"
+ when: item.value.project == kaas_project
+
+- name: Get information about block volumes
+ delegate_to: "{{ groups.masters[0] }}"
+ shell: gluster-block info {{ item.value.volume }}/{{ item.key }} | grep -oP '^EXPORTED NODE.*:\s*\K.*' | tr ' ' '\n'
+ register: portal_info
+ with_dict: "{{ kaas_block_volumes }}"
+ when: item.value.project == kaas_project
+
+
+- set_fact:
+ kaas_block_iqn: "{{ {} }}"
+ kaas_block_portals: "{{ {} }}"
+
+- set_fact: "kaas_block_iqn={{ kaas_block_iqn | combine({item.item.key: item.stdout}) }}"
+ with_items: "{{ iqn_info.results }}"
+
+- set_fact: "kaas_block_portals={{ kaas_block_portals | combine({item.item.key: item.stdout_lines}) }}"
+ with_items: "{{ portal_info.results }}"
+
+
#- debug: msg="{{kaas_project_path}}"
#- debug:
# msg="{{kaas_project_config}}"
@@ -25,11 +52,14 @@
# var_name: "var_{{kaas_project}}_config"
# kaas_project_config: "{{hostvars[inventory_hostname][var_name]}}"
-- include_tasks: do_project.yml
+- include_tasks: "do_{{ do_subrole | default('project') }}.yml"
vars:
var_name: "var_{{kaas_project}}_config"
kaas_project_config: "{{ hostvars[inventory_hostname][var_name] }}"
kaas_project_volumes: "{{ kaas_project_config.volumes | default(kaas_project_config.extra_volumes | default({}) | combine(kaas_openshift_volumes)) }}"
kaas_project_pods: "{{ kaas_project_config.pods | default({}) }}"
+ kaas_project_apps: "{{ kaas_project_config.apps | default([]) }}"
kaas_project_gids: "{{ kaas_project_config.gids | default(kaas_openshift_gids) }}"
kaas_project_uids: "{{ kaas_project_config.uids | default(kaas_openshift_uids) }}"
+ kaas_blockvol_info: "{{ block_info }}"
+ \ No newline at end of file
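
The two gluster-block lookups above scrape the GBID and the exported portals out of gluster-block info. A hedged illustration of that parsing against typical output (volume name, block name, and addresses are made up; the exact field layout can differ between gluster-block releases):

$ gluster-block info kaas/mysql-db
NAME: mysql-db
VOLUME: kaas
GBID: 6b60c53c-8ce0-4d8d-a42c-5b546bca3d09
SIZE: 10.0 GiB
HA: 3
EXPORTED NODE(S): 192.168.213.1 192.168.213.2 192.168.213.3

$ gluster-block info kaas/mysql-db | grep -oP '^GBID:\s*\K.*'
6b60c53c-8ce0-4d8d-a42c-5b546bca3d09

$ gluster-block info kaas/mysql-db | grep -oP '^EXPORTED NODE.*:\s*\K.*' | tr ' ' '\n'
192.168.213.1
192.168.213.2
192.168.213.3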
diff --git a/roles/ands_kaas/tasks/template.yml b/roles/ands_kaas/tasks/template.yml
index 6c90b3d..418331a 100644
--- a/roles/ands_kaas/tasks/template.yml
+++ b/roles/ands_kaas/tasks/template.yml
@@ -1,6 +1,9 @@
- name: "Populate template {{ tmpl_name }}"
- template: src="{{ item }}" dest="{{ kaas_template_path }}/{{ item | basename | regex_replace('\.j2','') }}" owner=root group=root mode="0644"
+ template: src="{{ item }}" dest="{{ kaas_template_path }}/{{ dest_name }}" owner=root group=root mode="0644"
register: result
+ vars:
+ default_name: "{{ item | basename | regex_replace('\\.j2','') }}"
+ dest_name: "{{ (name is defined) | ternary ( (name | default('')) + '.yml', default_name ) }}"
with_first_found:
- paths:
- "{{ role_path }}/templates/"
@@ -10,8 +13,12 @@
- name: "Configure KaaS resources defined in {{ tmpl_name }}"
include_role: name="openshift_resource"
+ when: instantiate == true
vars:
template: "{{ tmpl_name | basename | regex_replace('\\.j2','') }}"
template_path: "{{ kaas_template_path }}"
project: "{{ kaas_project }}"
recreate: "{{ result | changed | ternary (true, false) }}"
+
+# alternatively load template
+# TODO
diff --git a/roles/ands_kaas/tasks/templates.yml b/roles/ands_kaas/tasks/templates.yml
index 9fc378f..4417cf3 100644
--- a/roles/ands_kaas/tasks/templates.yml
+++ b/roles/ands_kaas/tasks/templates.yml
@@ -19,6 +19,11 @@
with_items: "{{ sorted_tmpl }}"
vars:
sorted_tmpl: "{{ (results.results[0] is defined) | ternary (results | json_query('results[*].stdout_lines') | sum(start=[]) | map('basename') | sort | unique, []) }}"
+ instantiate: true
+ load: false
+ pods: "{{ kaas_project_pods }}"
loop_control:
loop_var: tmpl_name
+
+
diff --git a/roles/ands_kaas/tasks/volume.yml b/roles/ands_kaas/tasks/volume.yml
index 783654a..2c695f2 100644
--- a/roles/ands_kaas/tasks/volume.yml
+++ b/roles/ands_kaas/tasks/volume.yml
@@ -2,6 +2,9 @@
- name: "Configure {{ name }} fact"
set_fact: "kaas_{{ name }}_path={{ path }}"
+- name: "Configure {{ name }} fact"
+ set_fact: "kaas_{{ name }}_hostpath={{ hostpath }}"
+
- name: "Ensure {{ path }} exists"
file:
path: "{{ path }}"
@@ -41,5 +44,16 @@
- mkdir | changed
- chmod | skipped
+- name: "Setting SELinux context for non standard locations"
+ sefcontext: target="{{ hostpath }}" setype="svirt_sandbox_file_t" state="present" reload="yes"
+ when:
+ - mkdir | changed
+ - chmod | skipped
+ - voltype[0] == "host"
-
+- name: "Apply SELinux context for non standard locations"
+ shell: restorecon "{{ hostpath }}"
+ when:
+ - mkdir | changed
+ - chmod | skipped
+ - voltype[0] == "host"
diff --git a/roles/ands_kaas/templates/00-block-volumes.yml.j2 b/roles/ands_kaas/templates/00-block-volumes.yml.j2
new file mode 100644
index 0000000..9982d61
--- /dev/null
+++ b/roles/ands_kaas/templates/00-block-volumes.yml.j2
@@ -0,0 +1,48 @@
+apiVersion: v1
+kind: Template
+metadata:
+ name: {{ kaas_project }}-block-volumes
+ annotations:
+ descriptions: "{{ kaas_project }} glusterfs block volumes"
+objects:
+{% for name, vol in kaas_block_volumes.iteritems() %}
+{% set oc_name = vol.name | default(name) | regex_replace('_','-') %}
+{% if oc_name | regex_search("^" + kaas_project) %}
+{% set pvname = oc_name %}
+{% else %}
+{% set pvname = (kaas_project + "-" + oc_name) | regex_replace('_','-') %}
+{% endif %}
+ - apiVersion: v1
+ kind: PersistentVolume
+ metadata:
+ name: {{ pvname }}
+ spec:
+ persistentVolumeReclaimPolicy: Retain
+ accessModes:
+ - ReadWriteOnce
+ iscsi:
+ fsType: xfs
+ iqn: iqn.2016-12.org.gluster-block:{{ kaas_block_iqn[name] }}
+ iscsiInterface: default
+ lun: 0
+ targetPortal: {{ kaas_block_portals[name][0] }}
+{% if kaas_block_portals[name] | length > 1 %}
+ portals: {{ kaas_block_portals[name][1:] | to_json }}
+{% endif %}
+ capacity:
+ storage: {{ vol.capacity | default(kaas_default_volume_capacity) }}
+ claimRef:
+ name: {{ oc_name }}
+ namespace: {{ kaas_project }}
+ - apiVersion: v1
+ kind: PersistentVolumeClaim
+ metadata:
+ name: {{ oc_name }}
+ spec:
+ volumeName: {{ pvname }}
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: {{ vol.capacity | default(kaas_default_volume_capacity) }}
+{% endfor %}
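
Each object pair above gives OpenShift an iSCSI-backed PersistentVolume plus a PersistentVolumeClaim pre-bound to it through volumeName/claimRef. A hedged sanity check of a rendered pair from a master node (resource and project names are illustrative):

oc get pv myproject-mysql-db -o jsonpath='{.spec.iscsi.iqn}{"\n"}{.spec.iscsi.targetPortal}{"\n"}'
oc get pvc mysql-db -n myproject                 # should report STATUS Bound to the PV above
iscsiadm -m discovery -t st -p 192.168.213.1     # the portal should expose the same IQN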
diff --git a/roles/ands_kaas/templates/00-gfs-volumes.yml.j2 b/roles/ands_kaas/templates/00-gfs-volumes.yml.j2
index a69942d..54064e4 100644
--- a/roles/ands_kaas/templates/00-gfs-volumes.yml.j2
+++ b/roles/ands_kaas/templates/00-gfs-volumes.yml.j2
@@ -7,6 +7,10 @@ metadata:
descriptions: "{{ kaas_project }} glusterfs volumes"
objects:
{% for name, vol in kaas_project_volumes.iteritems() %}
+{% set voltypes = kaas_storage_domains | json_query("[*].volumes." + vol.volume + ".type") %}
+{% set voltype = voltypes[0] | default('host') %}
+{% set mntpaths = kaas_storage_domains | json_query("[*].volumes." + vol.volume + ".mount") %}
+{% set mntpath = mntpaths[0] | default('') %}
{% set oc_name = vol.name | default(name) | regex_replace('_','-') %}
{% set cfgpath = vol.path | default("") %}
{% set path = cfgpath if cfgpath[:1] == "/" else "/" + kaas_project + "/" + cfgpath %}
@@ -21,9 +25,14 @@ objects:
name: {{ pvname }}
spec:
persistentVolumeReclaimPolicy: Retain
+{% if voltype == 'host' %}
+ hostPath:
+ path: "{{ mntpath }}{{ path }}"
+{% else %}
glusterfs:
endpoints: {{ kaas_glusterfs_endpoints }}
- path: "{{ vol.volume }}{{path}}"
+ path: "{{ vol.volume }}{{ path }}"
+{% endif %}
readOnly: {{ not (vol.write | default(false)) }}
accessModes:
- {{ vol.access | default(vol.write | default(false) | ternary('ReadWriteMany', 'ReadOnlyMany')) }}
diff --git a/roles/ands_kaas/templates/50-kaas-pods.yml.j2 b/roles/ands_kaas/templates/50-kaas-pods.yml.j2
index ad1fc58..761004d 100644
--- a/roles/ands_kaas/templates/50-kaas-pods.yml.j2
+++ b/roles/ands_kaas/templates/50-kaas-pods.yml.j2
@@ -3,39 +3,65 @@
apiVersion: v1
kind: Template
metadata:
- name: {{ kaas_project }}-pods
+ name: {{ name | default(kaas_project) }}-pods
annotations:
- descriptions: {{ kaas_project_config.description | default(kaas_project ~ " auto-generated pod template") }}
+ descriptions: {{ kaas_project_config.description | default(name | default(kaas_project) ~ " auto-generated pod template") }}
objects:
-{% for name, pod in kaas_project_pods.iteritems() %}
- {% set pubkey = "kaas_" ~ name ~ "_pubkey" %}
- {% set privkey = "kaas_" ~ name ~ "_privkey" %}
- {% set cakey = "kaas_" ~ name ~ "_ca" %}
- {% if pod.variant is defined %}
- {% set pod = pod[pod.variant] %}
- {% endif %}
- {% set sched = pod.sched | default({}) %}
- {% set node_selector = (sched.selector is defined) | ternary(sched.selector, ands_default_node_selector | combine(sched.restrict | default({}))) %}
-
- {% if pod.service is defined %}
+{% for name, pod in pods.iteritems() %}
+ {% set kind = pod.kind | default('DeploymentConfig') %}
+ {% if pod.enabled | default(true) %}
+ {% set pubkey = "kaas_" ~ name ~ "_pubkey" %}
+ {% set privkey = "kaas_" ~ name ~ "_privkey" %}
+ {% set cakey = "kaas_" ~ name ~ "_ca" %}
+ {% if pod.variant is defined %}
+ {% set pod = pod[pod.variant] %}
+ {% endif %}
+ {% set sched = pod.sched | default({}) %}
+ {% set node_selector = (sched.selector is defined) | ternary(sched.selector, ands_default_node_selector | combine(sched.restrict | default({}))) %}
+ {% if pod.service is defined %}
+ {% if kind == 'StatefulSet' and pod.service.ports is defined %}
- apiVersion: v1
kind: Service
metadata:
- name: {{ pod.name | default(name) }}
+ name: {{ pod.name | default(name) }}-ss
+ annotations: {{ pod.service.annotations | default({}) | combine({"service.alpha.kubernetes.io/tolerate-unready-endpoints": "true" }) | to_json }}
spec:
+ clusterIP: None
+ publishNotReadyAddresses: True
selector:
name: {{ pod.name | default(name) }}
- {% if pod.service.ports is defined %}
ports:
{% for port in pod.service.ports %}
- {% set portmap = (port | string).split('/') %}
+ {% set portmap = (port | string).split('/') %}
- name: "{{ portmap[0] }}"
port: {{ portmap[0] }}
targetPort: {{ (portmap[1] is defined) | ternary(portmap[1], portmap[0]) }}
{% endfor %}
- {% endif %}
- {% if (pod.service.ports is defined) and (pod.service.host is defined) %}
- {% set first_port = (pod.service.ports[0] | string).split('/') %}
+ {% endif %}
+ - apiVersion: v1
+ kind: Service
+ metadata:
+ name: {{ pod.name | default(name) }}
+ {% if pod.service.annotations is defined %}
+ annotations: {{ pod.service.annotations | to_json }}
+ {% endif %}
+ spec:
+ selector:
+ name: {{ pod.name | default(name) }}
+ {% if pod.service.ip is defined %}
+ clusterIP: {{ pod.service.ip }}
+ {% endif %}
+ {% if pod.service.ports is defined %}
+ ports:
+ {% for port in pod.service.ports %}
+ {% set portmap = (port | string).split('/') %}
+ - name: "{{ portmap[0] }}"
+ port: {{ portmap[0] }}
+ targetPort: {{ (portmap[1] is defined) | ternary(portmap[1], portmap[0]) }}
+ {% endfor %}
+ {% endif %}
+ {% if (pod.service.ports is defined) and (pod.service.host is defined) %}
+ {% set first_port = (pod.service.ports[0] | string).split('/') %}
- apiVersion: v1
kind: Route
metadata:
@@ -47,27 +73,27 @@ objects:
name: {{ pod.name | default(name) }}
port:
targetPort: {{ (first_port[1] is defined) | ternary(first_port[1], first_port[0]) }}
- {% if (first_port[0] == "80") %}
+ {% if (first_port[0] == "80") %}
tls:
termination: edge
insecureEdgeTerminationPolicy: Allow
- {% if hostvars[inventory_hostname][pubkey] is defined %}
+ {% if hostvars[inventory_hostname][pubkey] is defined %}
certificate: |-
{{ hostvars[inventory_hostname][pubkey] | indent(10) }}
- {% endif %}
- {% if hostvars[inventory_hostname][privkey] is defined %}
+ {% endif %}
+ {% if hostvars[inventory_hostname][privkey] is defined %}
key: |-
{{ hostvars[inventory_hostname][privkey] | indent(10) }}
- {% endif %}
- {% if hostvars[inventory_hostname][cakey] is defined %}
+ {% endif %}
+ {% if hostvars[inventory_hostname][cakey] is defined %}
caCertificate: |-
{{ hostvars[inventory_hostname][cakey] | indent(10) }}
+ {% endif %}
{% endif %}
{% endif %}
{% endif %}
- {% endif %}
- - apiVersion: v1
- kind: DeploymentConfig
+ - apiVersion: {{ kaas_openshift_api_versions[kind] | default('v1') }}
+ kind: {{ kind }}
metadata:
name: {{ pod.name | default(name) }}
spec:
@@ -75,13 +101,32 @@ objects:
revisionHistoryLimit: 2
strategy:
type: {{ (sched | default({})).strategy | default('Rolling') }}
+ updateStrategy:
+ {% if pod.update %}
+ type: {{ pod.update.strategy | default('OnDelete') }}
+ {% if pod.update.min_ready is defined %}
+ minReadySeconds: {{ pod.update.min_ready }}
+ {% endif %}
+ {% endif %}
triggers:
- type: ConfigChange
+ {% if kind == 'StatefulSet' %}
+ serviceName: {{ pod.name | default(name) }}-ss
+ selector:
+ matchLabels:
+ name: {{ pod.name | default(name) }}
+ {% else %}
selector:
name: {{ pod.name | default(name) }}
+ {% endif %}
template:
metadata:
name: {{ pod.name | default(name) }}
+ {% if kind == 'StatefulSet' %}
+ annotations: {{ pod.annotations | default({}) | combine({"pod.alpha.kubernetes.io/initialized": "true"}) | to_json }}
+ {% elif pod.annotations is defined %}
+ annotations: {{ pod.annotations | to_json }}
+ {% endif %}
labels:
name: {{ pod.name | default(name) }}
spec:
@@ -89,16 +134,22 @@ objects:
nodeSelector: {{ node_selector | to_json }}
{% endif %}
{% set mappings = (pod.images | json_query('[*].mappings') | length) %}
- {% if mappings > 0 %}
+ {% set paths = (pod.images | json_query('[*].hostpath') | length) %}
+ {% if mappings > 0 or paths > 0 %}
volumes:
{% for img in pod.images %}
{% set imgidx = loop.index %}
- {% for vol in img.mappings %}
+ {% for vol in (img.mappings | default([])) %}
{% set oc_name = vol.name | default(name) | regex_replace('_','-') %}
- name: vol-{{imgidx}}-{{loop.index}}
persistentVolumeClaim:
claimName: {{ oc_name }}
{% endfor %}
+ {% for vol in (img.hostpath | default([])) %}
+ - name: host-{{imgidx}}-{{loop.index}}
+ hostPath:
+ path: {{ vol.path }}
+ {% endfor %}
{% endfor %}
{% endif %}
{% if (pod.groups is defined) or (pod.run_as is defined) %}
@@ -121,21 +172,31 @@ objects:
{% set imgidx = loop.index %}
- name: {{ img.name | default(pod.name) | default(name) }}
image: {{ img.image }}
- imagePullPolicy: Always
- ports:
+ imagePullPolicy: {{ img.pull | default('Always') }}
+ {% if (img.command is defined) %}
+ command: {{ img.command | to_json }}
+ {% endif %}
{% if img.ports is defined %}
+ ports:
{% for port in img.ports %}
- containerPort: {{ port }}
{% endfor %}
- {% else %}
+ {% elif pod.service.ports is defined %}
+ ports:
{% for port in pod.service.ports %}
{% set portmap = (port | string).split('/') %}
- containerPort: {{ (portmap[1] is defined) | ternary(portmap[1], portmap[0]) }}
{% endfor %}
{% endif %}
+ {% if kind == 'StatefulSet' %}
+ {% set extra_env = [ { "name": "POD_NAMESPACE", "value": "fieldref@metadata.namespace" }, { "name": "POD_REPLICAS", "value": sched.replicas } ] %}
+ {% set env = img.env | default([]) | union(extra_env) %}
+ {% elif img.env is defined %}
+ {% set env = img.env %}
+ {% endif %}
{% if img.env is defined %}
env:
- {% for env_item in img.env %}
+ {% for env_item in env %}
{% set env_name = env_item.name %}
{% set env_val = env_item.value %}
{% set env_parts = (env_val | string).split('@') %}
@@ -152,18 +213,50 @@ objects:
configMapKeyRef:
name: {{ env_cm[0] }}
key: {{ env_cm[1] }}
+ {% elif env_parts[0] == "fieldref" %}
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: {{ env_parts[1] }}
{% else %}
value: "{{ env_val }}"
{% endif %}
{% endfor %}
{% endif %}
- {% if img.mappings is defined %}
+ {% if img.mappings is defined or img.hostpath is defined %}
volumeMounts:
- {% for vol in img.mappings %}
+ {% for vol in (img.mappings | default([])) %}
- name: vol-{{imgidx}}-{{loop.index}}
subPath: {{ vol.path | default("") }}
mountPath: {{ vol.mount }}
{% endfor %}
+ {% for vol in (img.hostpath | default([])) %}
+ - name: host-{{imgidx}}-{{loop.index}}
+ mountPath: {{ vol.mount }}
+ {% endfor %}
+ {% endif %}
+ {% if img.resources is defined %}
+ resources:
+ {% if img.resources.request is defined %}
+ {% set res = img.resources.request %}
+ requests:
+ {% if res.cpu %}
+ cpu: {{ res.cpu }}
+ {% endif %}
+ {% if res.cpu %}
+ memory: {{ res.mem }}
+ {% endif %}
+ {% endif %}
+ {% if img.resources.limit is defined %}
+ {% set res = img.resources.limit %}
+ limits:
+ {% if res.cpu %}
+ cpu: {{ res.cpu }}
+ {% endif %}
+ {% if res.cpu %}
+ memory: {{ res.mem }}
+ {% endif %}
+ {% endif %}
{% endif %}
{% if img.probes is defined %}
{% for probe in img.probes %}
@@ -201,4 +294,5 @@ objects:
{% endfor %}
{% endif %}
{% endfor %}
+ {% endif %}
{% endfor %}
diff --git a/roles/ands_storage/tasks/detect_device.yml b/roles/ands_storage/tasks/detect_device.yml
index 3467371..f0245f3 100644
--- a/roles/ands_storage/tasks/detect_device.yml
+++ b/roles/ands_storage/tasks/detect_device.yml
@@ -4,9 +4,12 @@
# when: item.mount == ands_data_path
- name: find large block devices
+# no_log: true
set_fact: ands_data_device="/dev/{{ item.key }}"
# debug: msg="{{ item.key }} - {{ (item.value.sectors | int) * (item.value.sectorsize | int) / 1024 / 1024 / 1024 }} GB"
with_dict: "{{ ansible_devices }}"
+ loop_control:
+ label: "{{ item.key }} of {{ (item.value.sectors | int) * (item.value.sectorsize | int) / 1024 / 1024 / 1024 }} GB"
when:
- not ands_data_device is defined
- not item.value.partitions
diff --git a/roles/ands_storage/tasks/hostmount.yml b/roles/ands_storage/tasks/hostmount.yml
new file mode 100644
index 0000000..e4f301f
--- /dev/null
+++ b/roles/ands_storage/tasks/hostmount.yml
@@ -0,0 +1,5 @@
+- file: path="{{ item.value.path }}" state=directory
+ with_dict: "{{ domain.volumes }}"
+
+- mount: src="{{ item.value.path }}" name="{{ item.value.mount }}" opts=bind fstype=none state=mounted
+ with_dict: "{{ domain.volumes }}"
diff --git a/roles/ands_storage/tasks/main.yml b/roles/ands_storage/tasks/main.yml
index 43d4692..8e9d44b 100644
--- a/roles/ands_storage/tasks/main.yml
+++ b/roles/ands_storage/tasks/main.yml
@@ -48,4 +48,9 @@
- name: Mount Ands Data Volume
mount: name="{{ ands_data_path }}" src="/dev/{{ ands_data_vg }}/{{ ands_data_lv }}" fstype="{{ ands_data_fs }}" opts="defaults" state="mounted"
- \ No newline at end of file
+- name: Provision Ands local storage domains
+ include_tasks: hostmount.yml
+ with_items: "{{ ands_local_storage_domains | default([]) }}"
+ when: domain.servers | intersect(group_names) | length > 0
+ loop_control:
+ loop_var: domain
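
hostmount.yml above bind-mounts every local storage path onto its published mount point. A hedged shell equivalent of one loop iteration (paths are illustrative; Ansible's mount module with opts=bind additionally persists the entry in /etc/fstab):

mkdir -p /mnt/ands/hostraid/openshift
mount --bind /mnt/ands/hostraid/openshift /mnt/openshift
grep -q '/mnt/openshift' /etc/fstab || \
    echo '/mnt/ands/hostraid/openshift  /mnt/openshift  none  bind  0 0' >> /etc/fstab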
diff --git a/roles/glusterfs/defaults/main.yml b/roles/glusterfs/defaults/main.yml
index 700838d..d66ff5e 100644
--- a/roles/glusterfs/defaults/main.yml
+++ b/roles/glusterfs/defaults/main.yml
@@ -6,6 +6,7 @@ glusterfs_network: "{{ ands_storage_network }}"
glusterfs_servers: "{{ ands_storage_servers }}"
glusterfs_bricks_path: "{{ ands_data_path }}/glusterfs"
glusterfs_domains: "{{ ands_storage_domains }}"
+glusterfs_block_volumes: "{{ ands_block_volumes | default({}) }}"
glusterfs_all_subroles: "{{ [ 'software', 'volumes' ] }}"
glusterfs_subroles: "{{ ( subrole is defined ) | ternary( [ subrole ], glusterfs_all_subroles ) }}"
diff --git a/roles/glusterfs/files/glusterblock-link.service b/roles/glusterfs/files/glusterblock-link.service
new file mode 100644
index 0000000..9aecd40
--- /dev/null
+++ b/roles/glusterfs/files/glusterblock-link.service
@@ -0,0 +1,8 @@
+[Unit]
+After=origin-node.service
+
+[Service]
+ExecStart=/usr/bin/ln -sf /run/glusterd/gluster-blockd.socket /run/gluster-blockd.socket
+
+[Install]
+WantedBy=multi-user.target
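
The unit above recreates the gluster-blockd socket symlink after origin-node starts, mirroring the existing gluster-link.service. A hedged check that it works once installed (paths as in the unit file):

systemctl daemon-reload
systemctl enable glusterblock-link.service
systemctl start glusterblock-link.service
ls -l /run/gluster-blockd.socket    # should point at /run/glusterd/gluster-blockd.socket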
diff --git a/roles/glusterfs/tasks/cfg/vols3.yml b/roles/glusterfs/tasks/cfg/vols3.yml
index d8ed728..efd613c 100644
--- a/roles/glusterfs/tasks/cfg/vols3.yml
+++ b/roles/glusterfs/tasks/cfg/vols3.yml
@@ -7,7 +7,7 @@
cluster: "{{ domain_servers | join(',') }}"
replicas: "{{ domain_servers | length }}"
bricks: "{{ glusterfs_bricks_path }}/brick-{{ name }}"
- transport: "{{ glusterfs_transport }}"
+ transport: "{{ transport }}"
- name: "Start {{ name }} volume"
diff --git a/roles/glusterfs/tasks/common.yml b/roles/glusterfs/tasks/common.yml
index 67fb815..c94f86e 100644
--- a/roles/glusterfs/tasks/common.yml
+++ b/roles/glusterfs/tasks/common.yml
@@ -8,8 +8,13 @@
- glusterfs-cli
- glusterfs-fuse
- glusterfs-rdma
- - heketi-client
- libsemanage-python
+
+- name: Ensure GlusterFS is installed
+ yum: name={{item}} state=latest enablerepo="centos-gluster{{ glusterfs_version }}-test"
+ with_items:
+ - heketi-client
+ - gluster-block
- name: Allow fuse in SELinux configuration
seboolean: name="virt_sandbox_use_fusefs" state="yes" persistent="yes"
diff --git a/roles/glusterfs/tasks/create_block.yml b/roles/glusterfs/tasks/create_block.yml
new file mode 100644
index 0000000..5b30f02
--- /dev/null
+++ b/roles/glusterfs/tasks/create_block.yml
@@ -0,0 +1,18 @@
+- name: Check if the holding volume already exists
+ shell: "gluster volume info {{ block.value.volume }}"
+ changed_when: false
+ register: gv_results
+
+- name: Get list of existing block volumes
+ shell: "gluster-block list {{ block.value.volume }}"
+ changed_when: false
+ register: bv_results
+
+- name: Create block volume
+ shell: "gluster-block create {{ block.value.volume }}/{{ block.key }} ha {{ servers | length }} auth disable prealloc no {{ servers | join(',') }} {{ block.value.capacity }}"
+ when: block.key not in bv_results.stdout_lines
+ vars:
+ ha: "{{ block.value.ha | default(3) }}"
+ servers: "{{ domain_servers[0:(ha | int)] }}"
+ loop_control:
+ loop_var: volume
diff --git a/roles/glusterfs/tasks/create_domain.yml b/roles/glusterfs/tasks/create_domain.yml
index 76623f2..99f9959 100644
--- a/roles/glusterfs/tasks/create_domain.yml
+++ b/roles/glusterfs/tasks/create_domain.yml
@@ -14,3 +14,12 @@
domain_servers: "{{ groups[domain.servers] | map('extract', hostvars, 'ands_storage_hostname') | list }}"
loop_control:
loop_var: volume
+
+- name: Create block volumes
+ include_tasks: create_block.yml
+ when: block.value.volume in domain.volumes.keys()
+ with_dict: "{{ glusterfs_block_volumes }}"
+ vars:
+ domain_servers: "{{ groups[domain.servers] | map('extract', hostvars, 'ands_storage_hostname') | list }}"
+ loop_control:
+ loop_var: block
diff --git a/roles/glusterfs/tasks/create_volume.yml b/roles/glusterfs/tasks/create_volume.yml
index ca4f39a..a94b96f 100644
--- a/roles/glusterfs/tasks/create_volume.yml
+++ b/roles/glusterfs/tasks/create_volume.yml
@@ -2,3 +2,4 @@
- include_tasks: "{{ volume.value.type }}/vols{{((domain_servers | length) < 4) | ternary((domain_servers | length), 3) }}.yml"
vars:
name: "{{ volume.key }}"
+ transport: "{{ volume.value.transport | default(glusterfs_transport) }}"
diff --git a/roles/glusterfs/tasks/data/vols2.yml b/roles/glusterfs/tasks/data/vols2.yml
index d8ed728..efd613c 100644
--- a/roles/glusterfs/tasks/data/vols2.yml
+++ b/roles/glusterfs/tasks/data/vols2.yml
@@ -7,7 +7,7 @@
cluster: "{{ domain_servers | join(',') }}"
replicas: "{{ domain_servers | length }}"
bricks: "{{ glusterfs_bricks_path }}/brick-{{ name }}"
- transport: "{{ glusterfs_transport }}"
+ transport: "{{ transport }}"
- name: "Start {{ name }} volume"
diff --git a/roles/glusterfs/tasks/data/vols3.yml b/roles/glusterfs/tasks/data/vols3.yml
index 14c3763..f28a38c 100644
--- a/roles/glusterfs/tasks/data/vols3.yml
+++ b/roles/glusterfs/tasks/data/vols3.yml
@@ -8,7 +8,7 @@
replicas: 3
arbiters: 1
bricks: "{{ glusterfs_bricks_path }}/brick-{{ name }}"
- transport: "{{ glusterfs_transport }}"
+ transport: "{{ transport }}"
- name: "Start {{ name }} volume"
diff --git a/roles/glusterfs/tasks/db/vols3.yml b/roles/glusterfs/tasks/db/vols3.yml
index cbd238d..45cb0ce 100644
--- a/roles/glusterfs/tasks/db/vols3.yml
+++ b/roles/glusterfs/tasks/db/vols3.yml
@@ -8,7 +8,7 @@
disperses: "3"
redundancies: "1"
bricks: "{{ glusterfs_bricks_path }}/brick-{{ name }}"
- transport: "{{ glusterfs_transport }}"
+ transport: "{{ transport }}"
- name: "Start {{ name }} volume"
diff --git a/roles/glusterfs/tasks/la/vols3.yml b/roles/glusterfs/tasks/la/vols3.yml
index ada8f95..af1e889 100644
--- a/roles/glusterfs/tasks/la/vols3.yml
+++ b/roles/glusterfs/tasks/la/vols3.yml
@@ -6,7 +6,7 @@
host: "{{ ands_storage_hostname }}"
cluster: "{{ domain_servers | join(',') }}"
bricks: "{{ glusterfs_bricks_path }}/brick-{{ name }}"
- transport: "{{ glusterfs_transport }}"
+ transport: "{{ transport }}"
- name: "Start {{ name }} volume"
gluster_volume: state="started" name="{{ name }}"
diff --git a/roles/glusterfs/tasks/setup-openshift-server.yml b/roles/glusterfs/tasks/setup-openshift-server.yml
index 20ebbf8..c4fcbcc 100644
--- a/roles/glusterfs/tasks/setup-openshift-server.yml
+++ b/roles/glusterfs/tasks/setup-openshift-server.yml
@@ -1,9 +1,19 @@
---
- name: Link control socket
- file: src="/run/glusterd/glusterd.socket" dest="/run/glusterd.socket" state="link"
+ file: src="/run/glusterd/{{ item }}" dest="/run/{{ item }}" state="link"
+ with_items:
+ - glusterd.socket
+ - gluster-blockd.socket
- name: Copy systemd unit to recreate link on re-start
- copy: src="gluster-link.service" dest="/etc/systemd/system/gluster-link.service" owner="root" group="root" mode="0644"
+ copy: src="{{ item }}" dest="/etc/systemd/system/{{ item }}" owner="root" group="root" mode="0644"
+ with_items:
+ - gluster-link.service
+ - glusterblock-link.service
- name: Enable systemd unit
- systemd: enabled=true name=gluster-link daemon_reload=yes
\ No newline at end of file
+ systemd: enabled=true name={{ item }} daemon_reload=yes
+ with_items:
+ - gluster-link.service
+ - glusterblock-link.service
+ - rpcbind
diff --git a/roles/ofed/files/rdma_limits.conf b/roles/ofed/files/rdma_limits.conf
new file mode 100644
index 0000000..9a34ae4
--- /dev/null
+++ b/roles/ofed/files/rdma_limits.conf
@@ -0,0 +1,4 @@
+# configuration for rdma tuning
+* soft memlock unlimited
+* hard memlock unlimited
+# rdma tuning end
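
The limits fragment above removes the locked-memory ceiling that RDMA transfers otherwise hit, per the RHEL networking guide referenced in the role. A hedged way to confirm the new limit in a fresh login session (limits.d changes do not affect already-running shells):

ulimit -l                             # expect: unlimited
ulimit -a | grep 'max locked memory'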
diff --git a/roles/ofed/tasks/main.yml b/roles/ofed/tasks/main.yml
index bd85d43..df8392d 100644
--- a/roles/ofed/tasks/main.yml
+++ b/roles/ofed/tasks/main.yml
@@ -1,6 +1,12 @@
- name: install the 'Infiniband support' package group
yum: name="@Infiniband Support" state=present
-
+
+
+# https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/7/html/networking_guide/sec-configuring_the_base_rdma_subsystem
+- name: Allow users unrestricted page-locking
+ copy: src="rdma_limits.conf" dest="/etc/security/limits.d/50-rdma.conf" owner="root" group="root" mode="0644"
+
- name: start rdma service
service: name="rdma" enabled=yes state=started
+
\ No newline at end of file
diff --git a/roles/openshift_resource/tasks/template.yml b/roles/openshift_resource/tasks/template.yml
index 7e74de4..188599f 100644
--- a/roles/openshift_resource/tasks/template.yml
+++ b/roles/openshift_resource/tasks/template.yml
@@ -4,14 +4,16 @@
set_fact: resources="{{ tmpl | json_query(query) }}"
vars:
query: "objects[*].{kind: kind, name: metadata.name}"
-
+
+ - set_fact: resources="{{ [] }}"
+ when: resources == ""
+
- name: "{{ template }}: Lookup the specified resource in {{project}}"
command: "oc get -n {{project}} {{item.kind}}/{{item.name}}"
register: results
failed_when: false
changed_when: (results | failed)
with_items: "{{ resources | default([]) }}"
-# when: not (recreate|default(false))
- name: "{{ template }}: Detroy existing resources in {{project}}"
command: "oc delete -n {{project}} {{resources[item|int].kind}}/{{resources[item|int].name}}"
@@ -21,5 +23,8 @@
- name: "{{ template }}: Populate resources to {{project}}"
shell: "oc process -n {{project}} -f '{{ template_path }}/{{template}}' {{ template_args | default('') }} | oc create -n {{project}} -f - {{ create_args | default('') }}"
- when: (recreate|default(false)) or (results | changed)
+ when:
+ - (recreate|default(false)) or (results | changed)
+ - resources | length > 0
+
run_once: true
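
The role above looks each templated object up with oc get and only repopulates the project when something is missing or the template changed. A hedged manual equivalent for a single template (project, template path, and resource names are illustrative):

oc get -n myproject dc/mysql route/mysql        # per-resource existence check; non-zero exit means (re)create
oc process -n myproject -f /provision/kaas/myproject/50-kaas-pods.yml | oc create -n myproject -f -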