From e7ed329bd81c2273c03e94c93c9ce9c1d01cdc86 Mon Sep 17 00:00:00 2001
From: "Suren A. Chilingaryan"
Date: Sat, 1 Apr 2017 04:53:28 +0200
Subject: Initial import

---
 roles/glusterfs/README                  | 26 ++++++++++++++++++++++++++
 roles/glusterfs/defaults/main.yml       | 11 +++++++++++
 roles/glusterfs/tasks/cfg/vols2.yml     |  1 +
 roles/glusterfs/tasks/cfg/vols3.yml     | 13 +++++++++++++
 roles/glusterfs/tasks/common.yml        | 16 ++++++++++++++++
 roles/glusterfs/tasks/create_domain.yml |  8 ++++++++
 roles/glusterfs/tasks/create_volume.yml |  4 ++++
 roles/glusterfs/tasks/main.yml          | 13 +++++++++++++
 roles/glusterfs/tasks/mount_domain.yml  | 12 ++++++++++++
 roles/glusterfs/tasks/mount_volume.yml  |  8 ++++++++
 roles/glusterfs/tasks/server.yml        | 31 +++++++++++++++++++++++++++++++
 roles/glusterfs/tasks/tmp/vols2.yml     |  1 +
 roles/glusterfs/tasks/tmp/vols3.yml     | 11 +++++++++++
 roles/glusterfs/tasks/volumes.yml       | 15 +++++++++++++++
 14 files changed, 170 insertions(+)
 create mode 100644 roles/glusterfs/README
 create mode 100644 roles/glusterfs/defaults/main.yml
 create mode 120000 roles/glusterfs/tasks/cfg/vols2.yml
 create mode 100644 roles/glusterfs/tasks/cfg/vols3.yml
 create mode 100644 roles/glusterfs/tasks/common.yml
 create mode 100644 roles/glusterfs/tasks/create_domain.yml
 create mode 100644 roles/glusterfs/tasks/create_volume.yml
 create mode 100644 roles/glusterfs/tasks/main.yml
 create mode 100644 roles/glusterfs/tasks/mount_domain.yml
 create mode 100644 roles/glusterfs/tasks/mount_volume.yml
 create mode 100644 roles/glusterfs/tasks/server.yml
 create mode 120000 roles/glusterfs/tasks/tmp/vols2.yml
 create mode 100644 roles/glusterfs/tasks/tmp/vols3.yml
 create mode 100644 roles/glusterfs/tasks/volumes.yml

(limited to 'roles/glusterfs')

diff --git a/roles/glusterfs/README b/roles/glusterfs/README
new file mode 100644
index 0000000..9a319d0
--- /dev/null
+++ b/roles/glusterfs/README
@@ -0,0 +1,26 @@
+Dependencies:
+ - Executed on all nodes.
+   * The GlusterFS servers are configured on all storage servers.
+   * The GlusterFS clients are configured on all servers.
+   * The volumes are created in the configured domains.
+ - Expects that the partition for bricks is already prepared.
+
+Parameters:
+ glusterfs_version:     should be defined (without dot, like 39)
+ glusterfs_transport:   Transport to use, defaults to rdma
+
+ glusterfs_network:     CIDR for the gluster-internal Infiniband network
+   - if 192.168.12.0/24 is specified, the 'ipekatrin1' storage node is mapped to the IP '192.168.12.1', etc.
+ glusterfs_servers:     List of storage servers in glusterfs_network
+ glusterfs_bricks_path: The location to store volume bricks, defaults to 'ands_data_path'/glusterfs
+ glusterfs_domains:     Volume configuration
+
+Facts:
+
+Actions:
+ - Installs appropriate GlusterFS repositories (to match the specified version)
+ - Installs required packages; only native clients on the servers without storage
+ - Enables firewalld if necessary and allows the GlusterFS service
+ - Configures SELinux, etc.
+ - Probes all storage nodes using internal Infiniband IPs
+ - Creates requested volumes and mounts them
diff --git a/roles/glusterfs/defaults/main.yml b/roles/glusterfs/defaults/main.yml
new file mode 100644
index 0000000..9587a9b
--- /dev/null
+++ b/roles/glusterfs/defaults/main.yml
@@ -0,0 +1,11 @@
+---
+glusterfs_version: 39
+glusterfs_transport: rdma
+
+glusterfs_network: "{{ ands_storage_network }}"
+glusterfs_servers: "{{ ands_storage_servers }}"
+glusterfs_bricks_path: "{{ ands_data_path }}/glusterfs"
+glusterfs_domains: "{{ ands_storage_domains }}"
+
+glusterfs_all_subroles: "{{ [ 'software', 'volumes' ] }}"
+glusterfs_subroles: "{{ ( subrole is defined ) | ternary( [ subrole ], glusterfs_all_subroles ) }}"
diff --git a/roles/glusterfs/tasks/cfg/vols2.yml b/roles/glusterfs/tasks/cfg/vols2.yml
new file mode 120000
index 0000000..b6a3e8f
--- /dev/null
+++ b/roles/glusterfs/tasks/cfg/vols2.yml
@@ -0,0 +1 @@
+vols3.yml
\ No newline at end of file
diff --git a/roles/glusterfs/tasks/cfg/vols3.yml b/roles/glusterfs/tasks/cfg/vols3.yml
new file mode 100644
index 0000000..d094797
--- /dev/null
+++ b/roles/glusterfs/tasks/cfg/vols3.yml
@@ -0,0 +1,13 @@
+---
+- name: "Create {{ name }} volume"
+  gluster_volume:
+    state: present
+    name: "{{ name }}"
+    cluster: "{{ domain_servers | join(',') }}"
+    replicas: "{{ domain_servers | length }}"
+    bricks: "{{ glusterfs_bricks_path }}/brick-{{ name }}"
+    transport: "{{ glusterfs_transport }}"
+
+
+- name: "Start {{ name }} volume"
+  gluster_volume: state="started" name="{{ name }}"
diff --git a/roles/glusterfs/tasks/common.yml b/roles/glusterfs/tasks/common.yml
new file mode 100644
index 0000000..7675cb9
--- /dev/null
+++ b/roles/glusterfs/tasks/common.yml
@@ -0,0 +1,16 @@
+---
+- name: Ensure GlusterFS repositories are present
+  yum: name="centos-release-gluster{{ glusterfs_version }}" state=present
+
+- name: Ensure GlusterFS is installed
+  yum: name={{item}} state=present
+  with_items:
+    - glusterfs-cli
+    - glusterfs-fuse
+    - glusterfs-libs
+    - glusterfs-rdma
+    - glusterfs
+    - libsemanage-python
+
+- name: Allow fuse in SELinux configuration
+  seboolean: name="virt_sandbox_use_fusefs" state="yes" persistent="yes"
diff --git a/roles/glusterfs/tasks/create_domain.yml b/roles/glusterfs/tasks/create_domain.yml
new file mode 100644
index 0000000..b3fc89e
--- /dev/null
+++ b/roles/glusterfs/tasks/create_domain.yml
@@ -0,0 +1,8 @@
+---
+- name: Configure volumes
+  include: create_volume.yml
+  with_dict: "{{ domain.volumes }}"
+  vars:
+    domain_servers: "{{ groups[domain.servers] | map('extract', hostvars, 'ands_storage_hostname') | list }}"
+  loop_control:
+    loop_var: volume
diff --git a/roles/glusterfs/tasks/create_volume.yml b/roles/glusterfs/tasks/create_volume.yml
new file mode 100644
index 0000000..9b955b0
--- /dev/null
+++ b/roles/glusterfs/tasks/create_volume.yml
@@ -0,0 +1,4 @@
+---
+- include: "{{ volume.value.type }}/vols{{ ((domain_servers | length) < 4) | ternary((domain_servers | length), 3) }}.yml"
+  vars:
+    name: "{{ volume.key }}"
diff --git a/roles/glusterfs/tasks/main.yml b/roles/glusterfs/tasks/main.yml
new file mode 100644
index 0000000..dbd1aad
--- /dev/null
+++ b/roles/glusterfs/tasks/main.yml
@@ -0,0 +1,13 @@
+---
+- include: common.yml
+  when:
+    - "'software' in glusterfs_subroles"
+
+- include: server.yml
+  when:
+    - "'software' in glusterfs_subroles"
+    - "'ands_storage_servers' in group_names"
+
+- include: volumes.yml
+  when:
+    - "'volumes' in glusterfs_subroles"
diff --git a/roles/glusterfs/tasks/mount_domain.yml b/roles/glusterfs/tasks/mount_domain.yml
new file mode 100644
index 0000000..94b6677
--- /dev/null
+++ b/roles/glusterfs/tasks/mount_domain.yml
@@ -0,0 +1,12 @@
+---
+- name: Mount volumes
+  include: mount_volume.yml
+  with_dict: "{{ domain.volumes }}"
+  vars:
+    name: "{{ volume.key }}"
+    path: "{{ volume.value.mount }}"
+    server_group: "{{ domain.servers }}"
+    domain_servers: "{{ groups[domain.servers] | map('extract', hostvars, 'ands_storage_hostname') | list }}"
+  when: volume.value.mount is defined
+  loop_control:
+    loop_var: volume
diff --git a/roles/glusterfs/tasks/mount_volume.yml b/roles/glusterfs/tasks/mount_volume.yml
new file mode 100644
index 0000000..2aea7f6
--- /dev/null
+++ b/roles/glusterfs/tasks/mount_volume.yml
@@ -0,0 +1,8 @@
+---
+- name: Mount {{ name }} volume
+  mount: name="{{ path }}" src="localhost:{{ name }}" fstype="glusterfs" opts="defaults,_netdev" state="mounted"
+  when: server_group in group_names
+
+- name: Mount {{ name }} volume
+  mount: name="{{ path }}" src="{{ domain_servers | join(",") }}:{{ name }}" fstype="glusterfs" opts="defaults,_netdev" state="mounted"
+  when: not server_group in group_names
diff --git a/roles/glusterfs/tasks/server.yml b/roles/glusterfs/tasks/server.yml
new file mode 100644
index 0000000..328a8c5
--- /dev/null
+++ b/roles/glusterfs/tasks/server.yml
@@ -0,0 +1,31 @@
+---
+- name: Ensure GlusterFS is installed
+  yum: name={{item}} state=present
+  with_items:
+    - glusterfs-server
+    - glusterfs-rdma
+
+- name: Ensure GlusterFS service is running
+  service: name=glusterd state=started enabled=yes
+
+- name: Ensure firewalld is running
+  service: name=firewalld state=started enabled=yes
+
+- name: Configure firewalld
+  firewalld: rich_rule="rule family=ipv4 source address={{glusterfs_network}} service name=glusterfs accept" state="enabled" permanent="true" immediate="true"
+  when: glusterfs_network is defined
+
+- name: Configure firewalld
+  firewalld: service="glusterfs" state="enabled" permanent="true" immediate="true"
+  when: not glusterfs_network is defined
+
+- name: Reload firewalld rules
+  shell: firewall-cmd --reload
+
+- name: Create folder for GlusterFS bricks
+  file: dest="{{glusterfs_bricks_path}}" owner="root" group="root" mode="0755" state="directory"
+
+- name: Configure gluster peers (on first host)
+  shell: gluster peer probe {{item}}
+  run_once: true
+  with_items: "{{ glusterfs_servers }}"
diff --git a/roles/glusterfs/tasks/tmp/vols2.yml b/roles/glusterfs/tasks/tmp/vols2.yml
new file mode 120000
index 0000000..b6a3e8f
--- /dev/null
+++ b/roles/glusterfs/tasks/tmp/vols2.yml
@@ -0,0 +1 @@
+vols3.yml
\ No newline at end of file
diff --git a/roles/glusterfs/tasks/tmp/vols3.yml b/roles/glusterfs/tasks/tmp/vols3.yml
new file mode 100644
index 0000000..9565bb3
--- /dev/null
+++ b/roles/glusterfs/tasks/tmp/vols3.yml
@@ -0,0 +1,11 @@
+---
+- name: "Create {{ name }} volume"
+  gluster_volume:
+    state: present
+    name: "{{ name }}"
+    cluster: "{{ domain_servers | join(',') }}"
+    bricks: "{{ glusterfs_bricks_path }}/brick-{{ name }}"
+    transport: "{{ glusterfs_transport }}"
+
+- name: "Start {{ name }} volume"
+  gluster_volume: state="started" name="{{ name }}"
diff --git a/roles/glusterfs/tasks/volumes.yml b/roles/glusterfs/tasks/volumes.yml
new file mode 100644
index 0000000..e393c08
--- /dev/null
+++ b/roles/glusterfs/tasks/volumes.yml
@@ -0,0 +1,15 @@
+- name: Configure volume domains
+  include: create_domain.yml
+  run_once: true
+  delegate_to: "{{ groups[domain.servers][0] }}"
+  with_items: "{{ glusterfs_domains }}"
+  loop_control:
+    loop_var: domain
+
+- name: Mount volume domains
+  include: mount_domain.yml
+  when: ( domain.clients | default("---") ) in group_names
+  with_items: "{{ glusterfs_domains }}"
+  loop_control:
+    loop_var: domain
+
-- 
cgit v1.2.3
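
Note: the patch never shows an example of the structure behind glusterfs_domains (which defaults to
ands_storage_domains). The sketch below is a minimal assumption, inferred only from how volumes.yml,
create_domain.yml, create_volume.yml and mount_domain.yml dereference it; the group name 'nodes' and the
volume names 'openshift' and 'scratch' are hypothetical, while 'ands_storage_servers', the 'cfg'/'tmp'
types and the 'mount' key come from the role itself.

    # Assumed shape of glusterfs_domains / ands_storage_domains (values illustrative only)
    glusterfs_domains:
      - servers: ands_storage_servers            # inventory group whose hosts hold the bricks
        clients: nodes                           # hypothetical group that mounts the volumes
        volumes:
          openshift:                             # hypothetical volume name
            type: cfg                            # picks tasks/cfg/vols{2,3}.yml (replicated across the servers)
            mount: "{{ ands_data_path }}/openshift"
          scratch:                               # hypothetical volume name
            type: tmp                            # picks tasks/tmp/vols{2,3}.yml (no replicas)
            mount: "{{ ands_data_path }}/scratch"

The 'type' key selects the task subdirectory, vols2.yml or vols3.yml is chosen from the number of domain
servers, and volumes with a 'mount' key are mounted on the servers themselves and on the 'clients' group.
Similarly, a play could restrict the role to one subrole through the 'subrole' variable consumed by
defaults/main.yml; the play target below is again an assumption:

    - hosts: nodes                               # assumed play target
      roles:
        - role: glusterfs
          subrole: software                      # run only package/peer setup; omit to run everything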