From 1a72183498f89644aacd32ae52ed3a65d85c86b3 Mon Sep 17 00:00:00 2001
From: "Jose A. Rivera" <jarrpa@redhat.com>
Date: Tue, 14 Mar 2017 19:04:58 -0500
Subject: GlusterFS playbook and role

Signed-off-by: Jose A. Rivera <jarrpa@redhat.com>
---
 playbooks/common/openshift-glusterfs/config.yml    |  21 +++
 .../common/openshift-glusterfs/filter_plugins      |   1 +
 .../common/openshift-glusterfs/lookup_plugins      |   1 +
 playbooks/common/openshift-glusterfs/roles         |   1 +
 roles/openshift_storage_glusterfs/README.md        |  60 +++++++
 .../openshift_storage_glusterfs/defaults/main.yml  |  17 ++
 .../files/v1.6/deploy-heketi-template.yml          | 115 +++++++++++++
 .../files/v1.6/glusterfs-registry-service.yml      |  10 ++
 .../files/v1.6/glusterfs-template.yml              | 128 +++++++++++++++
 .../files/v1.6/heketi-template.yml                 | 113 +++++++++++++
 .../filter_plugins/openshift_storage_glusterfs.py  |  23 +++
 roles/openshift_storage_glusterfs/meta/main.yml    |  15 ++
 .../tasks/glusterfs_deploy.yml                     | 107 ++++++++++++
 .../tasks/glusterfs_registry.yml                   |  48 ++++++
 .../tasks/heketi_deploy_part1.yml                  |  41 +++++
 .../tasks/heketi_deploy_part2.yml                  | 109 ++++++++++++
 roles/openshift_storage_glusterfs/tasks/main.yml   | 182 +++++++++++++++++++++
 .../v1.6/glusterfs-registry-endpoints.yml.j2       |  11 ++
 .../templates/v1.6/topology.json.j2                |  39 +++++
 19 files changed, 1042 insertions(+)
 create mode 100644 playbooks/common/openshift-glusterfs/config.yml
 create mode 120000 playbooks/common/openshift-glusterfs/filter_plugins
 create mode 120000 playbooks/common/openshift-glusterfs/lookup_plugins
 create mode 120000 playbooks/common/openshift-glusterfs/roles
 create mode 100644 roles/openshift_storage_glusterfs/README.md
 create mode 100644 roles/openshift_storage_glusterfs/defaults/main.yml
 create mode 100644 roles/openshift_storage_glusterfs/files/v1.6/deploy-heketi-template.yml
 create mode 100644 roles/openshift_storage_glusterfs/files/v1.6/glusterfs-registry-service.yml
 create mode 100644 roles/openshift_storage_glusterfs/files/v1.6/glusterfs-template.yml
 create mode 100644 roles/openshift_storage_glusterfs/files/v1.6/heketi-template.yml
 create mode 100644 roles/openshift_storage_glusterfs/filter_plugins/openshift_storage_glusterfs.py
 create mode 100644 roles/openshift_storage_glusterfs/meta/main.yml
 create mode 100644 roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
 create mode 100644 roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
 create mode 100644 roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml
 create mode 100644 roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
 create mode 100644 roles/openshift_storage_glusterfs/tasks/main.yml
 create mode 100644 roles/openshift_storage_glusterfs/templates/v1.6/glusterfs-registry-endpoints.yml.j2
 create mode 100644 roles/openshift_storage_glusterfs/templates/v1.6/topology.json.j2

diff --git a/playbooks/common/openshift-glusterfs/config.yml b/playbooks/common/openshift-glusterfs/config.yml
new file mode 100644
index 000000000..75faf5ba8
--- /dev/null
+++ b/playbooks/common/openshift-glusterfs/config.yml
@@ -0,0 +1,21 @@
+---
+- name: Open firewall ports for GlusterFS
+  hosts: oo_glusterfs_to_config
+  vars:
+    os_firewall_allow:
+    - service: glusterfs_sshd
+      port: "2222/tcp"
+    - service: glusterfs_daemon
+      port: "24007/tcp"
+    - service: glusterfs_management
+      port: "24008/tcp"
+    - service: glusterfs_bricks
+      port: "49152-49251/tcp"
+  roles:
+  - os_firewall
+
+- name: Configure GlusterFS
+  hosts: oo_first_master
+  roles:
+  - role: openshift_storage_glusterfs
+    when: groups.oo_glusterfs_to_config | default([]) | count > 0
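The role invoked by the second play is driven entirely by variables, documented in the role README below. As a sketch of how an installation might tune it, overrides can be passed inline on the role invocation; the values here are hypothetical:

```yaml
- name: Configure GlusterFS
  hosts: oo_first_master
  roles:
  - role: openshift_storage_glusterfs
    openshift_storage_glusterfs_namespace: glusterfs  # hypothetical; the role defaults to 'default'
    openshift_storage_glusterfs_timeout: 600          # hypothetical; the role defaults to 300
```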
diff --git a/playbooks/common/openshift-glusterfs/filter_plugins b/playbooks/common/openshift-glusterfs/filter_plugins
new file mode 120000
index 000000000..99a95e4ca
--- /dev/null
+++ b/playbooks/common/openshift-glusterfs/filter_plugins
@@ -0,0 +1 @@
+../../../filter_plugins
\ No newline at end of file
diff --git a/playbooks/common/openshift-glusterfs/lookup_plugins b/playbooks/common/openshift-glusterfs/lookup_plugins
new file mode 120000
index 000000000..ac79701db
--- /dev/null
+++ b/playbooks/common/openshift-glusterfs/lookup_plugins
@@ -0,0 +1 @@
+../../../lookup_plugins
\ No newline at end of file
diff --git a/playbooks/common/openshift-glusterfs/roles b/playbooks/common/openshift-glusterfs/roles
new file mode 120000
index 000000000..e2b799b9d
--- /dev/null
+++ b/playbooks/common/openshift-glusterfs/roles
@@ -0,0 +1 @@
+../../../roles/
\ No newline at end of file
diff --git a/roles/openshift_storage_glusterfs/README.md b/roles/openshift_storage_glusterfs/README.md
new file mode 100644
index 000000000..cf0fb94c9
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/README.md
@@ -0,0 +1,60 @@
+OpenShift GlusterFS Cluster
+===========================
+
+OpenShift GlusterFS Cluster Installation
+
+Requirements
+------------
+
+* Ansible 2.2
+
+Role Variables
+--------------
+
+From this role:
+
+| Name                                             | Default value           |                                         |
+|--------------------------------------------------|-------------------------|-----------------------------------------|
+| openshift_storage_glusterfs_timeout              | 300                     | Seconds to wait for pods to become ready
+| openshift_storage_glusterfs_namespace            | 'default'               | Namespace in which to create GlusterFS resources
+| openshift_storage_glusterfs_is_native            | True                    | GlusterFS should be containerized
+| openshift_storage_glusterfs_nodeselector         | 'storagenode=glusterfs' | Selector to determine which nodes will host GlusterFS pods in native mode
+| openshift_storage_glusterfs_image                | 'gluster/gluster-centos'| Container image to use for GlusterFS pods, enterprise default is 'rhgs3/rhgs-server-rhel7'
+| openshift_storage_glusterfs_version              | 'latest'                | Container image version to use for GlusterFS pods
+| openshift_storage_glusterfs_wipe                 | False                   | Destroy any existing GlusterFS resources and wipe storage devices. **WARNING: THIS WILL DESTROY ANY DATA ON THOSE DEVICES.**
+| openshift_storage_glusterfs_heketi_is_native     | True                    | heketi should be containerized
+| openshift_storage_glusterfs_heketi_image         | 'heketi/heketi'         | Container image to use for heketi pods, enterprise default is 'rhgs3/rhgs-volmanager-rhel7'
+| openshift_storage_glusterfs_heketi_version       | 'latest'                | Container image version to use for heketi pods
+| openshift_storage_glusterfs_heketi_admin_key     | ''                      | String to use as secret key for performing heketi commands as admin
+| openshift_storage_glusterfs_heketi_user_key      | ''                      | String to use as secret key for performing heketi commands as user that can only view or modify volumes
+| openshift_storage_glusterfs_heketi_topology_load | True                    | Load the GlusterFS topology information into heketi
+| openshift_storage_glusterfs_heketi_url           | Undefined               | URL for the heketi REST API, dynamically determined in native mode
+| openshift_storage_glusterfs_heketi_wipe          | False                   | Destroy any existing heketi resources, defaults to the value of `openshift_storage_glusterfs_wipe`
+
+Dependencies
+------------
+
+* os_firewall
+* openshift_hosted_facts
+* openshift_repos
+* lib_openshift
+
+Example Playbook
+----------------
+
+```
+- name: Configure GlusterFS hosts
+  hosts: oo_first_master
+  roles:
+  - role: openshift_storage_glusterfs
+```
+
+License
+-------
+
+Apache License, Version 2.0
+
+Author Information
+------------------
+
+Jose A. Rivera (jarrpa@redhat.com)
diff --git a/roles/openshift_storage_glusterfs/defaults/main.yml b/roles/openshift_storage_glusterfs/defaults/main.yml
new file mode 100644
index 000000000..ade850747
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/defaults/main.yml
@@ -0,0 +1,17 @@
+---
+openshift_storage_glusterfs_timeout: 300
+openshift_storage_glusterfs_namespace: 'default'
+openshift_storage_glusterfs_is_native: True
+openshift_storage_glusterfs_nodeselector: "{{ openshift_storage_glusterfs_nodeselector_label | default('storagenode=glusterfs') | map_from_pairs }}"
+openshift_storage_glusterfs_image: "{{ 'rhgs3/rhgs-server-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'gluster/gluster-centos' | quote }}"
+openshift_storage_glusterfs_version: 'latest'
+openshift_storage_glusterfs_wipe: False
+openshift_storage_glusterfs_heketi_is_native: True
+openshift_storage_glusterfs_heketi_is_missing: True
+openshift_storage_glusterfs_heketi_deploy_is_missing: True
+openshift_storage_glusterfs_heketi_image: "{{ 'rhgs3/rhgs-volmanager-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'heketi/heketi' | quote }}"
+openshift_storage_glusterfs_heketi_version: 'latest'
+openshift_storage_glusterfs_heketi_admin_key: ''
+openshift_storage_glusterfs_heketi_user_key: ''
+openshift_storage_glusterfs_heketi_topology_load: True
+openshift_storage_glusterfs_heketi_wipe: "{{ openshift_storage_glusterfs_wipe }}"
diff --git a/roles/openshift_storage_glusterfs/files/v1.6/deploy-heketi-template.yml b/roles/openshift_storage_glusterfs/files/v1.6/deploy-heketi-template.yml
new file mode 100644
index 000000000..c9945be13
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v1.6/deploy-heketi-template.yml
@@ -0,0 +1,115 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+  name: deploy-heketi
+  labels:
+    glusterfs: heketi-template
+    deploy-heketi: support
+  annotations:
+    description: Bootstrap Heketi installation
+    tags: glusterfs,heketi,installation
+labels:
+  template: deploy-heketi
+objects:
+- kind: Service
+  apiVersion: v1
+  metadata:
+    name: deploy-heketi
+    labels:
+      glusterfs: deploy-heketi-service
+      deploy-heketi: support
+    annotations:
+      description: Exposes Heketi service
+  spec:
+    ports:
+    - name: deploy-heketi
+      port: 8080
+      targetPort: 8080
+    selector:
+      name: deploy-heketi
+- kind: Route
+  apiVersion: v1
+  metadata:
+    name: deploy-heketi
+    labels:
+      glusterfs: deploy-heketi-route
+      deploy-heketi: support
+  spec:
+    to:
+      kind: Service
+      name: deploy-heketi
+- kind: DeploymentConfig
+  apiVersion: v1
+  metadata:
+    name: deploy-heketi
+    labels:
+      glusterfs: deploy-heketi-dc
+      deploy-heketi: support
+    annotations:
+      description: Defines how to deploy Heketi
+  spec:
+    replicas: 1
+    selector:
+      name: deploy-heketi
+    triggers:
+    - type: ConfigChange
+    strategy:
+      type: Recreate
+    template:
+      metadata:
+        name: deploy-heketi
+        labels:
+          name: deploy-heketi
+          glusterfs: deploy-heketi-pod
+          deploy-heketi: support
+      spec:
+        serviceAccountName: heketi-service-account
+        containers:
+        - name: deploy-heketi
+          image: ${IMAGE_NAME}:${IMAGE_VERSION}
+          env:
+          - name: HEKETI_USER_KEY
+            value: ${HEKETI_USER_KEY}
+          - name: HEKETI_ADMIN_KEY
+            value: ${HEKETI_ADMIN_KEY}
+          - name: HEKETI_EXECUTOR
+            value: kubernetes
+          - name: HEKETI_FSTAB
+            value: /var/lib/heketi/fstab
+          - name: HEKETI_SNAPSHOT_LIMIT
+            value: '14'
+          - name: HEKETI_KUBE_GLUSTER_DAEMONSET
+            value: '1'
+          ports:
+          - containerPort: 8080
+          volumeMounts:
+          - name: db
+            mountPath: /var/lib/heketi
+          readinessProbe:
+            timeoutSeconds: 3
+            initialDelaySeconds: 3
+            httpGet:
+              path: /hello
+              port: 8080
+          livenessProbe:
+            timeoutSeconds: 3
+            initialDelaySeconds: 30
+            httpGet:
+              path: /hello
+              port: 8080
+        volumes:
+        - name: db
+parameters:
+- name: HEKETI_USER_KEY
+  displayName: Heketi User Secret
+  description: Set secret for those creating volumes as type _user_
+- name: HEKETI_ADMIN_KEY
+  displayName: Heketi Administrator Secret
+  description: Set secret for administration of the Heketi service as user _admin_
+- name: IMAGE_NAME
+  displayName: GlusterFS container name
+  required: True
+- name: IMAGE_VERSION
+  displayName: GlusterFS container version
+  required: True
diff --git a/roles/openshift_storage_glusterfs/files/v1.6/glusterfs-registry-service.yml b/roles/openshift_storage_glusterfs/files/v1.6/glusterfs-registry-service.yml
new file mode 100644
index 000000000..3f8d8f507
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v1.6/glusterfs-registry-service.yml
@@ -0,0 +1,10 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: glusterfs-registry-endpoints
+spec:
+  ports:
+  - port: 1
+status:
+  loadBalancer: {}
diff --git a/roles/openshift_storage_glusterfs/files/v1.6/glusterfs-template.yml b/roles/openshift_storage_glusterfs/files/v1.6/glusterfs-template.yml
new file mode 100644
index 000000000..c66705752
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v1.6/glusterfs-template.yml
@@ -0,0 +1,128 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+  name: glusterfs
+  labels:
+    glusterfs: template
+  annotations:
+    description: GlusterFS DaemonSet template
+    tags: glusterfs
+objects:
+- kind: DaemonSet
+  apiVersion: extensions/v1beta1
+  metadata:
+    name: glusterfs
+    labels:
+      glusterfs: daemonset
+    annotations:
+      description: GlusterFS DaemonSet
+      tags: glusterfs
+  spec:
+    selector:
+      matchLabels:
+        glusterfs-node: pod
+    template:
+      metadata:
+        name: glusterfs
+        labels:
+          glusterfs-node: pod
+      spec:
+        nodeSelector:
+          storagenode: glusterfs
+        hostNetwork: true
+        containers:
+        - name: glusterfs
+          image: ${IMAGE_NAME}:${IMAGE_VERSION}
+          imagePullPolicy: IfNotPresent
+          volumeMounts:
+          - name: glusterfs-heketi
+            mountPath: "/var/lib/heketi"
+          - name: glusterfs-run
+            mountPath: "/run"
+          - name: glusterfs-lvm
+            mountPath: "/run/lvm"
+          - name: glusterfs-etc
+            mountPath: "/etc/glusterfs"
+          - name: glusterfs-logs
+            mountPath: "/var/log/glusterfs"
+          - name: glusterfs-config
+            mountPath: "/var/lib/glusterd"
+          - name: glusterfs-dev
+            mountPath: "/dev"
+          - name: glusterfs-misc
+            mountPath: "/var/lib/misc/glusterfsd"
+          - name: glusterfs-cgroup
+            mountPath: "/sys/fs/cgroup"
+            readOnly: true
+          - name: glusterfs-ssl
+            mountPath: "/etc/ssl"
+            readOnly: true
+          securityContext:
+            capabilities: {}
+            privileged: true
+          readinessProbe:
+            timeoutSeconds: 3
+            initialDelaySeconds: 100
+            exec:
+              command:
+              - "/bin/bash"
+              - "-c"
+              - systemctl status glusterd.service
+            periodSeconds: 10
+            successThreshold: 1
+            failureThreshold: 3
+          livenessProbe:
+            timeoutSeconds: 3
+            initialDelaySeconds: 100
+            exec:
+              command:
+              - "/bin/bash"
+              - "-c"
+              - systemctl status glusterd.service
+            periodSeconds: 10
+            successThreshold: 1
+            failureThreshold: 3
+          resources: {}
+          terminationMessagePath: "/dev/termination-log"
+        volumes:
+        - name: glusterfs-heketi
+          hostPath:
+            path: "/var/lib/heketi"
+        - name: glusterfs-run
+          emptyDir: {}
+        - name: glusterfs-lvm
+          hostPath:
+            path: "/run/lvm"
+        - name: glusterfs-etc
+          hostPath:
+            path: "/etc/glusterfs"
+        - name: glusterfs-logs
+          hostPath:
+            path: "/var/log/glusterfs"
+        - name: glusterfs-config
+          hostPath:
+            path: "/var/lib/glusterd"
+        - name: glusterfs-dev
+          hostPath:
+            path: "/dev"
+        - name: glusterfs-misc
+          hostPath:
+            path: "/var/lib/misc/glusterfsd"
+        - name: glusterfs-cgroup
+          hostPath:
+            path: "/sys/fs/cgroup"
+        - name: glusterfs-ssl
+          hostPath:
+            path: "/etc/ssl"
+        restartPolicy: Always
+        terminationGracePeriodSeconds: 30
+        dnsPolicy: ClusterFirst
+        securityContext: {}
+parameters:
+- name: IMAGE_NAME
+  displayName: GlusterFS container name
+  required: True
+- name: IMAGE_VERSION
+  displayName: GlusterFS container version
+  required: True
diff --git a/roles/openshift_storage_glusterfs/files/v1.6/heketi-template.yml b/roles/openshift_storage_glusterfs/files/v1.6/heketi-template.yml
new file mode 100644
index 000000000..df045c170
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v1.6/heketi-template.yml
@@ -0,0 +1,113 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+  name: heketi
+  labels:
+    glusterfs: heketi-template
+  annotations:
+    description: Heketi service deployment template
+    tags: glusterfs,heketi
+labels:
+  template: heketi
+objects:
+- kind: Service
+  apiVersion: v1
+  metadata:
+    name: heketi
+    labels:
+      glusterfs: heketi-service
+    annotations:
+      description: Exposes Heketi service
+  spec:
+    ports:
+    - name: heketi
+      port: 8080
+      targetPort: 8080
+    selector:
+      glusterfs: heketi-pod
+- kind: Route
+  apiVersion: v1
+  metadata:
+    name: heketi
+    labels:
+      glusterfs: heketi-route
+  spec:
+    to:
+      kind: Service
+      name: heketi
+- kind: DeploymentConfig
+  apiVersion: v1
+  metadata:
+    name: heketi
+    labels:
+      glusterfs: heketi-dc
+    annotations:
+      description: Defines how to deploy Heketi
+  spec:
+    replicas: 1
+    selector:
+      glusterfs: heketi-pod
+    triggers:
+    - type: ConfigChange
+    strategy:
+      type: Recreate
+    template:
+      metadata:
+        name: heketi
+        labels:
+          glusterfs: heketi-pod
+      spec:
+        serviceAccountName: heketi-service-account
+        containers:
+        - name: heketi
+          image: ${IMAGE_NAME}:${IMAGE_VERSION}
+          imagePullPolicy: IfNotPresent
+          env:
+          - name: HEKETI_USER_KEY
+            value: ${HEKETI_USER_KEY}
+          - name: HEKETI_ADMIN_KEY
+            value: ${HEKETI_ADMIN_KEY}
+          - name: HEKETI_EXECUTOR
+            value: kubernetes
+          - name: HEKETI_FSTAB
+            value: /var/lib/heketi/fstab
+          - name: HEKETI_SNAPSHOT_LIMIT
+            value: '14'
+          - name: HEKETI_KUBE_GLUSTER_DAEMONSET
+            value: '1'
+          ports:
+          - containerPort: 8080
+          volumeMounts:
+          - name: db
+            mountPath: /var/lib/heketi
+          readinessProbe:
+            timeoutSeconds: 3
+            initialDelaySeconds: 3
+            httpGet:
+              path: /hello
+              port: 8080
+          livenessProbe:
+            timeoutSeconds: 3
+            initialDelaySeconds: 30
+            httpGet:
+              path: /hello
+              port: 8080
+        volumes:
+        - name: db
+          glusterfs:
+            endpoints: heketi-storage-endpoints
+            path: heketidbstorage
+parameters:
+- name: HEKETI_USER_KEY
+  displayName: Heketi User Secret
+  description: Set secret for those creating volumes as type _user_
+- name: HEKETI_ADMIN_KEY
+  displayName: Heketi Administrator Secret
+  description: Set secret for administration of the Heketi service as user _admin_
+- name: IMAGE_NAME
+  displayName: GlusterFS container name
+  required: True
+- name: IMAGE_VERSION
+  displayName: GlusterFS container version
+  required: True
diff --git a/roles/openshift_storage_glusterfs/filter_plugins/openshift_storage_glusterfs.py b/roles/openshift_storage_glusterfs/filter_plugins/openshift_storage_glusterfs.py
new file mode 100644
index 000000000..88801e487
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/filter_plugins/openshift_storage_glusterfs.py
@@ -0,0 +1,23 @@
+'''
+ Openshift Storage GlusterFS class that provides useful filters used in GlusterFS
+'''
+
+
+def map_from_pairs(source, delim="="):
+    ''' Returns a dict given the source and delim delimited '''
+    if source == '':
+        return dict()
+
+    return dict(item.split(delim) for item in source.split(","))
+
+
+# pylint: disable=too-few-public-methods
+class FilterModule(object):
+    ''' OpenShift Storage GlusterFS Filters '''
+
+    # pylint: disable=no-self-use, too-few-public-methods
+    def filters(self):
+        ''' Returns the names of the filters provided by this class '''
+        return {
+            'map_from_pairs': map_from_pairs
+        }
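The defaults file earlier relies on this filter to turn the `name=value` node-selector string into a dict. A minimal sketch of its behavior; the two-pair input is hypothetical:

```yaml
# single pair, as used by openshift_storage_glusterfs_nodeselector
- debug:
    msg: "{{ 'storagenode=glusterfs' | map_from_pairs }}"
# => {'storagenode': 'glusterfs'}

# comma-delimited pairs also split correctly
- debug:
    msg: "{{ 'storagenode=glusterfs,zone=east' | map_from_pairs }}"
# => {'storagenode': 'glusterfs', 'zone': 'east'}
```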
diff --git a/roles/openshift_storage_glusterfs/meta/main.yml b/roles/openshift_storage_glusterfs/meta/main.yml
new file mode 100644
index 000000000..aab9851f9
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/meta/main.yml
@@ -0,0 +1,15 @@
+---
+galaxy_info:
+  author: Jose A. Rivera
+  description: OpenShift GlusterFS Cluster
+  company: Red Hat, Inc.
+  license: Apache License, Version 2.0
+  min_ansible_version: 2.2
+  platforms:
+  - name: EL
+    versions:
+    - 7
+dependencies:
+- role: openshift_hosted_facts
+- role: openshift_repos
+- role: lib_openshift
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
new file mode 100644
index 000000000..26ca5eebf
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
@@ -0,0 +1,107 @@
+---
+- assert:
+    that: "openshift_storage_glusterfs_nodeselector.keys() | count == 1"
+    msg: Only one GlusterFS nodeselector key pair should be provided
+
+- assert:
+    that: "groups.oo_glusterfs_to_config | count >= 3"
+    msg: There must be at least three GlusterFS nodes specified
+
+- name: Delete pre-existing GlusterFS resources
+  oc_obj:
+    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    kind: "template,daemonset"
+    name: glusterfs
+    state: absent
+  when: openshift_storage_glusterfs_wipe
+
+- name: Unlabel any existing GlusterFS nodes
+  oc_label:
+    name: "{{ item }}"
+    kind: node
+    state: absent
+    labels: "{{ openshift_storage_glusterfs_nodeselector | oo_dict_to_list_of_dict }}"
+  with_items: "{{ groups.all }}"
+  when: openshift_storage_glusterfs_wipe
+
+- name: Delete pre-existing GlusterFS config
+  file:
+    path: /var/lib/glusterd
+    state: absent
+  delegate_to: "{{ item }}"
+  with_items: "{{ groups.oo_glusterfs_to_config }}"
+  when: openshift_storage_glusterfs_wipe
+
+- name: Get GlusterFS storage devices state
+  command: "pvdisplay -C --noheadings -o pv_name,vg_name {% for device in hostvars[item].glusterfs_devices %}{{ device }} {% endfor %}"
+  register: devices_info
+  delegate_to: "{{ item }}"
+  with_items: "{{ groups.oo_glusterfs_to_config }}"
+  failed_when: False
+  when: openshift_storage_glusterfs_wipe
+
+  # Runs "vgremove -fy <vg>; pvremove -fy <pv>" for every device found to be a physical volume.
+- name: Clear GlusterFS storage device contents
+  shell: "{% for line in item.stdout_lines %}{% set fields = line.split() %}{% if fields | count > 1 %}vgremove -fy {{ fields[1] }}; {% endif %}pvremove -fy {{ fields[0] }}; {% endfor %}"
+  delegate_to: "{{ item.item }}"
+  with_items: "{{ devices_info.results }}"
+  when:
+  - openshift_storage_glusterfs_wipe
+  - item.stdout_lines | count > 0
+
+- name: Add service accounts to privileged SCC
+  oc_adm_policy_user:
+    user: "system:serviceaccount:{{ openshift_storage_glusterfs_namespace }}:{{ item }}"
+    resource_kind: scc
+    resource_name: privileged
+    state: present
+  with_items:
+  - 'default'
+  - 'router'
+
+- name: Label GlusterFS nodes
+  oc_label:
+    name: "{{ glusterfs_host }}"
+    kind: node
+    state: add
+    labels: "{{ openshift_storage_glusterfs_nodeselector | oo_dict_to_list_of_dict }}"
+  with_items: "{{ groups.oo_glusterfs_to_config }}"
+  loop_control:
+    loop_var: glusterfs_host
+
+- name: Copy GlusterFS DaemonSet template
+  copy:
+    src: "{{ openshift.common.examples_content_version }}/glusterfs-template.yml"
+    dest: "{{ mktemp.stdout }}/glusterfs-template.yml"
+
+- name: Create GlusterFS template
+  oc_obj:
+    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    kind: template
+    name: glusterfs
+    state: present
+    files:
+    - "{{ mktemp.stdout }}/glusterfs-template.yml"
+
+- name: Deploy GlusterFS pods
+  oc_process:
+    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    template_name: "glusterfs"
+    create: True
+    params:
+      IMAGE_NAME: "{{ openshift_storage_glusterfs_image }}"
+      IMAGE_VERSION: "{{ openshift_storage_glusterfs_version }}"
+
+- name: Wait for GlusterFS pods
+  oc_obj:
+    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    kind: pod
+    state: list
+    selector: "glusterfs-node=pod"
+  register: glusterfs_pods
+  until:
+  - "glusterfs_pods.results.results[0]['items'] | count > 0"
+  # There must be as many pods with 'Ready' status True as there are nodes expecting those pods
+  - "glusterfs_pods.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == groups.oo_glusterfs_to_config | count"
+  delay: 10
+  retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}"
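To make the wipe path concrete: `pvdisplay -C --noheadings -o pv_name,vg_name` prints one line per device that is an LVM physical volume, and the shell task above expands each line into a `vgremove`/`pvremove` pair, skipping the `vgremove` when the PV reports no volume group. A hypothetical illustration, with invented device and VG names:

```
pvdisplay line:   /dev/xvdc  glusterfs_vg
generated shell:  vgremove -fy glusterfs_vg; pvremove -fy /dev/xvdc;
```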
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
new file mode 100644
index 000000000..9f092d5d5
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
@@ -0,0 +1,48 @@
+---
+- name: Delete pre-existing GlusterFS registry resources
+  oc_obj:
+    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    kind: "{{ item.kind }}"
+    name: "{{ item.name | default(omit) }}"
+    selector: "{{ item.selector | default(omit) }}"
+    state: absent
+  with_items:
+  - kind: "svc,ep"
+    name: "glusterfs-registry-endpoints"
+  failed_when: False
+
+- name: Generate GlusterFS registry endpoints
+  template:
+    src: "{{ openshift.common.examples_content_version }}/glusterfs-registry-endpoints.yml.j2"
+    dest: "{{ mktemp.stdout }}/glusterfs-registry-endpoints.yml"
+
+- name: Copy GlusterFS registry service
+  copy:
+    src: "{{ openshift.common.examples_content_version }}/glusterfs-registry-service.yml"
+    dest: "{{ mktemp.stdout }}/glusterfs-registry-service.yml"
+
+- name: Create GlusterFS registry endpoints
+  oc_obj:
+    namespace: "{{ openshift.hosted.registry.namespace | default('default') }}"
+    state: present
+    kind: endpoints
+    name: glusterfs-registry-endpoints
+    files:
+    - "{{ mktemp.stdout }}/glusterfs-registry-endpoints.yml"
+
+- name: Create GlusterFS registry service
+  oc_obj:
+    namespace: "{{ openshift.hosted.registry.namespace | default('default') }}"
+    state: present
+    kind: service
+    name: glusterfs-registry-endpoints
+    files:
+    - "{{ mktemp.stdout }}/glusterfs-registry-service.yml"
+
+- name: Check if GlusterFS registry volume exists
+  command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' volume list"
+  register: registry_volume
+
+- name: Create GlusterFS registry volume
+  command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' volume create --size={{ openshift.hosted.registry.storage.volume.size | replace('Gi','') }} --name={{ openshift.hosted.registry.storage.glusterfs.path }}"
+  when: "openshift.hosted.registry.storage.glusterfs.path not in registry_volume.stdout"
diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml
new file mode 100644
index 000000000..76ae1db75
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml
@@ -0,0 +1,41 @@
+---
+- name: Copy initial heketi resource files
+  copy:
+    src: "{{ openshift.common.examples_content_version }}/{{ item }}"
+    dest: "{{ mktemp.stdout }}/{{ item }}"
+  with_items:
+  - "deploy-heketi-template.yml"
+
+- name: Create deploy-heketi resources
+  oc_obj:
+    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    kind: template
+    name: deploy-heketi
+    state: present
+    files:
+    - "{{ mktemp.stdout }}/deploy-heketi-template.yml"
+
+- name: Deploy deploy-heketi pod
+  oc_process:
+    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    template_name: "deploy-heketi"
+    create: True
+    params:
+      IMAGE_NAME: "{{ openshift_storage_glusterfs_heketi_image }}"
+      IMAGE_VERSION: "{{ openshift_storage_glusterfs_heketi_version }}"
+      HEKETI_USER_KEY: "{{ openshift_storage_glusterfs_heketi_user_key }}"
+      HEKETI_ADMIN_KEY: "{{ openshift_storage_glusterfs_heketi_admin_key }}"
+
+- name: Wait for deploy-heketi pod
+  oc_obj:
+    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    kind: pod
+    state: list
+    selector: "glusterfs=deploy-heketi-pod,deploy-heketi=support"
+  register: heketi_pod
+  until:
+  - "heketi_pod.results.results[0]['items'] | count > 0"
+  # Pod's 'Ready' status must be True
+  - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"
+  delay: 10
+  retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}"
diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
new file mode 100644
index 000000000..84b85e95d
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
@@ -0,0 +1,109 @@
+---
+- name: Create heketi DB volume
+  command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' setup-openshift-heketi-storage --listfile {{ mktemp.stdout }}/heketi-storage.json"
+  register: setup_storage
+  failed_when: False
+
+# This is used in the subsequent task
+- name: Copy the admin client config
+  command: >
+    cp {{ openshift.common.config_base }}/master/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
+  changed_when: False
+  check_mode: no
+
+# Need `command` here because heketi-storage.json contains multiple objects.
+- name: Copy heketi DB to GlusterFS volume
+  command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f {{ mktemp.stdout }}/heketi-storage.json -n {{ openshift_storage_glusterfs_namespace }}"
+  when: "setup_storage.rc == 0"
+
+- name: Wait for copy job to finish
+  oc_obj:
+    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    kind: job
+    state: list
+    name: "heketi-storage-copy-job"
+  register: heketi_job
+  until:
+  - "'results' in heketi_job.results and heketi_job.results.results | count > 0"
+  # Pod's 'Complete' status must be True
+  - "heketi_job.results.results | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Complete'}) | map('bool') | select | list | count == 1"
+  delay: 10
+  retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}"
+  failed_when:
+  - "'results' in heketi_job.results"
+  - "heketi_job.results.results | count > 0"
+  # Fail when pod's 'Failed' status is True
+  - "heketi_job.results.results | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Failed'}) | map('bool') | select | list | count == 1"
+  when: "setup_storage.rc == 0"
+
+- name: Delete deploy resources
+  oc_obj:
+    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    kind: "{{ item.kind }}"
+    name: "{{ item.name | default(omit) }}"
+    selector: "{{ item.selector | default(omit) }}"
+    state: absent
+  with_items:
+  - kind: "template,route,service,jobs,dc,secret"
+    selector: "deploy-heketi"
+  failed_when: False
+
+- name: Copy heketi template
+  copy:
+    src: "{{ openshift.common.examples_content_version }}/heketi-template.yml"
+    dest: "{{ mktemp.stdout }}/heketi-template.yml"
+
+- name: Create heketi resources
+  oc_obj:
+    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    kind: template
+    name: heketi
+    state: present
+    files:
+    - "{{ mktemp.stdout }}/heketi-template.yml"
+
+- name: Deploy heketi pod
+  oc_process:
+    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    template_name: "heketi"
+    create: True
+    params:
+      IMAGE_NAME: "{{ openshift_storage_glusterfs_heketi_image }}"
+      IMAGE_VERSION: "{{ openshift_storage_glusterfs_heketi_version }}"
+      HEKETI_USER_KEY: "{{ openshift_storage_glusterfs_heketi_user_key }}"
+      HEKETI_ADMIN_KEY: "{{ openshift_storage_glusterfs_heketi_admin_key }}"
+
+- name: Wait for heketi pod
+  oc_obj:
+    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    kind: pod
+    state: list
+    selector: "glusterfs=heketi-pod"
+  register: heketi_pod
+  until:
+  - "heketi_pod.results.results[0]['items'] | count > 0"
+  # Pod's 'Ready' status must be True
+  - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"
+  delay: 10
+  retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}"
+
+- name: Determine heketi URL
+  oc_obj:
+    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    state: list
+    kind: ep
+    selector: "glusterfs=heketi-service"
+  register: heketi_url
+  until:
+  - "heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip != ''"
+  - "heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port != ''"
+  delay: 10
+  retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}"
+
+- name: Set heketi URL
+  set_fact:
+    openshift_storage_glusterfs_heketi_url: "{{ heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip }}:{{ heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port }}"
+
+- name: Verify heketi service
+  command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' cluster list"
+  changed_when: False
diff --git a/roles/openshift_storage_glusterfs/tasks/main.yml b/roles/openshift_storage_glusterfs/tasks/main.yml
new file mode 100644
index 000000000..265a3cc6e
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/tasks/main.yml
@@ -0,0 +1,182 @@
+---
+- name: Create temp directory for doing work in
+  command: mktemp -d /tmp/openshift-glusterfs-ansible-XXXXXX
+  register: mktemp
+  changed_when: False
+  check_mode: no
+
+- name: Verify target namespace exists
+  oc_project:
+    state: present
+    name: "{{ openshift_storage_glusterfs_namespace }}"
+  when: openshift_storage_glusterfs_is_native or openshift_storage_glusterfs_heketi_is_native
+
+- include: glusterfs_deploy.yml
+  when: openshift_storage_glusterfs_is_native
+
+- name: Make sure heketi-client is installed
+  package: name=heketi-client state=present
+
+- name: Delete pre-existing heketi resources
+  oc_obj:
+    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    kind: "{{ item.kind }}"
+    name: "{{ item.name | default(omit) }}"
+    selector: "{{ item.selector | default(omit) }}"
+    state: absent
+  with_items:
+  - kind: "template,route,service,jobs,dc,secret"
+    selector: "deploy-heketi"
+  - kind: "template,route,dc,service"
+    name: "heketi"
+  - kind: "svc,ep"
+    name: "heketi-storage-endpoints"
+  - kind: "sa"
+    name: "heketi-service-account"
+  failed_when: False
+  when: openshift_storage_glusterfs_heketi_wipe
+
+- name: Wait for deploy-heketi pods to terminate
+  oc_obj:
+    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    kind: pod
+    state: list
+    selector: "glusterfs=deploy-heketi-pod"
+  register: heketi_pod
+  until: "heketi_pod.results.results[0]['items'] | count == 0"
+  delay: 10
+  retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}"
+  when: openshift_storage_glusterfs_heketi_wipe
+
+- name: Wait for heketi pods to terminate
+  oc_obj:
+    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    kind: pod
+    state: list
+    selector: "glusterfs=heketi-pod"
+  register: heketi_pod
+  until: "heketi_pod.results.results[0]['items'] | count == 0"
+  delay: 10
+  retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}"
+  when: openshift_storage_glusterfs_heketi_wipe
+
+- name: Create heketi service account
+  oc_serviceaccount:
+    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    name: heketi-service-account
+    state: present
+  when: openshift_storage_glusterfs_heketi_is_native
+
+- name: Add heketi service account to privileged SCC
+  oc_adm_policy_user:
+    user: "system:serviceaccount:{{ openshift_storage_glusterfs_namespace }}:heketi-service-account"
+    resource_kind: scc
+    resource_name: privileged
+    state: present
+  when: openshift_storage_glusterfs_heketi_is_native
+
+- name: Allow heketi service account to view/edit pods
+  oc_adm_policy_user:
+    user: "system:serviceaccount:{{ openshift_storage_glusterfs_namespace }}:heketi-service-account"
+    resource_kind: role
+    resource_name: edit
+    state: present
+  when: openshift_storage_glusterfs_heketi_is_native
+
+- name: Check for existing deploy-heketi pod
+  oc_obj:
+    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    state: list
+    kind: pod
+    selector: "glusterfs=deploy-heketi-pod,deploy-heketi=support"
+  register: heketi_pod
+  when: openshift_storage_glusterfs_heketi_is_native
+
+- name: Check if need to deploy deploy-heketi
+  set_fact:
+    openshift_storage_glusterfs_heketi_deploy_is_missing: False
+  when:
+  - "openshift_storage_glusterfs_heketi_is_native"
+  - "heketi_pod.results.results[0]['items'] | count > 0"
+  # deploy-heketi is not missing when there are one or more pods with matching labels whose 'Ready' status is True
+  - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0"
+
+- name: Check for existing heketi pod
+  oc_obj:
+    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    state: list
+    kind: pod
+    selector: "glusterfs=heketi-pod"
+  register: heketi_pod
+  when: openshift_storage_glusterfs_heketi_is_native
+
+- name: Check if need to deploy heketi
+  set_fact:
+    openshift_storage_glusterfs_heketi_is_missing: False
+  when:
+  - "openshift_storage_glusterfs_heketi_is_native"
+  - "heketi_pod.results.results[0]['items'] | count > 0"
+  # heketi is not missing when there are one or more pods with matching labels whose 'Ready' status is True
+  - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0"
+
+- include: heketi_deploy_part1.yml
+  when:
+  - openshift_storage_glusterfs_heketi_is_native
+  - openshift_storage_glusterfs_heketi_deploy_is_missing
+  - openshift_storage_glusterfs_heketi_is_missing
+
+- name: Determine heketi URL
+  oc_obj:
+    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    state: list
+    kind: ep
+    selector: "glusterfs in (deploy-heketi-service, heketi-service)"
+  register: heketi_url
+  until:
+  - "heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip != ''"
+  - "heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port != ''"
+  delay: 10
+  retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}"
+  when:
+  - openshift_storage_glusterfs_heketi_is_native
+  - openshift_storage_glusterfs_heketi_url is undefined
+
+- name: Set heketi URL
+  set_fact:
+    openshift_storage_glusterfs_heketi_url: "{{ heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip }}:{{ heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port }}"
+  when:
+  - openshift_storage_glusterfs_heketi_is_native
+  - openshift_storage_glusterfs_heketi_url is undefined
+
+- name: Verify heketi service
+  command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' cluster list"
+  changed_when: False
+
+- name: Generate topology file
+  template:
+    src: "{{ openshift.common.examples_content_version }}/topology.json.j2"
+    dest: "{{ mktemp.stdout }}/topology.json"
+  when:
+  - openshift_storage_glusterfs_is_native
+  - openshift_storage_glusterfs_heketi_topology_load
+
+- name: Load heketi topology
+  command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' topology load --json={{ mktemp.stdout }}/topology.json 2>&1"
+  register: topology_load
+  failed_when: "topology_load.rc != 0 or 'Unable' in topology_load.stdout"
+  when:
+  - openshift_storage_glusterfs_is_native
+  - openshift_storage_glusterfs_heketi_topology_load
+
+- include: heketi_deploy_part2.yml
+  when: openshift_storage_glusterfs_heketi_is_native and openshift_storage_glusterfs_heketi_is_missing
+
+- include: glusterfs_registry.yml
+  when: "openshift.hosted.registry.storage.kind == 'glusterfs'"
+
+- name: Delete temp directory
+  file:
+    name: "{{ mktemp.stdout }}"
+    state: absent
+  changed_when: False
+  check_mode: no
namespace: "{{ openshift_storage_glusterfs_namespace }}" + state: list + kind: pod + selector: "glusterfs=deploy-heketi-pod,deploy-heketi=support" + register: heketi_pod + when: openshift_storage_glusterfs_heketi_is_native + +- name: Check if need to deploy deploy-heketi + set_fact: + openshift_storage_glusterfs_heketi_deploy_is_missing: False + when: + - "openshift_storage_glusterfs_heketi_is_native" + - "heketi_pod.results.results[0]['items'] | count > 0" + # deploy-heketi is not missing when there are one or more pods with matching labels whose 'Ready' status is True + - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0" + +- name: Check for existing heketi pod + oc_obj: + namespace: "{{ openshift_storage_glusterfs_namespace }}" + state: list + kind: pod + selector: "glusterfs=heketi-pod" + register: heketi_pod + when: openshift_storage_glusterfs_heketi_is_native + +- name: Check if need to deploy heketi + set_fact: + openshift_storage_glusterfs_heketi_is_missing: False + when: + - "openshift_storage_glusterfs_heketi_is_native" + - "heketi_pod.results.results[0]['items'] | count > 0" + # heketi is not missing when there are one or more pods with matching labels whose 'Ready' status is True + - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0" + +- include: heketi_deploy_part1.yml + when: + - openshift_storage_glusterfs_heketi_is_native + - openshift_storage_glusterfs_heketi_deploy_is_missing + - openshift_storage_glusterfs_heketi_is_missing + +- name: Determine heketi URL + oc_obj: + namespace: "{{ openshift_storage_glusterfs_namespace }}" + state: list + kind: ep + selector: "glusterfs in (deploy-heketi-service, heketi-service)" + register: heketi_url + until: + - "heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip != ''" + - "heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port != ''" + delay: 10 + retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}" + when: + - openshift_storage_glusterfs_heketi_is_native + - openshift_storage_glusterfs_heketi_url is undefined + +- name: Set heketi URL + set_fact: + openshift_storage_glusterfs_heketi_url: "{{ heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip }}:{{ heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port }}" + when: + - openshift_storage_glusterfs_heketi_is_native + - openshift_storage_glusterfs_heketi_url is undefined + +- name: Verify heketi service + command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' cluster list" + changed_when: False + +- name: Generate topology file + template: + src: "{{ openshift.common.examples_content_version }}/topology.json.j2" + dest: "{{ mktemp.stdout }}/topology.json" + when: + - openshift_storage_glusterfs_is_native + - openshift_storage_glusterfs_heketi_topology_load + +- name: Load heketi topology + command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' topology load --json={{ mktemp.stdout }}/topology.json 2>&1" + register: topology_load + failed_when: "topology_load.rc != 0 or 'Unable' in topology_load.stdout" + when: + - openshift_storage_glusterfs_is_native + - 
diff --git a/roles/openshift_storage_glusterfs/templates/v1.6/topology.json.j2 b/roles/openshift_storage_glusterfs/templates/v1.6/topology.json.j2
new file mode 100644
index 000000000..eb5b4544f
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v1.6/topology.json.j2
@@ -0,0 +1,39 @@
+{
+  "clusters": [
+{%- set clusters = {} -%}
+{%- for node in groups.oo_glusterfs_to_config -%}
+  {%- set cluster = hostvars[node].glusterfs_cluster if 'glusterfs_cluster' in hostvars[node] else '1' -%}
+  {%- if cluster in clusters -%}
+    {%- set _dummy = clusters[cluster].append(node) -%}
+  {%- else -%}
+    {%- set _dummy = clusters.update({cluster: [ node, ]}) -%}
+  {%- endif -%}
+{%- endfor -%}
+{%- for cluster in clusters -%}
+    {
+      "nodes": [
+{%- for node in clusters[cluster] -%}
+        {
+          "node": {
+            "hostnames": {
+              "manage": [
+                "{{ hostvars[node].glusterfs_hostname | default(hostvars[node].openshift.common.hostname) }}"
+              ],
+              "storage": [
+                "{{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }}"
+              ]
+            },
+            "zone": {{ hostvars[node].glusterfs_zone | default(1) }}
+          },
+          "devices": [
+{%- for device in hostvars[node].glusterfs_devices -%}
+            "{{ device }}"{% if not loop.last %},{% endif %}
+{%- endfor -%}
+          ]
+        }{% if not loop.last %},{% endif %}
+{%- endfor -%}
+      ]
+    }{% if not loop.last %},{% endif %}
+{%- endfor -%}
+  ]
+}
-- 
cgit v1.2.3
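For three hypothetical hosts in the default cluster '1', each exposing a single device and no explicit zone, the topology template above renders JSON along these lines (one node entry shown; the other nodes follow the same shape):

```json
{
  "clusters": [
    {
      "nodes": [
        {
          "node": {
            "hostnames": {
              "manage": [ "node1.example.com" ],
              "storage": [ "192.0.2.11" ]
            },
            "zone": 1
          },
          "devices": [ "/dev/xvdc" ]
        }
      ]
    }
  ]
}
```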
From 044219e7509ac90360691d42780c73c5e849501a Mon Sep 17 00:00:00 2001
From: "Jose A. Rivera" <jarrpa@redhat.com>
Date: Tue, 14 Mar 2017 19:06:48 -0500
Subject: Integrate GlusterFS into OpenShift installation

Signed-off-by: Jose A. Rivera <jarrpa@redhat.com>
---
 filter_plugins/oo_filters.py                           | 17 +++++++++++++++++
 playbooks/byo/openshift-cluster/cluster_hosts.yml      |  2 ++
 playbooks/common/openshift-cluster/config.yml          |  4 ++++
 playbooks/common/openshift-cluster/evaluate_groups.yml | 13 +++++++++++++
 4 files changed, 36 insertions(+)

diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py
index b550bd16a..10c8600ba 100644
--- a/filter_plugins/oo_filters.py
+++ b/filter_plugins/oo_filters.py
@@ -773,6 +773,23 @@ def oo_persistent_volumes(hostvars, groups, persistent_volumes=None):
                                     fsType=filesystem,
                                     volumeID=volume_id)))
                         persistent_volumes.append(persistent_volume)
+                    elif kind == 'glusterfs':
+                        volume = params['volume']['name']
+                        size = params['volume']['size']
+                        access_modes = params['access']['modes']
+                        endpoints = params['glusterfs']['endpoints']
+                        path = params['glusterfs']['path']
+                        read_only = params['glusterfs']['readOnly']
+                        persistent_volume = dict(
+                            name="{0}-volume".format(volume),
+                            capacity=size,
+                            access_modes=access_modes,
+                            storage=dict(
+                                glusterfs=dict(
+                                    endpoints=endpoints,
+                                    path=path,
+                                    readOnly=read_only)))
+                        persistent_volumes.append(persistent_volume)
                     elif not (kind == 'object' or kind == 'dynamic'):
                         msg = "|failed invalid storage kind '{0}' for component '{1}'".format(
                             kind,
diff --git a/playbooks/byo/openshift-cluster/cluster_hosts.yml b/playbooks/byo/openshift-cluster/cluster_hosts.yml
index cb464cf0d..268a65415 100644
--- a/playbooks/byo/openshift-cluster/cluster_hosts.yml
+++ b/playbooks/byo/openshift-cluster/cluster_hosts.yml
@@ -13,6 +13,8 @@ g_new_node_hosts: "{{ groups.new_nodes | default([]) }}"
 
 g_nfs_hosts: "{{ groups.nfs | default([]) }}"
 
+g_glusterfs_hosts: "{{ groups.glusterfs | default([]) }}"
+
 g_all_hosts: "{{ g_master_hosts | union(g_node_hosts) | union(g_etcd_hosts)
                  | union(g_lb_hosts) | union(g_nfs_hosts)
                  | union(g_new_node_hosts)| union(g_new_master_hosts)
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
index 1b967b7f1..9e0295aaa 100644
--- a/playbooks/common/openshift-cluster/config.yml
+++ b/playbooks/common/openshift-cluster/config.yml
@@ -54,6 +54,10 @@
   tags:
   - node
 
+- include: ../openshift-glusterfs/config.yml
+  tags:
+  - glusterfs
+
 - include: openshift_hosted.yml
   tags:
   - hosted
diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml
index 45a4875a3..6aac70f63 100644
--- a/playbooks/common/openshift-cluster/evaluate_groups.yml
+++ b/playbooks/common/openshift-cluster/evaluate_groups.yml
@@ -29,6 +29,10 @@
       msg: The nfs group must be limited to one host
       when: "{{ (groups[g_nfs_hosts] | default([])) | length > 1 }}"
 
+    - fail:
+        msg: This playbook requires g_glusterfs_hosts to be set
+      when: "{{ g_glusterfs_hosts is not defined }}"
+
     - name: Evaluate oo_all_hosts
       add_host:
         name: "{{ item }}"
@@ -119,3 +123,12 @@
         ansible_become: "{{ g_sudo | default(omit) }}"
       with_items: "{{ g_nfs_hosts | default([]) }}"
       changed_when: no
+
+    - name: Evaluate oo_glusterfs_to_config
+      add_host:
+        name: "{{ item }}"
+        groups: oo_glusterfs_to_config
+        ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+        ansible_become: "{{ g_sudo | default(omit) }}"
+      with_items: "{{ g_glusterfs_hosts | default([]) }}"
+      changed_when: no
-- 
cgit v1.2.3
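The new `g_glusterfs_hosts` variable is read from a `[glusterfs]` inventory group, and the role's topology template consumes per-host `glusterfs_devices` (plus optional `glusterfs_ip`, `glusterfs_hostname`, `glusterfs_zone`, and `glusterfs_cluster`) from those hosts. A hypothetical inventory fragment in the style of the repo's example files:

```ini
[OSEv3:children]
masters
nodes
glusterfs

[glusterfs]
node1.example.com glusterfs_devices='[ "/dev/xvdc" ]'
node2.example.com glusterfs_devices='[ "/dev/xvdc" ]'
node3.example.com glusterfs_devices='[ "/dev/xvdc" ]'
```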
From 84f65590e8acebdc0f4be35dd6ab41ec5627420b Mon Sep 17 00:00:00 2001
From: "Jose A. Rivera" <jarrpa@redhat.com>
Date: Mon, 3 Apr 2017 11:16:11 -0500
Subject: Allow for GlusterFS to provide registry storage

Signed-off-by: Jose A. Rivera <jarrpa@redhat.com>
---
 inventory/byo/hosts.origin.example                 |  3 ++
 inventory/byo/hosts.ose.example                    |  3 ++
 roles/openshift_facts/library/openshift_facts.py   |  4 ++
 roles/openshift_hosted/tasks/registry/registry.yml |  6 ++-
 .../tasks/registry/storage/glusterfs.yml           | 51 ++++++++++++++++++++++
 5 files changed, 66 insertions(+), 1 deletion(-)
 create mode 100644 roles/openshift_hosted/tasks/registry/storage/glusterfs.yml

diff --git a/inventory/byo/hosts.origin.example b/inventory/byo/hosts.origin.example
index 27914e60a..6d7706013 100644
--- a/inventory/byo/hosts.origin.example
+++ b/inventory/byo/hosts.origin.example
@@ -426,6 +426,9 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
 #openshift_hosted_registry_storage_openstack_volumeID=3a650b4f-c8c5-4e0a-8ca5-eaee11f16c57
 #openshift_hosted_registry_storage_volume_size=10Gi
 #
+# Native GlusterFS Registry Storage
+#openshift_hosted_registry_storage_kind=glusterfs
+#
 # AWS S3
 # S3 bucket must already exist.
 #openshift_hosted_registry_storage_kind=object
diff --git a/inventory/byo/hosts.ose.example b/inventory/byo/hosts.ose.example
index f0269bff8..ef86c0922 100644
--- a/inventory/byo/hosts.ose.example
+++ b/inventory/byo/hosts.ose.example
@@ -426,6 +426,9 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
 #openshift_hosted_registry_storage_openstack_volumeID=3a650b4f-c8c5-4e0a-8ca5-eaee11f16c57
 #openshift_hosted_registry_storage_volume_size=10Gi
 #
+# Native GlusterFS Registry Storage
+#openshift_hosted_registry_storage_kind=glusterfs
+#
 # AWS S3
 #
 # S3 bucket must already exist.
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 7edf141e5..adeb85c3f 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -2155,6 +2155,10 @@ class OpenShiftFacts(object):
                     nfs=dict(
                         directory='/exports',
                         options='*(rw,root_squash)'),
+                    glusterfs=dict(
+                        endpoints='glusterfs-registry-endpoints',
+                        path='glusterfs-registry-volume',
+                        readOnly=False),
                     host=None,
                     access=dict(
                         modes=['ReadWriteMany']
diff --git a/roles/openshift_hosted/tasks/registry/registry.yml b/roles/openshift_hosted/tasks/registry/registry.yml
index 0b8042473..6e691c26f 100644
--- a/roles/openshift_hosted/tasks/registry/registry.yml
+++ b/roles/openshift_hosted/tasks/registry/registry.yml
@@ -109,7 +109,7 @@
       type: persistentVolumeClaim
       claim_name: "{{ openshift.hosted.registry.storage.volume.name }}-claim"
   when:
-  - openshift.hosted.registry.storage.kind | default(none) in ['nfs', 'openstack']
+  - openshift.hosted.registry.storage.kind | default(none) in ['nfs', 'openstack', 'glusterfs']
 
 - name: Create OpenShift registry
   oc_adm_registry:
@@ -123,3 +123,7 @@
     volume_mounts: "{{ openshift_hosted_registry_volumes }}"
     edits: "{{ openshift_hosted_registry_edits }}"
    force: "{{ True|bool in openshift_hosted_registry_force }}"
+
+- include: storage/glusterfs.yml
+  when:
+  - openshift.hosted.registry.storage.kind | default(none) == 'glusterfs'
diff --git a/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml b/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml
new file mode 100644
index 000000000..b18b24266
--- /dev/null
+++ b/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml
@@ -0,0 +1,51 @@
+---
+- name: Wait for registry pods
+  oc_obj:
+    namespace: "{{ openshift_hosted_registry_namespace }}"
+    state: list
+    kind: pod
+    selector: "{{ openshift_hosted_registry_name }}={{ openshift_hosted_registry_namespace }}"
+  register: registry_pods
+  until:
+  - "registry_pods.results.results[0]['items'] | count > 0"
+  # There must be as many matching pods with 'Ready' status True as there are expected replicas
+  - "registry_pods.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == openshift_hosted_registry_replicas | int"
+  delay: 10
+  retries: "{{ (600 / 10) | int }}"
+
+- name: Determine registry fsGroup
+  set_fact:
+    openshift_hosted_registry_fsgroup: "{{ registry_pods.results.results[0]['items'][0].spec.securityContext.fsGroup }}"
+
+- name: Create temp mount directory
+  command: mktemp -d /tmp/openshift-glusterfs-registry-XXXXXX
+  register: mktemp
+  changed_when: False
+  check_mode: no
+
+- name: Mount registry volume
+  mount:
+    state: mounted
+    fstype: glusterfs
+    src: "{{ groups.oo_glusterfs_to_config[0] }}:/{{ openshift.hosted.registry.storage.glusterfs.path }}"
+    name: "{{ mktemp.stdout }}"
+
+- name: Set registry volume permissions
+  file:
+    dest: "{{ mktemp.stdout }}"
+    state: directory
+    group: "{{ openshift_hosted_registry_fsgroup }}"
+    mode: "2775"
+    recurse: True
+
+- name: Unmount registry volume
+  mount:
+    state: unmounted
+    name: "{{ mktemp.stdout }}"
+
+- name: Delete temp mount directory
+  file:
+    dest: "{{ mktemp.stdout }}"
+    state: absent
+  changed_when: False
+  check_mode: no
-- 
cgit v1.2.3