22 files changed, 604 insertions, 9 deletions
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index 6895500ec..99fd69afc 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.7.0-0.185.0 ./
+3.7.0-0.187.0 ./
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index 83f0e27cb..f1ace9b22 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -10,7 +10,7 @@

 Name:           openshift-ansible
 Version:        3.7.0
-Release:        0.185.0%{?dist}
+Release:        0.187.0%{?dist}
 Summary:        Openshift and Atomic Enterprise Ansible
 License:        ASL 2.0
 URL:            https://github.com/openshift/openshift-ansible
@@ -285,6 +285,12 @@ Atomic OpenShift Utilities includes

 %changelog
+* Mon Oct 30 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.187.0
+- 
+
+* Sun Oct 29 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.186.0
+- 
+
 * Sat Oct 28 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.185.0
 - bug 1506073. Lower cpu request for logging when it exceeds limit
   (jcantril@redhat.com)
diff --git a/playbooks/common/openshift-glusterfs/config.yml b/playbooks/common/openshift-glusterfs/config.yml
index 80cda9e21..c2ae5f313 100644
--- a/playbooks/common/openshift-glusterfs/config.yml
+++ b/playbooks/common/openshift-glusterfs/config.yml
@@ -17,6 +17,11 @@
       tasks_from: firewall.yml
     when:
     - openshift_storage_glusterfs_is_native | default(True) | bool
+  - include_role:
+      name: openshift_storage_glusterfs
+      tasks_from: kernel_modules.yml
+    when:
+    - openshift_storage_glusterfs_is_native | default(True) | bool

 - name: Open firewall ports for GlusterFS registry nodes
   hosts: glusterfs_registry
@@ -26,6 +31,11 @@
       tasks_from: firewall.yml
     when:
     - openshift_storage_glusterfs_registry_is_native | default(True) | bool
+  - include_role:
+      name: openshift_storage_glusterfs
+      tasks_from: kernel_modules.yml
+    when:
+    - openshift_storage_glusterfs_registry_is_native | default(True) | bool

 - name: Configure GlusterFS
   hosts: oo_first_master
diff --git a/playbooks/common/openshift-master/additional_config.yml b/playbooks/common/openshift-master/additional_config.yml
index e1472ce38..350557f19 100644
--- a/playbooks/common/openshift-master/additional_config.yml
+++ b/playbooks/common/openshift-master/additional_config.yml
@@ -28,7 +28,7 @@
     when: openshift_use_manageiq | default(true) | bool
   - role: cockpit
     when:
-    - openshift.common.is_atomic
+    - not openshift.common.is_atomic | bool
     - deployment_type == 'openshift-enterprise'
     - osm_use_cockpit is undefined or osm_use_cockpit | bool
     - openshift.common.deployment_subtype != 'registry'
diff --git a/roles/docker/defaults/main.yml b/roles/docker/defaults/main.yml
index 3f0752f4c..fe938e52b 100644
--- a/roles/docker/defaults/main.yml
+++ b/roles/docker/defaults/main.yml
@@ -29,3 +29,6 @@ r_crio_os_firewall_deny: []
 r_crio_os_firewall_allow:
 - service: crio
   port: 10010/tcp
+
+
+openshift_docker_is_node_or_master: "{{ True if inventory_hostname in (groups['oo_masters_to_config']|default([])) or inventory_hostname in (groups['oo_nodes_to_config']|default([])) else False | bool }}"
diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml
index 5ea73568a..1539af53f 100644
--- a/roles/docker/tasks/main.yml
+++ b/roles/docker/tasks/main.yml
@@ -35,4 +35,4 @@
   include: systemcontainer_crio.yml
   when:
     - l_use_crio
-    - inventory_hostname in groups['oo_masters_to_config'] or inventory_hostname in groups['oo_nodes_to_config']
+    - openshift_docker_is_node_or_master | bool
diff --git a/roles/docker/tasks/systemcontainer_crio.yml b/roles/docker/tasks/systemcontainer_crio.yml
index 5a87813a0..67ede0d21 100644
--- a/roles/docker/tasks/systemcontainer_crio.yml
+++ b/roles/docker/tasks/systemcontainer_crio.yml
@@ -3,16 +3,16 @@
 # TODO: Much of this file is shared with container engine tasks
 - set_fact:
     l_insecure_crio_registries: "{{ '\"{}\"'.format('\", \"'.join(l2_docker_insecure_registries)) }}"
-  when: l2_docker_insecure_registries
+  when: l2_docker_insecure_registries | bool
 - set_fact:
     l_crio_registries: "{{ l2_docker_additional_registries + ['docker.io'] }}"
-  when: l2_docker_additional_registries
+  when: l2_docker_additional_registries | bool
 - set_fact:
     l_crio_registries: "{{ ['docker.io'] }}"
-  when: not l2_docker_additional_registries
+  when: not (l2_docker_additional_registries | bool)
 - set_fact:
     l_additional_crio_registries: "{{ '\"{}\"'.format('\", \"'.join(l_crio_registries)) }}"
-  when: l2_docker_additional_registries
+  when: l2_docker_additional_registries | bool

 - set_fact:
     l_openshift_image_tag: "{{ openshift_image_tag | string }}"
diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml
index 668a3f7e7..b98e281a3 100644
--- a/roles/openshift_logging/tasks/install_logging.yaml
+++ b/roles/openshift_logging/tasks/install_logging.yaml
@@ -36,6 +36,14 @@
   - openshift_logging_label_key != ""
   - openshift_logging_label_value is defined

+- name: Annotate Logging Project to allow overcommit
+  oc_edit:
+    kind: ns
+    name: "{{ openshift_logging_namespace }}"
+    separator: '#'
+    content:
+      metadata#annotations#quota.openshift.io/cluster-resource-override-enabled: "false"
+
 - name: Create logging cert directory
   file:
     path: "{{ openshift.common.config_base }}/logging"
diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2
index 40775571f..a1a0bfaa9 100644
--- a/roles/openshift_master/templates/master.yaml.v1.j2
+++ b/roles/openshift_master/templates/master.yaml.v1.j2
@@ -179,6 +179,11 @@ masterPublicURL: {{ openshift.master.public_api_url }}
 networkConfig:
   clusterNetworkCIDR: {{ openshift.master.sdn_cluster_network_cidr }}
   hostSubnetLength: {{ openshift.master.sdn_host_subnet_length }}
+{% if openshift.common.version_gte_3_7 | bool %}
+  clusterNetworks:
+  - cidr: {{ openshift.master.sdn_cluster_network_cidr }}
+    hostSubnetLength: {{ openshift.master.sdn_host_subnet_length }}
+{% endif %}
 {% if r_openshift_master_use_openshift_sdn or r_openshift_master_use_nuage or r_openshift_master_use_contiv or r_openshift_master_use_kuryr or r_openshift_master_sdn_network_plugin_name == 'cni' %}
   networkPluginName: {{ r_openshift_master_sdn_network_plugin_name_default }}
 {% endif %}
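
For context (not part of the commit): the docker role change above replaces an inline group-membership test with the new openshift_docker_is_node_or_master default. Below is a minimal, hypothetical playbook sketch of how that expression evaluates, assuming an inventory that defines oo_masters_to_config and oo_nodes_to_config groups; everything else in it is illustrative scaffolding.

# Illustrative only -- not part of this commit. Group names mirror the
# oo_masters_to_config / oo_nodes_to_config groups referenced above.
- hosts: all
  gather_facts: false
  vars:
    openshift_docker_is_node_or_master: >-
      {{ inventory_hostname in (groups['oo_masters_to_config'] | default([]))
         or inventory_hostname in (groups['oo_nodes_to_config'] | default([])) }}
  tasks:
  - debug:
      msg: "CRI-O system container tasks would be included on this host"
    when: openshift_docker_is_node_or_master | bool

Hosts that belong to neither group skip the CRI-O include, which is the behaviour the new default encodes.
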
diff --git a/roles/openshift_storage_glusterfs/files/v1.5/deploy-heketi-template.yml b/roles/openshift_storage_glusterfs/files/v1.5/deploy-heketi-template.yml
new file mode 100644
index 000000000..7b705c2d4
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v1.5/deploy-heketi-template.yml
@@ -0,0 +1,135 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+  name: deploy-heketi
+  labels:
+    glusterfs: heketi-template
+    deploy-heketi: support
+  annotations:
+    description: Bootstrap Heketi installation
+    tags: glusterfs,heketi,installation
+objects:
+- kind: Service
+  apiVersion: v1
+  metadata:
+    name: deploy-heketi-${CLUSTER_NAME}
+    labels:
+      glusterfs: deploy-heketi-${CLUSTER_NAME}-service
+      deploy-heketi: support
+    annotations:
+      description: Exposes Heketi service
+  spec:
+    ports:
+    - name: deploy-heketi-${CLUSTER_NAME}
+      port: 8080
+      targetPort: 8080
+    selector:
+      glusterfs: deploy-heketi-${CLUSTER_NAME}-pod
+- kind: Route
+  apiVersion: v1
+  metadata:
+    name: ${HEKETI_ROUTE}
+    labels:
+      glusterfs: deploy-heketi-${CLUSTER_NAME}-route
+      deploy-heketi: support
+  spec:
+    to:
+      kind: Service
+      name: deploy-heketi-${CLUSTER_NAME}
+- kind: DeploymentConfig
+  apiVersion: v1
+  metadata:
+    name: deploy-heketi-${CLUSTER_NAME}
+    labels:
+      glusterfs: deploy-heketi-${CLUSTER_NAME}-dc
+      deploy-heketi: support
+    annotations:
+      description: Defines how to deploy Heketi
+  spec:
+    replicas: 1
+    selector:
+      glusterfs: deploy-heketi-${CLUSTER_NAME}-pod
+    triggers:
+    - type: ConfigChange
+    strategy:
+      type: Recreate
+    template:
+      metadata:
+        name: deploy-heketi
+        labels:
+          glusterfs: deploy-heketi-${CLUSTER_NAME}-pod
+          deploy-heketi: support
+      spec:
+        serviceAccountName: heketi-${CLUSTER_NAME}-service-account
+        containers:
+        - name: heketi
+          image: ${IMAGE_NAME}:${IMAGE_VERSION}
+          env:
+          - name: HEKETI_USER_KEY
+            value: ${HEKETI_USER_KEY}
+          - name: HEKETI_ADMIN_KEY
+            value: ${HEKETI_ADMIN_KEY}
+          - name: HEKETI_EXECUTOR
+            value: ${HEKETI_EXECUTOR}
+          - name: HEKETI_FSTAB
+            value: /var/lib/heketi/fstab
+          - name: HEKETI_SNAPSHOT_LIMIT
+            value: '14'
+          - name: HEKETI_KUBE_GLUSTER_DAEMONSET
+            value: '1'
+          - name: HEKETI_KUBE_NAMESPACE
+            value: ${HEKETI_KUBE_NAMESPACE}
+          ports:
+          - containerPort: 8080
+          volumeMounts:
+          - name: db
+            mountPath: /var/lib/heketi
+          - name: config
+            mountPath: /etc/heketi
+          readinessProbe:
+            timeoutSeconds: 3
+            initialDelaySeconds: 3
+            httpGet:
+              path: /hello
+              port: 8080
+          livenessProbe:
+            timeoutSeconds: 3
+            initialDelaySeconds: 30
+            httpGet:
+              path: /hello
+              port: 8080
+        volumes:
+        - name: db
+        - name: config
+          secret:
+            secretName: heketi-${CLUSTER_NAME}-config-secret
+parameters:
+- name: HEKETI_USER_KEY
+  displayName: Heketi User Secret
+  description: Set secret for those creating volumes as type _user_
+- name: HEKETI_ADMIN_KEY
+  displayName: Heketi Administrator Secret
+  description: Set secret for administration of the Heketi service as user _admin_
+- name: HEKETI_EXECUTOR
+  displayName: heketi executor type
+  description: Set the executor type, kubernetes or ssh
+  value: kubernetes
+- name: HEKETI_KUBE_NAMESPACE
+  displayName: Namespace
+  description: Set the namespace where the GlusterFS pods reside
+  value: default
+- name: HEKETI_ROUTE
+  displayName: heketi route name
+  description: Set the hostname for the route URL
+  value: "heketi-glusterfs"
+- name: IMAGE_NAME
+  displayName: heketi container image name
+  required: True
+- name: IMAGE_VERSION
+  displayName: heketi container image version
+  required: True
+- name: CLUSTER_NAME
+  displayName: GlusterFS cluster name
+  description: A unique name to identify this heketi service, useful for running multiple heketi instances
+  value: glusterfs
diff --git a/roles/openshift_storage_glusterfs/files/v1.5/glusterfs-template.yml b/roles/openshift_storage_glusterfs/files/v1.5/glusterfs-template.yml
new file mode 100644
index 000000000..8c5e1ded3
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v1.5/glusterfs-template.yml
@@ -0,0 +1,136 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+  name: glusterfs
+  labels:
+    glusterfs: template
+  annotations:
+    description: GlusterFS DaemonSet template
+    tags: glusterfs
+objects:
+- kind: DaemonSet
+  apiVersion: extensions/v1beta1
+  metadata:
+    name: glusterfs-${CLUSTER_NAME}
+    labels:
+      glusterfs: ${CLUSTER_NAME}-daemonset
+    annotations:
+      description: GlusterFS DaemonSet
+      tags: glusterfs
+  spec:
+    selector:
+      matchLabels:
+        glusterfs: ${CLUSTER_NAME}-pod
+    template:
+      metadata:
+        name: glusterfs-${CLUSTER_NAME}
+        labels:
+          glusterfs: ${CLUSTER_NAME}-pod
+          glusterfs-node: pod
+      spec:
+        nodeSelector: "${{NODE_LABELS}}"
+        hostNetwork: true
+        containers:
+        - name: glusterfs
+          image: ${IMAGE_NAME}:${IMAGE_VERSION}
+          imagePullPolicy: IfNotPresent
+          volumeMounts:
+          - name: glusterfs-heketi
+            mountPath: "/var/lib/heketi"
+          - name: glusterfs-run
+            mountPath: "/run"
+          - name: glusterfs-lvm
+            mountPath: "/run/lvm"
+          - name: glusterfs-etc
+            mountPath: "/etc/glusterfs"
+          - name: glusterfs-logs
+            mountPath: "/var/log/glusterfs"
+          - name: glusterfs-config
+            mountPath: "/var/lib/glusterd"
+          - name: glusterfs-dev
+            mountPath: "/dev"
+          - name: glusterfs-misc
+            mountPath: "/var/lib/misc/glusterfsd"
+          - name: glusterfs-cgroup
+            mountPath: "/sys/fs/cgroup"
+            readOnly: true
+          - name: glusterfs-ssl
+            mountPath: "/etc/ssl"
+            readOnly: true
+          securityContext:
+            capabilities: {}
+            privileged: true
+          readinessProbe:
+            timeoutSeconds: 3
+            initialDelaySeconds: 40
+            exec:
+              command:
+              - "/bin/bash"
+              - "-c"
+              - systemctl status glusterd.service
+            periodSeconds: 25
+            successThreshold: 1
+            failureThreshold: 15
+          livenessProbe:
+            timeoutSeconds: 3
+            initialDelaySeconds: 40
+            exec:
+              command:
+              - "/bin/bash"
+              - "-c"
+              - systemctl status glusterd.service
+            periodSeconds: 25
+            successThreshold: 1
+            failureThreshold: 15
+          resources: {}
+          terminationMessagePath: "/dev/termination-log"
+        volumes:
+        - name: glusterfs-heketi
+          hostPath:
+            path: "/var/lib/heketi"
+        - name: glusterfs-run
+          emptyDir: {}
+        - name: glusterfs-lvm
+          hostPath:
+            path: "/run/lvm"
+        - name: glusterfs-etc
+          hostPath:
+            path: "/etc/glusterfs"
+        - name: glusterfs-logs
+          hostPath:
+            path: "/var/log/glusterfs"
+        - name: glusterfs-config
+          hostPath:
+            path: "/var/lib/glusterd"
+        - name: glusterfs-dev
+          hostPath:
+            path: "/dev"
+        - name: glusterfs-misc
+          hostPath:
+            path: "/var/lib/misc/glusterfsd"
+        - name: glusterfs-cgroup
+          hostPath:
+            path: "/sys/fs/cgroup"
+        - name: glusterfs-ssl
+          hostPath:
+            path: "/etc/ssl"
+        restartPolicy: Always
+        terminationGracePeriodSeconds: 30
+        dnsPolicy: ClusterFirst
+        securityContext: {}
+parameters:
+- name: NODE_LABELS
+  displayName: Daemonset Node Labels
+  description: Labels which define the daemonset node selector. Must contain at least one label of the format \'glusterfs=<CLUSTER_NAME>-host\'
+  value: '{ "glusterfs": "storage-host" }'
+- name: IMAGE_NAME
+  displayName: GlusterFS container image name
+  required: True
+- name: IMAGE_VERSION
+  displayName: GlusterFS container image version
+  required: True
+- name: CLUSTER_NAME
+  displayName: GlusterFS cluster name
+  description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances
+  value: storage
diff --git a/roles/openshift_storage_glusterfs/files/v1.5/heketi-template.yml b/roles/openshift_storage_glusterfs/files/v1.5/heketi-template.yml
new file mode 100644
index 000000000..61b6a8c13
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v1.5/heketi-template.yml
@@ -0,0 +1,134 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+  name: heketi
+  labels:
+    glusterfs: heketi-template
+  annotations:
+    description: Heketi service deployment template
+    tags: glusterfs,heketi
+objects:
+- kind: Service
+  apiVersion: v1
+  metadata:
+    name: heketi-${CLUSTER_NAME}
+    labels:
+      glusterfs: heketi-${CLUSTER_NAME}-service
+    annotations:
+      description: Exposes Heketi service
+  spec:
+    ports:
+    - name: heketi
+      port: 8080
+      targetPort: 8080
+    selector:
+      glusterfs: heketi-${CLUSTER_NAME}-pod
+- kind: Route
+  apiVersion: v1
+  metadata:
+    name: ${HEKETI_ROUTE}
+    labels:
+      glusterfs: heketi-${CLUSTER_NAME}-route
+  spec:
+    to:
+      kind: Service
+      name: heketi-${CLUSTER_NAME}
+- kind: DeploymentConfig
+  apiVersion: v1
+  metadata:
+    name: heketi-${CLUSTER_NAME}
+    labels:
+      glusterfs: heketi-${CLUSTER_NAME}-dc
+    annotations:
+      description: Defines how to deploy Heketi
+  spec:
+    replicas: 1
+    selector:
+      glusterfs: heketi-${CLUSTER_NAME}-pod
+    triggers:
+    - type: ConfigChange
+    strategy:
+      type: Recreate
+    template:
+      metadata:
+        name: heketi-${CLUSTER_NAME}
+        labels:
+          glusterfs: heketi-${CLUSTER_NAME}-pod
+      spec:
+        serviceAccountName: heketi-${CLUSTER_NAME}-service-account
+        containers:
+        - name: heketi
+          image: ${IMAGE_NAME}:${IMAGE_VERSION}
+          imagePullPolicy: IfNotPresent
+          env:
+          - name: HEKETI_USER_KEY
+            value: ${HEKETI_USER_KEY}
+          - name: HEKETI_ADMIN_KEY
+            value: ${HEKETI_ADMIN_KEY}
+          - name: HEKETI_EXECUTOR
+            value: ${HEKETI_EXECUTOR}
+          - name: HEKETI_FSTAB
+            value: /var/lib/heketi/fstab
+          - name: HEKETI_SNAPSHOT_LIMIT
+            value: '14'
+          - name: HEKETI_KUBE_GLUSTER_DAEMONSET
+            value: '1'
+          - name: HEKETI_KUBE_NAMESPACE
+            value: ${HEKETI_KUBE_NAMESPACE}
+          ports:
+          - containerPort: 8080
+          volumeMounts:
+          - name: db
+            mountPath: /var/lib/heketi
+          - name: config
+            mountPath: /etc/heketi
+          readinessProbe:
+            timeoutSeconds: 3
+            initialDelaySeconds: 3
+            httpGet:
+              path: /hello
+              port: 8080
+          livenessProbe:
+            timeoutSeconds: 3
+            initialDelaySeconds: 30
+            httpGet:
+              path: /hello
+              port: 8080
+        volumes:
+        - name: db
+          glusterfs:
+            endpoints: heketi-db-${CLUSTER_NAME}-endpoints
+            path: heketidbstorage
+        - name: config
+          secret:
+            secretName: heketi-${CLUSTER_NAME}-config-secret
+parameters:
+- name: HEKETI_USER_KEY
+  displayName: Heketi User Secret
+  description: Set secret for those creating volumes as type _user_
+- name: HEKETI_ADMIN_KEY
+  displayName: Heketi Administrator Secret
+  description: Set secret for administration of the Heketi service as user _admin_
+- name: HEKETI_EXECUTOR
+  displayName: heketi executor type
+  description: Set the executor type, kubernetes or ssh
+  value: kubernetes
+- name: HEKETI_KUBE_NAMESPACE
+  displayName: Namespace
+  description: Set the namespace where the GlusterFS pods reside
+  value: default
+- name: HEKETI_ROUTE
+  displayName: heketi route name
+  description: Set the hostname for the route URL
+  value: "heketi-glusterfs"
+- name: IMAGE_NAME
+  displayName: heketi container image name
+  required: True
+- name: IMAGE_VERSION
+  displayName: heketi container image version
+  required: True
+- name: CLUSTER_NAME
+  displayName: GlusterFS cluster name
+  description: A unique name to identify this heketi service, useful for running multiple heketi instances
+  value: glusterfs
diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
index 074904bec..54a6dd7c3 100644
--- a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
+++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
@@ -1,6 +1,6 @@
 ---
 - name: Create heketi DB volume
-  command: "{{ glusterfs_heketi_client }} setup-openshift-heketi-storage --image {{ glusterfs_heketi_image}}:{{ glusterfs_heketi_version }} --listfile /tmp/heketi-storage.json"
+  command: "{{ glusterfs_heketi_client }} setup-openshift-heketi-storage --listfile /tmp/heketi-storage.json"
   register: setup_storage

 - name: Copy heketi-storage list
diff --git a/roles/openshift_storage_glusterfs/tasks/kernel_modules.yml b/roles/openshift_storage_glusterfs/tasks/kernel_modules.yml
new file mode 100644
index 000000000..030fa81c9
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/tasks/kernel_modules.yml
@@ -0,0 +1,12 @@
+---
+- name: Ensure device mapper modules loaded
+  template:
+    src: glusterfs.conf
+    dest: /etc/modules-load.d/glusterfs.conf
+  register: km
+
+- name: load kernel modules
+  systemd:
+    name: systemd-modules-load.service
+    state: restarted
+  when: km | changed
diff --git a/roles/openshift_storage_glusterfs/templates/glusterfs.conf b/roles/openshift_storage_glusterfs/templates/glusterfs.conf
new file mode 100644
index 000000000..dd4d6e6f7
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/glusterfs.conf
@@ -0,0 +1,4 @@
+#{{ ansible_managed }}
+dm_thin_pool
+dm_snapshot
+dm_mirror
\ No newline at end of file
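
For context (not part of the commit): the new kernel_modules.yml task writes the glusterfs.conf template above into /etc/modules-load.d/ and restarts systemd-modules-load when the file changes. Below is a small, hypothetical check playbook, assuming the GlusterFS hosts live in a group named glusterfs, that verifies the three device-mapper modules ended up loaded.

# Illustrative only -- not part of this commit. The 'glusterfs' group name
# is an assumption; the module names come from the glusterfs.conf template.
- hosts: glusterfs
  gather_facts: false
  tasks:
  - name: Fail if a required device-mapper module is not loaded
    # grep exits non-zero (and the task fails) when the module is absent
    command: grep -w "^{{ item }}" /proc/modules
    changed_when: false
    loop:
    - dm_thin_pool
    - dm_snapshot
    - dm_mirror
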
diff --git a/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-registry-endpoints.yml.j2 b/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-registry-endpoints.yml.j2
new file mode 100644
index 000000000..11c9195bb
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-registry-endpoints.yml.j2
@@ -0,0 +1,12 @@
+---
+apiVersion: v1
+kind: Endpoints
+metadata:
+  name: glusterfs-{{ glusterfs_name }}-endpoints
+subsets:
+- addresses:
+{% for node in glusterfs_nodes %}
+  - ip: {{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }}
+{% endfor %}
+  ports:
+  - port: 1
diff --git a/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-registry-service.yml.j2 b/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-registry-service.yml.j2
new file mode 100644
index 000000000..3f869d2b7
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-registry-service.yml.j2
@@ -0,0 +1,10 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: glusterfs-{{ glusterfs_name }}-endpoints
+spec:
+  ports:
+  - port: 1
+status:
+  loadBalancer: {}
diff --git a/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-storageclass.yml.j2
new file mode 100644
index 000000000..454e84aaf
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-storageclass.yml.j2
@@ -0,0 +1,13 @@
+---
+apiVersion: storage.k8s.io/v1beta1
+kind: StorageClass
+metadata:
+  name: glusterfs-{{ glusterfs_name }}
+provisioner: kubernetes.io/glusterfs
+parameters:
+  resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}"
+  restuser: "admin"
+{% if glusterfs_heketi_admin_key is defined %}
+  secretNamespace: "{{ glusterfs_namespace }}"
+  secretName: "heketi-{{ glusterfs_name }}-admin-secret"
+{%- endif -%}
diff --git a/roles/openshift_storage_glusterfs/templates/v1.5/heketi-endpoints.yml.j2 b/roles/openshift_storage_glusterfs/templates/v1.5/heketi-endpoints.yml.j2
new file mode 100644
index 000000000..99cbdf748
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v1.5/heketi-endpoints.yml.j2
@@ -0,0 +1,12 @@
+---
+apiVersion: v1
+kind: Endpoints
+metadata:
+  name: heketi-db-{{ glusterfs_name }}-endpoints
+subsets:
+- addresses:
+{% for node in glusterfs_nodes %}
+  - ip: {{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }}
+{% endfor %}
+  ports:
+  - port: 1
diff --git a/roles/openshift_storage_glusterfs/templates/v1.5/heketi-service.yml.j2 b/roles/openshift_storage_glusterfs/templates/v1.5/heketi-service.yml.j2
new file mode 100644
index 000000000..dcb896441
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v1.5/heketi-service.yml.j2
@@ -0,0 +1,10 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: heketi-db-{{ glusterfs_name }}-endpoints
+spec:
+  ports:
+  - port: 1
+status:
+  loadBalancer: {}
diff --git a/roles/openshift_storage_glusterfs/templates/v1.5/heketi.json.j2 b/roles/openshift_storage_glusterfs/templates/v1.5/heketi.json.j2
new file mode 100644
index 000000000..579b11bb7
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v1.5/heketi.json.j2
@@ -0,0 +1,36 @@
+{
+	"_port_comment": "Heketi Server Port Number",
+	"port" : "8080",
+
+	"_use_auth": "Enable JWT authorization. Please enable for deployment",
+	"use_auth" : false,
+
+	"_jwt" : "Private keys for access",
+	"jwt" : {
+		"_admin" : "Admin has access to all APIs",
+		"admin" : {
+			"key" : "My Secret"
+		},
+		"_user" : "User only has access to /volumes endpoint",
+		"user" : {
+			"key" : "My Secret"
+		}
+	},
+
+	"_glusterfs_comment": "GlusterFS Configuration",
+	"glusterfs" : {
+
+		"_executor_comment": "Execute plugin. Possible choices: mock, kubernetes, ssh",
+		"executor" : "{{ glusterfs_heketi_executor }}",
+
+		"_db_comment": "Database file name",
+		"db" : "/var/lib/heketi/heketi.db",
+
+		"sshexec" : {
+			"keyfile" : "/etc/heketi/private_key",
+			"port" : "{{ glusterfs_heketi_ssh_port }}",
+			"user" : "{{ glusterfs_heketi_ssh_user }}",
+			"sudo" : {{ glusterfs_heketi_ssh_sudo | lower }}
+		}
+	}
+}
diff --git a/roles/openshift_storage_glusterfs/templates/v1.5/topology.json.j2 b/roles/openshift_storage_glusterfs/templates/v1.5/topology.json.j2
new file mode 100644
index 000000000..d6c28f6dd
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v1.5/topology.json.j2
@@ -0,0 +1,49 @@
+{
+  "clusters": [
+{%- set clusters = {} -%}
+{%- for node in glusterfs_nodes -%}
+  {%- set cluster = hostvars[node].glusterfs_cluster if 'glusterfs_cluster' in node else '1' -%}
+  {%- if cluster in clusters -%}
+    {%- set _dummy = clusters[cluster].append(node) -%}
+  {%- else -%}
+    {%- set _dummy = clusters.update({cluster: [ node, ]}) -%}
+  {%- endif -%}
+{%- endfor -%}
+{%- for cluster in clusters -%}
+    {
+      "nodes": [
+{%- for node in clusters[cluster] -%}
+        {
+          "node": {
+            "hostnames": {
+              "manage": [
+{%- if 'glusterfs_hostname' in hostvars[node] -%}
+                "{{ hostvars[node].glusterfs_hostname }}"
+{%- elif 'openshift' in hostvars[node] -%}
+                "{{ hostvars[node].openshift.node.nodename }}"
+{%- else -%}
+                "{{ node }}"
+{%- endif -%}
+              ],
+              "storage": [
+{%- if 'glusterfs_ip' in hostvars[node] -%}
+                "{{ hostvars[node].glusterfs_ip }}"
+{%- else -%}
+                "{{ hostvars[node].openshift.common.ip }}"
+{%- endif -%}
+              ]
+            },
+            "zone": {{ hostvars[node].glusterfs_zone | default(1) }}
+          },
+          "devices": [
+{%- for device in hostvars[node].glusterfs_devices -%}
+            "{{ device }}"{% if not loop.last %},{% endif %}
+{%- endfor -%}
+          ]
+        }{% if not loop.last %},{% endif %}
+{%- endfor -%}
+      ]
+    }{% if not loop.last %},{% endif %}
+{%- endfor -%}
+  ]
+}
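
For context (not part of the commit): topology.json.j2 above reads per-host variables (glusterfs_ip, glusterfs_zone, glusterfs_devices, and optionally glusterfs_cluster / glusterfs_hostname) from hostvars. Below is a hypothetical YAML inventory fragment of the shape it expects; the host names, addresses, and device paths are placeholders.

# Illustrative only -- not part of this commit.
all:
  children:
    glusterfs:
      hosts:
        node1.example.com:
          glusterfs_ip: 192.0.2.11
          glusterfs_zone: 1
          glusterfs_devices:
          - /dev/sdb
          - /dev/sdc
        node2.example.com:
          glusterfs_ip: 192.0.2.12
          glusterfs_zone: 2
          glusterfs_devices:
          - /dev/sdb

Each such host becomes one node entry in the rendered topology, with its glusterfs_devices list copied into that node's devices array.
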
