Diffstat (limited to 'roles')
99 files changed, 2104 insertions, 0 deletions
diff --git a/roles/ands_facts/README b/roles/ands_facts/README new file mode 100644 index 0000000..09abd03 --- /dev/null +++ b/roles/ands_facts/README @@ -0,0 +1,7 @@ +This is a simplified Ands role which tries to detect the storage configuration and set the facts required by other +roles. + +Facts: + - ands_data_vg + - ands_data_path + 
\ No newline at end of file diff --git a/roles/ands_facts/defaults/main.yml b/roles/ands_facts/defaults/main.yml new file mode 100644 index 0000000..257685d --- /dev/null +++ b/roles/ands_facts/defaults/main.yml @@ -0,0 +1,11 @@ +ands_data_device_default_threshold: 10 + +ands_empty_lv: { 'vg': '' } + +ands_data_lv: "ands_data" +ands_data_vg: "{{ ( ansible_lvm['lvs'][ands_data_lv] | default(ands_empty_lv) )['vg'] }}" +ands_heketi_lv: "ands_heketi" +ands_heketi_vg: "{{ ( ansible_lvm['lvs'][ands_heketi_lv] | default(ands_empty_lv) )['vg'] }}" + +ands_storage_servers: "{{ groups.ands_storage_servers | map('extract', hostvars, 'ands_storage_hostname') | list }}" +#openshift_storage_nodes: "{{ groups.storage_nodes | map('extract', hostvars, 'ands_storage_hostname') | list }}" diff --git a/roles/ands_facts/tasks/detect_data_path.yml b/roles/ands_facts/tasks/detect_data_path.yml new file mode 100644 index 0000000..0837e12 --- /dev/null +++ b/roles/ands_facts/tasks/detect_data_path.yml @@ -0,0 +1,10 @@ +--- +- name: Try to detect ands_data_path +  set_fact: ands_data_path="{{ item.mount }}" +  with_items: "{{ ansible_mounts }}" +  no_log: true +  when:  +    - not ands_data_path is defined +    - ansible_lvm.lvs[ands_data_lv] is defined +    - ansible_lvm.lvs[ands_data_lv].size_g > ( ands_data_device_threshold | default(ands_data_device_default_threshold) ) +    - item.device == "/dev/mapper/{{ands_data_vg}}-{{ands_data_lv}}" diff --git a/roles/ands_facts/tasks/main.yml b/roles/ands_facts/tasks/main.yml new file mode 100644 index 0000000..52cc5bc --- /dev/null +++ b/roles/ands_facts/tasks/main.yml @@ -0,0 +1,35 @@ +- include_vars: dir="vars" + +- include: detect_data_path.yml +  when: not ands_data_path is defined + +- name: Detect Heketi +  set_fact: ands_storage_domains="{{ ands_storage_domains | union([ands_heketi_domain]) }}" +  when: ansible_lvm.lvs[ands_heketi_lv] is defined + +- name: Set some facts +  set_fact: +    ands_storage_servers: "{{ ands_storage_servers }}" + +- name: Set some facts +  set_fact: +    ands_data_vg: "{{ ands_data_vg }}" +  when: ands_data_vg != "" + +- name: Set some facts +  set_fact: +    ands_data_lv: "{{ ands_data_lv }}" +  when: ands_data_lv != "" + +- name: Set some facts +  set_fact: +    ands_heketi_vg: "{{ ands_heketi_vg }}" +  when: ands_heketi_vg != "" + +- name: Set some facts +  set_fact: +    ands_heketi_lv: "{{ ands_heketi_lv }}" +  when: ands_heketi_lv != "" + +#- command: yum-complete-transaction --cleanup-only + diff --git a/roles/ands_facts/vars b/roles/ands_facts/vars new file mode 120000 index 0000000..c56a6fe --- /dev/null +++ b/roles/ands_facts/vars @@ -0,0 +1 @@ +../../setup/configs/
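The facts published by ands_facts (ands_data_vg, ands_data_path, ands_storage_servers, ...) are meant to be consumed by the other roles. A minimal sketch of such a play, assuming the ands_storage_servers group referenced in the defaults above; the play itself is not part of this commit:

    ---
    # Hypothetical play: run ands_facts first, then use the detected facts.
    - hosts: ands_storage_servers
      roles:
        - role: ands_facts
      tasks:
        - name: Report the detected Ands data volume
          debug:
            msg: "VG {{ ands_data_vg }} is mounted at {{ ands_data_path }}"
          when: ands_data_path is defined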
\ No newline at end of file diff --git a/roles/ands_kaas/defaults/main.yml b/roles/ands_kaas/defaults/main.yml new file mode 100644 index 0000000..3835453 --- /dev/null +++ b/roles/ands_kaas/defaults/main.yml @@ -0,0 +1,11 @@ +kaas_resync: false +kaas_projects: "{{ ands_openshift_projects.keys() }}" + +kaas_template_root: "{{ ands_paths.provision }}/kaas/" + +kaas_glusterfs_endpoints: gfs +kaas_openshift_volumes: "{{ ands_openshift_volumes }}" + +kaas_default_volume_capacity: "1Ti" +kaas_default_file_owner: root +kaas_default_file_group: root diff --git a/roles/ands_kaas/tasks/file.yml b/roles/ands_kaas/tasks/file.yml new file mode 100644 index 0000000..9a36e74 --- /dev/null +++ b/roles/ands_kaas/tasks/file.yml @@ -0,0 +1,9 @@ +--- +- name: "Setting up files in {{ path }}" +  file:  +    path: "{{ path }}"  +    recurse: "{{ file.recurse | default(true) }}"  +    mode: "{{ file.mode | default( ((file.state | default('directory')) == 'directory') | ternary('0755', '0644') ) }}" +    owner: "{{ file.owner | default(kaas_project_config.file_owner) | default(kaas_default_file_owner) }}"  +    group: "{{ file.group | default(kaas_project_config.file_group) | default(kaas_default_file_group) }}" +    state: "{{ file.state | default('directory') }}" diff --git a/roles/ands_kaas/tasks/keys.yml b/roles/ands_kaas/tasks/keys.yml new file mode 100644 index 0000000..2096c75 --- /dev/null +++ b/roles/ands_kaas/tasks/keys.yml @@ -0,0 +1,37 @@ +--- +- name: Try to locate pubkey file +  set_fact: "kaas_{{ pod.key }}_pubkey={{ lookup('file', item) }}" +  with_first_found: +    - paths: +        - "{{ kaas_project_path }}/keys/" +      files: +        -  "{{ pod.key }}.crt" +        -  "{{ pod.key }}.pub" +        -  "{{ pod.value.service.host | default('default') }}.crt" +        -  "{{ pod.value.service.host | default('default') }}.pub" +      skip: true + +- name: Try to locate privkey file +  set_fact: "kaas_{{ pod.key }}_privkey={{ lookup('file', item) }}" +  with_first_found: +    - paths: +        - "{{ kaas_project_path }}/keys/" +      files: +        - "{{ pod.key }}.key" +        - "{{ pod.key }}.pem" +        - "{{ pod.value.service.host | default('default') }}.key" +        - "{{ pod.value.service.host | default('default') }}.pem" +      skip: true + +- name: Try to locate CA file +  set_fact: "kaas_{{ pod.key }}_ca={{ lookup('file', item) }}" +  with_first_found: +    - paths: +        - "{{ kaas_project_path }}/keys/" +      files: +        - "{{ pod.key }}.ca" +        - "{{ pod.value.service.host | default('default') }}.ca" +        - ca-bundle.pem +        - ca.pem +        - ca.crt +      skip: true diff --git a/roles/ands_kaas/tasks/main.yml b/roles/ands_kaas/tasks/main.yml new file mode 100644 index 0000000..c9fb857 --- /dev/null +++ b/roles/ands_kaas/tasks/main.yml @@ -0,0 +1,12 @@ +--- +- name: Provision OpenShift resources & configurations +#  include: only_templates.yml +  include: project.yml +  run_once: true +  delegate_to: "{{ groups.masters[0] }}" +  with_items: "{{ kaas_projects }}" +  loop_control: +    loop_var: kaas_project +  vars: +    kaas_template_path: "{{ kaas_template_root }}/{{ kaas_project }}" +    kaas_project_path: "{{playbook_dir}}/projects/{{ kaas_project }}" diff --git a/roles/ands_kaas/tasks/oc.yml b/roles/ands_kaas/tasks/oc.yml new file mode 100644 index 0000000..d3504f8 --- /dev/null +++ b/roles/ands_kaas/tasks/oc.yml @@ -0,0 +1,10 @@ +--- +- name: Configure KaaS resources +  include_role:  +    name: openshift_resource +    tasks_from: command.yml +  vars:  
+    resource: "{{ ocitem.resource | default('') }}" +    command: "{{ ocitem.oc }}" +    project: "{{ kaas_project }}" +    recreate: "{{ ocitem.recreate | default(false) }}" diff --git a/roles/ands_kaas/tasks/ocitem.yml b/roles/ands_kaas/tasks/ocitem.yml new file mode 100644 index 0000000..f21e8cd --- /dev/null +++ b/roles/ands_kaas/tasks/ocitem.yml @@ -0,0 +1,13 @@ +--- +- name: OpenShift templates +  include: templates.yml +  run_once: true +  vars: +    kaas_template_glob: "{{ ocitem.template }}" +  when: ocitem.template is defined + +- name: OpenShift commands +  include: oc.yml +  delegate_to: "{{ groups.masters[0] }}" +  run_once: true +  when: ocitem.oc is defined diff --git a/roles/ands_kaas/tasks/ocscript.yml b/roles/ands_kaas/tasks/ocscript.yml new file mode 100644 index 0000000..4927de4 --- /dev/null +++ b/roles/ands_kaas/tasks/ocscript.yml @@ -0,0 +1,8 @@ +--- +- include: ocitem.yml +  delegate_to: "{{ groups.masters[0] }}" +  run_once: true +  with_items: "{{ kaas_project_config.oc }}" +  loop_control: +    loop_var: ocitem +  
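The ocitem.yml/oc.yml pair above iterates over a per-project 'oc' list in which each entry either renders a template glob or runs an oc command through the openshift_resource role. As a rough illustration, the project configuration loaded into kaas_project_config might carry entries of the following shape; all names and values here are invented:

    # Hypothetical fragment of a project's vars, as seen under kaas_project_config.
    oc:
      - template: "0-*"                                    # handled by templates.yml via kaas_template_glob
      - oc: "adm policy add-role-to-user edit developer"   # handled by oc.yml
        resource: "rolebinding/edit"                       # optional resource name passed to openshift_resource
        recreate: false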
\ No newline at end of file diff --git a/roles/ands_kaas/tasks/project.yml b/roles/ands_kaas/tasks/project.yml new file mode 100644 index 0000000..002596b --- /dev/null +++ b/roles/ands_kaas/tasks/project.yml @@ -0,0 +1,76 @@ +--- +- name: Load global variables +  include_vars: "{{kaas_project_path}}/vars/globals.yml"  +  when: "'{{kaas_project_path}}/vars/globals.yml' | is_file" + +- name: Load variables +  include_vars: dir="{{kaas_project_path}}/vars" name="kaas_project_config" +  when: "'{{kaas_project_path}}/vars' | is_dir" + +- name: Ensure OpenShift template directory exists +  file: path="{{ kaas_template_path }}" state="directory" mode=0755 owner=root group=root + +- name: Configure KaaS volumes +  include: volume.yml +  run_once: true +  delegate_to: "{{ groups.masters[0] }}" +  with_dict: "{{ kaas_project_config.volumes | default(kaas_openshift_volumes) }}" +  loop_control: +    loop_var: osv +  vars: +    query: "[*].volumes.{{osv.value.volume}}.mount" +    mntpath: "{{ (ands_storage_domains | json_query(query)) }}" +    path: "{{ mntpath[0] ~ (osv.value.path | default('')) }}" +    name: "{{osv.key}}" +    volume: "{{osv.value}}" +  when: ( mntpath | length ) > 0 + +- name: Copy static configuration +  include: sync_all.yml +  run_once: true +  delegate_to: "{{ groups.masters[0] }}" +  with_items: "{{ lookup('pipe', search).split('\n') }}" +  loop_control: +    loop_var: osv_path +  vars: +    search: "find {{ kaas_project_path }}/files/ -type d -mindepth 1 -maxdepth 1" +    osv: "{{ osv_path | basename }}"     +    pvar: "kaas_{{ osv }}_path" +    local_path: "{{ osv_path }}" +    remote_path: "{{ hostvars[inventory_hostname][pvar] }}" +  when: +    - osv in kaas_openshift_volumes +    - hostvars[inventory_hostname][pvar] is defined + +- name: Configure KaaS files +  include: file.yml +  run_once: true +  delegate_to: "{{ groups.masters[0] }}" +  with_items: "{{ kaas_project_config.files | default(ands_openshift_files) }}" +  loop_control: +    loop_var: file +  vars: +    pvar: "kaas_{{ file.osv }}_path" +    path: "{{ hostvars[inventory_hostname][pvar] }}/{{ file.path }}" +  when: file.osv in ( kaas_project_config.volumes | default(kaas_openshift_volumes) ) + +- name: Load OpenSSL keys +  include: keys.yml +  delegate_to: "{{ groups.masters[0] }}" +  run_once: true +  with_dict: "{{ kaas_project_config.pods }}" +  loop_control: +    loop_var: pod + +- name: "Run OC script" +  include: ocscript.yml +  delegate_to: "{{ groups.masters[0] }}" +  run_once: true +  when: kaas_project_config.oc is defined + +- name: "Configure all templates" +  include: templates.yml +  delegate_to: "{{ groups.masters[0] }}" +  run_once: true +  when: kaas_project_config.oc is undefined + diff --git a/roles/ands_kaas/tasks/sync.yml b/roles/ands_kaas/tasks/sync.yml new file mode 100644 index 0000000..399cb66 --- /dev/null +++ b/roles/ands_kaas/tasks/sync.yml @@ -0,0 +1,8 @@ +--- +- name: Check if already exists +  stat: path="{{ item_dest }}"  +  register: result + +- name: "Sync '{{ item_name }}'" +  synchronize: src="{{ item_src }}" dest="{{ remote_path }}/" archive=yes +  when: (result.stat.exists == False) or (kaas_resync | default(false)) diff --git a/roles/ands_kaas/tasks/sync_all.yml b/roles/ands_kaas/tasks/sync_all.yml new file mode 100644 index 0000000..58a1710 --- /dev/null +++ b/roles/ands_kaas/tasks/sync_all.yml @@ -0,0 +1,13 @@ +#  If delegation is enabled, synchronize will look from files on delegated host not locally + +- name: "Analyze '{{ local_path | basename }}'" +#  debug: 
msg="{{ local_path }} - {{ item_name }} - {{ item }}" +  include: sync.yml +  run_once: true +  with_items: "{{ lookup('pipe', filesearch).split('\n') }}" +  vars: +    filesearch: "find '{{ local_path }}' -mindepth 1 -maxdepth 1" +    item_name: "{{ item | basename }}" +    item_src: "{{ local_path }}/{{ item_name }}" +    item_dest: "{{ remote_path }}/{{ item_name }}" +  when: item != "" diff --git a/roles/ands_kaas/tasks/template.yml b/roles/ands_kaas/tasks/template.yml new file mode 100644 index 0000000..6a81dd7 --- /dev/null +++ b/roles/ands_kaas/tasks/template.yml @@ -0,0 +1,17 @@ +- name: Populate template +  template: src="{{ item }}" dest="{{ kaas_template_path }}/{{ item | basename | regex_replace('\.j2','') }}" owner=root group=root mode="0644" +  register: result +  with_first_found: +    - paths: +        - "{{ role_path }}/templates/" +        - "{{ kaas_project_path }}/templates/" +      files: +        - "{{ tmpl_name }}" + +- name: Configure KaaS resources +  include_role: name="openshift_resource" +  vars:  +    template: "{{ tmpl_name | basename | regex_replace('\\.j2','') }}" +    template_path: "{{ kaas_template_path }}" +    project: "{{ kaas_project }}" +    recreate: "{{ result | changed | ternary (true, false) }}" diff --git a/roles/ands_kaas/tasks/templates.yml b/roles/ands_kaas/tasks/templates.yml new file mode 100644 index 0000000..75d43f3 --- /dev/null +++ b/roles/ands_kaas/tasks/templates.yml @@ -0,0 +1,20 @@ +--- +# Sorting is not enforeced +- name: "Find KaaS templates" +  command: "echo {{ item | quote }}" +  register: results +  changed_when: false +  with_fileglob: +    - "{{ role_path }}/templates/{{ kaas_template_glob | default('*') }}.j2" +    - "{{ kaas_project_path }}/templates/{{ kaas_template_glob | default('*') }}.j2" + +- name: "Sort and execute KaaS templates" +  include: "template.yml" +  delegate_to: "{{ groups.masters[0] }}" +  run_once: true +  with_items: "{{ sorted_tmpl }}" +  vars: +    sorted_tmpl: "{{ results | json_query('results[*].stdout_lines') | sum(start=[]) | map('basename') | sort | unique }}" +  loop_control: +    loop_var: tmpl_name + diff --git a/roles/ands_kaas/tasks/volume.yml b/roles/ands_kaas/tasks/volume.yml new file mode 100644 index 0000000..b82e55f --- /dev/null +++ b/roles/ands_kaas/tasks/volume.yml @@ -0,0 +1,11 @@ +--- +- name: "Configure {{ name }} fact" +  set_fact: "kaas_{{ name }}_path={{ path }}" + +- name: "Ensure {{ path }} exists" +  file:  +    path: "{{ path }}"  +    state: "directory"  +    mode: "{{ volume.mode | default(0755) }}"  +    owner: "{{ volume.owner | default(kaas_project_config.file_owner) | default(kaas_default_file_owner) }}"  +    group: "{{ volume.group | default(kaas_project_config.file_group) | default(kaas_default_file_group) }}" diff --git a/roles/ands_kaas/templates/0-gfs-volumes.yml.j2 b/roles/ands_kaas/templates/0-gfs-volumes.yml.j2 new file mode 100644 index 0000000..a162c8b --- /dev/null +++ b/roles/ands_kaas/templates/0-gfs-volumes.yml.j2 @@ -0,0 +1,38 @@ +--- +apiVersion: v1 +kind: Template +metadata: +  name:  +  annotations: +    descriptions: "KATRIN Volumes" +objects: +{% for name, vol in (kaas_project_config.volumes | default(kaas_openshift_volumes)).iteritems() %} +  - apiVersion: v1 +    kind: PersistentVolume +    metadata: +      name: {{ vol.name | default(name) }} +    spec: +      persistentVolumeReclaimPolicy: Retain  +      glusterfs:  +        endpoints: {{ kaas_glusterfs_endpoints }} +        path: {{ vol.volume }} +        readOnly: {{ not (vol.write | 
default(false)) }} +      accessModes: +        - {{ vol.access | default('ReadWriteMany') }} +      capacity: +        storage: {{ vol.capacity | default(kaas_default_volume_capacity) }} +      claimRef: +        name: {{ vol.name | default(name) }} +        namespace: {{ kaas_project }} +  - apiVersion: v1 +    kind: PersistentVolumeClaim +    metadata: +      name: {{ vol.name | default(name) }} +    spec: +      volumeName: {{ vol.name | default(name) }} +      accessModes: +        - {{ vol.access | default('ReadWriteMany') }} +      resources: +        requests: +          storage: {{ vol.capacity | default(kaas_default_volume_capacity) }} +{% endfor %} diff --git a/roles/ands_kaas/templates/6-kaas-pods.yml.j2 b/roles/ands_kaas/templates/6-kaas-pods.yml.j2 new file mode 100644 index 0000000..9849bd3 --- /dev/null +++ b/roles/ands_kaas/templates/6-kaas-pods.yml.j2 @@ -0,0 +1,173 @@ +#jinja2: trim_blocks: "true", lstrip_blocks: "false" +--- +apiVersion: v1 +kind: Template +metadata: +  name: {{ kaas_project }}-pods +  annotations: +    descriptions: {{ kaas_project_config.description | default(kaas_project ~ "auto-generated pod template") }} +objects: +{% for name, pod in (kaas_project_config.pods | default(kaas_openshift_volumes)).iteritems() %} +  {% set pubkey = "kaas_" ~ name ~ "_pubkey" %} +  {% set privkey = "kaas_" ~ name ~ "_privkey" %} +  {% set cakey = "kaas_" ~ name ~ "_ca" %} +  {% if pod.service is defined %} +  - apiVersion: v1 +    kind: Service +    metadata: +      name: {{ pod.name | default(name) }} +    spec: +      selector: +        name: {{ pod.name | default(name) }} +    {% if pod.service.ports is defined %} +      ports: +        {% for port in pod.service.ports %} +            {% set portmap = (port | string).split('/') %} +        - name: "{{ portmap[0] }}" +          port: {{ portmap[0] }} +          targetPort: {{ (portmap[1] is defined) | ternary(portmap[1], portmap[0]) }} +        {% endfor %} +    {% endif %} +    {% if (pod.service.ports is defined) and (pod.service.host is defined) %} +      {% set first_port = (pod.service.ports[0] | string).split('/')[0] %} +  - apiVersion: v1 +    kind: Route +    metadata: +      name: kaas +    spec: +      host: {{ pod.service.host }} +      to: +        kind: Service +        name: {{ pod.name | default(name) }} +      port: +        targetPort: {{ first_port }} +      {% if (first_port == "80") %} +      tls: +        termination: edge +        insecureEdgeTerminationPolicy: Allow +        {% if hostvars[inventory_hostname][pubkey] is defined %} +        certificate: |- +          {{ hostvars[inventory_hostname][pubkey] | indent(10) }} +        {% endif %} +        {% if hostvars[inventory_hostname][privkey] is defined %} +        key: |- +          {{ hostvars[inventory_hostname][privkey] | indent(10) }} +        {% endif %} +        {% if hostvars[inventory_hostname][cakey] is defined %} +        caCertificate: |- +          {{ hostvars[inventory_hostname][cakey] | indent(10) }} +        {% endif %} +      {% endif %} +    {% endif %} +  {% endif %} +  - apiVersion: v1 +    kind: DeploymentConfig +    metadata: +      name: kaas +    spec: +      replicas: {{ pod.sched.replicas | default(1) }} +      selector: +        name: {{ pod.name | default(name) }} +      template: +        metadata: +          name: {{ pod.name | default(name) }} +          labels: +            name: {{ pod.name | default(name) }} +        strategy: +          type: {{ pod.sched.strategy | default('Rolling') }} +        triggers: +   
       - type: ConfigChange +        spec: +    {% if pod.selector is defined %} +          nodeSelector:  +      {% for skey, sval in pod.selector.iteritems() %} +            {{ skey }}: "{{ sval }}" +      {% endfor %} +    {% endif %} +    {% set mappings = (pod.images | json_query('[*].mappings') | length)  %} +    {% if mappings > 0 %} +          volumes: +      {% for img in pod.images %} +        {% set imgidx = loop.index %} +        {% for vol in img.mappings %} +            - name: vol-{{imgidx}}-{{loop.index}} +              persistentVolumeClaim:  +                claimName: {{ vol.name }} +        {% endfor %} +      {% endfor %} +    {% endif %} +          containers: +    {% for img in pod.images %} +      {% set imgidx = loop.index %} +            - name: {{ img.name | default(pod.name) | default(name) }} +              image: {{ img.image }} +              imagePullPolicy: Always +              ports: +      {% if img.ports is defined %} +        {% for port in img.ports %} +                - containerPort: {{ port }} +        {% endfor %} +      {% else %} +        {% for port in pod.service.ports %} +          {% set portmap = (port | string).split('/') %} +                - containerPort: {{ (portmap[1] is defined) | ternary(portmap[1], portmap[0]) }} +        {% endfor %} +      {% endif %} +      {% if img.env is defined %} +              env: +        {% for env_name, env_val in img.env.iteritems() %} +          {% set env_parts = (env_val | string).split('@') %} +          {% if env_parts[0] == "secret" %} +                - name: {{ env_name }} +             {% set env_sec = (env_parts[1] | string).split('/') %} +                  valueFrom:  +                    secretKeyRef: +                      name: {{ env_sec[0] }} +                      key: {{ env_sec[1] }} +          {% elif env_parts[0] == "cm" %} +             {% set env_cm = (env_parts[1] | string).split('/') %} +                  valueFrom:  +                    configMapKeyRef: +                      name: {{ env_cm[0] }} +                      key: {{ env_cm[1] }} +          {% else %} +                  value: {{ env_val }} +          {% endif %} +        {% endfor %} +      {% endif %} +      {% if img.mappings is defined %} +              volumeMounts: +        {% for vol in img.mappings %} +                - name: vol-{{imgidx}}-{{loop.index}} +                  subPath: {{ (((kaas_project_config.volumes | default(kaas_openshift_volumes))[vol.name].path | default("")) ~ "/") | regex_replace('^/','')  }}{{ vol.path | default("") }} +                  mountPath: {{ vol.mount }} +        {% endfor %} +      {% endif %} +      {% if img.probes is defined %} +        {% for probe in img.probes %} +          {% if (probe.type is undefined) %} +            {% set seq = ['livenessProbe', 'readynessProbe'] %} +          {% elif (probe.type == "liveness") %} +            {% set seq = ['livenessProbe'] %} +          {% else %} +            {% set seq = ['readynessProbe'] %} +          {% endif %} +          {% for type in seq %} +              {{ type }}: +                timeoutSeconds: {{ probe.timeout | default(1) }} +                initialDelaySeconds: {{ probe.delay | default(10) }} +            {% if (probe.cmd is defined) %} +                command: "{{ probe.cmd }}" +            {% elif (probe.path is defined) %} +                httpGet:  +                  path: {{ probe.path }} +                  port: {{ probe.port | default(80) }} +            {% else %} +                tcpSocket: +         
         port: {{ probe.port | default(80) }} +            {% endif %} +          {% endfor %} +        {% endfor %} +      {% endif %} +    {% endfor %} +{% endfor %} diff --git a/roles/ands_openshift/defaults/main.yml b/roles/ands_openshift/defaults/main.yml new file mode 100644 index 0000000..857c389 --- /dev/null +++ b/roles/ands_openshift/defaults/main.yml @@ -0,0 +1,11 @@ +openshift_all_subroles: "{{ [ 'hostnames', 'users', 'ssh', 'storage', 'heketi' ] }}" +openshift_subroles: "{{ ( subrole is defined ) | ternary( [ subrole ], openshift_all_subroles ) }}" + +openshift_namespace: "default" +ands_disable_dynamic_provisioning: false + +ssh_template_path: "{{ ands_paths.provision }}/ssh/" +storage_template_path: "{{ ands_paths.provision }}/gfs/" +heketi_template_path: "{{ ands_paths.provision }}/heketi/" + +openshift_storage_nodes: "{{ groups.storage_nodes | map('extract', hostvars, 'ands_storage_hostname') | list }}" diff --git a/roles/ands_openshift/files/gfs-svc.yml b/roles/ands_openshift/files/gfs-svc.yml new file mode 100644 index 0000000..359f3b1 --- /dev/null +++ b/roles/ands_openshift/files/gfs-svc.yml @@ -0,0 +1,16 @@ +--- +apiVersion: v1 +kind: Template +metadata: +  name: gfs +  annotations: +    descriptions: "GlusterFS endpoints & service" +    tags: glusterfs +objects: +  - apiVersion: v1 +    kind: Service +    metadata: +      name: gfs +    spec: +      ports: +        - port: 1 diff --git a/roles/ands_openshift/files/heketi/heketi.json b/roles/ands_openshift/files/heketi/heketi.json new file mode 100644 index 0000000..9efe610 --- /dev/null +++ b/roles/ands_openshift/files/heketi/heketi.json @@ -0,0 +1,23 @@ +{ +        "_port_comment": "Heketi Server Port Number", +        "port" : "8080", + +        "use_auth" : false, +        "jwt" : { +                "admin" : { +                        "key" : "My Secret" +                }, +                "user" : {  +                        "key" : "My Secret" +                } +        }, + +        "glusterfs" : { +                "executor" : "ssh", +                "sshexec": { +                    "keyfile": "/etc/heketi_keys/id_rsa", +                    "user": "root" +                }, +                "db" : "/var/lib/heketi/heketi.db" +        } +} diff --git a/roles/ands_openshift/handlers/main.yml b/roles/ands_openshift/handlers/main.yml new file mode 100644 index 0000000..e46b2a9 --- /dev/null +++ b/roles/ands_openshift/handlers/main.yml @@ -0,0 +1,4 @@ +--- +- name: heketi_topology +  debug: msg="heketi-cli -s http://heketi.{{ openshift_master_default_subdomain }} --user=admin --secret={{ ands_secrets.heketi.admin | quote }} topology load --json={{ heketi_template_path }}/topology.json" +#  command: heketi-cli -s "http://heketi.{{ openshift_master_default_subdomain }}" --user="admin" --secret={{ ands_secrets.heketi.admin | quote }} topology load --json="{{ heketi_template_path }}/topology.json" diff --git a/roles/ands_openshift/tasks/heketi.yml b/roles/ands_openshift/tasks/heketi.yml new file mode 100644 index 0000000..149f85d --- /dev/null +++ b/roles/ands_openshift/tasks/heketi.yml @@ -0,0 +1,13 @@ +--- +- block: +  - name: Ensure all required packages are installed +    yum: name={{item}} state=present +    with_items: +      - heketi-client + +  - include: heketi_resources.yml +    run_once: true +    delegate_to: "{{ groups.masters[0] }}" +    when: ansible_lvm.lvs.{{ ands_heketi_lv }} is defined + +  when: ansible_lvm.lvs.{{ ands_heketi_lv }} is defined diff --git 
a/roles/ands_openshift/tasks/heketi_perms.yml b/roles/ands_openshift/tasks/heketi_perms.yml new file mode 100644 index 0000000..4df6260 --- /dev/null +++ b/roles/ands_openshift/tasks/heketi_perms.yml @@ -0,0 +1,9 @@ +--- +- name: Mount heketidb volume +  mount: name="{{ heketi_template_path }}/heketidbstorage"  src="localhost:heketidbstorage" fstype="glusterfs" opts="defaults,_netdev" state="mounted" + +- name: Allow writing to heketidb +  file: path="{{ heketi_template_path }}/heketidbstorage" owner="root" group="root" mode=0777 + +- name: Unmount heketidb volume +  mount: name="{{ heketi_template_path }}/heketidbstorage"  state="absent" diff --git a/roles/ands_openshift/tasks/heketi_resources.yml b/roles/ands_openshift/tasks/heketi_resources.yml new file mode 100644 index 0000000..06ae6b3 --- /dev/null +++ b/roles/ands_openshift/tasks/heketi_resources.yml @@ -0,0 +1,74 @@ +--- +- name: Ensure heketi configuration directory exists +  file: path="{{ heketi_template_path }}" state="directory" mode=0600 owner=root group=root + +- name: Check if secret exists +  command: oc -n "{{ openshift_namespace }}" get secret/heketi +  register: result +  failed_when: false +  changed_when: (result | failed) + +- name: Create secret for dynamic volume provisioning +  command: "kubectl create secret generic heketi --type=kubernetes.io/glusterfs --from-literal=key={{ ands_secrets.heketi.admin | quote }} --from-literal=user={{ ands_secrets.heketi.user | quote }} --namespace={{ openshift_namespace }}" +  when: (result | changed) + +- name: Copy Heketi configuration +  copy: src="heketi/heketi.json" dest="{{ heketi_template_path }}/heketi.json" owner=root group=root mode="0644" +  register: result1 + +- name: Check if ConfigMap exists +  command: oc -n "{{ openshift_namespace }}" get cm/heketi +  register: result2 +  failed_when: false +  changed_when: (result2 | failed) + +- name: Destroy existing Heketi configuration +  command: oc -n "{{ openshift_namespace }}" delete cm/heketi +  when: ( result1 | changed ) and (not (result2 | changed)) + +- name: Create heketi ConfigMap +  command: oc -n "{{ openshift_namespace }}" create cm heketi --from-file="{{ heketi_template_path }}/heketi.json" +  when: (result1 | changed) or (result2 | changed) + +- name: Check if Heketi pod is running +  command: oc -n "{{ openshift_namespace }}" get dc/heketi --template "{{ '{{.status.availableReplicas}}' }}" +  register: result +  failed_when: false +  changed_when: (result | failed) or ((result.stdout | int) < 1) + +- name: Fix GlusterFS volume permissions +  include: heketi_perms.yml +  run_once: true +  delegate_to: "{{ groups.masters[0] }}" +  when: (result | changed) + +- name: Copy Heketi Template +  template: src="heketi/heketi_template.json.j2" dest="{{ heketi_template_path }}/heketi_template.json" owner=root group=root mode="0644" +  register: result + +- name: Create Heketi Pod +  include_role: name="openshift_resource" +  vars: +    template: heketi_template.json +    template_path: "{{ heketi_template_path }}" +    project: "{{ openshift_namespace }}" +    recreate: "{{ result | changed | ternary (true, false) }}" + +- name: Wait until heketi service is running +  wait_for: host="heketi.{{ openshift_master_default_subdomain }}" port=80 state=present + +- name: Copy Heketi topology +  template: src="heketi/topology.json.j2" dest="{{ heketi_template_path }}/topology.json" owner=root group=root mode="0644" +  notify: heketi_topology + +- name: Copy Heketi storage class +  template: src="heketi/heketi-sc.yml.j2"
dest="{{ heketi_template_path }}/heketi-sc.yml" owner=root group=root mode="0644" +  register: result + +- name: Setup Heketi-based dynamic volume provisioning +  include_role: name="openshift_resource" +  vars: +    template: heketi-sc.yml +    template_path: "{{ heketi_template_path }}" +    project: "{{ openshift_namespace }}" +    recreate: "{{ result | changed | ternary (true, false) }}" diff --git a/roles/ands_openshift/tasks/hostnames.yml b/roles/ands_openshift/tasks/hostnames.yml new file mode 100644 index 0000000..e489a8c --- /dev/null +++ b/roles/ands_openshift/tasks/hostnames.yml @@ -0,0 +1,15 @@ +--- +#- name: Remove obsolte hostnames from /etc/hosts +#  lineinfile: dest="/etc/hosts" regexp="{{ hostvars[item]['openshift_hostname'] }}" state="absent" +#  with_inventory_hostnames: +#    - nodes + + +- name: Configure all cluster hostnames in /etc/hosts +  lineinfile: dest="/etc/hosts" line="{{ hostvars[item]['openshift_ip'] }} {{ hostvars[item]['openshift_public_hostname'] }} {{ hostvars[item]['openshift_hostname'] }}" regexp="{{ hostvars[item]['openshift_hostname'] }}" state="present" +  with_inventory_hostnames: +    - nodes + +- name: Provision /etc/hosts to ensure that all masters servers are accessing Master API on loopback device +  lineinfile: dest="/etc/hosts" line="127.0.0.1 {{ openshift_master_cluster_hostname }}" regexp=".*{{ openshift_master_cluster_hostname }}$" state="present" +  when: "'masters' in group_names" diff --git a/roles/ands_openshift/tasks/main.yml b/roles/ands_openshift/tasks/main.yml new file mode 100644 index 0000000..f72123f --- /dev/null +++ b/roles/ands_openshift/tasks/main.yml @@ -0,0 +1,6 @@ +--- +- name: "Configuring OpenShift" +  include: "{{ current_subrole }}.yml" +  with_items: "{{ openshift_subroles }}" +  loop_control: +    loop_var: current_subrole diff --git a/roles/ands_openshift/tasks/ssh.yml b/roles/ands_openshift/tasks/ssh.yml new file mode 100644 index 0000000..7d8d99d --- /dev/null +++ b/roles/ands_openshift/tasks/ssh.yml @@ -0,0 +1,21 @@ +--- +- name: Check if ssh secret exists +  run_once: true +  delegate_to: "{{ groups.masters[0] }}" +  command: oc -n "{{ openshift_namespace }}" get secret/ands-ssh +  register: result +  changed_when: (result | failed) +  failed_when: false + +- include: ssh_keygen.yml +  run_once: true +  delegate_to: "{{ groups.masters[0] }}" +  when: (result | changed) + +- name: Read SSH public key +  shell: cat "{{ ssh_template_path }}/id_rsa.pub" +  changed_when: false +  register: result + +- name: Distribute public keys +  authorized_key: user="root" key="{{result.stdout}}" state=present manage_dir=yes exclusive=no diff --git a/roles/ands_openshift/tasks/ssh_keygen.yml b/roles/ands_openshift/tasks/ssh_keygen.yml new file mode 100644 index 0000000..21a7b0a --- /dev/null +++ b/roles/ands_openshift/tasks/ssh_keygen.yml @@ -0,0 +1,12 @@ +--- +- name: Ensure ssh directory exists +  file: path="{{ ssh_template_path }}" state="directory" mode=0600 owner=root group=root + +- name: Generate ssh-key +  command: ssh-keygen -t rsa -C "ands-ssh@ipe.kit.edu" -N "" -f "{{ ssh_template_path }}"/id_rsa creates="{{ ssh_template_path }}/id_rsa" + +- name: Create ssh secret +  command: oc -n "{{ openshift_namespace }}" secrets new ands-ssh id_rsa="{{ ssh_template_path }}"/id_rsa id_rsa_pub="{{ ssh_template_path }}/id_rsa.pub" + +- name: Ensure ssh secret key is removed +  file: path="{{ ssh_template_path }}/id_rsa" state=absent diff --git a/roles/ands_openshift/tasks/storage.yml b/roles/ands_openshift/tasks/storage.yml new 
file mode 100644 index 0000000..be2583a --- /dev/null +++ b/roles/ands_openshift/tasks/storage.yml @@ -0,0 +1,4 @@ +--- +- include: storage_resources.yml +  run_once: true +  delegate_to: "{{ groups.masters[0] }}" diff --git a/roles/ands_openshift/tasks/storage_resources.yml b/roles/ands_openshift/tasks/storage_resources.yml new file mode 100644 index 0000000..5adf69e --- /dev/null +++ b/roles/ands_openshift/tasks/storage_resources.yml @@ -0,0 +1,33 @@ +--- +- name: Ensure OpenShift template directory exists +  file: path="{{ storage_template_path }}" state="directory" mode=0644 owner=root group=root + +- name: Copy GlusterFS service template +  copy: src="gfs-svc.yml" dest="{{ storage_template_path }}/gfs-svc.yml" owner=root group=root mode="0644" +  register: result + +- name: Configure GFS service & endpoints +  include_role: name="openshift_resource" +  vars:  +    template: gfs-svc.yml  +    template_path: "{{ storage_template_path }}" +    project: "{{ prj_item }}"  +    recreate: "{{ result | changed | ternary (true, false) }}" +  with_items: "{{ ands_openshift_projects.keys() | union(['default']) }}" +  loop_control:  +    loop_var: prj_item + +- name: Configure GlusterFS end-points +  template: src="gfs-ep.yml.j2" dest="{{ storage_template_path }}/gfs-ep.yml" owner=root group=root mode="0644" +  register: result + +- name: Configure GFS service & endpoints +  include_role: name="openshift_resource" +  vars:  +    template: gfs-ep.yml  +    template_path: "{{ storage_template_path }}" +    project: "{{ prj_item }}" +    recreate: "{{ result | changed | ternary (true, false) }}" +  with_items: "{{ ands_openshift_projects.keys() | union(['default']) }}" +  loop_control:  +    loop_var: prj_item diff --git a/roles/ands_openshift/tasks/users.yml b/roles/ands_openshift/tasks/users.yml new file mode 100644 index 0000000..c816203 --- /dev/null +++ b/roles/ands_openshift/tasks/users.yml @@ -0,0 +1,8 @@ +--- +- name: Copy htpasswd to /etc/origin/master +  copy: src="users/htpasswd" dest="/etc/origin/master/htpasswd" mode=0644 owner=root group=root force=yes backup=no +  when: "'masters' in group_names" + +- include: users_resources.yml +  run_once: true +  delegate_to: "{{ groups.masters[0] }}" diff --git a/roles/ands_openshift/tasks/users_resources.yml b/roles/ands_openshift/tasks/users_resources.yml new file mode 100644 index 0000000..35323cb --- /dev/null +++ b/roles/ands_openshift/tasks/users_resources.yml @@ -0,0 +1,40 @@ +--- +- name: Configure cluster roles +  command: "oc adm policy add-cluster-role-to-user {{  item.key.split('/')[0] }} {{ item.value.replace(' ','').split(',') | join(' ') }}" +  with_dict: "{{ ands_openshift_roles }}" +  when: "{{ item.key.split('/') | length == 1 }}" + +- name: Get project list +  command: "oc get projects -o json" +  changed_when: false +  register: results + +- name: Find missing projects +  set_fact: new_projects="{{ ands_openshift_projects.keys() | difference (results.stdout | from_json | json_query('items[*].metadata.name')) }}" +  when: (results | succeeded) + +- name: Create missing projects +  command: "oc adm new-project --description '{{ ands_openshift_projects[item] }}' {{ item }}" +  with_items: "{{ new_projects | default([]) }}" + +- name: Configure per project roles +  command: "oc adm policy add-role-to-user -n {{  item.key.split('/')[0] }} {{ item.key.split('/')[1] }} {{ item.value.replace(' ','').split(',') | join(' ') }}" +  with_dict: "{{ ands_openshift_roles }}" +  when: "{{ item.key.split('/') | length == 2 }}" + +- name: Get 
user list +  command: "oc get users -o json" +  changed_when: false +  register: results + +- name: Find removed users +  set_fact: removed_users="{{ results.stdout | from_json | json_query('items[*].metadata.name') | difference(ands_openshift_users.keys()) }}" +  when: (results | succeeded) + +- name: Create missing projects +  command: "oc delete identity htpasswd_auth:{{ item }}" +  with_items: "{{ removed_users | default([]) }}" + +- name: Create missing projects +  command: "oc delete user {{ item }}" +  with_items: "{{ removed_users | default([]) }}" diff --git a/roles/ands_openshift/templates/gfs-ep.yml.j2 b/roles/ands_openshift/templates/gfs-ep.yml.j2 new file mode 100644 index 0000000..de3acac --- /dev/null +++ b/roles/ands_openshift/templates/gfs-ep.yml.j2 @@ -0,0 +1,20 @@ +--- +apiVersion: v1 +kind: Template +metadata: +  name: gfs +  annotations: +    descriptions: "GlusterFS endpoints & service" +    tags: glusterfs +objects: +  - apiVersion: v1 +    kind: Endpoints +    metadata: +      name: gfs +    subsets: +{% for node in openshift_storage_nodes %} +      - addresses: +          - ip: {{ node }} +        ports: +          - port: 1 +{% endfor %} diff --git a/roles/ands_openshift/templates/heketi/heketi-sc.yml.j2 b/roles/ands_openshift/templates/heketi/heketi-sc.yml.j2 new file mode 100644 index 0000000..23ce6ce --- /dev/null +++ b/roles/ands_openshift/templates/heketi/heketi-sc.yml.j2 @@ -0,0 +1,21 @@ +--- +apiVersion: v1 +kind: Template +metadata: +  name: heketi-sc +  annotations: +    descriptions: "Heketi Dynamic Volume Provisioning" +    tags: heketi +objects: +  - apiVersion: storage.k8s.io/v1beta1 +    kind: StorageClass +    metadata: +      name: heketi +      annotations: +        storageclass.beta.kubernetes.io/is-default-class: "true"  +    provisioner: kubernetes.io/glusterfs +    parameters: +      resturl: "http://heketi.{{ openshift_master_default_subdomain }}"  +      restuser: "admin" +      secretName: "heketi"  +      secretNamespace: "default"  diff --git a/roles/ands_openshift/templates/heketi/heketi_template.json.j2 b/roles/ands_openshift/templates/heketi/heketi_template.json.j2 new file mode 100644 index 0000000..221662b --- /dev/null +++ b/roles/ands_openshift/templates/heketi/heketi_template.json.j2 @@ -0,0 +1,232 @@ +{ +  "kind": "Template", +  "apiVersion": "v1", +  "metadata": { +    "name": "heketi", +    "labels": { +      "glusterfs": "heketi-template" +    }, +    "annotations": { +      "description": "Heketi service deployment template", +      "tags": "glusterfs,heketi" +    } +  }, +  "labels": { +    "template": "heketi" +  }, +  "objects": [ +    { +        "kind": "PersistentVolume", +        "apiVersion": "v1", +        "metadata": { +            "name": "heketidb" +        }, +        "spec": { +            "persistentVolumeReclaimPolicy": "Retain", +            "glusterfs": { +                "endpoints": "gfs", +                "path": "heketidbstorage" +            }, +            "accessModes": [ "ReadWriteMany" ], +            "capacity": { +                "storage": "1Gi" +            }, +            "claimRef": { +                "name": "heketidb", +                "namespace": "default" +            } +        } +    }, +    { +        "kind": "PersistentVolumeClaim", +        "apiVersion": "v1", +        "metadata": { +            "name": "heketidb" +        }, +        "spec": { +            "volumeName": "heketidb", +            "accessModes": [ "ReadWriteMany" ], +            "resources": { +                
"requests": { +                    "storage": "1Gi" +                } +            } +        } +    }, +    { +      "kind": "Service", +      "apiVersion": "v1", +      "metadata": { +        "name": "heketi", +        "labels": { +          "glusterfs": "heketi" +        }, +        "annotations": { +          "description": "Exposes Heketi service" +        } +      }, +      "spec": { +        "ports": [ +          { +            "name": "heketi", +            "port": 8080, +            "targetPort": 8080 +          } +        ], +        "selector": { +          "name": "heketi" +        } +      } +    }, +    { +      "kind": "Route", +      "apiVersion": "v1", +      "metadata": { +        "name": "heketi", +        "labels": { +          "glusterfs": "heketi" +        } +      }, +      "spec": { +        "host": "heketi.{{ openshift_master_default_subdomain }}", +        "to": { +          "kind": "Service", +          "name": "heketi" +        } +      } +    }, +    { +      "kind": "DeploymentConfig", +      "apiVersion": "v1", +      "metadata": { +        "name": "heketi", +        "labels": { +          "glusterfs": "heketi" +        }, +        "annotations": { +          "description": "Defines how to deploy Heketi" +        } +      }, +      "spec": { +        "replicas": 1, +        "selector": { +          "name": "heketi" +        }, +        "template": { +          "metadata": { +            "name": "heketi", +            "labels": { +              "name": "heketi", +              "glusterfs": "heketi" +            } +          }, +          "triggers": [ +            { +              "type": "ConfigChange" +            } +          ], +          "strategy": { +            "type": "Recreate" +          }, +          "spec": { +            "nodeSelector": { +                "master": "1" +            }, +            "containers": [ +              { +                "name": "heketi", +                "image": "heketi/heketi:dev", +                "imagePullPolicy": "Always", +                "env": [ +                  { +                    "name": "HEKETI_USER_KEY", +                    "valueFrom": { +                      "secretKeyRef": { +                        "name": "heketi", +                        "key": "user" +                      } +                    } +                  }, +                  { +                    "name": "HEKETI_ADMIN_KEY", +                    "valueFrom": { +                      "secretKeyRef": { +                        "name": "heketi", +                        "key": "key" +                      } +                    } +                  }, +                  { +                    "name": "HEKETI_FSTAB", +                    "value": "/var/lib/heketi/fstab" +                  }, +                  { +                    "name": "HEKETI_SNAPSHOT_LIMIT", +                    "value": "14" +                  } +                ], +                "ports": [ +                  { +                    "containerPort": 8080 +                  } +                ], +                "volumeMounts": [ +                  { +                    "name": "config", +                    "mountPath": "/etc/heketi", +                    "readOnly": true +                  }, +                  { +                    "name": "ssh", +                    "mountPath": "/etc/heketi_keys", +                    "readOnly": true +                  }, +                  { +                    "name": "db", +                    "mountPath": 
"/var/lib/heketi" +                  } +                ], +                "readinessProbe": { +                  "timeoutSeconds": 3, +                  "initialDelaySeconds": 3, +                  "httpGet": { +                    "path": "/hello", +                    "port": 8080 +                  } +                }, +                "livenessProbe": { +                  "timeoutSeconds": 3, +                  "initialDelaySeconds": 30, +                  "httpGet": { +                    "path": "/hello", +                    "port": 8080 +                  } +                } +              } +            ], +            "volumes": [ +              { +                "name": "ssh", +                "secret": { +                  "secretName": "ands-ssh" +                } +              }, +              { +                "name": "config", +                "configMap": { +                  "name" : "heketi" +                } +              }, +              { +                "name": "db", +                "persistentVolumeClaim": { +                  "claimName" : "heketidb" +                } +              } +            ] +          } +        } +      } +    } +  ] +}
\ No newline at end of file diff --git a/roles/ands_openshift/templates/heketi/topology.json.j2 b/roles/ands_openshift/templates/heketi/topology.json.j2 new file mode 100644 index 0000000..53d683e --- /dev/null +++ b/roles/ands_openshift/templates/heketi/topology.json.j2 @@ -0,0 +1,28 @@ + +{ +    "clusters": [ +        { +            "nodes": [ +{% set comma = joiner(",") %} +{% for node in openshift_storage_nodes %} +                {{ comma() }} { +                    "node": { +                        "hostnames": { +                            "manage": [ +                                "{{ node }}" +                            ], +                            "storage": [ +                                "{{ node }}" +                            ] +                        }, +                        "zone": 1 +                    }, +                    "devices": [ +                        "/dev/{{ansible_lvm.lvs[ands_heketi_lv].vg}}/{{ ands_heketi_lv }}" +                    ] +                } +{% endfor %} +            ] +        } +    ] +} diff --git a/roles/ands_storage/README b/roles/ands_storage/README new file mode 100644 index 0000000..d17a6cd --- /dev/null +++ b/roles/ands_storage/README @@ -0,0 +1,25 @@ +Dependencies: + - Executed on the fat storage nodes + - Ands data VG and mount-point should be configured or they will default to 'ands' and /mnt/ands + +Parameters: + - ands_data_path: Mount point of the Ands Data Volume, defaults to '/mnt/ands' + - ands_data_vg / ands_data_lv / ands_data_device / ands_data_volume_size: Configures the LV for the Ands Data Volume, VG defaults to 'ands' +    - The data VG will be created if it does not exist. The first non-partitioned device with at least 'ands_data_device_threshold' GB of space +    will be used unless a device is directly specified with 'ands_data_device'. If ands_data_vg already exists, 'ands_data_device' +    will be ignored. +    - Unless 'ands_data_volume_size' is specified, all available space on the VG will be used (after creating the Heketi volume if it +    resides on the same VG) + - ands_heketi_vg / ands_heketi_lv / ands_heketi_device / ands_heketi_volume_size: Configures the LV for the Heketi volume manager +    - The Heketi LV is only created if 'ands_heketi_volume_size' is specified in the inventory +    - By default, the 'ands_data_vg' will be used to create Heketi volumes. +    - If ands_heketi_device is specified, the VG will be created if not existing. + +Facts: + - ands_data_path: + - ands_data_vg: + +Actions: + - Configures the Ands VG & LV on the storage nodes (and detects appropriate devices unless set in inventory) + - Mounts the Ands data volume + 
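To make the parameters above concrete, a sketch of host_vars for a fat storage node; the device name and sizes are invented and only the variables documented above are used:

    # Hypothetical host_vars for a storage node.
    ands_data_device: "/dev/sdb"        # skip auto-detection of a large block device
    ands_data_vg: "ands"
    ands_heketi_volume_size: "1024G"    # create the Heketi LV first
    ands_data_volume_size: "100%FREE"   # then give the rest to the data LV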
\ No newline at end of file diff --git a/roles/ands_storage/defaults/main.yml b/roles/ands_storage/defaults/main.yml new file mode 100644 index 0000000..3eb68b5 --- /dev/null +++ b/roles/ands_storage/defaults/main.yml @@ -0,0 +1,13 @@ +--- +ands_data_vg: "ands" +#ands_data_vg: "katrin" +ands_data_path: "/mnt/{{ ands_data_vg }}" +ands_data_lv: "{{ ands_data_vg }}_data" +ands_data_volume_size: "100%FREE" +ands_data_fs: "xfs" + +ands_data_device_threshold: 8192 + +ands_heketi_vg: "{{ ands_data_vg }}" +ands_heketi_lv: "{{ ands_data_vg }}_heketi" +#ands_heketi_volume_size: "1024G" diff --git a/roles/ands_storage/handlers/main.yml b/roles/ands_storage/handlers/main.yml new file mode 100644 index 0000000..9f55771 --- /dev/null +++ b/roles/ands_storage/handlers/main.yml @@ -0,0 +1,5 @@ +--- +- name: ands_heketi_change +  command: pvresize "/dev/{{ ands_heketi_vg }}/{{ ands_heketi_lv }}" +  when: heketi_stat_result.stat.exists + diff --git a/roles/ands_storage/tasks/detect_device.yml b/roles/ands_storage/tasks/detect_device.yml new file mode 100644 index 0000000..0fb9764 --- /dev/null +++ b/roles/ands_storage/tasks/detect_device.yml @@ -0,0 +1,10 @@ +- name: find large block devices +  set_fact: ands_data_device="/dev/{{ item.key }}" +#  debug: msg="{{ item.key }} - {{ (item.value.sectors | int) * (item.value.sectorsize | int) / 1024 / 1024 / 1024 }} GB" +  with_dict: "{{ ansible_devices }}" +  when:  +    - not ands_data_device is defined  +    - not item.value.partitions +    - not item.value.holders +    - item.value.sectors is defined +    - ( (item.value.sectors | int) * (item.value.sectorsize | int) / 1024 / 1024 / 1024 ) > ands_data_device_threshold diff --git a/roles/ands_storage/tasks/main.yml b/roles/ands_storage/tasks/main.yml new file mode 100644 index 0000000..a86babe --- /dev/null +++ b/roles/ands_storage/tasks/main.yml @@ -0,0 +1,45 @@ +--- +- name: Publish some facts +  set_fact: +    ands_data_vg: "{{ ands_data_vg }}" +    ands_data_path: "{{ ands_data_path }}" + +- name: Analyze storage devices +  include: detect_device.yml +  when: not ands_data_device is defined + +- name: Create Ands VG +  lvg: vg="{{ ands_data_vg }}" pvs="{{ ands_data_device }}" +  when: ands_data_device is defined + +- name: Create Heketi VG +  lvg: vg="{{ ands_heketi_vg }}" pvs="{{ ands_heketi_device }}" +  when: ands_heketi_device is defined + +- name: Check if Heketi Volume already exists +  stat: path="/dev/{{ ands_heketi_vg }}/{{ ands_heketi_lv }}" +  register: heketi_stat_result +  changed_when: false +  when: ands_heketi_volume_size is defined + +- name: Create Heketi Volume +  lvol: vg="{{ ands_heketi_vg }}" lv="{{ ands_heketi_lv }}" size="{{ ands_heketi_volume_size }}" +  notify: ands_heketi_change +  when: ands_heketi_volume_size is defined + +- name: Add Heketi to Storage Domains +  set_fact: ands_storage_domains="{{ ands_storage_domains | union([ands_heketi_domain]) }}" +  when:  +    - (ansible_lvm.lvs[ands_heketi_lv] is defined) or (ands_heketi_volume_size is defined) +    - heketi_stat_result.stat.exists == False + +- name: Create Ands Data Volume +  lvol: vg="{{ ands_data_vg }}" lv="{{ ands_data_lv }}" size="{{ ands_data_volume_size }}" + +- name: Ensure Ands Data Volume is formatted and resize if necessary +  filesystem: fstype="xfs" resizefs="yes" dev="/dev/{{ ands_data_vg }}/{{ ands_data_lv }}" + +- name: Mount Ands Data Volume +  mount: name="{{ ands_data_path }}"  src="/dev/{{ ands_data_vg }}/{{ ands_data_lv }}" fstype="{{ ands_data_fs }}" opts="defaults" state="mounted" + +  
\ No newline at end of file diff --git a/roles/ands_vagrant_vm/README b/roles/ands_vagrant_vm/README new file mode 100644 index 0000000..ca4f0d5 --- /dev/null +++ b/roles/ands_vagrant_vm/README @@ -0,0 +1,19 @@ +Dependencies: + - Executed on a single virtualization node + - The node should have vagrant configured + +Parameters: + - vagrant_hostname_template                            - The name prefix of the generated hosts, e.g. if 'ipekatrin' is specified, the ipekatrin1, ipekatrin2, ... nodes will be produced + - vagrant_project                                      - The vagrant project name; specifies a subdirectory with the virtual machines so that testing and staging setups can run in parallel + - vagrant_projects_dir                                 - Location of all vagrant projects + - vagrant_project_dir                                  - Location of this specific vagrant project, normally vagrant_projects_dir/vagrant_project + - vagrant_hosts                                        - Number of VMs to generate, defaults to the number of configured ands_hosts (i.e. OpenShift nodes currently) + - vagrant_cpu_cores                                    - Number of CPU cores for each VM + - vagrant_mem_size                                     - Memory per VM in GB + - vagrant_disk_size                                    - Data disk size per VM in GB + +Facts: + + +Actions: + - Creates and starts VMs diff --git a/roles/ands_vagrant_vm/defaults/main.yml b/roles/ands_vagrant_vm/defaults/main.yml new file mode 100644 index 0000000..93d92b6 --- /dev/null +++ b/roles/ands_vagrant_vm/defaults/main.yml @@ -0,0 +1,8 @@ +vagrant_hostname_template: ipeands +vagrant_project: testing +vagrant_projects_dir: /home/vagrant/projects +vagrant_project_dir: "/home/vagrant/projects/{{vagrant_project}}" +vagrant_disk_size: 60200 +vagrant_mem_size: 16384 +vagrant_cpu_cores: 4 +vagrant_hosts: "{{ groups.ands_hosts | length }}" diff --git a/roles/ands_vagrant_vm/files/rebuild.sh b/roles/ands_vagrant_vm/files/rebuild.sh new file mode 100644 index 0000000..73e100d --- /dev/null +++ b/roles/ands_vagrant_vm/files/rebuild.sh @@ -0,0 +1,9 @@ +#! /bin/bash + +( +    cd configs + +    vagrant destroy -f +    vagrant up --parallel +    vagrant provision +) diff --git a/roles/ands_vagrant_vm/files/run.sh b/roles/ands_vagrant_vm/files/run.sh new file mode 100755 index 0000000..2fceb3d --- /dev/null +++ b/roles/ands_vagrant_vm/files/run.sh @@ -0,0 +1,12 @@ +#! 
/bin/bash + +( +    cd configs + +    VBoxManage hostonlyif ipconfig vboxnet0 --ip 192.168.12.254 --netmask 255.255.255.0 +#    ( ip addr show | grep 12 ) || ip addr add 192.168.12.254/24 dev vboxnet0 +    ( ip addr show | grep 212 ) || ip addr add 192.168.212.254/24 dev vboxnet0 + +    vagrant up --parallel +    vagrant provision +) diff --git a/roles/ands_vagrant_vm/handlers/main.yml b/roles/ands_vagrant_vm/handlers/main.yml new file mode 100644 index 0000000..8c4f35d --- /dev/null +++ b/roles/ands_vagrant_vm/handlers/main.yml @@ -0,0 +1,2 @@ +- name: vagrant +  command: ./run.sh chdir="{{ vagrant_project_dir }}" diff --git a/roles/ands_vagrant_vm/tasks/main.yml b/roles/ands_vagrant_vm/tasks/main.yml new file mode 100644 index 0000000..7c3310d --- /dev/null +++ b/roles/ands_vagrant_vm/tasks/main.yml @@ -0,0 +1,20 @@ +--- +- name: Ensure vagrant project dir is existing +  file: name="{{ vagrant_project_dir }}/{{ item }}" state=directory +  with_items: +    - configs +    - disks + +- name: Copy authorized_keys +  copy: src="~/.ssh/authorized_keys" dest="{{ vagrant_project_dir }}/configs/authorized_keys" owner="root" group="root" + +- name: Copy scripts +  copy: src="{{ item }}" dest="{{ vagrant_project_dir }}/{{ item }}" mode="0755" +  with_items: +    - run.sh +    - rebuild.sh + +- name: Install Vagrantfile +  template: src="Vagrantfile.j2" dest="{{ vagrant_project_dir }}/configs/Vagrantfile" +  notify: +    - vagrant diff --git a/roles/ands_vagrant_vm/templates/Vagrantfile.j2 b/roles/ands_vagrant_vm/templates/Vagrantfile.j2 new file mode 100644 index 0000000..54128d4 --- /dev/null +++ b/roles/ands_vagrant_vm/templates/Vagrantfile.j2 @@ -0,0 +1,51 @@ +# -*- mode: ruby -*- +# vi: set ft=ruby : +{% set net = ands_openshift_network | ipaddr('network') | ipaddr(0) | regex_replace('\.\d+$', '')  %} +{% set storage_net = ands_storage_network | ipaddr('network') | ipaddr(0) | regex_replace('\.\d+$', '') %} +{% set netid = (  net | regex_replace('^.*\.', '') ) %} +{% set storage_netid = (  storage_net | regex_replace('^.*\.', '') ) %} +{% set macid = ( (netid | length) > 2 ) | ternary(netid, "0" ~ netid) %} + +Vagrant.configure("2") do |config| +  (1..{{ vagrant_hosts }}).each do |i| +    config.vm.define "{{ vagrant_hostname_template }}#{i}" do |node| +        node.vm.network "public_network", bridge: "br0", mac: "080027{{ macid  }}02#{i}", ip: "{{ net }}.#{i}" +        node.vm.network "private_network",  mac: "080027{{ macid }}12#{i}", ip: "{{ storage_net }}.#{i}", name: "vboxnet0" +        node.vm.box = "centos/7" +        node.vm.hostname = "{{ vagrant_hostname_template }}#{i}.ipe.kit.edu" +#	node.vm.synced_folder "../data", "/root/data" + +# Configuring DHCP in 'vm.network' causes 2 DHCP clients (dhclinet & nm) running in parallel and getting 2 IPs. 
+        node.vm.provision "shell", run: "always", inline: "( ip addr show | grep -v 141.52.64.15 | grep -v 141.52.64.17 | grep -v 141.52.64.28 | grep 141.52 ) || dhclient -cf /var/lib/NetworkManager/dhclient-eth0.conf eth1" +        node.vm.provision "shell", run: "always", inline: "( ip addr show | grep {{ netid }}.#{i} ) || ip addr add 192.168.{{ netid }}.#{i}/24 dev eth1" +        node.vm.provision "shell", run: "always", inline: "( ip addr show | grep {{ storage_netid }}.#{i} ) || ifcfg eth2 192.168.{{ storage_netid }}.#{i}" +        node.vm.provision "shell", run: "always", inline: "chmod +r /etc/sysconfig/network-scripts/ifcfg-eth*" +        node.vm.provision "shell", run: "always", inline: "chcon --reference /etc/sysconfig/network-scripts/ifcfg-eth0 /etc/sysconfig/network-scripts/ifcfg-eth*" +         +        node.vm.provision "shell" do |s| +            ssh_pub_key = File.readlines("authorized_keys").first.strip +            s.inline = <<-SHELL +                mkdir -p /root/.ssh/ +                echo #{ssh_pub_key} >> /root/.ssh/authorized_keys +            SHELL +        end + +        node.vm.provider "virtualbox" do |vb| +            vb.memory = "{{ 1024 * (vagrant_mem_size | int) }}" +            vb.cpus = {{ vagrant_cpu_cores }} +            #vb.gui = true +            vb.customize [ +                "modifyvm", :id, +#               "--ostype", "Linux_64", +                "--audio", "none", +            ] +            vb.customize [ +        	'createhd', '--filename', "../disks/#{i}", '--format', 'VDI', '--size', {{ 1024 * (vagrant_disk_size | int) }} +            ] +            vb.customize [ +        	'storageattach', :id, '--storagectl', 'IDE Controller', '--port', 1, '--device', 0,'--type', 'hdd', '--medium', "../disks/#{i}.vdi" +            ] +        end +    end +  end +end diff --git a/roles/common/README b/roles/common/README new file mode 100644 index 0000000..c8bd679 --- /dev/null +++ b/roles/common/README @@ -0,0 +1,11 @@ +Dependencies: + - Executed on all nodes + - No dependencies & no facts + +Parameters: + extra_packages: list of extra packages to install +  +Actions: + - Enables standard repositories + - Install a set of common packages on all nodes (mc, etc.) + 
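The only knob the common role exposes is extra_packages; a minimal sketch of a group_vars entry, with arbitrary example package names:

    # Hypothetical group_vars/all fragment for the 'common' role.
    extra_packages:
      - htop
      - tmux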
\ No newline at end of file diff --git a/roles/common/tasks/main.yml b/roles/common/tasks/main.yml new file mode 100644 index 0000000..3f49a39 --- /dev/null +++ b/roles/common/tasks/main.yml @@ -0,0 +1,23 @@ +- name: Ensure all required repositories are configured +  package: name={{item}} state=present +  with_items: +    - epel-release +    - centos-release-openshift-origin + +# Seems we need iptables-services at least temporarily... +- name: Ensure all required packages are installed +  package: name={{item}} state=present +  with_items: +    - mc +    - bzr +    - git +    - yamllint +    - pyOpenSSL +    - python-passlib +    - python2-ruamel-yaml +    - python2-jmespath +    - iptables-services + +- name: Ensure all extra packages are installed +  package: name={{item}} state=present +  with_items: "{{ extra_packages | default([]) }}" diff --git a/roles/docker/README b/roles/docker/README new file mode 100644 index 0000000..b9b1537 --- /dev/null +++ b/roles/docker/README @@ -0,0 +1,18 @@ +Dependencies: + - Executed on all nodes. On the storage nodes, the katrin_storage role should be executed beforehand + - Unless docker_storage_vg is defined, it will try to detect the first VG with free space available which is not equal to ands_data_vg + +Parameters: + docker_storage_vg / docker_storage_device: Configures the VG to use for docker images +    - If docker_storage_vg is not set, it will try to detect the first VG with space available which is not listed in 'docker_exclude_vgs' +    - If no such VG is found, it will create a VG on the specified device + docker_exclude_vgs: Lists VGs which should not be used to host Docker volumes + docker_min_size: Specifies the minimum free space (in GB) a VG needs to host the Docker LV + docker_volume_size: Adjusts the size of the Docker LV +  +Facts: + - None + +Actions: + - Detects or creates the Docker VG and creates the 'docker-pool' LV inside it (auto-growing). The initial size may be given with docker_volume_size + - Installs docker, configures the LVM storage backend, and enables the docker service diff --git a/roles/docker/defaults/main.yml b/roles/docker/defaults/main.yml new file mode 100644 index 0000000..6542789 --- /dev/null +++ b/roles/docker/defaults/main.yml @@ -0,0 +1,3 @@ +docker_min_size: 100 +docker_exclude_vgs: "{{ ands_data_vg is defined | ternary( [ ands_data_vg ], [] ) }}" +docker_lv: "docker-pool" diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml new file mode 100644 index 0000000..1263cd2 --- /dev/null +++ b/roles/docker/tasks/main.yml @@ -0,0 +1,32 @@ +--- +- name: Ensure docker is installed +  yum: name="docker" state="present" + +- name: Start docker +  service: name="docker" state="started" + +- name: Configure bridge-nf-call-iptables with sysctl +  sysctl: name="net.bridge.bridge-nf-call-iptables"  value=1 state=present sysctl_set=yes + +- name: Configure bridge-nf-call-ip6tables with sysctl +  sysctl: name="net.bridge.bridge-nf-call-ip6tables"  value=1 state=present sysctl_set=yes + +- name: Determine if docker uses a loopback device +  shell: docker info | grep 'Data file:.*loop' +  register: loop_device_check +  failed_when: false +  changed_when: loop_device_check.rc == 0 + +- debug: msg="{{ loop_device_check.stderr }}" +  when: loop_device_check.stderr + +- include: storage.yml +  when: loop_device_check.rc == 0 + +- name: Extend the docker-pool LV +  lvol: vg="{{ ansible_lvm['lvs'][docker_lv]['vg'] }}" lv="{{ docker_lv }}" size="{{ docker_volume_size }}" +  when: docker_volume_size is defined + +- name: Ensure docker is enabled and started +  service: name="docker" enabled=yes state=started +  
\ No newline at end of file diff --git a/roles/docker/tasks/storage.yml b/roles/docker/tasks/storage.yml new file mode 100644 index 0000000..595979c --- /dev/null +++ b/roles/docker/tasks/storage.yml @@ -0,0 +1,41 @@ +--- +- name: Select the first suitable volume group +  set_fact: available_vg="{{ item.key }}" +  with_dict: "{{ ansible_lvm.vgs }}" +  when: +    - not available_vg is defined +    - not item.key in ( docker_exclude_vgs | default([]) ) +    - (item.value.free_g | int) > (docker_volume_size | default(docker_min_size)) + +- set_fact: docker_storage_vg="{{ available_vg }}" +  when: (not docker_storage_vg is defined) and (available_vg is defined) + +- fail: msg="Can't detect Docker VG" +  when: not docker_storage_vg is defined + +- name: check to see if {{ docker_storage_device }} exists +  command: "test -e {{ docker_storage_device }}" +  when: docker_storage_device is defined + +- set_fact: docker_storage_config="VG={{ docker_storage_vg }} AUTO_EXTEND_POOL=true" + +- set_fact: docker_storage_config="{{ docker_storage_config }} DEVS={{ docker_storage_device }}" +  when: ( docker_storage_device is defined ) and ( ansible_lvm.vgs[docker_storage_vg] is not defined ) +   +- name: stop docker +  service: name="docker" state="stopped" + +- name: delete /var/lib/docker +  file: path="/var/lib/docker" state=absent + +- name: generate docker-storage-setup config file +  copy: +      content: "{{ docker_storage_config }}" +      dest: /etc/sysconfig/docker-storage-setup +      owner: root +      group: root +      mode: 0664 + +- name: docker storage setup +  command: docker-storage-setup + diff --git a/roles/glusterfs/README b/roles/glusterfs/README new file mode 100644 index 0000000..9a319d0 --- /dev/null +++ b/roles/glusterfs/README @@ -0,0 +1,26 @@ +Dependencies: + - Executed on all nodes.  +    * The GlusterFS servers are configured on all storage servers.  +    * The GlusterFS clients are configured on all servers +    * The volumes are created in the configured domains + - Expects that the partition for bricks is already prepared + +Parameters: +  glusterfs_version: should be defined (without the dot, e.g. 39) +  glusterfs_transport: Transport to use, defaults to rdma + +  glusterfs_network: CIDR for the gluster internal Infiniband network +    - if 192.168.12.0/24 is specified, the 'ipekatrin1' storage node will be mapped to the '192.168.12.1' IP, etc. +  glusterfs_servers: List of storage servers in glusterfs_network +  glusterfs_bricks_path: The location to store volume bricks, defaults to 'ands_data_path'/glusterfs +  glusterfs_domains: Volume configuration + +Facts: + +Actions: + - Installs appropriate GlusterFS repositories (to match the specified version) + - Installs required packages, only the native clients on servers without storage + - Enables firewalld if necessary and allows the GlusterFS service + - Configures SELinux, etc.  
+ - Probes all storage nodes using internal Infiniband IPs + - Creates requested volumes and mounts them diff --git a/roles/glusterfs/defaults/main.yml b/roles/glusterfs/defaults/main.yml new file mode 100644 index 0000000..9587a9b --- /dev/null +++ b/roles/glusterfs/defaults/main.yml @@ -0,0 +1,11 @@ +--- +glusterfs_version: 39 +glusterfs_transport: rdma + +glusterfs_network: "{{ ands_storage_network }}" +glusterfs_servers: "{{ ands_storage_servers }}" +glusterfs_bricks_path: "{{ ands_data_path }}/glusterfs" +glusterfs_domains: "{{ ands_storage_domains }}" + +glusterfs_all_subroles: "{{ [ 'software', 'volumes' ] }}" +glusterfs_subroles: "{{ ( subrole is defined ) | ternary( [ subrole ], glusterfs_all_subroles ) }}" diff --git a/roles/glusterfs/tasks/cfg/vols2.yml b/roles/glusterfs/tasks/cfg/vols2.yml new file mode 120000 index 0000000..b6a3e8f --- /dev/null +++ b/roles/glusterfs/tasks/cfg/vols2.yml @@ -0,0 +1 @@ +vols3.yml
\ No newline at end of file diff --git a/roles/glusterfs/tasks/cfg/vols3.yml b/roles/glusterfs/tasks/cfg/vols3.yml new file mode 100644 index 0000000..d094797 --- /dev/null +++ b/roles/glusterfs/tasks/cfg/vols3.yml @@ -0,0 +1,13 @@ +--- +- name: "Create {{ name }} volume" +  gluster_volume:  +    state: present +    name: "{{ name }}" +    cluster: "{{ domain_servers | join(',') }}" +    replicas: "{{ domain_servers | length }}" +    bricks: "{{ glusterfs_bricks_path }}/brick-{{ name }}" +    transport: "{{ glusterfs_transport }}" + + +- name: "Start {{ name }} volume" +  gluster_volume: state="started" name="{{ name }}" diff --git a/roles/glusterfs/tasks/common.yml b/roles/glusterfs/tasks/common.yml new file mode 100644 index 0000000..7675cb9 --- /dev/null +++ b/roles/glusterfs/tasks/common.yml @@ -0,0 +1,16 @@ +--- +- name: Ensure GlusterFS repositories are present +  yum: name="centos-release-gluster{{ glusterfs_version }}" state=present +    +- name: Ensure GlusterFS is installed +  yum: name={{item}} state=present +  with_items: +    - glusterfs-cli +    - glusterfs-fuse +    - glusterfs-libs +    - glusterfs-rdma +    - glusterfs +    - libsemanage-python +             +- name: Allow fuse in SELinux configuration +  seboolean: name="virt_sandbox_use_fusefs" state="yes" persistent="yes" diff --git a/roles/glusterfs/tasks/create_domain.yml b/roles/glusterfs/tasks/create_domain.yml new file mode 100644 index 0000000..b3fc89e --- /dev/null +++ b/roles/glusterfs/tasks/create_domain.yml @@ -0,0 +1,8 @@ +--- +- name: Configure volumes +  include: create_volume.yml +  with_dict: "{{ domain.volumes }}" +  vars: +    domain_servers: "{{ groups[domain.servers] | map('extract', hostvars, 'ands_storage_hostname') | list }}" +  loop_control: +    loop_var: volume diff --git a/roles/glusterfs/tasks/create_volume.yml b/roles/glusterfs/tasks/create_volume.yml new file mode 100644 index 0000000..9b955b0 --- /dev/null +++ b/roles/glusterfs/tasks/create_volume.yml @@ -0,0 +1,4 @@ +--- +- include: "{{ volume.value.type }}/vols{{((domain_servers | length) < 4) | ternary((domain_servers | length), 3) }}.yml" +  vars: +    name: "{{ volume.key }}" diff --git a/roles/glusterfs/tasks/main.yml b/roles/glusterfs/tasks/main.yml new file mode 100644 index 0000000..dbd1aad --- /dev/null +++ b/roles/glusterfs/tasks/main.yml @@ -0,0 +1,13 @@ +--- +- include: common.yml +  when: +    - "'software' in glusterfs_subroles" + +- include: server.yml +  when:  +    - "'software' in glusterfs_subroles" +    - "'ands_storage_servers' in group_names" + +- include: volumes.yml +  when: +    - "'volumes' in glusterfs_subroles" diff --git a/roles/glusterfs/tasks/mount_domain.yml b/roles/glusterfs/tasks/mount_domain.yml new file mode 100644 index 0000000..94b6677 --- /dev/null +++ b/roles/glusterfs/tasks/mount_domain.yml @@ -0,0 +1,12 @@ +--- +- name: Mount volumes +  include: mount_volume.yml +  with_dict: "{{ domain.volumes }}" +  vars: +    name: "{{ volume.key }}" +    path: "{{ volume.value.mount }}" +    server_group: "{{ domain.servers }}" +    domain_servers: "{{ groups[domain.servers] | map('extract', hostvars, 'ands_storage_hostname') | list }}" +  when: volume.value.mount is defined +  loop_control: +    loop_var: volume diff --git a/roles/glusterfs/tasks/mount_volume.yml b/roles/glusterfs/tasks/mount_volume.yml new file mode 100644 index 0000000..2aea7f6 --- /dev/null +++ b/roles/glusterfs/tasks/mount_volume.yml @@ -0,0 +1,8 @@ +--- +- name: Mount {{ name }} volume +  mount: name="{{ path }}"  src="localhost:{{ name 
}}" fstype="glusterfs" opts="defaults,_netdev" state="mounted" +  when: server_group in group_names +   +- name: Mount {{ name }} volume +  mount: name="{{ path }}"  src="{{ domain_servers | join(",") }}:{{ name }}" fstype="glusterfs" opts="defaults,_netdev" state="mounted" +  when: not server_group in group_names diff --git a/roles/glusterfs/tasks/server.yml b/roles/glusterfs/tasks/server.yml new file mode 100644 index 0000000..328a8c5 --- /dev/null +++ b/roles/glusterfs/tasks/server.yml @@ -0,0 +1,31 @@ +--- +- name: Ensure GlusterFS is installed +  yum: name={{item}} state=present +  with_items: +    - glusterfs-server +    - glusterfs-rdma + +- name: Ensure GlusterFS service is running +  service: name=glusterd state=started enabled=yes + +- name: Ensure firewalld is running +  service: name=firewalld state=started enabled=yes + +- name: Configure firewalld +  firewalld: rich_rule="rule family=ipv4 source address={{glusterfs_network}} service name=glusterfs accept" state="enabled" permanent="true" immediate="true" +  when: glusterfs_network is defined +   +- name: Configure firewalld +  firewalld: service="glusterfs" state="enabled" permanent="true" immediate="true" +  when: not glusterfs_network is defined + +- name: Reload firewalld rules +  shell: firewall-cmd --reload + +- name: Create folder for GlusterFS bricks +  file: dest="{{glusterfs_bricks_path}}" owner="root" group="root" mode="0755" state="directory" + +- name: Configure gluster peers (on first host) +  shell: gluster peer probe {{item}} +  run_once: true +  with_items: "{{ glusterfs_servers }}" diff --git a/roles/glusterfs/tasks/tmp/vols2.yml b/roles/glusterfs/tasks/tmp/vols2.yml new file mode 120000 index 0000000..b6a3e8f --- /dev/null +++ b/roles/glusterfs/tasks/tmp/vols2.yml @@ -0,0 +1 @@ +vols3.yml
\ No newline at end of file diff --git a/roles/glusterfs/tasks/tmp/vols3.yml b/roles/glusterfs/tasks/tmp/vols3.yml new file mode 100644 index 0000000..9565bb3 --- /dev/null +++ b/roles/glusterfs/tasks/tmp/vols3.yml @@ -0,0 +1,11 @@ +--- +- name: "Create {{ name }} volume" +  gluster_volume:  +    state: present +    name: "{{ name }}" +    cluster: "{{ domain_servers | join(',') }}" +    bricks: "{{ glusterfs_bricks_path }}/brick-{{ name }}" +    transport: "{{ glusterfs_transport }}" + +- name: "Start {{ name }} volume" +  gluster_volume: state="started" name="{{ name }}" diff --git a/roles/glusterfs/tasks/volumes.yml b/roles/glusterfs/tasks/volumes.yml new file mode 100644 index 0000000..e393c08 --- /dev/null +++ b/roles/glusterfs/tasks/volumes.yml @@ -0,0 +1,15 @@ +- name: Configure volume domains +  include: create_domain.yml +  run_once: true +  delegate_to: "{{ groups[domain.servers][0] }}" +  with_items: "{{ glusterfs_domains }}" +  loop_control: +    loop_var: domain   + +- name: Mount volume domains +  include: mount_domain.yml +  when: ( domain.clients | default("---") ) in group_names +  with_items: "{{ glusterfs_domains }}" +  loop_control: +    loop_var: domain   + diff --git a/roles/keepalived/.gitignore b/roles/keepalived/.gitignore new file mode 100644 index 0000000..aa16e10 --- /dev/null +++ b/roles/keepalived/.gitignore @@ -0,0 +1,2 @@ +.DS_Store/* +.vagrant/* diff --git a/roles/keepalived/README b/roles/keepalived/README new file mode 100644 index 0000000..956dbcb --- /dev/null +++ b/roles/keepalived/README @@ -0,0 +1,13 @@ +Dependencies: + - Run on OpenShift master nodes + +Parameters: + - keepalived_vips: List of Virtual IPs + - keepalived_iface: Network interface +  +Facts: + +Actions: + - Sets up and configures keepalived daemon + - Configures sysctl and firewall + diff --git a/roles/keepalived/defaults/main.yml b/roles/keepalived/defaults/main.yml new file mode 100644 index 0000000..a7087b0 --- /dev/null +++ b/roles/keepalived/defaults/main.yml @@ -0,0 +1,12 @@ +--- +keepalived_vips: "{{ ands_ipfailover_vips | default([]) }}" +keepalived_iface: "{{ ands_ipfailover_interface | default('eth0') }}" + +keepalived_master_prio: 80 +keepalived_backup_prio: 20 +keepalived_check_interval: 5 + +keepalived_password: "{{ ands_secrets.keepalived }}" + +keepalived_node_id: "{{ play_hosts.index(inventory_hostname) }}" +keepalived_num_nodes: "{{ play_hosts | length }}" diff --git a/roles/keepalived/handlers/main.yml b/roles/keepalived/handlers/main.yml new file mode 100644 index 0000000..2ac9fe3 --- /dev/null +++ b/roles/keepalived/handlers/main.yml @@ -0,0 +1,3 @@ +--- +- name: restart keepalived +  service: name=keepalived state=restarted diff --git a/roles/keepalived/tasks/main.yml b/roles/keepalived/tasks/main.yml new file mode 100644 index 0000000..771faa7 --- /dev/null +++ b/roles/keepalived/tasks/main.yml @@ -0,0 +1,22 @@ +--- +- name: Install keepalived  +  yum: name=keepalived state=present +  notify: restart keepalived + +- name: Configure net.ipv4.ip_nonlocal_bind with sysctl +  sysctl: name="net.ipv4.ip_nonlocal_bind"  value=1 state=present sysctl_set=yes + +- name: Ensure firewalld is running +  service: name=firewalld state=started enabled=yes + +- name: Configure firewalld +  firewalld: rich_rule="rule protocol value=vrrp accept" state="enabled" permanent="true" immediate="true" + +- name: Install configuration +  template: src=keepalived.conf.j2 dest=/etc/keepalived/keepalived.conf owner=root group=root mode=0600 +  tags: keepalived +  notify: restart keepalived + 
+- name: Start keepalived +  service: name=keepalived state=started +  tags: keepalived diff --git a/roles/keepalived/templates/keepalived.conf.j2 b/roles/keepalived/templates/keepalived.conf.j2 new file mode 100644 index 0000000..8d9a580 --- /dev/null +++ b/roles/keepalived/templates/keepalived.conf.j2 @@ -0,0 +1,36 @@ +global_defs { +} + +vrrp_script track { +    script "[ -f /etc/keepalived/track.sh ] || exit 0 && /etc/keepalived/track.sh" +    interval {{ keepalived_check_interval }} +} + +{% for vips in keepalived_vips  %} +{% set id = (  vips | ipaddr('address') | regex_replace('^.*\.', '') ) %} + +vrrp_instance VI_{{ loop.index }} { + +    virtual_router_id {{ id }} + +    state {{ (( ( loop.index - 1) % (keepalived_num_nodes | int) ) == (keepalived_node_id | int) ) | ternary('MASTER', 'BACKUP') }} +    priority {{ (( ( loop.index - 1) % (keepalived_num_nodes | int) ) == (keepalived_node_id | int) ) | ternary(keepalived_master_prio, keepalived_backup_prio) }} + +    interface {{ keepalived_iface }} + +    virtual_ipaddress { +        {{ vips }} dev {{ keepalived_iface }}  +    } +     +    advert_int 1 + +    authentication { +        auth_type PASS +        auth_pass {{ keepalived_password }} +    } + +    track_script { +        track +    } +} +{% endfor %} diff --git a/roles/openshift_certificate_expiry b/roles/openshift_certificate_expiry new file mode 120000 index 0000000..789348e --- /dev/null +++ b/roles/openshift_certificate_expiry @@ -0,0 +1 @@ +../../openshift-ansible/roles/openshift_certificate_expiry
\ No newline at end of file diff --git a/roles/openshift_resource/defaults/main.yml b/roles/openshift_resource/defaults/main.yml new file mode 100644 index 0000000..ec44c4f --- /dev/null +++ b/roles/openshift_resource/defaults/main.yml @@ -0,0 +1 @@ +template_path: "/mnt/provision/templates" diff --git a/roles/openshift_resource/tasks/command.yml b/roles/openshift_resource/tasks/command.yml new file mode 100644 index 0000000..c8e8d04 --- /dev/null +++ b/roles/openshift_resource/tasks/command.yml @@ -0,0 +1,17 @@ +- block: + +  - name: Lookup the specified resource +    command: "oc get -n {{project}} {{resource}}" +    register: result +    failed_when: false +    changed_when: (result | failed) + +  - name: Destroy existing resources +    command: "oc delete -n {{project}} {{resource}}" +    failed_when: false +    when: (recreate|default(false))  + +  - name: Executing command +    command: "oc -n {{ project }} {{ command }}" +    when: (recreate|default(false)) or (result | changed) +  run_once: true diff --git a/roles/openshift_resource/tasks/lookup.yml b/roles/openshift_resource/tasks/lookup.yml new file mode 100644 index 0000000..07beb81 --- /dev/null +++ b/roles/openshift_resource/tasks/lookup.yml @@ -0,0 +1,6 @@ +--- +- name: Lookup the specified resource +  command: "oc get -n {{project}} {{rkind}}/{{rname}}" +  register: result +  failed_when: false +  changed_when: (result | failed) diff --git a/roles/openshift_resource/tasks/main.yml b/roles/openshift_resource/tasks/main.yml new file mode 100644 index 0000000..698efea --- /dev/null +++ b/roles/openshift_resource/tasks/main.yml @@ -0,0 +1,22 @@ +--- +- block: +  - name: "Read template {{ template }}" +    command: cat '{{template_path}}/{{template}}' +    changed_when: false +    register: results + +  - name: Parse JSON templates +    set_fact: tmpl="{{ results.stdout | from_json }}" +    when: template.find(".json") != -1 + +  - name: Parse YAML templates +    set_fact: tmpl="{{ results.stdout | from_yaml }}" +    when: template.find(".json") == -1 + +  - include: template.yml +    when: tmpl.kind == "Template" + +  - include: resource.yml +    when: tmpl.kind != "Template" +  +  run_once: true diff --git a/roles/openshift_resource/tasks/resource.yml b/roles/openshift_resource/tasks/resource.yml new file mode 100644 index 0000000..326abbb --- /dev/null +++ b/roles/openshift_resource/tasks/resource.yml @@ -0,0 +1,20 @@ +--- +- block: +  - name: Find out which resources we are going to configure +    set_fact: rkind="{{ tmpl.kind }}" rname="{{ tmpl.metadata.name }}" + +  - name: Lookup the specified resource +    command: "oc get -n {{project}} {{rkind}}/{{rname}}" +    register: result +    failed_when: false +    changed_when: (result | failed) + +  - name: Destroy existing resources +    command: "oc delete -n {{project}} {{rkind}}/{{rname}}" +    failed_when: false +    when: (recreate|default(false))  + +  - name: Create resources defined in template +    command: "oc create -n {{project}} -f '{{ template_path }}/{{ template }}' {{ create_args | default('') }}" +    when: (recreate|default(false)) or (result | changed) +  run_once: true diff --git a/roles/openshift_resource/tasks/template.yml b/roles/openshift_resource/tasks/template.yml new file mode 100644 index 0000000..c93dec5 --- /dev/null +++ b/roles/openshift_resource/tasks/template.yml @@ -0,0 +1,25 @@ +--- +- block: +  - name: Find out which resources we are going to configure +    set_fact: resources="{{ tmpl | json_query(query) }}" +    vars: +      query: 
"objects[*].{kind: kind, name: metadata.name}" +       +  - name: Lookup the specified resource +    command: "oc get -n {{project}} {{item.kind}}/{{item.name}}" +    register: results +    failed_when: false +    changed_when: (results | failed) +    with_items: "{{ resources | default([]) }}" +#    when: not (recreate|default(false))  + +  - name: Detroy existing resources +    command: "oc delete -n {{project}} {{resources[item|int].kind}}/{{resources[item|int].name}}" +    failed_when: false +    with_sequence: start=0 count="{{resources | default([]) | length}}" +    when: ((recreate|default(false)) or (results | changed)) and (results.results[item|int].rc == 0) + +  - name: Create resources defined in template +    shell: "oc process -f '{{ template_path }}/{{template}}' {{ template_args | default('') }} | oc create -n {{project}} -f - {{ create_args | default('') }}" +    when: (recreate|default(false)) or (results | changed) +  run_once: true diff --git a/roles/openvpn/README b/roles/openvpn/README new file mode 100644 index 0000000..9c64b0d --- /dev/null +++ b/roles/openvpn/README @@ -0,0 +1,12 @@ +Dependencies: + - Runs on all OpenShift nodes + +Parameters: + - ands_openshift_lb: The load balancer which OpenVPN clients (non-master nodes) will be using to get into the network +  +Facts: + +Actions: + - Sets up and configures OpenVPN servers & clients + - Opens firewall port + diff --git a/roles/openvpn/defaults/main.yml b/roles/openvpn/defaults/main.yml new file mode 100644 index 0000000..513936a --- /dev/null +++ b/roles/openvpn/defaults/main.yml @@ -0,0 +1,15 @@ +openvpn_port: 1194 +openvpn_dir: "/etc/openvpn" +openvpn_config: "katrin" +openvpn_config_file: "{{openvpn_dir}}/{{openvpn_config}}.conf" +openvpn_keydir: "{{openvpn_dir}}/keys_{{openvpn_config}}" +openvpn_ccdir: "{{openvpn_dir}}/ccd_{{openvpn_config}}" +openvpn_service: "openvpn@{{openvpn_config}}.service" + +openvpn_lb: "{{ ands_openshift_lb }}" +openvpn_servers: "masters" + + +openvpn_server_id: "{{ (openvpn_servers in group_names) | ternary(groups[openvpn_servers].index((openvpn_servers in group_names) | ternary(inventory_hostname, groups[openvpn_servers][0])), -1) }}" +openvpn_subnet_id: "{{ (katrin_openvpn_subnet_offset | int) + (openvpn_server_id | int) }}" +openvpn_net: "{{ katrin_openvpn_network | ipsubnet(katrin_openvpn_subnet_bits, openvpn_subnet_id) }}" diff --git a/roles/openvpn/files/ca/ca.crt b/roles/openvpn/files/ca/ca.crt new file mode 100644 index 0000000..a37743b --- /dev/null +++ b/roles/openvpn/files/ca/ca.crt @@ -0,0 +1,22 @@ +-----BEGIN CERTIFICATE----- +MIIDojCCAwugAwIBAgIJAMIDvuFyaww1MA0GCSqGSIb3DQEBBQUAMIGTMQswCQYD +VQQGEwJOTzERMA8GA1UECBMIbWlkZ2FhcmQxETAPBgNVBAcTCG1pZGdhYXJkMREw +DwYDVQQKEwhEYXJrU29mdDELMAkGA1UECxMCQ0ExFDASBgNVBAMTC0RhcmtTb2Z0 +IENBMSgwJgYJKoZIhvcNAQkBFhlkYXJrc29mdEBkc2lkZS5keW5kbnMub3JnMB4X +DTA5MTAyMjAyMTgzOVoXDTE5MTAyMDAyMTgzOVowgZMxCzAJBgNVBAYTAk5PMREw +DwYDVQQIEwhtaWRnYWFyZDERMA8GA1UEBxMIbWlkZ2FhcmQxETAPBgNVBAoTCERh +cmtTb2Z0MQswCQYDVQQLEwJDQTEUMBIGA1UEAxMLRGFya1NvZnQgQ0ExKDAmBgkq +hkiG9w0BCQEWGWRhcmtzb2Z0QGRzaWRlLmR5bmRucy5vcmcwgZ8wDQYJKoZIhvcN +AQEBBQADgY0AMIGJAoGBAKDdlL90dk2ixdjG6Fm5hPjvqex2ZqIWk7l+hh9AJjhT +oFYO5DKTb4JioKYA76KZ7uCgQzxhiDfma3agw7WGR8H+n28AzkxgqTEKWU4ysrxQ +CtykKO3qs79iYHdcX1NRUAx22cpBnQjq7HJkXJWg5i+3RPSyk8Vl2QC8BzfiLH/D +AgMBAAGjgfswgfgwHQYDVR0OBBYEFF+geRyB0QoAUHIRgtlq3sLwiZIIMIHIBgNV +HSMEgcAwgb2AFF+geRyB0QoAUHIRgtlq3sLwiZIIoYGZpIGWMIGTMQswCQYDVQQG +EwJOTzERMA8GA1UECBMIbWlkZ2FhcmQxETAPBgNVBAcTCG1pZGdhYXJkMREwDwYD 
+VQQKEwhEYXJrU29mdDELMAkGA1UECxMCQ0ExFDASBgNVBAMTC0RhcmtTb2Z0IENB +MSgwJgYJKoZIhvcNAQkBFhlkYXJrc29mdEBkc2lkZS5keW5kbnMub3JnggkAwgO+ +4XJrDDUwDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQUFAAOBgQBey4alwOjkx6LG +csNMOeofpSVr79muQALum9rdsZVBb93x9ygSJJ8ygCgolXDGF4uAGX31kbYtiANY +rXef9gNWZLlMt2NPcJzV53hbXnFDYOSpFwUCFwvBAFkhIv4r1TjqxHSDiRdTda31 +0J1sESMtMZook/QKNx+46CQrjUGWzA== +-----END CERTIFICATE----- diff --git a/roles/openvpn/files/ca/ca.key b/roles/openvpn/files/ca/ca.key new file mode 100644 index 0000000..f1df0c4 --- /dev/null +++ b/roles/openvpn/files/ca/ca.key @@ -0,0 +1,50 @@ +$ANSIBLE_VAULT;1.1;AES256 +66303364323939633166383539303539333162653336313339616434663839353333613063623262 +6564343033366235336230326161636661393638353336320a646631393037333838633831616532 +33653431326435636135643835613738333634636566373131323634633730343836353562633464 +3561313137613166660a613534623665646637386161633031393461343762663930633634616634 +33366532313537643035623239616137616561633366303132633430636234333534383563663236 +37346239353437333362663862626334383866623338653061326632646363383563356264336665 +65383962646131393165613838623661613865343165396135633761646137306436303266336634 +63356239373032303261353937393664663265396161366163356463633539393635643762366165 +66626230386662353361646663343464643534313332323565386230613463666238356261353730 +35663337626164333233323437393432336535383437653036643338363662313138363037323666 +61343061626262316461613838653834303764623733393131303035346336393333656233383666 +32666235356231663838386530306333383463616362303563363164343230383066303732666533 +38666435313437636132393836313630323839333237623130646366363633393939646261653763 +31313634313134623639303134653264646638666563366334366235653339303031313262346465 +39613934623461393438613363376566646432313931333731333939373966316464373137363431 +62626134303730613736316263616133323863616565326463656562656462316636613933393934 +65303761343762626232633634373233386334643334613337306562613938656136303837616637 +36643363386166373432306236333438663536303065363961613236366465356232303331376233 +32656637373235643839623539633761653164323230363763383737303566326239623530633962 +30616230363434363439383838633765633632663963323337393430643966616663383662643838 +32636465363130366232643933323066383965643032643537616531306239616662633932653866 +64363939343935323137356433373538613930653332303834386436386331313334333031376533 +39346130646439326531356239376531343730656232393331313633363765316439336565353331 +61316266356161366534636138363161643363666266616662306130353334323636363062393539 +65633565333037393264346265303461333734623233306563643732613432623330623232393637 +37323635323432343738376462646639313239313465383661353763306437373939353737356437 +30323037656231653534316665633431343137666665303831346139626539316561303739633339 +61666564643766343061623031666563663962626533313264323435343734343533656430636230 +34386634613739393433306361643634646266626462626333323936306234393430343331313366 +36363537373735613235383164343764643532316561616530306636636431386336323531306639 +66376435636339613963346463653162373137393531373031316635323561393239633661383035 +62343464336639643463633766396263623966613031633666336666333233316530363961336263 +62346334303363323437356535356665393065313665663566336661356334633637646561646135 +66656664303239336263313765623836393937303937343431666234343064636533363463396434 +35366333393738373063633834323038353065616364383234326531303666643139663431613437 
+38623332333733356434636462643162396137623138663132336131306137623866346339623261 +32373139376636303636643766343864666263383239316437643533303463383866643830646563 +39353138623435633162663932313130303161656462316237353766313465646332326139653066 +33333138626665363766616630333166636530663163366163373432646463303838316134306463 +39383066396237313132636339656166353336386636373336366238623965643139646138376532 +39666235353662663439353263343834653734616337623938643137396134303835363662316263 +32636337303134383737343238643736373565366462313963353434623935616537613064613931 +33656337653866376630316134326431343139306661383162373163353966633565653336643738 +65653630373638616232663966613330303133366166383135366432353865636534633733343561 +64336631653833356639316135343437343631373831666265643763363262633966656337613535 +33613432323431646334633866626633343062656532666234316565396363346332306632303861 +37393739323835363462363362333966393732643565396532613734313938643737666365376236 +63343062303563393061613436623737303634393365306563363563616665336263326337636464 +3739 diff --git a/roles/openvpn/files/keys/dh1024.pem b/roles/openvpn/files/keys/dh1024.pem new file mode 100644 index 0000000..39e2099 --- /dev/null +++ b/roles/openvpn/files/keys/dh1024.pem @@ -0,0 +1,18 @@ +$ANSIBLE_VAULT;1.1;AES256 +38326437373461343039653963383935386135613432376662636163636131656139393365616237 +6239376630626666303034353733383534666438636439640a663935663538366439363165613436 +35616530653061633137343034616633383833626438353131663264333565343635373239643864 +6233623239383637640a363637316237346561376264336534633563613462633464376238623165 +64653165666663663434316638633238313963383931326138396335613931306233343062346337 +65323438656461366132663266336637306435663064306636333631613135356635636136316665 +63343265616261653635303063346161613639636262363835623161626264636139326139366234 +61656336326434303038633532353334356165623438353637653162323462383962666536353938 +33633163343165353634393965663636306630623536343431633866633932666539656666626339 +38386131346365373237346230653962363639373337313130383263636130626133623838383936 +38326433666237393261616162306365336530383232343430613535356261323761626337386633 +64623637333763653462383635333035623164396130383066313238623633356665663937366563 +61333138393537653766346637656261373762636330386263333337633563356263326561313835 +30333931333966333235333732613931346538346237626664616439643737653032376363343662 +35643462646562393934316534386134663566633037613131326434323933373839653963663730 +61356166616566643665666330343039313630646438363239303039653537646566646461313530 +3566 diff --git a/roles/openvpn/files/openvpn_logrotate.conf b/roles/openvpn/files/openvpn_logrotate.conf new file mode 100644 index 0000000..7dac758 --- /dev/null +++ b/roles/openvpn/files/openvpn_logrotate.conf @@ -0,0 +1,9 @@ +/var/log/openvpn.log { +    rotate 4 +    weekly +    missingok +    notifempty +    sharedscripts +    copytruncate +    delaycompress +} diff --git a/roles/openvpn/handlers/main.yml b/roles/openvpn/handlers/main.yml new file mode 100644 index 0000000..befbcf5 --- /dev/null +++ b/roles/openvpn/handlers/main.yml @@ -0,0 +1,12 @@ +--- +- name: daemon-reload +  command: systemctl daemon-reload + +- name: openvpn +  service: name="{{openvpn_service}}" state=restarted + +- name: firewalld +  shell: firewall-cmd --reload + + +  
\ No newline at end of file diff --git a/roles/openvpn/tasks/config.yml b/roles/openvpn/tasks/config.yml new file mode 100644 index 0000000..67fdfa1 --- /dev/null +++ b/roles/openvpn/tasks/config.yml @@ -0,0 +1,28 @@ +- name: create openvpn configuration directory +  file: path="{{openvpn_dir}}" state=directory + +- name: create openvpn key directory +  file: path="{{openvpn_keydir}}" state=directory + +- name: create openvpn client config directory +  file: path="{{openvpn_ccdir}}" state=directory +  when: openvpn_servers in group_names + +- name: copy templates +  template: src="{{item}}" dest="{{openvpn_ccdir}}/{{ item | basename | regex_replace('\.j2','') }}" owner=root group=root mode="0644" +  with_fileglob:  +    - ../templates/{{ openvpn_config }}/ccd/* +  when: openvpn_servers in group_names + +- name: generate cluster templates +  template: src="{{ openvpn_config }}/ccd.j2" dest="{{openvpn_ccdir}}/{{ hostvars[item]['ansible_hostname'] }}" owner=root group=root mode="0644" +  vars: +    id: "{{ hostvars[item]['ands_host_id'] }}" +  with_inventory_hostnames: +    - nodes:!{{openvpn_servers}} +  when: openvpn_servers in group_names + +- name: create openvpn config file +  template: src="{{ openvpn_config }}/{{ (openvpn_servers in group_names) | ternary('openvpn_server.j2', 'openvpn_client.j2') }}" dest="{{ openvpn_config_file }}" owner=root group=root +  notify: +    - openvpn diff --git a/roles/openvpn/tasks/keys.yml b/roles/openvpn/tasks/keys.yml new file mode 100644 index 0000000..dd9f4ec --- /dev/null +++ b/roles/openvpn/tasks/keys.yml @@ -0,0 +1,13 @@ +- name: Copy CA private key +  copy: src="ca/ca.key" dest="{{openvpn_keydir}}/" owner="root" group="root" mode="0400" + +- name: OpenSSL generate request +  command: openssl req -subj '/CN={{ ansible_hostname }}' -new -keyout "node.key" -out "node.csr" -batch -nodes chdir="{{openvpn_keydir}}" creates="{{openvpn_keydir}}/node.csr" + +- name: Generate CA serial file +  copy: content="01" dest="{{openvpn_keydir}}/ca.srl" + +- name: OpenSSL sign the request +  command: openssl x509 -req -days 3650 -in "node.csr" -CA "ca.crt" -CAkey "ca.key" -out "node.crt" chdir="{{openvpn_keydir}}" creates="{{openvpn_keydir}}/node.crt" +  notify: +    - openvpn diff --git a/roles/openvpn/tasks/main.yml b/roles/openvpn/tasks/main.yml new file mode 100644 index 0000000..df49976 --- /dev/null +++ b/roles/openvpn/tasks/main.yml @@ -0,0 +1,62 @@ +--- +- name: Ensure OpenVPN and OpenSSL are installed +  yum: name={{item}} state=present +  with_items: +    - openvpn +    - openssl + +- name: copy openvpn logrotate config file +  copy: src="openvpn_logrotate.conf" dest="/etc/logrotate.d/openvpn.conf" owner="root" group="root" mode="0400" + +- name: Copy CA certificate and the keys +  copy: src="{{ item }}" dest="{{openvpn_keydir}}/" owner="root" group="root" mode="0400" +  with_fileglob:  +    - ca/ca.crt +    - keys/* + +- name: Check if OpenSSL certificate is already generated +  stat: path="{{ openvpn_keydir }}/node.crt"  +  register: result + +- name: setup openvpn keys +  include: keys.yml +  when: result.stat.exists == False  + +- name: Ensure CA key is removed +  file: path="{{openvpn_keydir}}/ca.key" state=absent + +- name: setup openvpn configuration  +  include: config.yml + +- name: Ensure OpenVPN service is enabled +  service: name="{{openvpn_service}}" enabled=yes + +- name: Check if we already reconfigured SystemD Unit +  stat: path={{ item }} +  register: result +  vars: +     item: "/etc/systemd/system/{{openvpn_service}}" + +- name: Copy 
SystemD Unit +  copy: src="/usr/lib/systemd/system/openvpn@.service" dest="{{ item }}" remote_src=true +  vars: +    item: "/etc/systemd/system/{{openvpn_service}}" +  when: result.stat.exists == False  + +- name: Re-configure systemd to start OpenVPN after origin-node +  lineinfile: dest="/etc/systemd/system/{{openvpn_service}}" regexp="^After=" line="After=network.target origin-node.service" state=present +  notify: daemon-reload + +- name: Ensure OpenVPN service is running +  service: name="{{openvpn_service}}" state=started + +- name: Ensure firewalld is running +  service: name=firewalld state=started enabled=yes +  when: openvpn_servers in group_names + +- name: Configure firewalld +  firewalld: port="{{openvpn_port}}/tcp"  state="enabled" permanent="true" immediate="true" +  notify: +    - firewalld +  when: openvpn_servers in group_names + diff --git a/roles/openvpn/templates/katrin/ccd.j2 b/roles/openvpn/templates/katrin/ccd.j2 new file mode 100644 index 0000000..d278648 --- /dev/null +++ b/roles/openvpn/templates/katrin/ccd.j2 @@ -0,0 +1,2 @@ +ifconfig-push {{ openvpn_net | ipaddr(id | int) | ipaddr('address') }} {{ openvpn_net | ipaddr('netmask') }} +push "route 192.168.110.0 255.255.255.0 {{ openvpn_net | ipaddr(181) | ipaddr('address') }}" diff --git a/roles/openvpn/templates/katrin/ccd/ikkatrinadei.ka.fzk.de.j2 b/roles/openvpn/templates/katrin/ccd/ikkatrinadei.ka.fzk.de.j2 new file mode 100644 index 0000000..e1a786d --- /dev/null +++ b/roles/openvpn/templates/katrin/ccd/ikkatrinadei.ka.fzk.de.j2 @@ -0,0 +1,3 @@ +#ifconfig-push clientIP serverIP +ifconfig-push {{ openvpn_net | ipaddr(181) | ipaddr('address') }} {{ openvpn_net | ipaddr('netmask') }} +iroute 192.168.110.0 255.255.255.0 diff --git a/roles/openvpn/templates/katrin/ccd/ipechilinga4.ka.fzk.de.j2 b/roles/openvpn/templates/katrin/ccd/ipechilinga4.ka.fzk.de.j2 new file mode 100644 index 0000000..3673a0b --- /dev/null +++ b/roles/openvpn/templates/katrin/ccd/ipechilinga4.ka.fzk.de.j2 @@ -0,0 +1 @@ +ifconfig-push {{ openvpn_net | ipaddr(90) | ipaddr('address') }} {{ openvpn_net | ipaddr('netmask') }} diff --git a/roles/openvpn/templates/katrin/openvpn_client.j2 b/roles/openvpn/templates/katrin/openvpn_client.j2 new file mode 100644 index 0000000..a09322e --- /dev/null +++ b/roles/openvpn/templates/katrin/openvpn_client.j2 @@ -0,0 +1,24 @@ +client +remote {{openvpn_lb}} {{openvpn_port}} +proto tcp +dev tun + +topology subnet + +ca {{openvpn_keydir}}/ca.crt +cert {{openvpn_keydir}}/node.crt +key {{openvpn_keydir}}/node.key +dh {{openvpn_keydir}}/dh1024.pem + +resolv-retry infinite +keepalive 5 15 +comp-lzo +#user nobody +#group nobody +persist-key +persist-tun + +log /var/log/openvpn_{{openvpn_config}}.log +status /var/log/openvpn_{{openvpn_config}}_status.log +verb 3 + diff --git a/roles/openvpn/templates/katrin/openvpn_server.j2 b/roles/openvpn/templates/katrin/openvpn_server.j2 new file mode 100644 index 0000000..22c200d --- /dev/null +++ b/roles/openvpn/templates/katrin/openvpn_server.j2 @@ -0,0 +1,26 @@ +port {{openvpn_port}} +dev tun + +topology subnet +client-to-client +server {{ openvpn_net | ipaddr('network') }} {{ openvpn_net | ipaddr('netmask') }} +proto tcp + +ca {{openvpn_keydir}}/ca.crt +cert {{openvpn_keydir}}/node.crt +key {{openvpn_keydir}}/node.key +dh {{openvpn_keydir}}/dh1024.pem + +keepalive 10 120 +comp-lzo +#user nobody +#group nobody +persist-key +persist-tun +client-config-dir {{openvpn_ccdir}} +log /var/log/openvpn_{{openvpn_config}}.log +status /var/log/openvpn_{{openvpn_config}}_status.log +verb 
3 + +route 192.168.110.0 255.255.255.0 {{ openvpn_net | ipaddr(181) | ipaddr('address') }} +
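
The docker role above either auto-detects a suitable volume group or creates one on docker_storage_device. A minimal host_vars sketch that pins the choice instead of relying on the detection in roles/docker/tasks/storage.yml; all names and sizes here are hypothetical, not values taken from this repository:

    # host_vars/<node>.yml -- hypothetical example values
    docker_storage_vg: "vg_docker"        # VG that will host the 'docker-pool' LV
    #docker_storage_device: "/dev/sdc"    # alternatively, let storage.yml create the VG on this device
    docker_volume_size: 200               # compared against the VG's free_g (GB) during detection and used to grow the LV
    docker_exclude_vgs: [ "ands_data" ]   # VGs that must never be picked by the auto-detection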
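
The glusterfs role walks glusterfs_domains (ands_storage_domains by default): each entry names a server group, optionally a client group, and a dictionary of volumes whose 'type' selects the tasks/cfg or tasks/tmp variant and whose 'mount' drives mount_domain.yml. A hypothetical entry in that shape (group names, volume names and paths are examples only):

    ands_storage_domains:
      - servers: "ands_storage_servers"   # inventory group that provides the bricks
        clients: "nodes"                  # group on which the volumes get mounted
        volumes:
          provision:
            type: "cfg"                   # picks tasks/cfg/vols{2,3}.yml (replicated across all domain servers)
            mount: "/mnt/provision"
          scratch:
            type: "tmp"                   # picks tasks/tmp/vols{2,3}.yml (no replication)
            mount: "/mnt/scratch"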
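
keepalived.conf.j2 spreads the MASTER role over the play hosts: VIP number i becomes MASTER (with keepalived_master_prio) on the node whose play_hosts index equals (i - 1) modulo the number of nodes, and BACKUP everywhere else. For instance, under the hypothetical assumption of three masters and four VIPs:

    # node 0 (first master):  MASTER for VIP 1 and VIP 4, BACKUP for VIPs 2 and 3
    # node 1 (second master): MASTER for VIP 2, BACKUP for the rest
    # node 2 (third master):  MASTER for VIP 3, BACKUP for the rest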
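
roles/openvpn/defaults/main.yml derives a per-server tunnel subnet from the host's position in the openvpn_servers group. A short worked example, assuming hypothetical inventory values rather than the real ones:

    # katrin_openvpn_network:       "192.168.224.0/20"   (hypothetical)
    # katrin_openvpn_subnet_bits:   24                   (hypothetical)
    # katrin_openvpn_subnet_offset: 4                    (hypothetical)
    # For the second host of the 'masters' group, openvpn_server_id == 1,
    # so openvpn_subnet_id == 4 + 1 == 5 and:
    - name: Show the derived OpenVPN subnet (hypothetical values, requires python-netaddr)
      debug:
        msg: "{{ '192.168.224.0/20' | ipsubnet(24, 5) }}"   # -> 192.168.229.0/24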
