Diffstat:
-rw-r--r--    22  .redhat-ci.inventory
-rw-r--r--    45  .redhat-ci.yml
-rw-r--r--     2  .tito/packages/openshift-ansible
-rw-r--r--    17  filter_plugins/oo_filters.py
-rw-r--r--     3  inventory/byo/hosts.origin.example
-rw-r--r--     3  inventory/byo/hosts.ose.example
-rw-r--r--    20  openshift-ansible.spec
-rw-r--r--     2  playbooks/byo/openshift-cluster/cluster_hosts.yml
-rw-r--r--     4  playbooks/byo/openshift-cluster/openshift-metrics.yml
-rw-r--r--     5  playbooks/byo/openshift-cluster/upgrades/README.md
-rw-r--r--    10  playbooks/byo/openshift-cluster/upgrades/v3_6/README.md
-rw-r--r--     4  playbooks/common/openshift-cluster/config.yml
-rw-r--r--    13  playbooks/common/openshift-cluster/evaluate_groups.yml
-rw-r--r--     2  playbooks/common/openshift-cluster/openshift_metrics.yml
-rw-r--r--     6  playbooks/common/openshift-cluster/redeploy-certificates/ca.yml
-rw-r--r--    21  playbooks/common/openshift-glusterfs/config.yml
l---------     1  playbooks/common/openshift-glusterfs/filter_plugins
l---------     1  playbooks/common/openshift-glusterfs/lookup_plugins
l---------     1  playbooks/common/openshift-glusterfs/roles
-rw-r--r--     2  roles/etcd/templates/etcd.docker.service
-rwxr-xr-x     4  roles/openshift_facts/library/openshift_facts.py
-rw-r--r--     6  roles/openshift_hosted/tasks/registry/registry.yml
-rw-r--r--    51  roles/openshift_hosted/tasks/registry/storage/glusterfs.yml
-rw-r--r--     7  roles/openshift_logging/templates/es.j2
-rw-r--r--     1  roles/openshift_node_upgrade/meta/main.yml
-rw-r--r--    60  roles/openshift_storage_glusterfs/README.md
-rw-r--r--    17  roles/openshift_storage_glusterfs/defaults/main.yml
-rw-r--r--   115  roles/openshift_storage_glusterfs/files/v1.6/deploy-heketi-template.yml
-rw-r--r--    10  roles/openshift_storage_glusterfs/files/v1.6/glusterfs-registry-service.yml
-rw-r--r--   128  roles/openshift_storage_glusterfs/files/v1.6/glusterfs-template.yml
-rw-r--r--   113  roles/openshift_storage_glusterfs/files/v1.6/heketi-template.yml
-rw-r--r--    23  roles/openshift_storage_glusterfs/filter_plugins/openshift_storage_glusterfs.py
-rw-r--r--    15  roles/openshift_storage_glusterfs/meta/main.yml
-rw-r--r--   107  roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
-rw-r--r--    48  roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
-rw-r--r--    41  roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml
-rw-r--r--   109  roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
-rw-r--r--   182  roles/openshift_storage_glusterfs/tasks/main.yml
-rw-r--r--    11  roles/openshift_storage_glusterfs/templates/v1.6/glusterfs-registry-endpoints.yml.j2
-rw-r--r--    39  roles/openshift_storage_glusterfs/templates/v1.6/topology.json.j2
-rw-r--r--    56  roles/openshift_version/tasks/main.yml
-rw-r--r--    48  setup.py
-rw-r--r--     2  tox.ini
43 files changed, 1345 insertions(+), 32 deletions(-)
diff --git a/.redhat-ci.inventory b/.redhat-ci.inventory
new file mode 100644
index 000000000..3c8296055
--- /dev/null
+++ b/.redhat-ci.inventory
@@ -0,0 +1,22 @@
+[OSEv3:children]
+masters
+nodes
+etcd
+
+[OSEv3:vars]
+ansible_ssh_user=root
+ansible_python_interpreter=/usr/bin/python3
+deployment_type=origin
+openshift_image_tag="{{ lookup('env', 'OPENSHIFT_IMAGE_TAG') }}"
+openshift_master_default_subdomain="{{ lookup('env', 'RHCI_ocp_node1_IP') }}.xip.io"
+
+[masters]
+ocp-master
+
+[etcd]
+ocp-master
+
+[nodes]
+ocp-master openshift_schedulable=false
+ocp-node1 openshift_node_labels="{'region':'infra'}"
+ocp-node2 openshift_node_labels="{'region':'infra'}"
diff --git a/.redhat-ci.yml b/.redhat-ci.yml
new file mode 100644
index 000000000..d9849ed60
--- /dev/null
+++ b/.redhat-ci.yml
@@ -0,0 +1,45 @@
+---
+
+cluster:
+ hosts:
+ - name: ocp-master
+ distro: fedora/25/atomic
+ - name: ocp-node1
+ distro: fedora/25/atomic
+ - name: ocp-node2
+ distro: fedora/25/atomic
+ container:
+ image: fedora:25
+
+packages:
+ - gcc
+ - python-pip
+ - python-devel
+ - openssl-devel
+ - redhat-rpm-config
+
+context: 'fedora/25/atomic | origin/v1.5.0-rc.0'
+
+env:
+ OPENSHIFT_IMAGE_TAG: v1.5.0-rc.0
+
+tests:
+ - pip install ansible==2.2.2.0 # F25 currently has 2.2.1, so install from pypi
+ - ansible -vvv -i .redhat-ci.inventory nodes -a 'rpm-ostree status'
+ - ansible-playbook -vvv -i .redhat-ci.inventory playbooks/byo/config.yml
+ # run a small subset of origin conformance tests to sanity check the cluster
+ # NB: we run it on the master since we may be in a different OSP network
+ - ssh ocp-master docker run --rm --net=host --privileged
+ -v /etc/origin/master/admin.kubeconfig:/config fedora:25 sh -c
+ '"dnf install -y origin-tests &&
+ KUBECONFIG=/config /usr/libexec/origin/extended.test --ginkgo.v=1
+ --ginkgo.noColor --ginkgo.focus=\"Services.*NodePort|EmptyDir\""'
+
+---
+
+inherit: true
+
+context: 'fedora/25/atomic | origin/v3.6.0-alpha.0'
+
+env:
+ OPENSHIFT_IMAGE_TAG: v3.6.0-alpha.0
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index 081c93533..d5fb56cda 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.6.25-1 ./
+3.6.29-1 ./
diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py
index b550bd16a..10c8600ba 100644
--- a/filter_plugins/oo_filters.py
+++ b/filter_plugins/oo_filters.py
@@ -773,6 +773,23 @@ def oo_persistent_volumes(hostvars, groups, persistent_volumes=None):
fsType=filesystem,
volumeID=volume_id)))
persistent_volumes.append(persistent_volume)
+ elif kind == 'glusterfs':
+ volume = params['volume']['name']
+ size = params['volume']['size']
+ access_modes = params['access']['modes']
+ endpoints = params['glusterfs']['endpoints']
+ path = params['glusterfs']['path']
+ read_only = params['glusterfs']['readOnly']
+ persistent_volume = dict(
+ name="{0}-volume".format(volume),
+ capacity=size,
+ access_modes=access_modes,
+ storage=dict(
+ glusterfs=dict(
+ endpoints=endpoints,
+ path=path,
+ readOnly=read_only)))
+ persistent_volumes.append(persistent_volume)
elif not (kind == 'object' or kind == 'dynamic'):
msg = "|failed invalid storage kind '{0}' for component '{1}'".format(
kind,
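For reference, here is a sketch of the persistent volume dict the new `glusterfs` branch would emit, assuming hypothetical registry storage facts (volume name `registry`, size `10Gi`, and the glusterfs defaults added to openshift_facts.py below):

```
# Rendered as YAML; input values are illustrative, not module output.
- name: registry-volume            # "{0}-volume".format(volume)
  capacity: 10Gi
  access_modes:
  - ReadWriteMany
  storage:
    glusterfs:
      endpoints: glusterfs-registry-endpoints
      path: glusterfs-registry-volume
      readOnly: false
```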
diff --git a/inventory/byo/hosts.origin.example b/inventory/byo/hosts.origin.example
index a99423411..f70971537 100644
--- a/inventory/byo/hosts.origin.example
+++ b/inventory/byo/hosts.origin.example
@@ -426,6 +426,9 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_hosted_registry_storage_openstack_volumeID=3a650b4f-c8c5-4e0a-8ca5-eaee11f16c57
#openshift_hosted_registry_storage_volume_size=10Gi
#
+# Native GlusterFS Registry Storage
+#openshift_hosted_registry_storage_kind=glusterfs
+#
# AWS S3
# S3 bucket must already exist.
#openshift_hosted_registry_storage_kind=object
diff --git a/inventory/byo/hosts.ose.example b/inventory/byo/hosts.ose.example
index 9774aa66b..f5e0de1b0 100644
--- a/inventory/byo/hosts.ose.example
+++ b/inventory/byo/hosts.ose.example
@@ -426,6 +426,9 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_hosted_registry_storage_openstack_volumeID=3a650b4f-c8c5-4e0a-8ca5-eaee11f16c57
#openshift_hosted_registry_storage_volume_size=10Gi
#
+# Native GlusterFS Registry Storage
+#openshift_hosted_registry_storage_kind=glusterfs
+#
# AWS S3
#
# S3 bucket must already exist.
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index 042717704..24ae5ba85 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -9,7 +9,7 @@
%global __requires_exclude ^/usr/bin/ansible-playbook$
Name: openshift-ansible
-Version: 3.6.25
+Version: 3.6.29
Release: 1%{?dist}
Summary: Openshift and Atomic Enterprise Ansible
License: ASL 2.0
@@ -76,6 +76,9 @@ find -L %{buildroot}%{_datadir}/ansible/%{name}/playbooks -name filter_plugins -
cp -rp roles %{buildroot}%{_datadir}/ansible/%{name}/
# remove contiv role
rm -rf %{buildroot}%{_datadir}/ansible/%{name}/roles/contiv/*
+# touch a file in contiv so that it can be added to SCMs
+touch %{buildroot}%{_datadir}/ansible/%{name}/roles/contiv/.empty_dir
+
# openshift_master_facts symlinks filter_plugins/oo_filters.py from ansible_plugins/filter_plugins
pushd %{buildroot}%{_datadir}/ansible/%{name}/roles/openshift_master_facts/filter_plugins
ln -sf ../../../../../ansible_plugins/filter_plugins/oo_filters.py oo_filters.py
@@ -270,6 +273,21 @@ Atomic OpenShift Utilities includes
%changelog
+* Wed Apr 19 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.29-1
+- Create openshift-metrics entrypoint playbook (rteague@redhat.com)
+
+* Tue Apr 18 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.28-1
+- Minor v3.6 upgrade docs fixes (rteague@redhat.com)
+
+* Tue Apr 18 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.27-1
+- repo: start testing PRs on Fedora Atomic Host (jlebon@redhat.com)
+
+* Tue Apr 18 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.26-1
+- Correct role dependencies (rteague@redhat.com)
+- Allow for GlusterFS to provide registry storage (jarrpa@redhat.com)
+- Integrate GlusterFS into OpenShift installation (jarrpa@redhat.com)
+- GlusterFS playbook and role (jarrpa@redhat.com)
+
* Mon Apr 17 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.25-1
- Fix default image tag for enterprise (sdodson@redhat.com)
- Cast etcd_debug to a boolean (skuznets@redhat.com)
diff --git a/playbooks/byo/openshift-cluster/cluster_hosts.yml b/playbooks/byo/openshift-cluster/cluster_hosts.yml
index cb464cf0d..268a65415 100644
--- a/playbooks/byo/openshift-cluster/cluster_hosts.yml
+++ b/playbooks/byo/openshift-cluster/cluster_hosts.yml
@@ -13,6 +13,8 @@ g_new_node_hosts: "{{ groups.new_nodes | default([]) }}"
g_nfs_hosts: "{{ groups.nfs | default([]) }}"
+g_glusterfs_hosts: "{{ groups.glusterfs | default([]) }}"
+
g_all_hosts: "{{ g_master_hosts | union(g_node_hosts) | union(g_etcd_hosts)
| union(g_lb_hosts) | union(g_nfs_hosts)
| union(g_new_node_hosts)| union(g_new_master_hosts)
diff --git a/playbooks/byo/openshift-cluster/openshift-metrics.yml b/playbooks/byo/openshift-cluster/openshift-metrics.yml
new file mode 100644
index 000000000..5ad3a1a01
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/openshift-metrics.yml
@@ -0,0 +1,4 @@
+---
+- include: initialize_groups.yml
+
+- include: ../../common/openshift-cluster/openshift_metrics.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/README.md b/playbooks/byo/openshift-cluster/upgrades/README.md
index 0425ba518..0f64f40f3 100644
--- a/playbooks/byo/openshift-cluster/upgrades/README.md
+++ b/playbooks/byo/openshift-cluster/upgrades/README.md
@@ -4,5 +4,6 @@ cluster. Additional notes for the associated upgrade playbooks are
provided in their respective directories.
# Upgrades available
-- [OpenShift Enterprise 3.4 to 3.5](v3_5/README.md) (works also to upgrade OpenShift origin from 1.4.x to 1.5.x)
-- [OpenShift Enterprise 3.3 to 3.4](v3_4/README.md) (works also to upgrade OpenShift origin from 1.3.x to 1.4.x)
+- [OpenShift Container Platform 3.5 to 3.6](v3_6/README.md) (works also to upgrade OpenShift Origin from 1.5.x to 3.6.x)
+- [OpenShift Container Platform 3.4 to 3.5](v3_5/README.md) (works also to upgrade OpenShift Origin from 1.4.x to 1.5.x)
+- [OpenShift Container Platform 3.3 to 3.4](v3_4/README.md) (works also to upgrade OpenShift Origin from 1.3.x to 1.4.x)
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_6/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_6/README.md
index 930cc753c..797af671a 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_6/README.md
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_6/README.md
@@ -1,11 +1,10 @@
-# v3.5 Major and Minor Upgrade Playbook
+# v3.6 Major and Minor Upgrade Playbook
## Overview
-This playbook currently performs the
-following steps.
+This playbook currently performs the following steps.
* Upgrade and restart master services
- * Unschedule node.
+ * Unschedule node
* Upgrade and restart docker
* Upgrade and restart node services
* Modifies the subset of the configuration necessary
@@ -15,4 +14,7 @@ following steps.
* Updates image streams and quickstarts
## Usage
+
+```
ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml
+```
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
index 3c70db6a9..239bb211b 100644
--- a/playbooks/common/openshift-cluster/config.yml
+++ b/playbooks/common/openshift-cluster/config.yml
@@ -31,6 +31,10 @@
tags:
- node
+- include: ../openshift-glusterfs/config.yml
+ tags:
+ - glusterfs
+
- include: openshift_hosted.yml
tags:
- hosted
diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml
index 45a4875a3..6aac70f63 100644
--- a/playbooks/common/openshift-cluster/evaluate_groups.yml
+++ b/playbooks/common/openshift-cluster/evaluate_groups.yml
@@ -29,6 +29,10 @@
msg: The nfs group must be limited to one host
when: "{{ (groups[g_nfs_hosts] | default([])) | length > 1 }}"
+ - fail:
+ msg: This playbook requires g_glusterfs_hosts to be set
+ when: "{{ g_glusterfs_hosts is not defined }}"
+
- name: Evaluate oo_all_hosts
add_host:
name: "{{ item }}"
@@ -119,3 +123,12 @@
ansible_become: "{{ g_sudo | default(omit) }}"
with_items: "{{ g_nfs_hosts | default([]) }}"
changed_when: no
+
+ - name: Evaluate oo_glusterfs_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_glusterfs_to_config
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
+ with_items: "{{ g_glusterfs_hosts | default([]) }}"
+ changed_when: no
diff --git a/playbooks/common/openshift-cluster/openshift_metrics.yml b/playbooks/common/openshift-cluster/openshift_metrics.yml
index 9f38ceea6..bcff4a1a1 100644
--- a/playbooks/common/openshift-cluster/openshift_metrics.yml
+++ b/playbooks/common/openshift-cluster/openshift_metrics.yml
@@ -1,4 +1,6 @@
---
+- include: evaluate_groups.yml
+
- name: OpenShift Metrics
hosts: oo_first_master
roles:
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/ca.yml b/playbooks/common/openshift-cluster/redeploy-certificates/ca.yml
index 3b26abcc7..4fa7f9cdf 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/ca.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/ca.yml
@@ -130,7 +130,7 @@
state: absent
changed_when: false
-- include: ../openshift-etcd/restart.yml
+- include: ../../openshift-etcd/restart.yml
# Update master config when ca-bundle not referenced. Services will be
# restarted below after new CA certificate has been distributed.
@@ -322,7 +322,7 @@
group: "{{ 'root' if item == 'root' else _ansible_ssh_user_gid.stdout }}"
with_items: "{{ client_users }}"
-- include: ../openshift-master/restart.yml
+- include: ../../openshift-master/restart.yml
- name: Distribute OpenShift CA certificate to nodes
hosts: oo_nodes_to_config
@@ -371,4 +371,4 @@
state: absent
changed_when: false
-- include: ../openshift-node/restart.yml
+- include: ../../openshift-node/restart.yml
diff --git a/playbooks/common/openshift-glusterfs/config.yml b/playbooks/common/openshift-glusterfs/config.yml
new file mode 100644
index 000000000..75faf5ba8
--- /dev/null
+++ b/playbooks/common/openshift-glusterfs/config.yml
@@ -0,0 +1,21 @@
+---
+- name: Open firewall ports for GlusterFS
+ hosts: oo_glusterfs_to_config
+ vars:
+ os_firewall_allow:
+ - service: glusterfs_sshd
+ port: "2222/tcp"
+ - service: glusterfs_daemon
+ port: "24007/tcp"
+ - service: glusterfs_management
+ port: "24008/tcp"
+ - service: glusterfs_bricks
+ port: "49152-49251/tcp"
+ roles:
+ - os_firewall
+
+- name: Configure GlusterFS
+ hosts: oo_first_master
+ roles:
+ - role: openshift_storage_glusterfs
+ when: groups.oo_glusterfs_to_config | default([]) | count > 0
diff --git a/playbooks/common/openshift-glusterfs/filter_plugins b/playbooks/common/openshift-glusterfs/filter_plugins
new file mode 120000
index 000000000..99a95e4ca
--- /dev/null
+++ b/playbooks/common/openshift-glusterfs/filter_plugins
@@ -0,0 +1 @@
+../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-glusterfs/lookup_plugins b/playbooks/common/openshift-glusterfs/lookup_plugins
new file mode 120000
index 000000000..ac79701db
--- /dev/null
+++ b/playbooks/common/openshift-glusterfs/lookup_plugins
@@ -0,0 +1 @@
+../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-glusterfs/roles b/playbooks/common/openshift-glusterfs/roles
new file mode 120000
index 000000000..e2b799b9d
--- /dev/null
+++ b/playbooks/common/openshift-glusterfs/roles
@@ -0,0 +1 @@
+../../../roles/ \ No newline at end of file
diff --git a/roles/etcd/templates/etcd.docker.service b/roles/etcd/templates/etcd.docker.service
index ae059b549..e4d1b57e6 100644
--- a/roles/etcd/templates/etcd.docker.service
+++ b/roles/etcd/templates/etcd.docker.service
@@ -7,7 +7,7 @@ PartOf=docker.service
[Service]
EnvironmentFile=/etc/etcd/etcd.conf
ExecStartPre=-/usr/bin/docker rm -f {{ etcd_service }}
-ExecStart=/usr/bin/docker run --name {{ etcd_service }} --rm -v /var/lib/etcd:/var/lib/etcd:z -v /etc/etcd:/etc/etcd:ro --env-file=/etc/etcd/etcd.conf --net=host --entrypoint=/usr/bin/etcd {{ openshift.etcd.etcd_image }}
+ExecStart=/usr/bin/docker run --name {{ etcd_service }} --rm -v /var/lib/etcd:/var/lib/etcd:z -v /etc/etcd:/etc/etcd:ro --env-file=/etc/etcd/etcd.conf --env-file=/etc/sysconfig/etcd --net=host --entrypoint=/usr/bin/etcd {{ openshift.etcd.etcd_image }}
ExecStop=/usr/bin/docker stop {{ etcd_service }}
SyslogIdentifier=etcd_container
Restart=always
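The extra `--env-file=/etc/sysconfig/etcd` gives operators a hook for passing etcd environment overrides without editing the unit file. A sketch of what that file might contain, assuming it exists on the host (these are standard etcd environment variables; the values are illustrative):

```
# /etc/sysconfig/etcd -- shell-style env file consumed by docker run
ETCD_DEBUG=true
ETCD_LOG_PACKAGE_LEVELS=etcdserver=WARNING
```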
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 7edf141e5..adeb85c3f 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -2155,6 +2155,10 @@ class OpenShiftFacts(object):
nfs=dict(
directory='/exports',
options='*(rw,root_squash)'),
+ glusterfs=dict(
+ endpoints='glusterfs-registry-endpoints',
+ path='glusterfs-registry-volume',
+ readOnly=False),
host=None,
access=dict(
modes=['ReadWriteMany']
diff --git a/roles/openshift_hosted/tasks/registry/registry.yml b/roles/openshift_hosted/tasks/registry/registry.yml
index 0b8042473..6e691c26f 100644
--- a/roles/openshift_hosted/tasks/registry/registry.yml
+++ b/roles/openshift_hosted/tasks/registry/registry.yml
@@ -109,7 +109,7 @@
type: persistentVolumeClaim
claim_name: "{{ openshift.hosted.registry.storage.volume.name }}-claim"
when:
- - openshift.hosted.registry.storage.kind | default(none) in ['nfs', 'openstack']
+ - openshift.hosted.registry.storage.kind | default(none) in ['nfs', 'openstack', 'glusterfs']
- name: Create OpenShift registry
oc_adm_registry:
@@ -123,3 +123,7 @@
volume_mounts: "{{ openshift_hosted_registry_volumes }}"
edits: "{{ openshift_hosted_registry_edits }}"
force: "{{ True|bool in openshift_hosted_registry_force }}"
+
+- include: storage/glusterfs.yml
+ when:
+ - openshift.hosted.registry.storage.kind | default(none) == 'glusterfs'
diff --git a/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml b/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml
new file mode 100644
index 000000000..b18b24266
--- /dev/null
+++ b/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml
@@ -0,0 +1,51 @@
+---
+- name: Wait for registry pods
+ oc_obj:
+ namespace: "{{ openshift_hosted_registry_namespace }}"
+ state: list
+ kind: pod
+ selector: "{{ openshift_hosted_registry_name }}={{ openshift_hosted_registry_namespace }}"
+ register: registry_pods
+ until:
+ - "registry_pods.results.results[0]['items'] | count > 0"
+ # There must be as many matching pods with 'Ready' status True as there are expected replicas
+ - "registry_pods.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == openshift_hosted_registry_replicas | int"
+ delay: 10
+ retries: "{{ (600 / 10) | int }}"
+
+- name: Determine registry fsGroup
+ set_fact:
+ openshift_hosted_registry_fsgroup: "{{ registry_pods.results.results[0]['items'][0].spec.securityContext.fsGroup }}"
+
+- name: Create temp mount directory
+ command: mktemp -d /tmp/openshift-glusterfs-registry-XXXXXX
+ register: mktemp
+ changed_when: False
+ check_mode: no
+
+- name: Mount registry volume
+ mount:
+ state: mounted
+ fstype: glusterfs
+ src: "{{ groups.oo_glusterfs_to_config[0] }}:/{{ openshift.hosted.registry.storage.glusterfs.path }}"
+ name: "{{ mktemp.stdout }}"
+
+- name: Set registry volume permissions
+ file:
+ dest: "{{ mktemp.stdout }}"
+ state: directory
+ group: "{{ openshift_hosted_registry_fsgroup }}"
+ mode: "2775"
+ recurse: True
+
+- name: Unmount registry volume
+ mount:
+ state: unmounted
+ name: "{{ mktemp.stdout }}"
+
+- name: Delete temp mount directory
+ file:
+ dest: "{{ mktemp.stdout }}"
+ state: absent
+ changed_when: False
+ check_mode: no
diff --git a/roles/openshift_logging/templates/es.j2 b/roles/openshift_logging/templates/es.j2
index 16185fc1d..f89855bf5 100644
--- a/roles/openshift_logging/templates/es.j2
+++ b/roles/openshift_logging/templates/es.j2
@@ -95,6 +95,13 @@ spec:
readOnly: true
- name: elasticsearch-storage
mountPath: /elasticsearch/persistent
+ readinessProbe:
+ exec:
+ command:
+ - "/usr/share/elasticsearch/probe/readiness.sh"
+ initialDelaySeconds: 5
+ timeoutSeconds: 4
+ periodSeconds: 5
volumes:
- name: elasticsearch
secret:
diff --git a/roles/openshift_node_upgrade/meta/main.yml b/roles/openshift_node_upgrade/meta/main.yml
index cd2f362aa..2a36d8945 100644
--- a/roles/openshift_node_upgrade/meta/main.yml
+++ b/roles/openshift_node_upgrade/meta/main.yml
@@ -10,4 +10,5 @@ galaxy_info:
versions:
- 7
dependencies:
+- role: lib_utils
- role: openshift_common
diff --git a/roles/openshift_storage_glusterfs/README.md b/roles/openshift_storage_glusterfs/README.md
new file mode 100644
index 000000000..cf0fb94c9
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/README.md
@@ -0,0 +1,60 @@
+OpenShift GlusterFS Cluster
+===========================
+
+OpenShift GlusterFS Cluster Installation
+
+Requirements
+------------
+
+* Ansible 2.2
+
+Role Variables
+--------------
+
+From this role:
+
+| Name | Default value | Description |
+|--------------------------------------------------|-------------------------|-----------------------------------------|
+| openshift_storage_glusterfs_timeout | 300 | Seconds to wait for pods to become ready
+| openshift_storage_glusterfs_namespace | 'default' | Namespace in which to create GlusterFS resources
+| openshift_storage_glusterfs_is_native | True | GlusterFS should be containerized
+| openshift_storage_glusterfs_nodeselector | 'storagenode=glusterfs' | Selector to determine which nodes will host GlusterFS pods in native mode
+| openshift_storage_glusterfs_image | 'gluster/gluster-centos'| Container image to use for GlusterFS pods, enterprise default is 'rhgs3/rhgs-server-rhel7'
+| openshift_storage_glusterfs_version | 'latest' | Container image version to use for GlusterFS pods
+| openshift_storage_glusterfs_wipe | False | Destroy any existing GlusterFS resources and wipe storage devices. **WARNING: THIS WILL DESTROY ANY DATA ON THOSE DEVICES.**
+| openshift_storage_glusterfs_heketi_is_native | True | heketi should be containerized
+| openshift_storage_glusterfs_heketi_image | 'heketi/heketi' | Container image to use for heketi pods, enterprise default is 'rhgs3/rhgs-volmanager-rhel7'
+| openshift_storage_glusterfs_heketi_version | 'latest' | Container image version to use for heketi pods
+| openshift_storage_glusterfs_heketi_admin_key | '' | String to use as secret key for performing heketi commands as admin
+| openshift_storage_glusterfs_heketi_user_key | '' | String to use as secret key for performing heketi commands as user that can only view or modify volumes
+| openshift_storage_glusterfs_heketi_topology_load | True | Load the GlusterFS topology information into heketi
+| openshift_storage_glusterfs_heketi_url | Undefined | URL for the heketi REST API, dynamically determined in native mode
+| openshift_storage_glusterfs_heketi_wipe | False | Destroy any existing heketi resources, defaults to the value of `openshift_storage_glusterfs_wipe`
+
+Dependencies
+------------
+
+* os_firewall
+* openshift_hosted_facts
+* openshift_repos
+* lib_openshift
+
+Example Playbook
+----------------
+
+```
+- name: Configure GlusterFS hosts
+ hosts: oo_first_master
+ roles:
+ - role: openshift_storage_glusterfs
+```
+
+License
+-------
+
+Apache License, Version 2.0
+
+Author Information
+------------------
+
+Jose A. Rivera (jarrpa@redhat.com)
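As a usage sketch for the variables documented above -- illustrative values only, set wherever your inventory keeps OSEv3 group variables:

```
# group_vars/OSEv3.yml (hypothetical)
openshift_storage_glusterfs_namespace: glusterfs
openshift_storage_glusterfs_timeout: 600
openshift_storage_glusterfs_wipe: False
openshift_storage_glusterfs_heketi_admin_key: 'example-admin-secret'
openshift_storage_glusterfs_heketi_user_key: 'example-user-secret'
```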
diff --git a/roles/openshift_storage_glusterfs/defaults/main.yml b/roles/openshift_storage_glusterfs/defaults/main.yml
new file mode 100644
index 000000000..ade850747
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/defaults/main.yml
@@ -0,0 +1,17 @@
+---
+openshift_storage_glusterfs_timeout: 300
+openshift_storage_glusterfs_namespace: 'default'
+openshift_storage_glusterfs_is_native: True
+openshift_storage_glusterfs_nodeselector: "{{ openshift_storage_glusterfs_nodeselector_label | default('storagenode=glusterfs') | map_from_pairs }}"
+openshift_storage_glusterfs_image: "{{ 'rhgs3/rhgs-server-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'gluster/gluster-centos' | quote }}"
+openshift_storage_glusterfs_version: 'latest'
+openshift_storage_glusterfs_wipe: False
+openshift_storage_glusterfs_heketi_is_native: True
+openshift_storage_glusterfs_heketi_is_missing: True
+openshift_storage_glusterfs_heketi_deploy_is_missing: True
+openshift_storage_glusterfs_heketi_image: "{{ 'rhgs3/rhgs-volmanager-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'heketi/heketi' | quote }}"
+openshift_storage_glusterfs_heketi_version: 'latest'
+openshift_storage_glusterfs_heketi_admin_key: ''
+openshift_storage_glusterfs_heketi_user_key: ''
+openshift_storage_glusterfs_heketi_topology_load: True
+openshift_storage_glusterfs_heketi_wipe: "{{ openshift_storage_glusterfs_wipe }}"
diff --git a/roles/openshift_storage_glusterfs/files/v1.6/deploy-heketi-template.yml b/roles/openshift_storage_glusterfs/files/v1.6/deploy-heketi-template.yml
new file mode 100644
index 000000000..c9945be13
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v1.6/deploy-heketi-template.yml
@@ -0,0 +1,115 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: deploy-heketi
+ labels:
+ glusterfs: heketi-template
+ deploy-heketi: support
+ annotations:
+ description: Bootstrap Heketi installation
+ tags: glusterfs,heketi,installation
+labels:
+ template: deploy-heketi
+objects:
+- kind: Service
+ apiVersion: v1
+ metadata:
+ name: deploy-heketi
+ labels:
+ glusterfs: deploy-heketi-service
+ deploy-heketi: support
+ annotations:
+ description: Exposes Heketi service
+ spec:
+ ports:
+ - name: deploy-heketi
+ port: 8080
+ targetPort: 8080
+ selector:
+ name: deploy-heketi
+- kind: Route
+ apiVersion: v1
+ metadata:
+ name: deploy-heketi
+ labels:
+ glusterfs: deploy-heketi-route
+ deploy-heketi: support
+ spec:
+ to:
+ kind: Service
+ name: deploy-heketi
+- kind: DeploymentConfig
+ apiVersion: v1
+ metadata:
+ name: deploy-heketi
+ labels:
+ glusterfs: deploy-heketi-dc
+ deploy-heketi: support
+ annotations:
+ description: Defines how to deploy Heketi
+ spec:
+ replicas: 1
+ selector:
+ name: deploy-heketi
+ triggers:
+ - type: ConfigChange
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: deploy-heketi
+ labels:
+ name: deploy-heketi
+ glusterfs: deploy-heketi-pod
+ deploy-heketi: support
+ spec:
+ serviceAccountName: heketi-service-account
+ containers:
+ - name: deploy-heketi
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ env:
+ - name: HEKETI_USER_KEY
+ value: ${HEKETI_USER_KEY}
+ - name: HEKETI_ADMIN_KEY
+ value: ${HEKETI_ADMIN_KEY}
+ - name: HEKETI_EXECUTOR
+ value: kubernetes
+ - name: HEKETI_FSTAB
+ value: /var/lib/heketi/fstab
+ - name: HEKETI_SNAPSHOT_LIMIT
+ value: '14'
+ - name: HEKETI_KUBE_GLUSTER_DAEMONSET
+ value: '1'
+ ports:
+ - containerPort: 8080
+ volumeMounts:
+ - name: db
+ mountPath: /var/lib/heketi
+ readinessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 3
+ httpGet:
+ path: /hello
+ port: 8080
+ livenessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 30
+ httpGet:
+ path: /hello
+ port: 8080
+ volumes:
+ - name: db
+parameters:
+- name: HEKETI_USER_KEY
+ displayName: Heketi User Secret
+ description: Set secret for those creating volumes as type _user_
+- name: HEKETI_ADMIN_KEY
+ displayName: Heketi Administrator Secret
+ description: Set secret for administration of the Heketi service as user _admin_
+- name: IMAGE_NAME
+ displayName: heketi container name
+ required: True
+- name: IMAGE_VERSION
+ displayName: heketi container version
+ required: True
diff --git a/roles/openshift_storage_glusterfs/files/v1.6/glusterfs-registry-service.yml b/roles/openshift_storage_glusterfs/files/v1.6/glusterfs-registry-service.yml
new file mode 100644
index 000000000..3f8d8f507
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v1.6/glusterfs-registry-service.yml
@@ -0,0 +1,10 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: glusterfs-registry-endpoints
+spec:
+ ports:
+ - port: 1
+status:
+ loadBalancer: {}
diff --git a/roles/openshift_storage_glusterfs/files/v1.6/glusterfs-template.yml b/roles/openshift_storage_glusterfs/files/v1.6/glusterfs-template.yml
new file mode 100644
index 000000000..c66705752
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v1.6/glusterfs-template.yml
@@ -0,0 +1,128 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: glusterfs
+ labels:
+ glusterfs: template
+ annotations:
+ description: GlusterFS DaemonSet template
+ tags: glusterfs
+objects:
+- kind: DaemonSet
+ apiVersion: extensions/v1beta1
+ metadata:
+ name: glusterfs
+ labels:
+ glusterfs: daemonset
+ annotations:
+ description: GlusterFS DaemonSet
+ tags: glusterfs
+ spec:
+ selector:
+ matchLabels:
+ glusterfs-node: pod
+ template:
+ metadata:
+ name: glusterfs
+ labels:
+ glusterfs-node: pod
+ spec:
+ nodeSelector:
+ storagenode: glusterfs
+ hostNetwork: true
+ containers:
+ - name: glusterfs
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ imagePullPolicy: IfNotPresent
+ volumeMounts:
+ - name: glusterfs-heketi
+ mountPath: "/var/lib/heketi"
+ - name: glusterfs-run
+ mountPath: "/run"
+ - name: glusterfs-lvm
+ mountPath: "/run/lvm"
+ - name: glusterfs-etc
+ mountPath: "/etc/glusterfs"
+ - name: glusterfs-logs
+ mountPath: "/var/log/glusterfs"
+ - name: glusterfs-config
+ mountPath: "/var/lib/glusterd"
+ - name: glusterfs-dev
+ mountPath: "/dev"
+ - name: glusterfs-misc
+ mountPath: "/var/lib/misc/glusterfsd"
+ - name: glusterfs-cgroup
+ mountPath: "/sys/fs/cgroup"
+ readOnly: true
+ - name: glusterfs-ssl
+ mountPath: "/etc/ssl"
+ readOnly: true
+ securityContext:
+ capabilities: {}
+ privileged: true
+ readinessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 100
+ exec:
+ command:
+ - "/bin/bash"
+ - "-c"
+ - systemctl status glusterd.service
+ periodSeconds: 10
+ successThreshold: 1
+ failureThreshold: 3
+ livenessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 100
+ exec:
+ command:
+ - "/bin/bash"
+ - "-c"
+ - systemctl status glusterd.service
+ periodSeconds: 10
+ successThreshold: 1
+ failureThreshold: 3
+ resources: {}
+ terminationMessagePath: "/dev/termination-log"
+ volumes:
+ - name: glusterfs-heketi
+ hostPath:
+ path: "/var/lib/heketi"
+ - name: glusterfs-run
+ emptyDir: {}
+ - name: glusterfs-lvm
+ hostPath:
+ path: "/run/lvm"
+ - name: glusterfs-etc
+ hostPath:
+ path: "/etc/glusterfs"
+ - name: glusterfs-logs
+ hostPath:
+ path: "/var/log/glusterfs"
+ - name: glusterfs-config
+ hostPath:
+ path: "/var/lib/glusterd"
+ - name: glusterfs-dev
+ hostPath:
+ path: "/dev"
+ - name: glusterfs-misc
+ hostPath:
+ path: "/var/lib/misc/glusterfsd"
+ - name: glusterfs-cgroup
+ hostPath:
+ path: "/sys/fs/cgroup"
+ - name: glusterfs-ssl
+ hostPath:
+ path: "/etc/ssl"
+ restartPolicy: Always
+ terminationGracePeriodSeconds: 30
+ dnsPolicy: ClusterFirst
+ securityContext: {}
+parameters:
+- name: IMAGE_NAME
+ displayName: GlusterFS container name
+ required: True
+- name: IMAGE_VERSION
+ displayName: GlusterFS container version
+ required: True
diff --git a/roles/openshift_storage_glusterfs/files/v1.6/heketi-template.yml b/roles/openshift_storage_glusterfs/files/v1.6/heketi-template.yml
new file mode 100644
index 000000000..df045c170
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v1.6/heketi-template.yml
@@ -0,0 +1,113 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: heketi
+ labels:
+ glusterfs: heketi-template
+ annotations:
+ description: Heketi service deployment template
+ tags: glusterfs,heketi
+labels:
+ template: heketi
+objects:
+- kind: Service
+ apiVersion: v1
+ metadata:
+ name: heketi
+ labels:
+ glusterfs: heketi-service
+ annotations:
+ description: Exposes Heketi service
+ spec:
+ ports:
+ - name: heketi
+ port: 8080
+ targetPort: 8080
+ selector:
+ glusterfs: heketi-pod
+- kind: Route
+ apiVersion: v1
+ metadata:
+ name: heketi
+ labels:
+ glusterfs: heketi-route
+ spec:
+ to:
+ kind: Service
+ name: heketi
+- kind: DeploymentConfig
+ apiVersion: v1
+ metadata:
+ name: heketi
+ labels:
+ glusterfs: heketi-dc
+ annotations:
+ description: Defines how to deploy Heketi
+ spec:
+ replicas: 1
+ selector:
+ glusterfs: heketi-pod
+ triggers:
+ - type: ConfigChange
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: heketi
+ labels:
+ glusterfs: heketi-pod
+ spec:
+ serviceAccountName: heketi-service-account
+ containers:
+ - name: heketi
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: HEKETI_USER_KEY
+ value: ${HEKETI_USER_KEY}
+ - name: HEKETI_ADMIN_KEY
+ value: ${HEKETI_ADMIN_KEY}
+ - name: HEKETI_EXECUTOR
+ value: kubernetes
+ - name: HEKETI_FSTAB
+ value: /var/lib/heketi/fstab
+ - name: HEKETI_SNAPSHOT_LIMIT
+ value: '14'
+ - name: HEKETI_KUBE_GLUSTER_DAEMONSET
+ value: '1'
+ ports:
+ - containerPort: 8080
+ volumeMounts:
+ - name: db
+ mountPath: /var/lib/heketi
+ readinessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 3
+ httpGet:
+ path: /hello
+ port: 8080
+ livenessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 30
+ httpGet:
+ path: /hello
+ port: 8080
+ volumes:
+ - name: db
+ glusterfs:
+ endpoints: heketi-storage-endpoints
+ path: heketidbstorage
+parameters:
+- name: HEKETI_USER_KEY
+ displayName: Heketi User Secret
+ description: Set secret for those creating volumes as type _user_
+- name: HEKETI_ADMIN_KEY
+ displayName: Heketi Administrator Secret
+ description: Set secret for administration of the Heketi service as user _admin_
+- name: IMAGE_NAME
+ displayName: heketi container name
+ required: True
+- name: IMAGE_VERSION
+ displayName: heketi container version
+ required: True
diff --git a/roles/openshift_storage_glusterfs/filter_plugins/openshift_storage_glusterfs.py b/roles/openshift_storage_glusterfs/filter_plugins/openshift_storage_glusterfs.py
new file mode 100644
index 000000000..88801e487
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/filter_plugins/openshift_storage_glusterfs.py
@@ -0,0 +1,23 @@
+'''
+ Filters for use with the OpenShift Storage GlusterFS role
+'''
+
+
+def map_from_pairs(source, delim="="):
+    ''' Build a dict from a comma-separated string of delim-separated key=value pairs '''
+    if source == '':
+        return dict()
+
+    return dict(item.split(delim) for item in source.split(","))
+
+
+# pylint: disable=too-few-public-methods
+class FilterModule(object):
+    ''' OpenShift Storage GlusterFS Filters '''
+
+    # pylint: disable=no-self-use, too-few-public-methods
+    def filters(self):
+        ''' Return the filters provided by this class '''
+        return {
+            'map_from_pairs': map_from_pairs
+        }
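With the generator expression fixed as above, the filter turns a comma-separated selector string into a dict; a hypothetical debug task (not part of the role) shows the behavior defaults/main.yml relies on:

```
- debug:
    msg: "{{ 'storagenode=glusterfs,region=infra' | map_from_pairs }}"
# prints: {'storagenode': 'glusterfs', 'region': 'infra'}
```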
diff --git a/roles/openshift_storage_glusterfs/meta/main.yml b/roles/openshift_storage_glusterfs/meta/main.yml
new file mode 100644
index 000000000..aab9851f9
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/meta/main.yml
@@ -0,0 +1,15 @@
+---
+galaxy_info:
+ author: Jose A. Rivera
+ description: OpenShift GlusterFS Cluster
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 2.2
+ platforms:
+ - name: EL
+ versions:
+ - 7
+dependencies:
+- role: openshift_hosted_facts
+- role: openshift_repos
+- role: lib_openshift
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
new file mode 100644
index 000000000..26ca5eebf
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
@@ -0,0 +1,107 @@
+---
+- assert:
+ that: "openshift_storage_glusterfs_nodeselector.keys() | count == 1"
+ msg: Only one GlusterFS nodeselector key pair should be provided
+
+- assert:
+ that: "groups.oo_glusterfs_to_config | count >= 3"
+ msg: There must be at least three GlusterFS nodes specified
+
+- name: Delete pre-existing GlusterFS resources
+ oc_obj:
+ namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ kind: "template,daemonset"
+ name: glusterfs
+ state: absent
+ when: openshift_storage_glusterfs_wipe
+
+- name: Unlabel any existing GlusterFS nodes
+ oc_label:
+ name: "{{ item }}"
+ kind: node
+ state: absent
+ labels: "{{ openshift_storage_glusterfs_nodeselector | oo_dict_to_list_of_dict }}"
+ with_items: "{{ groups.all }}"
+ when: openshift_storage_glusterfs_wipe
+
+- name: Delete pre-existing GlusterFS config
+ file:
+ path: /var/lib/glusterd
+ state: absent
+ delegate_to: "{{ item }}"
+ with_items: "{{ groups.oo_glusterfs_to_config }}"
+ when: openshift_storage_glusterfs_wipe
+
+- name: Get GlusterFS storage devices state
+ command: "pvdisplay -C --noheadings -o pv_name,vg_name {% for device in hostvars[item].glusterfs_devices %}{{ device }} {% endfor %}"
+ register: devices_info
+ delegate_to: "{{ item }}"
+ with_items: "{{ groups.oo_glusterfs_to_config }}"
+ failed_when: False
+ when: openshift_storage_glusterfs_wipe
+
+ # Runs "vgremove -fy <vg>; pvremove -fy <pv>" for every device found to be a physical volume.
+- name: Clear GlusterFS storage device contents
+ shell: "{% for line in item.stdout_lines %}{% set fields = line.split() %}{% if fields | count > 1 %}vgremove -fy {{ fields[1] }}; {% endif %}pvremove -fy {{ fields[0] }}; {% endfor %}"
+ delegate_to: "{{ item.item }}"
+ with_items: "{{ devices_info.results }}"
+ when:
+ - openshift_storage_glusterfs_wipe
+ - item.stdout_lines | count > 0
+
+- name: Add service accounts to privileged SCC
+ oc_adm_policy_user:
+ user: "system:serviceaccount:{{ openshift_storage_glusterfs_namespace }}:{{ item }}"
+ resource_kind: scc
+ resource_name: privileged
+ state: present
+ with_items:
+ - 'default'
+ - 'router'
+
+- name: Label GlusterFS nodes
+ oc_label:
+ name: "{{ glusterfs_host }}"
+ kind: node
+ state: add
+ labels: "{{ openshift_storage_glusterfs_nodeselector | oo_dict_to_list_of_dict }}"
+ with_items: "{{ groups.oo_glusterfs_to_config }}"
+ loop_control:
+ loop_var: glusterfs_host
+
+- name: Copy GlusterFS DaemonSet template
+ copy:
+ src: "{{ openshift.common.examples_content_version }}/glusterfs-template.yml"
+ dest: "{{ mktemp.stdout }}/glusterfs-template.yml"
+
+- name: Create GlusterFS template
+ oc_obj:
+ namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ kind: template
+ name: glusterfs
+ state: present
+ files:
+ - "{{ mktemp.stdout }}/glusterfs-template.yml"
+
+- name: Deploy GlusterFS pods
+ oc_process:
+ namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ template_name: "glusterfs"
+ create: True
+ params:
+ IMAGE_NAME: "{{ openshift_storage_glusterfs_image }}"
+ IMAGE_VERSION: "{{ openshift_storage_glusterfs_version }}"
+
+- name: Wait for GlusterFS pods
+ oc_obj:
+ namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ kind: pod
+ state: list
+ selector: "glusterfs-node=pod"
+ register: glusterfs_pods
+ until:
+ - "glusterfs_pods.results.results[0]['items'] | count > 0"
+ # There must be as many pods with 'Ready' status True as there are nodes expecting those pods
+ - "glusterfs_pods.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == groups.oo_glusterfs_to_config | count"
+ delay: 10
+ retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}"
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
new file mode 100644
index 000000000..9f092d5d5
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
@@ -0,0 +1,48 @@
+---
+- name: Delete pre-existing GlusterFS registry resources
+ oc_obj:
+ namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ kind: "{{ item.kind }}"
+ name: "{{ item.name | default(omit) }}"
+ selector: "{{ item.selector | default(omit) }}"
+ state: absent
+ with_items:
+ - kind: "svc,ep"
+ name: "glusterfs-registry-endpoints"
+ failed_when: False
+
+- name: Generate GlusterFS registry endpoints
+ template:
+ src: "{{ openshift.common.examples_content_version }}/glusterfs-registry-endpoints.yml.j2"
+ dest: "{{ mktemp.stdout }}/glusterfs-registry-endpoints.yml"
+
+- name: Copy GlusterFS registry service
+ copy:
+ src: "{{ openshift.common.examples_content_version }}/glusterfs-registry-service.yml"
+ dest: "{{ mktemp.stdout }}/glusterfs-registry-service.yml"
+
+- name: Create GlusterFS registry endpoints
+ oc_obj:
+ namespace: "{{ openshift.hosted.registry.namespace | default('default') }}"
+ state: present
+ kind: endpoints
+ name: glusterfs-registry-endpoints
+ files:
+ - "{{ mktemp.stdout }}/glusterfs-registry-endpoints.yml"
+
+- name: Create GlusterFS registry service
+ oc_obj:
+ namespace: "{{ openshift.hosted.registry.namespace | default('default') }}"
+ state: present
+ kind: service
+ name: glusterfs-registry-endpoints
+ files:
+ - "{{ mktemp.stdout }}/glusterfs-registry-service.yml"
+
+- name: Check if GlusterFS registry volume exists
+ command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' volume list"
+ register: registry_volume
+
+- name: Create GlusterFS registry volume
+ command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' volume create --size={{ openshift.hosted.registry.storage.volume.size | replace('Gi','') }} --name={{ openshift.hosted.registry.storage.glusterfs.path }}"
+ when: "'{{ openshift.hosted.registry.storage.glusterfs.path }}' not in registry_volume.stdout"
diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml
new file mode 100644
index 000000000..76ae1db75
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml
@@ -0,0 +1,41 @@
+---
+- name: Copy initial heketi resource files
+ copy:
+ src: "{{ openshift.common.examples_content_version }}/{{ item }}"
+ dest: "{{ mktemp.stdout }}/{{ item }}"
+ with_items:
+ - "deploy-heketi-template.yml"
+
+- name: Create deploy-heketi resources
+ oc_obj:
+ namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ kind: template
+ name: deploy-heketi
+ state: present
+ files:
+ - "{{ mktemp.stdout }}/deploy-heketi-template.yml"
+
+- name: Deploy deploy-heketi pod
+ oc_process:
+ namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ template_name: "deploy-heketi"
+ create: True
+ params:
+ IMAGE_NAME: "{{ openshift_storage_glusterfs_heketi_image }}"
+ IMAGE_VERSION: "{{ openshift_storage_glusterfs_heketi_version }}"
+ HEKETI_USER_KEY: "{{ openshift_storage_glusterfs_heketi_user_key }}"
+ HEKETI_ADMIN_KEY: "{{ openshift_storage_glusterfs_heketi_admin_key }}"
+
+- name: Wait for deploy-heketi pod
+ oc_obj:
+ namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ kind: pod
+ state: list
+ selector: "glusterfs=deploy-heketi-pod,deploy-heketi=support"
+ register: heketi_pod
+ until:
+ - "heketi_pod.results.results[0]['items'] | count > 0"
+ # Pod's 'Ready' status must be True
+ - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"
+ delay: 10
+ retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}"
diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
new file mode 100644
index 000000000..84b85e95d
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
@@ -0,0 +1,109 @@
+---
+- name: Create heketi DB volume
+ command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' setup-openshift-heketi-storage --listfile {{ mktemp.stdout }}/heketi-storage.json"
+ register: setup_storage
+ failed_when: False
+
+# This is used in the subsequent task
+- name: Copy the admin client config
+ command: >
+ cp {{ openshift.common.config_base }}/master/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
+ changed_when: False
+ check_mode: no
+
+# Need `command` here because heketi-storage.json contains multiple objects.
+- name: Copy heketi DB to GlusterFS volume
+ command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f {{ mktemp.stdout }}/heketi-storage.json -n {{ openshift_storage_glusterfs_namespace }}"
+ when: "setup_storage.rc == 0"
+
+- name: Wait for copy job to finish
+ oc_obj:
+ namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ kind: job
+ state: list
+ name: "heketi-storage-copy-job"
+ register: heketi_job
+ until:
+ - "'results' in heketi_job.results and heketi_job.results.results | count > 0"
+ # Pod's 'Complete' status must be True
+ - "heketi_job.results.results | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Complete'}) | map('bool') | select | list | count == 1"
+ delay: 10
+ retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}"
+ failed_when:
+ - "'results' in heketi_job.results"
+ - "heketi_job.results.results | count > 0"
+ # Fail when pod's 'Failed' status is True
+ - "heketi_job.results.results | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Failed'}) | map('bool') | select | list | count == 1"
+ when: "setup_storage.rc == 0"
+
+- name: Delete deploy resources
+ oc_obj:
+ namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ kind: "{{ item.kind }}"
+ name: "{{ item.name | default(omit) }}"
+ selector: "{{ item.selector | default(omit) }}"
+ state: absent
+ with_items:
+ - kind: "template,route,service,jobs,dc,secret"
+ selector: "deploy-heketi"
+ failed_when: False
+
+- name: Copy heketi template
+ copy:
+ src: "{{ openshift.common.examples_content_version }}/heketi-template.yml"
+ dest: "{{ mktemp.stdout }}/heketi-template.yml"
+
+- name: Create heketi resources
+ oc_obj:
+ namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ kind: template
+ name: heketi
+ state: present
+ files:
+ - "{{ mktemp.stdout }}/heketi-template.yml"
+
+- name: Deploy heketi pod
+ oc_process:
+ namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ template_name: "heketi"
+ create: True
+ params:
+ IMAGE_NAME: "{{ openshift_storage_glusterfs_heketi_image }}"
+ IMAGE_VERSION: "{{ openshift_storage_glusterfs_heketi_version }}"
+ HEKETI_USER_KEY: "{{ openshift_storage_glusterfs_heketi_user_key }}"
+ HEKETI_ADMIN_KEY: "{{ openshift_storage_glusterfs_heketi_admin_key }}"
+
+- name: Wait for heketi pod
+ oc_obj:
+ namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ kind: pod
+ state: list
+ selector: "glusterfs=heketi-pod"
+ register: heketi_pod
+ until:
+ - "heketi_pod.results.results[0]['items'] | count > 0"
+ # Pod's 'Ready' status must be True
+ - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"
+ delay: 10
+ retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}"
+
+- name: Determine heketi URL
+ oc_obj:
+ namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ state: list
+ kind: ep
+ selector: "glusterfs=heketi-service"
+ register: heketi_url
+ until:
+ - "heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip != ''"
+ - "heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port != ''"
+ delay: 10
+ retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}"
+
+- name: Set heketi URL
+ set_fact:
+ openshift_storage_glusterfs_heketi_url: "{{ heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip }}:{{ heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port }}"
+
+- name: Verify heketi service
+ command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' cluster list"
+ changed_when: False
diff --git a/roles/openshift_storage_glusterfs/tasks/main.yml b/roles/openshift_storage_glusterfs/tasks/main.yml
new file mode 100644
index 000000000..265a3cc6e
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/tasks/main.yml
@@ -0,0 +1,182 @@
+---
+- name: Create temp directory for doing work in
+ command: mktemp -d /tmp/openshift-glusterfs-ansible-XXXXXX
+ register: mktemp
+ changed_when: False
+ check_mode: no
+
+- name: Verify target namespace exists
+ oc_project:
+ state: present
+ name: "{{ openshift_storage_glusterfs_namespace }}"
+ when: openshift_storage_glusterfs_is_native or openshift_storage_glusterfs_heketi_is_native
+
+- include: glusterfs_deploy.yml
+ when: openshift_storage_glusterfs_is_native
+
+- name: Make sure heketi-client is installed
+ package: name=heketi-client state=present
+
+- name: Delete pre-existing heketi resources
+ oc_obj:
+ namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ kind: "{{ item.kind }}"
+ name: "{{ item.name | default(omit) }}"
+ selector: "{{ item.selector | default(omit) }}"
+ state: absent
+ with_items:
+ - kind: "template,route,service,jobs,dc,secret"
+ selector: "deploy-heketi"
+ - kind: "template,route,dc,service"
+ name: "heketi"
+ - kind: "svc,ep"
+ name: "heketi-storage-endpoints"
+ - kind: "sa"
+ name: "heketi-service-account"
+ failed_when: False
+ when: openshift_storage_glusterfs_heketi_wipe
+
+- name: Wait for deploy-heketi pods to terminate
+ oc_obj:
+ namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ kind: pod
+ state: list
+ selector: "glusterfs=deploy-heketi-pod"
+ register: heketi_pod
+ until: "heketi_pod.results.results[0]['items'] | count == 0"
+ delay: 10
+ retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}"
+ when: openshift_storage_glusterfs_heketi_wipe
+
+- name: Wait for heketi pods to terminate
+ oc_obj:
+ namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ kind: pod
+ state: list
+ selector: "glusterfs=heketi-pod"
+ register: heketi_pod
+ until: "heketi_pod.results.results[0]['items'] | count == 0"
+ delay: 10
+ retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}"
+ when: openshift_storage_glusterfs_heketi_wipe
+
+- name: Create heketi service account
+ oc_serviceaccount:
+ namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ name: heketi-service-account
+ state: present
+ when: openshift_storage_glusterfs_heketi_is_native
+
+- name: Add heketi service account to privileged SCC
+ oc_adm_policy_user:
+ user: "system:serviceaccount:{{ openshift_storage_glusterfs_namespace }}:heketi-service-account"
+ resource_kind: scc
+ resource_name: privileged
+ state: present
+ when: openshift_storage_glusterfs_heketi_is_native
+
+- name: Allow heketi service account to view/edit pods
+ oc_adm_policy_user:
+ user: "system:serviceaccount:{{ openshift_storage_glusterfs_namespace }}:heketi-service-account"
+ resource_kind: role
+ resource_name: edit
+ state: present
+ when: openshift_storage_glusterfs_heketi_is_native
+
+- name: Check for existing deploy-heketi pod
+ oc_obj:
+ namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ state: list
+ kind: pod
+ selector: "glusterfs=deploy-heketi-pod,deploy-heketi=support"
+ register: heketi_pod
+ when: openshift_storage_glusterfs_heketi_is_native
+
+- name: Check if need to deploy deploy-heketi
+ set_fact:
+ openshift_storage_glusterfs_heketi_deploy_is_missing: False
+ when:
+ - "openshift_storage_glusterfs_heketi_is_native"
+ - "heketi_pod.results.results[0]['items'] | count > 0"
+ # deploy-heketi is not missing when there are one or more pods with matching labels whose 'Ready' status is True
+ - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0"
+
+- name: Check for existing heketi pod
+ oc_obj:
+ namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ state: list
+ kind: pod
+ selector: "glusterfs=heketi-pod"
+ register: heketi_pod
+ when: openshift_storage_glusterfs_heketi_is_native
+
+- name: Check if need to deploy heketi
+ set_fact:
+ openshift_storage_glusterfs_heketi_is_missing: False
+ when:
+ - "openshift_storage_glusterfs_heketi_is_native"
+ - "heketi_pod.results.results[0]['items'] | count > 0"
+ # heketi is not missing when there are one or more pods with matching labels whose 'Ready' status is True
+ - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0"
+
+- include: heketi_deploy_part1.yml
+ when:
+ - openshift_storage_glusterfs_heketi_is_native
+ - openshift_storage_glusterfs_heketi_deploy_is_missing
+ - openshift_storage_glusterfs_heketi_is_missing
+
+- name: Determine heketi URL
+ oc_obj:
+ namespace: "{{ openshift_storage_glusterfs_namespace }}"
+ state: list
+ kind: ep
+ selector: "glusterfs in (deploy-heketi-service, heketi-service)"
+ register: heketi_url
+ until:
+ - "heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip != ''"
+ - "heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port != ''"
+ delay: 10
+ retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}"
+ when:
+ - openshift_storage_glusterfs_heketi_is_native
+ - openshift_storage_glusterfs_heketi_url is undefined
+
+- name: Set heketi URL
+ set_fact:
+ openshift_storage_glusterfs_heketi_url: "{{ heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip }}:{{ heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port }}"
+ when:
+ - openshift_storage_glusterfs_heketi_is_native
+ - openshift_storage_glusterfs_heketi_url is undefined
+
+- name: Verify heketi service
+ command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' cluster list"
+ changed_when: False
+
+- name: Generate topology file
+ template:
+ src: "{{ openshift.common.examples_content_version }}/topology.json.j2"
+ dest: "{{ mktemp.stdout }}/topology.json"
+ when:
+ - openshift_storage_glusterfs_is_native
+ - openshift_storage_glusterfs_heketi_topology_load
+
+- name: Load heketi topology
+ command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' topology load --json={{ mktemp.stdout }}/topology.json 2>&1"
+ register: topology_load
+ failed_when: "topology_load.rc != 0 or 'Unable' in topology_load.stdout"
+ when:
+ - openshift_storage_glusterfs_is_native
+ - openshift_storage_glusterfs_heketi_topology_load
+
+- include: heketi_deploy_part2.yml
+ when: openshift_storage_glusterfs_heketi_is_native and openshift_storage_glusterfs_heketi_is_missing
+
+- include: glusterfs_registry.yml
+ when: "openshift.hosted.registry.storage.kind == 'glusterfs'"
+
+- name: Delete temp directory
+ file:
+ name: "{{ mktemp.stdout }}"
+ state: absent
+ changed_when: False
+ check_mode: no
diff --git a/roles/openshift_storage_glusterfs/templates/v1.6/glusterfs-registry-endpoints.yml.j2 b/roles/openshift_storage_glusterfs/templates/v1.6/glusterfs-registry-endpoints.yml.j2
new file mode 100644
index 000000000..d72d085c9
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v1.6/glusterfs-registry-endpoints.yml.j2
@@ -0,0 +1,11 @@
+apiVersion: v1
+kind: Endpoints
+metadata:
+ name: glusterfs-registry-endpoints
+subsets:
+- addresses:
+{% for node in groups.oo_glusterfs_to_config %}
+ - ip: {{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }}
+{% endfor %}
+ ports:
+ - port: 1
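Rendered against two hosts in `oo_glusterfs_to_config`, the template above would produce output along these lines (addresses are illustrative):

```
apiVersion: v1
kind: Endpoints
metadata:
  name: glusterfs-registry-endpoints
subsets:
- addresses:
  - ip: 192.0.2.11
  - ip: 192.0.2.12
  ports:
  - port: 1
```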
diff --git a/roles/openshift_storage_glusterfs/templates/v1.6/topology.json.j2 b/roles/openshift_storage_glusterfs/templates/v1.6/topology.json.j2
new file mode 100644
index 000000000..eb5b4544f
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v1.6/topology.json.j2
@@ -0,0 +1,39 @@
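+{#-
+  Hosts are grouped by their glusterfs_cluster variable (defaulting to a
+  single cluster "1"); each group becomes one cluster entry listing its
+  nodes and their raw block devices.
+-#}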
+{
+ "clusters": [
+{%- set clusters = {} -%}
+{%- for node in groups.oo_glusterfs_to_config -%}
+ {%- set cluster = hostvars[node].glusterfs_cluster if 'glusterfs_cluster' in hostvars[node] else '1' -%}
+ {%- if cluster in clusters -%}
+ {%- set _dummy = clusters[cluster].append(node) -%}
+ {%- else -%}
+ {%- set _dummy = clusters.update({cluster: [ node, ]}) -%}
+ {%- endif -%}
+{%- endfor -%}
+{%- for cluster in clusters -%}
+ {
+ "nodes": [
+{%- for node in clusters[cluster] -%}
+ {
+ "node": {
+ "hostnames": {
+ "manage": [
+ "{{ hostvars[node].glusterfs_hostname | default(hostvars[node].openshift.common.hostname) }}"
+ ],
+ "storage": [
+ "{{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }}"
+ ]
+ },
+ "zone": {{ hostvars[node].glusterfs_zone | default(1) }}
+ },
+ "devices": [
+{%- for device in hostvars[node].glusterfs_devices -%}
+ "{{ device }}"{% if not loop.last %},{% endif %}
+{%- endfor -%}
+ ]
+ }{% if not loop.last %},{% endif %}
+{%- endfor -%}
+ ]
+ }{% if not loop.last %},{% endif %}
+{%- endfor -%}
+ ]
+}
diff --git a/roles/openshift_version/tasks/main.yml b/roles/openshift_version/tasks/main.yml
index c3d001bb4..fa9b20e92 100644
--- a/roles/openshift_version/tasks/main.yml
+++ b/roles/openshift_version/tasks/main.yml
@@ -7,8 +7,13 @@
# Block attempts to install origin without specifying some kind of version information.
# This is because the latest tags for origin are usually alpha builds, which should not
# be used by default. Users must indicate what they want.
-- fail:
- msg: "Must specify openshift_release or openshift_image_tag in inventory to install origin. (suggestion: add openshift_release=\"1.2\" to inventory)"
+- name: Abort when we cannot safely guess what Origin image version the user wanted
+ fail:
+ msg: |-
+ To install a containerized Origin release, you must set openshift_release or
+ openshift_image_tag in your inventory to specify which version of the OpenShift
+ component images to use. You may want the latest (usually alpha) releases or
+ a more stable release. (Suggestion: add openshift_release="x.y" to inventory.)
when:
- is_containerized | bool
- openshift.common.deployment_type == 'origin'
@@ -27,7 +32,10 @@
when: openshift_release is defined
# Verify that the image tag is in a valid format
-- block:
+- when:
+ - openshift_image_tag is defined
+ - openshift_image_tag != "latest"
+ block:
# Verifies that when the deployment type is origin the version:
# - starts with a v
@@ -35,12 +43,14 @@
# It also allows for optional trailing data which:
# - must start with a dash
# - may contain numbers, letters, dashes and dots.
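+#   e.g. v1.2.3 and v3.5.1-alpha.1 match, while v1.2 does not
+#   (three numeric components are required).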
- - name: Verify Origin openshift_image_tag is valid
+ - name: (Origin) Verify openshift_image_tag is valid
+ when: openshift.common.deployment_type == 'origin'
assert:
that:
- "{{ openshift_image_tag|match('(^v?\\d+\\.\\d+\\.\\d+(-[\\w\\-\\.]*)?$)') }}"
- msg: "openshift_image_tag must be in the format v#.#.#[-optional.#]. Examples: v1.2.3, v3.5.1-alpha.1"
- when: openshift.common.deployment_type == 'origin'
+ msg: |-
+ openshift_image_tag must be in the format v#.#.#[-optional.#]. Examples: v1.2.3, v3.5.1-alpha.1
+ You specified openshift_image_tag={{ openshift_image_tag }}
# Verifies that when the deployment type is openshift-enterprise the version:
# - starts with a v
@@ -48,16 +58,14 @@
# It also allows for optional trailing data which:
# - must start with a dash
# - may contain numbers
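+#   e.g. v3.4.1 and v1.2-1 match, while v1.2-alpha does not
+#   (any trailing data must be numeric).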
- - name: Verify Enterprise openshift_image_tag is valid
+ - name: (Enterprise) Verify openshift_image_tag is valid
+ when: openshift.common.deployment_type == 'openshift-enterprise'
assert:
that:
- "{{ openshift_image_tag|match('(^v\\d+\\.\\d+[\\.\\d+]*(-\\d+)?$)') }}"
- msg: "openshift_image_tag must be in the format v#.#[.#[.#]]. Examples: v1.2, v3.4.1, v3.5.1.3, v1.2-1, v1.2.3-4"
- when: openshift.common.deployment_type == 'openshift-enterprise'
-
- when:
- - openshift_image_tag is defined
- - openshift_image_tag != "latest"
+ msg: |-
+ openshift_image_tag must be in the format v#.#[.#[.#]]. Examples: v1.2, v3.4.1, v3.5.1.3, v1.2-1, v1.2.3-4
+ You specified openshift_image_tag={{ openshift_image_tag }}
# Make sure we copy this to a fact if given a var:
- set_fact:
@@ -119,30 +127,42 @@
- fail:
msg: openshift_version role was unable to set openshift_version
+ name: Abort if openshift_version was not set
when: openshift_version is not defined
- fail:
msg: openshift_version role was unable to set openshift_image_tag
+ name: Abort if openshift_image_tag was not set
when: openshift_image_tag is not defined
- fail:
msg: openshift_version role was unable to set openshift_pkg_version
+ name: Abort if openshift_pkg_version was not set
when: openshift_pkg_version is not defined
- fail:
- msg: "No OpenShift version available, please ensure your systems are fully registered and have access to appropriate yum repositories."
+ msg: "No OpenShift version available; please ensure your systems are fully registered and have access to appropriate yum repositories."
+ name: Abort if no OpenShift version is available
when:
- not is_containerized | bool
- openshift_version == '0.0'
-# We can't map an openshift_release to full rpm version like we can with containers, make sure
+# We can't map an openshift_release to full rpm version like we can with containers; make sure
# the rpm version we looked up matches the release requested and error out if not.
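+# For example (hypothetical versions): openshift_release=1.5 is satisfied by a
+# detected 1.5.0 RPM but not by 1.4.1, since a simple prefix match is used.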
-- fail:
- msg: "Detected OpenShift version {{ openshift_version }} does not match requested openshift_release {{ openshift_release }}. You may need to adjust your yum repositories, inventory, or run the appropriate OpenShift upgrade playbook."
+- name: For an RPM install, abort when the requested release does not match the available version
when:
- not is_containerized | bool
- openshift_release is defined
- - not openshift_version.startswith(openshift_release) | bool
+ assert:
+ that:
+ - openshift_version.startswith(openshift_release) | bool
+ msg: |-
+ You requested openshift_release {{ openshift_release }}, which does not match the
+ latest OpenShift RPM detected on host {{ inventory_hostname }}:
+ {{ openshift.common.service_type }}-{{ openshift_version }}.
+ We will only install the latest RPMs, so please ensure you are getting the release
+ you expect. You may need to adjust your Ansible inventory, modify the repositories
+ available on the host, or run the appropriate OpenShift upgrade playbook.
# The end result of these three variables is quite important so make sure they are displayed and logged:
- debug: var=openshift_release
diff --git a/setup.py b/setup.py
index 2ad26110b..c6a132ae2 100644
--- a/setup.py
+++ b/setup.py
@@ -7,6 +7,7 @@ import os
import fnmatch
import re
import sys
+import subprocess
import yaml
# Always prefer setuptools over distutils
@@ -199,6 +200,52 @@ class OpenShiftAnsibleGenerateValidation(Command):
print('\nAll generate scripts passed.\n')
+class OpenShiftAnsibleSyntaxCheck(Command):
+ ''' Command to run Ansible syntax check'''
+ description = "Run Ansible syntax check"
+ user_options = []
+
+ # Colors
+ FAIL = '\033[91m' # Red
+ ENDC = '\033[0m' # Reset
+
+ def initialize_options(self):
+ ''' initialize_options '''
+ pass
+
+ def finalize_options(self):
+ ''' finalize_options '''
+ pass
+
+ def run(self):
+ ''' run command '''
+
+ has_errors = False
+
+ for yaml_file in find_files(
+ os.path.join(os.getcwd(), 'playbooks', 'byo'),
+ None, None, r'\.ya?ml$'):
+ with open(yaml_file, 'r') as contents:
+ for line in contents:
+ # initialize_groups.yml is used to identify entry point playbooks
+ if re.search(r'initialize_groups\.yml', line):
+ print('-' * 60)
+ print('Syntax checking playbook: %s' % yaml_file)
+ try:
+ subprocess.check_output(
+ ['ansible-playbook', '-i', 'localhost,',
+ '--syntax-check', yaml_file]
+ )
+ except subprocess.CalledProcessError as cpe:
+ print('{}Execution failed: {}{}'.format(
+ self.FAIL, cpe, self.ENDC))
+ has_errors = True
+ # Stop scanning lines; this playbook has already been syntax-checked
+ break
+ if has_errors:
+ raise SystemExit(1)
+
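+# Usage, as wired into the cmdclass mapping and tox.ini below:
+#   python setup.py ansible_syntax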
+
class UnsupportedCommand(Command):
''' Basic Command to override unsupported commands '''
user_options = []
@@ -242,6 +289,7 @@ setup(
'lint': OpenShiftAnsiblePylint,
'yamllint': OpenShiftAnsibleYamlLint,
'generate_validation': OpenShiftAnsibleGenerateValidation,
+ 'ansible_syntax': OpenShiftAnsibleSyntaxCheck,
},
packages=[],
)
diff --git a/tox.ini b/tox.ini
index 1b02234e5..8678ff463 100644
--- a/tox.ini
+++ b/tox.ini
@@ -21,4 +21,4 @@ commands =
yamllint: python setup.py yamllint
generate_validation: python setup.py generate_validation
# TODO(rhcarvalho): check syntax of other important entrypoint playbooks
- ansible_syntax: ansible-playbook --syntax-check playbooks/byo/config.yml
+ ansible_syntax: python setup.py ansible_syntax