Diffstat:
-rwxr-xr-x  .papr.sh | 54
-rw-r--r--  .papr.yml | 33
-rw-r--r--  .tito/packages/openshift-ansible | 2
-rw-r--r--  README_CONTAINER_IMAGE.md | 4
-rw-r--r--  images/installer/README_INVENTORY_GENERATOR.md | 2
-rw-r--r--  inventory/byo/hosts.example | 5
-rw-r--r--  openshift-ansible.spec | 272
-rw-r--r--  playbooks/aws/openshift-cluster/install.yml | 2
l---------  playbooks/byo/openshift-checks/certificate_expiry/roles | 1
-rw-r--r--  playbooks/byo/openshift-checks/health.yml | 4
-rw-r--r--  playbooks/byo/openshift-checks/pre-install.yml | 4
-rw-r--r--  playbooks/byo/openshift-preflight/check.yml | 3
l---------  playbooks/certificate_expiry | 1
-rw-r--r--  playbooks/common/openshift-cluster/config.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml | 8
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml | 10
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml | 18
-rw-r--r--  playbooks/openshift-checks/README.md (renamed from playbooks/byo/openshift-checks/README.md) | 14
-rw-r--r--  playbooks/openshift-checks/adhoc.yml (renamed from playbooks/byo/openshift-checks/adhoc.yml) | 6
-rw-r--r--  playbooks/openshift-checks/certificate_expiry/default.yaml (renamed from playbooks/byo/openshift-checks/certificate_expiry/default.yaml) | 0
-rw-r--r--  playbooks/openshift-checks/certificate_expiry/easy-mode-upload.yaml (renamed from playbooks/byo/openshift-checks/certificate_expiry/easy-mode-upload.yaml) | 0
-rw-r--r--  playbooks/openshift-checks/certificate_expiry/easy-mode.yaml (renamed from playbooks/byo/openshift-checks/certificate_expiry/easy-mode.yaml) | 0
-rw-r--r--  playbooks/openshift-checks/certificate_expiry/html_and_json_default_paths.yaml (renamed from playbooks/byo/openshift-checks/certificate_expiry/html_and_json_default_paths.yaml) | 0
-rw-r--r--  playbooks/openshift-checks/certificate_expiry/html_and_json_timestamp.yaml (renamed from playbooks/byo/openshift-checks/certificate_expiry/html_and_json_timestamp.yaml) | 0
-rw-r--r--  playbooks/openshift-checks/certificate_expiry/longer-warning-period-json-results.yaml (renamed from playbooks/byo/openshift-checks/certificate_expiry/longer-warning-period-json-results.yaml) | 0
-rw-r--r--  playbooks/openshift-checks/certificate_expiry/longer_warning_period.yaml (renamed from playbooks/byo/openshift-checks/certificate_expiry/longer_warning_period.yaml) | 0
l---------  playbooks/openshift-checks/certificate_expiry/roles (renamed from playbooks/byo/openshift-checks/roles) | 0
-rw-r--r--  playbooks/openshift-checks/health.yml | 4
-rw-r--r--  playbooks/openshift-checks/pre-install.yml | 4
-rw-r--r--  playbooks/openshift-checks/private/adhoc.yml (renamed from playbooks/common/openshift-checks/adhoc.yml) | 0
-rw-r--r--  playbooks/openshift-checks/private/health.yml (renamed from playbooks/common/openshift-checks/health.yml) | 0
-rw-r--r--  playbooks/openshift-checks/private/install.yml (renamed from playbooks/common/openshift-checks/install.yml) | 0
-rw-r--r--  playbooks/openshift-checks/private/pre-install.yml (renamed from playbooks/common/openshift-checks/pre-install.yml) | 0
l---------  playbooks/openshift-checks/private/roles (renamed from playbooks/common/openshift-checks/roles) | 0
l---------  playbooks/openshift-checks/roles | 1
-rw-r--r--  roles/cockpit-ui/tasks/main.yml | 1
-rw-r--r--  roles/etcd/defaults/main.yaml | 6
-rw-r--r--  roles/etcd/tasks/main.yml | 2
-rw-r--r--  roles/etcd/tasks/system_container.yml | 4
-rw-r--r--  roles/etcd/templates/etcd.docker.service | 2
-rw-r--r--  roles/installer_checkpoint/callback_plugins/installer_checkpoint.py | 2
-rw-r--r--  roles/kuryr/README.md | 5
-rw-r--r--  roles/kuryr/templates/configmap.yaml.j2 | 8
-rw-r--r--  roles/openshift_certificate_expiry/README.md | 48
-rw-r--r--  roles/openshift_etcd_facts/tasks/main.yml | 4
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py | 7
-rw-r--r--  roles/openshift_health_checker/openshift_checks/docker_image_availability.py | 16
-rw-r--r--  roles/openshift_health_checker/test/docker_image_availability_test.py | 4
-rw-r--r--  roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml | 9
-rw-r--r--  roles/openshift_hosted_templates/files/v3.6/origin/registry-console.yaml | 7
-rw-r--r--  roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml | 9
-rw-r--r--  roles/openshift_hosted_templates/files/v3.7/origin/registry-console.yaml | 7
-rw-r--r--  roles/openshift_hosted_templates/files/v3.8/enterprise/registry-console.yaml | 9
-rw-r--r--  roles/openshift_hosted_templates/files/v3.8/origin/registry-console.yaml | 7
-rw-r--r--  roles/openshift_logging/README.md | 1
-rw-r--r--  roles/openshift_logging/tasks/install_logging.yaml | 4
-rw-r--r--  roles/openshift_logging_elasticsearch/defaults/main.yml | 1
-rw-r--r--  roles/openshift_logging_elasticsearch/tasks/main.yaml | 2
-rw-r--r--  roles/openshift_logging_fluentd/files/secure-forward.conf | 2
-rw-r--r--  roles/openshift_logging_mux/files/secure-forward.conf | 2
-rw-r--r--  roles/openshift_node/meta/main.yml | 6
-rw-r--r--  roles/openshift_node/tasks/config/install-node-docker-service-file.yml (renamed from roles/openshift_node_upgrade/tasks/config/install-node-docker-service-file.yml) | 0
-rw-r--r--  roles/openshift_node/tasks/docker/upgrade.yml (renamed from roles/openshift_node_upgrade/tasks/docker/upgrade.yml) | 0
-rw-r--r--  roles/openshift_node/tasks/upgrade.yml (renamed from roles/openshift_node_upgrade/tasks/main.yml) | 6
-rw-r--r--  roles/openshift_node/tasks/upgrade/containerized_node_upgrade.yml (renamed from roles/openshift_node_upgrade/tasks/containerized_node_upgrade.yml) | 2
-rw-r--r--  roles/openshift_node/tasks/upgrade/restart.yml (renamed from roles/openshift_node_upgrade/tasks/restart.yml) | 0
-rw-r--r--  roles/openshift_node/tasks/upgrade/rpm_upgrade.yml (renamed from roles/openshift_node_upgrade/tasks/rpm_upgrade.yml) | 0
-rw-r--r--  roles/openshift_node_group/defaults/main.yml | 2
-rw-r--r--  roles/openshift_node_group/templates/node-config.yaml.j2 | 2
-rw-r--r--  roles/openshift_node_upgrade/README.md | 111
-rw-r--r--  roles/openshift_node_upgrade/defaults/main.yml | 15
-rw-r--r--  roles/openshift_node_upgrade/files/nuke_images.sh | 25
-rw-r--r--  roles/openshift_node_upgrade/handlers/main.yml | 36
-rw-r--r--  roles/openshift_node_upgrade/meta/main.yml | 13
-rw-r--r--  roles/openshift_node_upgrade/tasks/config/configure-node-settings.yml | 16
-rw-r--r--  roles/openshift_node_upgrade/tasks/config/configure-proxy-settings.yml | 17
-rw-r--r--  roles/openshift_node_upgrade/tasks/config/install-node-deps-docker-service-file.yml | 8
-rw-r--r--  roles/openshift_node_upgrade/tasks/config/install-ovs-docker-service-file.yml | 8
-rw-r--r--  roles/openshift_node_upgrade/tasks/config/install-ovs-service-env-file.yml | 8
-rw-r--r--  roles/openshift_node_upgrade/tasks/config/workaround-bz1331590-ovs-oom-fix.yml | 13
-rw-r--r--  roles/openshift_node_upgrade/tasks/registry_auth.yml | 46
-rw-r--r--  roles/openshift_node_upgrade/tasks/systemd_units.yml | 37
-rw-r--r--  roles/openshift_node_upgrade/templates/node.service.j2 | 31
-rw-r--r--  roles/openshift_node_upgrade/templates/openshift.docker.node.dep.service | 11
-rw-r--r--  roles/openshift_node_upgrade/templates/openshift.docker.node.service | 50
-rw-r--r--  roles/openshift_node_upgrade/templates/openvswitch-avoid-oom.conf | 3
-rw-r--r--  roles/openshift_node_upgrade/templates/openvswitch.docker.service | 17
-rw-r--r--  roles/openshift_node_upgrade/templates/openvswitch.sysconfig.j2 | 1
-rw-r--r--  roles/openshift_storage_glusterfs/README.md | 4
-rw-r--r--  roles/openshift_storage_glusterfs/defaults/main.yml | 8
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.7/glusterblock-provisioner.yml | 2
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.8/deploy-heketi-template.yml | 133
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.8/gluster-s3-pvcs-template.yml | 67
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.8/gluster-s3-template.yml | 140
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.8/glusterblock-provisioner.yml | 105
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.8/glusterfs-template.yml | 154
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.8/heketi-template.yml | 136
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterblock_deploy.yml | 8
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml | 4
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml | 2
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml | 4
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.7/heketi.json.j2 | 8
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-registry-endpoints.yml.j2 | 12
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-registry-service.yml.j2 | 10
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-storageclass.yml.j2 | 13
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.8/heketi-endpoints.yml.j2 | 12
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.8/heketi-service.yml.j2 | 10
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.8/heketi.json.j2 | 42
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.8/topology.json.j2 | 49
109 files changed, 1410 insertions(+), 620 deletions(-)
diff --git a/.papr.sh b/.papr.sh
index 2d66fdacd..58b3a006f 100755
--- a/.papr.sh
+++ b/.papr.sh
@@ -1,6 +1,39 @@
#!/bin/bash
set -xeuo pipefail
+pip install requests
+
+query_github() {
+ repo=$1; shift
+ resource=$1; shift
+ python -c "
+import sys
+import requests
+j = requests.get('https://api.github.com/repos/$repo/$resource').json()
+for q in sys.argv[1:]:
+ if q.isdigit():
+ q = int(q)
+ j = j[q]
+print(j)" "$@"
+}
+
+# Essentially use a similar procedure other openshift-ansible PR tests use to
+# determine which image tag should be used. This allows us to avoid hardcoding a
+# specific version which quickly becomes stale.
+
+if [ -n "${PAPR_BRANCH:-}" ]; then
+ target_branch=$PAPR_BRANCH
+else
+ # check which branch we're targeting if we're a PR
+ target_branch=$(query_github $PAPR_REPO pulls/$PAPR_PULL_ID base ref)
+ [ -n "$target_branch" ]
+fi
+
+# this is a bit wasteful, though there's no easy way to say "only clone up to
+# the first tag in the branch" -- ideally, PAPR could help with caching here
+git clone --branch $target_branch --single-branch https://github.com/openshift/origin
+export OPENSHIFT_IMAGE_TAG=$(git -C origin describe --abbrev=0)
+
echo "Targeting OpenShift Origin $OPENSHIFT_IMAGE_TAG"
pip install -r requirements.txt
@@ -22,12 +55,15 @@ trap upload_journals ERR
# https://github.com/openshift/openshift-ansible/issues/4478 is fixed.
ansible-playbook -vvv -i .papr.inventory playbooks/byo/config.yml -e "openshift_image_tag=$OPENSHIFT_IMAGE_TAG"
-# run a small subset of origin conformance tests to sanity
-# check the cluster NB: we run it on the master since we may
-# be in a different OSP network
-ssh ocp-master docker run --rm --net=host --privileged \
- -v /etc/origin/master/admin.kubeconfig:/config \
- registry.fedoraproject.org/fedora:26 sh -c \
- '"dnf install -y origin-tests && \
- KUBECONFIG=/config /usr/libexec/origin/extended.test --ginkgo.v=1 \
- --ginkgo.noColor --ginkgo.focus=\"Services.*NodePort|EmptyDir\""'
+### DISABLING TESTS FOR NOW, SEE:
+### https://github.com/openshift/openshift-ansible/pull/6132
+
+### # run a small subset of origin conformance tests to sanity
+### # check the cluster NB: we run it on the master since we may
+### # be in a different OSP network
+### ssh ocp-master docker run --rm --net=host --privileged \
+### -v /etc/origin/master/admin.kubeconfig:/config \
+### registry.fedoraproject.org/fedora:27 sh -c \
+### '"dnf install -y origin-tests && \
+### KUBECONFIG=/config /usr/libexec/origin/extended.test --ginkgo.v=1 \
+### --ginkgo.noColor --ginkgo.focus=\"Services.*NodePort|EmptyDir\""'
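For context on the helper added above: a minimal Python rendering of `query_github`, assuming only the `requests` library. The repository and PR number in the commented call are illustrative, not taken from this change.

```python
import requests

def query_github(repo, resource, *path):
    # Fetch a GitHub API resource and walk the JSON by a sequence of
    # keys/indices, mirroring the inline python in .papr.sh above.
    j = requests.get('https://api.github.com/repos/%s/%s' % (repo, resource)).json()
    for q in path:
        j = j[int(q) if isinstance(q, str) and q.isdigit() else q]
    return j

# Illustrative call: the branch a PR targets, as used for target_branch:
# query_github('openshift/openshift-ansible', 'pulls/1234', 'base', 'ref')
```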
diff --git a/.papr.yml b/.papr.yml
index 119dd5fcf..2b30f84fc 100644
--- a/.papr.yml
+++ b/.papr.yml
@@ -14,35 +14,28 @@
cluster:
hosts:
- name: ocp-master
- distro: fedora/26/atomic
+ distro: fedora/27/atomic
specs:
ram: 4096
- name: ocp-node1
- distro: fedora/26/atomic
+ distro: fedora/27/atomic
- name: ocp-node2
- distro: fedora/26/atomic
+ distro: fedora/27/atomic
container:
- image: registry.fedoraproject.org/fedora:26
+ image: registry.fedoraproject.org/fedora:27
-# temp workaround for https://bugzilla.redhat.com/show_bug.cgi?id=1483553
-#packages:
-# - gcc
-# - python-pip
-# - python-devel
-# - libffi-devel
-# - openssl-devel
-# - redhat-rpm-config
+packages:
+ - gcc
+ - git
+ - python-pip
+ - python-devel
+ - libffi-devel
+ - openssl-devel
+ - redhat-rpm-config
-context: 'fedora/26/atomic'
-
-env:
- OPENSHIFT_IMAGE_TAG: v3.6.0
+context: 'fedora/27/atomic'
tests:
- # temp workaround for https://bugzilla.redhat.com/show_bug.cgi?id=1483553
- - if (dnf distro-sync -y || :) |& grep -q -e BDB1539; then
- rpm --rebuilddb; dnf distro-sync;
- fi; dnf install -y gcc python-pip python-devel libffi-devel openssl-devel redhat-rpm-config
- ./.papr.sh
artifacts:
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index 88c353122..c12754594 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.8.0-0.1.0 ./
+3.8.0-0.8.0 ./
diff --git a/README_CONTAINER_IMAGE.md b/README_CONTAINER_IMAGE.md
index a2151352d..712c7c4b0 100644
--- a/README_CONTAINER_IMAGE.md
+++ b/README_CONTAINER_IMAGE.md
@@ -28,7 +28,7 @@ Here is an example of how to run a containerized `openshift-ansible` playbook th
-v $HOME/.ssh/id_rsa:/opt/app-root/src/.ssh/id_rsa:Z \
-v /etc/ansible/hosts:/tmp/inventory \
-e INVENTORY_FILE=/tmp/inventory \
- -e PLAYBOOK_FILE=playbooks/byo/openshift-checks/certificate_expiry/default.yaml \
+ -e PLAYBOOK_FILE=playbooks/openshift-checks/certificate_expiry/default.yaml \
-e OPTS="-v" -t \
openshift/origin-ansible
@@ -44,7 +44,7 @@ Here is a detailed explanation of the options used in the command above:
* `-v /etc/ansible/hosts:/tmp/inventory` and `-e INVENTORY_FILE=/tmp/inventory` mount the Ansible inventory file into the container as `/tmp/inventory` and set the corresponding environment variable to point at it respectively. The example uses `/etc/ansible/hosts` as the inventory file as this is a default location, but your inventory is likely to be elsewhere so please adjust as needed. Note that depending on the file you point to you might have to handle SELinux labels in a similar way as with the ssh keys, e.g. by adding a `:z` flag to the volume mount, so again you might prefer to copy the inventory to a dedicated location first.
-* `-e PLAYBOOK_FILE=playbooks/byo/openshift-checks/certificate_expiry/default.yaml` specifies the playbook to run as a relative path from the top level directory of openshift-ansible.
+* `-e PLAYBOOK_FILE=playbooks/openshift-checks/certificate_expiry/default.yaml` specifies the playbook to run as a relative path from the top level directory of openshift-ansible.
* `-e OPTS="-v"` and `-t` make the output look nicer: the `default.yaml` playbook does not generate results and runs quietly unless we add the `-v` option to the `ansible-playbook` invocation, and a TTY is allocated via `-t` so that Ansible adds color to the output.
diff --git a/images/installer/README_INVENTORY_GENERATOR.md b/images/installer/README_INVENTORY_GENERATOR.md
index 9c10e4b71..293bdb689 100644
--- a/images/installer/README_INVENTORY_GENERATOR.md
+++ b/images/installer/README_INVENTORY_GENERATOR.md
@@ -46,7 +46,7 @@ docker run -u `id -u` \
-v /tmp/origin/master/admin.kubeconfig:/opt/app-root/src/.kube/config:Z \
-v /tmp/origin/master/master-config.yaml:/opt/app-root/src/master-config.yaml:Z \
-e OPTS="-v --become-user root" \
- -e PLAYBOOK_FILE=playbooks/byo/openshift-checks/health.yml \
+ -e PLAYBOOK_FILE=playbooks/openshift-checks/health.yml \
-e GENERATE_INVENTORY=true \
-e USER=`whoami` \
openshift/origin-ansible
diff --git a/inventory/byo/hosts.example b/inventory/byo/hosts.example
index 1a9a5b6cf..ccdec2da1 100644
--- a/inventory/byo/hosts.example
+++ b/inventory/byo/hosts.example
@@ -401,9 +401,10 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# OpenShift Registry Console Options
# Override the console image prefix:
-# origin default is "cockpit/" and the image appended is "kubernetes"
-# enterprise default is "registry.access.redhat.com/openshift3/" and the image appended is "registry-console"
+# origin default is "cockpit/", enterprise default is "openshift3/"
#openshift_cockpit_deployer_prefix=registry.example.com/myrepo/
+# origin default is "kubernetes", enterprise default is "registry-console"
+#openshift_cockpit_deployer_basename=my-console
# Override image version, defaults to latest for origin, vX.Y product version for enterprise
#openshift_cockpit_deployer_version=1.4.1
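A small sketch of how these two options appear to combine, assuming (from the defaults noted above) that the registry-console template builds the image reference as `<prefix><basename>:<version>`:

```python
# Assumption: the image reference is composed as <prefix><basename>:<version>.
prefix = 'cockpit/'      # openshift_cockpit_deployer_prefix (origin default)
basename = 'kubernetes'  # openshift_cockpit_deployer_basename (origin default)
version = 'latest'       # openshift_cockpit_deployer_version (origin default)
print('%s%s:%s' % (prefix, basename, version))  # -> cockpit/kubernetes:latest
```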
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index 76a56e5cf..c9dee2872 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -10,7 +10,7 @@
Name: openshift-ansible
Version: 3.8.0
-Release: 0.1.0%{?dist}
+Release: 0.8.0%{?dist}
Summary: Openshift and Atomic Enterprise Ansible
License: ASL 2.0
URL: https://github.com/openshift/openshift-ansible
@@ -285,6 +285,276 @@ Atomic OpenShift Utilities includes
%changelog
+* Mon Nov 20 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.8.0-0.8.0
+-
+
+* Mon Nov 20 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.8.0-0.7.0
+-
+
+* Mon Nov 20 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.8.0-0.6.0
+-
+
+* Sun Nov 19 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.8.0-0.5.0
+-
+
+* Sun Nov 19 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.8.0-0.4.0
+- bug 1498398. Enclose content between store tag (rromerom@redhat.com)
+
+* Fri Nov 17 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.8.0-0.3.0
+- papr: auto-detect image tag to use and bump to f27 (jlebon@redhat.com)
+- Updating mtu value to int (kwoodson@redhat.com)
+- fix the logging-es-prometheus selector (jcantril@redhat.com)
+- GlusterFS: Add configuration for auto creating block-hosting volumes
+ (jarrpa@redhat.com)
+- Playbook Consolidation - openshift-checks (rteague@redhat.com)
+- Combine openshift_node and openshift_node_upgrade (mgugino@redhat.com)
+- registry-console: align image and check (lmeyer@redhat.com)
+- registry-console template 3.8 consistency (lmeyer@redhat.com)
+- registry-console template 3.7 consistency (lmeyer@redhat.com)
+- registry-console template 3.6 consistency (lmeyer@redhat.com)
+
+* Thu Nov 16 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.8.0-0.2.0
+- Fix openstack init (rteague@redhat.com)
+- Ensure node service is started. (kwoodson@redhat.com)
+- Added aos-3.8 releaser for tito (smunilla@redhat.com)
+- Playbook Consolidation - Initialization (rteague@redhat.com)
+- Minor tweaks to ansible.cfg and example inventory (rteague@redhat.com)
+- Removed old version code (mgugino@redhat.com)
+- Fixing islnk. (kwoodson@redhat.com)
+- Removing setting prefix and version facts in openshift_logging to let the
+ component roles set their defaults (ewolinet@redhat.com)
+- Create prometheus configmaps before statefulset (zgalor@redhat.com)
+- Bug 1510496 - logging: honor ES PVC size (jwozniak@redhat.com)
+- Combine master upgrade play with role (mgugino@redhat.com)
+- Fix stale data in openshift_facts for logging (mgugino@redhat.com)
+- Start requiring Ansible 2.4 (rteague@redhat.com)
+- Fixing origin default for es proxy (ewolinet@redhat.com)
+- Addressing tox errors (ewolinet@redhat.com)
+- Addressing comments (ewolinet@redhat.com)
+- Initial Kuryr Ports Pool Support (ltomasbo@redhat.com)
+- Remove an unused retry file (tomas@sedovic.cz)
+- Namespace the docker volumes (tomas@sedovic.cz)
+- Fix tox (tomas@sedovic.cz)
+- Namespace the OpenStack vars (tomas@sedovic.cz)
+- Use `null` instead of `False` where it makes sense (tomas@sedovic.cz)
+- Simplify the template paths for the storage setup (tomas@sedovic.cz)
+- Use the default `item` loop variable for checks (tomas@sedovic.cz)
+- Move the selinux check up (tomas@sedovic.cz)
+- Add the DNS updates and rename the openstack vars (tomas@sedovic.cz)
+- Remove the subnet_update_dns_servers task list (tomas@sedovic.cz)
+- Move the vars/main.yml to defaults (tomas@sedovic.cz)
+- FIXUP ANSIBLE CFG (tomas@sedovic.cz)
+- Remove the static_inventory and bastion samples (tomas@sedovic.cz)
+- Use the existing ansible.cfg file (tomas@sedovic.cz)
+- Remove the subscription-manager role (tomas@sedovic.cz)
+- Add a stub of the dns record update code in (tomas@sedovic.cz)
+- Use correct host group in provision.yml (tomas@sedovic.cz)
+- Remove the post-install and scale-up playbooks (tomas@sedovic.cz)
+- Remove the openstack custom-actions for now (tomas@sedovic.cz)
+- Remove the extra roles (tomas@sedovic.cz)
+- Add openshift_openstack role and move tasks there (tomas@sedovic.cz)
+- Use the docker-storage-setup role (tomas@sedovic.cz)
+- Update readme (tomas@sedovic.cz)
+- Update lookup plugins path (tomas@sedovic.cz)
+- .gitignore casl-infra (tomas@sedovic.cz)
+- Move the OpenStack playbooks (tomas@sedovic.cz)
+- Updating logging components image defaulting pattern to match
+ openshift_logging pattern (ewolinet@redhat.com)
+- logging with static pvc: allow specifying the storage class name
+ (bart.vanbos@kbc.be)
+- Add role to configure project request template (hansmi@vshn.ch)
+- Remove bash highlight (tomas@sedovic.cz)
+- Revert the console hostname change (tomas@sedovic.cz)
+- Add Extra CAs (custom post-provision action) (#801) (tlacencin@gmail.com)
+- Add Flannel support (#814) (bdobreli@redhat.com)
+- Docker storage fix (#812) (cwilkers@redhat.com)
+- [WIP] Merge server with nofloating server heat templates (#761)
+ (bdobreli@redhat.com)
+- Support separate data network for Flannel SDN (#757) (bdobreli@redhat.com)
+- Add Extra Docker Registry URLs (custom post-provision action) (#794)
+ (tlacencin@gmail.com)
+- Make the private key examples consistent (tomas@sedovic.cz)
+- Allow the specification of server group policies when provisioning openstack
+ (#747) (tzumainn@redhat.com)
+- Attach additional RHN Pools (post-provision custom action) (#753)
+ (tlacencin@gmail.com)
+- Streamline the OpenStack provider README (tomas@sedovic.cz)
+- Adding support for cluster-autoscaler role (kwoodson@redhat.com)
+- Fix for this issue https://bugzilla.redhat.com/show_bug.cgi?id=1495372 (#793)
+ (edu@redhat.com)
+- Add CentOS support to the docker-storage-setup role (tomas@sedovic.cz)
+- Replace the CASL references (#778) (tomas@sedovic.cz)
+- Set public_v4 to private_v4 if it doesn't exist (tomas@sedovic.cz)
+- Fix flake8 errors (tomas@sedovic.cz)
+- Add dynamic inventory (tomas@sedovic.cz)
+- Fixing various contrib changes causing CASL breakage (#771)
+ (oybed@users.noreply.github.com)
+- Required variables to create dedicated lv (#766) (edu@redhat.com)
+- Adding the option to use 'stack_state' to allow for easy de-provisioning
+ (#754) (oybed@users.noreply.github.com)
+- Fix public master cluster DNS record when using bastion (#752)
+ (bdobreli@redhat.com)
+- Upscaling OpenShift application nodes (#571) (tlacencin@gmail.com)
+- load balancer formatting fix (#745) (tzumainn@redhat.com)
+- Docker ansible host (#742) (tomas@sedovic.cz)
+- Empty ssh (#729) (tomas@sedovic.cz)
+- Remove the `rhsm_register` value from inventory (tomas@sedovic.cz)
+- Make the `rhsm_register` value optional (tomas@sedovic.cz)
+- Clear the previous inventory during provisioning (tomas@sedovic.cz)
+- Fix the cinder_registry_volume conditional (tomas@sedovic.cz)
+- Pre-create a Cinder registry volume (tomas@sedovic.cz)
+- Add ability to support custom api and console ports (#712)
+ (etsauer@gmail.com)
+- Support Cinder-backed Openshift registry (#707) (tomas@sedovic.cz)
+- openstack: make server ports be trunk ports (#713) (celebdor@gmail.com)
+- Point openshift_master_cluster_public_hostname at master or lb if defined
+ (#706) (tzumainn@redhat.com)
+- Allow using a provider network (#701) (tomas@sedovic.cz)
+- Document global DNS security options (#694) (bdobreli@redhat.com)
+- Add custom post-provision playbook for adding yum repos (#697)
+ (tzumainn@redhat.com)
+- Support external/pre-provisioned authoritative cluster DNS (#690)
+ (bdobreli@redhat.com)
+- Added checks for configured images and flavors (#688) (tlacencin@gmail.com)
+- Cast num_* as int for jinja templates (#685) (bdobreli@redhat.com)
+- Do not repeat pre_tasks for post-provision playbook (#689)
+ (bdobreli@redhat.com)
+- Fix node label customisation (#679) (tlacencin@gmail.com)
+- Add documentation regarding running custom post-provision tasks (#678)
+ (tzumainn@redhat.com)
+- Add docs and defaults for multi-master setup (bdobreli@redhat.com)
+- Ignore *.cfg and *.crt in the openstack inventory (#672) (tomas@sedovic.cz)
+- Update openshift_release in the sample inventory (#647) (tomas@sedovic.cz)
+- Configure different Docker volume sizes for different roles (#644)
+ (tlacencin@gmail.com)
+- Avoid server recreation in case of user_data modification. (#651)
+ (robipolli@gmail.com)
+- Set custom hostnames for servers (#643) (tlacencin@gmail.com)
+- Access UI via a bastion node (#596) (bdobreli@redhat.com)
+- group_vars/all.yml, stack_params.yaml, README: specifying flavors enabled and
+ documented (#638) (tlacencin@gmail.com)
+- Specify different image names for roles (#637) (tlacencin@gmail.com)
+- Support multiple private networks for static inventory (#604)
+ (bdobreli@redhat.com)
+- Allow using ephemeral volumes for docker storage (#615) (tomas@sedovic.cz)
+- Remove clouds.yaml from sample-inventory (tomas@sedovic.cz)
+- Moving common DNS roles out of the playbook area (#605)
+ (oybed@users.noreply.github.com)
+- Note about jmespath requirement for control node (#599) (bdobreli@redhat.com)
+- removed openstack (djurgens@redhat.com)
+- Add wildcard pointer to Private DNS (djurgens@redhat.com)
+- Options for bastion, SSH config, static inventory autogeneration
+ (bdobreli@redhat.com)
+- Add bastion and ssh config for the static inventory role
+ (bdobreli@redhat.com)
+- Set openshift_hostname explicitly for openstack (#579) (tomas@sedovic.cz)
+- README: Added note about infra-ansible installation (#574)
+ (tlacencin@gmail.com)
+- Static inventory autogeneration (#550) (bdobreli@redhat.com)
+- Generate static inventory with shade inventory (#538) (bdobreli@redhat.com)
+- Include masters into etcd group, when it is empty (#559)
+ (bdobreli@redhat.com)
+- During provisioning, make unnecessary packages optional under a switch (#561)
+ (tlacencin@gmail.com)
+- Set ansible_become for the OSEv3 group (tomas@sedovic.cz)
+- README: fix (kpilatov@redhat.com)
+- README: typo (kpilatov@redhat.com)
+- dependencies: python-heatclient and python-openstackclient added to optional
+ dependencies (kpilatov@redhat.com)
+- README: added prerequisity for a repository needed for python-openstackclient
+ installation (kpilatov@redhat.com)
+- Add a role to generate a static inventory (#540) (bdobreli@redhat.com)
+- Retry tasks in the subscription manager role (#552) (tlacencin@gmail.com)
+- Set up NetworkManager automatically (#542) (tomas@sedovic.cz)
+- Replace greaterthan and equalto in openstack-stack (tomas@sedovic.cz)
+- Switch the sample inventory to CentOS (#541) (tomas@sedovic.cz)
+- Add defaults values for some openstack vars (#539) (tomas@sedovic.cz)
+- Install DNS roles from casl-infra with galaxy (#529) (bdobreli@redhat.com)
+- Playbook prerequisites.yml checks that prerequisites are met before
+ provisioning (#518) (tlacencin@gmail.com)
+- Persist DNS configuration for nodes for openstack provider
+ (bdobreli@redhat.com)
+- Manage packages to install/update for openstack provider
+ (bdobreli@redhat.com)
+- Fix yaml indentation (tomas@sedovic.cz)
+- Use wait_for_connection for the Heat nodes (tomas@sedovic.cz)
+- Put back node/flat secgrp for infra nodes on openstack (bdobreli@redhat.com)
+- README.md: fixing typo (kpilatov@redhat.com)
+- README.md: list jinja2 as a dependency (kpilatov@redhat.com)
+- Modify sec groups for provisioned openstack servers (bdobreli@redhat.com)
+- rename node_removal_policies, add some comments and defaults
+ (tzumainn@redhat.com)
+- all.yml: removed whitespaces in front of variables (kpilatov@redhat.com)
+- removed whitespace in front of commented variable (kpilatov@redhat.com)
+- OSEv3.yml: trailing space... (kpilatov@redhat.com)
+- OSEv3.yml: added option to ignore set hardware limits for RAM and DISK
+ (kpilatov@redhat.com)
+- Fix flat sec group and infra/dns sec rules (bdobreli@redhat.com)
+- Add node_removal_policies variable to allow for scaling down
+ (tzumainn@redhat.com)
+- Use cached facts, do not become for localhost (#484) (bdobreli@redhat.com)
+- Add profiling and skippy stdout (#470) (bdobreli@redhat.com)
+- Fix flake8 errors with the openstack inventory (tomas@sedovic.cz)
+- Fix yamllint errors (tomas@sedovic.cz)
+- Update sample inventory with the latest changes (tomas@sedovic.cz)
+- Gather facts for provision playbook (bdobreli@redhat.com)
+- Drop atomic-openshift-utils, update docs for origin (bdobreli@redhat.com)
+- Add ansible.cfg for openstack provider (bdobreli@redhat.com)
+- Add a flat sec group for openstack provider (bdobreli@redhat.com)
+- Always let the openshift nodes access the DNS (tomas@sedovic.cz)
+- Fix privileges in the pre-install playbook (tomas@sedovic.cz)
+- Add default values to provision-openstack.yml (tomas@sedovic.cz)
+- Move pre_tasks from to the openstack provisioner (tomas@sedovic.cz)
+- Add readme (tomas@sedovic.cz)
+- Add license for openstack.py in inventory (tomas@sedovic.cz)
+- Add a sample inventory for openstack provisioning (tomas@sedovic.cz)
+- Symlink roles to provisioning/openstack/roles (tomas@sedovic.cz)
+- Add a single provisioning playbook (tomas@sedovic.cz)
+- Move the openstack provisioning playbooks (tomas@sedovic.cz)
+- Update CASL to use nsupdate for DNS records (#48)
+ (oybed@users.noreply.github.com)
+- Conditionally set the openshift_master_default_subdomain to avoid overriding
+ it unecessary (#47) (oybed@users.noreply.github.com)
+- More ansible migration and deploy OCP from local workstation (#376)
+ (pschiffe@redhat.com)
+- Removed hardcoded values from ansible roles (edu@redhat.com)
+- First attempt at a simple multi-master support (#39) (etsauer@gmail.com)
+- Stack refactor (#38) (etsauer@gmail.com)
+- Ensure DNS configuration has wildcards set for infra nodes (#24)
+ (oybed@users.noreply.github.com)
+- Fixing two significant bugs in the HEAT deployment (#13) (etsauer@gmail.com)
+- update for yamllint errors (jdetiber@redhat.com)
+- Making providers common (#126) (rcook@redhat.com)
+- Openstack heat (#2) (etsauer@gmail.com)
+- Fixing ansible impl to work with OSP9 and ansible 2.2 (bedin@redhat.com)
+- Updated env_id to be a sub-domain + make the logic a bit more flexible
+ (bedin@redhat.com)
+- Fixes Issue #163 if rhsm_password is not defined (vvaldez@redhat.com)
+- Cleande up hostname role to make it more generic (bedin@redhat.com)
+- Updated to run as root rather than cloud-user, for now... (bedin@redhat.com)
+- Channging hard coded host groups to match openshift-ansible expected host
+ groups. Importing byo playbook now instead of nested ansible run. Need to
+ refactor how we generate hostnames to make it fit this. (esauer@redhat.com)
+- Subscription manager role should accomodate orgs with spaces
+ (esauer@redhat.com)
+- Reverting previous commit and making template adjustments (esauer@redhat.com)
+- Changes to allow runs from inside a container. Also allows for running
+ upstream openshift-ansible installer (esauer@redhat.com)
+- Changes by JayKayy for a full provision of OpenShift on OpenStack
+ (esauer@redhat.com)
+- Fix typo in task name (vvaldez@redhat.com)
+- Add org parameter to Satellite with user/pass (vvaldez@redhat.com)
+- Remove vars_prompt, add info to README to re-enable and for ansible-vault
+ (vvaldez@redhat.com)
+- Cosmetic changes to task names and move yum clean all to prereqs
+ (vvaldez@redhat.com)
+- Refactor use of rhsm_password to prevent display to CLI (vvaldez@redhat.com)
+- Fix bad syntax with extra 'and' in when using rhsm_pool (vvaldez@redhat.com)
+- Refactor role to dynamically determine rhsm_method (vvaldez@redhat.com)
+- Add subscription-manager support for Hosted or Satellite (vvaldez@redhat.com)
+- New OSE3 docker host builder and OpenStack ansible provisioning support
+ (andy.block@gmail.com)
+
* Wed Nov 15 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.8.0-0.1.0
- Allow disabling authorization migration check (sdodson@redhat.com)
- Alternative method to create docker registry auth creds (mgugino@redhat.com)
diff --git a/playbooks/aws/openshift-cluster/install.yml b/playbooks/aws/openshift-cluster/install.yml
index a6b278fdf..87863b767 100644
--- a/playbooks/aws/openshift-cluster/install.yml
+++ b/playbooks/aws/openshift-cluster/install.yml
@@ -19,7 +19,7 @@
include: ../../init/main.yml
- name: perform the installer openshift-checks
- include: ../../common/openshift-checks/install.yml
+ include: ../../openshift-checks/private/install.yml
- name: etcd install
include: ../../common/openshift-etcd/config.yml
diff --git a/playbooks/byo/openshift-checks/certificate_expiry/roles b/playbooks/byo/openshift-checks/certificate_expiry/roles
deleted file mode 120000
index 4bdbcbad3..000000000
--- a/playbooks/byo/openshift-checks/certificate_expiry/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../../roles \ No newline at end of file
diff --git a/playbooks/byo/openshift-checks/health.yml b/playbooks/byo/openshift-checks/health.yml
deleted file mode 100644
index 0034251e3..000000000
--- a/playbooks/byo/openshift-checks/health.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-- include: ../../init/main.yml
-
-- include: ../../common/openshift-checks/health.yml
diff --git a/playbooks/byo/openshift-checks/pre-install.yml b/playbooks/byo/openshift-checks/pre-install.yml
deleted file mode 100644
index 1d35f601b..000000000
--- a/playbooks/byo/openshift-checks/pre-install.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-- include: ../../init/main.yml
-
-- include: ../../common/openshift-checks/pre-install.yml
diff --git a/playbooks/byo/openshift-preflight/check.yml b/playbooks/byo/openshift-preflight/check.yml
deleted file mode 100644
index 2e53452a6..000000000
--- a/playbooks/byo/openshift-preflight/check.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-# location is moved; this file remains so existing instructions keep working
-- include: ../openshift-checks/pre-install.yml
diff --git a/playbooks/certificate_expiry b/playbooks/certificate_expiry
deleted file mode 120000
index 9cf5334a1..000000000
--- a/playbooks/certificate_expiry
+++ /dev/null
@@ -1 +0,0 @@
-byo/openshift-checks/certificate_expiry/ \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
index d00273ef6..ad10ba821 100644
--- a/playbooks/common/openshift-cluster/config.yml
+++ b/playbooks/common/openshift-cluster/config.yml
@@ -1,5 +1,5 @@
---
-- include: ../openshift-checks/install.yml
+- include: ../../openshift-checks/private/install.yml
- include: ../openshift-etcd/config.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
index d71c96cd7..c76b3053d 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
@@ -93,6 +93,14 @@
vars:
etcd_upgrade_version: '3.1.3'
+- include: upgrade_rpm_members.yml
+ vars:
+ etcd_upgrade_version: '3.2'
+
+- include: upgrade_image_members.yml
+ vars:
+ etcd_upgrade_version: '3.2.7'
+
- name: Upgrade fedora to latest
hosts: oo_etcd_hosts_to_upgrade
serial: 1
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index 399b818a7..fa65567c2 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -317,13 +317,13 @@
delay: 60
roles:
- - lib_openshift
- openshift_facts
- - docker
- - openshift_node_dnsmasq
- - openshift_node_upgrade
-
post_tasks:
+ - include_role:
+ name: openshift_node
+ tasks_from: upgrade.yml
+ vars:
+ openshift_node_upgrade_in_progress: True
- name: Set node schedulability
oc_adm_manage_node:
node: "{{ openshift.node.nodename | lower }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
index c93a5d89c..5dc8193a7 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -34,16 +34,18 @@
delay: 60
roles:
- - lib_openshift
- openshift_facts
- - docker
- - openshift_node_dnsmasq
- - openshift_node_upgrade
- - role: openshift_excluder
- r_openshift_excluder_action: enable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
-
post_tasks:
+ - include_role:
+ name: openshift_node
+ tasks_from: upgrade.yml
+ vars:
+ openshift_node_upgrade_in_progress: True
+ - include_role:
+ name: openshift_excluder
+ vars:
+ r_openshift_excluder_action: enable
+ r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
- name: Set node schedulability
oc_adm_manage_node:
node: "{{ openshift.node.nodename | lower }}"
diff --git a/playbooks/byo/openshift-checks/README.md b/playbooks/openshift-checks/README.md
index b26e7d7ed..0b7ea91ff 100644
--- a/playbooks/byo/openshift-checks/README.md
+++ b/playbooks/openshift-checks/README.md
@@ -47,19 +47,19 @@ against your inventory file. Here is the step-by-step:
3. Run the appropriate playbook:
```console
- $ ansible-playbook -i <inventory file> playbooks/byo/openshift-checks/pre-install.yml
+ $ ansible-playbook -i <inventory file> playbooks/openshift-checks/pre-install.yml
```
or
```console
- $ ansible-playbook -i <inventory file> playbooks/byo/openshift-checks/health.yml
+ $ ansible-playbook -i <inventory file> playbooks/openshift-checks/health.yml
```
or
```console
- $ ansible-playbook -i <inventory file> playbooks/byo/openshift-checks/certificate_expiry/default.yaml -v
+ $ ansible-playbook -i <inventory file> playbooks/openshift-checks/certificate_expiry/default.yaml -v
```
### The adhoc playbook
@@ -72,19 +72,19 @@ using the `-e` flag.
For example, to run the `docker_storage` check:
```console
-$ ansible-playbook -i <inventory file> playbooks/byo/openshift-checks/adhoc.yml -e openshift_checks=docker_storage
+$ ansible-playbook -i <inventory file> playbooks/openshift-checks/adhoc.yml -e openshift_checks=docker_storage
```
To run more checks, use a comma-separated list of check names:
```console
-$ ansible-playbook -i <inventory file> playbooks/byo/openshift-checks/adhoc.yml -e openshift_checks=docker_storage,disk_availability
+$ ansible-playbook -i <inventory file> playbooks/openshift-checks/adhoc.yml -e openshift_checks=docker_storage,disk_availability
```
To run an entire class of checks, use the name of a check group tag, prefixed by `@`. This will run all checks tagged `preflight`:
```console
-$ ansible-playbook -i <inventory file> playbooks/byo/openshift-checks/adhoc.yml -e openshift_checks=@preflight
+$ ansible-playbook -i <inventory file> playbooks/openshift-checks/adhoc.yml -e openshift_checks=@preflight
```
It is valid to specify multiple check tags and individual check names together
@@ -94,7 +94,7 @@ To list all of the available checks and tags, run the adhoc playbook without
setting the `openshift_checks` variable:
```console
-$ ansible-playbook -i <inventory file> playbooks/byo/openshift-checks/adhoc.yml
+$ ansible-playbook -i <inventory file> playbooks/openshift-checks/adhoc.yml
```
## Running in a container
diff --git a/playbooks/byo/openshift-checks/adhoc.yml b/playbooks/openshift-checks/adhoc.yml
index 4ee9e75f1..036a63776 100644
--- a/playbooks/byo/openshift-checks/adhoc.yml
+++ b/playbooks/openshift-checks/adhoc.yml
@@ -1,6 +1,6 @@
---
# NOTE: ideally this would be just part of a single play in
-# common/openshift-checks/adhoc.yml that lists the existing checks when
+# private/adhoc.yml that lists the existing checks when
# openshift_checks is not set or run the requested checks. However, to actually
# run the checks we need to have the included dependencies to run first and that
# takes time. To speed up listing checks, we use this separate play that runs
@@ -20,6 +20,6 @@
action: openshift_health_check
when: openshift_checks is undefined or not openshift_checks
-- include: ../../init/main.yml
+- include: ../init/main.yml
-- include: ../../common/openshift-checks/adhoc.yml
+- include: private/adhoc.yml
diff --git a/playbooks/byo/openshift-checks/certificate_expiry/default.yaml b/playbooks/openshift-checks/certificate_expiry/default.yaml
index 630135cae..630135cae 100644
--- a/playbooks/byo/openshift-checks/certificate_expiry/default.yaml
+++ b/playbooks/openshift-checks/certificate_expiry/default.yaml
diff --git a/playbooks/byo/openshift-checks/certificate_expiry/easy-mode-upload.yaml b/playbooks/openshift-checks/certificate_expiry/easy-mode-upload.yaml
index 378d1f154..378d1f154 100644
--- a/playbooks/byo/openshift-checks/certificate_expiry/easy-mode-upload.yaml
+++ b/playbooks/openshift-checks/certificate_expiry/easy-mode-upload.yaml
diff --git a/playbooks/byo/openshift-checks/certificate_expiry/easy-mode.yaml b/playbooks/openshift-checks/certificate_expiry/easy-mode.yaml
index ae41c7c14..ae41c7c14 100644
--- a/playbooks/byo/openshift-checks/certificate_expiry/easy-mode.yaml
+++ b/playbooks/openshift-checks/certificate_expiry/easy-mode.yaml
diff --git a/playbooks/byo/openshift-checks/certificate_expiry/html_and_json_default_paths.yaml b/playbooks/openshift-checks/certificate_expiry/html_and_json_default_paths.yaml
index d80cb6ff4..d80cb6ff4 100644
--- a/playbooks/byo/openshift-checks/certificate_expiry/html_and_json_default_paths.yaml
+++ b/playbooks/openshift-checks/certificate_expiry/html_and_json_default_paths.yaml
diff --git a/playbooks/byo/openshift-checks/certificate_expiry/html_and_json_timestamp.yaml b/playbooks/openshift-checks/certificate_expiry/html_and_json_timestamp.yaml
index 2189455b7..2189455b7 100644
--- a/playbooks/byo/openshift-checks/certificate_expiry/html_and_json_timestamp.yaml
+++ b/playbooks/openshift-checks/certificate_expiry/html_and_json_timestamp.yaml
diff --git a/playbooks/byo/openshift-checks/certificate_expiry/longer-warning-period-json-results.yaml b/playbooks/openshift-checks/certificate_expiry/longer-warning-period-json-results.yaml
index 87a0f3be4..87a0f3be4 100644
--- a/playbooks/byo/openshift-checks/certificate_expiry/longer-warning-period-json-results.yaml
+++ b/playbooks/openshift-checks/certificate_expiry/longer-warning-period-json-results.yaml
diff --git a/playbooks/byo/openshift-checks/certificate_expiry/longer_warning_period.yaml b/playbooks/openshift-checks/certificate_expiry/longer_warning_period.yaml
index 960457c4b..960457c4b 100644
--- a/playbooks/byo/openshift-checks/certificate_expiry/longer_warning_period.yaml
+++ b/playbooks/openshift-checks/certificate_expiry/longer_warning_period.yaml
diff --git a/playbooks/byo/openshift-checks/roles b/playbooks/openshift-checks/certificate_expiry/roles
index 20c4c58cf..20c4c58cf 120000
--- a/playbooks/byo/openshift-checks/roles
+++ b/playbooks/openshift-checks/certificate_expiry/roles
diff --git a/playbooks/openshift-checks/health.yml b/playbooks/openshift-checks/health.yml
new file mode 100644
index 000000000..64bfa411d
--- /dev/null
+++ b/playbooks/openshift-checks/health.yml
@@ -0,0 +1,4 @@
+---
+- include: ../init/main.yml
+
+- include: private/health.yml
diff --git a/playbooks/openshift-checks/pre-install.yml b/playbooks/openshift-checks/pre-install.yml
new file mode 100644
index 000000000..410204d6a
--- /dev/null
+++ b/playbooks/openshift-checks/pre-install.yml
@@ -0,0 +1,4 @@
+---
+- include: ../init/main.yml
+
+- include: private/pre-install.yml
diff --git a/playbooks/common/openshift-checks/adhoc.yml b/playbooks/openshift-checks/private/adhoc.yml
index d0deaeb65..d0deaeb65 100644
--- a/playbooks/common/openshift-checks/adhoc.yml
+++ b/playbooks/openshift-checks/private/adhoc.yml
diff --git a/playbooks/common/openshift-checks/health.yml b/playbooks/openshift-checks/private/health.yml
index d0921b9d3..d0921b9d3 100644
--- a/playbooks/common/openshift-checks/health.yml
+++ b/playbooks/openshift-checks/private/health.yml
diff --git a/playbooks/common/openshift-checks/install.yml b/playbooks/openshift-checks/private/install.yml
index 93cf6c359..93cf6c359 100644
--- a/playbooks/common/openshift-checks/install.yml
+++ b/playbooks/openshift-checks/private/install.yml
diff --git a/playbooks/common/openshift-checks/pre-install.yml b/playbooks/openshift-checks/private/pre-install.yml
index 32449d4e4..32449d4e4 100644
--- a/playbooks/common/openshift-checks/pre-install.yml
+++ b/playbooks/openshift-checks/private/pre-install.yml
diff --git a/playbooks/common/openshift-checks/roles b/playbooks/openshift-checks/private/roles
index 20c4c58cf..20c4c58cf 120000
--- a/playbooks/common/openshift-checks/roles
+++ b/playbooks/openshift-checks/private/roles
diff --git a/playbooks/openshift-checks/roles b/playbooks/openshift-checks/roles
new file mode 120000
index 000000000..b741aa3db
--- /dev/null
+++ b/playbooks/openshift-checks/roles
@@ -0,0 +1 @@
+../../roles \ No newline at end of file
diff --git a/roles/cockpit-ui/tasks/main.yml b/roles/cockpit-ui/tasks/main.yml
index 09f4259a2..f60912033 100644
--- a/roles/cockpit-ui/tasks/main.yml
+++ b/roles/cockpit-ui/tasks/main.yml
@@ -41,6 +41,7 @@
command: >
{{ openshift.common.client_binary }} new-app --template=registry-console
{% if openshift_cockpit_deployer_prefix is defined %}-p IMAGE_PREFIX="{{ openshift_cockpit_deployer_prefix }}"{% endif %}
+ {% if openshift_cockpit_deployer_basename is defined %}-p IMAGE_BASENAME="{{ openshift_cockpit_deployer_basename }}"{% endif %}
{% if openshift_cockpit_deployer_version is defined %}-p IMAGE_VERSION="{{ openshift_cockpit_deployer_version }}"{% endif %}
-p OPENSHIFT_OAUTH_PROVIDER_URL="{{ openshift.master.public_api_url }}"
-p REGISTRY_HOST="{{ docker_registry_route.results[0].spec.host }}"
diff --git a/roles/etcd/defaults/main.yaml b/roles/etcd/defaults/main.yaml
index 4b734d4ed..9a3652a2b 100644
--- a/roles/etcd/defaults/main.yaml
+++ b/roles/etcd/defaults/main.yaml
@@ -6,6 +6,12 @@ r_etcd_common_backup_sufix_name: ''
r_etcd_common_etcd_runtime: "docker"
r_etcd_common_embedded_etcd: false
+osm_etcd_image: 'registry.access.redhat.com/rhel7/etcd'
+etcd_image_dict:
+ origin: "registry.fedoraproject.org/f26/etcd"
+ openshift-enterprise: "{{ osm_etcd_image }}"
+etcd_image: "{{ etcd_image_dict[openshift_deployment_type | default('origin')] }}"
+
# etcd run on a host => use etcdctl command directly
# etcd run as a docker container => use docker exec
# etcd run as a runc container => use runc exec
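The new defaults above replace the `openshift.etcd.etcd_image` fact with a direct lookup keyed on the deployment type; a minimal Python sketch of that selection (image values copied from the defaults above):

```python
osm_etcd_image = 'registry.access.redhat.com/rhel7/etcd'
etcd_image_dict = {
    'origin': 'registry.fedoraproject.org/f26/etcd',
    'openshift-enterprise': osm_etcd_image,
}
openshift_deployment_type = None  # unset, as when the inventory omits it
# Mirrors `etcd_image_dict[openshift_deployment_type | default('origin')]`
etcd_image = etcd_image_dict[openshift_deployment_type or 'origin']
print(etcd_image)  # -> registry.fedoraproject.org/f26/etcd
```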
diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml
index 3e69af314..fabe66b91 100644
--- a/roles/etcd/tasks/main.yml
+++ b/roles/etcd/tasks/main.yml
@@ -20,7 +20,7 @@
- block:
- name: Pull etcd container
- command: docker pull {{ openshift.etcd.etcd_image }}
+ command: docker pull {{ etcd_image }}
register: pull_result
changed_when: "'Downloaded newer image' in pull_result.stdout"
diff --git a/roles/etcd/tasks/system_container.yml b/roles/etcd/tasks/system_container.yml
index f71d9b551..82ac4fc84 100644
--- a/roles/etcd/tasks/system_container.yml
+++ b/roles/etcd/tasks/system_container.yml
@@ -5,7 +5,7 @@
tasks_from: proxy
- name: Pull etcd system container
- command: atomic pull --storage=ostree {{ openshift.etcd.etcd_image }}
+ command: atomic pull --storage=ostree {{ etcd_image }}
register: pull_result
changed_when: "'Pulling layer' in pull_result.stdout"
@@ -57,7 +57,7 @@
- name: Install or Update Etcd system container package
oc_atomic_container:
name: etcd
- image: "{{ openshift.etcd.etcd_image }}"
+ image: "{{ etcd_image }}"
state: latest
values:
- ETCD_DATA_DIR=/var/lib/etcd
diff --git a/roles/etcd/templates/etcd.docker.service b/roles/etcd/templates/etcd.docker.service
index adeca7a91..99ae37319 100644
--- a/roles/etcd/templates/etcd.docker.service
+++ b/roles/etcd/templates/etcd.docker.service
@@ -7,7 +7,7 @@ PartOf={{ openshift.docker.service_name }}.service
[Service]
EnvironmentFile={{ etcd_conf_file }}
ExecStartPre=-/usr/bin/docker rm -f {{ etcd_service }}
-ExecStart=/usr/bin/docker run --name {{ etcd_service }} --rm -v {{ etcd_data_dir }}:{{ etcd_data_dir }}:z -v {{ etcd_conf_dir }}:{{ etcd_conf_dir }}:ro --env-file={{ etcd_conf_file }} --net=host --entrypoint=/usr/bin/etcd {{ openshift.etcd.etcd_image }}
+ExecStart=/usr/bin/docker run --name {{ etcd_service }} --rm -v {{ etcd_data_dir }}:{{ etcd_data_dir }}:z -v {{ etcd_conf_dir }}:{{ etcd_conf_dir }}:ro --env-file={{ etcd_conf_file }} --net=host --entrypoint=/usr/bin/etcd {{ etcd_image }}
ExecStop=/usr/bin/docker stop {{ etcd_service }}
SyslogIdentifier=etcd_container
Restart=always
diff --git a/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py b/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py
index 723e43e8d..14fdca400 100644
--- a/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py
+++ b/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py
@@ -46,7 +46,7 @@ class CallbackModule(CallbackBase):
},
'installer_phase_health': {
'title': 'Health Check',
- 'playbook': 'playbooks/byo/openshift-checks/pre-install.yml'
+ 'playbook': 'playbooks/openshift-checks/pre-install.yml'
},
'installer_phase_etcd': {
'title': 'etcd Install',
diff --git a/roles/kuryr/README.md b/roles/kuryr/README.md
index 7b618f902..269788a11 100644
--- a/roles/kuryr/README.md
+++ b/roles/kuryr/README.md
@@ -31,6 +31,11 @@ pods. This allows to have interconnectivity between pods and OpenStack VMs.
* ``kuryr_openstack_pod_service_id=service_subnet_uuid``
* ``kuryr_openstack_pod_project_id=pod_project_uuid``
* ``kuryr_openstack_worker_nodes_subnet_id=worker_nodes_subnet_uuid``
+* ``kuryr_openstack_enable_pools=True``
+* ``kuryr_openstack_pool_max=0``
+* ``kuryr_openstack_pool_min=1``
+* ``kuryr_openstack_pool_batch=5``
+* ``kuryr_openstack_pool_update_frequency=20``
## Kuryr resources
diff --git a/roles/kuryr/templates/configmap.yaml.j2 b/roles/kuryr/templates/configmap.yaml.j2
index e874d6c25..6bf6c1db2 100644
--- a/roles/kuryr/templates/configmap.yaml.j2
+++ b/roles/kuryr/templates/configmap.yaml.j2
@@ -161,6 +161,14 @@ data:
# The driver that provides VIFs for Kubernetes Pods. (string value)
pod_vif_driver = nested-vlan
+ # The driver that manages VIFs pools for Kubernetes Pods (string value)
+ vif_pool_driver = {{ kuryr_openstack_enable_pools | default(False) | ternary('nested', 'noop') }}
+
+ [vif_pool]
+ ports_pool_max = {{ kuryr_openstack_pool_max | default(0) }}
+ ports_pool_min = {{ kuryr_openstack_pool_min | default(1) }}
+ ports_pool_batch = {{ kuryr_openstack_pool_batch | default(5) }}
+ ports_pool_update_frequency = {{ kuryr_openstack_pool_update_frequency | default(20) }}
[neutron]
# Configuration options for OpenStack Neutron
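A short sketch of the pool-driver selection the template performs with Ansible's `ternary` filter, along with the `[vif_pool]` defaults it falls back to (all values taken from the template above):

```python
kuryr_openstack_enable_pools = False  # inventory toggle, defaults to False

# `x | ternary('nested', 'noop')` picks the first value when x is truthy
vif_pool_driver = 'nested' if kuryr_openstack_enable_pools else 'noop'
vif_pool = {
    'ports_pool_max': 0,                # kuryr_openstack_pool_max
    'ports_pool_min': 1,                # kuryr_openstack_pool_min
    'ports_pool_batch': 5,              # kuryr_openstack_pool_batch
    'ports_pool_update_frequency': 20,  # kuryr_openstack_pool_update_frequency
}
```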
diff --git a/roles/openshift_certificate_expiry/README.md b/roles/openshift_certificate_expiry/README.md
index f19a421cb..48338ca1b 100644
--- a/roles/openshift_certificate_expiry/README.md
+++ b/roles/openshift_certificate_expiry/README.md
@@ -54,7 +54,7 @@ included in this role, or you can [read on below for more examples](#more-exampl
to help you craft you own.
```
-$ ansible-playbook -v -i HOSTS playbooks/byo/openshift-checks/certificate_expiry/easy-mode.yaml
+$ ansible-playbook -v -i HOSTS playbooks/openshift-checks/certificate_expiry/easy-mode.yaml
```
Using the `easy-mode.yaml` playbook will produce:
@@ -65,7 +65,7 @@ Using the `easy-mode.yaml` playbook will produce:
> **Note:** If you are running from an RPM install use
-> `/usr/share/ansible/openshift-ansible/playbooks/byo/openshift-checks/certificate_expiry/easy-mode.yaml`
+> `/usr/share/ansible/openshift-ansible/playbooks/openshift-checks/certificate_expiry/easy-mode.yaml`
> instead
## Run from a container
@@ -80,7 +80,7 @@ There are several [examples](../../examples/README.md) in the `examples` directo
## More Example Playbooks
> **Note:** These Playbooks are available to run directly out of the
-> [/playbooks/byo/openshift-checks/certificate_expiry/](../../playbooks/byo/openshift-checks/certificate_expiry/) directory.
+> [/playbooks/openshift-checks/certificate_expiry/](../../playbooks/openshift-checks/certificate_expiry/) directory.
### Default behavior
@@ -99,14 +99,14 @@ This playbook just invokes the certificate expiration check role with default op
**From git:**
```
-$ ansible-playbook -v -i HOSTS playbooks/byo/openshift-checks/certificate_expiry/default.yaml
+$ ansible-playbook -v -i HOSTS playbooks/openshift-checks/certificate_expiry/default.yaml
```
**From openshift-ansible-playbooks rpm:**
```
-$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/byo/openshift-checks/certificate_expiry/default.yaml
+$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/openshift-checks/certificate_expiry/default.yaml
```
-> [View This Playbook](../../playbooks/byo/openshift-checks/certificate_expiry/default.yaml)
+> [View This Playbook](../../playbooks/openshift-checks/certificate_expiry/default.yaml)
### Easy mode
@@ -130,14 +130,14 @@ certificates (healthy or not) are included in the results:
**From git:**
```
-$ ansible-playbook -v -i HOSTS playbooks/byo/openshift-checks/certificate_expiry/easy-mode.yaml
+$ ansible-playbook -v -i HOSTS playbooks/openshift-checks/certificate_expiry/easy-mode.yaml
```
**From openshift-ansible-playbooks rpm:**
```
-$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/byo/openshift-checks/certificate_expiry/easy-mode.yaml
+$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/openshift-checks/certificate_expiry/easy-mode.yaml
```
-> [View This Playbook](../../playbooks/byo/openshift-checks/certificate_expiry/easy-mode.yaml)
+> [View This Playbook](../../playbooks/openshift-checks/certificate_expiry/easy-mode.yaml)
### Easy mode and upload reports to masters
@@ -193,14 +193,14 @@ options via environment variables:
**From git:**
```
-$ ansible-playbook -v -i HOSTS playbooks/byo/openshift-checks/certificate_expiry/easy-mode-upload.yaml
+$ ansible-playbook -v -i HOSTS playbooks/openshift-checks/certificate_expiry/easy-mode-upload.yaml
```
**From openshift-ansible-playbooks rpm:**
```
-$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/byo/openshift-checks/certificate_expiry/easy-mode-upload.yaml
+$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/openshift-checks/certificate_expiry/easy-mode-upload.yaml
```
-> [View This Playbook](../../playbooks/byo/openshift-checks/certificate_expiry/easy-mode-upload.yaml)
+> [View This Playbook](../../playbooks/openshift-checks/certificate_expiry/easy-mode-upload.yaml)
### Generate HTML and JSON artifacts in their default paths
@@ -219,14 +219,14 @@ $ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/by
**From git:**
```
-$ ansible-playbook -v -i HOSTS playbooks/byo/openshift-checks/certificate_expiry/html_and_json_default_paths.yaml
+$ ansible-playbook -v -i HOSTS playbooks/openshift-checks/certificate_expiry/html_and_json_default_paths.yaml
```
**From openshift-ansible-playbooks rpm:**
```
-$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/byo/openshift-checks/certificate_expiry/html_and_json_default_paths.yaml
+$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/openshift-checks/certificate_expiry/html_and_json_default_paths.yaml
```
-> [View This Playbook](../../playbooks/byo/openshift-checks/certificate_expiry/html_and_json_default_paths.yaml)
+> [View This Playbook](../../playbooks/openshift-checks/certificate_expiry/html_and_json_default_paths.yaml)
### Generate HTML and JSON reports in a custom path
@@ -250,14 +250,14 @@ This example customizes the report generation path to point to a specific path (
**From git:**
```
-$ ansible-playbook -v -i HOSTS playbooks/byo/openshift-checks/certificate_expiry/html_and_json_timestamp.yaml
+$ ansible-playbook -v -i HOSTS playbooks/openshift-checks/certificate_expiry/html_and_json_timestamp.yaml
```
**From openshift-ansible-playbooks rpm:**
```
-$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/byo/openshift-checks/certificate_expiry/html_and_json_timestamp.yaml
+$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/openshift-checks/certificate_expiry/html_and_json_timestamp.yaml
```
-> [View This Playbook](../../playbooks/byo/openshift-checks/certificate_expiry/html_and_json_timestamp.yaml)
+> [View This Playbook](../../playbooks/openshift-checks/certificate_expiry/html_and_json_timestamp.yaml)
### Long warning window
@@ -278,14 +278,14 @@ the module out):
**From git:**
```
-$ ansible-playbook -v -i HOSTS playbooks/byo/openshift-checks/certificate_expiry/longer_warning_period.yaml
+$ ansible-playbook -v -i HOSTS playbooks/openshift-checks/certificate_expiry/longer_warning_period.yaml
```
**From openshift-ansible-playbooks rpm:**
```
-$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/byo/openshift-checks/certificate_expiry/longer_warning_period.yaml
+$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/openshift-checks/certificate_expiry/longer_warning_period.yaml
```
-> [View This Playbook](../../playbooks/byo/openshift-checks/certificate_expiry/longer_warning_period.yaml)
+> [View This Playbook](../../playbooks/openshift-checks/certificate_expiry/longer_warning_period.yaml)
### Long warning window and JSON report
@@ -307,14 +307,14 @@ the module out) and save the results as a JSON file:
**From git:**
```
-$ ansible-playbook -v -i HOSTS playbooks/byo/openshift-checks/certificate_expiry/longer-warning-period-json-results.yaml
+$ ansible-playbook -v -i HOSTS playbooks/openshift-checks/certificate_expiry/longer-warning-period-json-results.yaml
```
**From openshift-ansible-playbooks rpm:**
```
-$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/byo/openshift-checks/certificate_expiry/longer-warning-period-json-results.yaml
+$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/openshift-checks/certificate_expiry/longer-warning-period-json-results.yaml
```
-> [View This Playbook](../../playbooks/byo/openshift-checks/certificate_expiry/longer-warning-period-json-results.yaml)
+> [View This Playbook](../../playbooks/openshift-checks/certificate_expiry/longer-warning-period-json-results.yaml)
diff --git a/roles/openshift_etcd_facts/tasks/main.yml b/roles/openshift_etcd_facts/tasks/main.yml
index 22fb39006..ed97d539c 100644
--- a/roles/openshift_etcd_facts/tasks/main.yml
+++ b/roles/openshift_etcd_facts/tasks/main.yml
@@ -1,5 +1 @@
---
-- openshift_facts:
- role: etcd
- local_facts:
- etcd_image: "{{ osm_etcd_image | default(None) }}"
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 99ebb7e36..f94e0e097 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -1563,7 +1563,8 @@ def set_builddefaults_facts(facts):
# Scaffold out the full expected datastructure
facts['master']['admission_plugin_config'] = {'BuildDefaults': {'configuration': {'env': {}}}}
facts['master']['admission_plugin_config'].update(builddefaults['config'])
- delete_empty_keys(facts['master']['admission_plugin_config']['BuildDefaults']['configuration']['env'])
+ if 'env' in facts['master']['admission_plugin_config']['BuildDefaults']['configuration']:
+ delete_empty_keys(facts['master']['admission_plugin_config']['BuildDefaults']['configuration']['env'])
return facts
@@ -1630,7 +1631,6 @@ def set_container_facts_if_unset(facts):
cli_image = master_image
node_image = 'openshift3/node'
ovs_image = 'openshift3/openvswitch'
- etcd_image = 'registry.access.redhat.com/rhel7/etcd'
pod_image = 'openshift3/ose-pod'
router_image = 'openshift3/ose-haproxy-router'
registry_image = 'openshift3/ose-docker-registry'
@@ -1640,7 +1640,6 @@ def set_container_facts_if_unset(facts):
cli_image = master_image
node_image = 'openshift/node'
ovs_image = 'openshift/openvswitch'
- etcd_image = 'registry.access.redhat.com/rhel7/etcd'
pod_image = 'openshift/origin-pod'
router_image = 'openshift/origin-haproxy-router'
registry_image = 'openshift/origin-docker-registry'
@@ -1667,8 +1666,6 @@ def set_container_facts_if_unset(facts):
facts['common']['registry_image'] = registry_image
if 'deployer_image' not in facts['common']:
facts['common']['deployer_image'] = deployer_image
- if 'etcd' in facts and 'etcd_image' not in facts['etcd']:
- facts['etcd']['etcd_image'] = etcd_image
if 'master' in facts and 'master_image' not in facts['master']:
facts['master']['master_image'] = master_image
facts['master']['master_system_image'] = master_image
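The new `env` guard covers inventories whose `BuildDefaults` configuration carries no environment variables: `update()` replaces the scaffolded structure wholesale, so the `env` key can vanish and the previously unconditional `delete_empty_keys()` call could fail on the missing key. A minimal sketch of the shape the guard now tolerates (values are illustrative):

```
# Illustrative BuildDefaults config with no 'env' section; the added
# 'if' skips delete_empty_keys() instead of failing on the missing key.
admission_plugin_config:
  BuildDefaults:
    configuration:
      apiVersion: v1
      kind: BuildDefaultsConfig
      gitHTTPProxy: "http://proxy.example.com:3128"  # hypothetical proxy
```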
diff --git a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
index 587c6f85c..4f91f6bb3 100644
--- a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
@@ -12,15 +12,15 @@ DEPLOYMENT_IMAGE_INFO = {
"origin": {
"namespace": "openshift",
"name": "origin",
- "registry_console_template": "${prefix}kubernetes:${version}",
"registry_console_prefix": "cockpit/",
+ "registry_console_basename": "kubernetes",
"registry_console_default_version": "latest",
},
"openshift-enterprise": {
"namespace": "openshift3",
"name": "ose",
- "registry_console_template": "${prefix}registry-console:${version}",
- "registry_console_prefix": "registry.access.redhat.com/openshift3/",
+ "registry_console_prefix": "openshift3/",
+ "registry_console_basename": "registry-console",
"registry_console_default_version": "${short_version}",
},
}
@@ -156,7 +156,8 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
if 'oo_nodes_to_config' in host_groups:
for suffix in NODE_IMAGE_SUFFIXES:
required.add(image_url.replace("${component}", suffix).replace("${version}", image_tag))
- required.add(self._registry_console_image(image_tag, image_info))
+ if self.get_var("osm_use_cockpit", default=True, convert=bool):
+ required.add(self._registry_console_image(image_tag, image_info))
# images for containerized components
if self.get_var("openshift", "common", "is_containerized"):
@@ -180,6 +181,10 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
"openshift_cockpit_deployer_prefix",
default=image_info["registry_console_prefix"],
)
+ basename = self.get_var(
+ "openshift_cockpit_deployer_basename",
+ default=image_info["registry_console_basename"],
+ )
# enterprise template just uses v3.6, v3.7, etc
match = re.match(r'v\d+\.\d+', image_tag)
@@ -187,8 +192,7 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
version = image_info["registry_console_default_version"].replace("${short_version}", short_version)
version = self.get_var("openshift_cockpit_deployer_version", default=version)
- template = image_info["registry_console_template"]
- return template.replace('${prefix}', prefix).replace('${version}', version)
+ return prefix + basename + ':' + version
def local_images(self, images):
"""Filter a list of images and return those available locally."""
diff --git a/roles/openshift_health_checker/test/docker_image_availability_test.py b/roles/openshift_health_checker/test/docker_image_availability_test.py
index 484aa72e0..ec46c3b4b 100644
--- a/roles/openshift_health_checker/test/docker_image_availability_test.py
+++ b/roles/openshift_health_checker/test/docker_image_availability_test.py
@@ -217,7 +217,7 @@ def test_registry_availability(image, registries, connection_test_failed, skopeo
'foo.io/openshift3/ose-docker-registry:f13ac45',
'foo.io/openshift3/ose-haproxy-router:f13ac45',
# registry-console is not constructed/versioned the same as the others.
- 'registry.access.redhat.com/openshift3/registry-console:vtest',
+ 'openshift3/registry-console:vtest',
# containerized images aren't built from oreg_url
'openshift3/node:vtest',
'openshift3/openvswitch:vtest',
@@ -261,7 +261,7 @@ def test_required_images(deployment_type, is_containerized, groups, oreg_url, ex
openshift_deployment_type="openshift-enterprise",
openshift_image_tag="vtest",
),
- "registry.access.redhat.com/openshift3/registry-console:vtest",
+ "openshift3/registry-console:vtest",
), (
dict(
openshift_deployment_type="openshift-enterprise",
diff --git a/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml
index f821efd6b..cc3159a32 100644
--- a/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml
@@ -27,7 +27,7 @@ objects:
spec:
containers:
- name: registry-console
- image: ${IMAGE_PREFIX}registry-console:${IMAGE_VERSION}
+ image: ${IMAGE_PREFIX}${IMAGE_BASENAME}:${IMAGE_VERSION}
ports:
- containerPort: 9090
protocol: TCP
@@ -89,7 +89,7 @@ objects:
- annotations: null
from:
kind: DockerImage
- name: ${IMAGE_PREFIX}registry-console:${IMAGE_VERSION}
+ name: ${IMAGE_PREFIX}${IMAGE_BASENAME}:${IMAGE_VERSION}
name: ${IMAGE_VERSION}
- kind: OAuthClient
apiVersion: v1
@@ -102,7 +102,10 @@ objects:
parameters:
- description: 'Specify "registry/repository" prefix for container image; e.g. for "registry.access.redhat.com/openshift3/registry-console:latest", set prefix "registry.access.redhat.com/openshift3/"'
name: IMAGE_PREFIX
- value: "registry.access.redhat.com/openshift3/"
+ value: "openshift3/"
+ - description: 'Specify component name for container image; e.g. for "registry.access.redhat.com/openshift3/registry-console:latest", use base name "registry-console"'
+ name: IMAGE_BASENAME
+ value: "registry-console"
- description: 'Specify image version; e.g. for "registry.access.redhat.com/openshift3/registry-console:v3.6", set version "v3.6"'
name: IMAGE_VERSION
value: "v3.6"
diff --git a/roles/openshift_hosted_templates/files/v3.6/origin/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.6/origin/registry-console.yaml
index a78146ca4..a75340eb7 100644
--- a/roles/openshift_hosted_templates/files/v3.6/origin/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v3.6/origin/registry-console.yaml
@@ -27,7 +27,7 @@ objects:
spec:
containers:
- name: registry-console
- image: ${IMAGE_PREFIX}kubernetes:${IMAGE_VERSION}
+ image: ${IMAGE_PREFIX}${IMAGE_BASENAME}:${IMAGE_VERSION}
ports:
- containerPort: 9090
protocol: TCP
@@ -89,7 +89,7 @@ objects:
- annotations: null
from:
kind: DockerImage
- name: ${IMAGE_PREFIX}kubernetes:${IMAGE_VERSION}
+ name: ${IMAGE_PREFIX}${IMAGE_BASENAME}:${IMAGE_VERSION}
name: ${IMAGE_VERSION}
- kind: OAuthClient
apiVersion: v1
@@ -103,6 +103,9 @@ parameters:
- description: 'Specify "registry/namespace" prefix for container image; e.g. for "registry.example.com/cockpit/kubernetes:latest", set prefix "registry.example.com/cockpit/"'
name: IMAGE_PREFIX
value: "cockpit/"
+ - description: 'Specify component name for container image; e.g. for "registry.example.com/cockpit/kubernetes:latest", use base name "kubernetes"'
+ name: IMAGE_BASENAME
+ value: "kubernetes"
- description: 'Specify image version; e.g. for "cockpit/kubernetes:latest", set version "latest"'
name: IMAGE_VERSION
value: latest
diff --git a/roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml
index 019d836fe..9f2e6125d 100644
--- a/roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml
@@ -27,7 +27,7 @@ objects:
spec:
containers:
- name: registry-console
- image: ${IMAGE_PREFIX}registry-console:${IMAGE_VERSION}
+ image: ${IMAGE_PREFIX}${IMAGE_BASENAME}:${IMAGE_VERSION}
ports:
- containerPort: 9090
protocol: TCP
@@ -89,7 +89,7 @@ objects:
- annotations: null
from:
kind: DockerImage
- name: ${IMAGE_PREFIX}registry-console:${IMAGE_VERSION}
+ name: ${IMAGE_PREFIX}${IMAGE_BASENAME}:${IMAGE_VERSION}
name: ${IMAGE_VERSION}
- kind: OAuthClient
apiVersion: v1
@@ -102,7 +102,10 @@ objects:
parameters:
- description: 'Specify "registry/repository" prefix for container image; e.g. for "registry.access.redhat.com/openshift3/registry-console:latest", set prefix "registry.access.redhat.com/openshift3/"'
name: IMAGE_PREFIX
- value: "registry.access.redhat.com/openshift3/"
+ value: "openshift3/"
+ - description: 'Specify component name for container image; e.g. for "registry.access.redhat.com/openshift3/registry-console:latest", use base name "registry-console"'
+ name: IMAGE_BASENAME
+ value: "registry-console"
- description: 'Specify image version; e.g. for "registry.access.redhat.com/openshift3/registry-console:v3.7", set version "v3.7"'
name: IMAGE_VERSION
value: "v3.7"
diff --git a/roles/openshift_hosted_templates/files/v3.7/origin/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.7/origin/registry-console.yaml
index a78146ca4..a75340eb7 100644
--- a/roles/openshift_hosted_templates/files/v3.7/origin/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v3.7/origin/registry-console.yaml
@@ -27,7 +27,7 @@ objects:
spec:
containers:
- name: registry-console
- image: ${IMAGE_PREFIX}kubernetes:${IMAGE_VERSION}
+ image: ${IMAGE_PREFIX}${IMAGE_BASENAME}:${IMAGE_VERSION}
ports:
- containerPort: 9090
protocol: TCP
@@ -89,7 +89,7 @@ objects:
- annotations: null
from:
kind: DockerImage
- name: ${IMAGE_PREFIX}kubernetes:${IMAGE_VERSION}
+ name: ${IMAGE_PREFIX}${IMAGE_BASENAME}:${IMAGE_VERSION}
name: ${IMAGE_VERSION}
- kind: OAuthClient
apiVersion: v1
@@ -103,6 +103,9 @@ parameters:
- description: 'Specify "registry/namespace" prefix for container image; e.g. for "registry.example.com/cockpit/kubernetes:latest", set prefix "registry.example.com/cockpit/"'
name: IMAGE_PREFIX
value: "cockpit/"
+ - description: 'Specify component name for container image; e.g. for "registry.example.com/cockpit/kubernetes:latest", use base name "kubernetes"'
+ name: IMAGE_BASENAME
+ value: "kubernetes"
- description: 'Specify image version; e.g. for "cockpit/kubernetes:latest", set version "latest"'
name: IMAGE_VERSION
value: latest
diff --git a/roles/openshift_hosted_templates/files/v3.8/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.8/enterprise/registry-console.yaml
index 5acbb02b3..f04ce06d3 100644
--- a/roles/openshift_hosted_templates/files/v3.8/enterprise/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v3.8/enterprise/registry-console.yaml
@@ -27,7 +27,7 @@ objects:
spec:
containers:
- name: registry-console
- image: ${IMAGE_PREFIX}registry-console:${IMAGE_VERSION}
+ image: ${IMAGE_PREFIX}${IMAGE_BASENAME}:${IMAGE_VERSION}
ports:
- containerPort: 9090
protocol: TCP
@@ -89,7 +89,7 @@ objects:
- annotations: null
from:
kind: DockerImage
- name: ${IMAGE_PREFIX}registry-console:${IMAGE_VERSION}
+ name: ${IMAGE_PREFIX}${IMAGE_BASENAME}:${IMAGE_VERSION}
name: ${IMAGE_VERSION}
- kind: OAuthClient
apiVersion: v1
@@ -102,7 +102,10 @@ objects:
parameters:
- description: 'Specify "registry/repository" prefix for container image; e.g. for "registry.access.redhat.com/openshift3/registry-console:latest", set prefix "registry.access.redhat.com/openshift3/"'
name: IMAGE_PREFIX
- value: "registry.access.redhat.com/openshift3/"
+ value: "openshift3/"
+ - description: 'Specify component name for container image; e.g. for "registry.access.redhat.com/openshift3/registry-console:latest", use base name "registry-console"'
+ name: IMAGE_BASENAME
+ value: "registry-console"
- description: 'Specify image version; e.g. for "registry.access.redhat.com/openshift3/registry-console:v3.8", set version "v3.8"'
name: IMAGE_VERSION
value: "v3.8"
diff --git a/roles/openshift_hosted_templates/files/v3.8/origin/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.8/origin/registry-console.yaml
index a78146ca4..a75340eb7 100644
--- a/roles/openshift_hosted_templates/files/v3.8/origin/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v3.8/origin/registry-console.yaml
@@ -27,7 +27,7 @@ objects:
spec:
containers:
- name: registry-console
- image: ${IMAGE_PREFIX}kubernetes:${IMAGE_VERSION}
+ image: ${IMAGE_PREFIX}${IMAGE_BASENAME}:${IMAGE_VERSION}
ports:
- containerPort: 9090
protocol: TCP
@@ -89,7 +89,7 @@ objects:
- annotations: null
from:
kind: DockerImage
- name: ${IMAGE_PREFIX}kubernetes:${IMAGE_VERSION}
+ name: ${IMAGE_PREFIX}${IMAGE_BASENAME}:${IMAGE_VERSION}
name: ${IMAGE_VERSION}
- kind: OAuthClient
apiVersion: v1
@@ -103,6 +103,9 @@ parameters:
- description: 'Specify "registry/namespace" prefix for container image; e.g. for "registry.example.com/cockpit/kubernetes:latest", set prefix "registry.example.com/cockpit/"'
name: IMAGE_PREFIX
value: "cockpit/"
+ - description: 'Specify component name for container image; e.g. for "registry.example.com/cockpit/kubernetes:latest", use base name "kubernetes"'
+ name: IMAGE_BASENAME
+ value: "kubernetes"
- description: 'Specify image version; e.g. for "cockpit/kubernetes:latest", set version "latest"'
name: IMAGE_VERSION
value: latest
diff --git a/roles/openshift_logging/README.md b/roles/openshift_logging/README.md
index 6c5bb8693..27cfc17d6 100644
--- a/roles/openshift_logging/README.md
+++ b/roles/openshift_logging/README.md
@@ -84,6 +84,7 @@ When `openshift_logging_install_logging` is set to `False` the `openshift_loggin
- `openshift_logging_es_memory_limit`: The amount of RAM that should be assigned to ES. Defaults to '8Gi'.
- `openshift_logging_es_log_appenders`: The list of rootLogger appenders for ES logs which can be: 'file', 'console'. Defaults to 'file'.
- `openshift_logging_es_pv_selector`: A key/value map added to a PVC in order to select specific PVs. Defaults to 'None'.
+- `openshift_logging_es_pvc_storage_class_name`: The name of the storage class to use for a static PVC. Defaults to ''.
- `openshift_logging_es_pvc_dynamic`: Whether or not to add the dynamic PVC annotation for any generated PVCs. Defaults to 'False'.
- `openshift_logging_es_pvc_size`: The requested size for the ES PVCs, when not provided the role will not generate any PVCs. Defaults to '""'.
- `openshift_logging_es_pvc_prefix`: The prefix for the generated PVCs. Defaults to 'logging-es'.
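A minimal inventory sketch using the new variable (the class name is illustrative; the ops cluster honors the parallel `openshift_logging_es_ops_pvc_storage_class_name`, as the task changes below show):

```
# Illustrative inventory snippet: bind the generated ES PVCs to a StorageClass.
openshift_logging_install_logging: true
openshift_logging_es_pvc_size: "10Gi"                 # PVCs are only generated when a size is set
openshift_logging_es_pvc_storage_class_name: "glusterfs-storage"  # hypothetical class name
```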
diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml
index 89e583771..2fefdc894 100644
--- a/roles/openshift_logging/tasks/install_logging.yaml
+++ b/roles/openshift_logging/tasks/install_logging.yaml
@@ -84,6 +84,7 @@
openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type }}"
openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_pv_selector }}"
+ openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_pvc_storage_class_name }}"
openshift_logging_elasticsearch_nodeselector: "{{ openshift_logging_es_nodeselector if outer_item.0.nodeSelector | default(None) is none else outer_item.0.nodeSelector }}"
openshift_logging_elasticsearch_storage_group: "{{ [openshift_logging_es_storage_group] if outer_item.0.storageGroups | default([]) | length == 0 else outer_item.0.storageGroups }}"
_es_containers: "{{ outer_item.0.containers}}"
@@ -110,6 +111,7 @@
openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type }}"
openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_pv_selector }}"
+ openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_pvc_storage_class_name }}"
with_sequence: count={{ openshift_logging_es_cluster_size | int - openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | count }}
loop_control:
@@ -146,6 +148,7 @@
openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_ops_pvc_size }}"
openshift_logging_elasticsearch_pvc_dynamic: "{{ openshift_logging_es_ops_pvc_dynamic }}"
openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}"
+ openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_ops_pvc_storage_class_name }}"
openshift_logging_elasticsearch_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}"
openshift_logging_elasticsearch_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}"
openshift_logging_elasticsearch_cpu_request: "{{ openshift_logging_es_ops_cpu_request }}"
@@ -187,6 +190,7 @@
openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_ops_pvc_size }}"
openshift_logging_elasticsearch_pvc_dynamic: "{{ openshift_logging_es_ops_pvc_dynamic }}"
openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}"
+ openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_ops_pvc_storage_class_name }}"
openshift_logging_elasticsearch_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}"
openshift_logging_elasticsearch_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}"
openshift_logging_elasticsearch_cpu_request: "{{ openshift_logging_es_ops_cpu_request }}"
diff --git a/roles/openshift_logging_elasticsearch/defaults/main.yml b/roles/openshift_logging_elasticsearch/defaults/main.yml
index bec4432c3..0ea913224 100644
--- a/roles/openshift_logging_elasticsearch/defaults/main.yml
+++ b/roles/openshift_logging_elasticsearch/defaults/main.yml
@@ -31,6 +31,7 @@ openshift_logging_elasticsearch_pvc_name: ""
openshift_logging_elasticsearch_pvc_size: ""
openshift_logging_elasticsearch_pvc_dynamic: false
openshift_logging_elasticsearch_pvc_pv_selector: {}
+openshift_logging_elasticsearch_pvc_storage_class_name: ""
openshift_logging_elasticsearch_pvc_access_modes: ['ReadWriteOnce']
openshift_logging_elasticsearch_storage_group: ['65534']
diff --git a/roles/openshift_logging_elasticsearch/tasks/main.yaml b/roles/openshift_logging_elasticsearch/tasks/main.yaml
index 2bd02af60..770892d52 100644
--- a/roles/openshift_logging_elasticsearch/tasks/main.yaml
+++ b/roles/openshift_logging_elasticsearch/tasks/main.yaml
@@ -270,7 +270,7 @@
port: 443
targetPort: 4443
selector:
- component: "{{ es_component }}-prometheus"
+ component: "{{ es_component }}"
provider: openshift
- oc_edit:
diff --git a/roles/openshift_logging_fluentd/files/secure-forward.conf b/roles/openshift_logging_fluentd/files/secure-forward.conf
index f4483df79..87410c1c5 100644
--- a/roles/openshift_logging_fluentd/files/secure-forward.conf
+++ b/roles/openshift_logging_fluentd/files/secure-forward.conf
@@ -1,3 +1,4 @@
+# <store>
# @type secure_forward
# self_hostname ${HOSTNAME}
@@ -22,3 +23,4 @@
# specify hostlabel for FQDN verification if ipaddress is used for host
# hostlabel server.fqdn.example.com
# </server>
+# </store>
diff --git a/roles/openshift_logging_mux/files/secure-forward.conf b/roles/openshift_logging_mux/files/secure-forward.conf
index f4483df79..87410c1c5 100644
--- a/roles/openshift_logging_mux/files/secure-forward.conf
+++ b/roles/openshift_logging_mux/files/secure-forward.conf
@@ -1,3 +1,4 @@
+# <store>
# @type secure_forward
# self_hostname ${HOSTNAME}
@@ -22,3 +23,4 @@
# specify hostlabel for FQDN verification if ipaddress is used for host
# hostlabel server.fqdn.example.com
# </server>
+# </store>
diff --git a/roles/openshift_node/meta/main.yml b/roles/openshift_node/meta/main.yml
index 5bc7b9869..c32aa1600 100644
--- a/roles/openshift_node/meta/main.yml
+++ b/roles/openshift_node/meta/main.yml
@@ -13,9 +13,15 @@ galaxy_info:
- cloud
dependencies:
- role: openshift_node_facts
+ when: not (openshift_node_upgrade_in_progress | default(False))
- role: lib_openshift
- role: lib_os_firewall
+ when: not (openshift_node_upgrade_in_progress | default(False))
- role: openshift_clock
+ when: not (openshift_node_upgrade_in_progress | default(False))
- role: openshift_docker
- role: openshift_cloud_provider
+ when: not (openshift_node_upgrade_in_progress | default(False))
- role: openshift_node_dnsmasq
+- role: lib_utils
+ when: openshift_node_upgrade_in_progress | default(False)
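These `when` guards let the merged role skip its install-time dependencies while a node is being upgraded, pulling in only `lib_utils`, which the upgrade tasks need. A hypothetical sketch of how an upgrade play might set the flag, assuming the relocated `tasks/upgrade.yml` entry point shown below:

```
# Hypothetical upgrade play: the flag disables the install-time dependencies above.
- name: Upgrade nodes
  hosts: oo_nodes_to_upgrade
  serial: 1
  vars:
    openshift_node_upgrade_in_progress: True
  tasks:
    - include_role:
        name: openshift_node
        tasks_from: upgrade
```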
diff --git a/roles/openshift_node_upgrade/tasks/config/install-node-docker-service-file.yml b/roles/openshift_node/tasks/config/install-node-docker-service-file.yml
index f92ff79b5..f92ff79b5 100644
--- a/roles/openshift_node_upgrade/tasks/config/install-node-docker-service-file.yml
+++ b/roles/openshift_node/tasks/config/install-node-docker-service-file.yml
diff --git a/roles/openshift_node_upgrade/tasks/docker/upgrade.yml b/roles/openshift_node/tasks/docker/upgrade.yml
index ebe87d6fd..ebe87d6fd 100644
--- a/roles/openshift_node_upgrade/tasks/docker/upgrade.yml
+++ b/roles/openshift_node/tasks/docker/upgrade.yml
diff --git a/roles/openshift_node_upgrade/tasks/main.yml b/roles/openshift_node/tasks/upgrade.yml
index 66c1fcc38..2bca1e974 100644
--- a/roles/openshift_node_upgrade/tasks/main.yml
+++ b/roles/openshift_node/tasks/upgrade.yml
@@ -59,7 +59,7 @@
- include: "{{ node_config_hook }}"
when: node_config_hook is defined
-- include: rpm_upgrade.yml
+- include: upgrade/rpm_upgrade.yml
vars:
component: "node"
openshift_version: "{{ openshift_pkg_version | default('') }}"
@@ -70,7 +70,7 @@
path: "/etc/systemd/system/docker.service.d/docker-sdn-ovs.conf"
state: absent
-- include: containerized_node_upgrade.yml
+- include: upgrade/containerized_node_upgrade.yml
when: openshift.common.is_containerized | bool
- name: Ensure containerized services stopped before Docker restart
@@ -165,7 +165,7 @@
value: "/etc/origin/node/resolv.conf"
# Restart all services
-- include: restart.yml
+- include: upgrade/restart.yml
- name: Wait for node to be ready
oc_obj:
diff --git a/roles/openshift_node_upgrade/tasks/containerized_node_upgrade.yml b/roles/openshift_node/tasks/upgrade/containerized_node_upgrade.yml
index 07b0ac715..96b94d8b6 100644
--- a/roles/openshift_node_upgrade/tasks/containerized_node_upgrade.yml
+++ b/roles/openshift_node/tasks/upgrade/containerized_node_upgrade.yml
@@ -6,7 +6,7 @@
skip_node_svc_handlers: True
- name: Update systemd units
- include: systemd_units.yml
+ include: ../systemd_units.yml
# This is a no-op because of skip_node_svc_handlers, but lets us trigger it before end of
# play when the node has already been marked schedulable again. (this would look strange
diff --git a/roles/openshift_node_upgrade/tasks/restart.yml b/roles/openshift_node/tasks/upgrade/restart.yml
index a4fa51172..a4fa51172 100644
--- a/roles/openshift_node_upgrade/tasks/restart.yml
+++ b/roles/openshift_node/tasks/upgrade/restart.yml
diff --git a/roles/openshift_node_upgrade/tasks/rpm_upgrade.yml b/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml
index a998acf21..a998acf21 100644
--- a/roles/openshift_node_upgrade/tasks/rpm_upgrade.yml
+++ b/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml
diff --git a/roles/openshift_node_group/defaults/main.yml b/roles/openshift_node_group/defaults/main.yml
index d398a7fdc..7c81409a5 100644
--- a/roles/openshift_node_group/defaults/main.yml
+++ b/roles/openshift_node_group/defaults/main.yml
@@ -23,4 +23,4 @@ openshift_node_group_network_plugin_default: "{{ os_sdn_network_plugin_name | de
openshift_node_group_network_plugin: "{{ openshift_node_group_network_plugin_default }}"
openshift_node_group_node_data_dir_default: "{{ openshift_data_dir | default('/var/lib/origin') }}"
openshift_node_group_node_data_dir: "{{ openshift_node_group_node_data_dir_default }}"
-openshift_node_group_network_mtu: "{{ openshift_node_sdn_mtu | default(8951) }}"
+openshift_node_group_network_mtu: "{{ openshift_node_sdn_mtu | default(8951) | int }}"
diff --git a/roles/openshift_node_group/templates/node-config.yaml.j2 b/roles/openshift_node_group/templates/node-config.yaml.j2
index 5e22dc6d2..3fd16247c 100644
--- a/roles/openshift_node_group/templates/node-config.yaml.j2
+++ b/roles/openshift_node_group/templates/node-config.yaml.j2
@@ -33,7 +33,7 @@ masterClientConnectionOverrides:
qps: 20
masterKubeConfig: node.kubeconfig
networkConfig:
- mtu: "{{ openshift_node_group_network_mtu }}"
+ mtu: {{ openshift_node_group_network_mtu }}
networkPluginName: {{ openshift_node_group_network_plugin }}
nodeIP: ""
podManifestConfig: null
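The `| int` cast in the default, together with dropping the quotes in the template, ensures `mtu` is emitted as a YAML integer rather than a quoted string, matching what the node configuration expects for this field. An illustrative override and its rendering:

```
# Illustrative inventory override...
openshift_node_sdn_mtu: 1450
# ...renders in node-config.yaml as:
#   networkConfig:
#     mtu: 1450
```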
diff --git a/roles/openshift_node_upgrade/README.md b/roles/openshift_node_upgrade/README.md
deleted file mode 100644
index 73b98ad90..000000000
--- a/roles/openshift_node_upgrade/README.md
+++ /dev/null
@@ -1,111 +0,0 @@
-OpenShift Node upgrade
-=========
-
-Role responsible for a single node upgrade.
-It is expected a node is functioning and a part of an OpenShift cluster.
-
-Requirements
-------------
-
-TODO
-
-Role Variables
---------------
-From this role:
-
-| Name | Default value | |
-|--------------------------------|-----------------------|--------------------------------------------------------|
-| deployment_type | | Inventory var |
-| docker_upgrade_nuke_images | | Optional inventory var |
-| docker_version | | Optional inventory var |
-| l_docker_upgrade | | |
-| node_config_hook | | |
-| openshift.docker.gte_1_10 | | |
-| openshift_image_tag | | Set by openshift_version role |
-| openshift_pkg_version | | Set by openshift_version role |
-| openshift_release | | Set by openshift_version role |
-| skip_docker_restart | | |
-| openshift_cloudprovider_kind | | |
-
-From openshift.common:
-
-| Name | Default Value | |
-|------------------------------------|---------------------|---------------------|
-| openshift.common.config_base |---------------------|---------------------|
-| openshift.common.hostname |---------------------|---------------------|
-| openshift.common.http_proxy |---------------------|---------------------|
-| openshift.common.is_atomic |---------------------|---------------------|
-| openshift.common.is_containerized |---------------------|---------------------|
-| openshift.common.portal_net |---------------------|---------------------|
-| openshift.common.service_type |---------------------|---------------------|
-
-From openshift.master:
-
-| Name | Default Value | |
-|------------------------------------|---------------------|---------------------|
-| openshift.master.api_port |---------------------|---------------------|
-
-From openshift.node:
-
-| Name | Default Value | |
-|------------------------------------|---------------------|---------------------|
-| openshift.node.node_image |---------------------|---------------------|
-| openshift.node.ovs_image |---------------------|---------------------|
-
-
-Dependencies
-------------
-
-
-TODO
-
-Example Playbook
-----------------
-
-Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
-
-```
----
-- name: Upgrade nodes
- hosts: oo_nodes_to_upgrade
- serial: 1
- any_errors_fatal: true
-
- pre_tasks:
- - name: Mark unschedulable
- command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=false
- delegate_to: "{{ groups.oo_first_master.0 }}"
-
- - name: Drain Node for Kubelet upgrade
- command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --force --delete-local-data --ignore-daemonsets
- delegate_to: "{{ groups.oo_first_master.0 }}"
- register: l_docker_upgrade_drain_result
- until: not l_docker_upgrade_drain_result | failed
- retries: 60
- delay: 60
-
-
- roles:
- - openshift_facts
- - docker
- - openshift_node_dnsmasq
- - openshift_node_upgrade
-
- post_tasks:
- - name: Set node schedulability
- command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=true
- delegate_to: "{{ groups.oo_first_master.0 }}"
-```
-
-License
--------
-
-Apache License, Version 2.0
-
-Author Information
-------------------
-
-TODO
diff --git a/roles/openshift_node_upgrade/defaults/main.yml b/roles/openshift_node_upgrade/defaults/main.yml
deleted file mode 100644
index 1da434e6f..000000000
--- a/roles/openshift_node_upgrade/defaults/main.yml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-openshift_node_debug_level: "{{ debug_level | default(2) }}"
-
-openshift_use_openshift_sdn: True
-os_sdn_network_plugin_name: "redhat/openshift-ovs-subnet"
-
-openshift_node_data_dir_default: "{{ openshift_data_dir | default('/var/lib/origin') }}"
-openshift_node_data_dir: "{{ openshift_node_data_dir_default }}"
-
-# oreg_url is defined by user input
-oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' in oreg_url.split('/')[0]) else '' }}"
-oreg_auth_credentials_path: "{{ openshift_node_data_dir }}/.docker"
-oreg_auth_credentials_replace: False
-l_bind_docker_reg_auth: False
-openshift_docker_alternative_creds: "{{ (openshift_docker_use_system_container | default(False)) or (openshift_use_crio_only | default(False)) }}"
diff --git a/roles/openshift_node_upgrade/files/nuke_images.sh b/roles/openshift_node_upgrade/files/nuke_images.sh
deleted file mode 100644
index 8635eab0d..000000000
--- a/roles/openshift_node_upgrade/files/nuke_images.sh
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/bin/bash
-
-# Stop any running containers
-running_container_ids=`docker ps -q`
-if test -n "$running_container_ids"
-then
- docker stop $running_container_ids
-fi
-
-# Delete all containers
-container_ids=`docker ps -a -q`
-if test -n "$container_ids"
-then
- docker rm -f -v $container_ids
-fi
-
-# Delete all images (forcefully)
-image_ids=`docker images -aq`
-if test -n "$image_ids"
-then
- # Some layers are deleted recursively and are no longer present
- # when docker goes to remove them:
- docker rmi -f `docker images -aq` || true
-fi
-
diff --git a/roles/openshift_node_upgrade/handlers/main.yml b/roles/openshift_node_upgrade/handlers/main.yml
deleted file mode 100644
index 90d80855e..000000000
--- a/roles/openshift_node_upgrade/handlers/main.yml
+++ /dev/null
@@ -1,36 +0,0 @@
----
-- name: restart openvswitch
- systemd:
- name: openvswitch
- state: restarted
- when:
- - not skip_node_svc_handlers | default(False) | bool
- - not (ovs_service_status_changed | default(false) | bool)
- - openshift_use_openshift_sdn | bool
- register: l_openshift_node_upgrade_stop_openvswitch_result
- until: not l_openshift_node_upgrade_stop_openvswitch_result | failed
- retries: 3
- delay: 30
- notify:
- - restart openvswitch pause
-
-- name: restart openvswitch pause
- pause: seconds=15
- when: (not skip_node_svc_handlers | default(False) | bool) and openshift.common.is_containerized | bool
-
-- name: restart node
- systemd:
- name: "{{ openshift.common.service_type }}-node"
- state: restarted
- register: l_openshift_node_upgrade_restart_node_result
- until: not l_openshift_node_upgrade_restart_node_result | failed
- retries: 3
- delay: 30
- when:
- - (not skip_node_svc_handlers | default(False) | bool)
- - not (node_service_status_changed | default(false) | bool)
-
-# TODO(jchaloup): once it is verified the systemd module works as expected
-# switch to it: http://docs.ansible.com/ansible/latest/systemd_module.html
-- name: reload systemd units
- command: systemctl daemon-reload
diff --git a/roles/openshift_node_upgrade/meta/main.yml b/roles/openshift_node_upgrade/meta/main.yml
deleted file mode 100644
index a810b01dc..000000000
--- a/roles/openshift_node_upgrade/meta/main.yml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-galaxy_info:
- author: your name
- description: OpenShift Node upgrade
- company: Red Hat, Inc.
- license: Apache License, Version 2.0
- min_ansible_version: 2.1
- platforms:
- - name: EL
- versions:
- - 7
-dependencies:
-- role: lib_utils
diff --git a/roles/openshift_node_upgrade/tasks/config/configure-node-settings.yml b/roles/openshift_node_upgrade/tasks/config/configure-node-settings.yml
deleted file mode 100644
index 527580481..000000000
--- a/roles/openshift_node_upgrade/tasks/config/configure-node-settings.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-- name: Configure Node settings
- lineinfile:
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-node
- regexp: "{{ item.regex }}"
- line: "{{ item.line }}"
- create: true
- with_items:
- - regex: '^OPTIONS='
- line: "OPTIONS=--loglevel={{ openshift_node_debug_level }}"
- - regex: '^CONFIG_FILE='
- line: "CONFIG_FILE={{ openshift.common.config_base }}/node/node-config.yaml"
- - regex: '^IMAGE_VERSION='
- line: "IMAGE_VERSION={{ openshift_image_tag }}"
- notify:
- - restart node
diff --git a/roles/openshift_node_upgrade/tasks/config/configure-proxy-settings.yml b/roles/openshift_node_upgrade/tasks/config/configure-proxy-settings.yml
deleted file mode 100644
index d60794305..000000000
--- a/roles/openshift_node_upgrade/tasks/config/configure-proxy-settings.yml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-- name: Configure Proxy Settings
- lineinfile:
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-node
- regexp: "{{ item.regex }}"
- line: "{{ item.line }}"
- create: true
- with_items:
- - regex: '^HTTP_PROXY='
- line: "HTTP_PROXY={{ openshift.common.http_proxy | default('') }}"
- - regex: '^HTTPS_PROXY='
- line: "HTTPS_PROXY={{ openshift.common.https_proxy | default('') }}"
- - regex: '^NO_PROXY='
- line: "NO_PROXY={{ openshift.common.no_proxy | default([]) }},{{ openshift.common.portal_net }},{{ hostvars[groups.oo_first_master.0].openshift.master.sdn_cluster_network_cidr }}"
- when: ('http_proxy' in openshift.common and openshift.common.http_proxy != '')
- notify:
- - restart node
diff --git a/roles/openshift_node_upgrade/tasks/config/install-node-deps-docker-service-file.yml b/roles/openshift_node_upgrade/tasks/config/install-node-deps-docker-service-file.yml
deleted file mode 100644
index ee91a88ab..000000000
--- a/roles/openshift_node_upgrade/tasks/config/install-node-deps-docker-service-file.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-- name: Install Node dependencies docker service file
- template:
- dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node-dep.service"
- src: openshift.docker.node.dep.service
- notify:
- - reload systemd units
- - restart node
diff --git a/roles/openshift_node_upgrade/tasks/config/install-ovs-docker-service-file.yml b/roles/openshift_node_upgrade/tasks/config/install-ovs-docker-service-file.yml
deleted file mode 100644
index c2c5ea1d4..000000000
--- a/roles/openshift_node_upgrade/tasks/config/install-ovs-docker-service-file.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-- name: Install OpenvSwitch docker service file
- template:
- dest: "/etc/systemd/system/openvswitch.service"
- src: openvswitch.docker.service
- notify:
- - reload systemd units
- - restart openvswitch
diff --git a/roles/openshift_node_upgrade/tasks/config/install-ovs-service-env-file.yml b/roles/openshift_node_upgrade/tasks/config/install-ovs-service-env-file.yml
deleted file mode 100644
index 1d75a3355..000000000
--- a/roles/openshift_node_upgrade/tasks/config/install-ovs-service-env-file.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-- name: Create the openvswitch service env file
- template:
- src: openvswitch.sysconfig.j2
- dest: /etc/sysconfig/openvswitch
- notify:
- - reload systemd units
- - restart openvswitch
diff --git a/roles/openshift_node_upgrade/tasks/config/workaround-bz1331590-ovs-oom-fix.yml b/roles/openshift_node_upgrade/tasks/config/workaround-bz1331590-ovs-oom-fix.yml
deleted file mode 100644
index 5df1abc79..000000000
--- a/roles/openshift_node_upgrade/tasks/config/workaround-bz1331590-ovs-oom-fix.yml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-# May be a temporary workaround.
-# https://bugzilla.redhat.com/show_bug.cgi?id=1331590
-- name: Create OpenvSwitch service.d directory
- file: path=/etc/systemd/system/openvswitch.service.d/ state=directory
-
-- name: Install OpenvSwitch service OOM fix
- template:
- dest: "/etc/systemd/system/openvswitch.service.d/01-avoid-oom.conf"
- src: openvswitch-avoid-oom.conf
- notify:
- - reload systemd units
- - restart openvswitch
diff --git a/roles/openshift_node_upgrade/tasks/registry_auth.yml b/roles/openshift_node_upgrade/tasks/registry_auth.yml
deleted file mode 100644
index f5428867a..000000000
--- a/roles/openshift_node_upgrade/tasks/registry_auth.yml
+++ /dev/null
@@ -1,46 +0,0 @@
----
-- name: Check for credentials file for registry auth
- stat:
- path: "{{ oreg_auth_credentials_path }}"
- when: oreg_auth_user is defined
- register: node_oreg_auth_credentials_stat
-
-- name: Create credentials for registry auth
- command: "docker --config={{ oreg_auth_credentials_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}"
- when:
- - not (openshift_docker_alternative_creds | default(False))
- - oreg_auth_user is defined
- - (not node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
- register: node_oreg_auth_credentials_create
- retries: 3
- delay: 5
- until: node_oreg_auth_credentials_create.rc == 0
- notify:
- - restart node
-
-# docker_creds is a custom module from lib_utils
-# 'docker login' requires a docker.service running on the local host, this is an
-# alternative implementation for non-docker hosts. This implementation does not
-# check the registry to determine whether or not the credentials will work.
-- name: Create credentials for registry auth (alternative)
- docker_creds:
- path: "{{ oreg_auth_credentials_path }}"
- registry: "{{ oreg_host }}"
- username: "{{ oreg_auth_user }}"
- password: "{{ oreg_auth_password }}"
- when:
- - openshift_docker_alternative_creds | bool
- - oreg_auth_user is defined
- - (not node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
- register: node_oreg_auth_credentials_create
- notify:
- - restart node
-
-# Container images may need the registry credentials
-- name: Setup ro mount of /root/.docker for containerized hosts
- set_fact:
- l_bind_docker_reg_auth: True
- when:
- - openshift.common.is_containerized | bool
- - oreg_auth_user is defined
- - (node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace or node_oreg_auth_credentials_create.changed) | bool
diff --git a/roles/openshift_node_upgrade/tasks/systemd_units.yml b/roles/openshift_node_upgrade/tasks/systemd_units.yml
deleted file mode 100644
index 226f5290c..000000000
--- a/roles/openshift_node_upgrade/tasks/systemd_units.yml
+++ /dev/null
@@ -1,37 +0,0 @@
----
-# input variables
-# - openshift.node.node_image
-# - openshift_image_tag
-# - openshift.common.is_containerized
-# - openshift.node.ovs_image
-# - openshift_use_openshift_sdn
-# - openshift.common.service_type
-# - openshift_node_debug_level
-# - openshift.common.config_base
-# - openshift.common.http_proxy
-# - openshift.common.portal_net
-# - openshift.common
-# - openshift.common.http_proxy
-# notify:
-# - restart openvswitch
-# - restart node
-
-# This file is included both in the openshift_master role and in the upgrade
-# playbooks.
-- include: config/install-node-deps-docker-service-file.yml
- when: openshift.common.is_containerized | bool
-
-- include: config/install-node-docker-service-file.yml
- when: openshift.common.is_containerized | bool
-
-- include: config/install-ovs-service-env-file.yml
- when: openshift.common.is_containerized | bool
-
-- include: config/workaround-bz1331590-ovs-oom-fix.yml
- when: openshift_use_openshift_sdn | bool
-
-- include: config/install-ovs-docker-service-file.yml
- when: openshift.common.is_containerized | bool and openshift_use_openshift_sdn | bool
-
-- include: config/configure-node-settings.yml
-- include: config/configure-proxy-settings.yml
diff --git a/roles/openshift_node_upgrade/templates/node.service.j2 b/roles/openshift_node_upgrade/templates/node.service.j2
deleted file mode 100644
index e12a52c15..000000000
--- a/roles/openshift_node_upgrade/templates/node.service.j2
+++ /dev/null
@@ -1,31 +0,0 @@
-[Unit]
-Description=OpenShift Node
-After={{ openshift.docker.service_name }}.service
-Wants=openvswitch.service
-After=ovsdb-server.service
-After=ovs-vswitchd.service
-Wants={{ openshift.docker.service_name }}.service
-Documentation=https://github.com/openshift/origin
-Requires=dnsmasq.service
-After=dnsmasq.service
-
-[Service]
-Type=notify
-EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node
-Environment=GOTRACEBACK=crash
-ExecStartPre=/usr/bin/cp /etc/origin/node/node-dnsmasq.conf /etc/dnsmasq.d/
-ExecStartPre=/usr/bin/dbus-send --system --dest=uk.org.thekelleys.dnsmasq /uk/org/thekelleys/dnsmasq uk.org.thekelleys.SetDomainServers array:string:/in-addr.arpa/127.0.0.1,/{{ openshift.common.dns_domain }}/127.0.0.1
-ExecStopPost=/usr/bin/rm /etc/dnsmasq.d/node-dnsmasq.conf
-ExecStopPost=/usr/bin/dbus-send --system --dest=uk.org.thekelleys.dnsmasq /uk/org/thekelleys/dnsmasq uk.org.thekelleys.SetDomainServers array:string:
-ExecStart=/usr/bin/openshift start node --config=${CONFIG_FILE} $OPTIONS
-LimitNOFILE=65536
-LimitCORE=infinity
-WorkingDirectory=/var/lib/origin/
-SyslogIdentifier={{ openshift.common.service_type }}-node
-Restart=always
-RestartSec=5s
-TimeoutStartSec=300
-OOMScoreAdjust=-999
-
-[Install]
-WantedBy=multi-user.target
diff --git a/roles/openshift_node_upgrade/templates/openshift.docker.node.dep.service b/roles/openshift_node_upgrade/templates/openshift.docker.node.dep.service
deleted file mode 100644
index aae35719c..000000000
--- a/roles/openshift_node_upgrade/templates/openshift.docker.node.dep.service
+++ /dev/null
@@ -1,11 +0,0 @@
-[Unit]
-Requires={{ openshift.docker.service_name }}.service
-After={{ openshift.docker.service_name }}.service
-PartOf={{ openshift.common.service_type }}-node.service
-Before={{ openshift.common.service_type }}-node.service
-
-
-[Service]
-ExecStart=/bin/bash -c "if [[ -f /usr/bin/docker-current ]]; then echo \"DOCKER_ADDTL_BIND_MOUNTS=--volume=/usr/bin/docker-current:/usr/bin/docker-current:ro --volume=/etc/sysconfig/docker:/etc/sysconfig/docker:ro --volume=/etc/containers/registries:/etc/containers/registries:ro\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; else echo \"#DOCKER_ADDTL_BIND_MOUNTS=\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; fi"
-ExecStop=
-SyslogIdentifier={{ openshift.common.service_type }}-node-dep
diff --git a/roles/openshift_node_upgrade/templates/openshift.docker.node.service b/roles/openshift_node_upgrade/templates/openshift.docker.node.service
deleted file mode 100644
index 07d1ebc3c..000000000
--- a/roles/openshift_node_upgrade/templates/openshift.docker.node.service
+++ /dev/null
@@ -1,50 +0,0 @@
-[Unit]
-After={{ openshift.common.service_type }}-master.service
-After={{ openshift.docker.service_name }}.service
-After=openvswitch.service
-PartOf={{ openshift.docker.service_name }}.service
-Requires={{ openshift.docker.service_name }}.service
-{% if openshift_use_openshift_sdn %}
-Wants=openvswitch.service
-PartOf=openvswitch.service
-After=ovsdb-server.service
-After=ovs-vswitchd.service
-{% endif %}
-Wants={{ openshift.common.service_type }}-master.service
-Requires={{ openshift.common.service_type }}-node-dep.service
-After={{ openshift.common.service_type }}-node-dep.service
-Requires=dnsmasq.service
-After=dnsmasq.service
-
-[Service]
-EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node
-EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node-dep
-ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type }}-node
-ExecStartPre=/usr/bin/cp /etc/origin/node/node-dnsmasq.conf /etc/dnsmasq.d/
-ExecStartPre=/usr/bin/dbus-send --system --dest=uk.org.thekelleys.dnsmasq /uk/org/thekelleys/dnsmasq uk.org.thekelleys.SetDomainServers array:string:/in-addr.arpa/127.0.0.1,/{{ openshift.common.dns_domain }}/127.0.0.1
-ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node \
- --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node \
- -v /:/rootfs:ro,rslave -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} \
- -e HOST=/rootfs -e HOST_ETC=/host-etc \
- -v {{ openshift_node_data_dir }}:{{ openshift_node_data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} \
- -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node \
- {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} \
- -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro \
- -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw \
- -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker \
- -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch \
- -v /etc/origin/sdn:/etc/openshift-sdn -v /var/lib/cni:/var/lib/cni \
- -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log \
- -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS -v /etc/pki:/etc/pki:ro \
- {% if l_bind_docker_reg_auth %} -v {{ oreg_auth_credentials_path }}:/root/.docker:ro{% endif %}\
- {{ openshift.node.node_image }}:${IMAGE_VERSION}
-ExecStartPost=/usr/bin/sleep 10
-ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-node
-ExecStopPost=/usr/bin/rm /etc/dnsmasq.d/node-dnsmasq.conf
-ExecStopPost=/usr/bin/dbus-send --system --dest=uk.org.thekelleys.dnsmasq /uk/org/thekelleys/dnsmasq uk.org.thekelleys.SetDomainServers array:string:
-SyslogIdentifier={{ openshift.common.service_type }}-node
-Restart=always
-RestartSec=5s
-
-[Install]
-WantedBy={{ openshift.docker.service_name }}.service
diff --git a/roles/openshift_node_upgrade/templates/openvswitch-avoid-oom.conf b/roles/openshift_node_upgrade/templates/openvswitch-avoid-oom.conf
deleted file mode 100644
index 3229bc56b..000000000
--- a/roles/openshift_node_upgrade/templates/openvswitch-avoid-oom.conf
+++ /dev/null
@@ -1,3 +0,0 @@
-# Avoid the OOM killer for openvswitch and it's children:
-[Service]
-OOMScoreAdjust=-1000
diff --git a/roles/openshift_node_upgrade/templates/openvswitch.docker.service b/roles/openshift_node_upgrade/templates/openvswitch.docker.service
deleted file mode 100644
index 34aaaabd6..000000000
--- a/roles/openshift_node_upgrade/templates/openvswitch.docker.service
+++ /dev/null
@@ -1,17 +0,0 @@
-[Unit]
-After={{ openshift.docker.service_name }}.service
-Requires={{ openshift.docker.service_name }}.service
-PartOf={{ openshift.docker.service_name }}.service
-
-[Service]
-EnvironmentFile=/etc/sysconfig/openvswitch
-ExecStartPre=-/usr/bin/docker rm -f openvswitch
-ExecStart=/usr/bin/docker run --name openvswitch --rm --privileged --net=host --pid=host -v /lib/modules:/lib/modules -v /run:/run -v /sys:/sys:ro -v /etc/origin/openvswitch:/etc/openvswitch {{ openshift.node.ovs_image }}:${IMAGE_VERSION}
-ExecStartPost=/usr/bin/sleep 5
-ExecStop=/usr/bin/docker stop openvswitch
-SyslogIdentifier=openvswitch
-Restart=always
-RestartSec=5s
-
-[Install]
-WantedBy={{ openshift.docker.service_name }}.service
diff --git a/roles/openshift_node_upgrade/templates/openvswitch.sysconfig.j2 b/roles/openshift_node_upgrade/templates/openvswitch.sysconfig.j2
deleted file mode 100644
index da7c3742a..000000000
--- a/roles/openshift_node_upgrade/templates/openvswitch.sysconfig.j2
+++ /dev/null
@@ -1 +0,0 @@
-IMAGE_VERSION={{ openshift_image_tag }}
diff --git a/roles/openshift_storage_glusterfs/README.md b/roles/openshift_storage_glusterfs/README.md
index 03c157313..54adcf78d 100644
--- a/roles/openshift_storage_glusterfs/README.md
+++ b/roles/openshift_storage_glusterfs/README.md
@@ -87,7 +87,9 @@ GlusterFS cluster into a new or existing OpenShift cluster:
| openshift_storage_glusterfs_block_deploy | True | Deploy glusterblock provisioner service
| openshift_storage_glusterfs_block_image | 'gluster/glusterblock-provisioner'| Container image to use for glusterblock-provisioner pod, enterprise default is 'rhgs3/rhgs-gluster-block-prov-rhel7'
| openshift_storage_glusterfs_block_version | 'latest' | Container image version to use for glusterblock-provisioner pod
-| openshift_storage_glusterfs_block_max_host_vol | 15 | Max number of GlusterFS volumes to host glusterblock volumes
+| openshift_storage_glusterfs_block_host_vol_create| True | Automatically create GlusterFS volumes to host glusterblock volumes. **NOTE:** If this is False, block-hosting volumes will need to be manually created before glusterblock volumes can be provisioned
+| openshift_storage_glusterfs_block_host_vol_size | 100 | Size, in GB, of GlusterFS volumes that will be automatically created to host glusterblock volumes when not enough space is available for a glusterblock volume creation request. **NOTE:** This value is effectively an upper limit on the size of glusterblock volumes unless you manually create larger GlusterFS block-hosting volumes
+| openshift_storage_glusterfs_block_host_vol_max | 15 | Max number of GlusterFS volumes to host glusterblock volumes
| openshift_storage_glusterfs_s3_deploy | True | Deploy gluster-s3 service
| openshift_storage_glusterfs_s3_image | 'gluster/gluster-object'| Container image to use for gluster-s3 pod, enterprise default is 'rhgs3/rhgs-gluster-s3-server-rhel7'
| openshift_storage_glusterfs_s3_version | 'latest' | Container image version to use for gluster-s3 pod
diff --git a/roles/openshift_storage_glusterfs/defaults/main.yml b/roles/openshift_storage_glusterfs/defaults/main.yml
index c3db36d37..814d6ff28 100644
--- a/roles/openshift_storage_glusterfs/defaults/main.yml
+++ b/roles/openshift_storage_glusterfs/defaults/main.yml
@@ -10,7 +10,9 @@ openshift_storage_glusterfs_version: 'latest'
openshift_storage_glusterfs_block_deploy: True
openshift_storage_glusterfs_block_image: "{{ 'rhgs3/rhgs-gluster-block-prov-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'gluster/glusterblock-provisioner' | quote }}"
openshift_storage_glusterfs_block_version: 'latest'
-openshift_storage_glusterfs_block_max_host_vol: 15
+openshift_storage_glusterfs_block_host_vol_create: True
+openshift_storage_glusterfs_block_host_vol_size: 100
+openshift_storage_glusterfs_block_host_vol_max: 15
openshift_storage_glusterfs_s3_deploy: True
openshift_storage_glusterfs_s3_image: "{{ 'rhgs3/rhgs-gluster-s3-server-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'gluster/gluster-object' | quote }}"
openshift_storage_glusterfs_s3_version: 'latest'
@@ -54,7 +56,9 @@ openshift_storage_glusterfs_registry_version: "{{ openshift_storage_glusterfs_ve
openshift_storage_glusterfs_registry_block_deploy: "{{ openshift_storage_glusterfs_block_deploy }}"
openshift_storage_glusterfs_registry_block_image: "{{ openshift_storage_glusterfs_block_image }}"
openshift_storage_glusterfs_registry_block_version: "{{ openshift_storage_glusterfs_block_version }}"
-openshift_storage_glusterfs_registry_block_max_host_vol: "{{ openshift_storage_glusterfs_block_max_host_vol }}"
+openshift_storage_glusterfs_registry_block_host_vol_create: "{{ openshift_storage_glusterfs_block_host_vol_create }}"
+openshift_storage_glusterfs_registry_block_host_vol_size: "{{ openshift_storage_glusterfs_block_host_vol_size }}"
+openshift_storage_glusterfs_registry_block_host_vol_max: "{{ openshift_storage_glusterfs_block_host_vol_max }}"
openshift_storage_glusterfs_registry_s3_deploy: "{{ openshift_storage_glusterfs_s3_deploy }}"
openshift_storage_glusterfs_registry_s3_image: "{{ openshift_storage_glusterfs_s3_image }}"
openshift_storage_glusterfs_registry_s3_version: "{{ openshift_storage_glusterfs_s3_version }}"
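An illustrative inventory sketch of the renamed and newly added block-hosting variables (the size is an example only; the registry-specific `openshift_storage_glusterfs_registry_block_*` counterparts default to these values):

```
# Illustrative values only.
openshift_storage_glusterfs_block_host_vol_create: True  # auto-create block-hosting volumes on demand
openshift_storage_glusterfs_block_host_vol_size: 200     # GB; effectively caps individual glusterblock volumes
openshift_storage_glusterfs_block_host_vol_max: 15       # ceiling on auto-created block-hosting volumes
```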
diff --git a/roles/openshift_storage_glusterfs/files/v3.7/glusterblock-provisioner.yml b/roles/openshift_storage_glusterfs/files/v3.7/glusterblock-provisioner.yml
index 2cc69644c..9c1409dee 100644
--- a/roles/openshift_storage_glusterfs/files/v3.7/glusterblock-provisioner.yml
+++ b/roles/openshift_storage_glusterfs/files/v3.7/glusterblock-provisioner.yml
@@ -2,7 +2,7 @@
kind: Template
apiVersion: v1
metadata:
- name: glusterblock
+ name: glusterblock-provisioner
labels:
glusterfs: block-template
glusterblock: template
diff --git a/roles/openshift_storage_glusterfs/files/v3.8/deploy-heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.8/deploy-heketi-template.yml
new file mode 100644
index 000000000..34af652c2
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.8/deploy-heketi-template.yml
@@ -0,0 +1,133 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: deploy-heketi
+ labels:
+ glusterfs: heketi-template
+ deploy-heketi: support
+ annotations:
+ description: Bootstrap Heketi installation
+ tags: glusterfs,heketi,installation
+objects:
+- kind: Service
+ apiVersion: v1
+ metadata:
+ name: deploy-heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-service
+ deploy-heketi: support
+ annotations:
+ description: Exposes Heketi service
+ spec:
+ ports:
+ - name: deploy-heketi-${CLUSTER_NAME}
+ port: 8080
+ targetPort: 8080
+ selector:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-pod
+- kind: Route
+ apiVersion: v1
+ metadata:
+ name: ${HEKETI_ROUTE}
+ labels:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-route
+ deploy-heketi: support
+ spec:
+ to:
+ kind: Service
+ name: deploy-heketi-${CLUSTER_NAME}
+- kind: DeploymentConfig
+ apiVersion: v1
+ metadata:
+ name: deploy-heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-dc
+ deploy-heketi: support
+ annotations:
+ description: Defines how to deploy Heketi
+ spec:
+ replicas: 1
+ selector:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-pod
+ triggers:
+ - type: ConfigChange
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: deploy-heketi
+ labels:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-pod
+ deploy-heketi: support
+ spec:
+ serviceAccountName: heketi-${CLUSTER_NAME}-service-account
+ containers:
+ - name: heketi
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ env:
+ - name: HEKETI_USER_KEY
+ value: ${HEKETI_USER_KEY}
+ - name: HEKETI_ADMIN_KEY
+ value: ${HEKETI_ADMIN_KEY}
+ - name: HEKETI_EXECUTOR
+ value: ${HEKETI_EXECUTOR}
+ - name: HEKETI_FSTAB
+ value: ${HEKETI_FSTAB}
+ - name: HEKETI_SNAPSHOT_LIMIT
+ value: '14'
+ - name: HEKETI_KUBE_GLUSTER_DAEMONSET
+ value: '1'
+ ports:
+ - containerPort: 8080
+ volumeMounts:
+ - name: db
+ mountPath: /var/lib/heketi
+ - name: config
+ mountPath: /etc/heketi
+ readinessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 3
+ httpGet:
+ path: /hello
+ port: 8080
+ livenessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 30
+ httpGet:
+ path: /hello
+ port: 8080
+ volumes:
+ - name: db
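+      # NOTE: db deliberately has no volume source here; in the heketi bootstrap
+      # flow the database is ephemeral until it is copied onto the heketidbstorage
+      # GlusterFS volume mounted by the final heketi template.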
+ - name: config
+ secret:
+ secretName: heketi-${CLUSTER_NAME}-config-secret
+parameters:
+- name: HEKETI_USER_KEY
+ displayName: Heketi User Secret
+ description: Set secret for those creating volumes as type _user_
+- name: HEKETI_ADMIN_KEY
+ displayName: Heketi Administrator Secret
+ description: Set secret for administration of the Heketi service as user _admin_
+- name: HEKETI_EXECUTOR
+ displayName: heketi executor type
+ description: Set the executor type, kubernetes or ssh
+ value: kubernetes
+- name: HEKETI_FSTAB
+ displayName: heketi fstab path
+  description: Set the fstab path; this file is populated with the bricks that heketi creates
+ value: /var/lib/heketi/fstab
+- name: HEKETI_ROUTE
+ displayName: heketi route name
+ description: Set the hostname for the route URL
+ value: "heketi-glusterfs"
+- name: IMAGE_NAME
+ displayName: heketi container image name
+ required: True
+- name: IMAGE_VERSION
+ displayName: heketi container image version
+ required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify this heketi service, useful for running multiple heketi instances
+ value: glusterfs
diff --git a/roles/openshift_storage_glusterfs/files/v3.8/gluster-s3-pvcs-template.yml b/roles/openshift_storage_glusterfs/files/v3.8/gluster-s3-pvcs-template.yml
new file mode 100644
index 000000000..064b51473
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.8/gluster-s3-pvcs-template.yml
@@ -0,0 +1,67 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: gluster-s3-pvcs
+ labels:
+ glusterfs: s3-pvcs-template
+ gluster-s3: pvcs-template
+ annotations:
+ description: Gluster S3 service template
+ tags: glusterfs,heketi,gluster-s3
+objects:
+- kind: PersistentVolumeClaim
+ apiVersion: v1
+ metadata:
+ name: "${PVC}"
+ labels:
+ glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-storage
+ gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-pvc
+ annotations:
+ volume.beta.kubernetes.io/storage-class: "glusterfs-${CLUSTER_NAME}"
+ spec:
+ accessModes:
+ - ReadWriteMany
+ resources:
+ requests:
+ storage: "${PVC_SIZE}"
+- kind: PersistentVolumeClaim
+ apiVersion: v1
+ metadata:
+ name: "${META_PVC}"
+ labels:
+ glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-storage
+ gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-meta-pvc
+ annotations:
+ volume.beta.kubernetes.io/storage-class: "glusterfs-${CLUSTER_NAME}"
+ spec:
+ accessModes:
+ - ReadWriteMany
+ resources:
+ requests:
+ storage: "${META_PVC_SIZE}"
+parameters:
+- name: S3_ACCOUNT
+ displayName: S3 Account Name
+ description: S3 storage account which will provide storage on GlusterFS volumes
+ required: true
+- name: PVC
+ displayName: Primary GlusterFS-backed PVC
+ description: GlusterFS-backed PVC for object storage
+ required: true
+- name: PVC_SIZE
+ displayName: Primary GlusterFS-backed PVC capacity
+ description: Capacity for GlusterFS-backed PVC for object storage
+ value: 2Gi
+- name: META_PVC
+ displayName: Metadata GlusterFS-backed PVC
+ description: GlusterFS-backed PVC for object storage metadata
+ required: true
+- name: META_PVC_SIZE
+ displayName: Metadata GlusterFS-backed PVC capacity
+ description: Capacity for GlusterFS-backed PVC for object storage metadata
+ value: 1Gi
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances
+ value: storage
diff --git a/roles/openshift_storage_glusterfs/files/v3.8/gluster-s3-template.yml b/roles/openshift_storage_glusterfs/files/v3.8/gluster-s3-template.yml
new file mode 100644
index 000000000..896a1b226
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.8/gluster-s3-template.yml
@@ -0,0 +1,136 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: gluster-s3
+ labels:
+ glusterfs: s3-template
+ gluster-s3: template
+ annotations:
+ description: Gluster S3 service template
+ tags: glusterfs,heketi,gluster-s3
+objects:
+- kind: Service
+ apiVersion: v1
+ metadata:
+ name: gluster-s3-${CLUSTER_NAME}-${S3_ACCOUNT}-service
+ labels:
+ glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-service
+ gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-service
+ spec:
+ ports:
+ - protocol: TCP
+ port: 8080
+ targetPort: 8080
+ selector:
+      glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-pod
+ type: ClusterIP
+ sessionAffinity: None
+ status:
+ loadBalancer: {}
+- kind: Route
+ apiVersion: v1
+ metadata:
+ name: gluster-s3-${CLUSTER_NAME}-${S3_ACCOUNT}-route
+ labels:
+ glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-route
+ gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-route
+ spec:
+ to:
+ kind: Service
+ name: gluster-s3-${CLUSTER_NAME}-${S3_ACCOUNT}-service
+- kind: DeploymentConfig
+ apiVersion: v1
+ metadata:
+ name: gluster-s3-${CLUSTER_NAME}-${S3_ACCOUNT}-dc
+ labels:
+ glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-dc
+ gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-dc
+ annotations:
+ openshift.io/scc: privileged
+ description: Defines how to deploy gluster s3 object storage
+ spec:
+ replicas: 1
+ selector:
+ glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-pod
+ template:
+ metadata:
+ name: gluster-${CLUSTER_NAME}-${S3_ACCOUNT}-s3
+ labels:
+ glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-pod
+ gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-pod
+ spec:
+ containers:
+ - name: gluster-s3
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ imagePullPolicy: IfNotPresent
+ ports:
+ - name: gluster
+ containerPort: 8080
+ protocol: TCP
+ env:
+ - name: S3_ACCOUNT
+ value: "${S3_ACCOUNT}"
+ - name: S3_USER
+ value: "${S3_USER}"
+ - name: S3_PASSWORD
+ value: "${S3_PASSWORD}"
+ resources: {}
+ volumeMounts:
+ - name: gluster-vol1
+ mountPath: "/mnt/gluster-object/${S3_ACCOUNT}"
+ - name: gluster-vol2
+ mountPath: "/mnt/gluster-object/gsmetadata"
+ - name: glusterfs-cgroup
+ readOnly: true
+ mountPath: "/sys/fs/cgroup"
+ terminationMessagePath: "/dev/termination-log"
+ securityContext:
+ privileged: true
+ volumes:
+ - name: glusterfs-cgroup
+ hostPath:
+ path: "/sys/fs/cgroup"
+ - name: gluster-vol1
+ persistentVolumeClaim:
+ claimName: ${PVC}
+ - name: gluster-vol2
+ persistentVolumeClaim:
+ claimName: ${META_PVC}
+ restartPolicy: Always
+ terminationGracePeriodSeconds: 30
+ dnsPolicy: ClusterFirst
+ serviceAccountName: default
+ serviceAccount: default
+ securityContext: {}
+parameters:
+- name: IMAGE_NAME
+  displayName: gluster-s3 container image name
+ required: True
+- name: IMAGE_VERSION
+  displayName: gluster-s3 container image version
+ required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances
+ value: storage
+- name: S3_ACCOUNT
+ displayName: S3 Account Name
+ description: S3 storage account which will provide storage on GlusterFS volumes
+ required: true
+- name: S3_USER
+ displayName: S3 User
+ description: S3 user who can access the S3 storage account
+ required: true
+- name: S3_PASSWORD
+ displayName: S3 User Password
+ description: Password for the S3 user
+ required: true
+- name: PVC
+ displayName: Primary GlusterFS-backed PVC
+ description: GlusterFS-backed PVC for object storage
+ value: gluster-s3-claim
+- name: META_PVC
+ displayName: Metadata GlusterFS-backed PVC
+ description: GlusterFS-backed PVC for object storage metadata
+ value: gluster-s3-meta-claim
diff --git a/roles/openshift_storage_glusterfs/files/v3.8/glusterblock-provisioner.yml b/roles/openshift_storage_glusterfs/files/v3.8/glusterblock-provisioner.yml
new file mode 100644
index 000000000..9c1409dee
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.8/glusterblock-provisioner.yml
@@ -0,0 +1,105 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: glusterblock-provisioner
+ labels:
+ glusterfs: block-template
+ glusterblock: template
+ annotations:
+ description: glusterblock provisioner template
+ tags: glusterfs
+objects:
+- kind: ClusterRole
+ apiVersion: v1
+ metadata:
+ name: glusterblock-provisioner-runner
+ labels:
+ glusterfs: block-provisioner-runner-clusterrole
+ glusterblock: provisioner-runner-clusterrole
+ rules:
+ - apiGroups: [""]
+ resources: ["persistentvolumes"]
+ verbs: ["get", "list", "watch", "create", "delete"]
+ - apiGroups: [""]
+ resources: ["persistentvolumeclaims"]
+ verbs: ["get", "list", "watch", "update"]
+ - apiGroups: ["storage.k8s.io"]
+ resources: ["storageclasses"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: [""]
+ resources: ["events"]
+ verbs: ["list", "watch", "create", "update", "patch"]
+ - apiGroups: [""]
+ resources: ["services"]
+ verbs: ["get"]
+ - apiGroups: [""]
+ resources: ["secrets"]
+ verbs: ["get", "create", "delete"]
+ - apiGroups: [""]
+ resources: ["routes"]
+ verbs: ["get", "list"]
+- apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: glusterblock-${CLUSTER_NAME}-provisioner
+ labels:
+ glusterfs: block-${CLUSTER_NAME}-provisioner-sa
+ glusterblock: ${CLUSTER_NAME}-provisioner-sa
+- apiVersion: v1
+ kind: ClusterRoleBinding
+ metadata:
+ name: glusterblock-${CLUSTER_NAME}-provisioner
+ roleRef:
+ name: glusterblock-provisioner-runner
+ subjects:
+ - kind: ServiceAccount
+ name: glusterblock-${CLUSTER_NAME}-provisioner
+ namespace: ${NAMESPACE}
+- kind: DeploymentConfig
+ apiVersion: v1
+ metadata:
+ name: glusterblock-${CLUSTER_NAME}-provisioner-dc
+ labels:
+ glusterfs: block-${CLUSTER_NAME}-provisioner-dc
+ glusterblock: ${CLUSTER_NAME}-provisioner-dc
+ annotations:
+ description: Defines how to deploy the glusterblock provisioner pod.
+ spec:
+ replicas: 1
+ selector:
+ glusterfs: block-${CLUSTER_NAME}-provisioner-pod
+ triggers:
+ - type: ConfigChange
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: glusterblock-provisioner
+ labels:
+ glusterfs: block-${CLUSTER_NAME}-provisioner-pod
+ spec:
+ serviceAccountName: glusterblock-${CLUSTER_NAME}-provisioner
+ containers:
+ - name: glusterblock-provisioner
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: PROVISIONER_NAME
+ value: gluster.org/glusterblock
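+          # StorageClasses that want gluster-block volumes must reference this
+          # same provisioner string, gluster.org/glusterblock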
+parameters:
+- name: IMAGE_NAME
+ displayName: glusterblock provisioner container image name
+ required: True
+- name: IMAGE_VERSION
+ displayName: glusterblock provisioner container image version
+ required: True
+- name: NAMESPACE
+ displayName: glusterblock provisioner namespace
+ description: The namespace in which these resources are being created
+ required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances
+ value: storage
diff --git a/roles/openshift_storage_glusterfs/files/v3.8/glusterfs-template.yml b/roles/openshift_storage_glusterfs/files/v3.8/glusterfs-template.yml
new file mode 100644
index 000000000..09850a2c2
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.8/glusterfs-template.yml
@@ -0,0 +1,155 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: glusterfs
+ labels:
+ glusterfs: template
+ annotations:
+ description: GlusterFS DaemonSet template
+ tags: glusterfs
+objects:
+- kind: DaemonSet
+ apiVersion: extensions/v1beta1
+ metadata:
+ name: glusterfs-${CLUSTER_NAME}
+ labels:
+ glusterfs: ${CLUSTER_NAME}-daemonset
+ annotations:
+ description: GlusterFS DaemonSet
+ tags: glusterfs
+ spec:
+ selector:
+ matchLabels:
+ glusterfs: ${CLUSTER_NAME}-pod
+ template:
+ metadata:
+ name: glusterfs-${CLUSTER_NAME}
+ labels:
+ glusterfs: ${CLUSTER_NAME}-pod
+ glusterfs-node: pod
+ spec:
+ nodeSelector: "${{NODE_LABELS}}"
+ hostNetwork: true
+ containers:
+ - name: glusterfs
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: GB_GLFS_LRU_COUNT
+ value: "${GB_GLFS_LRU_COUNT}"
+ - name: TCMU_LOGDIR
+ value: "${TCMU_LOGDIR}"
+ resources:
+ requests:
+ memory: 100Mi
+ cpu: 100m
+ volumeMounts:
+ - name: glusterfs-heketi
+ mountPath: "/var/lib/heketi"
+ - name: glusterfs-run
+ mountPath: "/run"
+ - name: glusterfs-lvm
+ mountPath: "/run/lvm"
+ - name: glusterfs-etc
+ mountPath: "/etc/glusterfs"
+ - name: glusterfs-logs
+ mountPath: "/var/log/glusterfs"
+ - name: glusterfs-config
+ mountPath: "/var/lib/glusterd"
+ - name: glusterfs-dev
+ mountPath: "/dev"
+ - name: glusterfs-misc
+ mountPath: "/var/lib/misc/glusterfsd"
+ - name: glusterfs-cgroup
+ mountPath: "/sys/fs/cgroup"
+ readOnly: true
+ - name: glusterfs-ssl
+ mountPath: "/etc/ssl"
+ readOnly: true
+ securityContext:
+ capabilities: {}
+ privileged: true
+ readinessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 40
+ exec:
+ command:
+ - "/bin/bash"
+ - "-c"
+ - systemctl status glusterd.service
+ periodSeconds: 25
+ successThreshold: 1
+ failureThreshold: 15
+ livenessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 40
+ exec:
+ command:
+ - "/bin/bash"
+ - "-c"
+ - systemctl status glusterd.service
+ periodSeconds: 25
+ successThreshold: 1
+ failureThreshold: 15
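+        # both probes simply ask systemd inside the pod whether glusterd is
+        # running; the 40s initial delays give the daemon time to start before
+        # the kubelet begins checking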
+ terminationMessagePath: "/dev/termination-log"
+ volumes:
+ - name: glusterfs-heketi
+ hostPath:
+ path: "/var/lib/heketi"
+ - name: glusterfs-run
+ emptyDir: {}
+ - name: glusterfs-lvm
+ hostPath:
+ path: "/run/lvm"
+ - name: glusterfs-etc
+ hostPath:
+ path: "/etc/glusterfs"
+ - name: glusterfs-logs
+ hostPath:
+ path: "/var/log/glusterfs"
+ - name: glusterfs-config
+ hostPath:
+ path: "/var/lib/glusterd"
+ - name: glusterfs-dev
+ hostPath:
+ path: "/dev"
+ - name: glusterfs-misc
+ hostPath:
+ path: "/var/lib/misc/glusterfsd"
+ - name: glusterfs-cgroup
+ hostPath:
+ path: "/sys/fs/cgroup"
+ - name: glusterfs-ssl
+ hostPath:
+ path: "/etc/ssl"
+ restartPolicy: Always
+ terminationGracePeriodSeconds: 30
+ dnsPolicy: ClusterFirst
+ securityContext: {}
+parameters:
+- name: NODE_LABELS
+ displayName: Daemonset Node Labels
+  description: Labels which define the daemonset node selector. Must contain at least one label of the format 'glusterfs=<CLUSTER_NAME>-host'
+ value: '{ "glusterfs": "storage-host" }'
+- name: IMAGE_NAME
+ displayName: GlusterFS container image name
+ required: True
+- name: IMAGE_VERSION
+ displayName: GlusterFS container image version
+ required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances
+ value: storage
+- name: GB_GLFS_LRU_COUNT
+ displayName: Maximum number of block hosting volumes
+  description: Sets the maximum number of block hosting volumes.
+ value: "15"
+ required: true
+- name: TCMU_LOGDIR
+ displayName: Tcmu runner log directory
+  description: Sets the tcmu-runner log directory
+ value: "/var/log/glusterfs/gluster-block"
+ required: true
diff --git a/roles/openshift_storage_glusterfs/files/v3.8/heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.8/heketi-template.yml
new file mode 100644
index 000000000..28cdb2982
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.8/heketi-template.yml
@@ -0,0 +1,137 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: heketi
+ labels:
+ glusterfs: heketi-template
+ annotations:
+ description: Heketi service deployment template
+ tags: glusterfs,heketi
+objects:
+- kind: Service
+ apiVersion: v1
+ metadata:
+ name: heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: heketi-${CLUSTER_NAME}-service
+ heketi: ${CLUSTER_NAME}-service
+ annotations:
+ description: Exposes Heketi service
+ spec:
+ ports:
+ - name: heketi
+ port: 8080
+ targetPort: 8080
+ selector:
+ glusterfs: heketi-${CLUSTER_NAME}-pod
+- kind: Route
+ apiVersion: v1
+ metadata:
+ name: ${HEKETI_ROUTE}
+ labels:
+ glusterfs: heketi-${CLUSTER_NAME}-route
+ heketi: ${CLUSTER_NAME}-route
+ spec:
+ to:
+ kind: Service
+ name: heketi-${CLUSTER_NAME}
+- kind: DeploymentConfig
+ apiVersion: v1
+ metadata:
+ name: heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: heketi-${CLUSTER_NAME}-dc
+ heketi: ${CLUSTER_NAME}-dc
+ annotations:
+ description: Defines how to deploy Heketi
+ spec:
+ replicas: 1
+ selector:
+ glusterfs: heketi-${CLUSTER_NAME}-pod
+ triggers:
+ - type: ConfigChange
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: heketi-${CLUSTER_NAME}-pod
+ heketi: ${CLUSTER_NAME}-pod
+ spec:
+ serviceAccountName: heketi-${CLUSTER_NAME}-service-account
+ containers:
+ - name: heketi
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: HEKETI_USER_KEY
+ value: ${HEKETI_USER_KEY}
+ - name: HEKETI_ADMIN_KEY
+ value: ${HEKETI_ADMIN_KEY}
+ - name: HEKETI_EXECUTOR
+ value: ${HEKETI_EXECUTOR}
+ - name: HEKETI_FSTAB
+ value: ${HEKETI_FSTAB}
+ - name: HEKETI_SNAPSHOT_LIMIT
+ value: '14'
+ - name: HEKETI_KUBE_GLUSTER_DAEMONSET
+ value: '1'
+ ports:
+ - containerPort: 8080
+ volumeMounts:
+ - name: db
+ mountPath: /var/lib/heketi
+ - name: config
+ mountPath: /etc/heketi
+ readinessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 3
+ httpGet:
+ path: /hello
+ port: 8080
+ livenessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 30
+ httpGet:
+ path: /hello
+ port: 8080
+ volumes:
+ - name: db
+ glusterfs:
+ endpoints: heketi-db-${CLUSTER_NAME}-endpoints
+ path: heketidbstorage
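+          # unlike deploy-heketi above, the persistent db lives on the
+          # heketidbstorage GlusterFS volume, reached through the
+          # heketi-db-${CLUSTER_NAME}-endpoints object created alongside this template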
+ - name: config
+ secret:
+ secretName: heketi-${CLUSTER_NAME}-config-secret
+parameters:
+- name: HEKETI_USER_KEY
+ displayName: Heketi User Secret
+ description: Set secret for those creating volumes as type _user_
+- name: HEKETI_ADMIN_KEY
+ displayName: Heketi Administrator Secret
+ description: Set secret for administration of the Heketi service as user _admin_
+- name: HEKETI_EXECUTOR
+ displayName: heketi executor type
+ description: Set the executor type, kubernetes or ssh
+ value: kubernetes
+- name: HEKETI_FSTAB
+ displayName: heketi fstab path
+  description: Set the fstab path; this file is populated with the bricks that heketi creates
+ value: /var/lib/heketi/fstab
+- name: HEKETI_ROUTE
+ displayName: heketi route name
+ description: Set the hostname for the route URL
+ value: "heketi-glusterfs"
+- name: IMAGE_NAME
+ displayName: heketi container image name
+ required: True
+- name: IMAGE_VERSION
+ displayName: heketi container image version
+ required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify this heketi service, useful for running multiple heketi instances
+ value: glusterfs
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterblock_deploy.yml b/roles/openshift_storage_glusterfs/tasks/glusterblock_deploy.yml
index bba1de654..d6be8c726 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterblock_deploy.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterblock_deploy.yml
@@ -29,21 +29,21 @@
src: "{{ openshift.common.examples_content_version }}/{{ item }}"
dest: "{{ mktemp.stdout }}/{{ item }}"
with_items:
- - "glusterblock-template.yml"
+ - "glusterblock-provisioner.yml"
- name: Create glusterblock provisioner template
oc_obj:
namespace: "{{ glusterfs_namespace }}"
kind: template
- name: "glusterblock"
+ name: "glusterblock-provisioner"
state: present
files:
- - "{{ mktemp.stdout }}/glusterblock-template.yml"
+ - "{{ mktemp.stdout }}/glusterblock-provisioner.yml"
- name: Deploy glusterblock provisioner
oc_process:
namespace: "{{ glusterfs_namespace }}"
- template_name: "glusterblock"
+ template_name: "glusterblock-provisioner"
create: True
params:
IMAGE_NAME: "{{ glusterfs_block_image }}"
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
index e2d740f35..1ede0ae94 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
@@ -12,7 +12,9 @@
glusterfs_block_deploy: "{{ openshift_storage_glusterfs_block_deploy | bool }}"
glusterfs_block_image: "{{ openshift_storage_glusterfs_block_image }}"
glusterfs_block_version: "{{ openshift_storage_glusterfs_block_version }}"
- glusterfs_block_max_host_vol: "{{ openshift_storage_glusterfs_block_max_host_vol }}"
+ glusterfs_block_host_vol_create: "{{ openshift_storage_glusterfs_block_host_vol_create }}"
+ glusterfs_block_host_vol_size: "{{ openshift_storage_glusterfs_block_host_vol_size }}"
+ glusterfs_block_host_vol_max: "{{ openshift_storage_glusterfs_block_host_vol_max }}"
glusterfs_s3_deploy: "{{ openshift_storage_glusterfs_s3_deploy | bool }}"
glusterfs_s3_image: "{{ openshift_storage_glusterfs_s3_image }}"
glusterfs_s3_version: "{{ openshift_storage_glusterfs_s3_version }}"
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
index f98d4c62f..ef37762f9 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
@@ -87,7 +87,7 @@
IMAGE_VERSION: "{{ glusterfs_version }}"
NODE_LABELS: "{{ glusterfs_nodeselector }}"
CLUSTER_NAME: "{{ glusterfs_name }}"
- GB_GLFS_LRU_COUNT: "{{ glusterfs_block_max_host_vol }}"
+ GB_GLFS_LRU_COUNT: "{{ glusterfs_block_host_vol_max }}"
- name: Wait for GlusterFS pods
oc_obj:
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
index baac52179..1fa42efa7 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
@@ -12,7 +12,9 @@
glusterfs_block_deploy: "{{ openshift_storage_glusterfs_registry_block_deploy | bool }}"
glusterfs_block_image: "{{ openshift_storage_glusterfs_registry_block_image }}"
glusterfs_block_version: "{{ openshift_storage_glusterfs_registry_block_version }}"
- glusterfs_block_max_host_vol: "{{ openshift_storage_glusterfs_registry_block_max_host_vol }}"
+ glusterfs_block_host_vol_create: "{{ openshift_storage_glusterfs_registry_block_host_vol_create }}"
+ glusterfs_block_host_vol_size: "{{ openshift_storage_glusterfs_registry_block_host_vol_size }}"
+ glusterfs_block_host_vol_max: "{{ openshift_storage_glusterfs_registry_block_host_vol_max }}"
glusterfs_s3_deploy: "{{ openshift_storage_glusterfs_registry_s3_deploy | bool }}"
glusterfs_s3_image: "{{ openshift_storage_glusterfs_registry_s3_image }}"
glusterfs_s3_version: "{{ openshift_storage_glusterfs_registry_s3_version }}"
diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/heketi.json.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/heketi.json.j2
index 579b11bb7..565e9be98 100644
--- a/roles/openshift_storage_glusterfs/templates/v3.7/heketi.json.j2
+++ b/roles/openshift_storage_glusterfs/templates/v3.7/heketi.json.j2
@@ -31,6 +31,12 @@
"port" : "{{ glusterfs_heketi_ssh_port }}",
"user" : "{{ glusterfs_heketi_ssh_user }}",
"sudo" : {{ glusterfs_heketi_ssh_sudo | lower }}
- }
+ },
+
+ "_auto_create_block_hosting_volume": "Creates Block Hosting volumes automatically if not found or exsisting volume exhausted",
+ "auto_create_block_hosting_volume": {{ glusterfs_block_host_vol_create | lower }},
+
+ "_block_hosting_volume_size": "New block hosting volume will be created in size mentioned, This is considered only if auto-create is enabled.",
+ "block_hosting_volume_size": {{ glusterfs_block_host_vol_size }}
}
}
diff --git a/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-registry-endpoints.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-registry-endpoints.yml.j2
new file mode 100644
index 000000000..11c9195bb
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-registry-endpoints.yml.j2
@@ -0,0 +1,13 @@
+---
+apiVersion: v1
+kind: Endpoints
+metadata:
+ name: glusterfs-{{ glusterfs_name }}-endpoints
+subsets:
+- addresses:
+{% for node in glusterfs_nodes %}
+ - ip: {{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }}
+{% endfor %}
+ ports:
+ - port: 1
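+# port 1 is a placeholder: Endpoints must declare a port, but the GlusterFS
+# volume plugin only consumes the addresses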
diff --git a/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-registry-service.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-registry-service.yml.j2
new file mode 100644
index 000000000..3f869d2b7
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-registry-service.yml.j2
@@ -0,0 +1,10 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: glusterfs-{{ glusterfs_name }}-endpoints
+spec:
+ ports:
+ - port: 1
+status:
+ loadBalancer: {}
diff --git a/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-storageclass.yml.j2
new file mode 100644
index 000000000..095fb780f
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-storageclass.yml.j2
@@ -0,0 +1,13 @@
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+ name: glusterfs-{{ glusterfs_name }}
+provisioner: kubernetes.io/glusterfs
+parameters:
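+# resturl is the heketi REST endpoint: the route hostname when heketi runs
+# natively in the cluster, otherwise an external URL and port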
+ resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}"
+ restuser: "admin"
+{% if glusterfs_heketi_admin_key is defined %}
+ secretNamespace: "{{ glusterfs_namespace }}"
+ secretName: "heketi-{{ glusterfs_name }}-admin-secret"
+{%- endif -%}
diff --git a/roles/openshift_storage_glusterfs/templates/v3.8/heketi-endpoints.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.8/heketi-endpoints.yml.j2
new file mode 100644
index 000000000..99cbdf748
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.8/heketi-endpoints.yml.j2
@@ -0,0 +1,12 @@
+---
+apiVersion: v1
+kind: Endpoints
+metadata:
+ name: heketi-db-{{ glusterfs_name }}-endpoints
+subsets:
+- addresses:
+{% for node in glusterfs_nodes %}
+ - ip: {{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }}
+{% endfor %}
+ ports:
+ - port: 1
diff --git a/roles/openshift_storage_glusterfs/templates/v3.8/heketi-service.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.8/heketi-service.yml.j2
new file mode 100644
index 000000000..dcb896441
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.8/heketi-service.yml.j2
@@ -0,0 +1,10 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: heketi-db-{{ glusterfs_name }}-endpoints
+spec:
+ ports:
+ - port: 1
+status:
+ loadBalancer: {}
diff --git a/roles/openshift_storage_glusterfs/templates/v3.8/heketi.json.j2 b/roles/openshift_storage_glusterfs/templates/v3.8/heketi.json.j2
new file mode 100644
index 000000000..565e9be98
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.8/heketi.json.j2
@@ -0,0 +1,42 @@
+{
+ "_port_comment": "Heketi Server Port Number",
+ "port" : "8080",
+
+ "_use_auth": "Enable JWT authorization. Please enable for deployment",
+ "use_auth" : false,
+
+ "_jwt" : "Private keys for access",
+ "jwt" : {
+ "_admin" : "Admin has access to all APIs",
+ "admin" : {
+ "key" : "My Secret"
+ },
+ "_user" : "User only has access to /volumes endpoint",
+ "user" : {
+ "key" : "My Secret"
+ }
+ },
+
+ "_glusterfs_comment": "GlusterFS Configuration",
+ "glusterfs" : {
+
+ "_executor_comment": "Execute plugin. Possible choices: mock, kubernetes, ssh",
+ "executor" : "{{ glusterfs_heketi_executor }}",
+
+ "_db_comment": "Database file name",
+ "db" : "/var/lib/heketi/heketi.db",
+
+ "sshexec" : {
+ "keyfile" : "/etc/heketi/private_key",
+ "port" : "{{ glusterfs_heketi_ssh_port }}",
+ "user" : "{{ glusterfs_heketi_ssh_user }}",
+ "sudo" : {{ glusterfs_heketi_ssh_sudo | lower }}
+ },
+
+ "_auto_create_block_hosting_volume": "Creates Block Hosting volumes automatically if not found or exsisting volume exhausted",
+ "auto_create_block_hosting_volume": {{ glusterfs_block_host_vol_create | lower }},
+
+ "_block_hosting_volume_size": "New block hosting volume will be created in size mentioned, This is considered only if auto-create is enabled.",
+ "block_hosting_volume_size": {{ glusterfs_block_host_vol_size }}
+ }
+}
diff --git a/roles/openshift_storage_glusterfs/templates/v3.8/topology.json.j2 b/roles/openshift_storage_glusterfs/templates/v3.8/topology.json.j2
new file mode 100644
index 000000000..d6c28f6dd
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.8/topology.json.j2
@@ -0,0 +1,50 @@
+{
+ "clusters": [
+{%- set clusters = {} -%}
+{%- for node in glusterfs_nodes -%}
 {%- set cluster = hostvars[node].glusterfs_cluster if 'glusterfs_cluster' in hostvars[node] else '1' -%}
+ {%- if cluster in clusters -%}
+ {%- set _dummy = clusters[cluster].append(node) -%}
+ {%- else -%}
+ {%- set _dummy = clusters.update({cluster: [ node, ]}) -%}
+ {%- endif -%}
+{%- endfor -%}
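{#- nodes were grouped above by each host's glusterfs_cluster var (default "1"); each group now becomes one entry in the clusters array -#}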
+{%- for cluster in clusters -%}
+ {
+ "nodes": [
+{%- for node in clusters[cluster] -%}
+ {
+ "node": {
+ "hostnames": {
+ "manage": [
+{%- if 'glusterfs_hostname' in hostvars[node] -%}
+ "{{ hostvars[node].glusterfs_hostname }}"
+{%- elif 'openshift' in hostvars[node] -%}
+ "{{ hostvars[node].openshift.node.nodename }}"
+{%- else -%}
+ "{{ node }}"
+{%- endif -%}
+ ],
+ "storage": [
+{%- if 'glusterfs_ip' in hostvars[node] -%}
+ "{{ hostvars[node].glusterfs_ip }}"
+{%- else -%}
+ "{{ hostvars[node].openshift.common.ip }}"
+{%- endif -%}
+ ]
+ },
+ "zone": {{ hostvars[node].glusterfs_zone | default(1) }}
+ },
+ "devices": [
+{%- for device in hostvars[node].glusterfs_devices -%}
+ "{{ device }}"{% if not loop.last %},{% endif %}
+{%- endfor -%}
+ ]
+ }{% if not loop.last %},{% endif %}
+{%- endfor -%}
+ ]
+ }{% if not loop.last %},{% endif %}
+{%- endfor -%}
+ ]
+}