-rwxr-xr-x  hack/build-images.sh  87
-rwxr-xr-x  hack/push-release.sh  55
-rw-r--r--  inventory/byo/hosts.origin.example  48
-rw-r--r--  inventory/byo/hosts.ose.example  46
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml  2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml  2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml  2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml  2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml  2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml  2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml  6
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml  6
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml  2
-rw-r--r--  playbooks/common/openshift-cluster/config.yml  3
-rw-r--r--  playbooks/common/openshift-cluster/disable_excluder.yml  14
-rw-r--r--  playbooks/common/openshift-cluster/initialize_openshift_version.yml  3
-rw-r--r--  playbooks/common/openshift-cluster/reset_excluder.yml  2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/disable_excluder.yml  21
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/validate_excluder.yml  22
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_5/validator.yml  67
-rw-r--r--  roles/contiv/defaults/main.yml  15
-rw-r--r--  roles/contiv/files/loopback  bin  3909976 -> 0 bytes
-rw-r--r--  roles/contiv/tasks/download_bins.yml  19
-rw-r--r--  roles/contiv/tasks/netplugin.yml  3
-rw-r--r--  roles/docker/templates/custom.conf.j2  2
-rw-r--r--  roles/etcd/tasks/etcdctl.yml  2
-rw-r--r--  roles/etcd/tasks/main.yml  2
-rw-r--r--  roles/etcd_server_certificates/tasks/main.yml  2
-rw-r--r--  roles/lib_openshift/library/oc_objectvalidator.py  11
-rw-r--r--  roles/lib_openshift/src/class/oc_objectvalidator.py  11
-rwxr-xr-x  roles/lib_openshift/src/test/unit/test_oc_objectvalidator.py  2
-rw-r--r--  roles/openshift_excluder/README.md  17
-rw-r--r--  roles/openshift_excluder/defaults/main.yml  6
-rw-r--r--  roles/openshift_excluder/meta/main.yml  1
-rw-r--r--  roles/openshift_excluder/tasks/adjust.yml  23
-rw-r--r--  roles/openshift_excluder/tasks/disable.yml  26
-rw-r--r--  roles/openshift_excluder/tasks/enable.yml  21
-rw-r--r--  roles/openshift_excluder/tasks/exclude.yml  27
-rw-r--r--  roles/openshift_excluder/tasks/init.yml  12
-rw-r--r--  roles/openshift_excluder/tasks/install.yml  31
-rw-r--r--  roles/openshift_excluder/tasks/reset.yml  12
-rw-r--r--  roles/openshift_excluder/tasks/status.yml  104
-rw-r--r--  roles/openshift_excluder/tasks/unexclude.yml  25
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py  9
-rw-r--r--  roles/openshift_facts/tasks/main.yml  9
-rw-r--r--  roles/openshift_facts/vars/main.yml  5
-rw-r--r--  roles/openshift_logging/defaults/main.yml  29
-rw-r--r--  roles/openshift_logging/filter_plugins/openshift_logging.py  9
-rw-r--r--  roles/openshift_logging/tasks/generate_pvcs.yaml  6
-rw-r--r--  roles/openshift_logging/tasks/install_curator.yaml  2
-rw-r--r--  roles/openshift_logging/tasks/install_elasticsearch.yaml  6
-rw-r--r--  roles/openshift_logging/tasks/install_kibana.yaml  4
-rw-r--r--  roles/openshift_logging/tasks/install_logging.yaml  4
-rw-r--r--  roles/openshift_logging/templates/fluentd.j2  2
-rw-r--r--  roles/openshift_metrics/defaults/main.yaml  7
-rw-r--r--  roles/openshift_metrics/tasks/install_cassandra.yaml  8
56 files changed, 735 insertions, 133 deletions
diff --git a/hack/build-images.sh b/hack/build-images.sh
new file mode 100755
index 000000000..f6210e239
--- /dev/null
+++ b/hack/build-images.sh
@@ -0,0 +1,87 @@
+#!/bin/bash
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+STARTTIME=$(date +%s)
+source_root=$(dirname "${0}")/..
+
+prefix="openshift/openshift-ansible"
+version="latest"
+verbose=false
+options=""
+help=false
+
+for args in "$@"
+do
+ case $args in
+ --prefix=*)
+ prefix="${args#*=}"
+ ;;
+ --version=*)
+ version="${args#*=}"
+ ;;
+ --no-cache)
+ options="${options} --no-cache"
+ ;;
+ --verbose)
+ verbose=true
+ ;;
+ --help)
+ help=true
+ ;;
+ esac
+done
+
+# allow ENV to take precedence over switches
+prefix="${PREFIX:-$prefix}"
+version="${OS_TAG:-$version}"
+
+if [ "$help" = true ]; then
+ echo "Builds the docker images for openshift-ansible"
+ echo
+ echo "Options: "
+ echo " --prefix=PREFIX"
+ echo " The prefix to use for the image names."
+ echo " default: openshift/openshift-ansible"
+ echo
+ echo " --version=VERSION"
+ echo " The version used to tag the image"
+ echo " default: latest"
+ echo
+ echo " --no-cache"
+ echo " If set will perform the build without a cache."
+ echo
+ echo " --verbose"
+ echo " Enables printing of the commands as they run."
+ echo
+ echo " --help"
+ echo " Prints this help message"
+ echo
+ exit 0
+fi
+
+if [ "$verbose" = true ]; then
+ set -x
+fi
+
+BUILD_STARTTIME=$(date +%s)
+comp_path=$source_root/
+docker_tag=${prefix}:${version}
+echo
+echo
+echo "--- Building component '$comp_path' with docker tag '$docker_tag' ---"
+docker build ${options} -t $docker_tag $comp_path
+BUILD_ENDTIME=$(date +%s); echo "--- $docker_tag took $(($BUILD_ENDTIME - $BUILD_STARTTIME)) seconds ---"
+echo
+echo
+
+echo
+echo
+echo "++ Active images"
+docker images | grep ${prefix} | grep ${version} | sort
+echo
+
+
+ret=$?; ENDTIME=$(date +%s); echo "$0 took $(($ENDTIME - $STARTTIME)) seconds"; exit "$ret"
diff --git a/hack/push-release.sh b/hack/push-release.sh
new file mode 100755
index 000000000..8639143af
--- /dev/null
+++ b/hack/push-release.sh
@@ -0,0 +1,55 @@
+#!/bin/bash
+
+# This script pushes all of the built images to a registry.
+#
+# Set OS_PUSH_BASE_REGISTRY to prefix the destination images
+#
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+STARTTIME=$(date +%s)
+OS_ROOT=$(dirname "${BASH_SOURCE}")/..
+
+PREFIX="${PREFIX:-openshift/openshift-ansible}"
+
+# Go to the top of the tree.
+cd "${OS_ROOT}"
+
+# Allow a release to be repushed with a tag
+tag="${OS_PUSH_TAG:-}"
+if [[ -n "${tag}" ]]; then
+ tag=":${tag}"
+else
+ tag=":latest"
+fi
+
+# Source tag
+source_tag="${OS_TAG:-}"
+if [[ -z "${source_tag}" ]]; then
+ source_tag="latest"
+fi
+
+images=(
+ ${PREFIX}
+)
+
+PUSH_OPTS=""
+if docker push --help | grep -q force; then
+ PUSH_OPTS="--force"
+fi
+
+if [[ "${OS_PUSH_BASE_REGISTRY-}" != "" || "${tag}" != "" ]]; then
+ set -e
+ for image in "${images[@]}"; do
+ docker tag "${image}:${source_tag}" "${OS_PUSH_BASE_REGISTRY-}${image}${tag}"
+ done
+ set +e
+fi
+
+for image in "${images[@]}"; do
+ docker push ${PUSH_OPTS} "${OS_PUSH_BASE_REGISTRY-}${image}${tag}"
+done
+
+ret=$?; ENDTIME=$(date +%s); echo "$0 took $(($ENDTIME - $STARTTIME)) seconds"; exit "$ret"
diff --git a/inventory/byo/hosts.origin.example b/inventory/byo/hosts.origin.example
index 7741730ad..6dec97fda 100644
--- a/inventory/byo/hosts.origin.example
+++ b/inventory/byo/hosts.origin.example
@@ -89,6 +89,8 @@ openshift_release=v1.4
# Skip upgrading Docker during an OpenShift upgrade, leaves the current Docker version alone.
# docker_upgrade=False
+# Specify exact version of etcd to configure or upgrade to.
+# etcd_version="3.1.0"
# Upgrade Hooks
#
@@ -300,7 +302,51 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#
# Disable management of the OpenShift Router
#openshift_hosted_manage_router=false
-
+#
+# Router sharding support has been added and is configured by supplying the
+# appropriate data in the inventory. The variable that holds this data is
+# openshift_hosted_routers and it takes the form of a list. If no data is
+# passed, a default router is created. Several sharding combinations are
+# possible; the one described below places the routers on separate nodes.
+#openshift_hosted_routers:
+#- name: router1
+# stats_port: 1936
+# ports:
+# - 80:80
+# - 443:443
+# replicas: 1
+# namespace: default
+# serviceaccount: router
+# selector: type=router1
+# images: "openshift3/ose-${component}:${version}"
+# edits: []
+# certificates:
+# certfile: /path/to/certificate/abc.crt
+# keyfile: /path/to/certificate/abc.key
+# cafile: /path/to/certificate/ca.crt
+#- name: router2
+# stats_port: 1936
+# ports:
+# - 80:80
+# - 443:443
+# replicas: 1
+# namespace: default
+# serviceaccount: router
+# selector: type=router2
+# images: "openshift3/ose-${component}:${version}"
+# certificates:
+# certfile: /path/to/certificate/xyz.crt
+# keyfile: /path/to/certificate/xyz.key
+# cafile: /path/to/certificate/ca.crt
+# edits:
+# # ROUTE_LABELS sets the router to listen for routes
+# # tagged with the provided values
+# - key: spec.template.spec.containers[0].env
+# value:
+# name: ROUTE_LABELS
+# value: "route=external"
+# action: append
+#
# OpenShift Registry Console Options
# Override the console image prefix for enterprise deployments, not used in origin
# default is "registry.access.redhat.com/openshift3/" and the image appended is "registry-console"
diff --git a/inventory/byo/hosts.ose.example b/inventory/byo/hosts.ose.example
index 3da9be081..2b61e7d8d 100644
--- a/inventory/byo/hosts.ose.example
+++ b/inventory/byo/hosts.ose.example
@@ -89,6 +89,8 @@ openshift_release=v3.4
# Skip upgrading Docker during an OpenShift upgrade, leaves the current Docker version alone.
# docker_upgrade=False
+# Specify exact version of etcd to configure or upgrade to.
+# etcd_version="3.1.0"
# Upgrade Hooks
#
@@ -300,6 +302,50 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#
# Disable management of the OpenShift Router
#openshift_hosted_manage_router=false
+#
+# Router sharding support has been added and is configured by supplying the
+# appropriate data in the inventory. The variable that holds this data is
+# openshift_hosted_routers and it takes the form of a list. If no data is
+# passed, a default router is created. Several sharding combinations are
+# possible; the one described below places the routers on separate nodes.
+#openshift_hosted_routers:
+#- name: router1
+# stats_port: 1936
+# ports:
+# - 80:80
+# - 443:443
+# replicas: 1
+# namespace: default
+# serviceaccount: router
+# selector: type=router1
+# images: "openshift3/ose-${component}:${version}"
+# edits: []
+# certificates:
+# certfile: /path/to/certificate/abc.crt
+# keyfile: /path/to/certificate/abc.key
+# cafile: /path/to/certificate/ca.crt
+#- name: router2
+# stats_port: 1936
+# ports:
+# - 80:80
+# - 443:443
+# replicas: 1
+# namespace: default
+# serviceaccount: router
+# selector: type=router2
+# images: "openshift3/ose-${component}:${version}"
+# certificates:
+# certfile: /path/to/certificate/xyz.crt
+# keyfile: /path/to/certificate/xyz.key
+# cafile: /path/to/certificate/ca.crt
+# edits:
+# # ROUTE_LABELS sets the router to listen for routes
+# # tagged with the provided values
+# - key: spec.template.spec.containers[0].env
+# value:
+# name: ROUTE_LABELS
+# value: "route=external"
+# action: append
# OpenShift Registry Console Options
# Override the console image prefix for enterprise deployments, not used in origin
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
index b1510e062..d268850d8 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
@@ -46,7 +46,7 @@
tags:
- pre_upgrade
-- include: ../../../../common/openshift-cluster/disable_excluder.yml
+- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml
tags:
- pre_upgrade
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
index b61d9e58a..d11e51640 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
@@ -54,7 +54,7 @@
tags:
- pre_upgrade
-- include: ../../../../common/openshift-cluster/disable_excluder.yml
+- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml
tags:
- pre_upgrade
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
index f0b2a2c75..5a0f143ac 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
@@ -47,7 +47,7 @@
tags:
- pre_upgrade
-- include: ../../../../common/openshift-cluster/disable_excluder.yml
+- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml
tags:
- pre_upgrade
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml
index 82a1d0935..25d8cd2ba 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml
@@ -46,7 +46,7 @@
tags:
- pre_upgrade
-- include: ../../../../common/openshift-cluster/disable_excluder.yml
+- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml
tags:
- pre_upgrade
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
index 7ae1b3e6e..d52f3c111 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
@@ -54,7 +54,7 @@
tags:
- pre_upgrade
-- include: ../../../../common/openshift-cluster/disable_excluder.yml
+- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml
tags:
- pre_upgrade
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
index ec63ea60e..07c734a40 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
@@ -47,7 +47,7 @@
tags:
- pre_upgrade
-- include: ../../../../common/openshift-cluster/disable_excluder.yml
+- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml
tags:
- pre_upgrade
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml
index 69cabcd33..e4db65b02 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml
@@ -46,7 +46,7 @@
tags:
- pre_upgrade
-- include: ../../../../common/openshift-cluster/disable_excluder.yml
+- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml
tags:
- pre_upgrade
@@ -82,6 +82,10 @@
tags:
- pre_upgrade
+- include: ../../../../common/openshift-cluster/upgrades/v3_5/validator.yml
+ tags:
+ - pre_upgrade
+
- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
tags:
- pre_upgrade
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
index 719057d2b..a2f1cd2b1 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
@@ -54,7 +54,7 @@
tags:
- pre_upgrade
-- include: ../../../../common/openshift-cluster/disable_excluder.yml
+- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml
tags:
- pre_upgrade
@@ -90,6 +90,10 @@
tags:
- pre_upgrade
+- include: ../../../../common/openshift-cluster/upgrades/v3_5/validator.yml
+ tags:
+ - pre_upgrade
+
- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
tags:
- pre_upgrade
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
index 259be6f8e..f858de3d5 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
@@ -47,7 +47,7 @@
tags:
- pre_upgrade
-- include: ../../../../common/openshift-cluster/disable_excluder.yml
+- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
index 113b401f9..82f711f40 100644
--- a/playbooks/common/openshift-cluster/config.yml
+++ b/playbooks/common/openshift-cluster/config.yml
@@ -27,6 +27,9 @@
when: openshift_docker_selinux_enabled is not defined
- include: disable_excluder.yml
+ vars:
+ # the excluders need to be disabled regardless of what the status check says
+ with_status_check: false
tags:
- always
diff --git a/playbooks/common/openshift-cluster/disable_excluder.yml b/playbooks/common/openshift-cluster/disable_excluder.yml
index eb146bab8..b2e025cb8 100644
--- a/playbooks/common/openshift-cluster/disable_excluder.yml
+++ b/playbooks/common/openshift-cluster/disable_excluder.yml
@@ -3,9 +3,15 @@
hosts: l_oo_all_hosts
gather_facts: no
tasks:
+
+ # During installation the excluders are installed with present state.
+ # So no pre-validation check here as the excluders are either to be installed (present = latest)
+ # or they are not going to be updated if already installed
+
+ # disable excluders based on their status
- include_role:
name: openshift_excluder
- tasks_from: status
- - include_role:
- name: openshift_excluder
- tasks_from: unexclude
+ tasks_from: disable
+ vars:
+ openshift_excluder_package_state: present
+ docker_excluder_package_state: present
diff --git a/playbooks/common/openshift-cluster/initialize_openshift_version.yml b/playbooks/common/openshift-cluster/initialize_openshift_version.yml
index 6b40176e1..7f37c606f 100644
--- a/playbooks/common/openshift-cluster/initialize_openshift_version.yml
+++ b/playbooks/common/openshift-cluster/initialize_openshift_version.yml
@@ -19,6 +19,9 @@
when: "not openshift.common.is_atomic | bool and 'Plugin \"search-disabled-repos\" requires API 2.7. Supported API is 2.6.' in yum_ver_test.stdout"
- include: disable_excluder.yml
+ vars:
+ # the excluders need to be disabled regardless of what the status check says
+ with_status_check: false
tags:
- always
diff --git a/playbooks/common/openshift-cluster/reset_excluder.yml b/playbooks/common/openshift-cluster/reset_excluder.yml
index fe86f4c23..7c544ee32 100644
--- a/playbooks/common/openshift-cluster/reset_excluder.yml
+++ b/playbooks/common/openshift-cluster/reset_excluder.yml
@@ -5,4 +5,4 @@
tasks:
- include_role:
name: openshift_excluder
- tasks_from: reset
+ tasks_from: enable
diff --git a/playbooks/common/openshift-cluster/upgrades/disable_excluder.yml b/playbooks/common/openshift-cluster/upgrades/disable_excluder.yml
new file mode 100644
index 000000000..2a85dc92e
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/disable_excluder.yml
@@ -0,0 +1,21 @@
+---
+- name: Record excluder state and disable
+ hosts: l_oo_all_hosts
+ gather_facts: no
+ tasks:
+ - include: pre/validate_excluder.yml
+ vars:
+ #repoquery_cmd: repoquery_cmd
+ #openshift_upgrade_target: openshift_upgrade_target
+ excluder: "{{ item }}"
+ with_items:
+ - "{{ openshift.common.service_type }}-docker-excluder"
+ - "{{ openshift.common.service_type }}-excluder"
+
+ # disable excluders based on their status
+ - include_role:
+ name: openshift_excluder
+ tasks_from: disable
+ vars:
+ openshift_excluder_package_state: latest
+ docker_excluder_package_state: latest
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/validate_excluder.yml b/playbooks/common/openshift-cluster/upgrades/pre/validate_excluder.yml
new file mode 100644
index 000000000..5078638b7
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/pre/validate_excluder.yml
@@ -0,0 +1,22 @@
+---
+# input variables:
+# - repoquery_cmd
+# - excluder
+# - openshift_upgrade_target
+- name: Get available excluder version
+ command: >
+ {{ repoquery_cmd }} --qf '%{version}' "{{ excluder }}"
+ register: excluder_version
+ failed_when: false
+ changed_when: false
+
+- name: Docker excluder version detected
+ debug:
+ msg: "{{ excluder }}: {{ excluder_version.stdout }}"
+
+- name: Check that the available {{ excluder }} version is at most the upgrade target version
+ fail:
+ msg: "Available {{ excluder }} version {{ excluder_version.stdout }} is higher than the upgrade target version {{ openshift_upgrade_target }}"
+ when:
+ - "{{ excluder_version.stdout != '' }}"
+ - "{{ excluder_version.stdout.split('.')[0:2] | join('.') | version_compare(openshift_upgrade_target, '>', strict=True) }}"
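As a worked illustration of the gate above, here is a minimal standalone play that evaluates the same expression validate_excluder.yml uses. The excluder version string and upgrade target are hypothetical values chosen only to show the comparison; they are not part of this change.

---
- hosts: localhost
  gather_facts: no
  vars:
    # hypothetical repoquery output and upgrade target, for illustration only
    excluder_version_stdout: "3.6.173.0.5"
    openshift_upgrade_target: "3.5"
  tasks:
    - name: Show how the excluder version gate evaluates
      debug:
        msg: "{{ excluder_version_stdout.split('.')[0:2] | join('.') | version_compare(openshift_upgrade_target, '>') }}"
      # Prints True here: the available 3.6 excluder is newer than the 3.5
      # upgrade target, so the check above would fail and block the upgrade.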
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/validator.yml
new file mode 100644
index 000000000..9c126033c
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_5/validator.yml
@@ -0,0 +1,67 @@
+---
+###############################################################################
+# Pre-upgrade checks for known data problems. If this playbook fails you should
+# contact support. If you're not supported, contact users@lists.openshift.com
+#
+# oc_objectvalidator provides these two checks
+# 1 - SDN Data issues, never seen in the wild but known possible due to code audits
+# https://github.com/openshift/origin/issues/12697
+# 2 - Namespace protections, https://bugzilla.redhat.com/show_bug.cgi?id=1428934
+#
+###############################################################################
+- name: Verify 3.5 specific upgrade checks
+ hosts: oo_first_master
+ roles:
+ - { role: lib_openshift }
+ tasks:
+ - name: Check for invalid namespaces and SDN errors
+ oc_objectvalidator:
+
+ # What's all this PetSet business about?
+ #
+ # 'PetSets' were ALPHA resources in Kube <= 3.4. In >= 3.5 they are
+ # no longer supported. The BETA resource 'StatefulSets' replaces
+ # them. We can't migrate clients' PetSets to
+ # StatefulSets. Additionally, Red Hat has never officially supported
+ # these resource types. Sorry users, but if you were using
+ # unsupported resources from the Kube documentation then we can't
+ # help you at this time.
+ #
+ # Reference: https://bugzilla.redhat.com/show_bug.cgi?id=1428229
+ - name: Check if legacy PetSets exist
+ oc_obj:
+ state: list
+ all_namespaces: true
+ kind: petsets
+ register: l_do_petsets_exist
+
+ - name: FAIL ON Resource migration 'PetSets' unsupported
+ fail:
+ msg: >
+ PetSet objects were detected in your cluster. These are an
+ Alpha feature in upstream Kubernetes 1.4 and are not supported
+ by Red Hat. In Kubernetes 1.5, they are replaced by the Beta
+ feature StatefulSets. Red Hat currently does not offer support
+ for either PetSets or StatefulSets.
+
+ Automatically migrating PetSets to StatefulSets in OpenShift
+ Container Platform (OCP) 3.5 is not supported. See the
+ Kubernetes "Upgrading from PetSets to StatefulSets"
+ documentation for additional information:
+
+ https://kubernetes.io/docs/tasks/manage-stateful-set/upgrade-pet-set-to-stateful-set/
+
+ PetSets MUST be removed before upgrading to OCP 3.5. Red Hat
+ strongly recommends reading the above referenced documentation
+ in its entirety before taking any destructive actions.
+
+ If you want to simply remove all PetSets without manually
+ migrating to StatefulSets, run this command as a user with
+ cluster-admin privileges:
+
+ $ oc get petsets --all-namespaces -o yaml | oc delete -f - --cascade=false
+ when:
+ # Search did not fail, valid resource type found
+ - l_do_petsets_exist.results.returncode == 0
+ # Items do exist in the search results
+ - l_do_petsets_exist.results.results.0['items'] | length > 0
diff --git a/roles/contiv/defaults/main.yml b/roles/contiv/defaults/main.yml
index c2b72339c..1ccae61f2 100644
--- a/roles/contiv/defaults/main.yml
+++ b/roles/contiv/defaults/main.yml
@@ -2,11 +2,18 @@
# The version of Contiv binaries to use
contiv_version: 1.0.0-beta.3-02-21-2017.20-52-42.UTC
+# The version of cni binaries
+cni_version: v0.4.0
+
contiv_default_subnet: "20.1.1.1/24"
contiv_default_gw: "20.1.1.254"
# TCP port that Netmaster listens for network connections
netmaster_port: 9999
+# Default for contiv_role
+contiv_role: netmaster
+
+
# TCP port that Netplugin listens for network connections
netplugin_port: 6640
contiv_rpc_port1: 9001
@@ -33,6 +40,14 @@ bin_dir: /usr/bin
# Path to the contivk8s cni binary
cni_bin_dir: /opt/cni/bin
+# Path to cni archive download directory
+cni_download_dir: /tmp
+
+# URL for cni binaries
+cni_bin_url_base: "https://github.com/containernetworking/cni/releases/download/"
+cni_bin_url: "{{ cni_bin_url_base }}/{{ cni_version }}/cni-{{ cni_version }}.tbz2"
+
+
# Contiv config directory
contiv_config_dir: /opt/contiv/config
diff --git a/roles/contiv/files/loopback b/roles/contiv/files/loopback
deleted file mode 100644
index f02b0b1fb..000000000
--- a/roles/contiv/files/loopback
+++ /dev/null
Binary files differ
diff --git a/roles/contiv/tasks/download_bins.yml b/roles/contiv/tasks/download_bins.yml
index 28ed50fae..319fce46c 100644
--- a/roles/contiv/tasks/download_bins.yml
+++ b/roles/contiv/tasks/download_bins.yml
@@ -25,3 +25,22 @@
src: "{{ contiv_current_release_directory }}/netplugin-{{ contiv_version }}.tar.bz2"
dest: "{{ contiv_current_release_directory }}"
copy: no
+
+- name: Download Bins | Download cni tar file
+ get_url:
+ url: "{{ cni_bin_url }}"
+ dest: "{{ cni_download_dir }}"
+ mode: 0755
+ validate_certs: False
+ environment:
+ http_proxy: "{{ http_proxy|default('') }}"
+ https_proxy: "{{ https_proxy|default('') }}"
+ no_proxy: "{{ no_proxy|default('') }}"
+ register: download_file
+
+- name: Download Bins | Extract cni tar file
+ unarchive:
+ src: "{{ download_file.dest }}"
+ dest: "{{ cni_download_dir }}"
+ copy: no
+ when: download_file.changed
diff --git a/roles/contiv/tasks/netplugin.yml b/roles/contiv/tasks/netplugin.yml
index ec6c72fe9..97b9762df 100644
--- a/roles/contiv/tasks/netplugin.yml
+++ b/roles/contiv/tasks/netplugin.yml
@@ -43,8 +43,9 @@
- name: Netplugin | Copy CNI loopback bin
copy:
- src: loopback
+ src: "{{ cni_download_dir }}/loopback"
dest: "{{ cni_bin_dir }}/loopback"
+ remote_src: True
mode: 0755
- name: Netplugin | Ensure kube_plugin_dir and cni/net.d directories exist
diff --git a/roles/docker/templates/custom.conf.j2 b/roles/docker/templates/custom.conf.j2
index 53ed56abc..9b47cb6ab 100644
--- a/roles/docker/templates/custom.conf.j2
+++ b/roles/docker/templates/custom.conf.j2
@@ -1,5 +1,5 @@
# {{ ansible_managed }}
[Unit]
-Requires=iptables.service
+Wants=iptables.service
After=iptables.service
diff --git a/roles/etcd/tasks/etcdctl.yml b/roles/etcd/tasks/etcdctl.yml
index bb6fabf64..649ad23c1 100644
--- a/roles/etcd/tasks/etcdctl.yml
+++ b/roles/etcd/tasks/etcdctl.yml
@@ -1,6 +1,6 @@
---
- name: Install etcd for etcdctl
- package: name=etcd state=present
+ package: name=etcd{{ '-' + etcd_version if etcd_version is defined else '' }} state=present
when: not openshift.common.is_atomic | bool
- name: Configure etcd profile.d alises
diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml
index b4ffc99e3..c09da3b61 100644
--- a/roles/etcd/tasks/main.yml
+++ b/roles/etcd/tasks/main.yml
@@ -7,7 +7,7 @@
etcd_ip: "{{ etcd_ip }}"
- name: Install etcd
- package: name=etcd state=present
+ package: name=etcd{{ '-' + etcd_version if etcd_version is defined else '' }} state=present
when: not etcd_is_containerized | bool
- name: Pull etcd container
diff --git a/roles/etcd_server_certificates/tasks/main.yml b/roles/etcd_server_certificates/tasks/main.yml
index 242c1e997..4ae9b79c4 100644
--- a/roles/etcd_server_certificates/tasks/main.yml
+++ b/roles/etcd_server_certificates/tasks/main.yml
@@ -1,6 +1,6 @@
---
- name: Install etcd
- package: name=etcd state=present
+ package: name=etcd{{ '-' + etcd_version if etcd_version is defined else '' }} state=present
when: not etcd_is_containerized | bool
- name: Check status of etcd certificates
diff --git a/roles/lib_openshift/library/oc_objectvalidator.py b/roles/lib_openshift/library/oc_objectvalidator.py
index e691c2ea7..1d0e4c876 100644
--- a/roles/lib_openshift/library/oc_objectvalidator.py
+++ b/roles/lib_openshift/library/oc_objectvalidator.py
@@ -1381,7 +1381,16 @@ class OCObjectValidator(OpenShiftCLI):
all_invalid[invalid_msg] = invalid
if failed:
- return {'failed': True, 'msg': 'All objects are not valid.', 'state': 'list', 'results': all_invalid}
+ return {
+ 'failed': True,
+ 'msg': (
+ "All objects are not valid. If you are a supported customer please contact "
+ "Red Hat Support providing the complete output above. If you are not a customer "
+ "please contact users@lists.openshift.redhat.com for assistance."
+ ),
+ 'state': 'list',
+ 'results': all_invalid
+ }
return {'msg': 'All objects are valid.'}
diff --git a/roles/lib_openshift/src/class/oc_objectvalidator.py b/roles/lib_openshift/src/class/oc_objectvalidator.py
index b76fc995e..43f6cac67 100644
--- a/roles/lib_openshift/src/class/oc_objectvalidator.py
+++ b/roles/lib_openshift/src/class/oc_objectvalidator.py
@@ -72,6 +72,15 @@ class OCObjectValidator(OpenShiftCLI):
all_invalid[invalid_msg] = invalid
if failed:
- return {'failed': True, 'msg': 'All objects are not valid.', 'state': 'list', 'results': all_invalid}
+ return {
+ 'failed': True,
+ 'msg': (
+ "All objects are not valid. If you are a supported customer please contact "
+ "Red Hat Support providing the complete output above. If you are not a customer "
+ "please contact users@lists.openshift.redhat.com for assistance."
+ ),
+ 'state': 'list',
+ 'results': all_invalid
+ }
return {'msg': 'All objects are valid.'}
diff --git a/roles/lib_openshift/src/test/unit/test_oc_objectvalidator.py b/roles/lib_openshift/src/test/unit/test_oc_objectvalidator.py
index a97d0493e..da326742f 100755
--- a/roles/lib_openshift/src/test/unit/test_oc_objectvalidator.py
+++ b/roles/lib_openshift/src/test/unit/test_oc_objectvalidator.py
@@ -891,7 +891,7 @@ class OCObjectValidatorTest(unittest.TestCase):
# Assert
self.assertTrue(results['failed'])
- self.assertEqual(results['msg'], 'All objects are not valid.')
+ self.assertIn('All objects are not valid.', results['msg'])
self.assertEqual(results['state'], 'list')
self.assertEqual(results['results'], invalid_results)
diff --git a/roles/openshift_excluder/README.md b/roles/openshift_excluder/README.md
index 6c90b4e96..e76a15952 100644
--- a/roles/openshift_excluder/README.md
+++ b/roles/openshift_excluder/README.md
@@ -15,8 +15,11 @@ Facts
| Name | Default Value | Description |
-----------------------------|---------------|----------------------------------------|
-| docker_excluder_enabled | none | Records the status of docker excluder |
-| openshift_excluder_enabled | none | Records the status of the openshift excluder |
+| enable_docker_excluder | enable_excluders | Enable docker excluder. If not set, the docker excluder is ignored. |
+| enable_openshift_excluder | enable_excluders | Enable openshift excluder. If not set, the openshift excluder is ignored. |
+| enable_excluders | None | Enable all excluders |
+| enable_docker_excluder_override | None | Indicates the docker excluder needs to be enabled |
+| disable_openshift_excluder_override | None | Indicates the openshift excluder needs to be disabled |
Role Variables
--------------
@@ -25,6 +28,16 @@ None
Dependencies
------------
+Tasks to include
+----------------
+
+- exclude: enable excluders (assuming excluders are installed)
+- unexclude: disable excluders (assuming excluders are installed)
+- install: install excluders (installation is followed by excluder enabling)
+- enable: enable excluders (optionally with installation step)
+- disable: disable excluders (optionally with an installation and status step; the status check can override which excluder gets enabled/disabled)
+- status: determine status of excluders
+
Example Playbook
----------------
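For reference, the task entry points documented in this README are consumed with include_role and tasks_from, just as the reworked playbooks earlier in this diff do. The minimal sketch below mirrors the host group and 'present' package states from playbooks/common/openshift-cluster/disable_excluder.yml; it is illustrative only, not an additional file in this change.

---
- name: Disable excluders before an install
  hosts: l_oo_all_hosts
  gather_facts: no
  tasks:
    - include_role:
        name: openshift_excluder
        tasks_from: disable
      vars:
        # 'present' keeps already installed excluders as they are;
        # the upgrade playbooks pass 'latest' here instead
        openshift_excluder_package_state: present
        docker_excluder_package_state: present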
diff --git a/roles/openshift_excluder/defaults/main.yml b/roles/openshift_excluder/defaults/main.yml
new file mode 100644
index 000000000..7c3ae2a86
--- /dev/null
+++ b/roles/openshift_excluder/defaults/main.yml
@@ -0,0 +1,6 @@
+---
+# Keep the currently installed excluder packages ('present') or update them to the latest available ('latest')?
+openshift_excluder_package_state: present
+docker_excluder_package_state: present
+
+enable_excluders: true
diff --git a/roles/openshift_excluder/meta/main.yml b/roles/openshift_excluder/meta/main.yml
index 8bca38e77..4d1c1efca 100644
--- a/roles/openshift_excluder/meta/main.yml
+++ b/roles/openshift_excluder/meta/main.yml
@@ -13,3 +13,4 @@ galaxy_info:
- cloud
dependencies:
- { role: openshift_facts }
+- { role: openshift_repos }
diff --git a/roles/openshift_excluder/tasks/adjust.yml b/roles/openshift_excluder/tasks/adjust.yml
new file mode 100644
index 000000000..2535b9ea6
--- /dev/null
+++ b/roles/openshift_excluder/tasks/adjust.yml
@@ -0,0 +1,23 @@
+---
+# Depending on the enablement of the individual excluders and their status,
+# some excluders need to be disabled and others enabled.
+# By default, all excluders are disabled unless overridden.
+- block:
+ - include: init.yml
+ # All excluders that are to be enabled are enabled
+ - include: exclude.yml
+ vars:
+ # Enable the docker excluder only if its override is set
+ enable_docker_excluder: "{{ enable_docker_excluder_override | default(false) | bool }}"
+ # excluder is to be disabled by default
+ enable_openshift_excluder: false
+ # All excluders that are to be disabled are disabled
+ - include: unexclude.yml
+ vars:
+ # If the docker override is not set, default to the generic behaviour
+ disable_docker_excluder: "{{ not enable_docker_excluder_override | default(not docker_excluder_on) | bool }}"
+ # disabling the openshift excluder is never overridden;
+ # disable it if the docker excluder is enabled
+ disable_openshift_excluder: "{{ openshift_excluder_on | bool }}"
+ when:
+ - not openshift.common.is_atomic | bool
diff --git a/roles/openshift_excluder/tasks/disable.yml b/roles/openshift_excluder/tasks/disable.yml
new file mode 100644
index 000000000..a8deb3eb1
--- /dev/null
+++ b/roles/openshift_excluder/tasks/disable.yml
@@ -0,0 +1,26 @@
+---
+# input variables
+# - with_status_check
+# - with_install
+# - excluder_package_state
+# - docker_excluder_package_state
+- include: init.yml
+
+# Install any excluder that is enabled
+- include: install.yml
+ vars:
+ # Both docker_excluder_on and openshift_excluder_on are set in openshift_excluder->init task
+ install_docker_excluder: "{{ docker_excluder_on | bool }}"
+ install_openshift_excluder: "{{ openshift_excluder_on | bool }}"
+ when: docker_excluder_on or openshift_excluder_on
+
+ # if the docker excluder is not enabled, we don't care about its status
+ # if the docker excluder is enabled, we install it and, in case its status is non-zero,
+ # it is enabled no matter what
+
+# Check the current state of all excluders
+- include: status.yml
+ when: with_status_check | default(docker_excluder_on or openshift_excluder_on) | bool
+
+ # And finally adjust the excluders so that host components are updated correctly
+- include: adjust.yml
diff --git a/roles/openshift_excluder/tasks/enable.yml b/roles/openshift_excluder/tasks/enable.yml
new file mode 100644
index 000000000..413c7b5cf
--- /dev/null
+++ b/roles/openshift_excluder/tasks/enable.yml
@@ -0,0 +1,21 @@
+---
+# input variables:
+# - with_install
+- block:
+ - include: init.yml
+
+ - include: install.yml
+ vars:
+ install_docker_excluder: "{{ docker_excluder_on | bool }}"
+ install_openshift_excluder: "{{ openshift_excluder_on | bool }}"
+ when: with_install | default(docker_excluder_on or openshift_excluder_on) | bool
+
+ - include: exclude.yml
+ vars:
+ # Enable the docker excluder if its override is set, otherwise fall back to its default enablement (in that order)
+ enable_docker_excluder: "{{ enable_docker_excluder_override | default(docker_excluder_on) | bool }}"
+ # Enable the openshift excluder unless its disable override is set, otherwise fall back to its default enablement (in that order)
+ enable_openshift_excluder: "{{ not disable_openshift_excluder_override | default(not openshift_excluder_on) | bool }}"
+
+ when:
+ - not openshift.common.is_atomic | bool
diff --git a/roles/openshift_excluder/tasks/exclude.yml b/roles/openshift_excluder/tasks/exclude.yml
index 570183aef..af9824aae 100644
--- a/roles/openshift_excluder/tasks/exclude.yml
+++ b/roles/openshift_excluder/tasks/exclude.yml
@@ -1,11 +1,20 @@
---
-- include: install.yml
- when: not openshift.common.is_containerized | bool
+# input variables:
+# - enable_docker_excluder
+# - enable_openshift_excluder
+- block:
+ - name: Enable docker excluder
+ command: "{{ openshift.common.service_type }}-docker-excluder exclude"
+ # if the docker override is set, it means the docker excluder needs to be enabled no matter what
+ # if the docker override is not set, the excluder is set based on enable_docker_excluder
+ when:
+ - enable_docker_excluder | default(false) | bool
-- name: Enable docker excluder
- command: "{{ openshift.common.service_type }}-docker-excluder exclude"
- when: not openshift.common.is_containerized | bool
-
-- name: Enable excluder
- command: "{{ openshift.common.service_type }}-excluder exclude"
- when: not openshift.common.is_containerized | bool
+ - name: Enable openshift excluder
+ command: "{{ openshift.common.service_type }}-excluder exclude"
+ # if the openshift override is set, it means the openshift excluder is disabled no matter what
+ # if the openshift override is not set, the excluder is set based on enable_openshift_excluder
+ when:
+ - enable_openshift_excluder | default(false) | bool
+ when:
+ - not openshift.common.is_atomic | bool
diff --git a/roles/openshift_excluder/tasks/init.yml b/roles/openshift_excluder/tasks/init.yml
new file mode 100644
index 000000000..1ea18f363
--- /dev/null
+++ b/roles/openshift_excluder/tasks/init.yml
@@ -0,0 +1,12 @@
+---
+- name: Evaluate if docker excluder is to be enabled
+ set_fact:
+ docker_excluder_on: "{{ enable_docker_excluder | default(enable_excluders) | bool }}"
+
+- debug: var=docker_excluder_on
+
+- name: Evaluate if openshift excluder is to be enabled
+ set_fact:
+ openshift_excluder_on: "{{ enable_openshift_excluder | default(enable_excluders) | bool }}"
+
+- debug: var=openshift_excluder_on
diff --git a/roles/openshift_excluder/tasks/install.yml b/roles/openshift_excluder/tasks/install.yml
index ee4cb2c05..dcc8df0cb 100644
--- a/roles/openshift_excluder/tasks/install.yml
+++ b/roles/openshift_excluder/tasks/install.yml
@@ -1,16 +1,21 @@
---
-- name: Install latest excluder
- package:
- name: "{{ openshift.common.service_type }}-excluder"
- state: latest
- when:
- - openshift_excluder_enabled | default(false) | bool
- - not openshift.common.is_containerized | bool
+# input Variables
+# - install_docker_excluder
+# - install_openshift_excluder
+- block:
+
+ - name: Install docker excluder
+ package:
+ name: "{{ openshift.common.service_type }}-docker-excluder"
+ state: "{{ docker_excluder_package_state }}"
+ when:
+ - install_docker_excluder | default(true) | bool
-- name: Install latest docker excluder
- package:
- name: "{{ openshift.common.service_type }}-excluder"
- state: latest
+ - name: Install openshift excluder
+ package:
+ name: "{{ openshift.common.service_type }}-excluder"
+ state: "{{ openshift_excluder_package_state }}"
+ when:
+ - install_openshift_excluder | default(true) | bool
when:
- - docker_excluder_enabled | default(false) | bool
- - not openshift.common.is_containerized | bool
+ - not openshift.common.is_atomic | bool
diff --git a/roles/openshift_excluder/tasks/reset.yml b/roles/openshift_excluder/tasks/reset.yml
deleted file mode 100644
index 486a23fd0..000000000
--- a/roles/openshift_excluder/tasks/reset.yml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-- name: Enable docker excluder
- command: "{{ openshift.common.service_type }}-docker-excluder exclude"
- when:
- - docker_excluder_enabled | default(false) | bool
- - not openshift.common.is_containerized | bool
-
-- name: Enable excluder
- command: "{{ openshift.common.service_type }}-excluder exclude"
- when:
- - openshift_excluder_enabled | default(false) | bool
- - not openshift.common.is_containerized | bool
diff --git a/roles/openshift_excluder/tasks/status.yml b/roles/openshift_excluder/tasks/status.yml
index ef118d94c..363ccdbea 100644
--- a/roles/openshift_excluder/tasks/status.yml
+++ b/roles/openshift_excluder/tasks/status.yml
@@ -1,8 +1,4 @@
---
-# Latest versions of the excluders include a status function, old packages dont
-# So, if packages are installed, upgrade them to the latest so we get the status
-# If they're not installed when we should assume they're disabled
-
- name: Determine if excluder packages are installed
rpm_q:
name: "{{ openshift.common.service_type }}-excluder"
@@ -10,49 +6,79 @@
register: openshift_excluder_installed
failed_when: false
+# the docker excluder needs to be enabled by default
- name: Determine if docker packages are installed
rpm_q:
- name: "{{ openshift.common.service_type }}-excluder"
+ name: "{{ openshift.common.service_type }}-docker-excluder"
state: present
register: docker_excluder_installed
failed_when: false
-- name: Update to latest excluder packages
- package:
- name: "{{ openshift.common.service_type }}-excluder"
- state: latest
- when:
- - "{{ openshift_excluder_installed.installed_versions | default([]) | length > 0 }}"
- - not openshift.common.is_containerized | bool
+# The excluder status function returns 0 when everything is excluded
+# and 1 if any packages are missing from the exclusions list, printing a warning to stderr
+# # atomic-openshift-excluder status ; echo $?
+# exclude -- All packages excluded
+# 0
+# # atomic-openshift-excluder unexclude
+# # atomic-openshift-excluder status ; echo $?
+# unexclude -- At least one package not excluded
+# 1
-- name: Update to the latest docker-excluder packages
- package:
- name: "{{ openshift.common.service_type }}-docker-excluder"
- state: latest
- when:
- - "{{ docker_excluder_installed.installed_versions | default([]) | length > 0 }}"
- - not openshift.common.is_containerized | bool
+- block:
+ - include: init.yml
+ - block:
+ - name: Record openshift excluder status
+ command: "{{ openshift.common.service_type }}-excluder status"
+ register: excluder_status
+ failed_when: false
-- name: Record excluder status
- command: "{{ openshift.common.service_type }}-excluder"
- register: excluder_status
- when:
- - "{{ openshift_excluder_installed.installed_versions | default([]) | length > 0 }}"
- - not openshift.common.is_containerized | bool
- failed_when: false
+ # Even though the openshift excluder is enabled,
+ # if the status is non-zero, disable the excluder
+ - name: Override openshift excluder enablement if the status is non-zero
+ set_fact:
+ disable_openshift_excluder_override: true
+ when:
+ - "{{ excluder_status.rc | default(0) != 0 }}"
-- name: Record docker excluder status
- command: "{{ openshift.common.service_type }}-docker-excluder"
- register: docker_excluder_status
- when:
- - "{{ docker_excluder_installed.installed_versions | default([]) | length > 0 }}"
- - not openshift.common.is_containerized | bool
- failed_when: false
+ - debug:
+ msg: "Disabling openshift excluder"
+ when:
+ - "{{ excluder_status.rc | default(0) != 0 }}"
+
+ when:
+ - "{{ openshift_excluder_installed.installed_versions | default([]) | length > 0 }}"
+ - "{{ openshift_excluder_on }}"
+
+ - block:
+ - name: Record docker excluder status
+ command: "{{ openshift.common.service_type }}-docker-excluder status"
+ register: docker_excluder_status
+ failed_when: false
-- name: Set excluder status facts
- set_fact:
- docker_excluder_enabled: "{{ 'false' if docker_excluder_status.rc | default(0) == 0 or docker_excluder_installed.installed_versions | default(0) | length == 0 else 'true' }}"
- openshift_excluder_enabled: "{{ 'false' if docker_excluder_status.rc | default(0) == 0 or openshift_excluder_installed.installed_versions | default(0) | length == 0 else 'true' }}"
+ # If the docker excluder is installed and the status is non-zero
+ # always enable the docker excluder
+ - name: Override docker excluder enablement if the status is non-zero
+ set_fact:
+ enable_docker_excluder_override: true
+ when:
+ - "{{ docker_excluder_status.rc | default(0) != 0 }}"
-- debug: var=docker_excluder_enabled
-- debug: var=openshift_excluder_enabled
+ - debug:
+ msg: "Enabling docker excluder"
+ when:
+ - "{{ docker_excluder_status.rc | default(0) != 0 }}"
+
+ # As the docker excluder status is not satisfied,
+ # re-enable the docker excluder entirely.
+ # At the same time keep the override set in case another task relies on it.
+ - name: Enable docker excluder
+ command: "{{ openshift.common.service_type }}-docker-excluder exclude"
+
+ # Run the docker excluder status check even if the excluder is disabled,
+ # in order to determine whether the excluder needs to be enabled.
+ when:
+ - "{{ docker_excluder_installed.installed_versions | default([]) | length > 0 }}"
+ - "{{ docker_excluder_on }}"
+
+ when:
+ - not openshift.common.is_atomic | bool
diff --git a/roles/openshift_excluder/tasks/unexclude.yml b/roles/openshift_excluder/tasks/unexclude.yml
index 38f0759aa..196ca25f5 100644
--- a/roles/openshift_excluder/tasks/unexclude.yml
+++ b/roles/openshift_excluder/tasks/unexclude.yml
@@ -1,12 +1,19 @@
---
-- name: disable docker excluder
- command: "{{ openshift.common.service_type }}-docker-excluder unexclude"
- when:
- - docker_excluder_enabled | bool
- - not openshift.common.is_containerized | bool
+# input variables:
+# - disable_docker_excluder
+# - disable_openshift_excluder
+- block:
+ - include: init.yml
+
+ - name: disable docker excluder
+ command: "{{ openshift.common.service_type }}-docker-excluder unexclude"
+ when:
+ - disable_docker_excluder | default(false) | bool
+
+ - name: disable openshift excluder
+ command: "{{ openshift.common.service_type }}-excluder unexclude"
+ when:
+ - disable_openshift_excluder | default(false) | bool
-- name: disable excluder
- command: "{{ openshift.common.service_type }}-excluder unexclude"
when:
- - openshift_excluder_enabled | bool
- - not openshift.common.is_containerized | bool
+ - not openshift.common.is_atomic | bool
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 75b55c369..8ea900e21 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -2319,14 +2319,19 @@ class OpenShiftFacts(object):
protected_facts_to_overwrite)
if 'docker' in new_local_facts:
- # remove duplicate and empty strings from registry lists
+ # remove duplicate and empty strings from registry lists, preserving order
for cat in ['additional', 'blocked', 'insecure']:
key = '{0}_registries'.format(cat)
if key in new_local_facts['docker']:
val = new_local_facts['docker'][key]
if isinstance(val, string_types):
val = [x.strip() for x in val.split(',')]
- new_local_facts['docker'][key] = list(set(val) - set(['']))
+ seen = set()
+ new_local_facts['docker'][key] = list()
+ for registry in val:
+ if registry not in seen and registry != '':
+ seen.add(registry)
+ new_local_facts['docker'][key].append(registry)
# Convert legacy log_options comma sep string to a list if present:
if 'log_options' in new_local_facts['docker'] and \
isinstance(new_local_facts['docker']['log_options'], string_types):
diff --git a/roles/openshift_facts/tasks/main.yml b/roles/openshift_facts/tasks/main.yml
index c538ff7a1..73c668c72 100644
--- a/roles/openshift_facts/tasks/main.yml
+++ b/roles/openshift_facts/tasks/main.yml
@@ -13,6 +13,8 @@
l_is_node_system_container: "{{ (use_node_system_container | default(use_system_containers) | bool) }}"
l_is_master_system_container: "{{ (use_master_system_container | default(use_system_containers) | bool) }}"
l_is_etcd_system_container: "{{ (use_etcd_system_container | default(use_system_containers) | bool) }}"
+- set_fact:
+ l_any_system_container: "{{ l_is_etcd_system_container or l_is_openvswitch_system_container or l_is_node_system_container or l_is_master_system_container }}"
- name: Validate python version
fail:
@@ -50,6 +52,13 @@
with_items: "{{ required_packages }}"
when: not l_is_atomic | bool
+- name: Ensure various deps for running system containers are installed
+ package: name={{ item }} state=present
+ with_items: "{{ required_system_containers_packages }}"
+ when:
+ - not l_is_atomic | bool
+ - l_any_system_container | bool
+
- name: Gather Cluster facts and set is_containerized if needed
openshift_facts:
role: common
diff --git a/roles/openshift_facts/vars/main.yml b/roles/openshift_facts/vars/main.yml
index 9c3110ff6..07f5100ad 100644
--- a/roles/openshift_facts/vars/main.yml
+++ b/roles/openshift_facts/vars/main.yml
@@ -5,3 +5,8 @@ required_packages:
- python-six
- PyYAML
- yum-utils
+
+required_system_containers_packages:
+ - atomic
+ - ostree
+ - runc
diff --git a/roles/openshift_logging/defaults/main.yml b/roles/openshift_logging/defaults/main.yml
index 5440a3647..ad9c1ce42 100644
--- a/roles/openshift_logging/defaults/main.yml
+++ b/roles/openshift_logging/defaults/main.yml
@@ -1,11 +1,12 @@
---
openshift_logging_image_prefix: "{{ openshift_hosted_logging_deployer_prefix | default('docker.io/openshift/origin-') }}"
openshift_logging_image_version: "{{ openshift_hosted_logging_deployer_version | default('latest') }}"
-openshift_logging_use_ops: False
+openshift_logging_use_ops: "{{ openshift_hosted_logging_enable_ops_cluster | default('false') | bool }}"
openshift_logging_master_url: "https://kubernetes.default.svc.{{ openshift.common.dns_domain }}"
openshift_logging_master_public_url: "{{ openshift_hosted_logging_master_public_url | default('https://' + openshift.common.public_hostname + ':' + openshift.master.api_port) }}"
openshift_logging_namespace: logging
openshift_logging_install_logging: True
+openshift_logging_image_pull_secret: "{{ openshift_hosted_logging_image_pull_secret | default('') }}"
openshift_logging_curator_default_days: 30
openshift_logging_curator_run_hour: 0
@@ -15,9 +16,11 @@ openshift_logging_curator_script_log_level: INFO
openshift_logging_curator_log_level: ERROR
openshift_logging_curator_cpu_limit: 100m
openshift_logging_curator_memory_limit: null
+openshift_logging_curator_nodeselector: "{{ openshift_hosted_logging_curator_nodeselector | default('') | map_from_pairs }}"
openshift_logging_curator_ops_cpu_limit: 100m
openshift_logging_curator_ops_memory_limit: null
+openshift_logging_curator_ops_nodeselector: "{{ openshift_hosted_logging_curator_ops_nodeselector | default('') | map_from_pairs }}"
openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' + openshift.common.dns_domain) }}"
openshift_logging_kibana_cpu_limit: null
@@ -28,6 +31,9 @@ openshift_logging_kibana_proxy_memory_limit: null
openshift_logging_kibana_replica_count: 1
openshift_logging_kibana_edge_term_policy: Redirect
+openshift_logging_kibana_nodeselector: "{{ openshift_hosted_logging_kibana_nodeselector | default('') | map_from_pairs }}"
+openshift_logging_kibana_ops_nodeselector: "{{ openshift_hosted_logging_kibana_ops_nodeselector | default('') | map_from_pairs }}"
+
#The absolute path on the control node to the cert file to use
#for the public facing kibana certs
openshift_logging_kibana_cert: ""
@@ -48,12 +54,13 @@ openshift_logging_kibana_ops_proxy_cpu_limit: null
openshift_logging_kibana_ops_proxy_memory_limit: null
openshift_logging_kibana_ops_replica_count: 1
-openshift_logging_fluentd_nodeselector: {'logging-infra-fluentd': 'true'}
+openshift_logging_fluentd_nodeselector: "{{ openshift_hosted_logging_fluentd_nodeselector_label | default('logging-infra-fluentd=true') | map_from_pairs }}"
openshift_logging_fluentd_cpu_limit: 100m
openshift_logging_fluentd_memory_limit: 512Mi
openshift_logging_fluentd_es_copy: false
-openshift_logging_fluentd_use_journal: ''
-openshift_logging_fluentd_journal_read_from_head: ''
+openshift_logging_fluentd_use_journal: "{{ openshift_hosted_logging_use_journal | default('') }}"
+openshift_logging_fluentd_journal_source: "{{ openshift_hosted_logging_journal_source | default('') }}"
+openshift_logging_fluentd_journal_read_from_head: "{{ openshift_hosted_logging_journal_read_from_head | default('') }}"
openshift_logging_fluentd_hosts: ['--all']
openshift_logging_es_host: logging-es
@@ -63,13 +70,14 @@ openshift_logging_es_client_cert: /etc/fluent/keys/cert
openshift_logging_es_client_key: /etc/fluent/keys/key
openshift_logging_es_cluster_size: "{{ openshift_hosted_logging_elasticsearch_cluster_size | default(1) }}"
openshift_logging_es_cpu_limit: null
-openshift_logging_es_memory_limit: 8Gi
+openshift_logging_es_memory_limit: "{{ openshift_hosted_logging_elasticsearch_instance_ram | default('8Gi') }}"
openshift_logging_es_pv_selector: null
openshift_logging_es_pvc_dynamic: "{{ openshift_hosted_logging_elasticsearch_pvc_dynamic | default(False) }}"
openshift_logging_es_pvc_size: "{{ openshift_hosted_logging_elasticsearch_pvc_size | default('') }}"
openshift_logging_es_pvc_prefix: "{{ openshift_hosted_logging_elasticsearch_pvc_prefix | default('logging-es') }}"
openshift_logging_es_recover_after_time: 5m
-openshift_logging_es_storage_group: 65534
+openshift_logging_es_storage_group: "{{ openshift_hosted_logging_elasticsearch_storage_group | default('65534') }}"
+openshift_logging_es_nodeselector: "{{ openshift_hosted_logging_elasticsearch_nodeselector | default('') | map_from_pairs }}"
# allow cluster-admin or cluster-reader to view operations index
openshift_logging_es_ops_allow_cluster_reader: False
@@ -81,13 +89,18 @@ openshift_logging_es_ops_client_cert: /etc/fluent/keys/cert
openshift_logging_es_ops_client_key: /etc/fluent/keys/key
openshift_logging_es_ops_cluster_size: "{{ openshift_hosted_logging_elasticsearch_ops_cluster_size | default(1) }}"
openshift_logging_es_ops_cpu_limit: null
-openshift_logging_es_ops_memory_limit: 8Gi
+openshift_logging_es_ops_memory_limit: "{{ openshift_hosted_logging_elasticsearch_ops_instance_ram | default('8Gi') }}"
openshift_logging_es_ops_pv_selector: None
openshift_logging_es_ops_pvc_dynamic: "{{ openshift_hosted_logging_elasticsearch_ops_pvc_dynamic | default(False) }}"
openshift_logging_es_ops_pvc_size: "{{ openshift_hosted_logging_elasticsearch_ops_pvc_size | default('') }}"
openshift_logging_es_ops_pvc_prefix: "{{ openshift_hosted_logging_elasticsearch_ops_pvc_prefix | default('logging-es-ops') }}"
openshift_logging_es_ops_recover_after_time: 5m
-openshift_logging_es_ops_storage_group: 65534
+openshift_logging_es_ops_storage_group: "{{ openshift_hosted_logging_elasticsearch_storage_group | default('65534') }}"
+openshift_logging_es_ops_nodeselector: "{{ openshift_hosted_logging_elasticsearch_ops_nodeselector | default('') | map_from_pairs }}"
+
+# storage related defaults
+openshift_logging_storage_access_modes: "{{ openshift_hosted_logging_storage_access_modes | default('ReadWriteOnce') }}"
+
# following can be uncommented to provide values for configmaps -- take care when providing file contents as it may cause your cluster to not operate correctly
#es_logging_contents:
diff --git a/roles/openshift_logging/filter_plugins/openshift_logging.py b/roles/openshift_logging/filter_plugins/openshift_logging.py
index 007be3ac0..9beffaef7 100644
--- a/roles/openshift_logging/filter_plugins/openshift_logging.py
+++ b/roles/openshift_logging/filter_plugins/openshift_logging.py
@@ -25,6 +25,14 @@ def entry_from_named_pair(register_pairs, key):
raise RuntimeError("There was no entry found in the dict that had an item with a name that matched {}".format(key))
+def map_from_pairs(source, delim="="):
+    ''' Return a dict built from a comma-separated string of delim-delimited pairs '''
+    if source == '':
+        return dict()
+
+    return dict(item.split(delim) for item in source.split(","))
+
+
# pylint: disable=too-few-public-methods
class FilterModule(object):
''' OpenShift Logging Filters '''
@@ -35,4 +43,5 @@ class FilterModule(object):
return {
'random_word': random_word,
'entry_from_named_pair': entry_from_named_pair,
+ 'map_from_pairs': map_from_pairs,
}
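The new map_from_pairs filter is what turns the flat "key=value[,key=value]" strings used by the openshift_hosted_logging_* variables into the node-selector dicts consumed in defaults/main.yml. A small sketch of the conversion follows; the selector value is hypothetical, and the play assumes the role's filter_plugins directory is on Ansible's filter plugin path so the filter resolves.

---
- hosts: localhost
  gather_facts: no
  vars:
    # hosted-logging style value; keys and values are illustrative
    fluentd_nodeselector_label: "logging-infra-fluentd=true,region=infra"
  tasks:
    - debug:
        msg: "{{ fluentd_nodeselector_label | map_from_pairs }}"
      # => {'logging-infra-fluentd': 'true', 'region': 'infra'}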
diff --git a/roles/openshift_logging/tasks/generate_pvcs.yaml b/roles/openshift_logging/tasks/generate_pvcs.yaml
index d6d1abd06..e1629908f 100644
--- a/roles/openshift_logging/tasks/generate_pvcs.yaml
+++ b/roles/openshift_logging/tasks/generate_pvcs.yaml
@@ -6,8 +6,8 @@
start: "{{es_pvc_names | map('regex_search', es_pvc_prefix+'.*')|select('string')|list|length}}"
with_sequence: start={{start}} end={{ (start|int > es_cluster_size|int - 1) | ternary(start, es_cluster_size|int - 1)}}
when:
+ - "{{ es_dc_names|default([]) | length <= es_cluster_size|int }}"
- es_pvc_size | search('^\d.*')
- - "{{ es_dc_names|default([]) | length < es_cluster_size|int }}"
check_mode: no
- name: Generating PersistentVolumeClaims
@@ -16,7 +16,7 @@
obj_name: "{{claim_name}}"
size: "{{es_pvc_size}}"
access_modes:
- - ReadWriteOnce
+ - "{{ es_access_modes }}"
pv_selector: "{{es_pv_selector}}"
with_items:
- "{{es_pvc_pool | default([])}}"
@@ -36,7 +36,7 @@
volume.alpha.kubernetes.io/storage-class: "dynamic"
size: "{{es_pvc_size}}"
access_modes:
- - ReadWriteOnce
+ - "{{ es_access_modes }}"
pv_selector: "{{es_pv_selector}}"
with_items:
- "{{es_pvc_pool|default([])}}"
diff --git a/roles/openshift_logging/tasks/install_curator.yaml b/roles/openshift_logging/tasks/install_curator.yaml
index 5b474ff39..ab8e207f1 100644
--- a/roles/openshift_logging/tasks/install_curator.yaml
+++ b/roles/openshift_logging/tasks/install_curator.yaml
@@ -31,7 +31,7 @@
curator_cpu_limit: "{{openshift_logging_curator_cpu_limit }}"
curator_memory_limit: "{{openshift_logging_curator_memory_limit }}"
replicas: "{{curator_replica_count.stdout | default (0)}}"
- curator_node_selector: "{{openshift_logging_curator_nodeselector | default({}) }}"
+ curator_node_selector: "{{openshift_logging_curator_nodeselector | default({})}}"
check_mode: no
changed_when: no
diff --git a/roles/openshift_logging/tasks/install_elasticsearch.yaml b/roles/openshift_logging/tasks/install_elasticsearch.yaml
index 6b441c4aa..a0ad12d94 100644
--- a/roles/openshift_logging/tasks/install_elasticsearch.yaml
+++ b/roles/openshift_logging/tasks/install_elasticsearch.yaml
@@ -12,6 +12,7 @@
es_pvc_size: "{{openshift_logging_es_pvc_size}}"
es_dc_names: "{{openshift_logging_facts.elasticsearch.deploymentconfigs.keys()}}"
es_cluster_size: "{{openshift_logging_es_cluster_size}}"
+ es_access_modes: "{{ openshift_logging_storage_access_modes }}"
# we should initialize the es_dc_pool with the current keys
- name: Init pool of DeploymentConfig names for Elasticsearch
@@ -44,7 +45,7 @@
volume_names: "{{es_pvc_pool | default([])}}"
pvc_claim: "{{(volume_names | length > item.0) | ternary(volume_names[item.0], None)}}"
deploy_name: "{{item.1}}"
- es_node_selector: "{{openshift_logging_es_nodeselector | default({})}}"
+ es_node_selector: "{{openshift_logging_es_nodeselector | default({}) }}"
with_indexed_items:
- "{{ es_dc_pool }}"
check_mode: no
@@ -77,6 +78,7 @@
es_cluster_size: "{{openshift_logging_es_ops_cluster_size|int}}"
es_pvc_dynamic: "{{openshift_logging_es_ops_pvc_dynamic | bool}}"
es_pv_selector: "{{openshift_logging_es_ops_pv_selector}}"
+ es_access_modes: "{{ openshift_logging_storage_access_modes }}"
when:
- openshift_logging_use_ops | bool
check_mode: no
@@ -119,7 +121,7 @@
es_recover_after_nodes: "{{es_ops_recover_after_nodes}}"
es_recover_expected_nodes: "{{es_ops_recover_expected_nodes}}"
openshift_logging_es_recover_after_time: "{{openshift_logging_es_ops_recover_after_time}}"
- es_node_selector: "{{openshift_logging_es_ops_nodeselector | default({})}}"
+ es_node_selector: "{{openshift_logging_es_ops_nodeselector | default({}) | map_from_pairs }}"
with_indexed_items:
- "{{ es_ops_dc_pool | default([]) }}"
when:
diff --git a/roles/openshift_logging/tasks/install_kibana.yaml b/roles/openshift_logging/tasks/install_kibana.yaml
index 3aeff2cac..52bdeb50d 100644
--- a/roles/openshift_logging/tasks/install_kibana.yaml
+++ b/roles/openshift_logging/tasks/install_kibana.yaml
@@ -35,7 +35,7 @@
kibana_proxy_cpu_limit: "{{openshift_logging_kibana_proxy_cpu_limit }}"
kibana_proxy_memory_limit: "{{openshift_logging_kibana_proxy_memory_limit }}"
replicas: "{{kibana_replica_count.stdout | default (0)}}"
- kibana_node_selector: "{{openshift_logging_kibana_nodeselector | default({}) }}"
+ kibana_node_selector: "{{openshift_logging_kibana_nodeselector | default({})}}"
check_mode: no
changed_when: no
@@ -54,7 +54,7 @@
kibana_proxy_cpu_limit: "{{openshift_logging_kibana_ops_proxy_cpu_limit }}"
kibana_proxy_memory_limit: "{{openshift_logging_kibana_ops_proxy_memory_limit }}"
replicas: "{{kibana_ops_replica_count.stdout | default (0)}}"
- kibana_node_selector: "{{openshift_logging_kibana_ops_nodeselector | default({}) }}"
+ kibana_node_selector: "{{openshift_logging_kibana_ops_nodeselector | default({})}}"
when: openshift_logging_use_ops | bool
check_mode: no
changed_when: no
diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml
index e23c3f9f1..83b68fa77 100644
--- a/roles/openshift_logging/tasks/install_logging.yaml
+++ b/roles/openshift_logging/tasks/install_logging.yaml
@@ -76,7 +76,9 @@
register: link_pull_secret
loop_control:
loop_var: sa_account
- when: openshift_logging_image_pull_secret is defined
+ when:
+ - openshift_logging_image_pull_secret is defined
+ - openshift_logging_image_pull_secret != ''
failed_when: link_pull_secret.rc != 0
- name: Scaling up cluster
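
The pull-secret link is now skipped both when the variable is undefined and when it is set to an empty string, which the hosted-logging wiring can pass through. In plain terms (the secret name below is hypothetical):

def should_link_pull_secret(hostvars):
    # Mirrors the two 'when:' conditions: defined *and* non-empty.
    secret = hostvars.get("openshift_logging_image_pull_secret")
    return secret is not None and secret != ""

print(should_link_pull_secret({}))                                                         # False
print(should_link_pull_secret({"openshift_logging_image_pull_secret": ""}))                # False
print(should_link_pull_secret({"openshift_logging_image_pull_secret": "my-pull-secret"}))  # True
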
diff --git a/roles/openshift_logging/templates/fluentd.j2 b/roles/openshift_logging/templates/fluentd.j2
index 223d342b9..0bf1686ad 100644
--- a/roles/openshift_logging/templates/fluentd.j2
+++ b/roles/openshift_logging/templates/fluentd.j2
@@ -119,7 +119,7 @@ spec:
- name: "USE_JOURNAL"
value: "{{openshift_logging_fluentd_use_journal|lower}}"
- name: "JOURNAL_SOURCE"
- value: "{{fluentd_journal_source | default('')}}"
+ value: "{{openshift_logging_fluentd_journal_source | default('')}}"
- name: "JOURNAL_READ_FROM_HEAD"
value: "{{openshift_logging_fluentd_journal_read_from_head|lower}}"
volumes:
diff --git a/roles/openshift_metrics/defaults/main.yaml b/roles/openshift_metrics/defaults/main.yaml
index edaa7d0df..db4a0e1fc 100644
--- a/roles/openshift_metrics/defaults/main.yaml
+++ b/roles/openshift_metrics/defaults/main.yaml
@@ -16,8 +16,8 @@ openshift_metrics_hawkular_ca: ""
openshift_metrics_hawkular_nodeselector: ""
openshift_metrics_cassandra_replicas: 1
-openshift_metrics_cassandra_storage_type: emptydir
-openshift_metrics_cassandra_pvc_size: 10Gi
+openshift_metrics_cassandra_storage_type: "{{ openshift_hosted_metrics_storage_kind | default('emptydir') }}"
+openshift_metrics_cassandra_pvc_size: "{{ openshift_hosted_metrics_storage_volume_size | default('10Gi') }}"
openshift_metrics_cassandra_limits_memory: 2G
openshift_metrics_cassandra_limits_cpu: null
openshift_metrics_cassandra_requests_memory: 1G
@@ -46,7 +46,8 @@ openshift_metrics_master_url: https://kubernetes.default.svc.cluster.local
openshift_metrics_node_id: nodename
openshift_metrics_project: openshift-infra
-openshift_metrics_cassandra_pvc_prefix: metrics-cassandra
+openshift_metrics_cassandra_pvc_prefix: "{{ openshift_hosted_metrics_storage_volume_name | default('metrics-cassandra') }}"
+openshift_metrics_cassandra_pvc_access: "{{ openshift_hosted_metrics_storage_access_modes | default('ReadWriteOnce') }}"
openshift_metrics_hawkular_user_write_access: False
diff --git a/roles/openshift_metrics/tasks/install_cassandra.yaml b/roles/openshift_metrics/tasks/install_cassandra.yaml
index df39c1e1f..66c81562b 100644
--- a/roles/openshift_metrics/tasks/install_cassandra.yaml
+++ b/roles/openshift_metrics/tasks/install_cassandra.yaml
@@ -31,10 +31,12 @@
labels:
metrics-infra: hawkular-cassandra
access_modes:
- - ReadWriteOnce
+ - "{{ openshift_metrics_cassandra_pvc_access }}"
size: "{{ openshift_metrics_cassandra_pvc_size }}"
with_sequence: count={{ openshift_metrics_cassandra_replicas }}
- when: openshift_metrics_cassandra_storage_type == 'pv'
+ when:
+ - openshift_metrics_cassandra_storage_type != 'emptydir'
+ - openshift_metrics_cassandra_storage_type != 'dynamic'
changed_when: false
- name: generate hawkular-cassandra persistent volume claims (dynamic)
@@ -48,7 +50,7 @@
annotations:
volume.alpha.kubernetes.io/storage-class: dynamic
access_modes:
- - ReadWriteOnce
+ - "{{ openshift_metrics_cassandra_pvc_access }}"
size: "{{ openshift_metrics_cassandra_pvc_size }}"
with_sequence: count={{ openshift_metrics_cassandra_replicas }}
when: openshift_metrics_cassandra_storage_type == 'dynamic'
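
The static-claim guard is inverted: instead of running only for storage_type == 'pv', it now runs for any value that is neither emptydir nor dynamic, so a kind inherited from openshift_hosted_metrics_storage_kind also gets a claim, while dynamic provisioning keeps its own annotated-PVC task. A small sketch of which branch each value takes under the new conditions (nfs is just an example of a kind not named in the hunk):

def cassandra_pvc_branch(storage_type):
    # Mirrors the two 'when:' blocks above.
    if storage_type == "dynamic":
        return "dynamic PVC (storage-class annotation)"
    if storage_type != "emptydir":
        return "static PVC"
    return "no PVC (emptydir)"

for kind in ("emptydir", "dynamic", "pv", "nfs"):
    print(kind, "->", cassandra_pvc_branch(kind))
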