286 files changed, 7271 insertions, 1878 deletions
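Among the headline changes in this patch set (see the hack/build-images.sh and hack/push-release.sh hunks below) is support for building and pushing the origin-ansible image with several tags in a single run. As an illustrative sketch only, not part of the patch itself, the updated scripts could be driven like this, assuming they are invoked from the repository root with the default image prefix:

    # build the installer image with two tags, then push both tags to a registry
    ./hack/build-images.sh --version=latest,v3.6
    OS_PUSH_BASE_REGISTRY="docker.io/" OS_PUSH_TAG="latest,v3.6" ./hack/push-release.sh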
diff --git a/.redhat-ci.inventory b/.papr.inventory index 23bc9923c..23bc9923c 100644 --- a/.redhat-ci.inventory +++ b/.papr.inventory diff --git a/.redhat-ci.sh b/.papr.sh index fce8c1d52..decca625f 100755 --- a/.redhat-ci.sh +++ b/.papr.sh @@ -1,10 +1,12 @@ #!/bin/bash set -xeuo pipefail +echo "Targeting OpenShift Origin $OPENSHIFT_IMAGE_TAG" + pip install -r requirements.txt # ping the nodes to check they're responding and register their ostree versions -ansible -vvv -i .redhat-ci.inventory nodes -a 'rpm-ostree status' +ansible -vvv -i .papr.inventory nodes -a 'rpm-ostree status' upload_journals() { mkdir journals @@ -16,7 +18,9 @@ upload_journals() { trap upload_journals ERR # run the actual installer -ansible-playbook -vvv -i .redhat-ci.inventory playbooks/byo/config.yml +# FIXME: override openshift_image_tag defined in the inventory until +# https://github.com/openshift/openshift-ansible/issues/4478 is fixed. +ansible-playbook -vvv -i .papr.inventory playbooks/byo/config.yml -e "openshift_image_tag=$OPENSHIFT_IMAGE_TAG" # run a small subset of origin conformance tests to sanity # check the cluster NB: we run it on the master since we may diff --git a/.papr.yml b/.papr.yml new file mode 100644 index 000000000..16d6e78b1 --- /dev/null +++ b/.papr.yml @@ -0,0 +1,42 @@ +--- + +# This YAML file is used by PAPR. It details the test +# environment to provision and the test procedure. For more +# information on PAPR, see: +# +# https://github.com/projectatomic/papr +# +# The PAPR YAML specification detailing allowed fields can +# be found at: +# +# https://github.com/projectatomic/papr/blob/master/sample.papr.yml + +cluster: + hosts: + - name: ocp-master + distro: fedora/25/atomic + - name: ocp-node1 + distro: fedora/25/atomic + - name: ocp-node2 + distro: fedora/25/atomic + container: + image: fedora:25 + +packages: + - gcc + - python-pip + - python-devel + - libffi-devel + - openssl-devel + - redhat-rpm-config + +context: 'fedora/25/atomic' + +env: + OPENSHIFT_IMAGE_TAG: v3.6.0-alpha.1 + +tests: + - ./.papr.sh + +artifacts: + - journals/ diff --git a/.redhat-ci.yml b/.redhat-ci.yml deleted file mode 100644 index 6dac7b256..000000000 --- a/.redhat-ci.yml +++ /dev/null @@ -1,30 +0,0 @@ ---- - -cluster: - hosts: - - name: ocp-master - distro: fedora/25/atomic - - name: ocp-node1 - distro: fedora/25/atomic - - name: ocp-node2 - distro: fedora/25/atomic - container: - image: fedora:25 - -packages: - - gcc - - python-pip - - python-devel - - openssl-devel - - redhat-rpm-config - -context: 'fedora/25/atomic | origin/v3.6.0-alpha.1' - -env: - OPENSHIFT_IMAGE_TAG: v3.6.0-alpha.1 - -tests: - - ./.redhat-ci.sh - -artifacts: - - journals/ diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible index c21e58493..a83752c29 100644 --- a/.tito/packages/openshift-ansible +++ b/.tito/packages/openshift-ansible @@ -1 +1 @@ -3.6.99-1 ./ +3.6.123-1 ./ diff --git a/ansible.cfg b/ansible.cfg index 034733684..0c74d63da 100644 --- a/ansible.cfg +++ b/ansible.cfg @@ -14,6 +14,7 @@ callback_plugins = callback_plugins/ forks = 20 host_key_checking = False retry_files_enabled = False +retry_files_save_path = ~/ansible-installer-retries nocows = True # Uncomment to use the provided BYO inventory diff --git a/docs/pull_requests.md b/docs/pull_requests.md index fcc3e275c..45ae01a9d 100644 --- a/docs/pull_requests.md +++ b/docs/pull_requests.md @@ -10,8 +10,8 @@ Whenever a [Pull Request is opened](../CONTRIBUTING.md#submitting-contributions), some automated test jobs must be successfully run before 
the PR can be merged. -Some of these jobs are automatically triggered, e.g., Travis and Coveralls. -Other jobs need to be manually triggered by a member of the +Some of these jobs are automatically triggered, e.g., Travis, PAPR, and +Coveralls. Other jobs need to be manually triggered by a member of the [Team OpenShift Ansible Contributors](https://github.com/orgs/openshift/teams/team-openshift-ansible-contributors). ## Triggering tests @@ -48,9 +48,9 @@ simplifying the workflow towards a single infrastructure in the future. There are a set of tests that run on Fedora infrastructure. They are started automatically with every pull request. -They are implemented using the [`redhat-ci` framework](https://github.com/jlebon/redhat-ci). +They are implemented using the [`PAPR` framework](https://github.com/projectatomic/papr). -To re-run tests, write a comment containing `bot, retest this please`. +To re-run tests, write a comment containing only `bot, retest this please`. ## Triggering merge diff --git a/docs/repo_structure.md b/docs/repo_structure.md index 693837fba..f598f22c3 100644 --- a/docs/repo_structure.md +++ b/docs/repo_structure.md @@ -52,3 +52,16 @@ These are plugins used in playbooks and roles: . └── test Contains tests. ``` + +### CI + +These files are used by [PAPR](https://github.com/projectatomic/papr), +It is very similar in workflow to Travis, with the test +environment and test scripts defined in a YAML file. + +``` +. +├── .papr.yml +├── .papr.sh +└── .papr.inventory +``` diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py index 8b279981d..cff9f8a60 100644 --- a/filter_plugins/oo_filters.py +++ b/filter_plugins/oo_filters.py @@ -715,7 +715,7 @@ def oo_openshift_env(hostvars): return facts -# pylint: disable=too-many-branches, too-many-nested-blocks +# pylint: disable=too-many-branches, too-many-nested-blocks, too-many-statements def oo_persistent_volumes(hostvars, groups, persistent_volumes=None): """ Generate list of persistent volumes based on oo_openshift_env storage options set in host variables. 
@@ -747,10 +747,15 @@ def oo_persistent_volumes(hostvars, groups, persistent_volumes=None): volume = params['volume']['name'] path = directory + '/' + volume size = params['volume']['size'] + if 'labels' in params: + labels = params['labels'] + else: + labels = dict() access_modes = params['access']['modes'] persistent_volume = dict( name="{0}-volume".format(volume), capacity=size, + labels=labels, access_modes=access_modes, storage=dict( nfs=dict( @@ -760,12 +765,17 @@ def oo_persistent_volumes(hostvars, groups, persistent_volumes=None): elif kind == 'openstack': volume = params['volume']['name'] size = params['volume']['size'] + if 'labels' in params: + labels = params['labels'] + else: + labels = dict() access_modes = params['access']['modes'] filesystem = params['openstack']['filesystem'] volume_id = params['openstack']['volumeID'] persistent_volume = dict( name="{0}-volume".format(volume), capacity=size, + labels=labels, access_modes=access_modes, storage=dict( cinder=dict( @@ -775,6 +785,10 @@ def oo_persistent_volumes(hostvars, groups, persistent_volumes=None): elif kind == 'glusterfs': volume = params['volume']['name'] size = params['volume']['size'] + if 'labels' in params: + labels = params['labels'] + else: + labels = dict() access_modes = params['access']['modes'] endpoints = params['glusterfs']['endpoints'] path = params['glusterfs']['path'] @@ -782,6 +796,7 @@ def oo_persistent_volumes(hostvars, groups, persistent_volumes=None): persistent_volume = dict( name="{0}-volume".format(volume), capacity=size, + labels=labels, access_modes=access_modes, storage=dict( glusterfs=dict( diff --git a/hack/build-images.sh b/hack/build-images.sh index ce421178f..6e6d360bf 100755 --- a/hack/build-images.sh +++ b/hack/build-images.sh @@ -47,7 +47,7 @@ if [ "$help" = true ]; then echo " default: openshift/origin-ansible" echo echo " --version=VERSION" - echo " The version used to tag the image" + echo " The version used to tag the image (can be a comma-separated list)" echo " default: latest" echo echo " --no-cache" @@ -62,25 +62,33 @@ if [ "$help" = true ]; then exit 0 fi + if [ "$verbose" = true ]; then set -x fi BUILD_STARTTIME=$(date +%s) comp_path=$source_root/ -docker_tag=${prefix}:${version} + +# turn comma-separated versions into -t args for docker build +IFS=',' read -r -a version_arr <<< "$version" +docker_tags=() +for tag in "${version_arr[@]}"; do + docker_tags+=("-t" "${prefix}:${tag}") +done + echo echo -echo "--- Building component '$comp_path' with docker tag '$docker_tag' ---" -docker build ${options} -t $docker_tag $comp_path -BUILD_ENDTIME=$(date +%s); echo "--- $docker_tag took $(($BUILD_ENDTIME - $BUILD_STARTTIME)) seconds ---" +echo "--- Building component '$comp_path' with docker tag(s) '$version' ---" +docker build ${options} "${docker_tags[@]}" $comp_path +BUILD_ENDTIME=$(date +%s); echo "--- ${version} took $(($BUILD_ENDTIME - $BUILD_STARTTIME)) seconds ---" echo echo echo echo echo "++ Active images" -docker images | grep ${prefix} | grep ${version} | sort +docker images | grep ${prefix} | sort echo diff --git a/hack/push-release.sh b/hack/push-release.sh index 131ed83ca..1f41ab179 100755 --- a/hack/push-release.sh +++ b/hack/push-release.sh @@ -1,55 +1,41 @@ #!/bin/bash -# This script pushes all of the built images to a registry. +# This script pushes a built image to a registry. # -# Set OS_PUSH_BASE_REGISTRY to prefix the destination images +# Set OS_PUSH_BASE_REGISTRY to prefix the destination images e.g. 
+# OS_PUSH_BASE_REGISTRY="docker.io/" # +# Set OS_PUSH_TAG with a comma-separated list for pushing same image +# to multiple tags e.g. +# OS_PUSH_TAG="latest,v3.6" set -o errexit set -o nounset set -o pipefail -STARTTIME=$(date +%s) -OS_ROOT=$(dirname "${BASH_SOURCE}")/.. +starttime=$(date +%s) -PREFIX="${PREFIX:-openshift/origin-ansible}" +# image name without repo or tag. +image="${PREFIX:-openshift/origin-ansible}" -# Go to the top of the tree. -cd "${OS_ROOT}" +# existing local tag on the image we want to push +source_tag="${OS_TAG:-latest}" -# Allow a release to be repushed with a tag -tag="${OS_PUSH_TAG:-}" -if [[ -n "${tag}" ]]; then - tag=":${tag}" -else - tag=":latest" -fi - -# Source tag -source_tag="${OS_TAG:-}" -if [[ -z "${source_tag}" ]]; then - source_tag="latest" -fi - -images=( - ${PREFIX} -) +# Enable retagging a build with one or more tags for push +IFS=',' read -r -a push_tags <<< "${OS_PUSH_TAG:-latest}" +registry="${OS_PUSH_BASE_REGISTRY:-}" +# force push if available PUSH_OPTS="" if docker push --help | grep -q force; then PUSH_OPTS="--force" fi -if [[ "${OS_PUSH_BASE_REGISTRY-}" != "" || "${tag}" != "" ]]; then - set -e - for image in "${images[@]}"; do - docker tag "${image}:${source_tag}" "${OS_PUSH_BASE_REGISTRY-}${image}${tag}" - done - set +e -fi - -for image in "${images[@]}"; do - docker push ${PUSH_OPTS} "${OS_PUSH_BASE_REGISTRY-}${image}${tag}" +set -x +for tag in "${push_tags[@]}"; do + docker tag "${image}:${source_tag}" "${registry}${image}:${tag}" + docker push ${PUSH_OPTS} "${registry}${image}:${tag}" done +set +x -ret=$?; ENDTIME=$(date +%s); echo "$0 took $(($ENDTIME - $STARTTIME)) seconds"; exit "$ret" +endtime=$(date +%s); echo "$0 took $(($endtime - $starttime)) seconds"; exit 0 diff --git a/images/installer/system-container/README.md b/images/installer/system-container/README.md index dc95307e5..fbcd47c4a 100644 --- a/images/installer/system-container/README.md +++ b/images/installer/system-container/README.md @@ -11,3 +11,21 @@ These files are needed to run the installer using an [Atomic System container](h * service.template - Template file for the systemd service. * tmpfiles.template - Template file for systemd-tmpfiles. + +## Options + +These options may be set via the ``atomic`` ``--set`` flag. For defaults see ``root/exports/manifest.json`` + +* OPTS - Additional options to pass to ansible when running the installer + +* VAR_LIB_OPENSHIFT_INSTALLER - Full path of the installer code to mount into the container + +* VAR_LOG_OPENSHIFT_LOG - Full path of the log file to mount into the container + +* PLAYBOOK_FILE - Full path of the playbook inside the container + +* HOME_ROOT - Full path on host to mount as the root home directory inside the container (for .ssh/, etc..) 
+ +* ANSIBLE_CONFIG - Full path for the ansible configuration file to use inside the container + +* INVENTORY_FILE - Full path for the inventory to use from the host diff --git a/images/installer/system-container/root/exports/config.json.template b/images/installer/system-container/root/exports/config.json.template index 383e3696e..739c0080f 100644 --- a/images/installer/system-container/root/exports/config.json.template +++ b/images/installer/system-container/root/exports/config.json.template @@ -21,7 +21,8 @@ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "TERM=xterm", "OPTS=$OPTS", - "PLAYBOOK_FILE=$PLAYBOOK_FILE" + "PLAYBOOK_FILE=$PLAYBOOK_FILE", + "ANSIBLE_CONFIG=$ANSIBLE_CONFIG" ], "cwd": "/opt/app-root/src/", "rlimits": [ @@ -102,7 +103,7 @@ }, { "type": "bind", - "source": "$SSH_ROOT", + "source": "$HOME_ROOT/.ssh", "destination": "/opt/app-root/src/.ssh", "options": [ "bind", @@ -112,8 +113,8 @@ }, { "type": "bind", - "source": "$SSH_ROOT", - "destination": "/root/.ssh", + "source": "$HOME_ROOT", + "destination": "/root", "options": [ "bind", "rw", @@ -171,6 +172,16 @@ ] }, { + "destination": "/etc/resolv.conf", + "type": "bind", + "source": "/etc/resolv.conf", + "options": [ + "ro", + "rbind", + "rprivate" + ] + }, + { "destination": "/sys/fs/cgroup", "type": "cgroup", "source": "cgroup", diff --git a/images/installer/system-container/root/exports/manifest.json b/images/installer/system-container/root/exports/manifest.json index 1db845965..8b984d7a3 100644 --- a/images/installer/system-container/root/exports/manifest.json +++ b/images/installer/system-container/root/exports/manifest.json @@ -5,7 +5,8 @@ "VAR_LIB_OPENSHIFT_INSTALLER" : "/var/lib/openshift-installer", "VAR_LOG_OPENSHIFT_LOG": "/var/log/ansible.log", "PLAYBOOK_FILE": "/usr/share/ansible/openshift-ansible/playbooks/byo/config.yml", - "SSH_ROOT": "/root/.ssh", + "HOME_ROOT": "/root", + "ANSIBLE_CONFIG": "/usr/share/atomic-openshift-utils/ansible.cfg", "INVENTORY_FILE": "/dev/null" } } diff --git a/inventory/byo/hosts.byo.native-glusterfs.example b/inventory/byo/hosts.byo.native-glusterfs.example index 2dbb57d40..dc847a5b2 100644 --- a/inventory/byo/hosts.byo.native-glusterfs.example +++ b/inventory/byo/hosts.byo.native-glusterfs.example @@ -24,7 +24,7 @@ glusterfs [OSEv3:vars] ansible_ssh_user=root -deployment_type=origin +openshift_deployment_type=origin # Specify that we want to use GlusterFS storage for a hosted registry openshift_hosted_registry_storage_kind=glusterfs diff --git a/inventory/byo/hosts.origin.example b/inventory/byo/hosts.origin.example index b2490638b..b38c6e6b6 100644 --- a/inventory/byo/hosts.origin.example +++ b/inventory/byo/hosts.origin.example @@ -42,6 +42,17 @@ openshift_release=v3.6 # This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up. 
#openshift_pkg_version=-3.6.0 +# This enables all the system containers except for docker: +#openshift_use_system_containers=False +# +# But you can choose separately each component that must be a +# system container: +# +#openshift_use_openvswitch_system_container=False +#openshift_use_node_system_container=False +#openshift_use_master_system_container=False +#openshift_use_etcd_system_container=False + # Install the openshift examples #openshift_install_examples=true @@ -191,6 +202,13 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # or #openshift_master_request_header_ca_file=<path to local ca file to use> +# CloudForms Management Engine (ManageIQ) App Install +# +# Enables installation of MIQ server. Recommended for dedicated +# clusters only. See roles/openshift_cfme/README.md for instructions +# and requirements. +#openshift_cfme_install_app=False + # Cloud Provider Configuration # # Note: You may make use of environment variables rather than store @@ -501,6 +519,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', #openshift_hosted_metrics_storage_nfs_options='*(rw,root_squash)' #openshift_hosted_metrics_storage_volume_name=metrics #openshift_hosted_metrics_storage_volume_size=10Gi +#openshift_hosted_metrics_storage_labels={'storage': 'metrics'} # # Option B - External NFS Host # NFS volume must already exist with path "nfs_directory/_volume_name" on @@ -512,6 +531,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', #openshift_hosted_metrics_storage_nfs_directory=/exports #openshift_hosted_metrics_storage_volume_name=metrics #openshift_hosted_metrics_storage_volume_size=10Gi +#openshift_hosted_metrics_storage_labels={'storage': 'metrics'} # # Option C - Dynamic -- If openshift supports dynamic volume provisioning for # your cloud platform use this. @@ -545,6 +565,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', #openshift_hosted_logging_storage_nfs_options='*(rw,root_squash)' #openshift_hosted_logging_storage_volume_name=logging #openshift_hosted_logging_storage_volume_size=10Gi +#openshift_hosted_logging_storage_labels={'storage': 'logging'} # # Option B - External NFS Host # NFS volume must already exist with path "nfs_directory/_volume_name" on @@ -556,6 +577,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', #openshift_hosted_logging_storage_nfs_directory=/exports #openshift_hosted_logging_storage_volume_name=logging #openshift_hosted_logging_storage_volume_size=10Gi +#openshift_hosted_logging_storage_labels={'storage': 'logging'} # # Option C - Dynamic -- If openshift supports dynamic volume provisioning for # your cloud platform use this. @@ -782,6 +804,12 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # Enable API service auditing, available as of 1.3 #openshift_master_audit_config={"enabled": true} +# +# In case you want more advanced setup for the auditlog you can +# use this line. 
+# The directory in "auditFilePath" will be created if it's not +# exist +#openshift_master_audit_config={"enabled": true, "auditFilePath": "/var/log/openpaas-oscp-audit/openpaas-oscp-audit.log", "maximumFileRetentionDays": 14, "maximumFileSizeMegabytes": 500, "maximumRetainedFiles": 5} # Enable origin repos that point at Centos PAAS SIG, defaults to true, only used # by deployment_type=origin @@ -798,6 +826,9 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # Controls validity for etcd CA, peer, server and client certificates. # #etcd_ca_default_days=1825 +# +# ServiceAccountConfig:LimitSecretRefences rejects pods that reference secrets their service accounts do not reference +# openshift_master_saconfig_limitsecretreferences=false # Upgrade Control # diff --git a/inventory/byo/hosts.ose.example b/inventory/byo/hosts.ose.example index 67d53b22d..e5e9c7342 100644 --- a/inventory/byo/hosts.ose.example +++ b/inventory/byo/hosts.ose.example @@ -42,6 +42,17 @@ openshift_release=v3.6 # This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up. #openshift_pkg_version=-3.6.0 +# This enables all the system containers except for docker: +#openshift_use_system_containers=False +# +# But you can choose separately each component that must be a +# system container: +# +#openshift_use_openvswitch_system_container=False +#openshift_use_node_system_container=False +#openshift_use_master_system_container=False +#openshift_use_etcd_system_container=False + # Install the openshift examples #openshift_install_examples=true @@ -190,6 +201,13 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # or #openshift_master_request_header_ca_file=<path to local ca file to use> +# CloudForms Management Engine (ManageIQ) App Install +# +# Enables installation of MIQ server. Recommended for dedicated +# clusters only. See roles/openshift_cfme/README.md for instructions +# and requirements. +#openshift_cfme_install_app=False + # Cloud Provider Configuration # # Note: You may make use of environment variables rather than store @@ -501,6 +519,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', #openshift_hosted_metrics_storage_nfs_options='*(rw,root_squash)' #openshift_hosted_metrics_storage_volume_name=metrics #openshift_hosted_metrics_storage_volume_size=10Gi +#openshift_hosted_metrics_storage_labels={'storage': 'metrics'} # # Option B - External NFS Host # NFS volume must already exist with path "nfs_directory/_volume_name" on @@ -512,6 +531,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', #openshift_hosted_metrics_storage_nfs_directory=/exports #openshift_hosted_metrics_storage_volume_name=metrics #openshift_hosted_metrics_storage_volume_size=10Gi +#openshift_hosted_metrics_storage_labels={'storage': 'metrics'} # # Option C - Dynamic -- If openshift supports dynamic volume provisioning for # your cloud platform use this. 
@@ -545,6 +565,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', #openshift_hosted_logging_storage_nfs_options='*(rw,root_squash)' #openshift_hosted_logging_storage_volume_name=logging #openshift_hosted_logging_storage_volume_size=10Gi +#openshift_hosted_logging_storage_labels={'storage': 'logging'} # # Option B - External NFS Host # NFS volume must already exist with path "nfs_directory/_volume_name" on @@ -556,6 +577,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', #openshift_hosted_logging_storage_nfs_directory=/exports #openshift_hosted_logging_storage_volume_name=logging #openshift_hosted_logging_storage_volume_size=10Gi +#openshift_hosted_logging_storage_labels={'storage': 'logging'} # # Option C - Dynamic -- If openshift supports dynamic volume provisioning for # your cloud platform use this. @@ -782,6 +804,12 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # Enable API service auditing, available as of 3.2 #openshift_master_audit_config={"enabled": true} +# +# In case you want more advanced setup for the auditlog you can +# use this line. +# The directory in "auditFilePath" will be created if it's not +# exist +#openshift_master_audit_config={"enabled": true, "auditFilePath": "/var/log/openpaas-oscp-audit/openpaas-oscp-audit.log", "maximumFileRetentionDays": 14, "maximumFileSizeMegabytes": 500, "maximumRetainedFiles": 5} # Validity of the auto-generated OpenShift certificates in days. # See also openshift_hosted_registry_cert_expire_days above. @@ -794,6 +822,9 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # Controls validity for etcd CA, peer, server and client certificates. # #etcd_ca_default_days=1825 +# +# ServiceAccountConfig:LimitSecretRefences rejects pods that reference secrets their service accounts do not reference +# openshift_master_saconfig_limitsecretreferences=false # Upgrade Control # diff --git a/openshift-ansible.spec b/openshift-ansible.spec index 729fe4441..1fcc9990c 100644 --- a/openshift-ansible.spec +++ b/openshift-ansible.spec @@ -9,7 +9,7 @@ %global __requires_exclude ^/usr/bin/ansible-playbook$ Name: openshift-ansible -Version: 3.6.99 +Version: 3.6.123.1000 Release: 1%{?dist} Summary: Openshift and Atomic Enterprise Ansible License: ASL 2.0 @@ -280,6 +280,178 @@ Atomic OpenShift Utilities includes %changelog +* Fri Jun 23 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.123-1 +- releases: enable build/push with multiple tags (lmeyer@redhat.com) +- Update template examples for 3.6 (rteague@redhat.com) +- Reverting v prefix introduced by stagecut (smunilla@redhat.com) +- Fixed readme doc. 
(kwoodson@redhat.com) +- Adding version field for stagecut (smunilla@redhat.com) +- Remove package_update from install playbook (rhcarvalho@gmail.com) +- Restart NetworkManager only if dnsmasq was used + (bliemli@users.noreply.github.com) +- remove extra close brace in example inventory (gpei@redhat.com) +- Adding option for serviceAccountConfig.limitSecretReferences + (kwoodson@redhat.com) +- doc: Add system_container examples to inventory (smilner@redhat.com) +- system_containers: Add openshift_ to other system_container vars + (smilner@redhat.com) +- system_containers: Add openshift_ to use_system_containers var + (smilner@redhat.com) +- detect etcd service name based on etcd runtime when restarting + (jchaloup@redhat.com) +- set proper etcd_data_dir for system container (jchaloup@redhat.com) +- etcd, system_container: do not mask etcd_container (gscrivan@redhat.com) +- etcd, system_container: do not enable system etcd (gscrivan@redhat.com) +- oc_atomic_container: Require 1.17.2 (smilner@redhat.com) +- Verify matched openshift_upgrade_nodes_label (rteague@redhat.com) +- bug 1457642. Use same SG index to avoid seeding timeout (jcantril@redhat.com) + +* Wed Jun 21 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.122-1 +- + +* Tue Jun 20 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.121-1 +- Updating default from null to "" (ewolinet@redhat.com) + +* Tue Jun 20 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.120-1 +- Update atomic-openshift-master.j2 (sdodson@redhat.com) +- Enable push to registry via dns only on clean 3.6 installs + (sdodson@redhat.com) +- Disable actually pushing to the registry via dns for now (sdodson@redhat.com) +- Add openshift_node_dnsmasq role to upgrade (sdodson@redhat.com) +- Push to the registry via dns (sdodson@redhat.com) + +* Tue Jun 20 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.119-1 +- Temporarilly only migrate jobs as we were before (sdodson@redhat.com) +- Disable TLS verification in skopeo inspect (rhcarvalho@gmail.com) +- Preserve etcd3 storage if it's already in use (sdodson@redhat.com) +- GlusterFS: Generate better secret keys (jarrpa@redhat.com) +- GlusterFS: Fix error when groups.glusterfs_registry is undefined. + (jarrpa@redhat.com) +- GlusterFS: Use proper identity in heketi secret (jarrpa@redhat.com) +- GlusterFS: Allow configuration of heketi port (jarrpa@redhat.com) +- GlusterFS: Fix variable typo (jarrpa@redhat.com) +- GlusterFS: Minor template fixes (jarrpa@redhat.com) +- registry: mount GlusterFS storage volume from correct host + (jarrpa@redhat.com) + +* Mon Jun 19 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.117-1 +- Run storage upgrade pre and post master upgrade (rteague@redhat.com) +- Introduce etcd migrate role (jchaloup@redhat.com) +- Add support for rhel, aci, vxlan (srampal@cisco.com) + +* Sun Jun 18 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.116-1 +- PAPR: define openshift_image_tag via command line (rhcarvalho@gmail.com) +- Ensure only one ES pod per PV (peter.portante@redhat.com) +- etcd v3 for clean installs (sdodson@redhat.com) +- Rename cockpit-shell -> cockpit-system (rhcarvalho@gmail.com) +- Update image repo name, images have been moved from 'cloudforms' to + 'cloudforms42' for CF 4.2. (simaishi@redhat.com) +- Update image repo name, images have been moved from 'cloudforms' to + 'cloudforms45' for CF 4.5. 
(simaishi@redhat.com) +- CloudForms 4.5 templates (simaishi@redhat.com) + +* Fri Jun 16 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.114-1 +- + +* Fri Jun 16 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.113-1 +- Make rollout status check best-effort, add poll (skuznets@redhat.com) +- Verify the rollout status of the hosted router and registry + (skuznets@redhat.com) +- fix es routes for new logging roles (rmeggins@redhat.com) + +* Thu Jun 15 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.112-1 +- Add the the other featured audit-config paramters as example (al- + git001@none.at) + +* Thu Jun 15 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.111-1 +- doc: Info for system container installer options (smilner@redhat.com) +- Add ANSIBLE_CONFIG to system container installer (smilner@redhat.com) +- Add missing file. Remove debugging prompt. (tbielawa@redhat.com) +- Update readme one last time (tbielawa@redhat.com) +- Reconfigure masters in serial to avoid HA meltdowns (tbielawa@redhat.com) +- First POC of a CFME turnkey solution in openshift-anisble + (tbielawa@redhat.com) +- Reverted most of this pr 4356 except: adding + openshift_logging_fluentd_buffer_queue_limit: 1024 + openshift_logging_fluentd_buffer_size_limit: 1m + openshift_logging_mux_buffer_queue_limit: 1024 + openshift_logging_mux_buffer_size_limit: 1m and setting the matched + environment variables. (nhosoi@redhat.com) +- Adding the defaults for openshift_logging_fluentd_{cpu,memory}_limit to + roles/openshift_logging_fluentd/defaults/main.yml. (nhosoi@redhat.com) +- Adding environment variables FLUENTD_CPU_LIMIT, FLUENTD_MEMORY_LIMIT, + MUX_CPU_LIMIT, MUX_MEMORY_LIMIT. (nhosoi@redhat.com) +- Introducing fluentd/mux buffer_queue_limit, buffer_size_limit, cpu_limit, and + memory_limit. 
(nhosoi@redhat.com) + +* Thu Jun 15 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.110-1 +- papr: add documentation to YAML and simplify context (jlebon@redhat.com) +- docs: better documentation for PAPR (jlebon@redhat.com) +- papr: install libffi-devel (jlebon@redhat.com) +- pre-install checks: add more during byo install (lmeyer@redhat.com) +- move etcd backup to etcd_common role (jchaloup@redhat.com) +- Support installing HOSA via ansible (mwringe@redhat.com) +- GlusterFS: Remove requirement for heketi-cli (jarrpa@redhat.com) +- GlusterFS: Fix bugs in wipe (jarrpa@redhat.com) +- GlusterFS: Skip heketi-cli install on Atomic (jarrpa@redhat.com) +- GlusterFS: Create a StorageClass if specified (jarrpa@redhat.com) +- GlusterFS: Use proper secrets (jarrpa@redhat.com) +- GlusterFS: Allow cleaner separation of multiple clusters (jarrpa@redhat.com) +- GlusterFS: Minor corrections and cleanups (jarrpa@redhat.com) +- GlusterFS: Improve documentation (jarrpa@redhat.com) +- GlusterFS: Allow configuration of kube namespace for heketi + (jarrpa@redhat.com) +- GlusterFS: Adjust when clauses for registry config (jarrpa@redhat.com) +- GlusterFS: Allow failure reporting when deleting deploy-heketi + (jarrpa@redhat.com) +- GlusterFS: Tweak pod probe parameters (jarrpa@redhat.com) +- GlusterFS: Allow for configuration of node selector (jarrpa@redhat.com) +- GlusterFS: Label on Openshift node name (jarrpa@redhat.com) +- GlusterFS: Make sure timeout is an int (jarrpa@redhat.com) +- GlusterFS: Use groups variables (jarrpa@redhat.com) +- papr: rename redhat-ci related files to papr (jlebon@redhat.com) +- singletonize some role tasks that repeat a lot (lmeyer@redhat.com) + +* Wed Jun 14 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.109-1 +- + +* Wed Jun 14 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.108-1 +- Upgraded Calico to 2.2.1 Release (vincent.schwarzer@yahoo.de) + +* Wed Jun 14 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.107-1 +- Disable negative caching, set cache TTL to 1s (skuznets@redhat.com) +- Update mounts in system container installer (smilner@redhat.com) +- Set ansible retry file location (smilner@redhat.com) +- installer: add bind mount for /etc/resolv.conf (gscrivan@redhat.com) +- Making pylint happy (ewolinet@redhat.com) +- Fix possible access to undefined variable (rhcarvalho@gmail.com) +- certificates: copy the certificates for the etcd system container + (gscrivan@redhat.com) +- Separate etcd and OpenShift CA redeploy playbooks. 
(abutcher@redhat.com) +- lib/base: allow for results parsing on non-zero return code + (jarrpa@redhat.com) +- etcd: system container defines ETCD_(PEER_)?TRUSTED_CA_FILE + (gscrivan@redhat.com) +- etcd: unmask system container service before installing it + (gscrivan@redhat.com) +- etcd: copy previous database when migrating to system container + (gscrivan@redhat.com) +- etcd: define data dir location for the system container (gscrivan@redhat.com) +- oc_obj: set _delete() rc to 0 if err is 'not found' (jarrpa@redhat.com) +- oc_obj: only check 'items' if exists in delete (jarrpa@redhat.com) +- Removed hardocded Calico Policy Controller URL (vincent.schwarzer@yahoo.de) +- Allowing openshift_metrics to specify PV selectors and allow way to define + selectors when creating pv (ewolinet@redhat.com) + +* Tue Jun 13 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.100-1 +- Change default key for gce (hekumar@redhat.com) +- set etcd working directory for embedded etcd (jchaloup@redhat.com) +- Add daemon-reload handler to openshift_node and notify when /etc/systemd + files have been updated. (abutcher@redhat.com) +- Use volume.beta.kubernetes.io annotation for storage-classes + (per.carlson@vegvesen.no) +- Correct master-config update during upgrade (rteague@redhat.com) + * Mon Jun 12 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.99-1 - Replace repoquery with module (jchaloup@redhat.com) - Consider previous value of 'changed' when updating (rhcarvalho@gmail.com) diff --git a/playbooks/adhoc/contiv/delete_contiv.yml b/playbooks/adhoc/contiv/delete_contiv.yml index 91948c72e..eec6c23a7 100644 --- a/playbooks/adhoc/contiv/delete_contiv.yml +++ b/playbooks/adhoc/contiv/delete_contiv.yml @@ -1,5 +1,5 @@ --- -- name: delete contiv +- name: Uninstall contiv hosts: all gather_facts: False tasks: diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml index 97d835eae..ddd2ecebd 100644 --- a/playbooks/adhoc/uninstall.yml +++ b/playbooks/adhoc/uninstall.yml @@ -103,7 +103,7 @@ - atomic-openshift-sdn-ovs - cockpit-bridge - cockpit-docker - - cockpit-shell + - cockpit-system - cockpit-ws - kubernetes-client - openshift @@ -317,6 +317,7 @@ - name: restart NetworkManager service: name=NetworkManager state=restarted + when: openshift_use_dnsmasq | default(true) | bool - hosts: masters become: yes @@ -346,7 +347,7 @@ - atomic-openshift-master - cockpit-bridge - cockpit-docker - - cockpit-shell + - cockpit-system - cockpit-ws - corosync - kubernetes-client diff --git a/playbooks/byo/openshift-cfme/config.yml b/playbooks/byo/openshift-cfme/config.yml new file mode 100644 index 000000000..0e8e7a94d --- /dev/null +++ b/playbooks/byo/openshift-cfme/config.yml @@ -0,0 +1,8 @@ +--- +- include: ../openshift-cluster/initialize_groups.yml + tags: + - always + +- include: ../../common/openshift-cluster/evaluate_groups.yml + +- include: ../../common/openshift-cfme/config.yml diff --git a/playbooks/byo/openshift-cfme/uninstall.yml b/playbooks/byo/openshift-cfme/uninstall.yml new file mode 100644 index 000000000..c8ed16859 --- /dev/null +++ b/playbooks/byo/openshift-cfme/uninstall.yml @@ -0,0 +1,6 @@ +--- +# - include: ../openshift-cluster/initialize_groups.yml +# tags: +# - always + +- include: ../../common/openshift-cfme/uninstall.yml diff --git a/playbooks/byo/openshift-cluster/config.yml b/playbooks/byo/openshift-cluster/config.yml index fd4a9eb26..acf5469bf 100644 --- a/playbooks/byo/openshift-cluster/config.yml +++ b/playbooks/byo/openshift-cluster/config.yml @@ -3,19 +3,6 @@ tags: - always -- 
name: Verify Requirements - hosts: OSEv3 - roles: - - openshift_health_checker - vars: - - r_openshift_health_checker_playbook_context: "install" - post_tasks: - - action: openshift_health_check - args: - checks: - - disk_availability - - memory_availability - - include: ../../common/openshift-cluster/std_include.yml tags: - always diff --git a/playbooks/byo/openshift-cluster/redeploy-etcd-ca.yml b/playbooks/byo/openshift-cluster/redeploy-etcd-ca.yml new file mode 100644 index 000000000..29f821eda --- /dev/null +++ b/playbooks/byo/openshift-cluster/redeploy-etcd-ca.yml @@ -0,0 +1,10 @@ +--- +- include: initialize_groups.yml + tags: + - always + +- include: ../../common/openshift-cluster/std_include.yml + tags: + - always + +- include: ../../common/openshift-cluster/redeploy-certificates/etcd-ca.yml diff --git a/playbooks/byo/openshift-cluster/redeploy-openshift-ca.yml b/playbooks/byo/openshift-cluster/redeploy-openshift-ca.yml index 3b33e0d6f..6e11a111b 100644 --- a/playbooks/byo/openshift-cluster/redeploy-openshift-ca.yml +++ b/playbooks/byo/openshift-cluster/redeploy-openshift-ca.yml @@ -7,4 +7,4 @@ tags: - always -- include: ../../common/openshift-cluster/redeploy-certificates/ca.yml +- include: ../../common/openshift-cluster/redeploy-certificates/openshift-ca.yml diff --git a/playbooks/byo/openshift-cluster/service-catalog.yml b/playbooks/byo/openshift-cluster/service-catalog.yml new file mode 100644 index 000000000..a9fc18958 --- /dev/null +++ b/playbooks/byo/openshift-cluster/service-catalog.yml @@ -0,0 +1,12 @@ +--- +# +# This playbook is a preview of upcoming changes for installing +# Hosted logging on. See inventory/byo/hosts.*.example for the +# currently supported method. +# +- include: initialize_groups.yml + +- include: ../../common/openshift-cluster/service_catalog.yml + vars: + openshift_cluster_id: "{{ cluster_id | default('default') }}" + openshift_debug_level: "{{ debug_level | default(2) }}" diff --git a/playbooks/byo/openshift-etcd/migrate.yml b/playbooks/byo/openshift-etcd/migrate.yml new file mode 100644 index 000000000..fd02e066e --- /dev/null +++ b/playbooks/byo/openshift-etcd/migrate.yml @@ -0,0 +1,124 @@ +--- +- include: ../openshift-cluster/initialize_groups.yml + tags: + - always + +- include: ../../common/openshift-cluster/evaluate_groups.yml + tags: + - always + +- name: Run pre-checks + hosts: oo_etcd_to_config + tags: + - always + roles: + - role: etcd_migrate + r_etcd_migrate_action: check + etcd_peer: "{{ ansible_default_ipv4.address }}" + +# TODO(jchaloup): replace the std_include with something minimal so the entire playbook is faster +# e.g. I don't need to detect the OCP version, install deps, etc. 
+- include: ../../common/openshift-cluster/std_include.yml + tags: + - always + +- name: Backup v2 data + hosts: oo_etcd_to_config + gather_facts: no + tags: + - always + roles: + - role: openshift_facts + - role: etcd_common + r_etcd_common_action: backup + r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" + r_etcd_common_backup_tag: pre-migration + r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}" + r_etcd_common_backup_sufix_name: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}" + +- name: Gate on etcd backup + hosts: localhost + connection: local + become: no + tasks: + - set_fact: + etcd_backup_completed: "{{ hostvars + | oo_select_keys(groups.oo_etcd_to_config) + | oo_collect('inventory_hostname', {'r_etcd_common_backup_complete': true}) }}" + - set_fact: + etcd_backup_failed: "{{ groups.oo_etcd_to_config | difference(etcd_backup_completed) }}" + - fail: + msg: "Migration cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}" + when: + - etcd_backup_failed | length > 0 + +- name: Prepare masters for etcd data migration + hosts: oo_masters_to_config + tasks: + - set_fact: + master_services: + - "{{ openshift.common.service_type + '-master' }}" + - set_fact: + master_services: + - "{{ openshift.common.service_type + '-master-controllers' }}" + - "{{ openshift.common.service_type + '-master-api' }}" + when: + - (openshift_master_cluster_method is defined and openshift_master_cluster_method == "native") or openshift.common.is_master_system_container | bool + - debug: + msg: "master service name: {{ master_services }}" + - name: Stop masters + service: + name: "{{ item }}" + state: stopped + with_items: "{{ master_services }}" + +- name: Migrate etcd data from v2 to v3 + hosts: oo_etcd_to_config + gather_facts: no + tags: + - always + roles: + - role: etcd_migrate + r_etcd_migrate_action: migrate + etcd_peer: "{{ ansible_default_ipv4.address }}" + +- name: Gate on etcd migration + hosts: oo_masters_to_config + gather_facts: no + tasks: + - set_fact: + etcd_migration_completed: "{{ hostvars + | oo_select_keys(groups.oo_etcd_to_config) + | oo_collect('inventory_hostname', {'r_etcd_migrate_success': true}) }}" + - set_fact: + etcd_migration_failed: "{{ groups.oo_etcd_to_config | difference(etcd_migration_completed) }}" + +- name: Configure masters if etcd data migration is succesfull + hosts: oo_masters_to_config + roles: + - role: etcd_migrate + r_etcd_migrate_action: configure + when: etcd_migration_failed | length == 0 + tasks: + - debug: + msg: "Skipping master re-configuration since migration failed." + when: + - etcd_migration_failed | length > 0 + +- name: Start masters after etcd data migration + hosts: oo_masters_to_config + tasks: + - name: Start master services + service: + name: "{{ item }}" + state: started + register: service_status + # Sometimes the master-api, resp. master-controllers fails to start for the first time + until: service_status.state is defined and service_status.state == "started" + retries: 5 + delay: 10 + with_items: "{{ master_services[::-1] }}" + - fail: + msg: "Migration failed. 
The following hosts were not properly migrated: {{ etcd_migration_failed | join(',') }}" + when: + - etcd_migration_failed | length > 0 diff --git a/playbooks/common/openshift-cfme/config.yml b/playbooks/common/openshift-cfme/config.yml new file mode 100644 index 000000000..533a35d9e --- /dev/null +++ b/playbooks/common/openshift-cfme/config.yml @@ -0,0 +1,44 @@ +--- +# TODO: Make this work. The 'name' variable below is undefined +# presently because it's part of the cfme role. This play can't run +# until that's re-worked. +# +# - name: Pre-Pull manageiq-pods docker images +# hosts: nodes +# tasks: +# - name: Ensure the latest manageiq-pods docker image is pulling +# docker_image: +# name: "{{ openshift_cfme_container_image }}" +# # Fire-and-forget method, never timeout +# async: 99999999999 +# # F-a-f, never check on this. True 'background' task. +# poll: 0 + +- name: Configure Masters for CFME Bulk Image Imports + hosts: oo_masters_to_config + serial: 1 + tasks: + - name: Run master cfme tuning playbook + include_role: + name: openshift_cfme + tasks_from: tune_masters + +- name: Setup CFME + hosts: oo_first_master + vars: + r_openshift_cfme_miq_template_content: "{{ lookup('file', 'roles/openshift_cfme/files/miq-template.yaml') | from_yaml}}" + pre_tasks: + - name: Create a temporary place to evaluate the PV templates + command: mktemp -d /tmp/openshift-ansible-XXXXXXX + register: r_openshift_cfme_mktemp + changed_when: false + - name: Ensure the server template was read from disk + debug: + msg="{{ r_openshift_cfme_miq_template_content | from_yaml }}" + + tasks: + - name: Run the CFME Setup Role + include_role: + name: openshift_cfme + vars: + template_dir: "{{ hostvars[groups.masters.0].r_openshift_cfme_mktemp.stdout }}" diff --git a/playbooks/common/openshift-cfme/filter_plugins b/playbooks/common/openshift-cfme/filter_plugins new file mode 120000 index 000000000..99a95e4ca --- /dev/null +++ b/playbooks/common/openshift-cfme/filter_plugins @@ -0,0 +1 @@ +../../../filter_plugins
\ No newline at end of file diff --git a/playbooks/common/openshift-cfme/library b/playbooks/common/openshift-cfme/library new file mode 120000 index 000000000..ba40d2f56 --- /dev/null +++ b/playbooks/common/openshift-cfme/library @@ -0,0 +1 @@ +../../../library
\ No newline at end of file diff --git a/playbooks/common/openshift-cfme/roles b/playbooks/common/openshift-cfme/roles new file mode 120000 index 000000000..20c4c58cf --- /dev/null +++ b/playbooks/common/openshift-cfme/roles @@ -0,0 +1 @@ +../../../roles
\ No newline at end of file diff --git a/playbooks/common/openshift-cfme/uninstall.yml b/playbooks/common/openshift-cfme/uninstall.yml new file mode 100644 index 000000000..78b8e7668 --- /dev/null +++ b/playbooks/common/openshift-cfme/uninstall.yml @@ -0,0 +1,8 @@ +--- +- name: Uninstall CFME + hosts: masters + tasks: + - name: Run the CFME Uninstall Role Tasks + include_role: + name: openshift_cfme + tasks_from: uninstall diff --git a/playbooks/common/openshift-checks/health.yml b/playbooks/common/openshift-checks/health.yml index 1bee460e8..c7766ff04 100644 --- a/playbooks/common/openshift-checks/health.yml +++ b/playbooks/common/openshift-checks/health.yml @@ -1,4 +1,9 @@ --- +# openshift_health_checker depends on openshift_version which now requires group eval. +- include: ../openshift-cluster/evaluate_groups.yml + tags: + - always + - name: Run OpenShift health checks hosts: OSEv3 roles: diff --git a/playbooks/common/openshift-checks/pre-install.yml b/playbooks/common/openshift-checks/pre-install.yml index e01c6f38d..7ca9f7e8b 100644 --- a/playbooks/common/openshift-checks/pre-install.yml +++ b/playbooks/common/openshift-checks/pre-install.yml @@ -1,4 +1,9 @@ --- +# openshift_health_checker depends on openshift_version which now requires group eval. +- include: ../openshift-cluster/evaluate_groups.yml + tags: + - always + - hosts: OSEv3 name: run OpenShift pre-install checks roles: diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml index 1482b3a3f..7224ae712 100644 --- a/playbooks/common/openshift-cluster/config.yml +++ b/playbooks/common/openshift-cluster/config.yml @@ -1,4 +1,23 @@ --- +# TODO: refactor this into its own include +# and pass a variable for ctx +- name: Verify Requirements + hosts: oo_all_hosts + roles: + - openshift_health_checker + vars: + - r_openshift_health_checker_playbook_context: "install" + post_tasks: + - action: openshift_health_check + args: + checks: + - disk_availability + - memory_availability + - package_availability + - package_version + - docker_image_availability + - docker_storage + - include: initialize_oo_option_facts.yml tags: - always @@ -45,6 +64,12 @@ tags: - hosted +- include: service_catalog.yml + when: + - openshift_enable_service_catalog | default(false) | bool + tags: + - servicecatalog + - name: Re-enable excluder if it was previously enabled hosts: oo_masters_to_config:oo_nodes_to_config tags: diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml index 46932b27f..c28ce4c14 100644 --- a/playbooks/common/openshift-cluster/evaluate_groups.yml +++ b/playbooks/common/openshift-cluster/evaluate_groups.yml @@ -155,5 +155,5 @@ groups: oo_glusterfs_to_config ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" ansible_become: "{{ g_sudo | default(omit) }}" - with_items: "{{ g_glusterfs_hosts | union(g_glusterfs_registry_hosts) | default([]) }}" + with_items: "{{ g_glusterfs_hosts | union(g_glusterfs_registry_hosts | default([])) }}" changed_when: no diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml b/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml new file mode 100644 index 000000000..6964e8567 --- /dev/null +++ b/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml @@ -0,0 +1,158 @@ +--- +- name: Check cert expirys + hosts: oo_etcd_to_config:oo_masters_to_config + vars: + openshift_certificate_expiry_show_all: yes + roles: + # Sets 'check_results' per 
host which contains health status for + # etcd, master and node certificates. We will use 'check_results' + # to determine if any certificates were expired prior to running + # this playbook. Service restarts will be skipped if any + # certificates were previously expired. + - role: openshift_certificate_expiry + +- name: Backup existing etcd CA certificate directories + hosts: oo_etcd_to_config + roles: + - role: etcd_common + r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" + tasks: + - name: Determine if CA certificate directory exists + stat: + path: "{{ etcd_ca_dir }}" + register: etcd_ca_certs_dir_stat + - name: Backup generated etcd certificates + command: > + tar -czf {{ etcd_conf_dir }}/etcd-ca-certificate-backup-{{ ansible_date_time.epoch }}.tgz + {{ etcd_ca_dir }} + args: + warn: no + when: etcd_ca_certs_dir_stat.stat.exists | bool + - name: Remove CA certificate directory + file: + path: "{{ etcd_ca_dir }}" + state: absent + when: etcd_ca_certs_dir_stat.stat.exists | bool + +- name: Generate new etcd CA + hosts: oo_first_etcd + roles: + - role: openshift_etcd_ca + etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}" + etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" + etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}" + +- name: Create temp directory for syncing certs + hosts: localhost + connection: local + become: no + gather_facts: no + tasks: + - name: Create local temp directory for syncing certs + local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX + register: g_etcd_mktemp + changed_when: false + +- name: Distribute etcd CA to etcd hosts + hosts: oo_etcd_to_config + vars: + etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" + roles: + - role: etcd_common + r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" + tasks: + - name: Create a tarball of the etcd ca certs + command: > + tar -czvf {{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz + -C {{ etcd_ca_dir }} . + args: + creates: "{{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz" + warn: no + delegate_to: "{{ etcd_ca_host }}" + run_once: true + - name: Retrieve etcd ca cert tarball + fetch: + src: "{{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz" + dest: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/" + flat: yes + fail_on_missing: yes + validate_checksum: yes + delegate_to: "{{ etcd_ca_host }}" + run_once: true + - name: Ensure ca directory exists + file: + path: "{{ etcd_ca_dir }}" + state: directory + - name: Unarchive etcd ca cert tarballs + unarchive: + src: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/{{ etcd_ca_name }}.tgz" + dest: "{{ etcd_ca_dir }}" + - name: Read current etcd CA + slurp: + src: "{{ etcd_conf_dir }}/ca.crt" + register: g_current_etcd_ca_output + - name: Read new etcd CA + slurp: + src: "{{ etcd_ca_dir }}/ca.crt" + register: g_new_etcd_ca_output + - copy: + content: "{{ (g_new_etcd_ca_output.content|b64decode) + (g_current_etcd_ca_output.content|b64decode) }}" + dest: "{{ item }}/ca.crt" + with_items: + - "{{ etcd_conf_dir }}" + - "{{ etcd_ca_dir }}" + +- include: ../../openshift-etcd/restart.yml + # Do not restart etcd when etcd certificates were previously expired. 
+ when: ('expired' not in (hostvars + | oo_select_keys(groups['etcd']) + | oo_collect('check_results.check_results.etcd') + | oo_collect('health'))) + +- name: Retrieve etcd CA certificate + hosts: oo_first_etcd + roles: + - role: etcd_common + r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" + tasks: + - name: Retrieve etcd CA certificate + fetch: + src: "{{ etcd_conf_dir }}/ca.crt" + dest: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/" + flat: yes + fail_on_missing: yes + validate_checksum: yes + +- name: Distribute etcd CA to masters + hosts: oo_masters_to_config + vars: + openshift_ca_host: "{{ groups.oo_first_master.0 }}" + tasks: + - name: Deploy etcd CA + copy: + src: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/ca.crt" + dest: "{{ openshift.common.config_base }}/master/master.etcd-ca.crt" + when: groups.oo_etcd_to_config | default([]) | length > 0 + +- name: Delete temporary directory on localhost + hosts: localhost + connection: local + become: no + gather_facts: no + tasks: + - file: + name: "{{ g_etcd_mktemp.stdout }}" + state: absent + changed_when: false + +- include: ../../openshift-master/restart.yml + # Do not restart masters when master certificates were previously expired. + when: ('expired' not in hostvars + | oo_select_keys(groups['oo_masters_to_config']) + | oo_collect('check_results.check_results.ocp_certs') + | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/master.server.crt"})) + and + ('expired' not in hostvars + | oo_select_keys(groups['oo_masters_to_config']) + | oo_collect('check_results.check_results.ocp_certs') + | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/ca-bundle.crt"})) diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/ca.yml b/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml index 0d94a011a..089ae6bbc 100644 --- a/playbooks/common/openshift-cluster/redeploy-certificates/ca.yml +++ b/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml @@ -7,7 +7,7 @@ when: not openshift.common.version_gte_3_2_or_1_2 | bool - name: Check cert expirys - hosts: oo_nodes_to_config:oo_etcd_to_config:oo_masters_to_config + hosts: oo_nodes_to_config:oo_masters_to_config vars: openshift_certificate_expiry_show_all: yes roles: @@ -18,140 +18,6 @@ # certificates were previously expired. 
- role: openshift_certificate_expiry -- name: Backup existing etcd CA certificate directories - hosts: oo_etcd_to_config - roles: - - role: etcd_common - r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" - tasks: - - name: Determine if CA certificate directory exists - stat: - path: "{{ etcd_ca_dir }}" - register: etcd_ca_certs_dir_stat - - name: Backup generated etcd certificates - command: > - tar -czf {{ etcd_conf_dir }}/etcd-ca-certificate-backup-{{ ansible_date_time.epoch }}.tgz - {{ etcd_ca_dir }} - args: - warn: no - when: etcd_ca_certs_dir_stat.stat.exists | bool - - name: Remove CA certificate directory - file: - path: "{{ etcd_ca_dir }}" - state: absent - when: etcd_ca_certs_dir_stat.stat.exists | bool - -- name: Generate new etcd CA - hosts: oo_first_etcd - roles: - - role: openshift_etcd_ca - etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}" - etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" - etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}" - -- name: Create temp directory for syncing certs - hosts: localhost - connection: local - become: no - gather_facts: no - tasks: - - name: Create local temp directory for syncing certs - local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX - register: g_etcd_mktemp - changed_when: false - -- name: Distribute etcd CA to etcd hosts - hosts: oo_etcd_to_config - vars: - etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" - roles: - - role: etcd_common - r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" - tasks: - - name: Create a tarball of the etcd ca certs - command: > - tar -czvf {{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz - -C {{ etcd_ca_dir }} . - args: - creates: "{{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz" - warn: no - delegate_to: "{{ etcd_ca_host }}" - run_once: true - - name: Retrieve etcd ca cert tarball - fetch: - src: "{{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz" - dest: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/" - flat: yes - fail_on_missing: yes - validate_checksum: yes - delegate_to: "{{ etcd_ca_host }}" - run_once: true - - name: Ensure ca directory exists - file: - path: "{{ etcd_ca_dir }}" - state: directory - - name: Unarchive etcd ca cert tarballs - unarchive: - src: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/{{ etcd_ca_name }}.tgz" - dest: "{{ etcd_ca_dir }}" - - name: Read current etcd CA - slurp: - src: "{{ etcd_conf_dir }}/ca.crt" - register: g_current_etcd_ca_output - - name: Read new etcd CA - slurp: - src: "{{ etcd_ca_dir }}/ca.crt" - register: g_new_etcd_ca_output - - copy: - content: "{{ (g_new_etcd_ca_output.content|b64decode) + (g_current_etcd_ca_output.content|b64decode) }}" - dest: "{{ item }}/ca.crt" - with_items: - - "{{ etcd_conf_dir }}" - - "{{ etcd_ca_dir }}" - -- name: Retrieve etcd CA certificate - hosts: oo_first_etcd - roles: - - role: etcd_common - r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" - tasks: - - name: Retrieve etcd CA certificate - fetch: - src: "{{ etcd_conf_dir }}/ca.crt" - dest: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/" - flat: yes - fail_on_missing: yes - validate_checksum: yes - -- name: Distribute etcd CA to masters - hosts: oo_masters_to_config - vars: - openshift_ca_host: "{{ groups.oo_first_master.0 }}" - tasks: - - name: Deploy CA certificate, key, bundle and serial - copy: - src: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/ca.crt" - dest: "{{ openshift.common.config_base }}/master/master.etcd-ca.crt" - when: groups.oo_etcd_to_config | 
default([]) | length > 0 - -- name: Delete temporary directory on localhost - hosts: localhost - connection: local - become: no - gather_facts: no - tasks: - - file: - name: "{{ g_etcd_mktemp.stdout }}" - state: absent - changed_when: false - -- include: ../../openshift-etcd/restart.yml - # Do not restart etcd when etcd certificates were previously expired. - when: ('expired' not in (hostvars - | oo_select_keys(groups['etcd']) - | oo_collect('check_results.check_results.etcd') - | oo_collect('health'))) - # Update master config when ca-bundle not referenced. Services will be # restarted below after new CA certificate has been distributed. - name: Ensure ca-bundle.crt is referenced in master configuration diff --git a/playbooks/common/openshift-cluster/service_catalog.yml b/playbooks/common/openshift-cluster/service_catalog.yml new file mode 100644 index 000000000..c42e8781a --- /dev/null +++ b/playbooks/common/openshift-cluster/service_catalog.yml @@ -0,0 +1,8 @@ +--- +- include: evaluate_groups.yml + +- name: Service Catalog + hosts: oo_first_master + roles: + - openshift_service_catalog + - ansible_service_broker diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml index b7fd2c0c5..616ba04f8 100644 --- a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml +++ b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml @@ -3,12 +3,12 @@ hosts: oo_etcd_hosts_to_backup roles: - role: openshift_facts - - role: etcd_upgrade - r_etcd_upgrade_action: backup - r_etcd_backup_tag: etcd_backup_tag + - role: etcd_common + r_etcd_common_action: backup + r_etcd_common_backup_tag: etcd_backup_tag r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" - r_etcd_upgrade_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}" - r_etcd_backup_sufix_name: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}" + r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}" + r_etcd_common_backup_sufix_name: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}" - name: Gate on etcd backup hosts: localhost @@ -18,7 +18,7 @@ - set_fact: etcd_backup_completed: "{{ hostvars | oo_select_keys(groups.oo_etcd_hosts_to_backup) - | oo_collect('inventory_hostname', {'r_etcd_upgrade_backup_complete': true}) }}" + | oo_collect('inventory_hostname', {'r_etcd_common_backup_complete': true}) }}" - set_fact: etcd_backup_failed: "{{ groups.oo_etcd_hosts_to_backup | difference(etcd_backup_completed) }}" - fail: diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/main.yml b/playbooks/common/openshift-cluster/upgrades/etcd/main.yml index 3e01883ae..64abc54e7 100644 --- a/playbooks/common/openshift-cluster/upgrades/etcd/main.yml +++ b/playbooks/common/openshift-cluster/upgrades/etcd/main.yml @@ -16,7 +16,8 @@ tasks: - include_role: name: etcd_common - tasks_from: etcdctl.yml + vars: + r_etcd_common_action: drop_etcdctl - name: Perform etcd upgrade include: ./upgrade.yml diff --git a/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml index 046535680..72de63070 100644 --- a/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml @@ -6,27 +6,32 @@ - lib_openshift tasks: - - name: Retrieve list of openshift nodes matching upgrade label - oc_obj: - state: list - kind: node - selector: "{{ 
openshift_upgrade_nodes_label }}" - register: nodes_to_upgrade - when: openshift_upgrade_nodes_label is defined + - when: openshift_upgrade_nodes_label is defined + block: + - name: Retrieve list of openshift nodes matching upgrade label + oc_obj: + state: list + kind: node + selector: "{{ openshift_upgrade_nodes_label }}" + register: nodes_to_upgrade - # We got a list of nodes with the label, now we need to match these with inventory hosts - # using their openshift.common.hostname fact. - - name: Map labelled nodes to inventory hosts - add_host: - name: "{{ item }}" - groups: temp_nodes_to_upgrade - ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" - ansible_become: "{{ g_sudo | default(omit) }}" - with_items: " {{ groups['oo_nodes_to_config'] }}" - when: - - openshift_upgrade_nodes_label is defined - - hostvars[item].openshift.common.hostname in nodes_to_upgrade.results.results[0]['items'] | map(attribute='metadata.name') | list - changed_when: false + - name: Fail if no nodes match openshift_upgrade_nodes_label + fail: + msg: "openshift_upgrade_nodes_label was specified but no nodes matched" + when: nodes_to_upgrade.results.results[0]['items'] | length == 0 + + # We got a list of nodes with the label, now we need to match these with inventory hosts + # using their openshift.common.hostname fact. + - name: Map labelled nodes to inventory hosts + add_host: + name: "{{ item }}" + groups: temp_nodes_to_upgrade + ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" + ansible_become: "{{ g_sudo | default(omit) }}" + with_items: " {{ groups['oo_nodes_to_config'] }}" + when: + - hostvars[item].openshift.common.hostname in nodes_to_upgrade.results.results[0]['items'] | map(attribute='metadata.name') | list + changed_when: false # Build up the oo_nodes_to_upgrade group, use the list filtered by label if # present, otherwise hit all nodes: diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml index b980909eb..6738ce11f 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml @@ -3,6 +3,16 @@ # Upgrade Masters ############################################################################### +# oc adm migrate storage should be run prior to etcd v3 upgrade +# See: https://github.com/openshift/origin/pull/14625#issuecomment-308467060 +- name: Pre master upgrade - Upgrade job storage + hosts: oo_first_master + tasks: + - name: Upgrade job storage + command: > + {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig + migrate storage --include=jobs --confirm + # If facts cache were for some reason deleted, this fact may not be set, and if not set # it will always default to true. This causes problems for the etcd data dir fact detection # so we must first make sure this is set correctly before attempting the backup. 
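For context, the pre- and post-control-plane "Upgrade job storage" plays added in this file both reduce to a single client call on the first master. A minimal manual equivalent is sketched below, assuming the default /etc/origin config base and `oc` as the client binary (the plays themselves template both values from openshift facts):

```sh
# Sketch of the command the "Upgrade job storage" plays run on the first master.
# Assumes the default config base; adjust the kubeconfig path for non-default installs.
oc adm --config=/etc/origin/master/admin.kubeconfig \
  migrate storage --include=jobs --confirm
```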
@@ -133,6 +143,14 @@ - set_fact: master_update_complete: True +- name: Post master upgrade - Upgrade job storage + hosts: oo_first_master + tasks: + - name: Upgrade job storage + command: > + {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig + migrate storage --include=jobs --confirm + ############################################################################## # Gate on master update complete ############################################################################## @@ -278,6 +296,7 @@ - openshift_facts - docker - openshift_node_upgrade + - openshift_node_dnsmasq post_tasks: - name: Set node schedulability diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml index 91dbc2cd4..35a50cf4e 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml @@ -34,6 +34,7 @@ - openshift_facts - docker - openshift_node_upgrade + - openshift_node_dnsmasq - role: openshift_excluder r_openshift_excluder_action: enable r_openshift_excluder_service_type: "{{ openshift.common.service_type }}" diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml new file mode 100644 index 000000000..ed89dbe8d --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml @@ -0,0 +1,16 @@ +--- +- modify_yaml: + dest: "{{ openshift.common.config_base}}/master/master-config.yaml" + yaml_key: 'admissionConfig.pluginConfig' + yaml_value: "{{ openshift.master.admission_plugin_config }}" + when: "'admission_plugin_config' in openshift.master" + +- modify_yaml: + dest: "{{ openshift.common.config_base}}/master/master-config.yaml" + yaml_key: 'admissionConfig.pluginOrderOverride' + yaml_value: + +- modify_yaml: + dest: "{{ openshift.common.config_base}}/master/master-config.yaml" + yaml_key: 'kubernetesMasterConfig.admissionConfig' + yaml_value: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/storage_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/storage_upgrade.yml deleted file mode 100644 index 48c69eccd..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_5/storage_upgrade.yml +++ /dev/null @@ -1,18 +0,0 @@ ---- -############################################################################### -# Post upgrade - Upgrade job storage -############################################################################### -- name: Upgrade job storage - hosts: oo_first_master - roles: - - { role: openshift_cli } - vars: - # Another spot where we assume docker is running and do not want to accidentally trigger an unsafe - # restart. 
- skip_docker_role: True - tasks: - - name: Upgrade job storage - command: > - {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig - migrate storage --include=jobs --confirm - run_once: true diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml index e63b03e51..4e7c14e94 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml @@ -115,5 +115,3 @@ - include: ../upgrade_nodes.yml - include: ../post_control_plane.yml - -- include: storage_upgrade.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml index 21e1d440d..45b664d06 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml @@ -115,7 +115,7 @@ - include: ../cleanup_unused_images.yml - include: ../upgrade_control_plane.yml + vars: + master_config_hook: "v3_5/master_config_upgrade.yml" - include: ../post_control_plane.yml - -- include: storage_upgrade.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml new file mode 100644 index 000000000..ed89dbe8d --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml @@ -0,0 +1,16 @@ +--- +- modify_yaml: + dest: "{{ openshift.common.config_base}}/master/master-config.yaml" + yaml_key: 'admissionConfig.pluginConfig' + yaml_value: "{{ openshift.master.admission_plugin_config }}" + when: "'admission_plugin_config' in openshift.master" + +- modify_yaml: + dest: "{{ openshift.common.config_base}}/master/master-config.yaml" + yaml_key: 'admissionConfig.pluginOrderOverride' + yaml_value: + +- modify_yaml: + dest: "{{ openshift.common.config_base}}/master/master-config.yaml" + yaml_key: 'kubernetesMasterConfig.admissionConfig' + yaml_value: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/storage_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/storage_upgrade.yml deleted file mode 100644 index 48c69eccd..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_6/storage_upgrade.yml +++ /dev/null @@ -1,18 +0,0 @@ ---- -############################################################################### -# Post upgrade - Upgrade job storage -############################################################################### -- name: Upgrade job storage - hosts: oo_first_master - roles: - - { role: openshift_cli } - vars: - # Another spot where we assume docker is running and do not want to accidentally trigger an unsafe - # restart. 
- skip_docker_role: True - tasks: - - name: Upgrade job storage - command: > - {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig - migrate storage --include=jobs --confirm - run_once: true diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml index 5d41b84d0..5b9ac9e8f 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml @@ -115,5 +115,3 @@ - include: ../upgrade_nodes.yml - include: ../post_control_plane.yml - -- include: storage_upgrade.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml index e34259b00..a470c7595 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml @@ -115,7 +115,7 @@ - include: ../cleanup_unused_images.yml - include: ../upgrade_control_plane.yml + vars: + master_config_hook: "v3_6/master_config_upgrade.yml" - include: ../post_control_plane.yml - -- include: storage_upgrade.yml diff --git a/playbooks/common/openshift-etcd/restart.yml b/playbooks/common/openshift-etcd/restart.yml index 196c86f28..af1ef245a 100644 --- a/playbooks/common/openshift-etcd/restart.yml +++ b/playbooks/common/openshift-etcd/restart.yml @@ -5,5 +5,5 @@ tasks: - name: restart etcd service: - name: "{{ 'etcd' if not openshift.common.is_containerized | bool else 'etcd_container' }}" + name: "{{ 'etcd_container' if openshift.common.etcd_runtime == 'docker' else 'etcd' }}" state: restarted diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml index ddc4db8f8..70108fb7a 100644 --- a/playbooks/common/openshift-master/config.yml +++ b/playbooks/common/openshift-master/config.yml @@ -20,6 +20,25 @@ - node - .config_managed + - name: Check for existing configuration + stat: + path: /etc/origin/master/master-config.yaml + register: master_config_stat + + - name: Set clean install fact + set_fact: + l_clean_install: "{{ not master_config_stat.stat.exists | bool }}" + + - name: Determine if etcd3 storage is in use + command: grep -Pzo "storage-backend:\n.*etcd3" /etc/origin/master/master-config.yaml -q + register: etcd3_grep + failed_when: false + changed_when: false + + - name: Set etcd3 fact + set_fact: + l_etcd3_enabled: "{{ etcd3_grep.rc == 0 | bool }}" + - set_fact: openshift_master_pod_eviction_timeout: "{{ lookup('oo_option', 'openshift_master_pod_eviction_timeout') | default(none, true) }}" when: openshift_master_pod_eviction_timeout is not defined @@ -122,6 +141,8 @@ etcd_cert_subdir: "openshift-master-{{ openshift.common.hostname }}" etcd_cert_config_dir: "{{ openshift.common.config_base }}/master" etcd_cert_prefix: "master.etcd-" + r_openshift_master_clean_install: "{{ hostvars[groups.oo_first_master.0].l_clean_install }}" + r_openshift_master_etcd3_storage: "{{ hostvars[groups.oo_first_master.0].l_etcd3_enabled }}" - role: nuage_master when: openshift.common.use_nuage | bool - role: calico_master diff --git a/roles/ansible_service_broker/defaults/main.yml b/roles/ansible_service_broker/defaults/main.yml new file mode 100644 index 000000000..4a7252679 --- /dev/null +++ b/roles/ansible_service_broker/defaults/main.yml @@ -0,0 +1,6 @@ +--- + +ansible_service_broker_remove: false 
+ansible_service_broker_log_level: info +# Recommended you do not enable this for now +ansible_service_broker_launch_apb_on_bind: false diff --git a/roles/ansible_service_broker/meta/main.yml b/roles/ansible_service_broker/meta/main.yml new file mode 100644 index 000000000..ec4aafb79 --- /dev/null +++ b/roles/ansible_service_broker/meta/main.yml @@ -0,0 +1,15 @@ +--- +galaxy_info: + author: Fabian von Feilitzsch + description: OpenShift Ansible Service Broker + company: Red Hat, Inc. + license: Apache License, Version 2.0 + min_ansible_version: 2.1 + platforms: + - name: EL + versions: + - 7 + categories: + - cloud +dependencies: +- role: lib_openshift diff --git a/roles/ansible_service_broker/tasks/install.yml b/roles/ansible_service_broker/tasks/install.yml new file mode 100644 index 000000000..b48583fd4 --- /dev/null +++ b/roles/ansible_service_broker/tasks/install.yml @@ -0,0 +1,268 @@ +--- + +# Fact setting and validations +- name: Set default image variables based on deployment type + include_vars: "{{ item }}" + with_first_found: + - "{{ openshift_deployment_type | default(deployment_type) }}.yml" + - "default_images.yml" + +- name: set ansible_service_broker facts + set_fact: + ansible_service_broker_image_prefix: "{{ ansible_service_broker_image_prefix | default(__ansible_service_broker_image_prefix) }}" + ansible_service_broker_image_tag: "{{ ansible_service_broker_image_tag | default(__ansible_service_broker_image_tag) }}" + + ansible_service_broker_etcd_image_prefix: "{{ ansible_service_broker_etcd_image_prefix | default(__ansible_service_broker_etcd_image_prefix) }}" + ansible_service_broker_etcd_image_tag: "{{ ansible_service_broker_etcd_image_tag | default(__ansible_service_broker_etcd_image_tag) }}" + + ansible_service_broker_registry_type: "{{ ansible_service_broker_registry_type | default(__ansible_service_broker_registry_type) }}" + ansible_service_broker_registry_url: "{{ ansible_service_broker_registry_url | default(__ansible_service_broker_registry_url) }}" + ansible_service_broker_registry_user: "{{ ansible_service_broker_registry_user | default(__ansible_service_broker_registry_user) }}" + ansible_service_broker_registry_password: "{{ ansible_service_broker_registry_password | default(__ansible_service_broker_registry_password) }}" + ansible_service_broker_registry_organization: "{{ ansible_service_broker_registry_organization | default(__ansible_service_broker_registry_organization) }}" + +- name: set ansible-service-broker image facts using set prefix and tag + set_fact: + ansible_service_broker_image: "{{ ansible_service_broker_image_prefix }}ansible-service-broker:{{ ansible_service_broker_image_tag }}" + ansible_service_broker_etcd_image: "{{ ansible_service_broker_etcd_image_prefix }}etcd:{{ ansible_service_broker_etcd_image_tag }}" + +- include: validate_facts.yml + + +# Deployment of ansible-service-broker starts here +- name: create openshift-ansible-service-broker project + oc_project: + name: openshift-ansible-service-broker + state: present + +- name: create ansible-service-broker serviceaccount + oc_serviceaccount: + name: asb + namespace: openshift-ansible-service-broker + state: present + +- name: create ansible-service-broker service + oc_service: + name: asb + namespace: openshift-ansible-service-broker + state: present + labels: + app: ansible-service-broker + service: asb + ports: + - name: port-1338 + port: 1338 + selector: + app: ansible-service-broker + service: asb + +- name: create etcd service + oc_service: + name: etcd + namespace: 
openshift-ansible-service-broker + state: present + ports: + - name: etcd-advertise + port: 2379 + selector: + app: ansible-service-broker + service: etcd + +- name: create route for ansible-service-broker service + oc_route: + name: asb-1338 + namespace: openshift-ansible-service-broker + state: present + service_name: asb + port: 1338 + register: asb_route_out + +- name: get ansible-service-broker route name + set_fact: + ansible_service_broker_route: "{{ asb_route_out.results.results[0].spec.host }}" + +- name: create persistent volume claim for etcd + oc_obj: + name: etcd + namespace: openshift-ansible-service-broker + state: present + kind: PersistentVolumeClaim + content: + path: /tmp/dcout + data: + apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: etcd + namespace: openshift-ansible-service-broker + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + +- name: create etcd deployment + oc_obj: + name: etcd + namespace: openshift-ansible-service-broker + state: present + kind: Deployment + content: + path: /tmp/dcout + data: + apiVersion: extensions/v1beta1 + kind: Deployment + metadata: + name: etcd + namespace: openshift-ansible-service-broker + labels: + app: ansible-service-broker + service: etcd + spec: + selector: + matchLabels: + app: ansible-service-broker + service: etcd + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + replicas: 1 + template: + metadata: + labels: + app: ansible-service-broker + service: etcd + spec: + restartPolicy: Always + containers: + - image: "{{ ansible_service_broker_etcd_image }}" + name: etcd + imagePullPolicy: IfNotPresent + terminationMessagePath: /tmp/termination-log + workingDir: /etcd + args: + - /usr/local/bin/etcd + - --data-dir=/data + - --listen-client-urls="http://0.0.0.0:2379" + - --advertise-client-urls="http://0.0.0.0:2379" + ports: + - containerPort: 2379 + protocol: TCP + env: + - name: ETCDCTL_API + value: "3" + volumeMounts: + - mountPath: /data + name: etcd + volumes: + - name: etcd + persistentVolumeClaim: + claimName: etcd + +- name: create ansible-service-broker deployment + oc_obj: + name: asb + namespace: openshift-ansible-service-broker + state: present + kind: Deployment + content: + path: /tmp/dcout + data: + apiVersion: extensions/v1beta1 + kind: Deployment + metadata: + name: asb + namespace: openshift-ansible-service-broker + labels: + app: openshift-ansible-service-broker + service: asb + spec: + strategy: + type: Recreate + replicas: 1 + template: + metadata: + labels: + app: openshift-ansible-service-broker + service: asb + spec: + serviceAccount: asb + restartPolicy: Always + containers: + - image: "{{ ansible_service_broker_image }}" + name: asb + imagePullPolicy: IfNotPresent + volumeMounts: + - name: config-volume + mountPath: /etc/ansible-service-broker + ports: + - containerPort: 1338 + protocol: TCP + env: + - name: BROKER_CONFIG + value: /etc/ansible-service-broker/config.yaml + terminationMessagePath: /tmp/termination-log + volumes: + - name: config-volume + configMap: + name: broker-config + items: + - key: broker-config + path: config.yaml + + +# TODO: saw a oc_configmap in the library, but didn't understand how to get it to do the following: +- name: Create config map for ansible-service-broker + oc_obj: + name: broker-config + namespace: openshift-ansible-service-broker + state: present + kind: ConfigMap + content: + path: /tmp/cmout + data: + apiVersion: v1 + kind: ConfigMap + metadata: + name: broker-config + namespace: 
openshift-ansible-service-broker + labels: + app: ansible-service-broker + data: + broker-config: | + registry: + name: "{{ ansible_service_broker_registry_type }}" + url: "{{ ansible_service_broker_registry_url }}" + user: "{{ ansible_service_broker_registry_user }}" + pass: "{{ ansible_service_broker_registry_password }}" + org: "{{ ansible_service_broker_registry_organization }}" + dao: + etcd_host: etcd + etcd_port: 2379 + log: + logfile: /var/log/ansible-service-broker/asb.log + stdout: true + level: "{{ ansible_service_broker_log_level }}" + color: true + openshift: {} + broker: + devbroker: false + launchapbonbind: "{{ ansible_service_broker_launch_apb_on_bind }}" + +- name: Create the Broker resource in the catalog + oc_obj: + name: ansible-service-broker + state: present + kind: Broker + content: + path: /tmp/brokerout + data: + apiVersion: servicecatalog.k8s.io/v1alpha1 + kind: Broker + metadata: + name: ansible-service-broker + spec: + url: http://{{ ansible_service_broker_route }} diff --git a/roles/ansible_service_broker/tasks/main.yml b/roles/ansible_service_broker/tasks/main.yml new file mode 100644 index 000000000..b46ce8233 --- /dev/null +++ b/roles/ansible_service_broker/tasks/main.yml @@ -0,0 +1,8 @@ +--- +# do any asserts here + +- include: install.yml + when: not ansible_service_broker_remove|default(false) | bool + +- include: remove.yml + when: ansible_service_broker_remove|default(false) | bool diff --git a/roles/ansible_service_broker/tasks/remove.yml b/roles/ansible_service_broker/tasks/remove.yml new file mode 100644 index 000000000..2519f9f4c --- /dev/null +++ b/roles/ansible_service_broker/tasks/remove.yml @@ -0,0 +1,65 @@ +--- + +- name: remove openshift-ansible-service-broker project + oc_project: + name: openshift-ansible-service-broker + state: absent + +- name: remove ansible-service-broker serviceaccount + oc_serviceaccount: + name: asb + namespace: openshift-ansible-service-broker + state: absent + +- name: remove ansible-service-broker service + oc_service: + name: asb + namespace: openshift-ansible-service-broker + state: absent + +- name: remove etcd service + oc_service: + name: etcd + namespace: openshift-ansible-service-broker + state: absent + +- name: remove route for ansible-service-broker service + oc_route: + name: asb-1338 + namespace: openshift-ansible-service-broker + state: absent + +- name: remove persistent volume claim for etcd + oc_pvc: + name: etcd + namespace: openshift-ansible-service-broker + state: absent + +- name: remove etcd deployment + oc_obj: + name: etcd + namespace: openshift-ansible-service-broker + state: absent + kind: Deployment + +- name: remove ansible-service-broker deployment + oc_obj: + name: asb + namespace: openshift-ansible-service-broker + state: absent + kind: Deployment + +# TODO: saw a oc_configmap in the library, but didn't understand how to get it to do the following: +- name: remove config map for ansible-service-broker + oc_obj: + name: broker-config + namespace: openshift-ansible-service-broker + state: absent + kind: ConfigMap + +# TODO: Is this going to work? 
+- name: remove broker object from the catalog + oc_obj: + name: ansible-service-broker + state: absent + kind: Broker diff --git a/roles/ansible_service_broker/tasks/validate_facts.yml b/roles/ansible_service_broker/tasks/validate_facts.yml new file mode 100644 index 000000000..604d24e1d --- /dev/null +++ b/roles/ansible_service_broker/tasks/validate_facts.yml @@ -0,0 +1,15 @@ +--- +- name: validate Dockerhub registry settings + fail: msg="To use the dockerhub registry, you must provide the ansible_service_broker_registry_user. ansible_service_broker_registry_password, and ansible_service_broker_registry_organization parameters" + when: + - ansible_service_broker_registry_type == 'dockerhub' + - not (ansible_service_broker_registry_user and + ansible_service_broker_registry_password and + ansible_service_broker_registry_organization) + + +- name: validate RHCC registry settings + fail: msg="To use the Red Hat Container Catalog registry, you must provide the ansible_service_broker_registry_url" + when: + - ansible_service_broker_registry_type == 'rhcc' + - not ansible_service_broker_registry_url diff --git a/roles/ansible_service_broker/vars/default_images.yml b/roles/ansible_service_broker/vars/default_images.yml new file mode 100644 index 000000000..b0b3835e3 --- /dev/null +++ b/roles/ansible_service_broker/vars/default_images.yml @@ -0,0 +1,13 @@ +--- + +__ansible_service_broker_image_prefix: ansibleplaybookbundle/ +__ansible_service_broker_image_tag: latest + +__ansible_service_broker_etcd_image_prefix: quay.io/coreos/ +__ansible_service_broker_etcd_image_tag: latest + +__ansible_service_broker_registry_type: dockerhub +__ansible_service_broker_registry_url: null +__ansible_service_broker_registry_user: null +__ansible_service_broker_registry_password: null +__ansible_service_broker_registry_organization: null diff --git a/roles/ansible_service_broker/vars/openshift-enterprise.yml b/roles/ansible_service_broker/vars/openshift-enterprise.yml new file mode 100644 index 000000000..a6d999647 --- /dev/null +++ b/roles/ansible_service_broker/vars/openshift-enterprise.yml @@ -0,0 +1,13 @@ +--- + +__ansible_service_broker_image_prefix: openshift3/ +__ansible_service_broker_image_tag: latest + +__ansible_service_broker_etcd_image_prefix: rhel7/ +__ansible_service_broker_etcd_image_tag: latest + +__ansible_service_broker_registry_type: rhcc +__ansible_service_broker_registry_url: "https://registry.access.redhat.com" +__ansible_service_broker_registry_user: null +__ansible_service_broker_registry_password: null +__ansible_service_broker_registry_organization: null diff --git a/roles/calico/defaults/main.yaml b/roles/calico/defaults/main.yaml index c7eea46f2..207dee068 100644 --- a/roles/calico/defaults/main.yaml +++ b/roles/calico/defaults/main.yaml @@ -3,13 +3,13 @@ kubeconfig: "{{openshift.common.config_base}}/node/{{ 'system:node:' + openshif cni_conf_dir: "/etc/cni/net.d/" cni_bin_dir: "/opt/cni/bin/" -cni_url: "https://github.com/containernetworking/cni/releases/download/v0.4.0/cni-amd64-v0.4.0.tgz" +cni_url: "https://github.com/containernetworking/cni/releases/download/v0.5.2/cni-amd64-v0.5.2.tgz" -calico_url_cni: "https://github.com/projectcalico/cni-plugin/releases/download/v1.5.5/calico" -calico_url_ipam: "https://github.com/projectcalico/cni-plugin/releases/download/v1.5.5/calico-ipam" +calico_url_cni: "https://github.com/projectcalico/cni-plugin/releases/download/v1.8.3/calico" +calico_url_ipam: "https://github.com/projectcalico/cni-plugin/releases/download/v1.8.3/calico-ipam" 
calico_ipv4pool_ipip: "always" calico_ipv4pool_cidr: "192.168.0.0/16" calico_log_dir: "/var/log/calico" -calico_node_image: "calico/node:v1.1.0" +calico_node_image: "calico/node:v1.2.1" diff --git a/roles/calico_master/defaults/main.yaml b/roles/calico_master/defaults/main.yaml index 5b324bce5..b2df0105f 100644 --- a/roles/calico_master/defaults/main.yaml +++ b/roles/calico_master/defaults/main.yaml @@ -4,3 +4,4 @@ kubeconfig: "{{ openshift.common.config_base }}/master/openshift-master.kubeconf calicoctl_bin_dir: "/usr/local/bin/" calico_url_calicoctl: "https://github.com/projectcalico/calicoctl/releases/download/v1.1.3/calicoctl" +calico_url_policy_controller: "quay.io/calico/kube-policy-controller:v0.5.4" diff --git a/roles/calico_master/templates/calico-policy-controller.yml.j2 b/roles/calico_master/templates/calico-policy-controller.yml.j2 index 1b87758ce..811884473 100644 --- a/roles/calico_master/templates/calico-policy-controller.yml.j2 +++ b/roles/calico_master/templates/calico-policy-controller.yml.j2 @@ -74,7 +74,7 @@ spec: serviceAccountName: calico containers: - name: calico-policy-controller - image: quay.io/calico/kube-policy-controller:v0.5.4 + image: {{ calico_url_policy_controller }} env: # The location of the Calico etcd cluster. - name: ETCD_ENDPOINTS diff --git a/roles/cockpit/tasks/main.yml b/roles/cockpit/tasks/main.yml index bddad778f..57f49ea11 100644 --- a/roles/cockpit/tasks/main.yml +++ b/roles/cockpit/tasks/main.yml @@ -3,7 +3,7 @@ package: name={{ item }} state=present with_items: - cockpit-ws - - cockpit-shell + - cockpit-system - cockpit-bridge - cockpit-docker - "{{ cockpit_plugins }}" diff --git a/roles/contiv/defaults/main.yml b/roles/contiv/defaults/main.yml index 1ccae61f2..8c4d19537 100644 --- a/roles/contiv/defaults/main.yml +++ b/roles/contiv/defaults/main.yml @@ -1,12 +1,12 @@ --- # The version of Contiv binaries to use -contiv_version: 1.0.0-beta.3-02-21-2017.20-52-42.UTC +contiv_version: 1.0.1 # The version of cni binaries cni_version: v0.4.0 -contiv_default_subnet: "20.1.1.1/24" -contiv_default_gw: "20.1.1.254" +contiv_default_subnet: "10.128.0.0/16" +contiv_default_gw: "10.128.254.254" # TCP port that Netmaster listens for network connections netmaster_port: 9999 @@ -69,6 +69,9 @@ netplugin_fwd_mode: bridge # Contiv fabric mode aci|default contiv_fabric_mode: default +# Global VLAN range +contiv_vlan_range: "2900-3000" + # Encapsulation type vlan|vxlan to use for instantiating container networks contiv_encap_mode: vlan @@ -78,8 +81,8 @@ netplugin_driver: ovs # Create a default Contiv network for use by pods contiv_default_network: true -# VLAN/ VXLAN tag value to be used for the default network -contiv_default_network_tag: 1 +# Statically configured tag for default network (if needed) +contiv_default_network_tag: "" #SRFIXME (use the openshift variables) https_proxy: "" @@ -95,6 +98,9 @@ apic_leaf_nodes: "" apic_phys_dom: "" apic_contracts_unrestricted_mode: no apic_epg_bridge_domain: not_specified +apic_configure_default_policy: false +apic_default_external_contract: "uni/tn-common/brc-default" +apic_default_app_profile: "contiv-infra-app-profile" is_atomic: False kube_cert_dir: "/data/src/github.com/openshift/origin/openshift.local.config/master" master_name: "{{ groups['masters'][0] }}" @@ -104,3 +110,12 @@ kube_ca_cert: "{{ kube_cert_dir }}/ca.crt" kube_key: "{{ kube_cert_dir }}/admin.key" kube_cert: "{{ kube_cert_dir }}/admin.crt" kube_master_api_port: 8443 + +# contivh1 default subnet and gateway +#contiv_h1_subnet_default: "132.1.1.0/24" 
+#contiv_h1_gw_default: "132.1.1.1" +contiv_h1_subnet_default: "10.129.0.0/16" +contiv_h1_gw_default: "10.129.0.1" + +# contiv default private subnet for ext access +contiv_private_ext_subnet: "10.130.0.0/16" diff --git a/roles/contiv/meta/main.yml b/roles/contiv/meta/main.yml index 3223afb6e..da6409f1e 100644 --- a/roles/contiv/meta/main.yml +++ b/roles/contiv/meta/main.yml @@ -26,3 +26,5 @@ dependencies: etcd_url_scheme: http etcd_peer_url_scheme: http when: contiv_role == "netmaster" +- role: contiv_auth_proxy + when: (contiv_role == "netmaster") and (contiv_enable_auth_proxy == true) diff --git a/roles/contiv/tasks/default_network.yml b/roles/contiv/tasks/default_network.yml index 9cf98bb80..f679443e0 100644 --- a/roles/contiv/tasks/default_network.yml +++ b/roles/contiv/tasks/default_network.yml @@ -6,10 +6,53 @@ retries: 9 delay: 10 +- name: Contiv | Set globals + command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" global set --fabric-mode {{ contiv_fabric_mode }} --vlan-range {{ contiv_vlan_range }} --fwd-mode {{ netplugin_fwd_mode }} --private-subnet {{ contiv_private_ext_subnet }}' + +- name: Contiv | Set arp mode to flood if ACI + command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" global set --arp-mode flood' + when: contiv_fabric_mode == "aci" + - name: Contiv | Check if default-net exists command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" net ls' register: net_result - name: Contiv | Create default-net - command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" net create --subnet={{ contiv_default_subnet }} -e {{ contiv_encap_mode }} -p {{ contiv_default_network_tag }} --gateway={{ contiv_default_gw }} default-net' + command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" net create --subnet={{ contiv_default_subnet }} -e {{ contiv_encap_mode }} -p {{ contiv_default_network_tag }} --gateway {{ contiv_default_gw }} default-net' when: net_result.stdout.find("default-net") == -1 + +- name: Contiv | Create host access infra network for VxLan routing case + command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" net create --subnet={{ contiv_h1_subnet_default }} --gateway={{ contiv_h1_gw_default }} --nw-type="infra" contivh1' + when: (contiv_encap_mode == "vxlan") and (netplugin_fwd_mode == "routing") + +#- name: Contiv | Create an allow-all policy for the default-group +# command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" policy create ose-allow-all-policy' +# when: contiv_fabric_mode == "aci" + +- name: Contiv | Set up aci external contract to consume default external contract + command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" external-contracts create -c -a {{ apic_default_external_contract }} oseExtToConsume' + when: (contiv_fabric_mode == "aci") and (apic_configure_default_policy == true) + +- name: Contiv | Set up aci external contract to provide default external contract + command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" external-contracts create -p -a {{ apic_default_external_contract }} oseExtToProvide' + when: (contiv_fabric_mode == "aci") and (apic_configure_default_policy == true) + +- name: Contiv | Create aci default-group + command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" group create default-net default-group' + when: contiv_fabric_mode == "aci" + +- 
name: Contiv | Add external contracts to the default-group + command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" group create -e oseExtToConsume -e oseExtToProvide default-net default-group' + when: (contiv_fabric_mode == "aci") and (apic_configure_default_policy == true) + +#- name: Contiv | Add policy rule 1 for allow-all policy +# command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" policy rule-add -d in --action allow ose-allow-all-policy 1' +# when: contiv_fabric_mode == "aci" + +#- name: Contiv | Add policy rule 2 for allow-all policy +# command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" policy rule-add -d out --action allow ose-allow-all-policy 2' +# when: contiv_fabric_mode == "aci" + +- name: Contiv | Create default aci app profile + command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" app-profile create -g default-group {{ apic_default_app_profile }}' + when: contiv_fabric_mode == "aci" diff --git a/roles/contiv/tasks/netmaster.yml b/roles/contiv/tasks/netmaster.yml index 5057767b8..acaf7386e 100644 --- a/roles/contiv/tasks/netmaster.yml +++ b/roles/contiv/tasks/netmaster.yml @@ -23,7 +23,7 @@ line: "{{ hostvars[item]['ansible_' + netmaster_interface].ipv4.address }} netmaster" state: present when: hostvars[item]['ansible_' + netmaster_interface].ipv4.address is defined - with_items: groups['masters'] + with_items: "{{ groups['masters'] }}" - name: Netmaster | Create netmaster symlinks file: diff --git a/roles/contiv/tasks/netplugin_iptables.yml b/roles/contiv/tasks/netplugin_iptables.yml index 8c348ac67..184c595c5 100644 --- a/roles/contiv/tasks/netplugin_iptables.yml +++ b/roles/contiv/tasks/netplugin_iptables.yml @@ -23,7 +23,36 @@ notify: Save iptables rules - name: Netplugin IPtables | Open vxlan port with iptables - command: /sbin/iptables -I INPUT 1 -p udp --dport 8472 -j ACCEPT -m comment --comment "vxlan" + command: /sbin/iptables -I INPUT 1 -p udp --dport 8472 -j ACCEPT -m comment --comment "netplugin vxlan 8472" + when: iptablesrules.stdout.find("netplugin vxlan 8472") == -1 + notify: Save iptables rules - name: Netplugin IPtables | Open vxlan port with iptables - command: /sbin/iptables -I INPUT 1 -p udp --dport 4789 -j ACCEPT -m comment --comment "vxlan" + command: /sbin/iptables -I INPUT 1 -p udp --dport 4789 -j ACCEPT -m comment --comment "netplugin vxlan 4789" + when: iptablesrules.stdout.find("netplugin vxlan 4789") == -1 + notify: Save iptables rules + +- name: Netplugin IPtables | Allow from contivh0 + command: /sbin/iptables -I FORWARD 1 -i contivh0 -j ACCEPT -m comment --comment "contivh0 FORWARD input" + when: iptablesrules.stdout.find("contivh0 FORWARD input") == -1 + notify: Save iptables rules + +- name: Netplugin IPtables | Allow to contivh0 + command: /sbin/iptables -I FORWARD 1 -o contivh0 -j ACCEPT -m comment --comment "contivh0 FORWARD output" + when: iptablesrules.stdout.find("contivh0 FORWARD output") == -1 + notify: Save iptables rules + +- name: Netplugin IPtables | Allow from contivh1 + command: /sbin/iptables -I FORWARD 1 -i contivh1 -j ACCEPT -m comment --comment "contivh1 FORWARD input" + when: iptablesrules.stdout.find("contivh1 FORWARD input") == -1 + notify: Save iptables rules + +- name: Netplugin IPtables | Allow to contivh1 + command: /sbin/iptables -I FORWARD 1 -o contivh1 -j ACCEPT -m comment --comment "contivh1 FORWARD output" + when: iptablesrules.stdout.find("contivh1 FORWARD output") == -1 + notify: Save 
iptables rules + +- name: Netplugin IPtables | Allow dns + command: /sbin/iptables -I INPUT 1 -p udp --dport 53 -j ACCEPT -m comment --comment "contiv dns" + when: iptablesrules.stdout.find("contiv dns") == -1 + notify: Save iptables rules diff --git a/roles/contiv/tasks/packageManagerInstall.yml b/roles/contiv/tasks/packageManagerInstall.yml index 2eff1b85f..e0d48e643 100644 --- a/roles/contiv/tasks/packageManagerInstall.yml +++ b/roles/contiv/tasks/packageManagerInstall.yml @@ -4,9 +4,10 @@ did_install: false - include: pkgMgrInstallers/centos-install.yml - when: ansible_distribution == "CentOS" and not is_atomic + when: (ansible_os_family == "RedHat") and + not is_atomic - name: Package Manager | Set fact saying we did CentOS package install set_fact: did_install: true - when: ansible_distribution == "CentOS" + when: (ansible_os_family == "RedHat") diff --git a/roles/contiv/tasks/pkgMgrInstallers/centos-install.yml b/roles/contiv/tasks/pkgMgrInstallers/centos-install.yml index 51c3d35ac..91e6aadf3 100644 --- a/roles/contiv/tasks/pkgMgrInstallers/centos-install.yml +++ b/roles/contiv/tasks/pkgMgrInstallers/centos-install.yml @@ -1,13 +1,13 @@ --- -- name: PkgMgr CentOS | Install net-tools pkg for route +- name: PkgMgr RHEL/CentOS | Install net-tools pkg for route yum: pkg=net-tools state=latest -- name: PkgMgr CentOS | Get openstack kilo rpm +- name: PkgMgr RHEL/CentOS | Get openstack ocata rpm get_url: - url: https://repos.fedorapeople.org/repos/openstack/openstack-kilo/rdo-release-kilo-2.noarch.rpm - dest: /tmp/rdo-release-kilo-2.noarch.rpm + url: https://repos.fedorapeople.org/repos/openstack/openstack-ocata/rdo-release-ocata-2.noarch.rpm + dest: /tmp/rdo-release-ocata-2.noarch.rpm validate_certs: False environment: http_proxy: "{{ http_proxy|default('') }}" @@ -16,15 +16,15 @@ tags: - ovs_install -- name: PkgMgr CentOS | Install openstack kilo rpm - yum: name=/tmp/rdo-release-kilo-2.noarch.rpm state=present +- name: PkgMgr RHEL/CentOS | Install openstack ocata rpm + yum: name=/tmp/rdo-release-ocata-2.noarch.rpm state=present tags: - ovs_install -- name: PkgMgr CentOS | Install ovs +- name: PkgMgr RHEL/CentOS | Install ovs yum: - pkg=openvswitch - state=latest + pkg=openvswitch-2.5.0-2.el7.x86_64 + state=present environment: http_proxy: "{{ http_proxy|default('') }}" https_proxy: "{{ https_proxy|default('') }}" diff --git a/roles/contiv/templates/netplugin.j2 b/roles/contiv/templates/netplugin.j2 index f3d26c037..a4928cc3d 100644 --- a/roles/contiv/templates/netplugin.j2 +++ b/roles/contiv/templates/netplugin.j2 @@ -1,9 +1,7 @@ {% if contiv_encap_mode == "vlan" %} NETPLUGIN_ARGS='-vlan-if {{ netplugin_interface }} -ctrl-ip {{ netplugin_ctrl_ip }} -plugin-mode kubernetes -cluster-store etcd://{{ etcd_url }}' {% endif %} -{# Note: Commenting out vxlan encap mode support until it is fully supported {% if contiv_encap_mode == "vxlan" %} -NETPLUGIN_ARGS='-vtep-ip {{ netplugin_ctrl_ip }} -e {{contiv_encap_mode}} -ctrl-ip {{ netplugin_ctrl_ip }} -plugin-mode kubernetes -cluster-store etcd://{{ etcd_url }}' +NETPLUGIN_ARGS='-vtep-ip {{ netplugin_ctrl_ip }} -ctrl-ip {{ netplugin_ctrl_ip }} -plugin-mode kubernetes -cluster-store etcd://{{ etcd_url }}' {% endif %} -#} diff --git a/roles/contiv_auth_proxy/README.md b/roles/contiv_auth_proxy/README.md new file mode 100644 index 000000000..287b6c148 --- /dev/null +++ b/roles/contiv_auth_proxy/README.md @@ -0,0 +1,29 @@ +Role Name +========= + +Role to install Contiv API Proxy and UI + +Requirements +------------ + +Docker needs to be installed to 
run the auth proxy container. + +Role Variables +-------------- + +auth_proxy_image specifies the image with version tag to be used to spin up the auth proxy container. +auth_proxy_cert, auth_proxy_key specify files to use for the proxy server certificates. +auth_proxy_port is the host port and auth_proxy_datastore the cluster data store address. + +Dependencies +------------ + +docker + +Example Playbook +---------------- + +- hosts: netplugin-node + become: true + roles: + - { role: auth_proxy, auth_proxy_port: 10000, auth_proxy_datastore: etcd://netmaster:22379 } diff --git a/roles/contiv_auth_proxy/defaults/main.yml b/roles/contiv_auth_proxy/defaults/main.yml new file mode 100644 index 000000000..4e637a947 --- /dev/null +++ b/roles/contiv_auth_proxy/defaults/main.yml @@ -0,0 +1,11 @@ +--- +auth_proxy_image: "contiv/auth_proxy:1.0.0-beta.2" +auth_proxy_port: 10000 +contiv_certs: "/var/contiv/certs" +cluster_store: "{{ hostvars[groups['masters'][0]]['ansible_' + netmaster_interface].ipv4.address }}:22379" +auth_proxy_cert: "{{ contiv_certs }}/auth_proxy_cert.pem" +auth_proxy_key: "{{ contiv_certs }}/auth_proxy_key.pem" +auth_proxy_datastore: "{{ cluster_store }}" +auth_proxy_binaries: "/var/contiv_cache" +auth_proxy_local_install: False +auth_proxy_rule_comment: "Contiv auth proxy service" diff --git a/roles/contiv_auth_proxy/files/auth-proxy.service b/roles/contiv_auth_proxy/files/auth-proxy.service new file mode 100644 index 000000000..7cd2edff1 --- /dev/null +++ b/roles/contiv_auth_proxy/files/auth-proxy.service @@ -0,0 +1,13 @@ +[Unit] +Description=Contiv Proxy and UI +After=auditd.service systemd-user-sessions.service time-sync.target docker.service + +[Service] +ExecStart=/usr/bin/auth_proxy.sh start +ExecStop=/usr/bin/auth_proxy.sh stop +KillMode=control-group +Restart=on-failure +RestartSec=10 + +[Install] +WantedBy=multi-user.target diff --git a/roles/contiv_auth_proxy/handlers/main.yml b/roles/contiv_auth_proxy/handlers/main.yml new file mode 100644 index 000000000..9cb9bea49 --- /dev/null +++ b/roles/contiv_auth_proxy/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for auth_proxy diff --git a/roles/contiv_auth_proxy/tasks/cleanup.yml b/roles/contiv_auth_proxy/tasks/cleanup.yml new file mode 100644 index 000000000..a29659cc9 --- /dev/null +++ b/roles/contiv_auth_proxy/tasks/cleanup.yml @@ -0,0 +1,10 @@ +--- + +- name: stop auth-proxy container + service: name=auth-proxy state=stopped + +- name: cleanup iptables for auth proxy + shell: iptables -D INPUT -p tcp --dport {{ item }} -j ACCEPT -m comment --comment "{{ auth_proxy_rule_comment }} ({{ item }})" + become: true + with_items: + - "{{ auth_proxy_port }}" diff --git a/roles/contiv_auth_proxy/tasks/main.yml b/roles/contiv_auth_proxy/tasks/main.yml new file mode 100644 index 000000000..74e7bf794 --- /dev/null +++ b/roles/contiv_auth_proxy/tasks/main.yml @@ -0,0 +1,37 @@ +--- +# tasks file for auth_proxy +- name: setup iptables for auth proxy + shell: > + ( iptables -L INPUT | grep "{{ auth_proxy_rule_comment }} ({{ item }})" ) || \ + iptables -I INPUT 1 -p tcp --dport {{ item }} -j ACCEPT -m comment --comment "{{ auth_proxy_rule_comment }} ({{ item }})" + become: true + with_items: + - "{{ auth_proxy_port }}" + +# Load the auth-proxy-image from local tar. 
Ignore any errors to handle the +# case where the image is not built in +- name: copy auth-proxy image + copy: src={{ auth_proxy_binaries }}/auth-proxy-image.tar dest=/tmp/auth-proxy-image.tar + when: auth_proxy_local_install == True + +- name: load auth-proxy image + shell: docker load -i /tmp/auth-proxy-image.tar + when: auth_proxy_local_install == True + +- name: create cert folder for proxy + file: path=/var/contiv/certs state=directory + +- name: copy shell script for starting auth-proxy + template: src=auth_proxy.j2 dest=/usr/bin/auth_proxy.sh mode=u=rwx,g=rx,o=rx + +- name: copy cert for starting auth-proxy + copy: src=cert.pem dest=/var/contiv/certs/auth_proxy_cert.pem mode=u=rw,g=r,o=r + +- name: copy key for starting auth-proxy + copy: src=key.pem dest=/var/contiv/certs/auth_proxy_key.pem mode=u=rw,g=r,o=r + +- name: copy systemd units for auth-proxy + copy: src=auth-proxy.service dest=/etc/systemd/system/auth-proxy.service + +- name: start auth-proxy container + systemd: name=auth-proxy daemon_reload=yes state=started enabled=yes diff --git a/roles/contiv_auth_proxy/templates/auth_proxy.j2 b/roles/contiv_auth_proxy/templates/auth_proxy.j2 new file mode 100644 index 000000000..e82e5b4ab --- /dev/null +++ b/roles/contiv_auth_proxy/templates/auth_proxy.j2 @@ -0,0 +1,36 @@ +#!/bin/bash + +usage="$0 start/stop" +if [ $# -ne 1 ]; then + echo USAGE: $usage + exit 1 +fi + +case $1 in +start) + set -e + + /usr/bin/docker run --rm \ + -p 10000:{{ auth_proxy_port }} \ + --net=host --name=auth-proxy \ + -e NO_NETMASTER_STARTUP_CHECK=1 \ + -v /var/contiv:/var/contiv \ + {{ auth_proxy_image }} \ + --tls-key-file={{ auth_proxy_key }} \ + --tls-certificate={{ auth_proxy_cert }} \ + --data-store-address={{ auth_proxy_datastore }} \ + --netmaster-address={{ service_vip }}:9999 \ + --listen-address=:10000 + ;; + +stop) + # don't stop on error + /usr/bin/docker stop auth-proxy + /usr/bin/docker rm -f -v auth-proxy + ;; + +*) + echo USAGE: $usage + exit 1 + ;; +esac diff --git a/roles/contiv_auth_proxy/tests/inventory b/roles/contiv_auth_proxy/tests/inventory new file mode 100644 index 000000000..d18580b3c --- /dev/null +++ b/roles/contiv_auth_proxy/tests/inventory @@ -0,0 +1 @@ +localhost
\ No newline at end of file diff --git a/roles/contiv_auth_proxy/tests/test.yml b/roles/contiv_auth_proxy/tests/test.yml new file mode 100644 index 000000000..2af3250cd --- /dev/null +++ b/roles/contiv_auth_proxy/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - auth_proxy diff --git a/roles/contiv_auth_proxy/vars/main.yml b/roles/contiv_auth_proxy/vars/main.yml new file mode 100644 index 000000000..9032766c4 --- /dev/null +++ b/roles/contiv_auth_proxy/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for auth_proxy diff --git a/roles/contiv_facts/defaults/main.yaml b/roles/contiv_facts/defaults/main.yaml index a6c08fa63..7b8150954 100644 --- a/roles/contiv_facts/defaults/main.yaml +++ b/roles/contiv_facts/defaults/main.yaml @@ -8,3 +8,6 @@ bin_dir: /usr/bin ansible_temp_dir: /tmp/.ansible/files source_type: packageManager + +# Whether or not to also install and enable the Contiv auth_proxy +contiv_enable_auth_proxy: false diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml index fa2f44609..586aebb11 100644 --- a/roles/etcd/tasks/main.yml +++ b/roles/etcd/tasks/main.yml @@ -122,7 +122,8 @@ - include_role: name: etcd_common - tasks_from: etcdctl.yml + vars: + r_etcd_common_action: drop_etcdctl when: openshift_etcd_etcdctl_profile | default(true) | bool - name: Set fact etcd_service_status_changed diff --git a/roles/etcd/tasks/system_container.yml b/roles/etcd/tasks/system_container.yml index 72ffadbd2..a01df66b3 100644 --- a/roles/etcd/tasks/system_container.yml +++ b/roles/etcd/tasks/system_container.yml @@ -15,6 +15,63 @@ {%- endif -%} {% endfor -%} +- name: Check etcd system container package + command: > + atomic containers list --no-trunc -a -f container=etcd -f backend=ostree + register: etcd_result + +- name: Unmask etcd service + systemd: + name: etcd + state: stopped + enabled: no + masked: no + daemon_reload: yes + register: task_result + failed_when: task_result|failed and 'could not' not in task_result.msg|lower + when: "'etcd' not in etcd_result.stdout" + +- name: Disable etcd_container + systemd: + name: etcd_container + state: stopped + enabled: no + daemon_reload: yes + register: task_result + failed_when: task_result|failed and 'could not' not in task_result.msg|lower + +- name: Remove etcd_container.service + file: + path: /etc/systemd/system/etcd_container.service + state: absent + +- name: Systemd reload configuration + systemd: name=etcd_container daemon_reload=yes + +- name: Check for previous etcd data store + stat: + path: "{{ etcd_data_dir }}/member/" + register: src_datastore + +- name: Check for etcd system container data store + stat: + path: "{{ r_etcd_common_system_container_host_dir }}/etcd.etcd/member" + register: dest_datastore + +- name: Ensure that etcd system container data dirs exist + file: path="{{ item }}" state=directory + with_items: + - "{{ r_etcd_common_system_container_host_dir }}/etc" + - "{{ r_etcd_common_system_container_host_dir }}/etcd.etcd" + +- name: Copy etcd data store + command: > + cp -a {{ etcd_data_dir }}/member + {{ r_etcd_common_system_container_host_dir }}/etcd.etcd/member + when: + - src_datastore.stat.exists + - not dest_datastore.stat.exists + - name: Install or Update Etcd system container package oc_atomic_container: name: etcd @@ -35,3 +92,5 @@ - ETCD_PEER_CA_FILE={{ etcd_system_container_conf_dir }}/ca.crt - ETCD_PEER_CERT_FILE={{ etcd_system_container_conf_dir }}/peer.crt - ETCD_PEER_KEY_FILE={{ etcd_system_container_conf_dir }}/peer.key + - ETCD_TRUSTED_CA_FILE={{ 
etcd_system_container_conf_dir }}/ca.crt + - ETCD_PEER_TRUSTED_CA_FILE={{ etcd_system_container_conf_dir }}/ca.crt diff --git a/roles/etcd_common/defaults/main.yml b/roles/etcd_common/defaults/main.yml index de0739c3e..b5b38c1e1 100644 --- a/roles/etcd_common/defaults/main.yml +++ b/roles/etcd_common/defaults/main.yml @@ -1,10 +1,21 @@ --- +# Default action when calling this role +r_etcd_common_action: noop +r_etcd_common_backup_tag: '' +r_etcd_common_backup_sufix_name: '' + # runc, docker, host r_etcd_common_etcd_runtime: "docker" r_etcd_common_embedded_etcd: false +# etcd run on a host => use etcdctl command directly +# etcd run as a docker container => use docker exec +# etcd run as a runc container => use runc exec +r_etcd_common_etcdctl_command: "{{ 'etcdctl' if r_etcd_common_etcd_runtime == 'host' or r_etcd_common_embedded_etcd | bool else 'docker exec etcd_container etcdctl' if r_etcd_common_etcd_runtime == 'docker' else 'runc exec etcd etcdctl' }}" + # etcd server vars -etcd_conf_dir: "{{ '/etc/etcd' if r_etcd_common_etcd_runtime != 'runc' else '/var/lib/etcd/etcd.etcd/etc' }}" +etcd_conf_dir: '/etc/etcd' +r_etcd_common_system_container_host_dir: /var/lib/etcd/etcd.etcd etcd_system_container_conf_dir: /var/lib/etcd/etc etcd_conf_file: "{{ etcd_conf_dir }}/etcd.conf" etcd_ca_file: "{{ etcd_conf_dir }}/ca.crt" @@ -41,7 +52,7 @@ etcd_is_containerized: False etcd_is_thirdparty: False # etcd dir vars -etcd_data_dir: "{{ '/var/lib/origin/openshift.local.etcd' if r_etcd_common_embedded_etcd | bool else '/var/lib/etcd/' }}" +etcd_data_dir: "{{ '/var/lib/origin/openshift.local.etcd' if r_etcd_common_embedded_etcd | bool else '/var/lib/etcd/' if openshift.common.etcd_runtime != 'runc' else '/var/lib/etcd/etcd.etcd/' }}" # etcd ports and protocols etcd_client_port: 2379 diff --git a/roles/etcd_upgrade/tasks/backup.yml b/roles/etcd_common/tasks/backup.yml index 1ea6fc59f..4a4832275 100644 --- a/roles/etcd_upgrade/tasks/backup.yml +++ b/roles/etcd_common/tasks/backup.yml @@ -1,15 +1,11 @@ --- -# INPUT r_etcd_backup_sufix_name -# INPUT r_etcd_backup_tag -# OUTPUT r_etcd_upgrade_backup_complete - set_fact: - # ORIGIN etcd_data_dir etcd_common.defaults - l_etcd_backup_dir: "{{ etcd_data_dir }}/openshift-backup-{{ r_etcd_backup_tag | default('') }}{{ r_etcd_backup_sufix_name }}" + l_etcd_backup_dir: "{{ etcd_data_dir }}/openshift-backup-{{ r_etcd_common_backup_tag }}{{ r_etcd_common_backup_sufix_name }}" # TODO: replace shell module with command and update later checks - name: Check available disk space for etcd backup shell: df --output=avail -k {{ etcd_data_dir }} | tail -n 1 - register: avail_disk + register: l_avail_disk # AUDIT:changed_when: `false` because we are only inspecting # state, not manipulating anything changed_when: false @@ -17,8 +13,8 @@ # TODO: replace shell module with command and update later checks - name: Check current etcd disk usage shell: du --exclude='*openshift-backup*' -k {{ etcd_data_dir }} | tail -n 1 | cut -f1 - register: etcd_disk_usage - when: r_etcd_upgrade_embedded_etcd | bool + register: l_etcd_disk_usage + when: r_etcd_common_embedded_etcd | bool # AUDIT:changed_when: `false` because we are only inspecting # state, not manipulating anything changed_when: false @@ -26,9 +22,9 @@ - name: Abort if insufficient disk space for etcd backup fail: msg: > - {{ etcd_disk_usage.stdout }} Kb disk space required for etcd backup, - {{ avail_disk.stdout }} Kb available. 
- when: (r_etcd_upgrade_embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int) + {{ l_etcd_disk_usage.stdout }} Kb disk space required for etcd backup, + {{ l_avail_disk.stdout }} Kb available. + when: (r_etcd_common_embedded_etcd | bool) and (l_etcd_disk_usage.stdout|int > l_avail_disk.stdout|int) # For non containerized and non embedded we should have the correct version of # etcd installed already. So don't do anything. @@ -37,17 +33,22 @@ # # For embedded non containerized we need to ensure we have the latest version # etcd on the host. +- name: Detecting Atomic Host Operating System + stat: + path: /run/ostree-booted + register: l_ostree_booted + - name: Install latest etcd for embedded package: name: etcd state: latest when: - - r_etcd_upgrade_embedded_etcd | bool + - r_etcd_common_embedded_etcd | bool - not l_ostree_booted.stat.exists | bool - name: Generate etcd backup command: > - {{ etcdctl_command }} backup --data-dir={{ etcd_data_dir }} + {{ r_etcd_common_etcdctl_command }} backup --data-dir={{ etcd_data_dir }} --backup-dir={{ l_etcd_backup_dir }} # According to the docs change you can simply copy snap/db @@ -55,16 +56,16 @@ - name: Check for v3 data store stat: path: "{{ etcd_data_dir }}/member/snap/db" - register: v3_db + register: l_v3_db - name: Copy etcd v3 data store command: > cp -a {{ etcd_data_dir }}/member/snap/db {{ l_etcd_backup_dir }}/member/snap/ - when: v3_db.stat.exists + when: l_v3_db.stat.exists - set_fact: - r_etcd_upgrade_backup_complete: True + r_etcd_common_backup_complete: True - name: Display location of etcd backup debug: diff --git a/roles/etcd_common/tasks/etcdctl.yml b/roles/etcd_common/tasks/drop_etcdctl.yml index 6cb456677..6cb456677 100644 --- a/roles/etcd_common/tasks/etcdctl.yml +++ b/roles/etcd_common/tasks/drop_etcdctl.yml diff --git a/roles/etcd_common/tasks/main.yml b/roles/etcd_common/tasks/main.yml new file mode 100644 index 000000000..6ed87e6c7 --- /dev/null +++ b/roles/etcd_common/tasks/main.yml @@ -0,0 +1,9 @@ +--- +- name: Fail if invalid r_etcd_common_action provided + fail: + msg: "etcd_common role can only be called with 'noop' or 'backup' or 'drop_etcdctl'" + when: r_etcd_common_action not in ['noop', 'backup', 'drop_etcdctl'] + +- name: Include main action task file + include: "{{ r_etcd_common_action }}.yml" + when: r_etcd_common_action != "noop" diff --git a/roles/etcd_migrate/README.md b/roles/etcd_migrate/README.md new file mode 100644 index 000000000..369e78ff2 --- /dev/null +++ b/roles/etcd_migrate/README.md @@ -0,0 +1,53 @@ +Role Name +========= + +Offline etcd migration of data from v2 to v3 + +Requirements +------------ + +It is expected all consumers of the etcd data are not accessing the data. +Otherwise the migrated data can be out-of-sync with the v2 and can result in unhealthy etcd cluster. + +The role itself is responsible for: +- checking etcd cluster health and raft status before the migration +- checking of presence of any v3 data (in that case the migration is stopped) +- migration of v2 data to v3 data (including attaching leases of keys prefixed with "/kubernetes.io/events" and "/kubernetes.io/masterleases" string) +- validation of migrated data (all v2 keys and in v3 keys and are set to the identical value) + +The migration itself requires an etcd member to be down in the process. Once the migration is done, the etcd member is started. 
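The per-member procedure this role wraps is roughly the following; the sketch assumes a host-installed (non-containerized) etcd, the default data directory, and a placeholder ETCD_PEER address, and restates the commands used by roles/etcd_migrate/tasks/migrate.yml:

```sh
# Per-member v2 -> v3 migration sketch (host etcd and default data dir assumed).
systemctl stop etcd
ETCDCTL_API=3 etcdctl migrate --data-dir=/var/lib/etcd/   # transform v2 keys into v3
systemctl start etcd
# Re-attach leases to keys that previously carried TTLs (ETCD_PEER is a placeholder):
oadm migrate etcd-ttl --cert /etc/etcd/peer.crt --key /etc/etcd/peer.key \
  --cacert /etc/etcd/ca.crt --etcd-address "https://${ETCD_PEER}:2379" \
  --ttl-keys-prefix /kubernetes.io/events --lease-duration 1h
```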
+ +Role Variables +-------------- + +TBD + +Dependencies +------------ + +- etcd_common +- lib_utils + +Example Playbook +---------------- + +```yaml +- name: Migrate etcd data from v2 to v3 + hosts: oo_etcd_to_config + gather_facts: no + tasks: + - include_role: + name: openshift_etcd_migrate + vars: + etcd_peer: "{{ ansible_default_ipv4.address }}" +``` + +License +------- + +Apache License, Version 2.0 + +Author Information +------------------ + +Jan Chaloupka (jchaloup@redhat.com) diff --git a/roles/etcd_migrate/defaults/main.yml b/roles/etcd_migrate/defaults/main.yml new file mode 100644 index 000000000..05cf41fbb --- /dev/null +++ b/roles/etcd_migrate/defaults/main.yml @@ -0,0 +1,3 @@ +--- +# Default action when calling this role, choices: check, migrate, configure +r_etcd_migrate_action: migrate diff --git a/roles/etcd_migrate/meta/main.yml b/roles/etcd_migrate/meta/main.yml new file mode 100644 index 000000000..f3cabbef6 --- /dev/null +++ b/roles/etcd_migrate/meta/main.yml @@ -0,0 +1,17 @@ +--- +galaxy_info: + author: Jan Chaloupka + description: Etcd migration + company: Red Hat, Inc. + license: Apache License, Version 2.0 + min_ansible_version: 2.1 + platforms: + - name: EL + versions: + - 7 + categories: + - cloud + - system +dependencies: +- { role: etcd_common } +- { role: lib_utils } diff --git a/roles/etcd_migrate/tasks/check.yml b/roles/etcd_migrate/tasks/check.yml new file mode 100644 index 000000000..2f07713bc --- /dev/null +++ b/roles/etcd_migrate/tasks/check.yml @@ -0,0 +1,55 @@ +--- +# Check the cluster is healthy +- include: check_cluster_health.yml + +# Check if the member has v3 data already +# Run the migration only if the data are v2 +- name: Check if there are any v3 data + command: > + etcdctl --cert {{ etcd_peer_cert_file }} --key {{ etcd_peer_key_file }} --cacert {{ etcd_peer_ca_file }} --endpoints 'https://{{ etcd_peer }}:2379' get "" --from-key --keys-only -w json --limit 1 + environment: + ETCDCTL_API: 3 + register: l_etcdctl_output + +- fail: + msg: "Unable to get a number of v3 keys" + when: l_etcdctl_output.rc != 0 + +- fail: + msg: "The etcd has at least one v3 key" + when: "'count' in (l_etcdctl_output.stdout | from_json) and (l_etcdctl_output.stdout | from_json).count != 0" + + +# TODO(jchaloup): once the until loop can be used over include/block, +# remove the repetive code +# - until loop not supported over include statement (nor block) +# https://github.com/ansible/ansible/issues/17098 +# - with_items not supported over block + +# Check the cluster status for the first time +- include: check_cluster_status.yml + +# Check the cluster status for the second time +- block: + - debug: + msg: "l_etcd_cluster_status_ok: {{ l_etcd_cluster_status_ok }}" + - name: Wait a while before another check + pause: + seconds: 5 + when: not l_etcd_cluster_status_ok | bool + + - include: check_cluster_status.yml + when: not l_etcd_cluster_status_ok | bool + + +# Check the cluster status for the third time +- block: + - debug: + msg: "l_etcd_cluster_status_ok: {{ l_etcd_cluster_status_ok }}" + - name: Wait a while before another check + pause: + seconds: 5 + when: not l_etcd_cluster_status_ok | bool + + - include: check_cluster_status.yml + when: not l_etcd_cluster_status_ok | bool diff --git a/roles/etcd_migrate/tasks/check_cluster_health.yml b/roles/etcd_migrate/tasks/check_cluster_health.yml new file mode 100644 index 000000000..1abd6a32f --- /dev/null +++ b/roles/etcd_migrate/tasks/check_cluster_health.yml @@ -0,0 +1,23 @@ +--- +- name: Check cluster health + 
command: > + etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt --endpoint https://{{ etcd_peer }}:2379 cluster-health + register: etcd_cluster_health + changed_when: false + failed_when: false + +- name: Assume a member is not healthy + set_fact: + etcd_member_healthy: false + +- name: Get member item health status + set_fact: + etcd_member_healthy: true + with_items: "{{ etcd_cluster_health.stdout_lines }}" + when: "(etcd_peer in item) and ('is healthy' in item)" + +- name: Check the etcd cluster health + # TODO(jchaloup): should we fail or ask user if he wants to continue? Or just wait until the cluster is healthy? + fail: + msg: "Etcd member {{ etcd_peer }} is not healthy" + when: not etcd_member_healthy diff --git a/roles/etcd_migrate/tasks/check_cluster_status.yml b/roles/etcd_migrate/tasks/check_cluster_status.yml new file mode 100644 index 000000000..90fe385c1 --- /dev/null +++ b/roles/etcd_migrate/tasks/check_cluster_status.yml @@ -0,0 +1,32 @@ +--- +# etcd_ip originates from etcd_common role +- name: Check cluster status + command: > + etcdctl --cert /etc/etcd/peer.crt --key /etc/etcd/peer.key --cacert /etc/etcd/ca.crt --endpoints 'https://{{ etcd_peer }}:2379' -w json endpoint status + environment: + ETCDCTL_API: 3 + register: l_etcd_cluster_status + +- name: Retrieve raftIndex + set_fact: + etcd_member_raft_index: "{{ (l_etcd_cluster_status.stdout | from_json)[0]['Status']['raftIndex'] }}" + +- block: + # http://docs.ansible.com/ansible/playbooks_filters.html#extracting-values-from-containers + - name: Group all raftIndices into a list + set_fact: + etcd_members_raft_indices: "{{ groups['oo_etcd_to_config'] | map('extract', hostvars, 'etcd_member_raft_index') | list | unique }}" + + - name: Check the minimum and the maximum of raftIndices is at most 1 + set_fact: + etcd_members_raft_indices_diff: "{{ ((etcd_members_raft_indices | max | int) - (etcd_members_raft_indices | min | int)) | int }}" + + - debug: + msg: "Raft indices difference: {{ etcd_members_raft_indices_diff }}" + + when: inventory_hostname in groups.oo_etcd_to_config[0] + +# The cluster raft status is ok if the difference of the max and min raft index is at most 1 +- name: capture the status + set_fact: + l_etcd_cluster_status_ok: "{{ hostvars[groups.oo_etcd_to_config[0]]['etcd_members_raft_indices_diff'] | int < 2 }}" diff --git a/roles/etcd_migrate/tasks/configure.yml b/roles/etcd_migrate/tasks/configure.yml new file mode 100644 index 000000000..a305d5bf3 --- /dev/null +++ b/roles/etcd_migrate/tasks/configure.yml @@ -0,0 +1,13 @@ +--- +- name: Configure master to use etcd3 storage backend + yedit: + src: /etc/origin/master/master-config.yaml + key: "{{ item.key }}" + value: "{{ item.value }}" + with_items: + - key: kubernetesMasterConfig.apiServerArguments.storage-backend + value: + - etcd3 + - key: kubernetesMasterConfig.apiServerArguments.storage-media-type + value: + - application/vnd.kubernetes.protobuf diff --git a/roles/etcd_migrate/tasks/main.yml b/roles/etcd_migrate/tasks/main.yml new file mode 100644 index 000000000..409b0b613 --- /dev/null +++ b/roles/etcd_migrate/tasks/main.yml @@ -0,0 +1,25 @@ +--- +- name: Fail if invalid r_etcd_migrate_action provided + fail: + msg: "etcd_migrate role can only be called with 'check' or 'migrate' or 'configure'" + when: r_etcd_migrate_action not in ['check', 'migrate', 'configure'] + +- name: Include main action task file + include: "{{ r_etcd_migrate_action }}.yml" + +# 2. 
migrate v2 datadir into v3: +# ETCDCTL_API=3 ./etcdctl migrate --data-dir=${data_dir} --no-ttl +# backup the etcd datadir first +# Provide a way for an operator to specify transformer + +# 3. re-configure OpenShift master at /etc/origin/master/master-config.yml +# set storage-backend to “etcd3” +# 4. we could leave the master restart to current logic (there is already the code ready (single vs. HA master)) + +# Run +# etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt --endpoint https://172.16.186.45:2379 cluster-health +# to check the cluster health (from the etcdctl.sh aliases file) + +# Another assumption: +# - in order to migrate all etcd v2 data into v3, we need to shut down the cluster (let's verify that on Wednesday meeting) +# - diff --git a/roles/etcd_migrate/tasks/migrate.yml b/roles/etcd_migrate/tasks/migrate.yml new file mode 100644 index 000000000..cb479b0cc --- /dev/null +++ b/roles/etcd_migrate/tasks/migrate.yml @@ -0,0 +1,53 @@ +--- +# Should this be run in a serial manner? +- set_fact: + l_etcd_service: "{{ 'etcd_container' if openshift.common.is_containerized else 'etcd' }}" + +- name: Disable etcd members + service: + name: "{{ l_etcd_service }}" + state: stopped + +# Should we skip all TTL keys? https://bugzilla.redhat.com/show_bug.cgi?id=1389773 +- name: Migrate etcd data + command: > + etcdctl migrate --data-dir={{ etcd_data_dir }} + environment: + ETCDCTL_API: 3 + register: l_etcdctl_migrate + +# TODO(jchaloup): If any of the members fails, we need to restore all members to v2 from the pre-migrate backup +- name: Check the etcd v2 data are correctly migrated + fail: + msg: "Failed to migrate a member" + when: "'finished transforming keys' not in l_etcdctl_migrate.stdout" + +# TODO(jchaloup): start the etcd on a different port so noone can access it +# Once the validation is done +- name: Enable etcd member + service: + name: "{{ l_etcd_service }}" + state: started + +- name: Re-introduce leases (as a replacement for key TTLs) + command: > + oadm migrate etcd-ttl \ + --cert {{ etcd_peer_cert_file }} \ + --key {{ etcd_peer_key_file }} \ + --cacert {{ etcd_peer_ca_file }} \ + --etcd-address 'https://{{ etcd_peer }}:2379' \ + --ttl-keys-prefix {{ item }} \ + --lease-duration 1h + environment: + ETCDCTL_API: 3 + with_items: + - "/kubernetes.io/events" + - "/kubernetes.io/masterleases" + +- set_fact: + r_etcd_migrate_success: true + +- name: Enable etcd member + service: + name: "{{ l_etcd_service }}" + state: started diff --git a/roles/etcd_server_certificates/tasks/main.yml b/roles/etcd_server_certificates/tasks/main.yml index 3ac7f3401..4795188a6 100644 --- a/roles/etcd_server_certificates/tasks/main.yml +++ b/roles/etcd_server_certificates/tasks/main.yml @@ -5,11 +5,14 @@ - name: Check status of etcd certificates stat: - path: "{{ etcd_cert_config_dir }}/{{ item }}" + path: "{{ item }}" with_items: - - "{{ etcd_cert_prefix }}server.crt" - - "{{ etcd_cert_prefix }}peer.crt" - - "{{ etcd_cert_prefix }}ca.crt" + - "{{ etcd_cert_config_dir }}/{{ etcd_cert_prefix }}server.crt" + - "{{ etcd_cert_config_dir }}/{{ etcd_cert_prefix }}peer.crt" + - "{{ etcd_cert_config_dir }}/{{ etcd_cert_prefix }}ca.crt" + - "{{ etcd_system_container_cert_config_dir }}/{{ etcd_cert_prefix }}server.crt" + - "{{ etcd_system_container_cert_config_dir }}/{{ etcd_cert_prefix }}peer.crt" + - "{{ etcd_system_container_cert_config_dir }}/{{ etcd_cert_prefix }}ca.crt" register: g_etcd_server_cert_stat_result when: not etcd_certificates_redeploy | 
default(false) | bool @@ -132,8 +135,11 @@ - name: Ensure certificate directory exists file: - path: "{{ etcd_cert_config_dir }}" + path: "{{ item }}" state: directory + with_items: + - "{{ etcd_cert_config_dir }}" + - "{{ etcd_system_container_cert_config_dir }}" when: etcd_server_certs_missing | bool - name: Unarchive cert tarball @@ -164,15 +170,28 @@ - name: Ensure ca directory exists file: - path: "{{ etcd_ca_dir }}" + path: "{{ item }}" state: directory + with_items: + - "{{ etcd_ca_dir }}" + - "{{ etcd_system_container_cert_config_dir }}/ca" when: etcd_server_certs_missing | bool -- name: Unarchive etcd ca cert tarballs +- name: Unarchive cert tarball for the system container + unarchive: + src: "{{ g_etcd_server_mktemp.stdout }}/{{ etcd_cert_subdir }}.tgz" + dest: "{{ etcd_system_container_cert_config_dir }}" + when: + - etcd_server_certs_missing | bool + - r_etcd_common_etcd_runtime == 'runc' + +- name: Unarchive etcd ca cert tarballs for the system container unarchive: src: "{{ g_etcd_server_mktemp.stdout }}/{{ etcd_ca_name }}.tgz" - dest: "{{ etcd_ca_dir }}" - when: etcd_server_certs_missing | bool + dest: "{{ etcd_system_container_cert_config_dir }}/ca" + when: + - etcd_server_certs_missing | bool + - r_etcd_common_etcd_runtime == 'runc' - name: Delete temporary directory local_action: file path="{{ g_etcd_server_mktemp.stdout }}" state=absent diff --git a/roles/etcd_upgrade/defaults/main.yml b/roles/etcd_upgrade/defaults/main.yml index b61bf526c..61bbba225 100644 --- a/roles/etcd_upgrade/defaults/main.yml +++ b/roles/etcd_upgrade/defaults/main.yml @@ -1,9 +1,3 @@ --- r_etcd_upgrade_action: upgrade r_etcd_upgrade_mechanism: rpm -r_etcd_upgrade_embedded_etcd: false -r_etcd_common_embedded_etcd: "{{ r_etcd_upgrade_embedded_etcd }}" -# etcd run on a host => use etcdctl command directly -# etcd run as a docker container => use docker exec -# etcd run as a runc container => use runc exec -etcdctl_command: "{{ 'etcdctl' if r_etcd_common_etcd_runtime == 'host' or r_etcd_upgrade_embedded_etcd | bool else 'docker exec etcd_container etcdctl' if r_etcd_common_etcd_runtime == 'docker' else 'runc exec etcd etcdctl' }}" diff --git a/roles/etcd_upgrade/tasks/main.yml b/roles/etcd_upgrade/tasks/main.yml index 5178c14e3..129c69d6b 100644 --- a/roles/etcd_upgrade/tasks/main.yml +++ b/roles/etcd_upgrade/tasks/main.yml @@ -2,9 +2,9 @@ # INPUT r_etcd_upgrade_action - name: Fail if invalid etcd_upgrade_action provided fail: - msg: "etcd_upgrade role can only be called with 'upgrade' or 'backup'" + msg: "etcd_upgrade role can only be called with 'upgrade'" when: - - r_etcd_upgrade_action not in ['upgrade', 'backup'] + - r_etcd_upgrade_action not in ['upgrade'] - name: Detecting Atomic Host Operating System stat: diff --git a/roles/lib_openshift/library/oc_adm_ca_server_cert.py b/roles/lib_openshift/library/oc_adm_ca_server_cert.py index 3974cc4dd..1b73bfd0e 100644 --- a/roles/lib_openshift/library/oc_adm_ca_server_cert.py +++ b/roles/lib_openshift/library/oc_adm_ca_server_cert.py @@ -1097,10 +1097,6 @@ class OpenShiftCLI(object): elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501 cmds.extend(['-n', self.namespace]) - rval = {} - results = '' - err = None - if self.verbose: print(' '.join(cmds)) @@ -1110,34 +1106,26 @@ class OpenShiftCLI(object): returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex) rval = {"returncode": returncode, - "results": results, "cmd": ' '.join(cmds)} - if returncode == 0: - if 
output: - if output_type == 'json': - try: - rval['results'] = json.loads(stdout) - except ValueError as verr: - if "No JSON object could be decoded" in verr.args: - err = verr.args - elif output_type == 'raw': - rval['results'] = stdout - - if self.verbose: - print("STDOUT: {0}".format(stdout)) - print("STDERR: {0}".format(stderr)) - - if err: - rval.update({"err": err, - "stderr": stderr, - "stdout": stdout, - "cmd": cmds}) + if output_type == 'json': + rval['results'] = {} + if output and stdout: + try: + rval['results'] = json.loads(stdout) + except ValueError as verr: + if "No JSON object could be decoded" in verr.args: + rval['err'] = verr.args + elif output_type == 'raw': + rval['results'] = stdout if output else '' - else: + if self.verbose: + print("STDOUT: {0}".format(stdout)) + print("STDERR: {0}".format(stderr)) + + if 'err' in rval or returncode != 0: rval.update({"stderr": stderr, - "stdout": stdout, - "results": {}}) + "stdout": stdout}) return rval diff --git a/roles/lib_openshift/library/oc_adm_manage_node.py b/roles/lib_openshift/library/oc_adm_manage_node.py index 320eac17e..b09321a5b 100644 --- a/roles/lib_openshift/library/oc_adm_manage_node.py +++ b/roles/lib_openshift/library/oc_adm_manage_node.py @@ -1083,10 +1083,6 @@ class OpenShiftCLI(object): elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501 cmds.extend(['-n', self.namespace]) - rval = {} - results = '' - err = None - if self.verbose: print(' '.join(cmds)) @@ -1096,34 +1092,26 @@ class OpenShiftCLI(object): returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex) rval = {"returncode": returncode, - "results": results, "cmd": ' '.join(cmds)} - if returncode == 0: - if output: - if output_type == 'json': - try: - rval['results'] = json.loads(stdout) - except ValueError as verr: - if "No JSON object could be decoded" in verr.args: - err = verr.args - elif output_type == 'raw': - rval['results'] = stdout - - if self.verbose: - print("STDOUT: {0}".format(stdout)) - print("STDERR: {0}".format(stderr)) - - if err: - rval.update({"err": err, - "stderr": stderr, - "stdout": stdout, - "cmd": cmds}) + if output_type == 'json': + rval['results'] = {} + if output and stdout: + try: + rval['results'] = json.loads(stdout) + except ValueError as verr: + if "No JSON object could be decoded" in verr.args: + rval['err'] = verr.args + elif output_type == 'raw': + rval['results'] = stdout if output else '' - else: + if self.verbose: + print("STDOUT: {0}".format(stdout)) + print("STDERR: {0}".format(stderr)) + + if 'err' in rval or returncode != 0: rval.update({"stderr": stderr, - "stdout": stdout, - "results": {}}) + "stdout": stdout}) return rval diff --git a/roles/lib_openshift/library/oc_adm_policy_group.py b/roles/lib_openshift/library/oc_adm_policy_group.py index f9658d6e1..221ef5094 100644 --- a/roles/lib_openshift/library/oc_adm_policy_group.py +++ b/roles/lib_openshift/library/oc_adm_policy_group.py @@ -1069,10 +1069,6 @@ class OpenShiftCLI(object): elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501 cmds.extend(['-n', self.namespace]) - rval = {} - results = '' - err = None - if self.verbose: print(' '.join(cmds)) @@ -1082,34 +1078,26 @@ class OpenShiftCLI(object): returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex) rval = {"returncode": returncode, - "results": results, "cmd": ' '.join(cmds)} - if returncode == 0: - if output: - if output_type 
== 'json': - try: - rval['results'] = json.loads(stdout) - except ValueError as verr: - if "No JSON object could be decoded" in verr.args: - err = verr.args - elif output_type == 'raw': - rval['results'] = stdout - - if self.verbose: - print("STDOUT: {0}".format(stdout)) - print("STDERR: {0}".format(stderr)) - - if err: - rval.update({"err": err, - "stderr": stderr, - "stdout": stdout, - "cmd": cmds}) + if output_type == 'json': + rval['results'] = {} + if output and stdout: + try: + rval['results'] = json.loads(stdout) + except ValueError as verr: + if "No JSON object could be decoded" in verr.args: + rval['err'] = verr.args + elif output_type == 'raw': + rval['results'] = stdout if output else '' - else: + if self.verbose: + print("STDOUT: {0}".format(stdout)) + print("STDERR: {0}".format(stderr)) + + if 'err' in rval or returncode != 0: rval.update({"stderr": stderr, - "stdout": stdout, - "results": {}}) + "stdout": stdout}) return rval diff --git a/roles/lib_openshift/library/oc_adm_policy_user.py b/roles/lib_openshift/library/oc_adm_policy_user.py index 0bdfd0bad..071562875 100644 --- a/roles/lib_openshift/library/oc_adm_policy_user.py +++ b/roles/lib_openshift/library/oc_adm_policy_user.py @@ -1069,10 +1069,6 @@ class OpenShiftCLI(object): elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501 cmds.extend(['-n', self.namespace]) - rval = {} - results = '' - err = None - if self.verbose: print(' '.join(cmds)) @@ -1082,34 +1078,26 @@ class OpenShiftCLI(object): returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex) rval = {"returncode": returncode, - "results": results, "cmd": ' '.join(cmds)} - if returncode == 0: - if output: - if output_type == 'json': - try: - rval['results'] = json.loads(stdout) - except ValueError as verr: - if "No JSON object could be decoded" in verr.args: - err = verr.args - elif output_type == 'raw': - rval['results'] = stdout - - if self.verbose: - print("STDOUT: {0}".format(stdout)) - print("STDERR: {0}".format(stderr)) - - if err: - rval.update({"err": err, - "stderr": stderr, - "stdout": stdout, - "cmd": cmds}) + if output_type == 'json': + rval['results'] = {} + if output and stdout: + try: + rval['results'] = json.loads(stdout) + except ValueError as verr: + if "No JSON object could be decoded" in verr.args: + rval['err'] = verr.args + elif output_type == 'raw': + rval['results'] = stdout if output else '' - else: + if self.verbose: + print("STDOUT: {0}".format(stdout)) + print("STDERR: {0}".format(stderr)) + + if 'err' in rval or returncode != 0: rval.update({"stderr": stderr, - "stdout": stdout, - "results": {}}) + "stdout": stdout}) return rval diff --git a/roles/lib_openshift/library/oc_adm_registry.py b/roles/lib_openshift/library/oc_adm_registry.py index df0e40d20..bf2650460 100644 --- a/roles/lib_openshift/library/oc_adm_registry.py +++ b/roles/lib_openshift/library/oc_adm_registry.py @@ -1187,10 +1187,6 @@ class OpenShiftCLI(object): elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501 cmds.extend(['-n', self.namespace]) - rval = {} - results = '' - err = None - if self.verbose: print(' '.join(cmds)) @@ -1200,34 +1196,26 @@ class OpenShiftCLI(object): returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex) rval = {"returncode": returncode, - "results": results, "cmd": ' '.join(cmds)} - if returncode == 0: - if output: - if output_type == 'json': - try: - rval['results'] = 
json.loads(stdout) - except ValueError as verr: - if "No JSON object could be decoded" in verr.args: - err = verr.args - elif output_type == 'raw': - rval['results'] = stdout - - if self.verbose: - print("STDOUT: {0}".format(stdout)) - print("STDERR: {0}".format(stderr)) - - if err: - rval.update({"err": err, - "stderr": stderr, - "stdout": stdout, - "cmd": cmds}) + if output_type == 'json': + rval['results'] = {} + if output and stdout: + try: + rval['results'] = json.loads(stdout) + except ValueError as verr: + if "No JSON object could be decoded" in verr.args: + rval['err'] = verr.args + elif output_type == 'raw': + rval['results'] = stdout if output else '' - else: + if self.verbose: + print("STDOUT: {0}".format(stdout)) + print("STDERR: {0}".format(stderr)) + + if 'err' in rval or returncode != 0: rval.update({"stderr": stderr, - "stdout": stdout, - "results": {}}) + "stdout": stdout}) return rval diff --git a/roles/lib_openshift/library/oc_adm_router.py b/roles/lib_openshift/library/oc_adm_router.py index 8af8cb196..a2b7d12c0 100644 --- a/roles/lib_openshift/library/oc_adm_router.py +++ b/roles/lib_openshift/library/oc_adm_router.py @@ -1212,10 +1212,6 @@ class OpenShiftCLI(object): elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501 cmds.extend(['-n', self.namespace]) - rval = {} - results = '' - err = None - if self.verbose: print(' '.join(cmds)) @@ -1225,34 +1221,26 @@ class OpenShiftCLI(object): returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex) rval = {"returncode": returncode, - "results": results, "cmd": ' '.join(cmds)} - if returncode == 0: - if output: - if output_type == 'json': - try: - rval['results'] = json.loads(stdout) - except ValueError as verr: - if "No JSON object could be decoded" in verr.args: - err = verr.args - elif output_type == 'raw': - rval['results'] = stdout - - if self.verbose: - print("STDOUT: {0}".format(stdout)) - print("STDERR: {0}".format(stderr)) - - if err: - rval.update({"err": err, - "stderr": stderr, - "stdout": stdout, - "cmd": cmds}) + if output_type == 'json': + rval['results'] = {} + if output and stdout: + try: + rval['results'] = json.loads(stdout) + except ValueError as verr: + if "No JSON object could be decoded" in verr.args: + rval['err'] = verr.args + elif output_type == 'raw': + rval['results'] = stdout if output else '' - else: + if self.verbose: + print("STDOUT: {0}".format(stdout)) + print("STDERR: {0}".format(stderr)) + + if 'err' in rval or returncode != 0: rval.update({"stderr": stderr, - "stdout": stdout, - "results": {}}) + "stdout": stdout}) return rval diff --git a/roles/lib_openshift/library/oc_atomic_container.py b/roles/lib_openshift/library/oc_atomic_container.py index 1e017a576..955c6313e 100644 --- a/roles/lib_openshift/library/oc_atomic_container.py +++ b/roles/lib_openshift/library/oc_atomic_container.py @@ -65,8 +65,11 @@ options: # -*- -*- -*- Begin included fragment: ansible/oc_atomic_container.py -*- -*- -*- -# pylint: disable=wrong-import-position,too-many-branches,invalid-name +# pylint: disable=wrong-import-position,too-many-branches,invalid-name,no-name-in-module, import-error import json + +from distutils.version import StrictVersion + from ansible.module_utils.basic import AnsibleModule @@ -191,9 +194,15 @@ def main(): ) # Verify that the platform supports atomic command - rc, _, err = module.run_command('atomic -v', check_rc=False) + rc, version_out, err = module.run_command('rpm -q --queryformat "%{VERSION}\n" 
atomic', check_rc=False) if rc != 0: module.fail_json(msg="Error in running atomic command", err=err) + # This module requires atomic version 1.17.2 or later + atomic_version = StrictVersion(version_out.replace('\n', '')) + if atomic_version < StrictVersion('1.17.2'): + module.fail_json( + msg="atomic version 1.17.2+ is required", + err=str(atomic_version)) try: core(module) diff --git a/roles/lib_openshift/library/oc_clusterrole.py b/roles/lib_openshift/library/oc_clusterrole.py index 3ed0d65dc..289f08b83 100644 --- a/roles/lib_openshift/library/oc_clusterrole.py +++ b/roles/lib_openshift/library/oc_clusterrole.py @@ -1061,10 +1061,6 @@ class OpenShiftCLI(object): elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501 cmds.extend(['-n', self.namespace]) - rval = {} - results = '' - err = None - if self.verbose: print(' '.join(cmds)) @@ -1074,34 +1070,26 @@ class OpenShiftCLI(object): returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex) rval = {"returncode": returncode, - "results": results, "cmd": ' '.join(cmds)} - if returncode == 0: - if output: - if output_type == 'json': - try: - rval['results'] = json.loads(stdout) - except ValueError as verr: - if "No JSON object could be decoded" in verr.args: - err = verr.args - elif output_type == 'raw': - rval['results'] = stdout - - if self.verbose: - print("STDOUT: {0}".format(stdout)) - print("STDERR: {0}".format(stderr)) - - if err: - rval.update({"err": err, - "stderr": stderr, - "stdout": stdout, - "cmd": cmds}) + if output_type == 'json': + rval['results'] = {} + if output and stdout: + try: + rval['results'] = json.loads(stdout) + except ValueError as verr: + if "No JSON object could be decoded" in verr.args: + rval['err'] = verr.args + elif output_type == 'raw': + rval['results'] = stdout if output else '' - else: + if self.verbose: + print("STDOUT: {0}".format(stdout)) + print("STDERR: {0}".format(stderr)) + + if 'err' in rval or returncode != 0: rval.update({"stderr": stderr, - "stdout": stdout, - "results": {}}) + "stdout": stdout}) return rval diff --git a/roles/lib_openshift/library/oc_configmap.py b/roles/lib_openshift/library/oc_configmap.py index 5c8ed48d2..7cd29215f 100644 --- a/roles/lib_openshift/library/oc_configmap.py +++ b/roles/lib_openshift/library/oc_configmap.py @@ -1067,10 +1067,6 @@ class OpenShiftCLI(object): elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501 cmds.extend(['-n', self.namespace]) - rval = {} - results = '' - err = None - if self.verbose: print(' '.join(cmds)) @@ -1080,34 +1076,26 @@ class OpenShiftCLI(object): returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex) rval = {"returncode": returncode, - "results": results, "cmd": ' '.join(cmds)} - if returncode == 0: - if output: - if output_type == 'json': - try: - rval['results'] = json.loads(stdout) - except ValueError as verr: - if "No JSON object could be decoded" in verr.args: - err = verr.args - elif output_type == 'raw': - rval['results'] = stdout - - if self.verbose: - print("STDOUT: {0}".format(stdout)) - print("STDERR: {0}".format(stderr)) - - if err: - rval.update({"err": err, - "stderr": stderr, - "stdout": stdout, - "cmd": cmds}) + if output_type == 'json': + rval['results'] = {} + if output and stdout: + try: + rval['results'] = json.loads(stdout) + except ValueError as verr: + if "No JSON object could be decoded" in verr.args: + rval['err'] = verr.args + elif 
output_type == 'raw': + rval['results'] = stdout if output else '' - else: + if self.verbose: + print("STDOUT: {0}".format(stdout)) + print("STDERR: {0}".format(stderr)) + + if 'err' in rval or returncode != 0: rval.update({"stderr": stderr, - "stdout": stdout, - "results": {}}) + "stdout": stdout}) return rval diff --git a/roles/lib_openshift/library/oc_edit.py b/roles/lib_openshift/library/oc_edit.py index f3b6d552d..5b11f45ba 100644 --- a/roles/lib_openshift/library/oc_edit.py +++ b/roles/lib_openshift/library/oc_edit.py @@ -1111,10 +1111,6 @@ class OpenShiftCLI(object): elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501 cmds.extend(['-n', self.namespace]) - rval = {} - results = '' - err = None - if self.verbose: print(' '.join(cmds)) @@ -1124,34 +1120,26 @@ class OpenShiftCLI(object): returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex) rval = {"returncode": returncode, - "results": results, "cmd": ' '.join(cmds)} - if returncode == 0: - if output: - if output_type == 'json': - try: - rval['results'] = json.loads(stdout) - except ValueError as verr: - if "No JSON object could be decoded" in verr.args: - err = verr.args - elif output_type == 'raw': - rval['results'] = stdout - - if self.verbose: - print("STDOUT: {0}".format(stdout)) - print("STDERR: {0}".format(stderr)) - - if err: - rval.update({"err": err, - "stderr": stderr, - "stdout": stdout, - "cmd": cmds}) + if output_type == 'json': + rval['results'] = {} + if output and stdout: + try: + rval['results'] = json.loads(stdout) + except ValueError as verr: + if "No JSON object could be decoded" in verr.args: + rval['err'] = verr.args + elif output_type == 'raw': + rval['results'] = stdout if output else '' - else: + if self.verbose: + print("STDOUT: {0}".format(stdout)) + print("STDERR: {0}".format(stderr)) + + if 'err' in rval or returncode != 0: rval.update({"stderr": stderr, - "stdout": stdout, - "results": {}}) + "stdout": stdout}) return rval diff --git a/roles/lib_openshift/library/oc_env.py b/roles/lib_openshift/library/oc_env.py index c6421128a..d3834ce0c 100644 --- a/roles/lib_openshift/library/oc_env.py +++ b/roles/lib_openshift/library/oc_env.py @@ -1078,10 +1078,6 @@ class OpenShiftCLI(object): elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501 cmds.extend(['-n', self.namespace]) - rval = {} - results = '' - err = None - if self.verbose: print(' '.join(cmds)) @@ -1091,34 +1087,26 @@ class OpenShiftCLI(object): returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex) rval = {"returncode": returncode, - "results": results, "cmd": ' '.join(cmds)} - if returncode == 0: - if output: - if output_type == 'json': - try: - rval['results'] = json.loads(stdout) - except ValueError as verr: - if "No JSON object could be decoded" in verr.args: - err = verr.args - elif output_type == 'raw': - rval['results'] = stdout - - if self.verbose: - print("STDOUT: {0}".format(stdout)) - print("STDERR: {0}".format(stderr)) - - if err: - rval.update({"err": err, - "stderr": stderr, - "stdout": stdout, - "cmd": cmds}) + if output_type == 'json': + rval['results'] = {} + if output and stdout: + try: + rval['results'] = json.loads(stdout) + except ValueError as verr: + if "No JSON object could be decoded" in verr.args: + rval['err'] = verr.args + elif output_type == 'raw': + rval['results'] = stdout if output else '' - else: + if self.verbose: + print("STDOUT: 
{0}".format(stdout)) + print("STDERR: {0}".format(stderr)) + + if 'err' in rval or returncode != 0: rval.update({"stderr": stderr, - "stdout": stdout, - "results": {}}) + "stdout": stdout}) return rval diff --git a/roles/lib_openshift/library/oc_group.py b/roles/lib_openshift/library/oc_group.py index a791c29af..0d751fe28 100644 --- a/roles/lib_openshift/library/oc_group.py +++ b/roles/lib_openshift/library/oc_group.py @@ -1051,10 +1051,6 @@ class OpenShiftCLI(object): elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501 cmds.extend(['-n', self.namespace]) - rval = {} - results = '' - err = None - if self.verbose: print(' '.join(cmds)) @@ -1064,34 +1060,26 @@ class OpenShiftCLI(object): returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex) rval = {"returncode": returncode, - "results": results, "cmd": ' '.join(cmds)} - if returncode == 0: - if output: - if output_type == 'json': - try: - rval['results'] = json.loads(stdout) - except ValueError as verr: - if "No JSON object could be decoded" in verr.args: - err = verr.args - elif output_type == 'raw': - rval['results'] = stdout - - if self.verbose: - print("STDOUT: {0}".format(stdout)) - print("STDERR: {0}".format(stderr)) - - if err: - rval.update({"err": err, - "stderr": stderr, - "stdout": stdout, - "cmd": cmds}) + if output_type == 'json': + rval['results'] = {} + if output and stdout: + try: + rval['results'] = json.loads(stdout) + except ValueError as verr: + if "No JSON object could be decoded" in verr.args: + rval['err'] = verr.args + elif output_type == 'raw': + rval['results'] = stdout if output else '' - else: + if self.verbose: + print("STDOUT: {0}".format(stdout)) + print("STDERR: {0}".format(stderr)) + + if 'err' in rval or returncode != 0: rval.update({"stderr": stderr, - "stdout": stdout, - "results": {}}) + "stdout": stdout}) return rval diff --git a/roles/lib_openshift/library/oc_image.py b/roles/lib_openshift/library/oc_image.py index bbc123ce0..3a6ba3e56 100644 --- a/roles/lib_openshift/library/oc_image.py +++ b/roles/lib_openshift/library/oc_image.py @@ -1070,10 +1070,6 @@ class OpenShiftCLI(object): elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501 cmds.extend(['-n', self.namespace]) - rval = {} - results = '' - err = None - if self.verbose: print(' '.join(cmds)) @@ -1083,34 +1079,26 @@ class OpenShiftCLI(object): returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex) rval = {"returncode": returncode, - "results": results, "cmd": ' '.join(cmds)} - if returncode == 0: - if output: - if output_type == 'json': - try: - rval['results'] = json.loads(stdout) - except ValueError as verr: - if "No JSON object could be decoded" in verr.args: - err = verr.args - elif output_type == 'raw': - rval['results'] = stdout - - if self.verbose: - print("STDOUT: {0}".format(stdout)) - print("STDERR: {0}".format(stderr)) - - if err: - rval.update({"err": err, - "stderr": stderr, - "stdout": stdout, - "cmd": cmds}) + if output_type == 'json': + rval['results'] = {} + if output and stdout: + try: + rval['results'] = json.loads(stdout) + except ValueError as verr: + if "No JSON object could be decoded" in verr.args: + rval['err'] = verr.args + elif output_type == 'raw': + rval['results'] = stdout if output else '' - else: + if self.verbose: + print("STDOUT: {0}".format(stdout)) + print("STDERR: {0}".format(stderr)) + + if 'err' in rval or returncode != 0: 
rval.update({"stderr": stderr, - "stdout": stdout, - "results": {}}) + "stdout": stdout}) return rval diff --git a/roles/lib_openshift/library/oc_label.py b/roles/lib_openshift/library/oc_label.py index cd1afd0d2..5db036b23 100644 --- a/roles/lib_openshift/library/oc_label.py +++ b/roles/lib_openshift/library/oc_label.py @@ -1087,10 +1087,6 @@ class OpenShiftCLI(object): elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501 cmds.extend(['-n', self.namespace]) - rval = {} - results = '' - err = None - if self.verbose: print(' '.join(cmds)) @@ -1100,34 +1096,26 @@ class OpenShiftCLI(object): returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex) rval = {"returncode": returncode, - "results": results, "cmd": ' '.join(cmds)} - if returncode == 0: - if output: - if output_type == 'json': - try: - rval['results'] = json.loads(stdout) - except ValueError as verr: - if "No JSON object could be decoded" in verr.args: - err = verr.args - elif output_type == 'raw': - rval['results'] = stdout - - if self.verbose: - print("STDOUT: {0}".format(stdout)) - print("STDERR: {0}".format(stderr)) - - if err: - rval.update({"err": err, - "stderr": stderr, - "stdout": stdout, - "cmd": cmds}) + if output_type == 'json': + rval['results'] = {} + if output and stdout: + try: + rval['results'] = json.loads(stdout) + except ValueError as verr: + if "No JSON object could be decoded" in verr.args: + rval['err'] = verr.args + elif output_type == 'raw': + rval['results'] = stdout if output else '' - else: + if self.verbose: + print("STDOUT: {0}".format(stdout)) + print("STDERR: {0}".format(stderr)) + + if 'err' in rval or returncode != 0: rval.update({"stderr": stderr, - "stdout": stdout, - "results": {}}) + "stdout": stdout}) return rval diff --git a/roles/lib_openshift/library/oc_obj.py b/roles/lib_openshift/library/oc_obj.py index 215723cc8..9b0c0e0e4 100644 --- a/roles/lib_openshift/library/oc_obj.py +++ b/roles/lib_openshift/library/oc_obj.py @@ -90,9 +90,9 @@ options: required: false default: str aliases: [] - all_namespace: + all_namespaces: description: - - The namespace where the object lives. + - Search in all namespaces for the object. 
required: false default: false aliases: [] @@ -1090,10 +1090,6 @@ class OpenShiftCLI(object): elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501 cmds.extend(['-n', self.namespace]) - rval = {} - results = '' - err = None - if self.verbose: print(' '.join(cmds)) @@ -1103,34 +1099,26 @@ class OpenShiftCLI(object): returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex) rval = {"returncode": returncode, - "results": results, "cmd": ' '.join(cmds)} - if returncode == 0: - if output: - if output_type == 'json': - try: - rval['results'] = json.loads(stdout) - except ValueError as verr: - if "No JSON object could be decoded" in verr.args: - err = verr.args - elif output_type == 'raw': - rval['results'] = stdout - - if self.verbose: - print("STDOUT: {0}".format(stdout)) - print("STDERR: {0}".format(stderr)) - - if err: - rval.update({"err": err, - "stderr": stderr, - "stdout": stdout, - "cmd": cmds}) + if output_type == 'json': + rval['results'] = {} + if output and stdout: + try: + rval['results'] = json.loads(stdout) + except ValueError as verr: + if "No JSON object could be decoded" in verr.args: + rval['err'] = verr.args + elif output_type == 'raw': + rval['results'] = stdout if output else '' + + if self.verbose: + print("STDOUT: {0}".format(stdout)) + print("STDERR: {0}".format(stderr)) - else: + if 'err' in rval or returncode != 0: rval.update({"stderr": stderr, - "stdout": stdout, - "results": {}}) + "stdout": stdout}) return rval @@ -1473,7 +1461,12 @@ class OCObject(OpenShiftCLI): def delete(self): '''delete the object''' - return self._delete(self.kind, name=self.name, selector=self.selector) + results = self._delete(self.kind, name=self.name, selector=self.selector) + if (results['returncode'] != 0 and 'stderr' in results and + '\"{}\" not found'.format(self.name) in results['stderr']): + results['returncode'] = 0 + + return results def create(self, files=None, content=None): ''' @@ -1557,7 +1550,8 @@ class OCObject(OpenShiftCLI): if state == 'absent': # verify its not in our results if (params['name'] is not None or params['selector'] is not None) and \ - (len(api_rval['results']) == 0 or len(api_rval['results'][0].get('items', [])) == 0): + (len(api_rval['results']) == 0 or \ + ('items' in api_rval['results'][0] and len(api_rval['results'][0]['items']) == 0)): return {'changed': False, 'state': state} if check_mode: diff --git a/roles/lib_openshift/library/oc_objectvalidator.py b/roles/lib_openshift/library/oc_objectvalidator.py index 358ef5130..130521761 100644 --- a/roles/lib_openshift/library/oc_objectvalidator.py +++ b/roles/lib_openshift/library/oc_objectvalidator.py @@ -1022,10 +1022,6 @@ class OpenShiftCLI(object): elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501 cmds.extend(['-n', self.namespace]) - rval = {} - results = '' - err = None - if self.verbose: print(' '.join(cmds)) @@ -1035,34 +1031,26 @@ class OpenShiftCLI(object): returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex) rval = {"returncode": returncode, - "results": results, "cmd": ' '.join(cmds)} - if returncode == 0: - if output: - if output_type == 'json': - try: - rval['results'] = json.loads(stdout) - except ValueError as verr: - if "No JSON object could be decoded" in verr.args: - err = verr.args - elif output_type == 'raw': - rval['results'] = stdout - - if self.verbose: - print("STDOUT: {0}".format(stdout)) - print("STDERR: 
{0}".format(stderr)) - - if err: - rval.update({"err": err, - "stderr": stderr, - "stdout": stdout, - "cmd": cmds}) + if output_type == 'json': + rval['results'] = {} + if output and stdout: + try: + rval['results'] = json.loads(stdout) + except ValueError as verr: + if "No JSON object could be decoded" in verr.args: + rval['err'] = verr.args + elif output_type == 'raw': + rval['results'] = stdout if output else '' - else: + if self.verbose: + print("STDOUT: {0}".format(stdout)) + print("STDERR: {0}".format(stderr)) + + if 'err' in rval or returncode != 0: rval.update({"stderr": stderr, - "stdout": stdout, - "results": {}}) + "stdout": stdout}) return rval diff --git a/roles/lib_openshift/library/oc_process.py b/roles/lib_openshift/library/oc_process.py index 025b846c6..c6568d520 100644 --- a/roles/lib_openshift/library/oc_process.py +++ b/roles/lib_openshift/library/oc_process.py @@ -1079,10 +1079,6 @@ class OpenShiftCLI(object): elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501 cmds.extend(['-n', self.namespace]) - rval = {} - results = '' - err = None - if self.verbose: print(' '.join(cmds)) @@ -1092,34 +1088,26 @@ class OpenShiftCLI(object): returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex) rval = {"returncode": returncode, - "results": results, "cmd": ' '.join(cmds)} - if returncode == 0: - if output: - if output_type == 'json': - try: - rval['results'] = json.loads(stdout) - except ValueError as verr: - if "No JSON object could be decoded" in verr.args: - err = verr.args - elif output_type == 'raw': - rval['results'] = stdout - - if self.verbose: - print("STDOUT: {0}".format(stdout)) - print("STDERR: {0}".format(stderr)) - - if err: - rval.update({"err": err, - "stderr": stderr, - "stdout": stdout, - "cmd": cmds}) + if output_type == 'json': + rval['results'] = {} + if output and stdout: + try: + rval['results'] = json.loads(stdout) + except ValueError as verr: + if "No JSON object could be decoded" in verr.args: + rval['err'] = verr.args + elif output_type == 'raw': + rval['results'] = stdout if output else '' - else: + if self.verbose: + print("STDOUT: {0}".format(stdout)) + print("STDERR: {0}".format(stderr)) + + if 'err' in rval or returncode != 0: rval.update({"stderr": stderr, - "stdout": stdout, - "results": {}}) + "stdout": stdout}) return rval diff --git a/roles/lib_openshift/library/oc_project.py b/roles/lib_openshift/library/oc_project.py index 05dfddab8..a78bc06d2 100644 --- a/roles/lib_openshift/library/oc_project.py +++ b/roles/lib_openshift/library/oc_project.py @@ -1076,10 +1076,6 @@ class OpenShiftCLI(object): elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501 cmds.extend(['-n', self.namespace]) - rval = {} - results = '' - err = None - if self.verbose: print(' '.join(cmds)) @@ -1089,34 +1085,26 @@ class OpenShiftCLI(object): returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex) rval = {"returncode": returncode, - "results": results, "cmd": ' '.join(cmds)} - if returncode == 0: - if output: - if output_type == 'json': - try: - rval['results'] = json.loads(stdout) - except ValueError as verr: - if "No JSON object could be decoded" in verr.args: - err = verr.args - elif output_type == 'raw': - rval['results'] = stdout - - if self.verbose: - print("STDOUT: {0}".format(stdout)) - print("STDERR: {0}".format(stderr)) - - if err: - rval.update({"err": err, - "stderr": stderr, - "stdout": 
stdout, - "cmd": cmds}) + if output_type == 'json': + rval['results'] = {} + if output and stdout: + try: + rval['results'] = json.loads(stdout) + except ValueError as verr: + if "No JSON object could be decoded" in verr.args: + rval['err'] = verr.args + elif output_type == 'raw': + rval['results'] = stdout if output else '' - else: + if self.verbose: + print("STDOUT: {0}".format(stdout)) + print("STDERR: {0}".format(stderr)) + + if 'err' in rval or returncode != 0: rval.update({"stderr": stderr, - "stdout": stdout, - "results": {}}) + "stdout": stdout}) return rval diff --git a/roles/lib_openshift/library/oc_pvc.py b/roles/lib_openshift/library/oc_pvc.py index d7de4964c..a88639bfc 100644 --- a/roles/lib_openshift/library/oc_pvc.py +++ b/roles/lib_openshift/library/oc_pvc.py @@ -1071,10 +1071,6 @@ class OpenShiftCLI(object): elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501 cmds.extend(['-n', self.namespace]) - rval = {} - results = '' - err = None - if self.verbose: print(' '.join(cmds)) @@ -1084,34 +1080,26 @@ class OpenShiftCLI(object): returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex) rval = {"returncode": returncode, - "results": results, "cmd": ' '.join(cmds)} - if returncode == 0: - if output: - if output_type == 'json': - try: - rval['results'] = json.loads(stdout) - except ValueError as verr: - if "No JSON object could be decoded" in verr.args: - err = verr.args - elif output_type == 'raw': - rval['results'] = stdout - - if self.verbose: - print("STDOUT: {0}".format(stdout)) - print("STDERR: {0}".format(stderr)) - - if err: - rval.update({"err": err, - "stderr": stderr, - "stdout": stdout, - "cmd": cmds}) + if output_type == 'json': + rval['results'] = {} + if output and stdout: + try: + rval['results'] = json.loads(stdout) + except ValueError as verr: + if "No JSON object could be decoded" in verr.args: + rval['err'] = verr.args + elif output_type == 'raw': + rval['results'] = stdout if output else '' - else: + if self.verbose: + print("STDOUT: {0}".format(stdout)) + print("STDERR: {0}".format(stderr)) + + if 'err' in rval or returncode != 0: rval.update({"stderr": stderr, - "stdout": stdout, - "results": {}}) + "stdout": stdout}) return rval diff --git a/roles/lib_openshift/library/oc_route.py b/roles/lib_openshift/library/oc_route.py index 3090b4cad..0c0bc9386 100644 --- a/roles/lib_openshift/library/oc_route.py +++ b/roles/lib_openshift/library/oc_route.py @@ -1121,10 +1121,6 @@ class OpenShiftCLI(object): elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501 cmds.extend(['-n', self.namespace]) - rval = {} - results = '' - err = None - if self.verbose: print(' '.join(cmds)) @@ -1134,34 +1130,26 @@ class OpenShiftCLI(object): returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex) rval = {"returncode": returncode, - "results": results, "cmd": ' '.join(cmds)} - if returncode == 0: - if output: - if output_type == 'json': - try: - rval['results'] = json.loads(stdout) - except ValueError as verr: - if "No JSON object could be decoded" in verr.args: - err = verr.args - elif output_type == 'raw': - rval['results'] = stdout - - if self.verbose: - print("STDOUT: {0}".format(stdout)) - print("STDERR: {0}".format(stderr)) - - if err: - rval.update({"err": err, - "stderr": stderr, - "stdout": stdout, - "cmd": cmds}) + if output_type == 'json': + rval['results'] = {} + if output and stdout: + try: + 
rval['results'] = json.loads(stdout) + except ValueError as verr: + if "No JSON object could be decoded" in verr.args: + rval['err'] = verr.args + elif output_type == 'raw': + rval['results'] = stdout if output else '' - else: + if self.verbose: + print("STDOUT: {0}".format(stdout)) + print("STDERR: {0}".format(stderr)) + + if 'err' in rval or returncode != 0: rval.update({"stderr": stderr, - "stdout": stdout, - "results": {}}) + "stdout": stdout}) return rval diff --git a/roles/lib_openshift/library/oc_scale.py b/roles/lib_openshift/library/oc_scale.py index 6a505fb6b..f112b6dd0 100644 --- a/roles/lib_openshift/library/oc_scale.py +++ b/roles/lib_openshift/library/oc_scale.py @@ -1065,10 +1065,6 @@ class OpenShiftCLI(object): elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501 cmds.extend(['-n', self.namespace]) - rval = {} - results = '' - err = None - if self.verbose: print(' '.join(cmds)) @@ -1078,34 +1074,26 @@ class OpenShiftCLI(object): returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex) rval = {"returncode": returncode, - "results": results, "cmd": ' '.join(cmds)} - if returncode == 0: - if output: - if output_type == 'json': - try: - rval['results'] = json.loads(stdout) - except ValueError as verr: - if "No JSON object could be decoded" in verr.args: - err = verr.args - elif output_type == 'raw': - rval['results'] = stdout - - if self.verbose: - print("STDOUT: {0}".format(stdout)) - print("STDERR: {0}".format(stderr)) - - if err: - rval.update({"err": err, - "stderr": stderr, - "stdout": stdout, - "cmd": cmds}) + if output_type == 'json': + rval['results'] = {} + if output and stdout: + try: + rval['results'] = json.loads(stdout) + except ValueError as verr: + if "No JSON object could be decoded" in verr.args: + rval['err'] = verr.args + elif output_type == 'raw': + rval['results'] = stdout if output else '' - else: + if self.verbose: + print("STDOUT: {0}".format(stdout)) + print("STDERR: {0}".format(stderr)) + + if 'err' in rval or returncode != 0: rval.update({"stderr": stderr, - "stdout": stdout, - "results": {}}) + "stdout": stdout}) return rval diff --git a/roles/lib_openshift/library/oc_secret.py b/roles/lib_openshift/library/oc_secret.py index db6e682d0..d762e0c38 100644 --- a/roles/lib_openshift/library/oc_secret.py +++ b/roles/lib_openshift/library/oc_secret.py @@ -1117,10 +1117,6 @@ class OpenShiftCLI(object): elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501 cmds.extend(['-n', self.namespace]) - rval = {} - results = '' - err = None - if self.verbose: print(' '.join(cmds)) @@ -1130,34 +1126,26 @@ class OpenShiftCLI(object): returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex) rval = {"returncode": returncode, - "results": results, "cmd": ' '.join(cmds)} - if returncode == 0: - if output: - if output_type == 'json': - try: - rval['results'] = json.loads(stdout) - except ValueError as verr: - if "No JSON object could be decoded" in verr.args: - err = verr.args - elif output_type == 'raw': - rval['results'] = stdout - - if self.verbose: - print("STDOUT: {0}".format(stdout)) - print("STDERR: {0}".format(stderr)) - - if err: - rval.update({"err": err, - "stderr": stderr, - "stdout": stdout, - "cmd": cmds}) + if output_type == 'json': + rval['results'] = {} + if output and stdout: + try: + rval['results'] = json.loads(stdout) + except ValueError as verr: + if "No JSON object could be decoded" 
in verr.args: + rval['err'] = verr.args + elif output_type == 'raw': + rval['results'] = stdout if output else '' - else: + if self.verbose: + print("STDOUT: {0}".format(stdout)) + print("STDERR: {0}".format(stderr)) + + if 'err' in rval or returncode != 0: rval.update({"stderr": stderr, - "stdout": stdout, - "results": {}}) + "stdout": stdout}) return rval diff --git a/roles/lib_openshift/library/oc_service.py b/roles/lib_openshift/library/oc_service.py index 308f45488..769b75e15 100644 --- a/roles/lib_openshift/library/oc_service.py +++ b/roles/lib_openshift/library/oc_service.py @@ -1124,10 +1124,6 @@ class OpenShiftCLI(object): elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501 cmds.extend(['-n', self.namespace]) - rval = {} - results = '' - err = None - if self.verbose: print(' '.join(cmds)) @@ -1137,34 +1133,26 @@ class OpenShiftCLI(object): returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex) rval = {"returncode": returncode, - "results": results, "cmd": ' '.join(cmds)} - if returncode == 0: - if output: - if output_type == 'json': - try: - rval['results'] = json.loads(stdout) - except ValueError as verr: - if "No JSON object could be decoded" in verr.args: - err = verr.args - elif output_type == 'raw': - rval['results'] = stdout - - if self.verbose: - print("STDOUT: {0}".format(stdout)) - print("STDERR: {0}".format(stderr)) - - if err: - rval.update({"err": err, - "stderr": stderr, - "stdout": stdout, - "cmd": cmds}) + if output_type == 'json': + rval['results'] = {} + if output and stdout: + try: + rval['results'] = json.loads(stdout) + except ValueError as verr: + if "No JSON object could be decoded" in verr.args: + rval['err'] = verr.args + elif output_type == 'raw': + rval['results'] = stdout if output else '' - else: + if self.verbose: + print("STDOUT: {0}".format(stdout)) + print("STDERR: {0}".format(stderr)) + + if 'err' in rval or returncode != 0: rval.update({"stderr": stderr, - "stdout": stdout, - "results": {}}) + "stdout": stdout}) return rval diff --git a/roles/lib_openshift/library/oc_serviceaccount.py b/roles/lib_openshift/library/oc_serviceaccount.py index 68c1fc51c..446987eff 100644 --- a/roles/lib_openshift/library/oc_serviceaccount.py +++ b/roles/lib_openshift/library/oc_serviceaccount.py @@ -1063,10 +1063,6 @@ class OpenShiftCLI(object): elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501 cmds.extend(['-n', self.namespace]) - rval = {} - results = '' - err = None - if self.verbose: print(' '.join(cmds)) @@ -1076,34 +1072,26 @@ class OpenShiftCLI(object): returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex) rval = {"returncode": returncode, - "results": results, "cmd": ' '.join(cmds)} - if returncode == 0: - if output: - if output_type == 'json': - try: - rval['results'] = json.loads(stdout) - except ValueError as verr: - if "No JSON object could be decoded" in verr.args: - err = verr.args - elif output_type == 'raw': - rval['results'] = stdout - - if self.verbose: - print("STDOUT: {0}".format(stdout)) - print("STDERR: {0}".format(stderr)) - - if err: - rval.update({"err": err, - "stderr": stderr, - "stdout": stdout, - "cmd": cmds}) + if output_type == 'json': + rval['results'] = {} + if output and stdout: + try: + rval['results'] = json.loads(stdout) + except ValueError as verr: + if "No JSON object could be decoded" in verr.args: + rval['err'] = verr.args + elif output_type == 
'raw': + rval['results'] = stdout if output else '' - else: + if self.verbose: + print("STDOUT: {0}".format(stdout)) + print("STDERR: {0}".format(stderr)) + + if 'err' in rval or returncode != 0: rval.update({"stderr": stderr, - "stdout": stdout, - "results": {}}) + "stdout": stdout}) return rval diff --git a/roles/lib_openshift/library/oc_serviceaccount_secret.py b/roles/lib_openshift/library/oc_serviceaccount_secret.py index ebc5bf8a2..c7eb1986a 100644 --- a/roles/lib_openshift/library/oc_serviceaccount_secret.py +++ b/roles/lib_openshift/library/oc_serviceaccount_secret.py @@ -1063,10 +1063,6 @@ class OpenShiftCLI(object): elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501 cmds.extend(['-n', self.namespace]) - rval = {} - results = '' - err = None - if self.verbose: print(' '.join(cmds)) @@ -1076,34 +1072,26 @@ class OpenShiftCLI(object): returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex) rval = {"returncode": returncode, - "results": results, "cmd": ' '.join(cmds)} - if returncode == 0: - if output: - if output_type == 'json': - try: - rval['results'] = json.loads(stdout) - except ValueError as verr: - if "No JSON object could be decoded" in verr.args: - err = verr.args - elif output_type == 'raw': - rval['results'] = stdout - - if self.verbose: - print("STDOUT: {0}".format(stdout)) - print("STDERR: {0}".format(stderr)) - - if err: - rval.update({"err": err, - "stderr": stderr, - "stdout": stdout, - "cmd": cmds}) + if output_type == 'json': + rval['results'] = {} + if output and stdout: + try: + rval['results'] = json.loads(stdout) + except ValueError as verr: + if "No JSON object could be decoded" in verr.args: + rval['err'] = verr.args + elif output_type == 'raw': + rval['results'] = stdout if output else '' - else: + if self.verbose: + print("STDOUT: {0}".format(stdout)) + print("STDERR: {0}".format(stderr)) + + if 'err' in rval or returncode != 0: rval.update({"stderr": stderr, - "stdout": stdout, - "results": {}}) + "stdout": stdout}) return rval diff --git a/roles/lib_openshift/library/oc_user.py b/roles/lib_openshift/library/oc_user.py index d1a20fddc..3a98693b7 100644 --- a/roles/lib_openshift/library/oc_user.py +++ b/roles/lib_openshift/library/oc_user.py @@ -1123,10 +1123,6 @@ class OpenShiftCLI(object): elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501 cmds.extend(['-n', self.namespace]) - rval = {} - results = '' - err = None - if self.verbose: print(' '.join(cmds)) @@ -1136,34 +1132,26 @@ class OpenShiftCLI(object): returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex) rval = {"returncode": returncode, - "results": results, "cmd": ' '.join(cmds)} - if returncode == 0: - if output: - if output_type == 'json': - try: - rval['results'] = json.loads(stdout) - except ValueError as verr: - if "No JSON object could be decoded" in verr.args: - err = verr.args - elif output_type == 'raw': - rval['results'] = stdout - - if self.verbose: - print("STDOUT: {0}".format(stdout)) - print("STDERR: {0}".format(stderr)) - - if err: - rval.update({"err": err, - "stderr": stderr, - "stdout": stdout, - "cmd": cmds}) + if output_type == 'json': + rval['results'] = {} + if output and stdout: + try: + rval['results'] = json.loads(stdout) + except ValueError as verr: + if "No JSON object could be decoded" in verr.args: + rval['err'] = verr.args + elif output_type == 'raw': + rval['results'] = stdout if output else 
'' - else: + if self.verbose: + print("STDOUT: {0}".format(stdout)) + print("STDERR: {0}".format(stderr)) + + if 'err' in rval or returncode != 0: rval.update({"stderr": stderr, - "stdout": stdout, - "results": {}}) + "stdout": stdout}) return rval diff --git a/roles/lib_openshift/library/oc_version.py b/roles/lib_openshift/library/oc_version.py index 548c9d8e0..939261526 100644 --- a/roles/lib_openshift/library/oc_version.py +++ b/roles/lib_openshift/library/oc_version.py @@ -1035,10 +1035,6 @@ class OpenShiftCLI(object): elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501 cmds.extend(['-n', self.namespace]) - rval = {} - results = '' - err = None - if self.verbose: print(' '.join(cmds)) @@ -1048,34 +1044,26 @@ class OpenShiftCLI(object): returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex) rval = {"returncode": returncode, - "results": results, "cmd": ' '.join(cmds)} - if returncode == 0: - if output: - if output_type == 'json': - try: - rval['results'] = json.loads(stdout) - except ValueError as verr: - if "No JSON object could be decoded" in verr.args: - err = verr.args - elif output_type == 'raw': - rval['results'] = stdout - - if self.verbose: - print("STDOUT: {0}".format(stdout)) - print("STDERR: {0}".format(stderr)) - - if err: - rval.update({"err": err, - "stderr": stderr, - "stdout": stdout, - "cmd": cmds}) + if output_type == 'json': + rval['results'] = {} + if output and stdout: + try: + rval['results'] = json.loads(stdout) + except ValueError as verr: + if "No JSON object could be decoded" in verr.args: + rval['err'] = verr.args + elif output_type == 'raw': + rval['results'] = stdout if output else '' - else: + if self.verbose: + print("STDOUT: {0}".format(stdout)) + print("STDERR: {0}".format(stderr)) + + if 'err' in rval or returncode != 0: rval.update({"stderr": stderr, - "stdout": stdout, - "results": {}}) + "stdout": stdout}) return rval diff --git a/roles/lib_openshift/library/oc_volume.py b/roles/lib_openshift/library/oc_volume.py index 3826cd8e5..41e7d0ab8 100644 --- a/roles/lib_openshift/library/oc_volume.py +++ b/roles/lib_openshift/library/oc_volume.py @@ -1112,10 +1112,6 @@ class OpenShiftCLI(object): elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501 cmds.extend(['-n', self.namespace]) - rval = {} - results = '' - err = None - if self.verbose: print(' '.join(cmds)) @@ -1125,34 +1121,26 @@ class OpenShiftCLI(object): returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex) rval = {"returncode": returncode, - "results": results, "cmd": ' '.join(cmds)} - if returncode == 0: - if output: - if output_type == 'json': - try: - rval['results'] = json.loads(stdout) - except ValueError as verr: - if "No JSON object could be decoded" in verr.args: - err = verr.args - elif output_type == 'raw': - rval['results'] = stdout - - if self.verbose: - print("STDOUT: {0}".format(stdout)) - print("STDERR: {0}".format(stderr)) - - if err: - rval.update({"err": err, - "stderr": stderr, - "stdout": stdout, - "cmd": cmds}) + if output_type == 'json': + rval['results'] = {} + if output and stdout: + try: + rval['results'] = json.loads(stdout) + except ValueError as verr: + if "No JSON object could be decoded" in verr.args: + rval['err'] = verr.args + elif output_type == 'raw': + rval['results'] = stdout if output else '' - else: + if self.verbose: + print("STDOUT: {0}".format(stdout)) + print("STDERR: 
{0}".format(stderr)) + + if 'err' in rval or returncode != 0: rval.update({"stderr": stderr, - "stdout": stdout, - "results": {}}) + "stdout": stdout}) return rval diff --git a/roles/lib_openshift/src/ansible/oc_atomic_container.py b/roles/lib_openshift/src/ansible/oc_atomic_container.py index 1a5ab6869..7b81760df 100644 --- a/roles/lib_openshift/src/ansible/oc_atomic_container.py +++ b/roles/lib_openshift/src/ansible/oc_atomic_container.py @@ -1,8 +1,11 @@ # pylint: skip-file # flake8: noqa -# pylint: disable=wrong-import-position,too-many-branches,invalid-name +# pylint: disable=wrong-import-position,too-many-branches,invalid-name,no-name-in-module, import-error import json + +from distutils.version import StrictVersion + from ansible.module_utils.basic import AnsibleModule @@ -127,9 +130,15 @@ def main(): ) # Verify that the platform supports atomic command - rc, _, err = module.run_command('atomic -v', check_rc=False) + rc, version_out, err = module.run_command('rpm -q --queryformat "%{VERSION}\n" atomic', check_rc=False) if rc != 0: module.fail_json(msg="Error in running atomic command", err=err) + # This module requires atomic version 1.17.2 or later + atomic_version = StrictVersion(version_out.replace('\n', '')) + if atomic_version < StrictVersion('1.17.2'): + module.fail_json( + msg="atomic version 1.17.2+ is required", + err=str(atomic_version)) try: core(module) diff --git a/roles/lib_openshift/src/class/oc_obj.py b/roles/lib_openshift/src/class/oc_obj.py index 6f0da3d5c..5e423bea9 100644 --- a/roles/lib_openshift/src/class/oc_obj.py +++ b/roles/lib_openshift/src/class/oc_obj.py @@ -33,7 +33,12 @@ class OCObject(OpenShiftCLI): def delete(self): '''delete the object''' - return self._delete(self.kind, name=self.name, selector=self.selector) + results = self._delete(self.kind, name=self.name, selector=self.selector) + if (results['returncode'] != 0 and 'stderr' in results and + '\"{}\" not found'.format(self.name) in results['stderr']): + results['returncode'] = 0 + + return results def create(self, files=None, content=None): ''' @@ -117,7 +122,8 @@ class OCObject(OpenShiftCLI): if state == 'absent': # verify its not in our results if (params['name'] is not None or params['selector'] is not None) and \ - (len(api_rval['results']) == 0 or len(api_rval['results'][0].get('items', [])) == 0): + (len(api_rval['results']) == 0 or \ + ('items' in api_rval['results'][0] and len(api_rval['results'][0]['items']) == 0)): return {'changed': False, 'state': state} if check_mode: diff --git a/roles/lib_openshift/src/doc/obj b/roles/lib_openshift/src/doc/obj index 4ff912b2d..c6504ed01 100644 --- a/roles/lib_openshift/src/doc/obj +++ b/roles/lib_openshift/src/doc/obj @@ -39,9 +39,9 @@ options: required: false default: str aliases: [] - all_namespace: + all_namespaces: description: - - The namespace where the object lives. + - Search in all namespaces for the object. 
required: false default: false aliases: [] diff --git a/roles/lib_openshift/src/lib/base.py b/roles/lib_openshift/src/lib/base.py index b3f01008b..16770b22d 100644 --- a/roles/lib_openshift/src/lib/base.py +++ b/roles/lib_openshift/src/lib/base.py @@ -273,10 +273,6 @@ class OpenShiftCLI(object): elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501 cmds.extend(['-n', self.namespace]) - rval = {} - results = '' - err = None - if self.verbose: print(' '.join(cmds)) @@ -286,34 +282,26 @@ class OpenShiftCLI(object): returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex) rval = {"returncode": returncode, - "results": results, "cmd": ' '.join(cmds)} - if returncode == 0: - if output: - if output_type == 'json': - try: - rval['results'] = json.loads(stdout) - except ValueError as verr: - if "No JSON object could be decoded" in verr.args: - err = verr.args - elif output_type == 'raw': - rval['results'] = stdout - - if self.verbose: - print("STDOUT: {0}".format(stdout)) - print("STDERR: {0}".format(stderr)) - - if err: - rval.update({"err": err, - "stderr": stderr, - "stdout": stdout, - "cmd": cmds}) + if output_type == 'json': + rval['results'] = {} + if output and stdout: + try: + rval['results'] = json.loads(stdout) + except ValueError as verr: + if "No JSON object could be decoded" in verr.args: + rval['err'] = verr.args + elif output_type == 'raw': + rval['results'] = stdout if output else '' - else: + if self.verbose: + print("STDOUT: {0}".format(stdout)) + print("STDERR: {0}".format(stderr)) + + if 'err' in rval or returncode != 0: rval.update({"stderr": stderr, - "stdout": stdout, - "results": {}}) + "stdout": stdout}) return rval diff --git a/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py b/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py index 0242f5b43..44a8fa29b 100644 --- a/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py +++ b/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py @@ -104,6 +104,7 @@ platforms missing the Python OpenSSL library. self.extensions = [] PARSING_ALT_NAMES = False + PARSING_HEX_SERIAL = False for line in self.cert_string.split('\n'): l = line.strip() if PARSING_ALT_NAMES: @@ -114,10 +115,26 @@ platforms missing the Python OpenSSL library. PARSING_ALT_NAMES = False continue + if PARSING_HEX_SERIAL: + # Hex serials arrive colon-delimited + serial_raw = l.replace(':', '') + # Convert to decimal + self.serial = int('0x' + serial_raw, base=16) + PARSING_HEX_SERIAL = False + continue + # parse out the bits that we can if l.startswith('Serial Number:'): - # Serial Number: 11 (0xb) - # => 11 + # Decimal format: + # Serial Number: 11 (0xb) + # => 11 + # Hex Format (large serials): + # Serial Number: + # 0a:de:eb:24:04:75:ab:56:39:14:e9:5a:22:e2:85:bf + # => 14449739080294792594019643629255165375 + if l.endswith(':'): + PARSING_HEX_SERIAL = True + continue self.serial = int(l.split()[-2]) elif l.startswith('Not After :'): diff --git a/roles/openshift_certificate_expiry/test/conftest.py b/roles/openshift_certificate_expiry/test/conftest.py index 4ca35ecbc..df948fff0 100644 --- a/roles/openshift_certificate_expiry/test/conftest.py +++ b/roles/openshift_certificate_expiry/test/conftest.py @@ -23,7 +23,10 @@ VALID_CERTIFICATE_PARAMS = [ { 'short_name': 'combined', 'cn': 'combined.example.com', - 'serial': 6, + # Verify that HUGE serials parse correctly. 
+ # Frobs PARSING_HEX_SERIAL in _parse_cert + # See https://bugzilla.redhat.com/show_bug.cgi?id=1464240 + 'serial': 14449739080294792594019643629255165375, 'uses': b'clientAuth, serverAuth', 'dns': ['etcd'], 'ip': ['10.0.0.2', '192.168.0.2'] diff --git a/roles/openshift_cfme/README.md b/roles/openshift_cfme/README.md new file mode 100644 index 000000000..8283afed6 --- /dev/null +++ b/roles/openshift_cfme/README.md @@ -0,0 +1,404 @@ +# OpenShift-Ansible - CFME Role + +# PROOF OF CONCEPT - Alpha Version + +This role is based on the work in the upstream +[manageiq/manageiq-pods](https://github.com/ManageIQ/manageiq-pods) +project. For additional literature on configuration specific to +ManageIQ (optional post-installation tasks), visit the project's +[upstream documentation page](http://manageiq.org/docs/get-started/basic-configuration). + +Please submit a +[new issue](https://github.com/openshift/openshift-ansible/issues/new) +if you run into bugs with this role or wish to request enhancements. + +# Important Notes + +This is an early *proof of concept* role to install the Cloud Forms +Management Engine (ManageIQ) on OpenShift Container Platform (OCP). + +* This role is still in **ALPHA STATUS** +* Many options are hard-coded still (ex: NFS setup) +* Not many configurable options yet +* **Should** be run on a dedicated cluster +* **Will not run** on undersized infra +* The terms *CFME* and *MIQ* / *ManageIQ* are interchangeable + +## Requirements + +**NOTE:** These requirements are copied from the upstream +[manageiq/manageiq-pods](https://github.com/ManageIQ/manageiq-pods) +project. + +### Prerequisites: + +* + [OpenShift Origin 1.5](https://docs.openshift.com/container-platform/3.5/welcome/index.html) + or + [higher](https://docs.openshift.com/container-platform/latest/welcome/index.html) + provisioned +* NFS or other compatible volume provider +* A cluster-admin user (created by role if required) + +### Cluster Sizing + +In order to avoid random deployment failures due to resource +starvation, we recommend a minimum cluster size for a **test** +environment. + 
| Type | Size | CPUs | Memory |
|----------------|---------|----------|----------|
| Masters | `1+` | `8` | `12GB` |
| Nodes | `2+` | `4` | `8GB` |
| PV Storage | `25GB` | `N/A` | `N/A` |
 + + +![Basic CFME Deployment](img/CFMEBasicDeployment.png) + +**CFME has hard-requirements for memory. CFME will NOT install if your + infrastructure does not meet or exceed the requirements given + above. Do not run this playbook if you do not have the required + memory; you will just waste your time.** + + +### Other sizing considerations + +* Recommendations assume MIQ will be the **only application running** + on this cluster. +* Alternatively, you can provision an infrastructure node to run + registry/metrics/router/logging pods. +* Each MIQ application pod will consume at least `3GB` of RAM on initial + deployment (blank deployment without providers). +* RAM consumption will ramp up higher depending on appliance use; once + providers are added, expect higher resource consumption (a pre-flight memory check sketch follows this list).
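
As a quick pre-flight aid, here is a minimal sketch (it is not part of this role; the `nodes` group name and the ~8GB threshold are assumptions taken from the sizing table above) that lets Ansible assert the per-node memory recommendation before you start the installer:

```
# Hypothetical pre-flight check; not shipped with openshift-ansible.
# Uses the standard ansible_memtotal_mb fact to verify each node in
# the assumed "nodes" inventory group meets the ~8GB recommendation.
- hosts: nodes
  gather_facts: true
  tasks:
    - name: Verify node memory meets the CFME test-environment minimum
      assert:
        that:
          - ansible_memtotal_mb | int >= 7900
        msg: "{{ inventory_hostname }} reports {{ ansible_memtotal_mb }}MB RAM; the sizing table above recommends 8GB per node."
```

Masters could be checked the same way against the `12GB` figure.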
+ + +### Assumptions + +1) You meet/exceed the [cluster sizing](#cluster-sizing) requirements +1) Your NFS server is on your master host +1) Your PV backing NFS storage volume is mounted on `/exports/` + +Required directories that NFS will export to back the PVs: + +* `/exports/miq-pv0[123]` + +If the required directories are not present at install-time, they will +be created using the recommended permissions per the +[upstream documentation](https://github.com/ManageIQ/manageiq-pods#make-persistent-volumes-to-host-the-miq-database-and-application-data): + +* UID/GID: `root`/`root` +* Mode: `0775` + +**IMPORTANT:** If you are using a separate volume (`/dev/vdX`) for NFS + storage, **ensure** it is mounted on `/exports/` **before** running + this role. + + + +## Role Variables + +Core variables in this role: + +| Name | Default value | Description | |-------------------------------|---------------|---------------| | `openshift_cfme_install_app` | `False` | `True`: Install everything and create a new CFME app, `False`: Just install all of the templates and scaffolding | + + +Variables you may override have defaults defined in +[defaults/main.yml](defaults/main.yml). + + +# Important Notes + +This role is presently in **tech preview** status. Use it with the same +caution you would give any other pre-release software. + +**Most importantly** follow this one rule: don't re-run the entrypoint +playbook multiple times in a row without cleaning up after previous +runs if some of the CFME steps have run. This is a known +flake. Cleanup instructions are provided at the bottom of this README. + + +# Usage + +This section describes the basic usage of this role. All parameters +will use their [default values](defaults/main.yml). + +## Pre-flight Checks + +**IMPORTANT:** As documented above in [the prerequisites](#prerequisites), + you **must already** have your OCP cluster up and running. + +**Optional:** The ManageIQ pod is fairly large (about 1.7 GB) so to +save some spin-up time post-deployment, you can begin pre-pulling the +docker image to each of your nodes now: + +``` +root@node0x # docker pull docker.io/manageiq/manageiq-pods:app-latest-fine +``` + +## Getting Started + +1) The *entry point playbook* to install CFME is located in +[the BYO playbooks](../../playbooks/byo/openshift-cfme/config.yml) +directory. + +2) Update your existing `hosts` inventory file and ensure the +parameter `openshift_cfme_install_app` is set to `True` under the +`[OSEv3:vars]` block. + +3) Using your existing `hosts` inventory file, run `ansible-playbook` +with the entry point playbook: + +``` +$ ansible-playbook -v -i <INVENTORY_FILE> playbooks/byo/openshift-cfme/config.yml +``` + +## Next Steps + +Once complete, the playbook will let you know: + + +``` +TASK [openshift_cfme : Status update] ********************************************************* +ok: [ho.st.na.me] => { + "msg": "CFME has been deployed. Note that there will be a delay before it is fully initialized.\n" +} +``` + +This will take several minutes (*possibly 10 or more*, depending on +your network connection). However, you can get some insight into the +deployment process during initialization. + +### oc describe pod manageiq-0 + +*Some useful information about the output you will see if you run the +`oc describe pod manageiq-0` command* + +**Readiness probes** - These will take a while to become +`Healthy`. The initial health probes won't even happen for at least 8 +minutes depending on how long it takes you to pull down the large +images.
ManageIQ is a large application so it may take a considerable +amount of time for it to deploy and be marked as `Healthy`. + +If you go to the node you know the application is running on (check +for `Successfully assigned manageiq-0 to <HOST|IP>` in the `describe` +output) you can run a `docker pull` command to monitor the progress of +the image pull: + +``` +[root@cfme-node ~]# docker pull docker.io/manageiq/manageiq-pods:app-latest-fine +Trying to pull repository docker.io/manageiq/manageiq-pods ... +sha256:6c055ca9d3c65cd694d6c0e28986b5239ba56bbdf0488cccdaa283d545258f8a: Pulling from docker.io/manageiq/manageiq-pods +Digest: sha256:6c055ca9d3c65cd694d6c0e28986b5239ba56bbdf0488cccdaa283d545258f8a +Status: Image is up to date for docker.io/manageiq/manageiq-pods:app-latest-fine +``` + +The example above demonstrates the case where the image has been +successfully pulled already. + +If the image isn't completely pulled already then you will see +multiple progress bars detailing each image layer download status. + + +### rsh + +*Useful inspection/progress monitoring techniques with the `oc rsh` +command.* + + +On your master node, switch to the `cfme` project (or whatever you +named it if you overrode the `openshift_cfme_project` variable) and +check on the pod states: + +``` +[root@cfme-master01 ~]# oc project cfme +Now using project "cfme" on server "https://10.10.0.100:8443". + +[root@cfme-master01 ~]# oc get pod +NAME READY STATUS RESTARTS AGE +manageiq-0 0/1 Running 0 14m +memcached-1-3lk7g 1/1 Running 0 14m +postgresql-1-12slb 1/1 Running 0 14m +``` + +Note how the `manageiq-0` pod says `0/1` under the **READY** +column. After some time (depending on your network connection) you'll +be able to `rsh` into the pod to find out more of what's happening in +real time. First, the easy-mode command, run this once `rsh` is +available and then watch until it says `Started Initialize Appliance +Database`: + +``` +[root@cfme-master01 ~]# oc rsh manageiq-0 journalctl -f -u appliance-initialize.service +``` + +For the full explanation of what this means, and more interactive +inspection techniques, keep reading on. + +To obtain a shell on our `manageiq` pod we use this command: + +``` +[root@cfme-master01 ~]# oc rsh manageiq-0 bash -l +``` + +The `rsh` command opens a shell in your pod for you. In this case it's +the pod called `manageiq-0`. `systemd` is managing the services in +this pod so we can use the `list-units` command to see what is running +currently: `# systemctl list-units | grep appliance`. + +If you see the `appliance-initialize` service running, this indicates +that basic setup is still in progress. 
We can monitor the process with +the `journalctl` command like so: + + +``` +[root@manageiq-0 vmdb]# journalctl -f -u appliance-initialize.service +Jun 14 14:55:52 manageiq-0 appliance-initialize.sh[58]: == Checking deployment status == +Jun 14 14:55:52 manageiq-0 appliance-initialize.sh[58]: No pre-existing EVM configuration found on region PV +Jun 14 14:55:52 manageiq-0 appliance-initialize.sh[58]: == Checking for existing data on server PV == +Jun 14 14:55:52 manageiq-0 appliance-initialize.sh[58]: == Starting New Deployment == +Jun 14 14:55:52 manageiq-0 appliance-initialize.sh[58]: == Applying memcached config == +Jun 14 14:55:53 manageiq-0 appliance-initialize.sh[58]: == Initializing Appliance == +Jun 14 14:55:57 manageiq-0 appliance-initialize.sh[58]: create encryption key +Jun 14 14:55:57 manageiq-0 appliance-initialize.sh[58]: configuring external database +Jun 14 14:55:57 manageiq-0 appliance-initialize.sh[58]: Checking for connections to the database... +Jun 14 14:56:09 manageiq-0 appliance-initialize.sh[58]: Create region starting +Jun 14 14:58:15 manageiq-0 appliance-initialize.sh[58]: Create region complete +Jun 14 14:58:15 manageiq-0 appliance-initialize.sh[58]: == Initializing PV data == +Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: == Initializing PV data backup == +Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: sending incremental file list +Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: created directory /persistent/server-deploy/backup/backup_2017_06_14_145816 +Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: region-data/ +Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: region-data/var/ +Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: region-data/var/www/ +Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: region-data/var/www/miq/ +Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: region-data/var/www/miq/vmdb/ +Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: region-data/var/www/miq/vmdb/REGION +Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: region-data/var/www/miq/vmdb/certs/ +Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: region-data/var/www/miq/vmdb/certs/v2_key +Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: region-data/var/www/miq/vmdb/config/ +Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: region-data/var/www/miq/vmdb/config/database.yml +Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: server-data/ +Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: server-data/var/ +Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: server-data/var/www/ +Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: server-data/var/www/miq/ +Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: server-data/var/www/miq/vmdb/ +Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: server-data/var/www/miq/vmdb/GUID +Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: sent 1330 bytes received 136 bytes 2932.00 bytes/sec +Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: total size is 770 speedup is 0.53 +Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: == Restoring PV data symlinks == +Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: /var/www/miq/vmdb/REGION symlink is already in place, skipping +Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: /var/www/miq/vmdb/config/database.yml symlink is already in place, skipping +Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: /var/www/miq/vmdb/certs/v2_key symlink is already in place, 
skipping +Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: /var/www/miq/vmdb/log symlink is already in place, skipping +Jun 14 14:58:28 manageiq-0 systemctl[304]: Removed symlink /etc/systemd/system/multi-user.target.wants/appliance-initialize.service. +Jun 14 14:58:29 manageiq-0 systemd[1]: Started Initialize Appliance Database. +``` + +Most of what we see here (above) is the initial database seeding +process. This process isn't very quick, so be patient. + +At the bottom of the log there is a special line from the `systemctl` +service, `Removed symlink +/etc/systemd/system/multi-user.target.wants/appliance-initialize.service`. The +`appliance-initialize` service is no longer marked as enabled. This +indicates that the base application initialization is complete now. + +We're not done yet though, there are other ancillary services which +run in this pod to support the application. *Still in the rsh shell*, +Use the `ps` command to monitor for the `httpd` processes +starting. You will see output similar to the following when that stage +has completed: + +``` +[root@manageiq-0 vmdb]# ps aux | grep http +root 1941 0.0 0.1 249820 7640 ? Ss 15:02 0:00 /usr/sbin/httpd -DFOREGROUND +apache 1942 0.0 0.0 250752 6012 ? S 15:02 0:00 /usr/sbin/httpd -DFOREGROUND +apache 1943 0.0 0.0 250472 5952 ? S 15:02 0:00 /usr/sbin/httpd -DFOREGROUND +apache 1944 0.0 0.0 250472 5916 ? S 15:02 0:00 /usr/sbin/httpd -DFOREGROUND +apache 1945 0.0 0.0 250360 5764 ? S 15:02 0:00 /usr/sbin/httpd -DFOREGROUND +``` + +Furthermore, you can find other related processes by just looking for +ones with `MIQ` in their name: + +``` +[root@manageiq-0 vmdb]# ps aux | grep miq +root 333 27.7 4.2 555884 315916 ? Sl 14:58 3:59 MIQ Server +root 1976 0.6 4.0 507224 303740 ? SNl 15:02 0:03 MIQ: MiqGenericWorker id: 1, queue: generic +root 1984 0.6 4.0 507224 304312 ? SNl 15:02 0:03 MIQ: MiqGenericWorker id: 2, queue: generic +root 1992 0.9 4.0 508252 304888 ? SNl 15:02 0:05 MIQ: MiqPriorityWorker id: 3, queue: generic +root 2000 0.7 4.0 510308 304696 ? SNl 15:02 0:04 MIQ: MiqPriorityWorker id: 4, queue: generic +root 2008 1.2 4.0 514000 303612 ? SNl 15:02 0:07 MIQ: MiqScheduleWorker id: 5 +root 2026 0.2 4.0 517504 303644 ? SNl 15:02 0:01 MIQ: MiqEventHandler id: 6, queue: ems +root 2036 0.2 4.0 518532 303768 ? SNl 15:02 0:01 MIQ: MiqReportingWorker id: 7, queue: reporting +root 2044 0.2 4.0 519560 303812 ? SNl 15:02 0:01 MIQ: MiqReportingWorker id: 8, queue: reporting +root 2059 0.2 4.0 528372 303956 ? SNl 15:02 0:01 puma 3.3.0 (tcp://127.0.0.1:5000) [MIQ: Web Server Worker] +root 2067 0.9 4.0 529664 305716 ? SNl 15:02 0:05 puma 3.3.0 (tcp://127.0.0.1:3000) [MIQ: Web Server Worker] +root 2075 0.2 4.0 529408 304056 ? SNl 15:02 0:01 puma 3.3.0 (tcp://127.0.0.1:4000) [MIQ: Web Server Worker] +root 2329 0.0 0.0 10640 972 ? S+ 15:13 0:00 grep --color=auto -i miq +``` + +Finally, *still in the rsh shell*, to test if the application is +running correctly, we can request the application homepage. If the +page is available the page title will be `ManageIQ: Login`: + +``` +[root@manageiq-0 vmdb]# curl -s -k https://localhost | grep -A2 '<title>' +<title> +ManageIQ: Login +</title> +``` + +**Note:** The `-s` flag makes `curl` operations silent and the `-k` +flag to ignore errors about untrusted certificates. + + + +# Additional Upstream Resources + +Below are some useful resources from the upstream project +documentation. You may find these of value. 
+ +* [Verify Setup Was Successful](https://github.com/ManageIQ/manageiq-pods#verifying-the-setup-was-successful) +* [POD Access And Routes](https://github.com/ManageIQ/manageiq-pods#pod-access-and-routes) +* [Troubleshooting](https://github.com/ManageIQ/manageiq-pods#troubleshooting) + + +# Manual Cleanup + +At this time uninstallation/cleanup is still a manual process. You +will have to follow a few steps to fully remove CFME from your +cluster. + +Delete the project: + +* `oc delete project cfme` + +Delete the PVs: + +* `oc delete pv miq-pv01` +* `oc delete pv miq-pv02` +* `oc delete pv miq-pv03` + +Clean out the old PV data: + +* `cd /exports/` +* `find miq* -type f -delete` +* `find miq* -type d -delete` + +Remove the NFS exports: + +* `rm /etc/exports.d/openshift_cfme.exports` +* `exportfs -ar` + +Delete the user: + +* `oc delete user cfme` + +**NOTE:** The `oc delete project cfme` command will return quickly; +however, it will continue to operate in the background. Continue +running `oc get project` after you've completed the other steps to +monitor the pods and final project termination progress. diff --git a/roles/openshift_cfme/defaults/main.yml b/roles/openshift_cfme/defaults/main.yml new file mode 100644 index 000000000..493e1ef68 --- /dev/null +++ b/roles/openshift_cfme/defaults/main.yml @@ -0,0 +1,38 @@ +--- +# Namespace for the CFME project +openshift_cfme_project: cfme +# Namespace/project description +openshift_cfme_project_description: ManageIQ - CloudForms Management Engine +# Basic user assigned the `admin` role for the project +openshift_cfme_user: cfme +# Project system account for enabling privileged pods +openshift_cfme_service_account: "system:serviceaccount:{{ openshift_cfme_project }}:default" +# All the required exports +openshift_cfme_pv_exports: + - miq-pv01 + - miq-pv02 + - miq-pv03 +# PV template files and their created object names +openshift_cfme_pv_data: + - pv_name: miq-pv01 + pv_template: miq-pv-db.yaml + pv_label: CFME DB PV + - pv_name: miq-pv02 + pv_template: miq-pv-region.yaml + pv_label: CFME Region PV + - pv_name: miq-pv03 + pv_template: miq-pv-server.yaml + pv_label: CFME Server PV + +# Tuning parameter to use more than 5 images at once from an ImageStream +openshift_cfme_maxImagesBulkImportedPerRepository: 100 +# Hostname/IP of the NFS server. Currently defaults to first master +openshift_cfme_nfs_server: "{{ groups.nfs.0 }}" +# TODO: Refactor '_install_app' variable. This is just for testing but +# maybe in the future it should control the entire yes/no for CFME. +# +# Whether or not the manageiq app should be initialized ('oc new-app +# --template=manageiq'). If False, everything UP TO 'new-app' is run.
+openshift_cfme_install_app: False +# Docker image to pull +openshift_cfme_container_image: "docker.io/manageiq/manageiq-pods:app-latest-fine" diff --git a/roles/openshift_cfme/files/miq-template.yaml b/roles/openshift_cfme/files/miq-template.yaml new file mode 100644 index 000000000..8f0d2af38 --- /dev/null +++ b/roles/openshift_cfme/files/miq-template.yaml @@ -0,0 +1,566 @@ +--- +path: /tmp/miq-template-out +data: + apiVersion: v1 + kind: Template + labels: + template: manageiq + metadata: + name: manageiq + annotations: + description: "ManageIQ appliance with persistent storage" + tags: "instant-app,manageiq,miq" + iconClass: "icon-rails" + objects: + - apiVersion: v1 + kind: Secret + metadata: + name: "${NAME}-secrets" + stringData: + pg-password: "${DATABASE_PASSWORD}" + - apiVersion: v1 + kind: Service + metadata: + annotations: + description: "Exposes and load balances ManageIQ pods" + service.alpha.openshift.io/dependencies: '[{"name":"${DATABASE_SERVICE_NAME}","namespace":"","kind":"Service"},{"name":"${MEMCACHED_SERVICE_NAME}","namespace":"","kind":"Service"}]' + name: ${NAME} + spec: + clusterIP: None + ports: + - name: http + port: 80 + protocol: TCP + targetPort: 80 + - name: https + port: 443 + protocol: TCP + targetPort: 443 + selector: + name: ${NAME} + - apiVersion: v1 + kind: Route + metadata: + name: ${NAME} + spec: + host: ${APPLICATION_DOMAIN} + port: + targetPort: https + tls: + termination: passthrough + to: + kind: Service + name: ${NAME} + - apiVersion: v1 + kind: ImageStream + metadata: + name: miq-app + annotations: + description: "Keeps track of the ManageIQ image changes" + spec: + dockerImageRepository: "${APPLICATION_IMG_NAME}" + - apiVersion: v1 + kind: ImageStream + metadata: + name: miq-postgresql + annotations: + description: "Keeps track of the PostgreSQL image changes" + spec: + dockerImageRepository: "${POSTGRESQL_IMG_NAME}" + - apiVersion: v1 + kind: ImageStream + metadata: + name: miq-memcached + annotations: + description: "Keeps track of the Memcached image changes" + spec: + dockerImageRepository: "${MEMCACHED_IMG_NAME}" + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: "${NAME}-${DATABASE_SERVICE_NAME}" + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: ${DATABASE_VOLUME_CAPACITY} + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: "${NAME}-region" + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: ${APPLICATION_REGION_VOLUME_CAPACITY} + - apiVersion: apps/v1beta1 + kind: "StatefulSet" + metadata: + name: ${NAME} + annotations: + description: "Defines how to deploy the ManageIQ appliance" + spec: + serviceName: "${NAME}" + replicas: "${APPLICATION_REPLICA_COUNT}" + template: + metadata: + labels: + name: ${NAME} + name: ${NAME} + spec: + containers: + - name: manageiq + image: "${APPLICATION_IMG_NAME}:${APPLICATION_IMG_TAG}" + livenessProbe: + tcpSocket: + port: 443 + initialDelaySeconds: 480 + timeoutSeconds: 3 + readinessProbe: + httpGet: + path: / + port: 443 + scheme: HTTPS + initialDelaySeconds: 200 + timeoutSeconds: 3 + ports: + - containerPort: 80 + protocol: TCP + - containerPort: 443 + protocol: TCP + securityContext: + privileged: true + volumeMounts: + - + name: "${NAME}-server" + mountPath: "/persistent" + - + name: "${NAME}-region" + mountPath: "/persistent-region" + env: + - + name: "APPLICATION_INIT_DELAY" + value: "${APPLICATION_INIT_DELAY}" + - + name: "DATABASE_SERVICE_NAME" + value: "${DATABASE_SERVICE_NAME}" + - + name: "DATABASE_REGION" + 
value: "${DATABASE_REGION}" + - + name: "MEMCACHED_SERVICE_NAME" + value: "${MEMCACHED_SERVICE_NAME}" + - + name: "POSTGRESQL_USER" + value: "${DATABASE_USER}" + - + name: "POSTGRESQL_PASSWORD" + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: "pg-password" + - + name: "POSTGRESQL_DATABASE" + value: "${DATABASE_NAME}" + - + name: "POSTGRESQL_MAX_CONNECTIONS" + value: "${POSTGRESQL_MAX_CONNECTIONS}" + - + name: "POSTGRESQL_SHARED_BUFFERS" + value: "${POSTGRESQL_SHARED_BUFFERS}" + resources: + requests: + memory: "${APPLICATION_MEM_REQ}" + cpu: "${APPLICATION_CPU_REQ}" + limits: + memory: "${APPLICATION_MEM_LIMIT}" + lifecycle: + preStop: + exec: + command: + - /opt/manageiq/container-scripts/sync-pv-data + volumes: + - + name: "${NAME}-region" + persistentVolumeClaim: + claimName: ${NAME}-region + volumeClaimTemplates: + - metadata: + name: "${NAME}-server" + annotations: + # Uncomment this if using dynamic volume provisioning. + # https://docs.openshift.org/latest/install_config/persistent_storage/dynamically_provisioning_pvs.html + # volume.alpha.kubernetes.io/storage-class: anything + spec: + accessModes: [ ReadWriteOnce ] + resources: + requests: + storage: "${APPLICATION_VOLUME_CAPACITY}" + - apiVersion: v1 + kind: "Service" + metadata: + name: "${MEMCACHED_SERVICE_NAME}" + annotations: + description: "Exposes the memcached server" + spec: + ports: + - + name: "memcached" + port: 11211 + targetPort: 11211 + selector: + name: "${MEMCACHED_SERVICE_NAME}" + - apiVersion: v1 + kind: "DeploymentConfig" + metadata: + name: "${MEMCACHED_SERVICE_NAME}" + annotations: + description: "Defines how to deploy memcached" + spec: + strategy: + type: "Recreate" + triggers: + - + type: "ImageChange" + imageChangeParams: + automatic: true + containerNames: + - "memcached" + from: + kind: "ImageStreamTag" + name: "miq-memcached:${MEMCACHED_IMG_TAG}" + - + type: "ConfigChange" + replicas: 1 + selector: + name: "${MEMCACHED_SERVICE_NAME}" + template: + metadata: + name: "${MEMCACHED_SERVICE_NAME}" + labels: + name: "${MEMCACHED_SERVICE_NAME}" + spec: + volumes: [] + containers: + - + name: "memcached" + image: "${MEMCACHED_IMG_NAME}:${MEMCACHED_IMG_TAG}" + ports: + - + containerPort: 11211 + readinessProbe: + timeoutSeconds: 1 + initialDelaySeconds: 5 + tcpSocket: + port: 11211 + livenessProbe: + timeoutSeconds: 1 + initialDelaySeconds: 30 + tcpSocket: + port: 11211 + volumeMounts: [] + env: + - + name: "MEMCACHED_MAX_MEMORY" + value: "${MEMCACHED_MAX_MEMORY}" + - + name: "MEMCACHED_MAX_CONNECTIONS" + value: "${MEMCACHED_MAX_CONNECTIONS}" + - + name: "MEMCACHED_SLAB_PAGE_SIZE" + value: "${MEMCACHED_SLAB_PAGE_SIZE}" + resources: + requests: + memory: "${MEMCACHED_MEM_REQ}" + cpu: "${MEMCACHED_CPU_REQ}" + limits: + memory: "${MEMCACHED_MEM_LIMIT}" + - apiVersion: v1 + kind: "Service" + metadata: + name: "${DATABASE_SERVICE_NAME}" + annotations: + description: "Exposes the database server" + spec: + ports: + - + name: "postgresql" + port: 5432 + targetPort: 5432 + selector: + name: "${DATABASE_SERVICE_NAME}" + - apiVersion: v1 + kind: "DeploymentConfig" + metadata: + name: "${DATABASE_SERVICE_NAME}" + annotations: + description: "Defines how to deploy the database" + spec: + strategy: + type: "Recreate" + triggers: + - + type: "ImageChange" + imageChangeParams: + automatic: true + containerNames: + - "postgresql" + from: + kind: "ImageStreamTag" + name: "miq-postgresql:${POSTGRESQL_IMG_TAG}" + - + type: "ConfigChange" + replicas: 1 + selector: + name: "${DATABASE_SERVICE_NAME}" + template: + 
metadata: + name: "${DATABASE_SERVICE_NAME}" + labels: + name: "${DATABASE_SERVICE_NAME}" + spec: + volumes: + - + name: "miq-pgdb-volume" + persistentVolumeClaim: + claimName: "${NAME}-${DATABASE_SERVICE_NAME}" + containers: + - + name: "postgresql" + image: "${POSTGRESQL_IMG_NAME}:${POSTGRESQL_IMG_TAG}" + ports: + - + containerPort: 5432 + readinessProbe: + timeoutSeconds: 1 + initialDelaySeconds: 15 + exec: + command: + - "/bin/sh" + - "-i" + - "-c" + - "psql -h 127.0.0.1 -U ${POSTGRESQL_USER} -q -d ${POSTGRESQL_DATABASE} -c 'SELECT 1'" + livenessProbe: + timeoutSeconds: 1 + initialDelaySeconds: 60 + tcpSocket: + port: 5432 + volumeMounts: + - + name: "miq-pgdb-volume" + mountPath: "/var/lib/pgsql/data" + env: + - + name: "POSTGRESQL_USER" + value: "${DATABASE_USER}" + - + name: "POSTGRESQL_PASSWORD" + valueFrom: + secretKeyRef: + name: "${NAME}-secrets" + key: "pg-password" + - + name: "POSTGRESQL_DATABASE" + value: "${DATABASE_NAME}" + - + name: "POSTGRESQL_MAX_CONNECTIONS" + value: "${POSTGRESQL_MAX_CONNECTIONS}" + - + name: "POSTGRESQL_SHARED_BUFFERS" + value: "${POSTGRESQL_SHARED_BUFFERS}" + resources: + requests: + memory: "${POSTGRESQL_MEM_REQ}" + cpu: "${POSTGRESQL_CPU_REQ}" + limits: + memory: "${POSTGRESQL_MEM_LIMIT}" + + parameters: + - + name: "NAME" + displayName: Name + required: true + description: "The name assigned to all of the frontend objects defined in this template." + value: manageiq + - + name: "DATABASE_SERVICE_NAME" + displayName: "PostgreSQL Service Name" + required: true + description: "The name of the OpenShift Service exposed for the PostgreSQL container." + value: "postgresql" + - + name: "DATABASE_USER" + displayName: "PostgreSQL User" + required: true + description: "PostgreSQL user that will access the database." + value: "root" + - + name: "DATABASE_PASSWORD" + displayName: "PostgreSQL Password" + required: true + description: "Password for the PostgreSQL user." + from: "[a-zA-Z0-9]{8}" + generate: expression + - + name: "DATABASE_NAME" + required: true + displayName: "PostgreSQL Database Name" + description: "Name of the PostgreSQL database accessed." + value: "vmdb_production" + - + name: "DATABASE_REGION" + required: true + displayName: "Application Database Region" + description: "Database region that will be used for application." + value: "0" + - + name: "MEMCACHED_SERVICE_NAME" + required: true + displayName: "Memcached Service Name" + description: "The name of the OpenShift Service exposed for the Memcached container." + value: "memcached" + - + name: "MEMCACHED_MAX_MEMORY" + displayName: "Memcached Max Memory" + description: "Memcached maximum memory for memcached object storage in MB." + value: "64" + - + name: "MEMCACHED_MAX_CONNECTIONS" + displayName: "Memcached Max Connections" + description: "Memcached maximum number of connections allowed." + value: "1024" + - + name: "MEMCACHED_SLAB_PAGE_SIZE" + displayName: "Memcached Slab Page Size" + description: "Memcached size of each slab page." + value: "1m" + - + name: "POSTGRESQL_MAX_CONNECTIONS" + displayName: "PostgreSQL Max Connections" + description: "PostgreSQL maximum number of database connections allowed." + value: "100" + - + name: "POSTGRESQL_SHARED_BUFFERS" + displayName: "PostgreSQL Shared Buffer Amount" + description: "Amount of memory dedicated for PostgreSQL shared memory buffers." 
+ value: "256MB" + - + name: "APPLICATION_CPU_REQ" + displayName: "Application Min CPU Requested" + required: true + description: "Minimum amount of CPU time the Application container will need (expressed in millicores)." + value: "1000m" + - + name: "POSTGRESQL_CPU_REQ" + displayName: "PostgreSQL Min CPU Requested" + required: true + description: "Minimum amount of CPU time the PostgreSQL container will need (expressed in millicores)." + value: "500m" + - + name: "MEMCACHED_CPU_REQ" + displayName: "Memcached Min CPU Requested" + required: true + description: "Minimum amount of CPU time the Memcached container will need (expressed in millicores)." + value: "200m" + - + name: "APPLICATION_MEM_REQ" + displayName: "Application Min RAM Requested" + required: true + description: "Minimum amount of memory the Application container will need." + value: "6144Mi" + - + name: "POSTGRESQL_MEM_REQ" + displayName: "PostgreSQL Min RAM Requested" + required: true + description: "Minimum amount of memory the PostgreSQL container will need." + value: "1024Mi" + - + name: "MEMCACHED_MEM_REQ" + displayName: "Memcached Min RAM Requested" + required: true + description: "Minimum amount of memory the Memcached container will need." + value: "64Mi" + - + name: "APPLICATION_MEM_LIMIT" + displayName: "Application Max RAM Limit" + required: true + description: "Maximum amount of memory the Application container can consume." + value: "16384Mi" + - + name: "POSTGRESQL_MEM_LIMIT" + displayName: "PostgreSQL Max RAM Limit" + required: true + description: "Maximum amount of memory the PostgreSQL container can consume." + value: "8192Mi" + - + name: "MEMCACHED_MEM_LIMIT" + displayName: "Memcached Max RAM Limit" + required: true + description: "Maximum amount of memory the Memcached container can consume." + value: "256Mi" + - + name: "POSTGRESQL_IMG_NAME" + displayName: "PostgreSQL Image Name" + description: "This is the PostgreSQL image name requested to deploy." + value: "docker.io/manageiq/manageiq-pods" + - + name: "POSTGRESQL_IMG_TAG" + displayName: "PostgreSQL Image Tag" + description: "This is the PostgreSQL image tag/version requested to deploy." + value: "postgresql-latest-fine" + - + name: "MEMCACHED_IMG_NAME" + displayName: "Memcached Image Name" + description: "This is the Memcached image name requested to deploy." + value: "docker.io/manageiq/manageiq-pods" + - + name: "MEMCACHED_IMG_TAG" + displayName: "Memcached Image Tag" + description: "This is the Memcached image tag/version requested to deploy." + value: "memcached-latest-fine" + - + name: "APPLICATION_IMG_NAME" + displayName: "Application Image Name" + description: "This is the Application image name requested to deploy." + value: "docker.io/manageiq/manageiq-pods" + - + name: "APPLICATION_IMG_TAG" + displayName: "Application Image Tag" + description: "This is the Application image tag/version requested to deploy." + value: "app-latest-fine" + - + name: "APPLICATION_DOMAIN" + displayName: "Application Hostname" + description: "The exposed hostname that will route to the application service, if left blank a value will be defaulted." + value: "" + - + name: "APPLICATION_REPLICA_COUNT" + displayName: "Application Replica Count" + description: "This is the number of Application replicas requested to deploy." + value: "1" + - + name: "APPLICATION_INIT_DELAY" + displayName: "Application Init Delay" + required: true + description: "Delay in seconds before we attempt to initialize the application." 
+ value: "15" + - + name: "APPLICATION_VOLUME_CAPACITY" + displayName: "Application Volume Capacity" + required: true + description: "Volume space available for application data." + value: "5Gi" + - + name: "APPLICATION_REGION_VOLUME_CAPACITY" + displayName: "Application Region Volume Capacity" + required: true + description: "Volume space available for region application data." + value: "5Gi" + - + name: "DATABASE_VOLUME_CAPACITY" + displayName: "Database Volume Capacity" + required: true + description: "Volume space available for database." + value: "15Gi" diff --git a/roles/openshift_cfme/files/openshift_cfme.exports b/roles/openshift_cfme/files/openshift_cfme.exports new file mode 100644 index 000000000..5457d41fc --- /dev/null +++ b/roles/openshift_cfme/files/openshift_cfme.exports @@ -0,0 +1,3 @@ +/exports/miq-pv01 *(rw,no_root_squash,no_wdelay) +/exports/miq-pv02 *(rw,no_root_squash,no_wdelay) +/exports/miq-pv03 *(rw,no_root_squash,no_wdelay) diff --git a/roles/openshift_cfme/handlers/main.yml b/roles/openshift_cfme/handlers/main.yml new file mode 100644 index 000000000..476a5e030 --- /dev/null +++ b/roles/openshift_cfme/handlers/main.yml @@ -0,0 +1,42 @@ +--- +###################################################################### +# NOTE: These are duplicated from roles/openshift_master/handlers/main.yml +# +# TODO: Use the consolidated 'openshift_handlers' role once it's ready +# See: https://github.com/openshift/openshift-ansible/pull/4041#discussion_r118770782 +###################################################################### + +- name: restart master + systemd: name={{ openshift.common.service_type }}-master state=restarted + when: (openshift.master.ha is not defined or not openshift.master.ha | bool) and (not (master_service_status_changed | default(false) | bool)) + notify: Verify API Server + +- name: restart master api + systemd: name={{ openshift.common.service_type }}-master-api state=restarted + when: (openshift.master.ha is defined and openshift.master.ha | bool) and (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native' + notify: Verify API Server + +- name: restart master controllers + systemd: name={{ openshift.common.service_type }}-master-controllers state=restarted + when: (openshift.master.ha is defined and openshift.master.ha | bool) and (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native' + +- name: Verify API Server + # Using curl here since the uri module requires python-httplib2 and + # wait_for port doesn't provide health information. 
+ command: > + curl --silent --tlsv1.2 + {% if openshift.common.version_gte_3_2_or_1_2 | bool %} + --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt + {% else %} + --cacert {{ openshift.common.config_base }}/master/ca.crt + {% endif %} + {{ openshift.master.api_url }}/healthz/ready + args: + # Disables the following warning: + # Consider using get_url or uri module rather than running curl + warn: no + register: api_available_output + until: api_available_output.stdout == 'ok' + retries: 120 + delay: 1 + changed_when: false diff --git a/roles/openshift_cfme/img/CFMEBasicDeployment.png b/roles/openshift_cfme/img/CFMEBasicDeployment.png Binary files differ new file mode 100644 index 000000000..a89c1e325 --- /dev/null +++ b/roles/openshift_cfme/img/CFMEBasicDeployment.png diff --git a/roles/openshift_cfme/meta/main.yml b/roles/openshift_cfme/meta/main.yml new file mode 100644 index 000000000..9200f2c3c --- /dev/null +++ b/roles/openshift_cfme/meta/main.yml @@ -0,0 +1,20 @@ +--- +galaxy_info: + author: Tim Bielawa + description: OpenShift CFME (Manage IQ) Deployer + company: Red Hat, Inc. + license: Apache License, Version 2.0 + min_ansible_version: 2.2 + version: 1.0 + platforms: + - name: EL + versions: + - 7 + categories: + - cloud + - system +dependencies: +- role: lib_openshift +- role: lib_utils +- role: openshift_common +- role: openshift_master_facts diff --git a/roles/openshift_cfme/tasks/create_pvs.yml b/roles/openshift_cfme/tasks/create_pvs.yml new file mode 100644 index 000000000..7fa7d3997 --- /dev/null +++ b/roles/openshift_cfme/tasks/create_pvs.yml @@ -0,0 +1,36 @@ +--- +# Check for existence and then conditionally: +# - evaluate templates +# - PVs +# +# These tasks idempotently create required CFME PV objects. Do not +# call this file directly. This file is intended to be run as an +# include that has a 'with_items' attached to it.
Hence the use below +# of variables like "{{ item.pv_label }}" + +- name: "Check if the {{ item.pv_label }} template has been created already" + oc_obj: + namespace: "{{ openshift_cfme_project }}" + state: list + kind: pv + name: "{{ item.pv_name }}" + register: miq_pv_check + +# Skip all of this if the PV already exists +- block: + - name: "Ensure the {{ item.pv_label }} template is evaluated" + template: + src: "{{ item.pv_template }}.j2" + dest: "{{ template_dir }}/{{ item.pv_template }}" + + - name: "Ensure {{ item.pv_label }} is created" + oc_obj: + namespace: "{{ openshift_cfme_project }}" + kind: pv + name: "{{ item.pv_name }}" + state: present + delete_after: True + files: + - "{{ template_dir }}/{{ item.pv_template }}" + when: + - not miq_pv_check.results.results.0 diff --git a/roles/openshift_cfme/tasks/main.yml b/roles/openshift_cfme/tasks/main.yml new file mode 100644 index 000000000..acbce7232 --- /dev/null +++ b/roles/openshift_cfme/tasks/main.yml @@ -0,0 +1,148 @@ +--- +###################################################################### +# Users, projects, and privileges + +- name: Ensure the CFME user exists + oc_user: + state: present + username: "{{ openshift_cfme_user }}" + +- name: Ensure the CFME namespace exists with CFME user as admin + oc_project: + state: present + name: "{{ openshift_cfme_project }}" + display_name: "{{ openshift_cfme_project_description }}" + admin: "{{ openshift_cfme_user }}" + +- name: Ensure the CFME namespace service account is privileged + oc_adm_policy_user: + namespace: "{{ openshift_cfme_project }}" + user: "{{ openshift_cfme_service_account }}" + resource_kind: scc + resource_name: privileged + state: present + +###################################################################### +# NFS + +- name: Ensure the /exports/ directory exists + file: + path: /exports/ + state: directory + mode: 0755 + owner: root + group: root + +- name: Ensure the miq-pv0X export directories exist + file: + path: "/exports/{{ item }}" + state: directory + mode: 0775 + owner: root + group: root + with_items: "{{ openshift_cfme_pv_exports }}" + +- name: Ensure the NFS exports for CFME PVs exist + copy: + src: openshift_cfme.exports + dest: /etc/exports.d/openshift_cfme.exports + register: nfs_exports_updated + +- name: Ensure the NFS export table is refreshed if exports were added + command: exportfs -ar + when: + - nfs_exports_updated.changed + + +###################################################################### +# Create the required CFME PVs. Check out these online docs if you +# need a refresher on includes looping with items: +# * http://docs.ansible.com/ansible/playbooks_loops.html#loops-and-includes-in-2-0 +# * http://stackoverflow.com/a/35128533 +# +# TODO: Handle the case where a PV template is updated in +# openshift-ansible and the change needs to be landed on the managed +# cluster. + +- include: create_pvs.yml + with_items: "{{ openshift_cfme_pv_data }}" + +###################################################################### +# CFME App Template +# +# Note, this is different from the create_pvs.yml tasks in that the +# application template does not require any jinja2 evaluation. +# +# TODO: Handle the case where the server template is updated in +# openshift-ansible and the change needs to be landed on the managed +# cluster. 
+ +- name: Check if the CFME Server template has been created already + oc_obj: + namespace: "{{ openshift_cfme_project }}" + state: list + kind: template + name: manageiq + register: miq_server_check + +- name: Copy over CFME Server template + copy: + src: miq-template.yaml + dest: "{{ template_dir }}/miq-template.yaml" + +- name: Ensure the server template was read from disk + debug: + var=r_openshift_cfme_miq_template_content + +- name: Ensure CFME Server Template exists + oc_obj: + namespace: "{{ openshift_cfme_project }}" + kind: template + name: "manageiq" + state: present + content: "{{ r_openshift_cfme_miq_template_content }}" + +###################################################################### +# Let's do this + +- name: Ensure the CFME Server is created + oc_process: + namespace: "{{ openshift_cfme_project }}" + template_name: manageiq + create: True + register: cfme_new_app_process + run_once: True + when: + # User said to install CFME in their inventory + - openshift_cfme_install_app | bool + # # The server app doesn't exist already + # - not miq_server_check.results.results.0 + +- debug: + var: cfme_new_app_process + +###################################################################### +# Various cleanup steps + +# TODO: Not sure what to do about this right now. Might be able to +# just delete it? This currently warns about "Unable to find +# '<TEMP_DIR>' in expected paths." +- name: Ensure the temporary PV/App templates are erased + file: + path: "{{ item }}" + state: absent + with_fileglob: + - "{{ template_dir }}/*.yaml" + +- name: Ensure the temporary PV/app template directory is erased + file: + path: "{{ template_dir }}" + state: absent + +###################################################################### + +- name: Status update + debug: + msg: > + CFME has been deployed. Note that there will be a delay before + it is fully initialized. 
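
For orientation, the tasks above are normally driven by the BYO entry-point playbook referenced in the README (`playbooks/byo/openshift-cfme/config.yml`). The play below is only a hypothetical wrapper, sketched to show which variable turns on the actual app creation; it is not the shipped playbook:

```
# Hypothetical wrapper play; the supported entry point remains
# playbooks/byo/openshift-cfme/config.yml. Runs the role against the
# first master and asks it to actually create the CFME app.
- hosts: masters[0]
  roles:
    - role: openshift_cfme
      openshift_cfme_install_app: True
```

Leaving `openshift_cfme_install_app` at its default of `False` installs only the templates and scaffolding, as described in defaults/main.yml.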
diff --git a/roles/openshift_cfme/tasks/tune_masters.yml b/roles/openshift_cfme/tasks/tune_masters.yml new file mode 100644 index 000000000..02b0f10bf --- /dev/null +++ b/roles/openshift_cfme/tasks/tune_masters.yml @@ -0,0 +1,12 @@ +--- +- name: Ensure bulk image import limit is tuned + yedit: + src: /etc/origin/master/master-config.yaml + key: 'imagePolicyConfig.maxImagesBulkImportedPerRepository' + value: "{{ openshift_cfme_maxImagesBulkImportedPerRepository | int() }}" + state: present + backup: True + notify: + - restart master + +- meta: flush_handlers diff --git a/roles/openshift_cfme/tasks/uninstall.yml b/roles/openshift_cfme/tasks/uninstall.yml new file mode 100644 index 000000000..cba734a0e --- /dev/null +++ b/roles/openshift_cfme/tasks/uninstall.yml @@ -0,0 +1,43 @@ +--- +- include_role: + name: lib_openshift + +- name: Uninstall CFME - ManageIQ + debug: + msg: Uninstalling Cloudforms Management Engine - ManageIQ + +- name: Ensure the CFME project is removed + oc_project: + state: absent + name: "{{ openshift_cfme_project }}" + +- name: Ensure the CFME template is removed + oc_obj: + namespace: "{{ openshift_cfme_project }}" + state: absent + kind: template + name: manageiq + +- name: Ensure the CFME PVs are removed + oc_obj: + state: absent + all_namespaces: True + kind: pv + name: "{{ item }}" + with_items: "{{ openshift_cfme_pv_exports }}" + +- name: Ensure the CFME user is removed + oc_user: + state: absent + username: "{{ openshift_cfme_user }}" + +- name: Ensure the CFME NFS Exports are removed + file: + path: /etc/exports.d/openshift_cfme.exports + state: absent + register: nfs_exports_removed + +- name: Ensure the NFS export table is refreshed if exports were removed + command: exportfs -ar + when: + - nfs_exports_removed.changed diff --git a/roles/openshift_cfme/templates/miq-pv-db.yaml.j2 b/roles/openshift_cfme/templates/miq-pv-db.yaml.j2 new file mode 100644 index 000000000..b8c3bb277 --- /dev/null +++ b/roles/openshift_cfme/templates/miq-pv-db.yaml.j2 @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: miq-pv01 +spec: + capacity: + storage: 15Gi + accessModes: + - ReadWriteOnce + nfs: + path: /exports/miq-pv01 + server: {{ openshift_cfme_nfs_server }} + persistentVolumeReclaimPolicy: Retain diff --git a/roles/openshift_cfme/templates/miq-pv-region.yaml.j2 b/roles/openshift_cfme/templates/miq-pv-region.yaml.j2 new file mode 100644 index 000000000..7218773f0 --- /dev/null +++ b/roles/openshift_cfme/templates/miq-pv-region.yaml.j2 @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: miq-pv02 +spec: + capacity: + storage: 5Gi + accessModes: + - ReadWriteOnce + nfs: + path: /exports/miq-pv02 + server: {{ openshift_cfme_nfs_server }} + persistentVolumeReclaimPolicy: Retain diff --git a/roles/openshift_cfme/templates/miq-pv-server.yaml.j2 b/roles/openshift_cfme/templates/miq-pv-server.yaml.j2 new file mode 100644 index 000000000..7b40b6c69 --- /dev/null +++ b/roles/openshift_cfme/templates/miq-pv-server.yaml.j2 @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: miq-pv03 +spec: + capacity: + storage: 5Gi + accessModes: + - ReadWriteOnce + nfs: + path: /exports/miq-pv03 + server: {{ openshift_cfme_nfs_server }} + persistentVolumeReclaimPolicy: Retain diff --git a/roles/openshift_default_storage_class/defaults/main.yml b/roles/openshift_default_storage_class/defaults/main.yml index ee095833b..66ffd2a73 100644 --- a/roles/openshift_default_storage_class/defaults/main.yml +++ 
b/roles/openshift_default_storage_class/defaults/main.yml @@ -4,7 +4,7 @@ openshift_storageclass_defaults: name: gp2 provisioner: kubernetes.io/aws-ebs type: gp2 - gcp: + gce: name: standard provisioner: kubernetes.io/gce-pd type: pd-standard diff --git a/roles/openshift_etcd_facts/vars/main.yml b/roles/openshift_etcd_facts/vars/main.yml index 82db36eba..b3ecd57a6 100644 --- a/roles/openshift_etcd_facts/vars/main.yml +++ b/roles/openshift_etcd_facts/vars/main.yml @@ -5,6 +5,7 @@ etcd_hostname: "{{ openshift.common.hostname }}" etcd_ip: "{{ openshift.common.ip }}" etcd_cert_subdir: "etcd-{{ openshift.common.hostname }}" etcd_cert_prefix: -etcd_cert_config_dir: "{{ '/etc/etcd' if not openshift.common.is_etcd_system_container | bool else '/var/lib/etcd/etcd.etcd/etc' }}" +etcd_cert_config_dir: "/etc/etcd" +etcd_system_container_cert_config_dir: /var/lib/etcd/etcd.etcd/etc etcd_peer_url_scheme: https etcd_url_scheme: https diff --git a/roles/openshift_examples/files/examples/v1.4/cfme-templates/cfme-template.yaml b/roles/openshift_examples/files/examples/v1.4/cfme-templates/cfme-template.yaml index 4f25a9c8f..982bd9530 100644 --- a/roles/openshift_examples/files/examples/v1.4/cfme-templates/cfme-template.yaml +++ b/roles/openshift_examples/files/examples/v1.4/cfme-templates/cfme-template.yaml @@ -48,7 +48,7 @@ objects: annotations: description: "Keeps track of changes in the CloudForms app image" spec: - dockerImageRepository: registry.access.redhat.com/cloudforms/cfme-openshift-app + dockerImageRepository: registry.access.redhat.com/cloudforms42/cfme-openshift-app - apiVersion: v1 kind: PersistentVolumeClaim metadata: @@ -188,7 +188,7 @@ objects: annotations: description: "Keeps track of changes in the CloudForms memcached image" spec: - dockerImageRepository: registry.access.redhat.com/cloudforms/cfme-openshift-memcached + dockerImageRepository: registry.access.redhat.com/cloudforms42/cfme-openshift-memcached - apiVersion: v1 kind: "DeploymentConfig" metadata: @@ -272,7 +272,7 @@ objects: annotations: description: "Keeps track of changes in the CloudForms postgresql image" spec: - dockerImageRepository: registry.access.redhat.com/cloudforms/cfme-openshift-postgresql + dockerImageRepository: registry.access.redhat.com/cloudforms42/cfme-openshift-postgresql - apiVersion: v1 kind: "DeploymentConfig" metadata: diff --git a/roles/openshift_examples/files/examples/v1.5/cfme-templates/cfme-pv-app-example.yaml b/roles/openshift_examples/files/examples/v1.5/cfme-templates/cfme-pv-app-example.yaml deleted file mode 100644 index 14bdd1dca..000000000 --- a/roles/openshift_examples/files/examples/v1.5/cfme-templates/cfme-pv-app-example.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: v1 -kind: PersistentVolume -metadata: - name: cloudforms -spec: - capacity: - storage: 2Gi - accessModes: - - ReadWriteOnce - nfs: - path: /opt/nfs/volumes-app - server: 10.19.0.216 - persistentVolumeReclaimPolicy: Recycle diff --git a/roles/openshift_examples/files/examples/v1.5/cfme-templates/cfme-pv-db-example.yaml b/roles/openshift_examples/files/examples/v1.5/cfme-templates/cfme-pv-db-example.yaml new file mode 100644 index 000000000..250a99b8d --- /dev/null +++ b/roles/openshift_examples/files/examples/v1.5/cfme-templates/cfme-pv-db-example.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: cfme-pv01 +spec: + capacity: + storage: 15Gi + accessModes: + - ReadWriteOnce + nfs: + path: /exports/cfme-pv01 + server: <your-nfs-host-here> + persistentVolumeReclaimPolicy: Retain diff --git 
a/roles/openshift_examples/files/examples/v1.5/cfme-templates/cfme-pv-example.yaml b/roles/openshift_examples/files/examples/v1.5/cfme-templates/cfme-pv-example.yaml deleted file mode 100644 index 709d8d976..000000000 --- a/roles/openshift_examples/files/examples/v1.5/cfme-templates/cfme-pv-example.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: v1 -kind: PersistentVolume -metadata: - name: nfs-pv01 -spec: - capacity: - storage: 2Gi - accessModes: - - ReadWriteOnce - nfs: - path: /opt/nfs/volumes - server: 10.19.0.216 - persistentVolumeReclaimPolicy: Recycle diff --git a/roles/openshift_examples/files/examples/v1.5/cfme-templates/cfme-pv-region-example.yaml b/roles/openshift_examples/files/examples/v1.5/cfme-templates/cfme-pv-region-example.yaml new file mode 100644 index 000000000..cba9bbe35 --- /dev/null +++ b/roles/openshift_examples/files/examples/v1.5/cfme-templates/cfme-pv-region-example.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: cfme-pv02 +spec: + capacity: + storage: 5Gi + accessModes: + - ReadWriteOnce + nfs: + path: /exports/cfme-pv02 + server: <your-nfs-host-here> + persistentVolumeReclaimPolicy: Retain diff --git a/roles/openshift_examples/files/examples/v1.5/cfme-templates/cfme-pv-server-example.yaml b/roles/openshift_examples/files/examples/v1.5/cfme-templates/cfme-pv-server-example.yaml new file mode 100644 index 000000000..c08c21265 --- /dev/null +++ b/roles/openshift_examples/files/examples/v1.5/cfme-templates/cfme-pv-server-example.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: cfme-pv03 +spec: + capacity: + storage: 5Gi + accessModes: + - ReadWriteOnce + nfs: + path: /exports/cfme-pv03 + server: <your-nfs-host-here> + persistentVolumeReclaimPolicy: Retain diff --git a/roles/openshift_examples/files/examples/v1.5/cfme-templates/cfme-template.yaml b/roles/openshift_examples/files/examples/v1.5/cfme-templates/cfme-template.yaml index 4f25a9c8f..3bc6c5813 100644 --- a/roles/openshift_examples/files/examples/v1.5/cfme-templates/cfme-template.yaml +++ b/roles/openshift_examples/files/examples/v1.5/cfme-templates/cfme-template.yaml @@ -17,6 +17,7 @@ objects: service.alpha.openshift.io/dependencies: '[{"name":"${DATABASE_SERVICE_NAME}","namespace":"","kind":"Service"},{"name":"${MEMCACHED_SERVICE_NAME}","namespace":"","kind":"Service"}]' name: ${NAME} spec: + clusterIP: None ports: - name: http port: 80 @@ -48,11 +49,27 @@ objects: annotations: description: "Keeps track of changes in the CloudForms app image" spec: - dockerImageRepository: registry.access.redhat.com/cloudforms/cfme-openshift-app + dockerImageRepository: "${APPLICATION_IMG_NAME}" +- apiVersion: v1 + kind: ImageStream + metadata: + name: cfme-openshift-postgresql + annotations: + description: "Keeps track of changes in the CloudForms postgresql image" + spec: + dockerImageRepository: "${POSTGRESQL_IMG_NAME}" +- apiVersion: v1 + kind: ImageStream + metadata: + name: cfme-openshift-memcached + annotations: + description: "Keeps track of changes in the CloudForms memcached image" + spec: + dockerImageRepository: "${MEMCACHED_IMG_NAME}" - apiVersion: v1 kind: PersistentVolumeClaim metadata: - name: ${DATABASE_SERVICE_NAME} + name: "${NAME}-${DATABASE_SERVICE_NAME}" spec: accessModes: - ReadWriteOnce @@ -62,45 +79,41 @@ objects: - apiVersion: v1 kind: PersistentVolumeClaim metadata: - name: ${NAME} + name: "${NAME}-region" spec: accessModes: - ReadWriteOnce resources: requests: - storage: ${APPLICATION_VOLUME_CAPACITY} -- apiVersion: v1 - kind: 
"DeploymentConfig" + storage: ${APPLICATION_REGION_VOLUME_CAPACITY} +- apiVersion: apps/v1beta1 + kind: "StatefulSet" metadata: name: ${NAME} annotations: description: "Defines how to deploy the CloudForms appliance" spec: + serviceName: "${NAME}" + replicas: 1 template: metadata: labels: name: ${NAME} name: ${NAME} spec: - volumes: - - - name: "cfme-app-volume" - persistentVolumeClaim: - claimName: ${NAME} containers: - - image: cloudforms/cfme-openshift-app:${APPLICATION_IMG_TAG} - imagePullPolicy: IfNotPresent - name: cloudforms + - name: cloudforms + image: "${APPLICATION_IMG_NAME}:${APPLICATION_IMG_TAG}" livenessProbe: - httpGet: - path: / - port: 80 + tcpSocket: + port: 443 initialDelaySeconds: 480 timeoutSeconds: 3 readinessProbe: httpGet: path: / - port: 80 + port: 443 + scheme: HTTPS initialDelaySeconds: 200 timeoutSeconds: 3 ports: @@ -112,8 +125,11 @@ objects: privileged: true volumeMounts: - - name: "cfme-app-volume" + name: "${NAME}-server" mountPath: "/persistent" + - + name: "${NAME}-region" + mountPath: "/persistent-region" env: - name: "APPLICATION_INIT_DELAY" @@ -144,29 +160,32 @@ objects: value: "${POSTGRESQL_SHARED_BUFFERS}" resources: requests: - memory: "${MEMORY_APPLICATION_MIN}" + memory: "${APPLICATION_MEM_REQ}" + cpu: "${APPLICATION_CPU_REQ}" + limits: + memory: "${APPLICATION_MEM_LIMIT}" lifecycle: preStop: exec: command: - /opt/rh/cfme-container-scripts/sync-pv-data - replicas: 1 - selector: - name: ${NAME} - triggers: - - type: "ConfigChange" - - type: "ImageChange" - imageChangeParams: - automatic: true - containerNames: - - "cloudforms" - from: - kind: "ImageStreamTag" - name: "cfme-openshift-app:${APPLICATION_IMG_TAG}" - strategy: - type: "Recreate" - recreateParams: - timeoutSeconds: 1200 + volumes: + - + name: "${NAME}-region" + persistentVolumeClaim: + claimName: ${NAME}-region + volumeClaimTemplates: + - metadata: + name: "${NAME}-server" + annotations: + # Uncomment this if using dynamic volume provisioning. 
+ # https://docs.openshift.org/latest/install_config/persistent_storage/dynamically_provisioning_pvs.html + # volume.alpha.kubernetes.io/storage-class: anything + spec: + accessModes: [ ReadWriteOnce ] + resources: + requests: + storage: "${APPLICATION_VOLUME_CAPACITY}" - apiVersion: v1 kind: "Service" metadata: @@ -182,14 +201,6 @@ objects: selector: name: "${MEMCACHED_SERVICE_NAME}" - apiVersion: v1 - kind: ImageStream - metadata: - name: cfme-openshift-memcached - annotations: - description: "Keeps track of changes in the CloudForms memcached image" - spec: - dockerImageRepository: registry.access.redhat.com/cloudforms/cfme-openshift-memcached -- apiVersion: v1 kind: "DeploymentConfig" metadata: name: "${MEMCACHED_SERVICE_NAME}" @@ -223,7 +234,7 @@ objects: containers: - name: "memcached" - image: "cloudforms/cfme-openshift-memcached:${MEMCACHED_IMG_TAG}" + image: "${MEMCACHED_IMG_NAME}:${MEMCACHED_IMG_TAG}" ports: - containerPort: 11211 @@ -249,8 +260,11 @@ objects: name: "MEMCACHED_SLAB_PAGE_SIZE" value: "${MEMCACHED_SLAB_PAGE_SIZE}" resources: + requests: + memory: "${MEMCACHED_MEM_REQ}" + cpu: "${MEMCACHED_CPU_REQ}" limits: - memory: "${MEMORY_MEMCACHED_LIMIT}" + memory: "${MEMCACHED_MEM_LIMIT}" - apiVersion: v1 kind: "Service" metadata: @@ -266,14 +280,6 @@ objects: selector: name: "${DATABASE_SERVICE_NAME}" - apiVersion: v1 - kind: ImageStream - metadata: - name: cfme-openshift-postgresql - annotations: - description: "Keeps track of changes in the CloudForms postgresql image" - spec: - dockerImageRepository: registry.access.redhat.com/cloudforms/cfme-openshift-postgresql -- apiVersion: v1 kind: "DeploymentConfig" metadata: name: "${DATABASE_SERVICE_NAME}" @@ -307,11 +313,11 @@ objects: - name: "cfme-pgdb-volume" persistentVolumeClaim: - claimName: ${DATABASE_SERVICE_NAME} + claimName: "${NAME}-${DATABASE_SERVICE_NAME}" containers: - name: "postgresql" - image: "cloudforms/cfme-openshift-postgresql:${POSTGRESQL_IMG_TAG}" + image: "${POSTGRESQL_IMG_NAME}:${POSTGRESQL_IMG_TAG}" ports: - containerPort: 5432 @@ -350,8 +356,11 @@ objects: name: "POSTGRESQL_SHARED_BUFFERS" value: "${POSTGRESQL_SHARED_BUFFERS}" resources: + requests: + memory: "${POSTGRESQL_MEM_REQ}" + cpu: "${POSTGRESQL_CPU_REQ}" limits: - memory: "${MEMORY_POSTGRESQL_LIMIT}" + memory: "${POSTGRESQL_MEM_LIMIT}" parameters: - @@ -420,36 +429,87 @@ parameters: name: "POSTGRESQL_SHARED_BUFFERS" displayName: "PostgreSQL Shared Buffer Amount" description: "Amount of memory dedicated for PostgreSQL shared memory buffers." - value: "64MB" + value: "256MB" - - name: "MEMORY_APPLICATION_MIN" - displayName: "Application Memory Minimum" + name: "APPLICATION_CPU_REQ" + displayName: "Application Min CPU Requested" + required: true + description: "Minimum amount of CPU time the Application container will need (expressed in millicores)." + value: "1000m" + - + name: "POSTGRESQL_CPU_REQ" + displayName: "PostgreSQL Min CPU Requested" + required: true + description: "Minimum amount of CPU time the PostgreSQL container will need (expressed in millicores)." + value: "500m" + - + name: "MEMCACHED_CPU_REQ" + displayName: "Memcached Min CPU Requested" + required: true + description: "Minimum amount of CPU time the Memcached container will need (expressed in millicores)." + value: "200m" + - + name: "APPLICATION_MEM_REQ" + displayName: "Application Min RAM Requested" required: true description: "Minimum amount of memory the Application container will need." 
- value: "4096Mi" + value: "6144Mi" + - + name: "POSTGRESQL_MEM_REQ" + displayName: "PostgreSQL Min RAM Requested" + required: true + description: "Minimum amount of memory the PostgreSQL container will need." + value: "1024Mi" - - name: "MEMORY_POSTGRESQL_LIMIT" - displayName: "PostgreSQL Memory Limit" + name: "MEMCACHED_MEM_REQ" + displayName: "Memcached Min RAM Requested" required: true - description: "Maximum amount of memory the PostgreSQL container can use." - value: "2048Mi" + description: "Minimum amount of memory the Memcached container will need." + value: "64Mi" - - name: "MEMORY_MEMCACHED_LIMIT" - displayName: "Memcached Memory Limit" + name: "APPLICATION_MEM_LIMIT" + displayName: "Application Max RAM Limit" required: true - description: "Maximum amount of memory the Memcached container can use." + description: "Maximum amount of memory the Application container can consume." + value: "16384Mi" + - + name: "POSTGRESQL_MEM_LIMIT" + displayName: "PostgreSQL Max RAM Limit" + required: true + description: "Maximum amount of memory the PostgreSQL container can consume." + value: "8192Mi" + - + name: "MEMCACHED_MEM_LIMIT" + displayName: "Memcached Max RAM Limit" + required: true + description: "Maximum amount of memory the Memcached container can consume." value: "256Mi" - + name: "POSTGRESQL_IMG_NAME" + displayName: "PostgreSQL Image Name" + description: "This is the PostgreSQL image name requested to deploy." + value: "registry.access.redhat.com/cloudforms45/cfme-openshift-postgresql" + - name: "POSTGRESQL_IMG_TAG" displayName: "PostgreSQL Image Tag" description: "This is the PostgreSQL image tag/version requested to deploy." value: "latest" - + name: "MEMCACHED_IMG_NAME" + displayName: "Memcached Image Name" + description: "This is the Memcached image name requested to deploy." + value: "registry.access.redhat.com/cloudforms45/cfme-openshift-memcached" + - name: "MEMCACHED_IMG_TAG" displayName: "Memcached Image Tag" description: "This is the Memcached image tag/version requested to deploy." value: "latest" - + name: "APPLICATION_IMG_NAME" + displayName: "Application Image Name" + description: "This is the Application image name requested to deploy." + value: "registry.access.redhat.com/cloudforms45/cfme-openshift-app" + - name: "APPLICATION_IMG_TAG" displayName: "Application Image Tag" description: "This is the Application image tag/version requested to deploy." @@ -464,16 +524,22 @@ parameters: displayName: "Application Init Delay" required: true description: "Delay in seconds before we attempt to initialize the application." - value: "30" + value: "15" - name: "APPLICATION_VOLUME_CAPACITY" displayName: "Application Volume Capacity" required: true description: "Volume space available for application data." - value: "1Gi" + value: "5Gi" + - + name: "APPLICATION_REGION_VOLUME_CAPACITY" + displayName: "Application Region Volume Capacity" + required: true + description: "Volume space available for region application data." + value: "5Gi" - name: "DATABASE_VOLUME_CAPACITY" displayName: "Database Volume Capacity" required: true description: "Volume space available for database." 
- value: "1Gi" + value: "15Gi" diff --git a/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-app-example.yaml b/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-app-example.yaml deleted file mode 100644 index 14bdd1dca..000000000 --- a/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-app-example.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: v1 -kind: PersistentVolume -metadata: - name: cloudforms -spec: - capacity: - storage: 2Gi - accessModes: - - ReadWriteOnce - nfs: - path: /opt/nfs/volumes-app - server: 10.19.0.216 - persistentVolumeReclaimPolicy: Recycle diff --git a/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-db-example.yaml b/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-db-example.yaml new file mode 100644 index 000000000..250a99b8d --- /dev/null +++ b/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-db-example.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: cfme-pv01 +spec: + capacity: + storage: 15Gi + accessModes: + - ReadWriteOnce + nfs: + path: /exports/cfme-pv01 + server: <your-nfs-host-here> + persistentVolumeReclaimPolicy: Retain diff --git a/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-example.yaml b/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-example.yaml deleted file mode 100644 index 709d8d976..000000000 --- a/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-example.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: v1 -kind: PersistentVolume -metadata: - name: nfs-pv01 -spec: - capacity: - storage: 2Gi - accessModes: - - ReadWriteOnce - nfs: - path: /opt/nfs/volumes - server: 10.19.0.216 - persistentVolumeReclaimPolicy: Recycle diff --git a/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-region-example.yaml b/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-region-example.yaml new file mode 100644 index 000000000..cba9bbe35 --- /dev/null +++ b/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-region-example.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: cfme-pv02 +spec: + capacity: + storage: 5Gi + accessModes: + - ReadWriteOnce + nfs: + path: /exports/cfme-pv02 + server: <your-nfs-host-here> + persistentVolumeReclaimPolicy: Retain diff --git a/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-server-example.yaml b/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-server-example.yaml new file mode 100644 index 000000000..c08c21265 --- /dev/null +++ b/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-server-example.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: cfme-pv03 +spec: + capacity: + storage: 5Gi + accessModes: + - ReadWriteOnce + nfs: + path: /exports/cfme-pv03 + server: <your-nfs-host-here> + persistentVolumeReclaimPolicy: Retain diff --git a/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-template.yaml b/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-template.yaml index 4f25a9c8f..3bc6c5813 100644 --- a/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-template.yaml +++ b/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-template.yaml @@ -17,6 +17,7 @@ objects: service.alpha.openshift.io/dependencies: 
'[{"name":"${DATABASE_SERVICE_NAME}","namespace":"","kind":"Service"},{"name":"${MEMCACHED_SERVICE_NAME}","namespace":"","kind":"Service"}]' name: ${NAME} spec: + clusterIP: None ports: - name: http port: 80 @@ -48,11 +49,27 @@ objects: annotations: description: "Keeps track of changes in the CloudForms app image" spec: - dockerImageRepository: registry.access.redhat.com/cloudforms/cfme-openshift-app + dockerImageRepository: "${APPLICATION_IMG_NAME}" +- apiVersion: v1 + kind: ImageStream + metadata: + name: cfme-openshift-postgresql + annotations: + description: "Keeps track of changes in the CloudForms postgresql image" + spec: + dockerImageRepository: "${POSTGRESQL_IMG_NAME}" +- apiVersion: v1 + kind: ImageStream + metadata: + name: cfme-openshift-memcached + annotations: + description: "Keeps track of changes in the CloudForms memcached image" + spec: + dockerImageRepository: "${MEMCACHED_IMG_NAME}" - apiVersion: v1 kind: PersistentVolumeClaim metadata: - name: ${DATABASE_SERVICE_NAME} + name: "${NAME}-${DATABASE_SERVICE_NAME}" spec: accessModes: - ReadWriteOnce @@ -62,45 +79,41 @@ objects: - apiVersion: v1 kind: PersistentVolumeClaim metadata: - name: ${NAME} + name: "${NAME}-region" spec: accessModes: - ReadWriteOnce resources: requests: - storage: ${APPLICATION_VOLUME_CAPACITY} -- apiVersion: v1 - kind: "DeploymentConfig" + storage: ${APPLICATION_REGION_VOLUME_CAPACITY} +- apiVersion: apps/v1beta1 + kind: "StatefulSet" metadata: name: ${NAME} annotations: description: "Defines how to deploy the CloudForms appliance" spec: + serviceName: "${NAME}" + replicas: 1 template: metadata: labels: name: ${NAME} name: ${NAME} spec: - volumes: - - - name: "cfme-app-volume" - persistentVolumeClaim: - claimName: ${NAME} containers: - - image: cloudforms/cfme-openshift-app:${APPLICATION_IMG_TAG} - imagePullPolicy: IfNotPresent - name: cloudforms + - name: cloudforms + image: "${APPLICATION_IMG_NAME}:${APPLICATION_IMG_TAG}" livenessProbe: - httpGet: - path: / - port: 80 + tcpSocket: + port: 443 initialDelaySeconds: 480 timeoutSeconds: 3 readinessProbe: httpGet: path: / - port: 80 + port: 443 + scheme: HTTPS initialDelaySeconds: 200 timeoutSeconds: 3 ports: @@ -112,8 +125,11 @@ objects: privileged: true volumeMounts: - - name: "cfme-app-volume" + name: "${NAME}-server" mountPath: "/persistent" + - + name: "${NAME}-region" + mountPath: "/persistent-region" env: - name: "APPLICATION_INIT_DELAY" @@ -144,29 +160,32 @@ objects: value: "${POSTGRESQL_SHARED_BUFFERS}" resources: requests: - memory: "${MEMORY_APPLICATION_MIN}" + memory: "${APPLICATION_MEM_REQ}" + cpu: "${APPLICATION_CPU_REQ}" + limits: + memory: "${APPLICATION_MEM_LIMIT}" lifecycle: preStop: exec: command: - /opt/rh/cfme-container-scripts/sync-pv-data - replicas: 1 - selector: - name: ${NAME} - triggers: - - type: "ConfigChange" - - type: "ImageChange" - imageChangeParams: - automatic: true - containerNames: - - "cloudforms" - from: - kind: "ImageStreamTag" - name: "cfme-openshift-app:${APPLICATION_IMG_TAG}" - strategy: - type: "Recreate" - recreateParams: - timeoutSeconds: 1200 + volumes: + - + name: "${NAME}-region" + persistentVolumeClaim: + claimName: ${NAME}-region + volumeClaimTemplates: + - metadata: + name: "${NAME}-server" + annotations: + # Uncomment this if using dynamic volume provisioning. 
+ # https://docs.openshift.org/latest/install_config/persistent_storage/dynamically_provisioning_pvs.html + # volume.alpha.kubernetes.io/storage-class: anything + spec: + accessModes: [ ReadWriteOnce ] + resources: + requests: + storage: "${APPLICATION_VOLUME_CAPACITY}" - apiVersion: v1 kind: "Service" metadata: @@ -182,14 +201,6 @@ objects: selector: name: "${MEMCACHED_SERVICE_NAME}" - apiVersion: v1 - kind: ImageStream - metadata: - name: cfme-openshift-memcached - annotations: - description: "Keeps track of changes in the CloudForms memcached image" - spec: - dockerImageRepository: registry.access.redhat.com/cloudforms/cfme-openshift-memcached -- apiVersion: v1 kind: "DeploymentConfig" metadata: name: "${MEMCACHED_SERVICE_NAME}" @@ -223,7 +234,7 @@ objects: containers: - name: "memcached" - image: "cloudforms/cfme-openshift-memcached:${MEMCACHED_IMG_TAG}" + image: "${MEMCACHED_IMG_NAME}:${MEMCACHED_IMG_TAG}" ports: - containerPort: 11211 @@ -249,8 +260,11 @@ objects: name: "MEMCACHED_SLAB_PAGE_SIZE" value: "${MEMCACHED_SLAB_PAGE_SIZE}" resources: + requests: + memory: "${MEMCACHED_MEM_REQ}" + cpu: "${MEMCACHED_CPU_REQ}" limits: - memory: "${MEMORY_MEMCACHED_LIMIT}" + memory: "${MEMCACHED_MEM_LIMIT}" - apiVersion: v1 kind: "Service" metadata: @@ -266,14 +280,6 @@ objects: selector: name: "${DATABASE_SERVICE_NAME}" - apiVersion: v1 - kind: ImageStream - metadata: - name: cfme-openshift-postgresql - annotations: - description: "Keeps track of changes in the CloudForms postgresql image" - spec: - dockerImageRepository: registry.access.redhat.com/cloudforms/cfme-openshift-postgresql -- apiVersion: v1 kind: "DeploymentConfig" metadata: name: "${DATABASE_SERVICE_NAME}" @@ -307,11 +313,11 @@ objects: - name: "cfme-pgdb-volume" persistentVolumeClaim: - claimName: ${DATABASE_SERVICE_NAME} + claimName: "${NAME}-${DATABASE_SERVICE_NAME}" containers: - name: "postgresql" - image: "cloudforms/cfme-openshift-postgresql:${POSTGRESQL_IMG_TAG}" + image: "${POSTGRESQL_IMG_NAME}:${POSTGRESQL_IMG_TAG}" ports: - containerPort: 5432 @@ -350,8 +356,11 @@ objects: name: "POSTGRESQL_SHARED_BUFFERS" value: "${POSTGRESQL_SHARED_BUFFERS}" resources: + requests: + memory: "${POSTGRESQL_MEM_REQ}" + cpu: "${POSTGRESQL_CPU_REQ}" limits: - memory: "${MEMORY_POSTGRESQL_LIMIT}" + memory: "${POSTGRESQL_MEM_LIMIT}" parameters: - @@ -420,36 +429,87 @@ parameters: name: "POSTGRESQL_SHARED_BUFFERS" displayName: "PostgreSQL Shared Buffer Amount" description: "Amount of memory dedicated for PostgreSQL shared memory buffers." - value: "64MB" + value: "256MB" - - name: "MEMORY_APPLICATION_MIN" - displayName: "Application Memory Minimum" + name: "APPLICATION_CPU_REQ" + displayName: "Application Min CPU Requested" + required: true + description: "Minimum amount of CPU time the Application container will need (expressed in millicores)." + value: "1000m" + - + name: "POSTGRESQL_CPU_REQ" + displayName: "PostgreSQL Min CPU Requested" + required: true + description: "Minimum amount of CPU time the PostgreSQL container will need (expressed in millicores)." + value: "500m" + - + name: "MEMCACHED_CPU_REQ" + displayName: "Memcached Min CPU Requested" + required: true + description: "Minimum amount of CPU time the Memcached container will need (expressed in millicores)." + value: "200m" + - + name: "APPLICATION_MEM_REQ" + displayName: "Application Min RAM Requested" required: true description: "Minimum amount of memory the Application container will need." 
- value: "4096Mi" + value: "6144Mi" + - + name: "POSTGRESQL_MEM_REQ" + displayName: "PostgreSQL Min RAM Requested" + required: true + description: "Minimum amount of memory the PostgreSQL container will need." + value: "1024Mi" - - name: "MEMORY_POSTGRESQL_LIMIT" - displayName: "PostgreSQL Memory Limit" + name: "MEMCACHED_MEM_REQ" + displayName: "Memcached Min RAM Requested" required: true - description: "Maximum amount of memory the PostgreSQL container can use." - value: "2048Mi" + description: "Minimum amount of memory the Memcached container will need." + value: "64Mi" - - name: "MEMORY_MEMCACHED_LIMIT" - displayName: "Memcached Memory Limit" + name: "APPLICATION_MEM_LIMIT" + displayName: "Application Max RAM Limit" required: true - description: "Maximum amount of memory the Memcached container can use." + description: "Maximum amount of memory the Application container can consume." + value: "16384Mi" + - + name: "POSTGRESQL_MEM_LIMIT" + displayName: "PostgreSQL Max RAM Limit" + required: true + description: "Maximum amount of memory the PostgreSQL container can consume." + value: "8192Mi" + - + name: "MEMCACHED_MEM_LIMIT" + displayName: "Memcached Max RAM Limit" + required: true + description: "Maximum amount of memory the Memcached container can consume." value: "256Mi" - + name: "POSTGRESQL_IMG_NAME" + displayName: "PostgreSQL Image Name" + description: "This is the PostgreSQL image name requested to deploy." + value: "registry.access.redhat.com/cloudforms45/cfme-openshift-postgresql" + - name: "POSTGRESQL_IMG_TAG" displayName: "PostgreSQL Image Tag" description: "This is the PostgreSQL image tag/version requested to deploy." value: "latest" - + name: "MEMCACHED_IMG_NAME" + displayName: "Memcached Image Name" + description: "This is the Memcached image name requested to deploy." + value: "registry.access.redhat.com/cloudforms45/cfme-openshift-memcached" + - name: "MEMCACHED_IMG_TAG" displayName: "Memcached Image Tag" description: "This is the Memcached image tag/version requested to deploy." value: "latest" - + name: "APPLICATION_IMG_NAME" + displayName: "Application Image Name" + description: "This is the Application image name requested to deploy." + value: "registry.access.redhat.com/cloudforms45/cfme-openshift-app" + - name: "APPLICATION_IMG_TAG" displayName: "Application Image Tag" description: "This is the Application image tag/version requested to deploy." @@ -464,16 +524,22 @@ parameters: displayName: "Application Init Delay" required: true description: "Delay in seconds before we attempt to initialize the application." - value: "30" + value: "15" - name: "APPLICATION_VOLUME_CAPACITY" displayName: "Application Volume Capacity" required: true description: "Volume space available for application data." - value: "1Gi" + value: "5Gi" + - + name: "APPLICATION_REGION_VOLUME_CAPACITY" + displayName: "Application Region Volume Capacity" + required: true + description: "Volume space available for region application data." + value: "5Gi" - name: "DATABASE_VOLUME_CAPACITY" displayName: "Database Volume Capacity" required: true description: "Volume space available for database." 
- value: "1Gi" + value: "15Gi" diff --git a/roles/openshift_examples/files/examples/v3.6/cfme-templates/jboss-middleware-manager-pv-example.yaml b/roles/openshift_examples/files/examples/v3.6/cfme-templates/jboss-middleware-manager-pv-example.yaml new file mode 100644 index 000000000..240f6cbdf --- /dev/null +++ b/roles/openshift_examples/files/examples/v3.6/cfme-templates/jboss-middleware-manager-pv-example.yaml @@ -0,0 +1,58 @@ +# +# Copyright 2016-2017 Red Hat, Inc. and/or its affiliates +# and other contributors as indicated by the @author tags. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +apiVersion: v1 +kind: Template +parameters: +- name: HAWKULAR_SERVICES_DATA_LIMIT + description: Maximum amount of data used by hawkular-services container (mostly logging) + displayName: Hawkular Services Container Data Limit + value: 1Gi +- name: CASSANDRA_DATA_LIMIT + description: Maximum amount of data used by Cassandra container + displayName: Cassandra Container Data Limit + value: 2Gi + +objects: +- apiVersion: v1 + kind: PersistentVolume + metadata: + name: h-services-pv + labels: + type: h-services + spec: + capacity: + storage: ${HAWKULAR_SERVICES_DATA_LIMIT} + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain + hostPath: + path: /tmp/pv-services +- apiVersion: v1 + kind: PersistentVolume + metadata: + name: cassandra-pv + labels: + type: cassandra + spec: + capacity: + storage: ${CASSANDRA_DATA_LIMIT} + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain + hostPath: + path: /tmp/pv-cassandra diff --git a/roles/openshift_examples/files/examples/v3.6/cfme-templates/jboss-middleware-manager-template.yaml b/roles/openshift_examples/files/examples/v3.6/cfme-templates/jboss-middleware-manager-template.yaml new file mode 100644 index 000000000..bbc0c7044 --- /dev/null +++ b/roles/openshift_examples/files/examples/v3.6/cfme-templates/jboss-middleware-manager-template.yaml @@ -0,0 +1,254 @@ +# +# Copyright 2016-2017 Red Hat, Inc. and/or its affiliates +# and other contributors as indicated by the @author tags. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +apiVersion: v1 +kind: Template +metadata: + name: hawkular-services + annotations: + openshift.io/display-name: Hawkular Services + description: Hawkular-Services all-in-one (including Hawkular Metrics, Hawkular Alerts and Hawkular Inventory).
+ iconClass: icon-wildfly + tags: hawkular,hawkular-services,metrics,alerts,manageiq,cassandra + +parameters: +- name: HAWKULAR_SERVICES_IMAGE + description: What docker image should be used for hawkular-services. + displayName: Hawkular Services Docker Image + value: registry.access.redhat.com/jboss-mm-7-tech-preview/middleware-manager:latest +- name: CASSANDRA_IMAGE + description: What docker image should be used for cassandra node. + displayName: Cassandra Docker Image + value: registry.access.redhat.com/openshift3/metrics-cassandra:3.5.0 +- name: CASSANDRA_MEMORY_LIMIT + description: Maximum amount of memory for Cassandra container. + displayName: Cassandra Memory Limit + value: 2Gi +- name: CASSANDRA_DATA_LIMIT + description: Maximum amount data used by Cassandra container. + displayName: Cassandra Container Data Limit + value: 2Gi +- name: HAWKULAR_SERVICES_DATA_LIMIT + description: Maximum amount data used by hawkular-services container (mostly logging). + displayName: Hawkular Services Container Data Limit + value: 1Gi +- name: ROUTE_NAME + description: Public route with this name will be created. + displayName: Route Name + value: hawkular-services +- name: ROUTE_HOSTNAME + description: Under this hostname the Hawkular Services will be accessible, if left blank a value will be defaulted. + displayName: Hostname +- name: HAWKULAR_USER + description: Username that is used for accessing the Hawkular Services, if left blank a value will be generated. + displayName: Hawkular User + from: '[a-zA-Z0-9]{16}' + generate: expression +- name: HAWKULAR_PASSWORD + description: Password that is used for accessing the Hawkular Services, if left blank a value will be generated. + displayName: Hawkular Password + from: '[a-zA-Z0-9]{16}' + generate: expression +labels: + template: hawkular-services +message: Credentials for hawkular-services are ${HAWKULAR_USER}:${HAWKULAR_PASSWORD} + +objects: +- apiVersion: v1 + kind: Service + metadata: + annotations: + description: Exposes and load balances the application pods + service.alpha.openshift.io/dependencies: '[{"name":"hawkular-cassandra","namespace":"","kind":"Service"}]' + name: hawkular-services + spec: + ports: + - name: http-8080-tcp + port: 8080 + protocol: TCP + targetPort: 8080 + - name: admin-9990-tcp + port: 9990 + protocol: TCP + targetPort: 9990 + selector: + name: hawkular-services + type: ClusterIP +- apiVersion: v1 + kind: Service + metadata: + annotations: + description: Cassandra Service + name: hawkular-cassandra + spec: + ports: + - name: cql-9042-tcp + port: 9042 + protocol: TCP + targetPort: 9042 + selector: + name: hawkular-cassandra +- apiVersion: v1 + kind: Route + metadata: + name: ${ROUTE_NAME} + spec: + host: ${ROUTE_HOSTNAME} + to: + kind: Service + name: hawkular-services + port: + targetPort: http-8080-tcp + +- apiVersion: v1 + kind: DeploymentConfig + metadata: + annotations: + description: Defines how to deploy the application server + name: hawkular-services + spec: + replicas: 1 + selector: + name: hawkular-services + strategy: + type: Rolling + template: + metadata: + labels: + name: hawkular-services + spec: + containers: + - image: ${HAWKULAR_SERVICES_IMAGE} + env: + - name: HAWKULAR_BACKEND + value: remote + - name: CASSANDRA_NODES + value: hawkular-cassandra + - name: HAWKULAR_USER + value: ${HAWKULAR_USER} + - name: HAWKULAR_PASSWORD + value: ${HAWKULAR_PASSWORD} + imagePullPolicy: IfNotPresent + name: hawkular-services + volumeMounts: + - name: h-services-data + mountPath: /var/opt/hawkular + ports: + - 
containerPort: 8080 + - containerPort: 9990 + livenessProbe: + exec: + command: + - /opt/hawkular/bin/ready.sh + initialDelaySeconds: 180 + timeoutSeconds: 3 + readinessProbe: + exec: + command: + - /opt/hawkular/bin/ready.sh + initialDelaySeconds: 120 + timeoutSeconds: 3 + periodSeconds: 5 + successThreshold: 1 + failureThreshold: 12 + resources: + requests: + memory: 1024Mi + cpu: 2000m + dnsPolicy: ClusterFirst + restartPolicy: Always + volumes: + - name: h-services-data + persistentVolumeClaim: + claimName: h-services-pvc + +- apiVersion: v1 + kind: DeploymentConfig + metadata: + annotations: + description: Defines how to deploy the cassandra + name: hawkular-cassandra + spec: + replicas: 1 + selector: + name: hawkular-cassandra + strategy: + type: Recreate + rollingParams: + timeoutSeconds: 300 + template: + metadata: + labels: + name: hawkular-cassandra + spec: + containers: + - image: ${CASSANDRA_IMAGE} + imagePullPolicy: Always + name: hawkular-cassandra + env: + - name: DATA_VOLUME + value: /var/lib/cassandra + volumeMounts: + - name: cassandra-data + mountPath: /var/lib/cassandra + ports: + - containerPort: 9042 + - containerPort: 9160 + readinessProbe: + exec: + command: ['nodetool', 'status'] + initialDelaySeconds: 30 + timeoutSeconds: 10 + periodSeconds: 15 + successThreshold: 1 + failureThreshold: 3 + livenessProbe: + exec: + command: ['nodetool', 'status'] + initialDelaySeconds: 300 + timeoutSeconds: 10 + periodSeconds: 15 + successThreshold: 1 + failureThreshold: 3 + resources: + limits: + memory: ${CASSANDRA_MEMORY_LIMIT} + volumes: + - name: cassandra-data + persistentVolumeClaim: + claimName: cassandra-pvc + +- apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: h-services-pvc + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi +- apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: cassandra-pvc + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi diff --git a/roles/openshift_examples/files/examples/v3.6/db-templates/mariadb-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/mariadb-ephemeral-template.json index f347f1f9f..40f8b7933 100644 --- a/roles/openshift_examples/files/examples/v3.6/db-templates/mariadb-ephemeral-template.json +++ b/roles/openshift_examples/files/examples/v3.6/db-templates/mariadb-ephemeral-template.json @@ -23,7 +23,12 @@ "kind": "Secret", "apiVersion": "v1", "metadata": { - "name": "${DATABASE_SERVICE_NAME}" + "name": "${DATABASE_SERVICE_NAME}", + "annotations": { + "template.openshift.io/expose-username": "{.data['database-user']}", + "template.openshift.io/expose-password": "{.data['database-password']}", + "template.openshift.io/expose-root-password": "{.data['database-root-password']}" + } }, "stringData" : { "database-user" : "${MYSQL_USER}", @@ -35,7 +40,10 @@ "kind": "Service", "apiVersion": "v1", "metadata": { - "name": "${DATABASE_SERVICE_NAME}" + "name": "${DATABASE_SERVICE_NAME}", + "annotations": { + "template.openshift.io/expose-uri": "mysql://{.spec.clusterIP}:{.spec.ports[?(.name==\"mariadb\")].port}" + } }, "spec": { "ports": [ diff --git a/roles/openshift_examples/files/examples/v3.6/db-templates/mariadb-persistent-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/mariadb-persistent-template.json index 6ed744777..3d8f592cb 100644 --- a/roles/openshift_examples/files/examples/v3.6/db-templates/mariadb-persistent-template.json +++ 
b/roles/openshift_examples/files/examples/v3.6/db-templates/mariadb-persistent-template.json @@ -23,7 +23,12 @@ "kind": "Secret", "apiVersion": "v1", "metadata": { - "name": "${DATABASE_SERVICE_NAME}" + "name": "${DATABASE_SERVICE_NAME}", + "annotations": { + "template.openshift.io/expose-username": "{.data['database-user']}", + "template.openshift.io/expose-password": "{.data['database-password']}", + "template.openshift.io/expose-root-password": "{.data['database-root-password']}" + } }, "stringData" : { "database-user" : "${MYSQL_USER}", @@ -35,7 +40,10 @@ "kind": "Service", "apiVersion": "v1", "metadata": { - "name": "${DATABASE_SERVICE_NAME}" + "name": "${DATABASE_SERVICE_NAME}", + "annotations": { + "template.openshift.io/expose-uri": "mysql://{.spec.clusterIP}:{.spec.ports[?(.name==\"mariadb\")].port}" + } }, "spec": { "ports": [ diff --git a/roles/openshift_examples/files/examples/v3.6/db-templates/mongodb-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/mongodb-ephemeral-template.json index 97a8abf6d..894cba750 100644 --- a/roles/openshift_examples/files/examples/v3.6/db-templates/mongodb-ephemeral-template.json +++ b/roles/openshift_examples/files/examples/v3.6/db-templates/mongodb-ephemeral-template.json @@ -24,7 +24,12 @@ "kind": "Secret", "apiVersion": "v1", "metadata": { - "name": "${DATABASE_SERVICE_NAME}" + "name": "${DATABASE_SERVICE_NAME}", + "annotations": { + "template.openshift.io/expose-username": "{.data['database-user']}", + "template.openshift.io/expose-password": "{.data['database-password']}", + "template.openshift.io/expose-admin-password": "{.data['database-admin-password']}" + } }, "stringData" : { "database-user" : "${MONGODB_USER}", @@ -37,7 +42,10 @@ "apiVersion": "v1", "metadata": { "name": "${DATABASE_SERVICE_NAME}", - "creationTimestamp": null + "creationTimestamp": null, + "annotations": { + "template.openshift.io/expose-uri": "mongodb://{.spec.clusterIP}:{.spec.ports[?(.name==\"mongo\")].port}" + } }, "spec": { "ports": [ diff --git a/roles/openshift_examples/files/examples/v3.6/db-templates/mongodb-persistent-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/mongodb-persistent-template.json index 0656219fb..d5c25a5bb 100644 --- a/roles/openshift_examples/files/examples/v3.6/db-templates/mongodb-persistent-template.json +++ b/roles/openshift_examples/files/examples/v3.6/db-templates/mongodb-persistent-template.json @@ -24,7 +24,12 @@ "kind": "Secret", "apiVersion": "v1", "metadata": { - "name": "${DATABASE_SERVICE_NAME}" + "name": "${DATABASE_SERVICE_NAME}", + "annotations": { + "template.openshift.io/expose-username": "{.data['database-user']}", + "template.openshift.io/expose-password": "{.data['database-password']}", + "template.openshift.io/expose-admin-password": "{.data['database-admin-password']}" + } }, "stringData" : { "database-user" : "${MONGODB_USER}", @@ -37,7 +42,10 @@ "apiVersion": "v1", "metadata": { "name": "${DATABASE_SERVICE_NAME}", - "creationTimestamp": null + "creationTimestamp": null, + "annotations": { + "template.openshift.io/expose-uri": "mongodb://{.spec.clusterIP}:{.spec.ports[?(.name==\"mongo\")].port}" + } }, "spec": { "ports": [ diff --git a/roles/openshift_examples/files/examples/v3.6/db-templates/mysql-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/mysql-ephemeral-template.json index d60b4647d..10f3bb09e 100644 --- a/roles/openshift_examples/files/examples/v3.6/db-templates/mysql-ephemeral-template.json +++ 
b/roles/openshift_examples/files/examples/v3.6/db-templates/mysql-ephemeral-template.json @@ -23,7 +23,12 @@ "kind": "Secret", "apiVersion": "v1", "metadata": { - "name": "${DATABASE_SERVICE_NAME}" + "name": "${DATABASE_SERVICE_NAME}", + "annotations": { + "template.openshift.io/expose-username": "{.data['database-user']}", + "template.openshift.io/expose-password": "{.data['database-password']}", + "template.openshift.io/expose-root-password": "{.data['database-root-password']}" + } }, "stringData" : { "database-user" : "${MYSQL_USER}", @@ -36,7 +41,10 @@ "apiVersion": "v1", "metadata": { "name": "${DATABASE_SERVICE_NAME}", - "creationTimestamp": null + "creationTimestamp": null, + "annotations": { + "template.openshift.io/expose-uri": "mysql://{.spec.clusterIP}:{.spec.ports[?(.name==\"mysql\")].port}" + } }, "spec": { "ports": [ diff --git a/roles/openshift_examples/files/examples/v3.6/db-templates/mysql-persistent-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/mysql-persistent-template.json index c2bfa40fd..2fd82093a 100644 --- a/roles/openshift_examples/files/examples/v3.6/db-templates/mysql-persistent-template.json +++ b/roles/openshift_examples/files/examples/v3.6/db-templates/mysql-persistent-template.json @@ -23,7 +23,12 @@ "kind": "Secret", "apiVersion": "v1", "metadata": { - "name": "${DATABASE_SERVICE_NAME}" + "name": "${DATABASE_SERVICE_NAME}", + "annotations": { + "template.openshift.io/expose-username": "{.data['database-user']}", + "template.openshift.io/expose-password": "{.data['database-password']}", + "template.openshift.io/expose-root-password": "{.data['database-root-password']}" + } }, "stringData" : { "database-user" : "${MYSQL_USER}", @@ -35,7 +40,10 @@ "kind": "Service", "apiVersion": "v1", "metadata": { - "name": "${DATABASE_SERVICE_NAME}" + "name": "${DATABASE_SERVICE_NAME}", + "annotations": { + "template.openshift.io/expose-uri": "mysql://{.spec.clusterIP}:{.spec.ports[?(.name==\"mysql\")].port}" + } }, "spec": { "ports": [ diff --git a/roles/openshift_examples/files/examples/v3.6/db-templates/postgresql-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/postgresql-ephemeral-template.json index 7a16e742a..c37102cb0 100644 --- a/roles/openshift_examples/files/examples/v3.6/db-templates/postgresql-ephemeral-template.json +++ b/roles/openshift_examples/files/examples/v3.6/db-templates/postgresql-ephemeral-template.json @@ -24,7 +24,11 @@ "kind": "Secret", "apiVersion": "v1", "metadata": { - "name": "${DATABASE_SERVICE_NAME}" + "name": "${DATABASE_SERVICE_NAME}", + "annotations": { + "template.openshift.io/expose-username": "{.data['database-user']}", + "template.openshift.io/expose-password": "{.data['database-password']}" + } }, "stringData" : { "database-user" : "${POSTGRESQL_USER}", @@ -36,7 +40,10 @@ "apiVersion": "v1", "metadata": { "name": "${DATABASE_SERVICE_NAME}", - "creationTimestamp": null + "creationTimestamp": null, + "annotations": { + "template.openshift.io/expose-uri": "postgres://{.spec.clusterIP}:{.spec.ports[?(.name==\"postgresql\")].port}" + } }, "spec": { "ports": [ diff --git a/roles/openshift_examples/files/examples/v3.6/db-templates/postgresql-persistent-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/postgresql-persistent-template.json index 242212d6f..32dc93a95 100644 --- a/roles/openshift_examples/files/examples/v3.6/db-templates/postgresql-persistent-template.json +++ 
b/roles/openshift_examples/files/examples/v3.6/db-templates/postgresql-persistent-template.json @@ -24,7 +24,11 @@ "kind": "Secret", "apiVersion": "v1", "metadata": { - "name": "${DATABASE_SERVICE_NAME}" + "name": "${DATABASE_SERVICE_NAME}", + "annotations": { + "template.openshift.io/expose-username": "{.data['database-user']}", + "template.openshift.io/expose-password": "{.data['database-password']}" + } }, "stringData" : { "database-user" : "${POSTGRESQL_USER}", @@ -36,7 +40,10 @@ "apiVersion": "v1", "metadata": { "name": "${DATABASE_SERVICE_NAME}", - "creationTimestamp": null + "creationTimestamp": null, + "annotations": { + "template.openshift.io/expose-uri": "postgres://{.spec.clusterIP}:{.spec.ports[?(.name==\"postgresql\")].port}" + } }, "spec": { "ports": [ diff --git a/roles/openshift_examples/files/examples/v3.6/db-templates/redis-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/redis-ephemeral-template.json index e9af50937..6bb683e52 100644 --- a/roles/openshift_examples/files/examples/v3.6/db-templates/redis-ephemeral-template.json +++ b/roles/openshift_examples/files/examples/v3.6/db-templates/redis-ephemeral-template.json @@ -24,7 +24,10 @@ "kind": "Secret", "apiVersion": "v1", "metadata": { - "name": "${DATABASE_SERVICE_NAME}" + "name": "${DATABASE_SERVICE_NAME}", + "annotations": { + "template.openshift.io/expose-password": "{.data['database-password']}" + } }, "stringData" : { "database-password" : "${REDIS_PASSWORD}" @@ -35,7 +38,10 @@ "apiVersion": "v1", "metadata": { "name": "${DATABASE_SERVICE_NAME}", - "creationTimestamp": null + "creationTimestamp": null, + "annotations": { + "template.openshift.io/expose-uri": "redis://{.spec.clusterIP}:{.spec.ports[?(.name==\"redis\")].port}" + } }, "spec": { "ports": [ diff --git a/roles/openshift_examples/files/examples/v3.6/db-templates/redis-persistent-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/redis-persistent-template.json index aa27578a9..9e8be2309 100644 --- a/roles/openshift_examples/files/examples/v3.6/db-templates/redis-persistent-template.json +++ b/roles/openshift_examples/files/examples/v3.6/db-templates/redis-persistent-template.json @@ -24,7 +24,10 @@ "kind": "Secret", "apiVersion": "v1", "metadata": { - "name": "${DATABASE_SERVICE_NAME}" + "name": "${DATABASE_SERVICE_NAME}", + "annotations": { + "template.openshift.io/expose-password": "{.data['database-password']}" + } }, "stringData" : { "database-password" : "${REDIS_PASSWORD}" @@ -35,7 +38,10 @@ "apiVersion": "v1", "metadata": { "name": "${DATABASE_SERVICE_NAME}", - "creationTimestamp": null + "creationTimestamp": null, + "annotations": { + "template.openshift.io/expose-uri": "redis://{.spec.clusterIP}:{.spec.ports[?(.name==\"redis\")].port}" + } }, "spec": { "ports": [ diff --git a/roles/openshift_examples/files/examples/v3.6/image-streams/image-streams-centos7.json b/roles/openshift_examples/files/examples/v3.6/image-streams/image-streams-centos7.json index 2583018b7..6cef21945 100644 --- a/roles/openshift_examples/files/examples/v3.6/image-streams/image-streams-centos7.json +++ b/roles/openshift_examples/files/examples/v3.6/image-streams/image-streams-centos7.json @@ -7,6 +7,51 @@ "kind": "ImageStream", "apiVersion": "v1", "metadata": { + "name": "httpd", + "annotations": { + "openshift.io/display-name": "Httpd" + } + }, + "spec": { + "tags": [ + { + "name": "latest", + "annotations": { + "openshift.io/display-name": "Httpd (Latest)", + "description": "Build and serve static content via Httpd 
on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/httpd-container/blob/master/2.4/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Httpd available on OpenShift, including major versions updates.", + "iconClass": "icon-apache", + "tags": "builder,httpd", + "supports":"httpd", + "sampleRepo": "https://github.com/openshift/httpd-ex.git" + }, + "from": { + "kind": "ImageStreamTag", + "name": "2.4" + } + }, + { + "name": "2.4", + "annotations": { + "openshift.io/display-name": "Httpd 2.4", + "description": "Build and serve static content via Httpd on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/httpd-container/blob/master/2.4/README.md.", + "iconClass": "icon-apache", + "tags": "builder,httpd", + "supports":"httpd", + "version": "2.4", + "sampleRepo": "https://github.com/openshift/httpd-ex.git" + }, + "from": { + "kind": "DockerImage", + "name": "centos/httpd-24-centos7:latest" + } + } + ] + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { "name": "ruby", "annotations": { "openshift.io/display-name": "Ruby" diff --git a/roles/openshift_examples/files/examples/v3.6/image-streams/image-streams-rhel7.json b/roles/openshift_examples/files/examples/v3.6/image-streams/image-streams-rhel7.json index b65f0a5e3..abdae01e3 100644 --- a/roles/openshift_examples/files/examples/v3.6/image-streams/image-streams-rhel7.json +++ b/roles/openshift_examples/files/examples/v3.6/image-streams/image-streams-rhel7.json @@ -7,6 +7,51 @@ "kind": "ImageStream", "apiVersion": "v1", "metadata": { + "name": "httpd", + "annotations": { + "openshift.io/display-name": "Httpd" + } + }, + "spec": { + "tags": [ + { + "name": "latest", + "annotations": { + "openshift.io/display-name": "Httpd (Latest)", + "description": "Build and serve static content via Httpd on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/httpd-container/blob/master/2.4/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Httpd available on OpenShift, including major versions updates.", + "iconClass": "icon-apache", + "tags": "builder,httpd", + "supports":"httpd", + "sampleRepo": "https://github.com/openshift/httpd-ex.git" + }, + "from": { + "kind": "ImageStreamTag", + "name": "2.4" + } + }, + { + "name": "2.4", + "annotations": { + "openshift.io/display-name": "Httpd 2.4", + "description": "Build and serve static content via Httpd on RHEL 7. 
For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/httpd-container/blob/master/2.4/README.md.", + "iconClass": "icon-apache", + "tags": "builder,httpd", + "supports":"httpd", + "version": "2.4", + "sampleRepo": "https://github.com/openshift/httpd-ex.git" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/rhscl/httpd-24-rhel7" + } + } + ] + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { "name": "ruby", "annotations": { "openshift.io/display-name": "Ruby" diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/README.md b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/README.md index f48d8d4a8..6d2ccbf7f 100644 --- a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/README.md +++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/README.md @@ -17,6 +17,7 @@ instantiating them. * [Dancer persistent](https://raw.githubusercontent.com/openshift/dancer-ex/master/openshift/templates/dancer-mysql-persistent.json) - Provides a basic Dancer (Perl) application with a persistent MySQL database. Note: requires available persistent volumes. For more information see the [source repository](https://github.com/openshift/dancer-ex). * [Django](https://raw.githubusercontent.com/openshift/django-ex/master/openshift/templates/django-postgresql.json) - Provides a basic Django (Python) application with a PostgreSQL database. For more information see the [source repository](https://github.com/openshift/django-ex). * [Django persistent](https://raw.githubusercontent.com/openshift/django-ex/master/openshift/templates/django-postgresql-persistent.json) - Provides a basic Django (Python) application with a persistent PostgreSQL database. Note: requires available persistent volumes. For more information see the [source repository](https://github.com/openshift/django-ex). +* [Httpd](https://raw.githubusercontent.com/openshift/httpd-ex/master/openshift/templates/httpd.json) - Provides a basic Httpd static content application. For more information see the [source repository](https://github.com/openshift/httpd-ex). * [NodeJS](https://raw.githubusercontent.com/openshift/nodejs-ex/master/openshift/templates/nodejs-mongodb.json) - Provides a basic NodeJS application with a MongoDB database. For more information see the [source repository](https://github.com/openshift/nodejs-ex). * [NodeJS persistent](https://raw.githubusercontent.com/openshift/nodejs-ex/master/openshift/templates/nodejs-mongodb-persistent.json) - Provides a basic NodeJS application with a persistent MongoDB database. Note: requires available persistent volumes. For more information see the [source repository](https://github.com/openshift/nodejs-ex). * [Rails](https://raw.githubusercontent.com/openshift/rails-ex/master/openshift/templates/rails-postgresql.json) - Provides a basic Rails (Ruby) application with a PostgreSQL database. For more information see the [source repository](https://github.com/openshift/rails-ex). 
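A minimal smoke test for the new Httpd quickstart listed above might look like the following sketch; it is illustrative only and not part of the changeset. It assumes a logged-in oc client against an OpenShift 3.6 cluster, that the updated image streams have been imported into the openshift namespace, and that this repository checkout is the current working directory; the template and parameter names are taken from the httpd.json quickstart added further down in this diff.

    # Load the quickstart template into the current project and instantiate it
    oc create -f roles/openshift_examples/files/examples/v3.6/quickstart-templates/httpd.json
    oc new-app --template=httpd-example \
        -p SOURCE_REPOSITORY_URL=https://github.com/openshift/httpd-ex.git
    # Follow the source-to-image build, then confirm the exposed route exists
    oc logs -f bc/httpd-example
    oc get route httpd-example

APPLICATION_DOMAIN is left blank here, so the route hostname is defaulted by the router, matching the parameter description in the template.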
diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/cakephp-mysql-persistent.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/cakephp-mysql-persistent.json index eb3d296be..8c79d3340 100644 --- a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/cakephp-mysql-persistent.json +++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/cakephp-mysql-persistent.json @@ -60,7 +60,10 @@ "kind": "Route", "apiVersion": "v1", "metadata": { - "name": "${NAME}" + "name": "${NAME}", + "annotations": { + "template.openshift.io/expose-uri": "http://{.spec.host}{.spec.path}" + } }, "spec": { "host": "${APPLICATION_DOMAIN}", diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/cakephp-mysql.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/cakephp-mysql.json index da2454d2e..0f75f773f 100644 --- a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/cakephp-mysql.json +++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/cakephp-mysql.json @@ -60,7 +60,10 @@ "kind": "Route", "apiVersion": "v1", "metadata": { - "name": "${NAME}" + "name": "${NAME}", + "annotations": { + "template.openshift.io/expose-uri": "http://{.spec.host}{.spec.path}" + } }, "spec": { "host": "${APPLICATION_DOMAIN}", diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql-persistent.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql-persistent.json index ec335daa0..f564d4606 100644 --- a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql-persistent.json +++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql-persistent.json @@ -58,7 +58,10 @@ "kind": "Route", "apiVersion": "v1", "metadata": { - "name": "${NAME}" + "name": "${NAME}", + "annotations": { + "template.openshift.io/expose-uri": "http://{.spec.host}{.spec.path}" + } }, "spec": { "host": "${APPLICATION_DOMAIN}", diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql.json index 6304586dd..48283bfc2 100644 --- a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql.json +++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql.json @@ -58,7 +58,10 @@ "kind": "Route", "apiVersion": "v1", "metadata": { - "name": "${NAME}" + "name": "${NAME}", + "annotations": { + "template.openshift.io/expose-uri": "http://{.spec.host}{.spec.path}" + } }, "spec": { "host": "${APPLICATION_DOMAIN}", diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql-persistent.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql-persistent.json index 152bf1c7c..180eeb967 100644 --- a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql-persistent.json +++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql-persistent.json @@ -58,7 +58,10 @@ "kind": "Route", "apiVersion": "v1", "metadata": { - "name": "${NAME}" + "name": "${NAME}", + "annotations": { + "template.openshift.io/expose-uri": "http://{.spec.host}{.spec.path}" + } }, "spec": { "host": "${APPLICATION_DOMAIN}", diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql.json 
b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql.json index f3b5f97f3..da79c8dd0 100644 --- a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql.json +++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql.json @@ -58,7 +58,10 @@ "kind": "Route", "apiVersion": "v1", "metadata": { - "name": "${NAME}" + "name": "${NAME}", + "annotations": { + "template.openshift.io/expose-uri": "http://{.spec.host}{.spec.path}" + } }, "spec": { "host": "${APPLICATION_DOMAIN}", diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/httpd.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/httpd.json new file mode 100644 index 000000000..5bfb4b019 --- /dev/null +++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/httpd.json @@ -0,0 +1,274 @@ +{ + "kind": "Template", + "apiVersion": "v1", + "metadata": { + "name": "httpd-example", + "annotations": { + "openshift.io/display-name": "Httpd", + "description": "An example Httpd application that serves static content. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/httpd-ex/blob/master/README.md.", + "tags": "quickstart,httpd", + "iconClass": "icon-apache", + "template.openshift.io/long-description": "This template defines resources needed to develop a static application served by httpd, including a build configuration and application deployment configuration.", + "template.openshift.io/provider-display-name": "Red Hat, Inc.", + "template.openshift.io/documentation-url": "https://github.com/openshift/httpd-ex", + "template.openshift.io/support-url": "https://access.redhat.com" + } + }, + "message": "The following service(s) have been created in your project: ${NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/httpd-ex/blob/master/README.md.", + "labels": { + "template": "httpd-example" + }, + "objects": [ + { + "kind": "Service", + "apiVersion": "v1", + "metadata": { + "name": "${NAME}", + "annotations": { + "description": "Exposes and load balances the application pods" + } + }, + "spec": { + "ports": [ + { + "name": "web", + "port": 8080, + "targetPort": 8080 + } + ], + "selector": { + "name": "${NAME}" + } + } + }, + { + "kind": "Route", + "apiVersion": "v1", + "metadata": { + "name": "${NAME}", + "annotations": { + "template.openshift.io/expose-uri": "http://{.spec.host}{.spec.path}" + } + }, + "spec": { + "host": "${APPLICATION_DOMAIN}", + "to": { + "kind": "Service", + "name": "${NAME}" + } + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { + "name": "${NAME}", + "annotations": { + "description": "Keeps track of changes in the application image" + } + } + }, + { + "kind": "BuildConfig", + "apiVersion": "v1", + "metadata": { + "name": "${NAME}", + "annotations": { + "description": "Defines how to build the application" + } + }, + "spec": { + "source": { + "type": "Git", + "git": { + "uri": "${SOURCE_REPOSITORY_URL}", + "ref": "${SOURCE_REPOSITORY_REF}" + }, + "contextDir": "${CONTEXT_DIR}" + }, + "strategy": { + "type": "Source", + "sourceStrategy": { + "from": { + "kind": "ImageStreamTag", + "namespace": "${NAMESPACE}", + "name": "httpd:2.4" + } + } + }, + "output": { + "to": { + "kind": "ImageStreamTag", + "name": "${NAME}:latest" + } + }, + "triggers": [ + { + "type": "ImageChange" + }, + { + "type": "ConfigChange" + }, + { + 
"type": "GitHub", + "github": { + "secret": "${GITHUB_WEBHOOK_SECRET}" + } + }, + { + "type": "Generic", + "generic": { + "secret": "${GENERIC_WEBHOOK_SECRET}" + } + } + ] + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${NAME}", + "annotations": { + "description": "Defines how to deploy the application server" + } + }, + "spec": { + "strategy": { + "type": "Rolling" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "httpd-example" + ], + "from": { + "kind": "ImageStreamTag", + "name": "${NAME}:latest" + } + } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "name": "${NAME}" + }, + "template": { + "metadata": { + "name": "${NAME}", + "labels": { + "name": "${NAME}" + } + }, + "spec": { + "containers": [ + { + "name": "httpd-example", + "image": " ", + "ports": [ + { + "containerPort": 8080 + } + ], + "readinessProbe": { + "timeoutSeconds": 3, + "initialDelaySeconds": 3, + "httpGet": { + "path": "/", + "port": 8080 + } + }, + "livenessProbe": { + "timeoutSeconds": 3, + "initialDelaySeconds": 30, + "httpGet": { + "path": "/", + "port": 8080 + } + }, + "resources": { + "limits": { + "memory": "${MEMORY_LIMIT}" + } + }, + "env": [ + ], + "resources": { + "limits": { + "memory": "${MEMORY_LIMIT}" + } + } + } + ] + } + } + } + } + ], + "parameters": [ + { + "name": "NAME", + "displayName": "Name", + "description": "The name assigned to all of the frontend objects defined in this template.", + "required": true, + "value": "httpd-example" + }, + { + "name": "NAMESPACE", + "displayName": "Namespace", + "description": "The OpenShift Namespace where the ImageStream resides.", + "required": true, + "value": "openshift" + }, + { + "name": "MEMORY_LIMIT", + "displayName": "Memory Limit", + "description": "Maximum amount of memory the container can use.", + "required": true, + "value": "512Mi" + }, + { + "name": "SOURCE_REPOSITORY_URL", + "displayName": "Git Repository URL", + "description": "The URL of the repository with your application source code.", + "required": true, + "value": "https://github.com/openshift/httpd-ex.git" + }, + { + "name": "SOURCE_REPOSITORY_REF", + "displayName": "Git Reference", + "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch." + }, + { + "name": "CONTEXT_DIR", + "displayName": "Context Directory", + "description": "Set this to the relative path to your project if it is not in the root of your repository." 
+ }, + { + "name": "APPLICATION_DOMAIN", + "displayName": "Application Hostname", + "description": "The exposed hostname that will route to the httpd service, if left blank a value will be defaulted.", + "value": "" + }, + { + "name": "GITHUB_WEBHOOK_SECRET", + "displayName": "GitHub Webhook Secret", + "description": "A secret string used to configure the GitHub webhook.", + "generate": "expression", + "from": "[a-zA-Z0-9]{40}" + }, + { + "name": "GENERIC_WEBHOOK_SECRET", + "displayName": "Generic Webhook Secret", + "description": "A secret string used to configure the Generic webhook.", + "generate": "expression", + "from": "[a-zA-Z0-9]{40}" + } + ] +} diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/jenkins-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/jenkins-ephemeral-template.json index 264e4b2de..ce96684a9 100644 --- a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/jenkins-ephemeral-template.json +++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/jenkins-ephemeral-template.json @@ -22,7 +22,10 @@ "apiVersion": "v1", "metadata": { "name": "${JENKINS_SERVICE_NAME}", - "creationTimestamp": null + "creationTimestamp": null, + "annotations": { + "template.openshift.io/expose-uri": "http://{.spec.host}{.spec.path}" + } }, "spec": { "to": { diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/jenkins-persistent-template.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/jenkins-persistent-template.json index b47bdf353..34b2b920b 100644 --- a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/jenkins-persistent-template.json +++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/jenkins-persistent-template.json @@ -22,7 +22,10 @@ "apiVersion": "v1", "metadata": { "name": "${JENKINS_SERVICE_NAME}", - "creationTimestamp": null + "creationTimestamp": null, + "annotations": { + "template.openshift.io/expose-uri": "http://{.spec.host}{.spec.path}" + } }, "spec": { "to": { diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb-persistent.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb-persistent.json index c570ca5d5..167370811 100644 --- a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb-persistent.json +++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb-persistent.json @@ -58,7 +58,10 @@ "kind": "Route", "apiVersion": "v1", "metadata": { - "name": "${NAME}" + "name": "${NAME}", + "annotations": { + "template.openshift.io/expose-uri": "http://{.spec.host}{.spec.path}" + } }, "spec": { "host": "${APPLICATION_DOMAIN}", @@ -102,7 +105,7 @@ "from": { "kind": "ImageStreamTag", "namespace": "${NAMESPACE}", - "name": "nodejs:4" + "name": "nodejs:6" }, "env": [ { diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb.json index 161f1582e..214c110d2 100644 --- a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb.json +++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb.json @@ -58,7 +58,10 @@ "kind": "Route", "apiVersion": "v1", "metadata": { - "name": "${NAME}" + "name": "${NAME}", + "annotations": { + "template.openshift.io/expose-uri": "http://{.spec.host}{.spec.path}" + } }, 
"spec": { "host": "${APPLICATION_DOMAIN}", @@ -102,7 +105,7 @@ "from": { "kind": "ImageStreamTag", "namespace": "${NAMESPACE}", - "name": "nodejs:4" + "name": "nodejs:6" }, "env": [ { diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/rails-postgresql-persistent.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/rails-postgresql-persistent.json index b400cfdb3..82a979379 100644 --- a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/rails-postgresql-persistent.json +++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/rails-postgresql-persistent.json @@ -23,7 +23,11 @@ "kind": "Secret", "apiVersion": "v1", "metadata": { - "name": "${NAME}" + "name": "${NAME}", + "annotations": { + "template.openshift.io/expose-username": "{.data['application-user']}", + "template.openshift.io/expose-password": "{.data['application-password']}" + } }, "stringData" : { "database-user" : "${DATABASE_USER}", @@ -60,7 +64,10 @@ "kind": "Route", "apiVersion": "v1", "metadata": { - "name": "${NAME}" + "name": "${NAME}", + "annotations": { + "template.openshift.io/expose-uri": "http://{.spec.host}{.spec.path}" + } }, "spec": { "host": "${APPLICATION_DOMAIN}", diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/rails-postgresql.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/rails-postgresql.json index fa67412ff..f32c4fc4a 100644 --- a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/rails-postgresql.json +++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/rails-postgresql.json @@ -23,7 +23,11 @@ "kind": "Secret", "apiVersion": "v1", "metadata": { - "name": "${NAME}" + "name": "${NAME}", + "annotations": { + "template.openshift.io/expose-username": "{.data['application-user']}", + "template.openshift.io/expose-password": "{.data['application-password']}" + } }, "stringData" : { "database-user" : "${DATABASE_USER}", @@ -60,7 +64,10 @@ "kind": "Route", "apiVersion": "v1", "metadata": { - "name": "${NAME}" + "name": "${NAME}", + "annotations": { + "template.openshift.io/expose-uri": "http://{.spec.host}{.spec.path}" + } }, "spec": { "host": "${APPLICATION_DOMAIN}", diff --git a/roles/openshift_excluder/tasks/install.yml b/roles/openshift_excluder/tasks/install.yml index d09358bee..3a866cedf 100644 --- a/roles/openshift_excluder/tasks/install.yml +++ b/roles/openshift_excluder/tasks/install.yml @@ -1,14 +1,24 @@ --- -- name: Install docker excluder - package: - name: "{{ r_openshift_excluder_service_type }}-docker-excluder{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) + '*' }}" - state: "{{ r_openshift_excluder_docker_package_state }}" - when: - - r_openshift_excluder_enable_docker_excluder | bool - -- name: Install openshift excluder - package: - name: "{{ r_openshift_excluder_service_type }}-excluder{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) + '*' }}" - state: "{{ r_openshift_excluder_package_state }}" - when: - - r_openshift_excluder_enable_openshift_excluder | bool + +- when: + - not openshift.common.is_atomic | bool + - r_openshift_excluder_install_ran is not defined + + block: + + - name: Install docker excluder + package: + name: "{{ r_openshift_excluder_service_type }}-docker-excluder{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) + '*' }}" + state: "{{ r_openshift_excluder_docker_package_state }}" + when: + - 
r_openshift_excluder_enable_docker_excluder | bool + + - name: Install openshift excluder + package: + name: "{{ r_openshift_excluder_service_type }}-excluder{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) + '*' }}" + state: "{{ r_openshift_excluder_package_state }}" + when: + - r_openshift_excluder_enable_openshift_excluder | bool + + - set_fact: + r_openshift_excluder_install_ran: True diff --git a/roles/openshift_facts/defaults/main.yml b/roles/openshift_facts/defaults/main.yml index 28b388560..cc4dc9365 100644 --- a/roles/openshift_facts/defaults/main.yml +++ b/roles/openshift_facts/defaults/main.yml @@ -1,2 +1,2 @@ --- -use_system_containers: false +openshift_use_system_containers: false diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py index cfe092a28..663423061 100755 --- a/roles/openshift_facts/library/openshift_facts.py +++ b/roles/openshift_facts/library/openshift_facts.py @@ -537,6 +537,7 @@ def set_node_schedulability(facts): return facts +# pylint: disable=too-many-branches def set_selectors(facts): """ Set selectors facts if not already present in facts dict Args: @@ -570,6 +571,10 @@ def set_selectors(facts): facts['hosted']['logging'] = {} if 'selector' not in facts['hosted']['logging'] or facts['hosted']['logging']['selector'] in [None, 'None']: facts['hosted']['logging']['selector'] = None + if 'etcd' not in facts['hosted']: + facts['hosted']['etcd'] = {} + if 'selector' not in facts['hosted']['etcd'] or facts['hosted']['etcd']['selector'] in [None, 'None']: + facts['hosted']['etcd']['selector'] = None return facts @@ -1654,6 +1659,7 @@ def set_proxy_facts(facts): common['no_proxy'].extend(common['no_proxy_internal_hostnames'].split(',')) # We always add local dns domain and ourselves no matter what common['no_proxy'].append('.' 
+ common['dns_domain']) + common['no_proxy'].append('.svc') common['no_proxy'].append(common['hostname']) common['no_proxy'] = ','.join(sort_unique(common['no_proxy'])) facts['common'] = common @@ -2156,6 +2162,25 @@ class OpenShiftFacts(object): create_pvc=False ) ), + etcd=dict( + storage=dict( + kind=None, + volume=dict( + name='etcd', + size='1Gi' + ), + nfs=dict( + directory='/exports', + options='*(rw,root_squash)' + ), + host=None, + access=dict( + modes=['ReadWriteOnce'] + ), + create_pv=True, + create_pvc=False + ) + ), registry=dict( storage=dict( kind=None, diff --git a/roles/openshift_facts/tasks/main.yml b/roles/openshift_facts/tasks/main.yml index 1b9bda67e..451386bf1 100644 --- a/roles/openshift_facts/tasks/main.yml +++ b/roles/openshift_facts/tasks/main.yml @@ -9,10 +9,10 @@ l_is_atomic: "{{ ostree_booted.stat.exists }}" - set_fact: l_is_containerized: "{{ (l_is_atomic | bool) or (containerized | default(false) | bool) }}" - l_is_openvswitch_system_container: "{{ (use_openvswitch_system_container | default(use_system_containers) | bool) }}" - l_is_node_system_container: "{{ (use_node_system_container | default(use_system_containers) | bool) }}" - l_is_master_system_container: "{{ (use_master_system_container | default(use_system_containers) | bool) }}" - l_is_etcd_system_container: "{{ (use_etcd_system_container | default(use_system_containers) | bool) }}" + l_is_openvswitch_system_container: "{{ (openshift_use_openvswitch_system_container | default(openshift_use_system_containers) | bool) }}" + l_is_node_system_container: "{{ (openshift_use_node_system_container | default(openshift_use_system_containers) | bool) }}" + l_is_master_system_container: "{{ (openshift_use_master_system_container | default(openshift_use_system_containers) | bool) }}" + l_is_etcd_system_container: "{{ (openshift_use_etcd_system_container | default(openshift_use_system_containers) | bool) }}" - set_fact: l_any_system_container: "{{ l_is_etcd_system_container or l_is_openvswitch_system_container or l_is_node_system_container or l_is_master_system_container }}" - set_fact: @@ -24,12 +24,18 @@ msg: | openshift-ansible requires Python 3 for {{ ansible_distribution }}; For information on enabling Python 3 with Ansible, see https://docs.ansible.com/ansible/python_3_support.html - when: ansible_distribution == 'Fedora' and ansible_python['version']['major'] != 3 + when: + - ansible_distribution == 'Fedora' + - ansible_python['version']['major'] != 3 + - r_openshift_facts_ran is not defined - name: Validate python version fail: msg: "openshift-ansible requires Python 2 for {{ ansible_distribution }}" - when: ansible_distribution != 'Fedora' and ansible_python['version']['major'] != 2 + when: + - ansible_distribution != 'Fedora' + - ansible_python['version']['major'] != 2 + - r_openshift_facts_ran is not defined # Fail as early as possible if Atomic and old version of Docker - block: @@ -48,7 +54,9 @@ that: - l_atomic_docker_version.stdout | replace('"', '') | version_compare('1.12','>=') - when: l_is_atomic | bool + when: + - l_is_atomic | bool + - r_openshift_facts_ran is not defined - name: Load variables include_vars: "{{ item }}" @@ -59,7 +67,9 @@ - name: Ensure various deps are installed package: name={{ item }} state=present with_items: "{{ required_packages }}" - when: not l_is_atomic | bool + when: + - not l_is_atomic | bool + - r_openshift_facts_ran is not defined - name: Ensure various deps for running system containers are installed package: name={{ item }} state=present @@ -67,6 +77,7 @@ when: 
- not l_is_atomic | bool - l_any_system_container | bool + - r_openshift_facts_ran is not defined - name: Gather Cluster facts and set is_containerized if needed openshift_facts: @@ -99,3 +110,7 @@ - name: Set repoquery command set_fact: repoquery_cmd: "{{ 'dnf repoquery --latest-limit 1 -d 0' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins' }}" + +- name: Register that this already ran + set_fact: + r_openshift_facts_ran: True diff --git a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py index 27e6fe383..26bf4c09b 100644 --- a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py +++ b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py @@ -1,8 +1,24 @@ -# pylint: disable=missing-docstring +"""Check that required Docker images are available.""" + from openshift_checks import OpenShiftCheck, get_var from openshift_checks.mixins import DockerHostMixin +NODE_IMAGE_SUFFIXES = ["haproxy-router", "docker-registry", "deployer", "pod"] +DEPLOYMENT_IMAGE_INFO = { + "origin": { + "namespace": "openshift", + "name": "origin", + "registry_console_image": "cockpit/kubernetes", + }, + "openshift-enterprise": { + "namespace": "openshift3", + "name": "ose", + "registry_console_image": "registry.access.redhat.com/openshift3/registry-console", + }, +} + + class DockerImageAvailability(DockerHostMixin, OpenShiftCheck): """Check that required Docker images are available. @@ -13,25 +29,13 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck): name = "docker_image_availability" tags = ["preflight"] - dependencies = ["skopeo", "python-docker-py"] - deployment_image_info = { - "origin": { - "namespace": "openshift", - "name": "origin", - }, - "openshift-enterprise": { - "namespace": "openshift3", - "name": "ose", - }, - } - @classmethod def is_active(cls, task_vars): """Skip hosts with unsupported deployment types.""" deployment_type = get_var(task_vars, "openshift_deployment_type") - has_valid_deployment_type = deployment_type in cls.deployment_image_info + has_valid_deployment_type = deployment_type in DEPLOYMENT_IMAGE_INFO return super(DockerImageAvailability, cls).is_active(task_vars) and has_valid_deployment_type @@ -70,51 +74,55 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck): return {"changed": changed} - def required_images(self, task_vars): - deployment_type = get_var(task_vars, "openshift_deployment_type") - image_info = self.deployment_image_info[deployment_type] - - openshift_release = get_var(task_vars, "openshift_release", default="latest") - openshift_image_tag = get_var(task_vars, "openshift_image_tag") - is_containerized = get_var(task_vars, "openshift", "common", "is_containerized") - - images = set(self.required_docker_images( - image_info["namespace"], - image_info["name"], - ["registry-console"] if "enterprise" in deployment_type else [], # include enterprise-only image names - openshift_release, - is_containerized, - )) - - # append images with qualified image tags to our list of required images. - # these are images with a (v0.0.0.0) tag, rather than a standard release - # format tag (v0.0). We want to check this set in both containerized and - # non-containerized installations. 
- images.update( - self.required_qualified_docker_images( - image_info["namespace"], - image_info["name"], - openshift_image_tag, - ), - ) - - return images - @staticmethod - def required_docker_images(namespace, name, additional_image_names, version, is_containerized): - if is_containerized: - return ["{}/{}:{}".format(namespace, name, version)] if name else [] - - # include additional non-containerized images specific to the current deployment type - return ["{}/{}:{}".format(namespace, img_name, version) for img_name in additional_image_names] - - @staticmethod - def required_qualified_docker_images(namespace, name, version): - # pylint: disable=invalid-name - return [ - "{}/{}-{}:{}".format(namespace, name, suffix, version) - for suffix in ["haproxy-router", "docker-registry", "deployer", "pod"] - ] + def required_images(task_vars): + """ + Determine which images we expect to need for this host. + Returns: a set of required images like 'openshift/origin:v3.6' + + The thorny issue of determining the image names from the variables is under consideration + via https://github.com/openshift/openshift-ansible/issues/4415 + + For now we operate as follows: + * For containerized components (master, node, ...) we look at the deployment type and + use openshift/origin or openshift3/ose as the base for those component images. The + version is openshift_image_tag as determined by the openshift_version role. + * For OpenShift-managed infrastructure (router, registry...) we use oreg_url if + it is defined; otherwise we again use the base that depends on the deployment type. + Registry is not included in constructed images. It may be in oreg_url or etcd image. + """ + required = set() + deployment_type = get_var(task_vars, "openshift_deployment_type") + host_groups = get_var(task_vars, "group_names") + image_tag = get_var(task_vars, "openshift_image_tag") + image_info = DEPLOYMENT_IMAGE_INFO[deployment_type] + if not image_info: + return required + + # template for images that run on top of OpenShift + image_url = "{}/{}-{}:{}".format(image_info["namespace"], image_info["name"], "${component}", "${version}") + image_url = get_var(task_vars, "oreg_url", default="") or image_url + if 'nodes' in host_groups: + for suffix in NODE_IMAGE_SUFFIXES: + required.add(image_url.replace("${component}", suffix).replace("${version}", image_tag)) + # The registry-console is for some reason not prefixed with ose- like the other components. + # Nor is it versioned the same, so just look for latest. + # Also a completely different name is used for Origin. 
+ required.add(image_info["registry_console_image"]) + + # images for containerized components + if get_var(task_vars, "openshift", "common", "is_containerized"): + components = set() + if 'nodes' in host_groups: + components.update(["node", "openvswitch"]) + if 'masters' in host_groups: # name is "origin" or "ose" + components.add(image_info["name"]) + for component in components: + required.add("{}/{}:{}".format(image_info["namespace"], component, image_tag)) + if 'etcd' in host_groups: # special case, note it is the same for origin/enterprise + required.add("registry.access.redhat.com/rhel7/etcd") # and no image tag + + return required def local_images(self, images, task_vars): """Filter a list of images and return those available locally.""" @@ -124,7 +132,8 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck): ] def is_image_local(self, image, task_vars): - result = self.module_executor("docker_image_facts", {"name": image}, task_vars) + """Check if image is already in local docker index.""" + result = self.execute_module("docker_image_facts", {"name": image}, task_vars=task_vars) if result.get("failed", False): return False @@ -132,6 +141,7 @@ @staticmethod def known_docker_registries(task_vars): + """Build a list of docker registries available according to inventory vars.""" docker_facts = get_var(task_vars, "openshift", "docker") regs = set(docker_facts["additional_registries"]) @@ -147,17 +157,21 @@ """Inspect existing images using Skopeo and return all images successfully inspected.""" return [ image for image in images - if any(self.is_available_skopeo_image(image, registry, task_vars) for registry in registries) + if self.is_available_skopeo_image(image, registries, task_vars) ] - def is_available_skopeo_image(self, image, registry, task_vars): - """Uses Skopeo to determine if required image exists in a given registry.""" + def is_available_skopeo_image(self, image, registries, task_vars): + """Use Skopeo to determine if required image exists in known registry(s).""" + + # if the image already includes a registry, just use that + if image.count("/") > 1: + registry, image = image.split("/", 1) + registries = [registry] - cmd_str = "skopeo inspect docker://{registry}/{image}".format( - registry=registry, - image=image, - ) + for registry in registries: + args = {"_raw_params": "skopeo inspect --tls-verify=false docker://{}/{}".format(registry, image)} + result = self.execute_module("command", args, task_vars=task_vars) + if result.get("rc", 0) == 0 and not result.get("failed"): + return True - args = {"_raw_params": cmd_str} - result = self.module_executor("command", args, task_vars) - return not result.get("failed", False) and result.get("rc", 0) == 0 + return False diff --git a/roles/openshift_health_checker/openshift_checks/docker_storage.py b/roles/openshift_health_checker/openshift_checks/docker_storage.py index 7f1751b36..2bd615457 100644 --- a/roles/openshift_health_checker/openshift_checks/docker_storage.py +++ b/roles/openshift_health_checker/openshift_checks/docker_storage.py @@ -34,7 +34,7 @@ class DockerStorage(DockerHostMixin, OpenShiftCheck): } # attempt to get the docker info hash from the API - info = self.execute_module("docker_info", {}, task_vars) + info = self.execute_module("docker_info", {}, task_vars=task_vars) if info.get("failed"): return {"failed": True, "changed": changed, "msg": "Failed to query Docker API.
Is docker running on this host?"} @@ -146,7 +146,7 @@ class DockerStorage(DockerHostMixin, OpenShiftCheck): vgs_cmd = "/sbin/vgs --noheadings -o vg_free --select vg_name=" + vg_name # should return free space like " 12.00g" if the VG exists; empty if it does not - ret = self.execute_module("command", {"_raw_params": vgs_cmd}, task_vars) + ret = self.execute_module("command", {"_raw_params": vgs_cmd}, task_vars=task_vars) if ret.get("failed") or ret.get("rc", 0) != 0: raise OpenShiftCheckException( "Is LVM installed? Failed to run /sbin/vgs " diff --git a/roles/openshift_health_checker/openshift_checks/mixins.py b/roles/openshift_health_checker/openshift_checks/mixins.py index 7f3d78cc4..2cb2e21aa 100644 --- a/roles/openshift_health_checker/openshift_checks/mixins.py +++ b/roles/openshift_health_checker/openshift_checks/mixins.py @@ -40,8 +40,11 @@ class DockerHostMixin(object): # NOTE: we would use the "package" module but it's actually an action plugin # and it's not clear how to invoke one of those. This is about the same anyway: - pkg_manager = get_var(task_vars, "ansible_pkg_mgr", default="yum") - result = self.module_executor(pkg_manager, {"name": self.dependencies, "state": "present"}, task_vars) + result = self.execute_module( + get_var(task_vars, "ansible_pkg_mgr", default="yum"), + {"name": self.dependencies, "state": "present"}, + task_vars=task_vars, + ) msg = result.get("msg", "") if result.get("failed"): if "No package matching" in msg: diff --git a/roles/openshift_health_checker/openshift_checks/ovs_version.py b/roles/openshift_health_checker/openshift_checks/ovs_version.py index 1e45ae3af..2dd045f1f 100644 --- a/roles/openshift_health_checker/openshift_checks/ovs_version.py +++ b/roles/openshift_health_checker/openshift_checks/ovs_version.py @@ -43,7 +43,7 @@ class OvsVersion(NotContainerizedMixin, OpenShiftCheck): }, ], } - return self.execute_module("rpm_version", args, task_vars) + return self.execute_module("rpm_version", args, task_vars=task_vars) def get_required_ovs_version(self, task_vars): """Return the correct Open vSwitch version for the current OpenShift version""" diff --git a/roles/openshift_health_checker/openshift_checks/package_availability.py b/roles/openshift_health_checker/openshift_checks/package_availability.py index a7eb720fd..0dd2b1286 100644 --- a/roles/openshift_health_checker/openshift_checks/package_availability.py +++ b/roles/openshift_health_checker/openshift_checks/package_availability.py @@ -25,7 +25,7 @@ class PackageAvailability(NotContainerizedMixin, OpenShiftCheck): packages.update(self.node_packages(rpm_prefix)) args = {"packages": sorted(set(packages))} - return self.execute_module("check_yum_update", args, tmp, task_vars) + return self.execute_module("check_yum_update", args, tmp=tmp, task_vars=task_vars) @staticmethod def master_packages(rpm_prefix): @@ -36,8 +36,7 @@ class PackageAvailability(NotContainerizedMixin, OpenShiftCheck): "bash-completion", "cockpit-bridge", "cockpit-docker", - "cockpit-kubernetes", - "cockpit-shell", + "cockpit-system", "cockpit-ws", "etcd", "httpd-tools", diff --git a/roles/openshift_health_checker/openshift_checks/package_update.py b/roles/openshift_health_checker/openshift_checks/package_update.py index fd0c0a755..f432380c6 100644 --- a/roles/openshift_health_checker/openshift_checks/package_update.py +++ b/roles/openshift_health_checker/openshift_checks/package_update.py @@ -11,4 +11,4 @@ class PackageUpdate(NotContainerizedMixin, OpenShiftCheck): def run(self, tmp, task_vars): args = {"packages": []} - 
return self.execute_module("check_yum_update", args, tmp, task_vars) + return self.execute_module("check_yum_update", args, tmp=tmp, task_vars=task_vars) diff --git a/roles/openshift_health_checker/openshift_checks/package_version.py b/roles/openshift_health_checker/openshift_checks/package_version.py index 2e737818b..6a76bb93d 100644 --- a/roles/openshift_health_checker/openshift_checks/package_version.py +++ b/roles/openshift_health_checker/openshift_checks/package_version.py @@ -71,7 +71,7 @@ class PackageVersion(NotContainerizedMixin, OpenShiftCheck): ], } - return self.execute_module("aos_version", args, tmp, task_vars) + return self.execute_module("aos_version", args, tmp=tmp, task_vars=task_vars) def get_required_ovs_version(self, task_vars): """Return the correct Open vSwitch version for the current OpenShift version. diff --git a/roles/openshift_health_checker/test/docker_image_availability_test.py b/roles/openshift_health_checker/test/docker_image_availability_test.py index 197c65f51..0a7c0f8d3 100644 --- a/roles/openshift_health_checker/test/docker_image_availability_test.py +++ b/roles/openshift_health_checker/test/docker_image_availability_test.py @@ -31,15 +31,15 @@ def test_is_active(deployment_type, is_containerized, group_names, expect_active (False, True), ]) def test_all_images_available_locally(is_containerized, is_atomic): - def execute_module(module_name, args, task_vars): + def execute_module(module_name, module_args, task_vars): if module_name == "yum": return {"changed": True} assert module_name == "docker_image_facts" - assert 'name' in args - assert args['name'] + assert 'name' in module_args + assert module_args['name'] return { - 'images': [args['name']], + 'images': [module_args['name']], } result = DockerImageAvailability(execute_module=execute_module).run(tmp=None, task_vars=dict( @@ -52,8 +52,8 @@ def test_all_images_available_locally(is_containerized, is_atomic): docker=dict(additional_registries=["docker.io"]), ), openshift_deployment_type='origin', - openshift_release='v3.4', openshift_image_tag='3.4', + group_names=['nodes', 'masters'], )) assert not result.get('failed', False) @@ -64,7 +64,7 @@ def test_all_images_available_locally(is_containerized, is_atomic): True, ]) def test_all_images_available_remotely(available_locally): - def execute_module(module_name, args, task_vars): + def execute_module(module_name, module_args, task_vars): if module_name == 'docker_image_facts': return {'images': [], 'failed': available_locally} return {'changed': False} @@ -79,8 +79,8 @@ def test_all_images_available_remotely(available_locally): docker=dict(additional_registries=["docker.io", "registry.access.redhat.com"]), ), openshift_deployment_type='origin', - openshift_release='3.4', openshift_image_tag='v3.4', + group_names=['nodes', 'masters'], )) assert not result.get('failed', False) @@ -108,8 +108,8 @@ def test_all_images_unavailable(): docker=dict(additional_registries=["docker.io"]), ), openshift_deployment_type="openshift-enterprise", - openshift_release=None, - openshift_image_tag='latest' + openshift_image_tag='latest', + group_names=['nodes', 'masters'], )) assert actual['failed'] @@ -147,8 +147,8 @@ def test_skopeo_update_failure(message, extra_words): docker=dict(additional_registries=["unknown.io"]), ), openshift_deployment_type="openshift-enterprise", - openshift_release='', openshift_image_tag='', + group_names=['nodes', 'masters'], )) assert actual["failed"] @@ -177,8 +177,85 @@ def test_registry_availability(deployment_type, registries): 
docker=dict(additional_registries=registries), ), openshift_deployment_type=deployment_type, - openshift_release='', openshift_image_tag='', + group_names=['nodes', 'masters'], )) assert not actual.get("failed", False) + + +@pytest.mark.parametrize("deployment_type, is_containerized, groups, oreg_url, expected", [ + ( # standard set of stuff required on nodes + "origin", False, ['nodes'], None, + set([ + 'openshift/origin-pod:vtest', + 'openshift/origin-deployer:vtest', + 'openshift/origin-docker-registry:vtest', + 'openshift/origin-haproxy-router:vtest', + 'cockpit/kubernetes', # origin version of registry-console + ]) + ), + ( # set a different URL for images + "origin", False, ['nodes'], 'foo.io/openshift/origin-${component}:${version}', + set([ + 'foo.io/openshift/origin-pod:vtest', + 'foo.io/openshift/origin-deployer:vtest', + 'foo.io/openshift/origin-docker-registry:vtest', + 'foo.io/openshift/origin-haproxy-router:vtest', + 'cockpit/kubernetes', # AFAICS this is not built from the URL + ]) + ), + ( + "origin", True, ['nodes', 'masters', 'etcd'], None, + set([ + # images running on top of openshift + 'openshift/origin-pod:vtest', + 'openshift/origin-deployer:vtest', + 'openshift/origin-docker-registry:vtest', + 'openshift/origin-haproxy-router:vtest', + 'cockpit/kubernetes', + # containerized component images + 'openshift/origin:vtest', + 'openshift/node:vtest', + 'openshift/openvswitch:vtest', + 'registry.access.redhat.com/rhel7/etcd', + ]) + ), + ( # enterprise images + "openshift-enterprise", True, ['nodes'], 'foo.io/openshift3/ose-${component}:f13ac45', + set([ + 'foo.io/openshift3/ose-pod:f13ac45', + 'foo.io/openshift3/ose-deployer:f13ac45', + 'foo.io/openshift3/ose-docker-registry:f13ac45', + 'foo.io/openshift3/ose-haproxy-router:f13ac45', + # registry-console is not constructed/versioned the same as the others. 
+ 'registry.access.redhat.com/openshift3/registry-console', + # containerized images aren't built from oreg_url + 'openshift3/node:vtest', + 'openshift3/openvswitch:vtest', + ]) + ), + ( + "openshift-enterprise", True, ['etcd', 'lb'], 'foo.io/openshift3/ose-${component}:f13ac45', + set([ + 'registry.access.redhat.com/rhel7/etcd', + # lb does not yet come in a containerized version + ]) + ), + +]) +def test_required_images(deployment_type, is_containerized, groups, oreg_url, expected): + task_vars = dict( + openshift=dict( + common=dict( + is_containerized=is_containerized, + is_atomic=False, + ), + ), + openshift_deployment_type=deployment_type, + group_names=groups, + oreg_url=oreg_url, + openshift_image_tag='vtest', + ) + + assert expected == DockerImageAvailability("DUMMY").required_images(task_vars) diff --git a/roles/openshift_health_checker/test/docker_storage_test.py b/roles/openshift_health_checker/test/docker_storage_test.py index 292a323db..876614b1d 100644 --- a/roles/openshift_health_checker/test/docker_storage_test.py +++ b/roles/openshift_health_checker/test/docker_storage_test.py @@ -77,7 +77,7 @@ non_atomic_task_vars = {"openshift": {"common": {"is_atomic": False}}} ), ]) def test_check_storage_driver(docker_info, failed, expect_msg): - def execute_module(module_name, args, tmp=None, task_vars=None): + def execute_module(module_name, module_args, tmp=None, task_vars=None): if module_name == "yum": return {} if module_name != "docker_info": @@ -187,7 +187,7 @@ def test_dm_usage(task_vars, driver_status, vg_free, success, expect_msg): ) ]) def test_vg_free(pool, command_returns, raises, returns): - def execute_module(module_name, args, tmp=None, task_vars=None): + def execute_module(module_name, module_args, tmp=None, task_vars=None): if module_name != "command": raise ValueError("not expecting module " + module_name) return command_returns diff --git a/roles/openshift_hosted/tasks/registry/registry.yml b/roles/openshift_hosted/tasks/registry/registry.yml index 751489958..d895e9a68 100644 --- a/roles/openshift_hosted/tasks/registry/registry.yml +++ b/roles/openshift_hosted/tasks/registry/registry.yml @@ -124,6 +124,35 @@ edits: "{{ openshift_hosted_registry_edits }}" force: "{{ True|bool in openshift_hosted_registry_force }}" +- name: Ensure OpenShift registry correctly rolls out (best-effort today) + command: | + oc rollout status deploymentconfig {{ openshift_hosted_registry_name }} \ + --namespace {{ openshift_hosted_registry_namespace }} \ + --config {{ openshift.common.config_base }}/master/admin.kubeconfig + async: 600 + poll: 15 + failed_when: false + +- name: Determine the latest version of the OpenShift registry deployment + command: | + oc get deploymentconfig {{ openshift_hosted_registry_name }} \ + --namespace {{ openshift_hosted_registry_namespace }} \ + --config {{ openshift.common.config_base }}/master/admin.kubeconfig \ + -o jsonpath='{ .status.latestVersion }' + register: openshift_hosted_registry_latest_version + +- name: Sanity-check that the OpenShift registry rolled out correctly + command: | + oc get replicationcontroller {{ openshift_hosted_registry_name }}-{{ openshift_hosted_registry_latest_version.stdout }} \ + --namespace {{ openshift_hosted_registry_namespace }} \ + --config {{ openshift.common.config_base }}/master/admin.kubeconfig \ + -o jsonpath='{ .metadata.annotations.openshift\.io/deployment\.phase }' + register: openshift_hosted_registry_rc_phase + until: "'Running' not in openshift_hosted_registry_rc_phase.stdout" + delay: 15 + retries: 40 
+ failed_when: "'Failed' in openshift_hosted_registry_rc_phase.stdout" + - include: storage/glusterfs.yml when: - openshift.hosted.registry.storage.kind | default(none) == 'glusterfs' or openshift.hosted.registry.storage.glusterfs.swap diff --git a/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml b/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml index e6bb196b8..c504bfb80 100644 --- a/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml +++ b/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml @@ -35,7 +35,7 @@ mount: state: mounted fstype: glusterfs - src: "{{ groups.oo_glusterfs_to_config[0] }}:/{{ openshift.hosted.registry.storage.glusterfs.path }}" + src: "{% if 'glusterfs_registry' in groups %}{{ groups.glusterfs_registry[0] }}{% else %}{{ groups.glusterfs[0] }}{% endif %}:/{{ openshift.hosted.registry.storage.glusterfs.path }}" name: "{{ mktemp.stdout }}" - name: Set registry volume permissions diff --git a/roles/openshift_hosted/tasks/router/router.yml b/roles/openshift_hosted/tasks/router/router.yml index 192afc87a..160ae2f5e 100644 --- a/roles/openshift_hosted/tasks/router/router.yml +++ b/roles/openshift_hosted/tasks/router/router.yml @@ -55,7 +55,7 @@ state: present with_items: "{{ openshift_hosted_routers }}" -- name: Grant the router serivce account(s) access to the appropriate scc +- name: Grant the router service account(s) access to the appropriate scc oc_adm_policy_user: user: "system:serviceaccount:{{ item.namespace }}:{{ item.serviceaccount }}" namespace: "{{ item.namespace }}" @@ -89,18 +89,37 @@ ports: "{{ item.ports }}" stats_port: "{{ item.stats_port }}" with_items: "{{ openshift_hosted_routers }}" - register: routerout -# This should probably move to module -- name: wait for deploy - pause: - seconds: 30 - when: routerout.changed +- name: Ensure OpenShift router correctly rolls out (best-effort today) + command: | + oc rollout status deploymentconfig {{ item.name }} \ + --namespace {{ item.namespace | default('default') }} \ + --config {{ openshift.common.config_base }}/master/admin.kubeconfig + async: 600 + poll: 15 + with_items: "{{ openshift_hosted_routers }}" + failed_when: false -- name: Ensure router replica count matches desired - oc_scale: - kind: dc - name: "{{ item.name | default('router') }}" - namespace: "{{ item.namespace | default('default') }}" - replicas: "{{ item.replicas }}" +- name: Determine the latest version of the OpenShift router deployment + command: | + oc get deploymentconfig {{ item.name }} \ + --namespace {{ item.namespace }} \ + --config {{ openshift.common.config_base }}/master/admin.kubeconfig \ + -o jsonpath='{ .status.latestVersion }' + register: openshift_hosted_routers_latest_version with_items: "{{ openshift_hosted_routers }}" + +- name: Poll for OpenShift router deployment success + command: | + oc get replicationcontroller {{ item.0.name }}-{{ item.1.stdout }} \ + --namespace {{ item.0.namespace }} \ + --config {{ openshift.common.config_base }}/master/admin.kubeconfig \ + -o jsonpath='{ .metadata.annotations.openshift\.io/deployment\.phase }' + register: openshift_hosted_router_rc_phase + until: "'Running' not in openshift_hosted_router_rc_phase.stdout" + delay: 15 + retries: 40 + failed_when: "'Failed' in openshift_hosted_router_rc_phase.stdout" + with_together: + - "{{ openshift_hosted_routers }}" + - "{{ openshift_hosted_routers_latest_version.results }}" diff --git a/roles/openshift_logging/README.md b/roles/openshift_logging/README.md index 0c60ef6fd..dd0f22d4b 100644 --- 
a/roles/openshift_logging/README.md +++ b/roles/openshift_logging/README.md @@ -55,6 +55,9 @@ When both `openshift_logging_install_logging` and `openshift_logging_upgrade_log - `openshift_logging_fluentd_use_journal`: NOTE: Fluentd will attempt to detect whether or not Docker is using the journald log driver when using the default of empty. - `openshift_logging_fluentd_journal_read_from_head`: If empty, Fluentd will use its internal default, which is false. - `openshift_logging_fluentd_hosts`: List of nodes that should be labeled for Fluentd to be deployed to. Defaults to ['--all']. +- `openshift_logging_fluentd_buffer_queue_limit`: Buffer queue limit for Fluentd. Defaults to 1024. +- `openshift_logging_fluentd_buffer_size_limit`: Buffer chunk limit for Fluentd. Defaults to 1m. + - `openshift_logging_es_host`: The name of the ES service Fluentd should send logs to. Defaults to 'logging-es'. - `openshift_logging_es_port`: The port for the ES service Fluentd should sent its logs to. Defaults to '9200'. @@ -155,3 +158,5 @@ Elasticsearch OPS too, if using an OPS cluster: - `openshift_logging_mux_namespaces`: Default `[]` - additional namespaces to create for _external_ mux clients to associate with their logs - users will need to set this +- `openshift_logging_mux_buffer_queue_limit`: Default `[1024]` - Buffer queue limit for Mux. +- `openshift_logging_mux_buffer_size_limit`: Default `[1m]` - Buffer chunk limit for Mux. diff --git a/roles/openshift_logging/defaults/main.yml b/roles/openshift_logging/defaults/main.yml index 573cbdd09..66d880d23 100644 --- a/roles/openshift_logging/defaults/main.yml +++ b/roles/openshift_logging/defaults/main.yml @@ -76,6 +76,8 @@ openshift_logging_fluentd_use_journal: "{{ openshift_hosted_logging_use_journal openshift_logging_fluentd_journal_source: "{{ openshift_hosted_logging_journal_source | default('') }}" openshift_logging_fluentd_journal_read_from_head: "{{ openshift_hosted_logging_journal_read_from_head | default('') }}" openshift_logging_fluentd_hosts: ['--all'] +openshift_logging_fluentd_buffer_queue_limit: 1024 +openshift_logging_fluentd_buffer_size_limit: 1m openshift_logging_es_host: logging-es openshift_logging_es_port: 9200 @@ -87,7 +89,7 @@ openshift_logging_es_cpu_limit: null # the logging appenders for the root loggers to write ES logs. 
Valid values: 'file', 'console' openshift_logging_es_log_appenders: ['file'] openshift_logging_es_memory_limit: "{{ openshift_hosted_logging_elasticsearch_instance_ram | default('8Gi') }}" -openshift_logging_es_pv_selector: null +openshift_logging_es_pv_selector: "{{ openshift_hosted_logging_storage_labels | default(null) }}" openshift_logging_es_pvc_dynamic: "{{ openshift_hosted_logging_elasticsearch_pvc_dynamic | default(False) }}" openshift_logging_es_pvc_size: "{{ openshift_hosted_logging_elasticsearch_pvc_size | default('') }}" openshift_logging_es_pvc_prefix: "{{ openshift_hosted_logging_elasticsearch_pvc_prefix | default('logging-es') }}" @@ -126,7 +128,7 @@ openshift_logging_es_ops_client_key: /etc/fluent/keys/key openshift_logging_es_ops_cluster_size: "{{ openshift_hosted_logging_elasticsearch_ops_cluster_size | default(1) }}" openshift_logging_es_ops_cpu_limit: null openshift_logging_es_ops_memory_limit: "{{ openshift_hosted_logging_elasticsearch_ops_instance_ram | default('8Gi') }}" -openshift_logging_es_ops_pv_selector: None +openshift_logging_es_ops_pv_selector: "{{ openshift_hosted_loggingops_storage_labels | default(null) }}" openshift_logging_es_ops_pvc_dynamic: "{{ openshift_hosted_logging_elasticsearch_ops_pvc_dynamic | default(False) }}" openshift_logging_es_ops_pvc_size: "{{ openshift_hosted_logging_elasticsearch_ops_pvc_size | default('') }}" openshift_logging_es_ops_pvc_prefix: "{{ openshift_hosted_logging_elasticsearch_ops_pvc_prefix | default('logging-es-ops') }}" diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml index 7c1062b77..66dc0e096 100644 --- a/roles/openshift_logging/tasks/install_logging.yaml +++ b/roles/openshift_logging/tasks/install_logging.yaml @@ -119,6 +119,12 @@ openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_pvc_size }}" openshift_logging_elasticsearch_pvc_dynamic: "{{ openshift_logging_es_pvc_dynamic }}" openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_pv_selector }}" + openshift_logging_es_key: "{{ openshift_logging_es_ops_key }}" + openshift_logging_es_cert: "{{ openshift_logging_es_ops_cert }}" + openshift_logging_es_ca_ext: "{{ openshift_logging_es_ops_ca_ext }}" + openshift_logging_es_hostname: "{{ openshift_logging_es_ops_hostname }}" + openshift_logging_es_edge_term_policy: "{{ openshift_logging_es_ops_edge_term_policy | default('') }}" + openshift_logging_es_allow_external: "{{ openshift_logging_es_ops_allow_external }}" with_together: - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs }}" @@ -141,6 +147,12 @@ openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_pvc_size }}" openshift_logging_elasticsearch_pvc_dynamic: "{{ openshift_logging_es_pvc_dynamic }}" openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_pv_selector }}" + openshift_logging_es_key: "{{ openshift_logging_es_ops_key }}" + openshift_logging_es_cert: "{{ openshift_logging_es_ops_cert }}" + openshift_logging_es_ca_ext: "{{ openshift_logging_es_ops_ca_ext }}" + openshift_logging_es_hostname: "{{ openshift_logging_es_ops_hostname }}" + openshift_logging_es_edge_term_policy: "{{ openshift_logging_es_ops_edge_term_policy | default('') }}" + openshift_logging_es_allow_external: "{{ openshift_logging_es_ops_allow_external }}" with_sequence: count={{ openshift_logging_es_ops_cluster_size | int - openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | count }} when: diff --git 
a/roles/openshift_logging_elasticsearch/tasks/main.yaml b/roles/openshift_logging_elasticsearch/tasks/main.yaml index 7e88a7498..d9ac52cb7 100644 --- a/roles/openshift_logging_elasticsearch/tasks/main.yaml +++ b/roles/openshift_logging_elasticsearch/tasks/main.yaml @@ -194,7 +194,9 @@ - port: 9200 targetPort: "restapi" -- name: Creating ES storage template +# storageclasses are used by default but if static then disable +# storageclasses with the storageClassName set to "" in pvc.j2 +- name: Creating ES storage template - static template: src: pvc.j2 dest: "{{ tempdir }}/templates/logging-es-pvc.yml" @@ -203,11 +205,13 @@ size: "{{ openshift_logging_elasticsearch_pvc_size }}" access_modes: "{{ openshift_logging_elasticsearch_pvc_access_modes | list }}" pv_selector: "{{ openshift_logging_elasticsearch_pvc_pv_selector }}" + storage_class_name: "{{ openshift_logging_elasticsearch_pvc_storage_class_name | default('', true) }}" when: - openshift_logging_elasticsearch_storage_type == "pvc" - not openshift_logging_elasticsearch_pvc_dynamic -- name: Creating ES storage template +# Storageclasses are used by default if configured +- name: Creating ES storage template - dynamic template: src: pvc.j2 dest: "{{ tempdir }}/templates/logging-es-pvc.yml" @@ -216,8 +220,6 @@ size: "{{ openshift_logging_elasticsearch_pvc_size }}" access_modes: "{{ openshift_logging_elasticsearch_pvc_access_modes | list }}" pv_selector: "{{ openshift_logging_elasticsearch_pvc_pv_selector }}" - annotations: - volume.alpha.kubernetes.io/storage-class: "dynamic" when: - openshift_logging_elasticsearch_storage_type == "pvc" - openshift_logging_elasticsearch_pvc_dynamic @@ -269,6 +271,75 @@ - "{{ tempdir }}/templates/logging-es-dc.yml" delete_after: true +- name: Retrieving the cert to use when generating secrets for the {{ es_component }} component + slurp: + src: "{{ generated_certs_dir }}/{{ item.file }}" + register: key_pairs + with_items: + - { name: "ca_file", file: "ca.crt" } + - { name: "es_key", file: "system.logging.es.key" } + - { name: "es_cert", file: "system.logging.es.crt" } + when: openshift_logging_es_allow_external | bool + +- set_fact: + es_key: "{{ lookup('file', openshift_logging_es_key) | b64encode }}" + when: + - openshift_logging_es_key | trim | length > 0 + - openshift_logging_es_allow_external | bool + changed_when: false + +- set_fact: + es_cert: "{{ lookup('file', openshift_logging_es_cert) | b64encode }}" + when: + - openshift_logging_es_cert | trim | length > 0 + - openshift_logging_es_allow_external | bool + changed_when: false + +- set_fact: + es_ca: "{{ lookup('file', openshift_logging_es_ca_ext) | b64encode }}" + when: + - openshift_logging_es_ca_ext | trim | length > 0 + - openshift_logging_es_allow_external | bool + changed_when: false + +- set_fact: + es_ca: "{{ key_pairs | entry_from_named_pair('ca_file') }}" + when: + - es_ca is not defined + - openshift_logging_es_allow_external | bool + changed_when: false + +- name: Generating Elasticsearch {{ es_component }} route template + template: + src: route_reencrypt.j2 + dest: "{{mktemp.stdout}}/templates/logging-{{ es_component }}-route.yaml" + vars: + obj_name: "logging-{{ es_component }}" + route_host: "{{ openshift_logging_es_hostname }}" + service_name: "logging-{{ es_component }}" + tls_key: "{{ es_key | default('') | b64decode }}" + tls_cert: "{{ es_cert | default('') | b64decode }}" + tls_ca_cert: "{{ es_ca | b64decode }}" + tls_dest_ca_cert: "{{ key_pairs | entry_from_named_pair('ca_file') | b64decode }}" + edge_term_policy: "{{ 
openshift_logging_es_edge_term_policy | default('') }}" + labels: + component: support + logging-infra: support + provider: openshift + changed_when: no + when: openshift_logging_es_allow_external | bool + +# This currently has an issue if the host name changes +- name: Setting Elasticsearch {{ es_component }} route + oc_obj: + state: present + name: "logging-{{ es_component }}" + namespace: "{{ openshift_logging_elasticsearch_namespace }}" + kind: route + files: + - "{{ tempdir }}/templates/logging-{{ es_component }}-route.yaml" + when: openshift_logging_es_allow_external | bool + ## Placeholder for migration when necessary ## - name: Delete temp directory diff --git a/roles/openshift_logging_elasticsearch/templates/elasticsearch-logging.yml.j2 b/roles/openshift_logging_elasticsearch/templates/elasticsearch-logging.yml.j2 index 377abe21f..38948ba2f 100644 --- a/roles/openshift_logging_elasticsearch/templates/elasticsearch-logging.yml.j2 +++ b/roles/openshift_logging_elasticsearch/templates/elasticsearch-logging.yml.j2 @@ -35,6 +35,12 @@ appender: layout: type: consolePattern conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n" + # need this filter until https://github.com/openshift/origin/issues/14515 is fixed + filter: + 1: + type: org.apache.log4j.varia.StringMatchFilter + StringToMatch: "SSL Problem illegal change cipher spec msg, conn state = 6, handshake state = 1" + AcceptOnMatch: false file: type: dailyRollingFile @@ -43,6 +49,12 @@ appender: layout: type: pattern conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n" + # need this filter until https://github.com/openshift/origin/issues/14515 is fixed + filter: + 1: + type: org.apache.log4j.varia.StringMatchFilter + StringToMatch: "SSL Problem illegal change cipher spec msg, conn state = 6, handshake state = 1" + AcceptOnMatch: false # Use the following log4j-extras RollingFileAppender to enable gzip compression of log files. 
# For more information see https://logging.apache.org/log4j/extras/apidocs/org/apache/log4j/rolling/RollingFileAppender.html diff --git a/roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2 b/roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2 index 58c325c8a..141967c33 100644 --- a/roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2 +++ b/roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2 @@ -14,8 +14,10 @@ index: flush_threshold_period: 5m node: + name: ${DC_NAME} master: ${IS_MASTER} data: ${HAS_DATA} + max_local_storage_nodes: 1 network: host: 0.0.0.0 @@ -60,7 +62,7 @@ path: searchguard: authcz.admin_dn: - CN=system.admin,OU=OpenShift,O=Logging - config_index_name: ".searchguard.${HOSTNAME}" + config_index_name: ".searchguard.${DC_NAME}" ssl: transport: enabled: true diff --git a/roles/openshift_logging_elasticsearch/templates/es.j2 b/roles/openshift_logging_elasticsearch/templates/es.j2 index bd2289f0d..844dbc8c2 100644 --- a/roles/openshift_logging_elasticsearch/templates/es.j2 +++ b/roles/openshift_logging_elasticsearch/templates/es.j2 @@ -58,6 +58,9 @@ spec: name: "cluster" env: - + name: "DC_NAME" + value: "{{deploy_name}}" + - name: "NAMESPACE" valueFrom: fieldRef: diff --git a/roles/openshift_logging_elasticsearch/templates/pvc.j2 b/roles/openshift_logging_elasticsearch/templates/pvc.j2 index f19a3a750..063f9c5ae 100644 --- a/roles/openshift_logging_elasticsearch/templates/pvc.j2 +++ b/roles/openshift_logging_elasticsearch/templates/pvc.j2 @@ -25,3 +25,6 @@ spec: resources: requests: storage: {{size}} +{% if storage_class_name is defined %} + storageClassName: {{ storage_class_name }} +{% endif %} diff --git a/roles/openshift_logging_elasticsearch/templates/route_reencrypt.j2 b/roles/openshift_logging_elasticsearch/templates/route_reencrypt.j2 new file mode 100644 index 000000000..cf8a9e65f --- /dev/null +++ b/roles/openshift_logging_elasticsearch/templates/route_reencrypt.j2 @@ -0,0 +1,36 @@ +apiVersion: "v1" +kind: "Route" +metadata: + name: "{{obj_name}}" +{% if labels is defined%} + labels: +{% for key, value in labels.iteritems() %} + {{key}}: {{value}} +{% endfor %} +{% endif %} +spec: + host: {{ route_host }} + tls: +{% if tls_key is defined and tls_key | length > 0 %} + key: | +{{ tls_key|indent(6, true) }} +{% if tls_cert is defined and tls_cert | length > 0 %} + certificate: | +{{ tls_cert|indent(6, true) }} +{% endif %} +{% endif %} + caCertificate: | +{% for line in tls_ca_cert.split('\n') %} + {{ line }} +{% endfor %} + destinationCACertificate: | +{% for line in tls_dest_ca_cert.split('\n') %} + {{ line }} +{% endfor %} + termination: reencrypt +{% if edge_term_policy is defined and edge_term_policy | length > 0 %} + insecureEdgeTerminationPolicy: {{ edge_term_policy }} +{% endif %} + to: + kind: Service + name: {{ service_name }} diff --git a/roles/openshift_logging_fluentd/templates/fluentd.j2 b/roles/openshift_logging_fluentd/templates/fluentd.j2 index e185938e3..a5695ee26 100644 --- a/roles/openshift_logging_fluentd/templates/fluentd.j2 +++ b/roles/openshift_logging_fluentd/templates/fluentd.j2 @@ -93,6 +93,14 @@ spec: value: "{{ openshift_logging_fluentd_journal_source | default('') }}" - name: "JOURNAL_READ_FROM_HEAD" value: "{{ openshift_logging_fluentd_journal_read_from_head | lower }}" + - name: "BUFFER_QUEUE_LIMIT" + value: "{{ openshift_logging_fluentd_buffer_queue_limit }}" + - name: "BUFFER_SIZE_LIMIT" + value: "{{ openshift_logging_fluentd_buffer_size_limit }}" + - name: 
"FLUENTD_CPU_LIMIT" + value: "{{ openshift_logging_fluentd_cpu_limit }}" + - name: "FLUENTD_MEMORY_LIMIT" + value: "{{ openshift_logging_fluentd_memory_limit }}" volumes: - name: runlogjournal hostPath: diff --git a/roles/openshift_logging_mux/defaults/main.yml b/roles/openshift_logging_mux/defaults/main.yml index 10fa4372c..77e47d38c 100644 --- a/roles/openshift_logging_mux/defaults/main.yml +++ b/roles/openshift_logging_mux/defaults/main.yml @@ -10,7 +10,9 @@ openshift_logging_mux_namespace: logging ### Common settings openshift_logging_mux_nodeselector: "{{ openshift_hosted_logging_mux_nodeselector_label | default('') | map_from_pairs }}" openshift_logging_mux_cpu_limit: 500m -openshift_logging_mux_memory_limit: 1Gi +openshift_logging_mux_memory_limit: 2Gi +openshift_logging_mux_buffer_queue_limit: 1024 +openshift_logging_mux_buffer_size_limit: 1m openshift_logging_mux_replicas: 1 diff --git a/roles/openshift_logging_mux/templates/mux.j2 b/roles/openshift_logging_mux/templates/mux.j2 index 502cd3347..243698c6a 100644 --- a/roles/openshift_logging_mux/templates/mux.j2 +++ b/roles/openshift_logging_mux/templates/mux.j2 @@ -103,6 +103,14 @@ spec: value: "true" - name: MUX_ALLOW_EXTERNAL value: "{{ openshift_logging_mux_allow_external | default('false') }}" + - name: "BUFFER_QUEUE_LIMIT" + value: "{{ openshift_logging_mux_buffer_queue_limit }}" + - name: "BUFFER_SIZE_LIMIT" + value: "{{ openshift_logging_mux_buffer_size_limit }}" + - name: "MUX_CPU_LIMIT" + value: "{{ openshift_logging_mux_cpu_limit }}" + - name: "MUX_MEMORY_LIMIT" + value: "{{ openshift_logging_mux_memory_limit }}" volumes: - name: config configMap: diff --git a/roles/openshift_master/README.md b/roles/openshift_master/README.md index e5362105c..fbf69c270 100644 --- a/roles/openshift_master/README.md +++ b/roles/openshift_master/README.md @@ -15,18 +15,19 @@ Role Variables From this role: -| Name | Default value | | -|-------------------------------------|-----------------------|-------------------------------------------------------------------------------| -| openshift_master_debug_level | openshift_debug_level | Verbosity of the debug logs for master | -| openshift_node_ips | [] | List of the openshift node ip addresses to pre-register when master starts up | -| oreg_url | UNDEF | Default docker registry to use | -| oreg_url_master | UNDEF | Default docker registry to use, specifically on the master | -| openshift_master_api_port | UNDEF | | -| openshift_master_console_port | UNDEF | | -| openshift_master_api_url | UNDEF | | -| openshift_master_console_url | UNDEF | | -| openshift_master_public_api_url | UNDEF | | -| openshift_master_public_console_url | UNDEF | | +| Name | Default value | | +|---------------------------------------------------|-----------------------|-------------------------------------------------------------------------------| +| openshift_master_debug_level | openshift_debug_level | Verbosity of the debug logs for master | +| openshift_node_ips | [] | List of the openshift node ip addresses to pre-register when master starts up | +| oreg_url | UNDEF | Default docker registry to use | +| oreg_url_master | UNDEF | Default docker registry to use, specifically on the master | +| openshift_master_api_port | UNDEF | | +| openshift_master_console_port | UNDEF | | +| openshift_master_api_url | UNDEF | | +| openshift_master_console_url | UNDEF | | +| openshift_master_public_api_url | UNDEF | | +| openshift_master_public_console_url | UNDEF | | +| openshift_master_saconfig_limit_secret_references | false | | 
From openshift_common: diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml index 14a1daf6c..2d3ce5bcd 100644 --- a/roles/openshift_master/defaults/main.yml +++ b/roles/openshift_master/defaults/main.yml @@ -1,4 +1,4 @@ --- openshift_node_ips: [] -# TODO: update setting these values based on the facts -#openshift_version: "{{ openshift_pkg_version | default(openshift_image_tag | default(openshift.docker.openshift_image_tag | default(''))) }}" +r_openshift_master_clean_install: false +r_openshift_master_etcd3_storage: false diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml index aed5598c0..86532cd0a 100644 --- a/roles/openshift_master/tasks/main.yml +++ b/roles/openshift_master/tasks/main.yml @@ -128,6 +128,9 @@ when: openshift.master.request_header_ca is defined and item.kind == 'RequestHeaderIdentityProvider' and item.clientCA | default('') != '' with_items: "{{ openshift.master.identity_providers }}" +- set_fact: + openshift_push_via_dns: "{{ openshift_use_dnsmasq | default(true) and openshift.common.version_gte_3_6 and r_openshift_master_clean_install }}" + - name: Install the systemd units include: systemd_units.yml diff --git a/roles/openshift_master/templates/atomic-openshift-master.j2 b/roles/openshift_master/templates/atomic-openshift-master.j2 index 6e2439fd9..850fae0e4 100644 --- a/roles/openshift_master/templates/atomic-openshift-master.j2 +++ b/roles/openshift_master/templates/atomic-openshift-master.j2 @@ -1,5 +1,8 @@ OPTIONS=--loglevel={{ openshift.master.debug_level | default(2) }} CONFIG_FILE={{ openshift_master_config_file }} +{% if openshift_push_via_dns | default(false) %} +OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000 +{% endif %} {% if openshift.common.is_containerized | bool %} IMAGE_VERSION={{ openshift_image_tag }} {% endif %} diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2 index 1935d9592..af3ebc6d2 100644 --- a/roles/openshift_master/templates/master.yaml.v1.j2 +++ b/roles/openshift_master/templates/master.yaml.v1.j2 @@ -139,6 +139,12 @@ kubernetesMasterConfig: - v1 {% endif %} apiServerArguments: {{ openshift.master.api_server_args | default(None) | to_padded_yaml( level=2 ) }} +{% if r_openshift_master_etcd3_storage or ( r_openshift_master_clean_install and openshift.common.version_gte_3_6 ) %} + storage-backend: + - etcd3 + storage-media-type: + - application/vnd.kubernetes.protobuf +{% endif %} controllerArguments: {{ openshift.master.controller_args | default(None) | to_padded_yaml( level=2 ) }} masterCount: {{ openshift.master.master_count if openshift.master.cluster_method | default(None) == 'native' else 1 }} masterIP: {{ openshift.common.ip }} @@ -229,7 +235,7 @@ projectConfig: routingConfig: subdomain: "{{ openshift_master_default_subdomain | default("") }}" serviceAccountConfig: - limitSecretReferences: false + limitSecretReferences: {{ openshift_master_saconfig_limitsecretreferences | default(false) }} managedNames: - default - builder diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2 index c484d23cc..c05a27559 100644 --- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2 +++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2 @@ -1,5 +1,8 @@ OPTIONS=--loglevel={{ openshift.master.debug_level }} --listen={{ 
'https' if openshift.master.api_use_ssl else 'http' }}://{{ openshift.master.bind_addr }}:{{ openshift.master.api_port }} --master={{ openshift.master.loopback_api_url }} CONFIG_FILE={{ openshift_master_config_file }} +{% if openshift_push_via_dns | default(false) %} +OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000 +{% endif %} {% if openshift.common.is_containerized | bool %} IMAGE_VERSION={{ openshift_image_tag }} {% endif %} diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2 index e0adbbf52..a153fb33d 100644 --- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2 +++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2 @@ -1,5 +1,8 @@ OPTIONS=--loglevel={{ openshift.master.debug_level }} --listen={{ 'https' if openshift.master.api_use_ssl else 'http' }}://{{ openshift.master.bind_addr }}:{{ openshift.master.controllers_port }} CONFIG_FILE={{ openshift_master_config_file }} +{% if openshift_push_via_dns | default(false) %} +OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000 +{% endif %} {% if openshift.common.is_containerized | bool %} IMAGE_VERSION={{ openshift_image_tag }} {% endif %} diff --git a/roles/openshift_metrics/README.md b/roles/openshift_metrics/README.md index 84503217b..1f10de4a2 100644 --- a/roles/openshift_metrics/README.md +++ b/roles/openshift_metrics/README.md @@ -68,6 +68,9 @@ For default values, see [`defaults/main.yaml`](defaults/main.yaml). - `openshift_metrics_resolution`: How often metrics should be gathered. +- `openshift_metrics_install_hawkular_agent`: Install the Hawkular OpenShift Agent (HOSA). HOSA can be used + to collect custom metrics from your pods. This component is currently in tech-preview and is not installed by default. 
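Editor's note: a minimal sketch of an inventory snippet that would enable the Hawkular OpenShift Agent described above, using the variable names added to the metrics role defaults later in this change; the resource values are illustrative assumptions, not recommendations.

    # group_vars/OSEv3.yml -- HOSA is tech-preview and not installed by default
    openshift_metrics_install_metrics: true
    openshift_metrics_install_hawkular_agent: true
    # namespace the agent DaemonSet is created in (role default is "default")
    openshift_metrics_hawkular_agent_namespace: default
    # optional resource limits/requests; the role defaults leave these null
    openshift_metrics_hawkular_agent_limits_memory: 128Mi
    openshift_metrics_hawkular_agent_requests_memory: 64Mi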
+ ## Additional variables to control resource limits Each metrics component (hawkular, cassandra, heapster) can specify a cpu and memory limits and requests by setting the corresponding role variable: diff --git a/roles/openshift_metrics/defaults/main.yaml b/roles/openshift_metrics/defaults/main.yaml index 1d3db8a1a..c34936930 100644 --- a/roles/openshift_metrics/defaults/main.yaml +++ b/roles/openshift_metrics/defaults/main.yaml @@ -16,6 +16,7 @@ openshift_metrics_hawkular_nodeselector: "" openshift_metrics_cassandra_replicas: 1 openshift_metrics_cassandra_storage_type: "{{ openshift_hosted_metrics_storage_kind | default('emptydir') }}" openshift_metrics_cassandra_pvc_size: "{{ openshift_hosted_metrics_storage_volume_size | default('10Gi') }}" +openshift_metrics_cassandra_pv_selector: "{{ openshift_hosted_metrics_storage_labels | default('') }}" openshift_metrics_cassandra_limits_memory: 2G openshift_metrics_cassandra_limits_cpu: null openshift_metrics_cassandra_requests_memory: 1G @@ -30,6 +31,14 @@ openshift_metrics_heapster_requests_memory: 0.9375G openshift_metrics_heapster_requests_cpu: null openshift_metrics_heapster_nodeselector: "" +openshift_metrics_install_hawkular_agent: False +openshift_metrics_hawkular_agent_limits_memory: null +openshift_metrics_hawkular_agent_limits_cpu: null +openshift_metrics_hawkular_agent_requests_memory: null +openshift_metrics_hawkular_agent_requests_cpu: null +openshift_metrics_hawkular_agent_nodeselector: "" +openshift_metrics_hawkular_agent_namespace: "default" + openshift_metrics_hawkular_hostname: "hawkular-metrics.{{openshift_master_default_subdomain}}" openshift_metrics_duration: 7 diff --git a/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml b/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml index fb4fe2f03..7b81b3c10 100644 --- a/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml +++ b/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml @@ -73,6 +73,8 @@ {{ hawkular_secrets['hawkular-metrics.key'] }} tls.truststore.crt: > {{ hawkular_secrets['hawkular-cassandra.crt'] }} + ca.crt: > + {{ hawkular_secrets['ca.crt'] }} when: name not in metrics_secrets.stdout_lines changed_when: no diff --git a/roles/openshift_metrics/tasks/install_cassandra.yaml b/roles/openshift_metrics/tasks/install_cassandra.yaml index 3b4e8560f..7928a0346 100644 --- a/roles/openshift_metrics/tasks/install_cassandra.yaml +++ b/roles/openshift_metrics/tasks/install_cassandra.yaml @@ -23,7 +23,7 @@ changed_when: false - set_fact: openshift_metrics_cassandra_pvc_prefix="hawkular-metrics" - when: not openshift_metrics_cassandra_pvc_prefix or openshift_metrics_cassandra_pvc_prefix == '' + when: "not openshift_metrics_cassandra_pvc_prefix or openshift_metrics_cassandra_pvc_prefix == ''" - name: generate hawkular-cassandra persistent volume claims template: @@ -35,6 +35,8 @@ metrics-infra: hawkular-cassandra access_modes: "{{ openshift_metrics_cassandra_pvc_access | list }}" size: "{{ openshift_metrics_cassandra_pvc_size }}" + pv_selector: "{{ openshift_metrics_cassandra_pv_selector }}" + storage_class_name: "{{ openshift_metrics_cassanda_pvc_storage_class_name | default('', true) }}" with_sequence: count={{ openshift_metrics_cassandra_replicas }} when: - openshift_metrics_cassandra_storage_type != 'emptydir' @@ -49,10 +51,9 @@ obj_name: "{{ openshift_metrics_cassandra_pvc_prefix }}-{{ item }}" labels: metrics-infra: hawkular-cassandra - annotations: - volume.alpha.kubernetes.io/storage-class: dynamic access_modes: "{{ 
openshift_metrics_cassandra_pvc_access | list }}" size: "{{ openshift_metrics_cassandra_pvc_size }}" + pv_selector: "{{ openshift_metrics_cassandra_pv_selector }}" with_sequence: count={{ openshift_metrics_cassandra_replicas }} when: openshift_metrics_cassandra_storage_type == 'dynamic' changed_when: false diff --git a/roles/openshift_metrics/tasks/install_hosa.yaml b/roles/openshift_metrics/tasks/install_hosa.yaml new file mode 100644 index 000000000..cc533a68b --- /dev/null +++ b/roles/openshift_metrics/tasks/install_hosa.yaml @@ -0,0 +1,44 @@ +--- +- name: Generate Hawkular Agent (HOSA) Cluster Role + template: + src: hawkular_openshift_agent_role.j2 + dest: "{{mktemp.stdout}}/templates/metrics-hawkular-openshift-agent-role.yaml" + changed_when: no + +- name: Generate Hawkular Agent (HOSA) Service Account + template: + src: hawkular_openshift_agent_sa.j2 + dest: "{{mktemp.stdout}}/templates/metrics-hawkular-openshift-agent-sa.yaml" + changed_when: no + +- name: Generate Hawkular Agent (HOSA) Daemon Set + template: + src: hawkular_openshift_agent_ds.j2 + dest: "{{mktemp.stdout}}/templates/metrics-hawkular-openshift-agent-ds.yaml" + vars: + node_selector: "{{openshift_metrics_hawkular_agent_nodeselector | default('') }}" + changed_when: no + +- name: Generate the Hawkular Agent (HOSA) Configmap + template: + src: hawkular_openshift_agent_cm.j2 + dest: "{{mktemp.stdout}}/templates/metrics-hawkular-openshift-agent-cm.yaml" + changed_when: no + +- name: Generate role binding for the hawkular-openshift-agent service account + template: + src: rolebinding.j2 + dest: "{{ mktemp.stdout }}/templates/metrics-hawkular-agent-rolebinding.yaml" + vars: + cluster: True + obj_name: hawkular-openshift-agent-rb + labels: + metrics-infra: hawkular-agent + roleRef: + kind: ClusterRole + name: hawkular-openshift-agent + subjects: + - kind: ServiceAccount + name: hawkular-openshift-agent + namespace: "{{openshift_metrics_hawkular_agent_namespace}}" + changed_when: no diff --git a/roles/openshift_metrics/tasks/install_metrics.yaml b/roles/openshift_metrics/tasks/install_metrics.yaml index 74eb56713..fdf4ae57f 100644 --- a/roles/openshift_metrics/tasks/install_metrics.yaml +++ b/roles/openshift_metrics/tasks/install_metrics.yaml @@ -16,11 +16,19 @@ include: install_heapster.yaml when: openshift_metrics_heapster_standalone | bool -- find: paths={{ mktemp.stdout }}/templates patterns=*.yaml +- name: Install Hawkular OpenShift Agent (HOSA) + include: install_hosa.yaml + when: openshift_metrics_install_hawkular_agent | default(false) | bool + +- find: + paths: "{{ mktemp.stdout }}/templates" + patterns: "^(?!metrics-hawkular-openshift-agent).*.yaml" + use_regex: true register: object_def_files changed_when: no -- slurp: src={{item.path}} +- slurp: + src: "{{item.path}}" register: object_defs with_items: "{{object_def_files.files}}" changed_when: no @@ -34,6 +42,31 @@ file_content: "{{ item.content | b64decode | from_yaml }}" with_items: "{{ object_defs.results }}" +- find: + paths: "{{ mktemp.stdout }}/templates" + patterns: "^metrics-hawkular-openshift-agent.*.yaml" + use_regex: true + register: hawkular_agent_object_def_files + when: openshift_metrics_install_hawkular_agent | bool + changed_when: no + +- slurp: + src: "{{item.path}}" + register: hawkular_agent_object_defs + with_items: "{{ hawkular_agent_object_def_files.files }}" + when: openshift_metrics_install_hawkular_agent | bool + changed_when: no + +- name: Create Hawkular Agent objects + include: oc_apply.yaml + vars: + kubeconfig: "{{ mktemp.stdout 
}}/admin.kubeconfig" + namespace: "{{ openshift_metrics_hawkular_agent_namespace }}" + file_name: "{{ item.source }}" + file_content: "{{ item.content | b64decode | from_yaml }}" + with_items: "{{ hawkular_agent_object_defs.results }}" + when: openshift_metrics_install_hawkular_agent | bool + - include: update_master_config.yaml - command: > diff --git a/roles/openshift_metrics/tasks/main.yaml b/roles/openshift_metrics/tasks/main.yaml index 5d8506a73..0b5f23c24 100644 --- a/roles/openshift_metrics/tasks/main.yaml +++ b/roles/openshift_metrics/tasks/main.yaml @@ -44,6 +44,9 @@ - include: "{{ (openshift_metrics_install_metrics | bool) | ternary('install_metrics.yaml','uninstall_metrics.yaml') }}" +- include: uninstall_hosa.yaml + when: not openshift_metrics_install_hawkular_agent | bool + - name: Delete temp directory local_action: file path=local_tmp.stdout state=absent tags: metrics_cleanup diff --git a/roles/openshift_metrics/tasks/oc_apply.yaml b/roles/openshift_metrics/tasks/oc_apply.yaml index dd67703b4..1e1af40e8 100644 --- a/roles/openshift_metrics/tasks/oc_apply.yaml +++ b/roles/openshift_metrics/tasks/oc_apply.yaml @@ -14,7 +14,7 @@ command: > {{ openshift.common.client_binary }} --config={{ kubeconfig }} apply -f {{ file_name }} - -n {{ openshift_metrics_project }} + -n {{namespace}} register: generation_apply failed_when: "'error' in generation_apply.stderr" changed_when: no diff --git a/roles/openshift_metrics/tasks/uninstall_hosa.yaml b/roles/openshift_metrics/tasks/uninstall_hosa.yaml new file mode 100644 index 000000000..42ed02460 --- /dev/null +++ b/roles/openshift_metrics/tasks/uninstall_hosa.yaml @@ -0,0 +1,15 @@ +--- +- name: remove Hawkular Agent (HOSA) components + command: > + {{ openshift.common.client_binary }} -n {{ openshift_metrics_hawkular_agent_namespace }} --config={{ mktemp.stdout }}/admin.kubeconfig + delete --ignore-not-found --selector=metrics-infra=agent + all,sa,secrets,templates,routes,pvc,rolebindings,clusterrolebindings + register: delete_metrics + changed_when: delete_metrics.stdout != 'No resources found' + +- name: remove rolebindings + command: > + {{ openshift.common.client_binary }} -n {{ openshift_metrics_hawkular_agent_namespace }} --config={{ mktemp.stdout }}/admin.kubeconfig + delete --ignore-not-found + clusterrolebinding/hawkular-openshift-agent-rb + changed_when: delete_metrics.stdout != 'No resources found' diff --git a/roles/openshift_metrics/templates/hawkular_openshift_agent_cm.j2 b/roles/openshift_metrics/templates/hawkular_openshift_agent_cm.j2 new file mode 100644 index 000000000..bf472c066 --- /dev/null +++ b/roles/openshift_metrics/templates/hawkular_openshift_agent_cm.j2 @@ -0,0 +1,54 @@ +id: hawkular-openshift-agent +kind: ConfigMap +apiVersion: v1 +name: Hawkular OpenShift Agent Configuration +metadata: + name: hawkular-openshift-agent-configuration + labels: + metrics-infra: agent + namespace: {{openshift_metrics_hawkular_agent_namespace}} +data: + config.yaml: | + kubernetes: + tenant: ${POD:namespace_name} + hawkular_server: + url: https://hawkular-metrics.openshift-infra.svc.cluster.local + credentials: + username: secret:openshift-infra/hawkular-metrics-account/hawkular-metrics.username + password: secret:openshift-infra/hawkular-metrics-account/hawkular-metrics.password + ca_cert_file: secret:openshift-infra/hawkular-metrics-certs/ca.crt + emitter: + status_enabled: false + collector: + minimum_collection_interval: 10s + default_collection_interval: 30s + metric_id_prefix: pod/${POD:uid}/custom/ + tags: + metric_name: 
${METRIC:name} + description: ${METRIC:description} + units: ${METRIC:units} + namespace_id: ${POD:namespace_uid} + namespace_name: ${POD:namespace_name} + node_name: ${POD:node_name} + pod_id: ${POD:uid} + pod_ip: ${POD:ip} + pod_name: ${POD:name} + pod_namespace: ${POD:namespace_name} + hostname: ${POD:hostname} + host_ip: ${POD:host_ip} + labels: ${POD:labels} + type: pod + collector: hawkular_openshift_agent + custom_metric: true + hawkular-openshift-agent: | + endpoints: + - type: prometheus + protocol: "http" + port: 8080 + path: /metrics + collection_interval: 30s + metrics: + - name: hawkular_openshift_agent_metric_data_points_collected_total + - name: hawkular_openshift_agent_monitored_endpoints + - name: hawkular_openshift_agent_monitored_pods + - name: hawkular_openshift_agent_monitored_metrics diff --git a/roles/openshift_metrics/templates/hawkular_openshift_agent_ds.j2 b/roles/openshift_metrics/templates/hawkular_openshift_agent_ds.j2 new file mode 100644 index 000000000..d65eaf9ae --- /dev/null +++ b/roles/openshift_metrics/templates/hawkular_openshift_agent_ds.j2 @@ -0,0 +1,91 @@ +apiVersion: extensions/v1beta1 +kind: DaemonSet +metadata: + name: hawkular-openshift-agent + labels: + name: hawkular-openshift-agent + metrics-infra: agent + namespace: {{openshift_metrics_hawkular_agent_namespace}} +spec: + selector: + matchLabels: + name: hawkular-openshift-agent + template: + metadata: + labels: + name: hawkular-openshift-agent + metrics-infra: agent + spec: + serviceAccount: hawkular-openshift-agent +{% if node_selector is iterable and node_selector | length > 0 %} + nodeSelector: +{% for key, value in node_selector.iteritems() %} + {{key}}: "{{value}}" +{% endfor %} +{% endif %} + containers: + - image: {{openshift_metrics_image_prefix}}metrics-hawkular-openshift-agent:{{openshift_metrics_image_version}} + imagePullPolicy: Always + name: hawkular-openshift-agent +{% if ((openshift_metrics_hawkular_agent_limits_cpu is defined and openshift_metrics_hawkular_agent_limits_cpu is not none) + or (openshift_metrics_hawkular_agent_limits_memory is defined and openshift_metrics_hawkular_agent_limits_memory is not none) + or (openshift_metrics_hawkular_agent_requests_cpu is defined and openshift_metrics_hawkular_agent_requests_cpu is not none) + or (openshift_metrics_hawkular_agent_requests_memory is defined and openshift_metrics_hawkular_agent_requests_memory is not none)) +%} + resources: +{% if (openshift_metrics_hawkular_agent_limits_cpu is not none + or openshift_metrics_hawkular_agent_limits_memory is not none) +%} + limits: +{% if openshift_metrics_hawkular_agent_limits_cpu is not none %} + cpu: "{{openshift_metrics_hawkular_agent_limits_cpu}}" +{% endif %} +{% if openshift_metrics_hawkular_agent_limits_memory is not none %} + memory: "{{openshift_metrics_hawkular_agent_limits_memory}}" +{% endif %} +{% endif %} +{% if (openshift_metrics_hawkular_agent_requests_cpu is not none + or openshift_metrics_hawkular_agent_requests_memory is not none) +%} + requests: +{% if openshift_metrics_hawkular_agent_requests_cpu is not none %} + cpu: "{{openshift_metrics_hawkular_agent_requests_cpu}}" +{% endif %} +{% if openshift_metrics_hawkular_agent_requests_memory is not none %} + memory: "{{openshift_metrics_hawkular_agent_requests_memory}}" +{% endif %} +{% endif %} +{% endif %} + + livenessProbe: + httpGet: + scheme: HTTP + path: /health + port: 8080 + initialDelaySeconds: 30 + periodSeconds: 30 + command: + - "hawkular-openshift-agent" + - "-config" + - 
"/hawkular-openshift-agent-configuration/config.yaml" + - "-v" + - "3" + env: + - name: K8S_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: K8S_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + volumeMounts: + - name: hawkular-openshift-agent-configuration + mountPath: "/hawkular-openshift-agent-configuration" + volumes: + - name: hawkular-openshift-agent-configuration + configMap: + name: hawkular-openshift-agent-configuration + - name: hawkular-openshift-agent + configMap: + name: hawkular-openshift-agent-configuration diff --git a/roles/openshift_metrics/templates/hawkular_openshift_agent_role.j2 b/roles/openshift_metrics/templates/hawkular_openshift_agent_role.j2 new file mode 100644 index 000000000..24b8cd801 --- /dev/null +++ b/roles/openshift_metrics/templates/hawkular_openshift_agent_role.j2 @@ -0,0 +1,25 @@ +apiVersion: v1 +kind: ClusterRole +metadata: + name: hawkular-openshift-agent + labels: + metrics-infra: agent +rules: +- apiGroups: + - "" + resources: + - configmaps + - namespaces + - nodes + - pods + - projects + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - secrets + verbs: + - get diff --git a/roles/openshift_metrics/templates/hawkular_openshift_agent_sa.j2 b/roles/openshift_metrics/templates/hawkular_openshift_agent_sa.j2 new file mode 100644 index 000000000..ec604d73c --- /dev/null +++ b/roles/openshift_metrics/templates/hawkular_openshift_agent_sa.j2 @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: hawkular-openshift-agent + labels: + metrics-infra: agent + namespace: {{openshift_metrics_hawkular_agent_namespace}} diff --git a/roles/openshift_metrics/templates/pvc.j2 b/roles/openshift_metrics/templates/pvc.j2 index c2e56ba21..b4e6a1503 100644 --- a/roles/openshift_metrics/templates/pvc.j2 +++ b/roles/openshift_metrics/templates/pvc.j2 @@ -18,6 +18,13 @@ metadata: {% endfor %} {% endif %} spec: +{% if pv_selector is defined and pv_selector is mapping %} + selector: + matchLabels: +{% for key,value in pv_selector.iteritems() %} + {{key}}: {{value}} +{% endfor %} +{% endif %} accessModes: {% for mode in access_modes %} - {{ mode }} @@ -25,3 +32,6 @@ spec: resources: requests: storage: {{size}} +{% if storage_class_name is defined %} + storageClassName: {{ storage_class_name }} +{% endif %} diff --git a/roles/openshift_node/handlers/main.yml b/roles/openshift_node/handlers/main.yml index 4dcf1eef8..a6bd12d4e 100644 --- a/roles/openshift_node/handlers/main.yml +++ b/roles/openshift_node/handlers/main.yml @@ -1,6 +1,8 @@ --- - name: restart openvswitch - systemd: name=openvswitch state=restarted + systemd: + name: openvswitch + state: restarted when: (not skip_node_svc_handlers | default(False) | bool) and not (ovs_service_status_changed | default(false) | bool) and openshift.common.use_openshift_sdn | bool notify: - restart openvswitch pause @@ -10,8 +12,13 @@ when: (not skip_node_svc_handlers | default(False) | bool) and openshift.common.is_containerized | bool - name: restart node - systemd: name={{ openshift.common.service_type }}-node state=restarted + systemd: + name: "{{ openshift.common.service_type }}-node" + state: restarted when: (not skip_node_svc_handlers | default(False) | bool) and not (node_service_status_changed | default(false) | bool) - name: reload sysctl.conf command: /sbin/sysctl -p + +- name: reload systemd units + command: systemctl daemon-reload diff --git a/roles/openshift_node/tasks/systemd_units.yml b/roles/openshift_node/tasks/systemd_units.yml index 
f58c803c4..e3ce5df3d 100644 --- a/roles/openshift_node/tasks/systemd_units.yml +++ b/roles/openshift_node/tasks/systemd_units.yml @@ -8,6 +8,9 @@ src: openshift.docker.node.dep.service register: install_node_dep_result when: openshift.common.is_containerized | bool + notify: + - reload systemd units + - restart node - block: - name: Pre-pull node image @@ -21,6 +24,9 @@ dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service" src: openshift.docker.node.service register: install_node_result + notify: + - reload systemd units + - restart node when: - openshift.common.is_containerized | bool - not openshift.common.is_node_system_container | bool @@ -31,6 +37,9 @@ src: "{{ openshift.common.service_type }}-node.service.j2" register: install_node_result when: not openshift.common.is_containerized | bool + notify: + - reload systemd units + - restart node - name: Create the openvswitch service env file template: @@ -39,6 +48,7 @@ when: openshift.common.is_containerized | bool register: install_ovs_sysconfig notify: + - reload systemd units - restart openvswitch - name: Install Node system container @@ -67,6 +77,7 @@ when: openshift.common.use_openshift_sdn | default(true) | bool register: install_oom_fix_result notify: + - reload systemd units - restart openvswitch - block: @@ -81,6 +92,7 @@ dest: "/etc/systemd/system/openvswitch.service" src: openvswitch.docker.service notify: + - reload systemd units - restart openvswitch when: - openshift.common.is_containerized | bool @@ -119,8 +131,3 @@ when: ('http_proxy' in openshift.common and openshift.common.http_proxy != '') notify: - restart node - -- name: Reload systemd units - command: systemctl daemon-reload - notify: - - restart node diff --git a/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh b/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh index 24798d3d2..c68073a10 100755 --- a/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh +++ b/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh @@ -96,6 +96,9 @@ EOF if ! grep -q '99-origin-dns.sh' ${NEW_RESOLV_CONF}; then echo "# nameserver updated by /etc/NetworkManager/dispatcher.d/99-origin-dns.sh" >> ${NEW_RESOLV_CONF} fi + if ! grep -q 'search.*cluster.local' ${NEW_RESOLV_CONF}; then + sed -i '/^search/ s/$/ cluster.local/' ${NEW_RESOLV_CONF} + fi cp -Z ${NEW_RESOLV_CONF} /etc/resolv.conf fi fi diff --git a/roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2 b/roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2 index f397cbbf1..8bae9aaac 100644 --- a/roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2 +++ b/roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2 @@ -1,3 +1,5 @@ no-resolv domain-needed server=/{{ openshift.common.dns_domain }}/{{ openshift.common.kube_svc_ip }} +no-negcache +max-cache-ttl=1 diff --git a/roles/openshift_node_upgrade/tasks/main.yml b/roles/openshift_node_upgrade/tasks/main.yml index d44839d69..8eaa68cc9 100644 --- a/roles/openshift_node_upgrade/tasks/main.yml +++ b/roles/openshift_node_upgrade/tasks/main.yml @@ -147,3 +147,6 @@ # Give the node two minutes to come back online. 
retries: 24 delay: 5 + +- include_role: + name: openshift_node_dnsmasq diff --git a/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2 b/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2 index 877e88002..9c5103597 100644 --- a/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2 +++ b/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2 @@ -7,6 +7,12 @@ items: kind: PersistentVolume metadata: name: "{{ volume.name }}" +{% if volume.labels is defined and volume.labels is mapping %} + labels: +{% for key,value in volume.labels.iteritems() %} + {{ key }}: {{ value }} +{% endfor %} +{% endif %} spec: capacity: storage: "{{ volume.capacity }}" diff --git a/roles/openshift_repos/tasks/main.yaml b/roles/openshift_repos/tasks/main.yaml index 023b1a9b7..8f8550e2d 100644 --- a/roles/openshift_repos/tasks/main.yaml +++ b/roles/openshift_repos/tasks/main.yaml @@ -4,7 +4,8 @@ path: /run/ostree-booted register: ostree_booted -- block: +- when: not ostree_booted.stat.exists + block: - name: Ensure libselinux-python is installed package: name=libselinux-python state=present @@ -24,41 +25,40 @@ - openshift_additional_repos | length == 0 notify: refresh cache - # Note: OpenShift repositories under CentOS may be shipped through the - # "centos-release-openshift-origin" package which configures the repository. - # This task matches the file names provided by the package so that they are - # not installed twice in different files and remains idempotent. - - name: Configure origin gpg keys if needed - copy: - src: "{{ item.src }}" - dest: "{{ item.dest }}" - with_items: - - src: origin/gpg_keys/openshift-ansible-CentOS-SIG-PaaS - dest: /etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS - - src: origin/repos/openshift-ansible-centos-paas-sig.repo - dest: /etc/yum.repos.d/CentOS-OpenShift-Origin.repo - notify: refresh cache - when: - - ansible_os_family == "RedHat" - - ansible_distribution != "Fedora" - - openshift_deployment_type == 'origin' - - openshift_enable_origin_repo | default(true) | bool - # Singleton block - - when: r_osr_first_run | default(true) + - when: r_openshift_repos_has_run is not defined block: + + # Note: OpenShift repositories under CentOS may be shipped through the + # "centos-release-openshift-origin" package which configures the repository. + # This task matches the file names provided by the package so that they are + # not installed twice in different files and remains idempotent. + - name: Configure origin gpg keys if needed + copy: + src: "{{ item.src }}" + dest: "{{ item.dest }}" + with_items: + - src: origin/gpg_keys/openshift-ansible-CentOS-SIG-PaaS + dest: /etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS + - src: origin/repos/openshift-ansible-centos-paas-sig.repo + dest: /etc/yum.repos.d/CentOS-OpenShift-Origin.repo + notify: refresh cache + when: + - ansible_os_family == "RedHat" + - ansible_distribution != "Fedora" + - openshift_deployment_type == 'origin' + - openshift_enable_origin_repo | default(true) | bool + - name: Ensure clean repo cache in the event repos have been changed manually debug: msg: "First run of openshift_repos" changed_when: true notify: refresh cache - - name: Set fact r_osr_first_run false + - name: Record that openshift_repos already ran set_fact: - r_osr_first_run: false + r_openshift_repos_has_run: True # Force running ALL handlers now, because we expect repo cache to be cleared # if changes have been made. 
- meta: flush_handlers - - when: not ostree_booted.stat.exists diff --git a/roles/openshift_service_catalog/defaults/main.yml b/roles/openshift_service_catalog/defaults/main.yml new file mode 100644 index 000000000..01ee2544d --- /dev/null +++ b/roles/openshift_service_catalog/defaults/main.yml @@ -0,0 +1,3 @@ +--- +openshift_service_catalog_remove: false +openshift_service_catalog_nodeselector: {"openshift-infra": "apiserver"} diff --git a/roles/openshift_service_catalog/files/kubeservicecatalog_roles_bindings.yml b/roles/openshift_service_catalog/files/kubeservicecatalog_roles_bindings.yml new file mode 100644 index 000000000..880146ca4 --- /dev/null +++ b/roles/openshift_service_catalog/files/kubeservicecatalog_roles_bindings.yml @@ -0,0 +1,161 @@ +apiVersion: v1 +kind: Template +metadata: + name: service-catalog +objects: + +- kind: ClusterRole + apiVersion: v1 + metadata: + name: servicecatalog-serviceclass-viewer + rules: + - apiGroups: + - servicecatalog.k8s.io + resources: + - serviceclasses + verbs: + - list + - watch + - get + +- kind: ClusterRoleBinding + apiVersion: v1 + metadata: + name: servicecatalog-serviceclass-viewer-binding + roleRef: + name: servicecatalog-serviceclass-viewer + groupNames: + - system:authenticated + +- kind: ServiceAccount + apiVersion: v1 + metadata: + name: service-catalog-controller + +- kind: ServiceAccount + apiVersion: v1 + metadata: + name: service-catalog-apiserver + +- kind: ClusterRole + apiVersion: v1 + metadata: + name: sar-creator + rules: + - apiGroups: + - "" + resources: + - subjectaccessreviews.authorization.k8s.io + verbs: + - create + +- kind: ClusterRoleBinding + apiVersion: v1 + metadata: + name: service-catalog-sar-creator-binding + roleRef: + name: sar-creator + userNames: + - system:serviceaccount:kube-service-catalog:service-catalog-apiserver + +- kind: ClusterRole + apiVersion: v1 + metadata: + name: namespace-viewer + rules: + - apiGroups: + - "" + resources: + - namespaces + verbs: + - list + - watch + - get + +- kind: ClusterRoleBinding + apiVersion: v1 + metadata: + name: service-catalog-namespace-viewer-binding + roleRef: + name: namespace-viewer + userNames: + - system:serviceaccount:kube-service-catalog:service-catalog-apiserver + +- kind: ClusterRoleBinding + apiVersion: v1 + metadata: + name: service-catalog-controller-namespace-viewer-binding + roleRef: + name: namespace-viewer + userNames: + - system:serviceaccount:kube-service-catalog:service-catalog-controller + +- kind: ClusterRole + apiVersion: v1 + metadata: + name: service-catalog-controller + rules: + - apiGroups: + - "" + resources: + - secrets + - podpresets + verbs: + - create + - update + - delete + - get + - list + - watch + - apiGroups: + - servicecatalog.k8s.io + resources: + - brokers/status + - instances/status + - bindings/status + verbs: + - update + +- kind: ClusterRoleBinding + apiVersion: v1 + metadata: + name: service-catalog-controller-binding + roleRef: + name: service-catalog-controller + userNames: + - system:serviceaccount:kube-service-catalog:service-catalog-controller + +- kind: Role + apiVersion: v1 + metadata: + name: endpoint-accessor + rules: + - apiGroups: + - "" + resources: + - endpoints + verbs: + - list + - watch + - get + - create + - update + +- kind: RoleBinding + apiVersion: v1 + metadata: + name: endpoint-accessor-binding + roleRef: + name: endpoint-accessor + namespace: kube-service-catalog + userNames: + - system:serviceaccount:kube-service-catalog:service-catalog-controller + +- kind: ClusterRoleBinding + apiVersion: v1 + 
metadata: + name: system:auth-delegator-binding + roleRef: + name: system:auth-delegator + userNames: + - system:serviceaccount:kube-service-catalog:service-catalog-apiserver diff --git a/roles/openshift_service_catalog/files/kubesystem_roles_bindings.yml b/roles/openshift_service_catalog/files/kubesystem_roles_bindings.yml new file mode 100644 index 000000000..f6ee0955d --- /dev/null +++ b/roles/openshift_service_catalog/files/kubesystem_roles_bindings.yml @@ -0,0 +1,38 @@ +apiVersion: v1 +kind: Template +metadata: + name: kube-system-service-catalog +objects: + +- kind: Role + apiVersion: v1 + metadata: + name: extension-apiserver-authentication-reader + namespace: ${KUBE_SYSTEM_NAMESPACE} + rules: + - apiGroups: + - "" + resourceNames: + - extension-apiserver-authentication + resources: + - configmaps + verbs: + - get + +- kind: RoleBinding + apiVersion: v1 + metadata: + name: extension-apiserver-authentication-reader-binding + namespace: ${KUBE_SYSTEM_NAMESPACE} + roleRef: + name: extension-apiserver-authentication-reader + namespace: kube-system + userNames: + - system:serviceaccount:kube-service-catalog:service-catalog-apiserver + +parameters: +- description: Do not change this value. + displayName: Name of the kube-system namespace + name: KUBE_SYSTEM_NAMESPACE + required: true + value: kube-system diff --git a/roles/openshift_service_catalog/meta/main.yml b/roles/openshift_service_catalog/meta/main.yml new file mode 100644 index 000000000..1e6b837cd --- /dev/null +++ b/roles/openshift_service_catalog/meta/main.yml @@ -0,0 +1,17 @@ +--- +galaxy_info: + author: OpenShift Red Hat + description: OpenShift Service Catalog + company: Red Hat, Inc. + license: Apache License, Version 2.0 + min_ansible_version: 2.2 + platforms: + - name: EL + versions: + - 7 + categories: + - cloud +dependencies: +- role: lib_openshift +- role: openshift_facts +- role: lib_utils diff --git a/roles/openshift_service_catalog/tasks/generate_certs.yml b/roles/openshift_service_catalog/tasks/generate_certs.yml new file mode 100644 index 000000000..cc897b032 --- /dev/null +++ b/roles/openshift_service_catalog/tasks/generate_certs.yml @@ -0,0 +1,70 @@ +--- +- name: Create service catalog cert directory + file: + path: "{{ openshift.common.config_base }}/service-catalog" + state: directory + mode: 0755 + changed_when: False + check_mode: no + +- set_fact: + generated_certs_dir: "{{ openshift.common.config_base }}/service-catalog" + +- name: Generate signing cert + command: > + {{ openshift.common.client_binary }} adm --config=/etc/origin/master/admin.kubeconfig ca create-signer-cert + --key={{ generated_certs_dir }}/ca.key --cert={{ generated_certs_dir }}/ca.crt + --serial={{ generated_certs_dir }}/apiserver.serial.txt --name=service-catalog-signer + +- name: Generating server keys + oc_adm_ca_server_cert: + cert: "{{ generated_certs_dir }}/apiserver.crt" + key: "{{ generated_certs_dir }}/apiserver.key" + hostnames: "apiserver.kube-service-catalog.svc,apiserver.kube-service-catalog.svc.cluster.local,apiserver.kube-service-catalog" + signer_cert: "{{ generated_certs_dir }}/ca.crt" + signer_key: "{{ generated_certs_dir }}/ca.key" + signer_serial: "{{ generated_certs_dir }}/apiserver.serial.txt" + +- name: Create apiserver-ssl secret + oc_secret: + state: present + name: apiserver-ssl + namespace: kube-service-catalog + files: + - name: tls.crt + path: "{{ generated_certs_dir }}/apiserver.crt" + - name: tls.key + path: "{{ generated_certs_dir }}/apiserver.key" + +- slurp: + src: "{{ generated_certs_dir }}/ca.crt" + 
register: apiserver_ca + +- shell: > + oc get apiservices.apiregistration.k8s.io/v1alpha1.servicecatalog.k8s.io -n kube-service-catalog || echo "not found" + register: get_apiservices + changed_when: no + +- name: Create api service + oc_obj: + state: present + name: v1alpha1.servicecatalog.k8s.io + kind: apiservices.apiregistration.k8s.io + namespace: "kube-service-catalog" + content: + path: /tmp/apisvcout + data: + apiVersion: apiregistration.k8s.io/v1beta1 + kind: APIService + metadata: + name: v1alpha1.servicecatalog.k8s.io + spec: + group: servicecatalog.k8s.io + version: v1alpha1 + service: + namespace: "kube-service-catalog" + name: apiserver + caBundle: "{{ apiserver_ca.content }}" + groupPriorityMinimum: 20 + versionPriority: 10 + when: "'not found' in get_apiservices.stdout" diff --git a/roles/openshift_service_catalog/tasks/install.yml b/roles/openshift_service_catalog/tasks/install.yml new file mode 100644 index 000000000..c1773b5f6 --- /dev/null +++ b/roles/openshift_service_catalog/tasks/install.yml @@ -0,0 +1,181 @@ +--- +# do any asserts here + +- name: Create temp directory for doing work in + command: mktemp -d /tmp/openshift-service-catalog-ansible-XXXXXX + register: mktemp + changed_when: False + + +- include: wire_aggregator.yml + +- name: Set default image variables based on deployment_type + include_vars: "{{ item }}" + with_first_found: + - "{{ openshift_deployment_type | default(deployment_type) }}.yml" + - "default_images.yml" + +- name: Set service_catalog image facts + set_fact: + openshift_service_catalog_image_prefix: "{{ openshift_service_catalog_image_prefix | default(__openshift_service_catalog_image_prefix) }}" + openshift_service_catalog_image_version: "{{ openshift_service_catalog_image_version | default(__openshift_service_catalog_image_version) }}" + +- name: Set Service Catalog namespace + oc_project: + state: present + name: "kube-service-catalog" +# node_selector: "{{ openshift_service_catalog_nodeselector | default(null) }}" + +- include: generate_certs.yml + +- copy: + src: kubeservicecatalog_roles_bindings.yml + dest: "{{ mktemp.stdout }}/kubeservicecatalog_roles_bindings.yml" + +- oc_obj: + name: service-catalog + kind: template + namespace: "kube-service-catalog" + files: + - "{{ mktemp.stdout }}/kubeservicecatalog_roles_bindings.yml" + delete_after: yes + +- oc_process: + create: True + template_name: service-catalog + namespace: "kube-service-catalog" + +- copy: + src: kubesystem_roles_bindings.yml + dest: "{{ mktemp.stdout }}/kubesystem_roles_bindings.yml" + +- oc_obj: + name: kube-system-service-catalog + kind: template + namespace: kube-system + files: + - "{{ mktemp.stdout }}/kubesystem_roles_bindings.yml" + delete_after: yes + +- oc_process: + create: True + template_name: kube-system-service-catalog + namespace: kube-system + +- shell: > + oc get policybindings/kube-system:default -n kube-system || echo "not found" + register: get_kube_system + changed_when: no + +- command: > + oc create policybinding kube-system -n kube-system + when: "'not found' in get_kube_system.stdout" + +- oc_adm_policy_user: + namespace: kube-service-catalog + resource_kind: scc + resource_name: hostmount-anyuid + state: present + user: "system:serviceaccount:kube-service-catalog:service-catalog-apiserver" + +- name: Set SA cluster-role + oc_adm_policy_user: + state: present + namespace: "kube-service-catalog" + resource_kind: cluster-role + resource_name: admin + user: "system:serviceaccount:kube-service-catalog:default" + +## api server +- template: + src: 
api_server.j2 + dest: "{{ mktemp.stdout }}/service_catalog_api_server.yml" + vars: + image: "" + namespace: "" + cpu_limit: none + memory_limit: none + cpu_requests: none + memory_request: none + cors_allowed_origin: localhost + node_selector: "{{ openshift_service_catalog_nodeselector | default ({'openshift-infra': 'apiserver'}) }}" + +- name: Set Service Catalog API Server daemonset + oc_obj: + state: present + namespace: "kube-service-catalog" + kind: daemonset + name: apiserver + files: + - "{{ mktemp.stdout }}/service_catalog_api_server.yml" + delete_after: yes + +- template: + src: api_server_service.j2 + dest: "{{ mktemp.stdout }}/service_catalog_api_service.yml" + +- name: Set Service Catalog API Server service + oc_obj: + state: present + namespace: "kube-service-catalog" + kind: service + name: apiserver + files: + - "{{ mktemp.stdout }}/service_catalog_api_service.yml" + delete_after: yes + +- template: + src: api_server_route.j2 + dest: "{{ mktemp.stdout }}/service_catalog_api_route.yml" + +- name: Set Service Catalog API Server route + oc_obj: + state: present + namespace: "kube-service-catalog" + kind: route + name: apiserver + files: + - "{{ mktemp.stdout }}/service_catalog_api_route.yml" + delete_after: yes + +## controller manager +- template: + src: controller_manager.j2 + dest: "{{ mktemp.stdout }}/controller_manager.yml" + vars: + image: "" + cpu_limit: none + memory_limit: none + node_selector: "{{ openshift_service_catalog_nodeselector | default ({'openshift-infra': 'apiserver'}) }}" + +- name: Set Controller Manager deployment + oc_obj: + state: present + namespace: "kube-service-catalog" + kind: daemonset + name: controller-manager + files: + - "{{ mktemp.stdout }}/controller_manager.yml" + delete_after: yes + +- template: + src: controller_manager_service.j2 + dest: "{{ mktemp.stdout }}/controller_manager_service.yml" + +- name: Set Controller Manager service + oc_obj: + state: present + namespace: "kube-service-catalog" + kind: service + name: controller-manager + files: + - "{{ mktemp.stdout }}/controller_manager_service.yml" + delete_after: yes + +- include: start_api_server.yml + +- name: Delete temp directory + file: + name: "{{ mktemp.stdout }}" + state: absent + changed_when: False diff --git a/roles/openshift_service_catalog/tasks/main.yml b/roles/openshift_service_catalog/tasks/main.yml new file mode 100644 index 000000000..dc0d6a370 --- /dev/null +++ b/roles/openshift_service_catalog/tasks/main.yml @@ -0,0 +1,8 @@ +--- +# do any asserts here + +- include: install.yml + when: not openshift_service_catalog_remove | default(false) | bool + +- include: remove.yml + when: openshift_service_catalog_remove | default(false) | bool diff --git a/roles/openshift_service_catalog/tasks/remove.yml b/roles/openshift_service_catalog/tasks/remove.yml new file mode 100644 index 000000000..2fb1ec440 --- /dev/null +++ b/roles/openshift_service_catalog/tasks/remove.yml @@ -0,0 +1,56 @@ +--- +- name: Remove Service Catalog APIServer + command: > + oc delete apiservices.apiregistration.k8s.io/v1alpha1.servicecatalog.k8s.io --ignore-not-found -n kube-service-catalog + +- name: Remove Policy Binding + command: > + oc delete policybindings/kube-system:default -n kube-system --ignore-not-found + +# TODO: this module doesn't currently remove this +#- name: Remove service catalog api service +# oc_obj: +# state: absent +# namespace: "kube-service-catalog" +# kind: apiservices.apiregistration.k8s.io +# name: v1alpha1.servicecatalog.k8s.io + +- name: Remove Service Catalog API Server 
route + oc_obj: + state: absent + namespace: "kube-service-catalog" + kind: route + name: apiserver + +- name: Remove Service Catalog API Server service + oc_obj: + state: absent + namespace: "kube-service-catalog" + kind: service + name: apiserver + +- name: Remove Service Catalog API Server daemonset + oc_obj: + state: absent + namespace: "kube-service-catalog" + kind: daemonset + name: apiserver + +- name: Remove Controller Manager service + oc_obj: + state: absent + namespace: "kube-service-catalog" + kind: service + name: controller-manager + +- name: Remove Controller Manager deployment + oc_obj: + state: absent + namespace: "kube-service-catalog" + kind: deployment + name: controller-manager + +- name: Remove Service Catalog namespace + oc_project: + state: absent + name: "kube-service-catalog" diff --git a/roles/openshift_service_catalog/tasks/start_api_server.yml b/roles/openshift_service_catalog/tasks/start_api_server.yml new file mode 100644 index 000000000..b143292b6 --- /dev/null +++ b/roles/openshift_service_catalog/tasks/start_api_server.yml @@ -0,0 +1,22 @@ +--- +# Label nodes and wait for apiserver and controller to be running (at least one) +- name: Label {{ openshift.node.nodename }} for APIServer and controller deployment + oc_label: + name: "{{ openshift.node.nodename }}" + kind: node + state: add + labels: "{{ openshift_service_catalog_nodeselector | default ({'openshift-infra': 'apiserver'}) | oo_dict_to_list_of_dict }}" + +# wait to see that the apiserver is available +- name: wait for api server to be ready + command: > + curl -k https://apiserver.kube-service-catalog.svc/healthz + args: + # Disables the following warning: + # Consider using get_url or uri module rather than running curl + warn: no + register: api_health + until: api_health.stdout == 'ok' + retries: 120 + delay: 1 + changed_when: false diff --git a/roles/openshift_service_catalog/tasks/wire_aggregator.yml b/roles/openshift_service_catalog/tasks/wire_aggregator.yml new file mode 100644 index 000000000..3e5897ba4 --- /dev/null +++ b/roles/openshift_service_catalog/tasks/wire_aggregator.yml @@ -0,0 +1,86 @@ +--- +# TODO: this currently has a bug where hostnames are required +- name: Creating Aggregator signer certs + command: > + oc adm ca create-signer-cert + --cert=/etc/origin/master/front-proxy-ca.crt + --key=/etc/origin/master/front-proxy-ca.key + --serial=/etc/origin/master/ca.serial.txt +# oc_adm_ca_server_cert: +# cert: /etc/origin/master/front-proxy-ca.crt +# key: /etc/origin/master/front-proxy-ca.key + +- name: Create api-client config for Aggregator + command: > + oc adm create-api-client-config + --certificate-authority=/etc/origin/master/front-proxy-ca.crt + --signer-cert=/etc/origin/master/front-proxy-ca.crt + --signer-key=/etc/origin/master/front-proxy-ca.key + --user aggregator-front-proxy + --client-dir=/etc/origin/master + --signer-serial=/etc/origin/master/ca.serial.txt + +- name: Update master config + yedit: + state: present + src: /etc/origin/master/master-config.yaml + edits: + - key: aggregatorConfig.proxyClientInfo.certFile + value: aggregator-front-proxy.crt + - key: aggregatorConfig.proxyClientInfo.keyFile + value: aggregator-front-proxy.key + - key: authConfig.requestHeader.clientCA + value: front-proxy-ca.crt + - key: authConfig.requestHeader.clientCommonNames + value: [aggregator-front-proxy] + - key: authConfig.requestHeader.usernameHeaders + value: [X-Remote-User] + - key: authConfig.requestHeader.groupHeaders + value: [X-Remote-Group] + - key: 
authConfig.requestHeader.extraHeaderPrefixes + value: [X-Remote-Extra-] + register: yedit_output + +#restart master serially here +- name: restart master + systemd: name={{ openshift.common.service_type }}-master state=restarted + when: + - yedit_output.changed + - openshift.master.ha is not defined or not openshift.master.ha | bool + +- name: restart master api + systemd: name={{ openshift.common.service_type }}-master-api state=restarted + when: + - yedit_output.changed + - openshift.master.ha is defined and openshift.master.ha | bool + - openshift.master.cluster_method == 'native' + +- name: restart master controllers + systemd: name={{ openshift.common.service_type }}-master-controllers state=restarted + when: + - yedit_output.changed + - openshift.master.ha is defined and openshift.master.ha | bool + - openshift.master.cluster_method == 'native' + +- name: Verify API Server + # Using curl here since the uri module requires python-httplib2 and + # wait_for port doesn't provide health information. + command: > + curl --silent --tlsv1.2 + {% if openshift.common.version_gte_3_2_or_1_2 | bool %} + --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt + {% else %} + --cacert {{ openshift.common.config_base }}/master/ca.crt + {% endif %} + {{ openshift.master.api_url }}/healthz/ready + args: + # Disables the following warning: + # Consider using get_url or uri module rather than running curl + warn: no + register: api_available_output + until: api_available_output.stdout == 'ok' + retries: 120 + delay: 1 + changed_when: false + when: + - yedit_output.changed diff --git a/roles/openshift_service_catalog/templates/api_server.j2 b/roles/openshift_service_catalog/templates/api_server.j2 new file mode 100644 index 000000000..8ae6b6c8d --- /dev/null +++ b/roles/openshift_service_catalog/templates/api_server.j2 @@ -0,0 +1,80 @@ +apiVersion: extensions/v1beta1 +kind: DaemonSet +metadata: + labels: + app: apiserver + name: apiserver +spec: + selector: + matchLabels: + app: apiserver + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + labels: + app: apiserver + spec: + serviceAccountName: service-catalog-apiserver + nodeSelector: +{% for key, value in node_selector.iteritems() %} + {{key}}: "{{value}}" +{% endfor %} + containers: + - args: + - --storage-type + - etcd + - --secure-port + - "6443" + - --etcd-servers +# TODO: come back and get openshift.common.hostname to work + - https://{{ openshift.common.ip }}:{{ openshift.master.etcd_port }} + - --etcd-cafile + - /etc/origin/master/master.etcd-ca.crt + - --etcd-certfile + - /etc/origin/master/master.etcd-client.crt + - --etcd-keyfile + - /etc/origin/master/master.etcd-client.key + - -v + - "10" + - --cors-allowed-origins + - {{ cors_allowed_origin }} + - --admission-control + - "KubernetesNamespaceLifecycle" + image: {{ openshift_service_catalog_image_prefix }}service-catalog:{{ openshift_service_catalog_image_version }} + command: ["/usr/bin/apiserver"] + imagePullPolicy: Always + name: apiserver + ports: + - containerPort: 6443 + protocol: TCP + resources: {} + terminationMessagePath: /dev/termination-log + volumeMounts: + - mountPath: /var/run/kubernetes-service-catalog + name: apiserver-ssl + readOnly: true + - mountPath: /etc/origin/master + name: etcd-host-cert + readOnly: true + dnsPolicy: ClusterFirst + restartPolicy: Always + securityContext: {} + terminationGracePeriodSeconds: 30 + volumes: + - name: apiserver-ssl + secret: + defaultMode: 420 + secretName: apiserver-ssl + items: 
+ - key: tls.crt + path: apiserver.crt + - key: tls.key + path: apiserver.key + - hostPath: + path: /etc/origin/master + name: etcd-host-cert + - emptyDir: {} + name: data-dir diff --git a/roles/openshift_service_catalog/templates/api_server_route.j2 b/roles/openshift_service_catalog/templates/api_server_route.j2 new file mode 100644 index 000000000..3c3da254d --- /dev/null +++ b/roles/openshift_service_catalog/templates/api_server_route.j2 @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Route +metadata: + name: apiserver +spec: + port: + targetPort: secure + tls: + termination: passthrough + to: + kind: Service + name: apiserver + weight: 100 + wildcardPolicy: None diff --git a/roles/openshift_service_catalog/templates/api_server_service.j2 b/roles/openshift_service_catalog/templates/api_server_service.j2 new file mode 100644 index 000000000..bae337201 --- /dev/null +++ b/roles/openshift_service_catalog/templates/api_server_service.j2 @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Service +metadata: + name: apiserver +spec: + ports: + - name: secure + port: 443 + protocol: TCP + targetPort: 6443 + selector: + app: apiserver + sessionAffinity: None diff --git a/roles/openshift_service_catalog/templates/controller_manager.j2 b/roles/openshift_service_catalog/templates/controller_manager.j2 new file mode 100644 index 000000000..33932eeb7 --- /dev/null +++ b/roles/openshift_service_catalog/templates/controller_manager.j2 @@ -0,0 +1,46 @@ +apiVersion: extensions/v1beta1 +kind: DaemonSet +metadata: + labels: + app: controller-manager + name: controller-manager +spec: + selector: + matchLabels: + app: controller-manager + strategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + labels: + app: controller-manager + spec: + nodeSelector: +{% for key, value in node_selector.iteritems() %} + {{key}}: "{{value}}" +{% endfor %} + containers: + - env: + - name: K8S_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + args: + - -v + - "5" + - "--leader-election-namespace=$(K8S_NAMESPACE)" + image: {{ openshift_service_catalog_image_prefix }}service-catalog:{{ openshift_service_catalog_image_version }} + command: ["/usr/bin/controller-manager"] + imagePullPolicy: Always + name: controller-manager + ports: + - containerPort: 8080 + protocol: TCP + resources: {} + terminationMessagePath: /dev/termination-log + dnsPolicy: ClusterFirst + restartPolicy: Always + securityContext: {} + terminationGracePeriodSeconds: 30 diff --git a/roles/openshift_service_catalog/templates/controller_manager_service.j2 b/roles/openshift_service_catalog/templates/controller_manager_service.j2 new file mode 100644 index 000000000..2bac645fc --- /dev/null +++ b/roles/openshift_service_catalog/templates/controller_manager_service.j2 @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Service +metadata: + name: controller-manager +spec: + ports: + - port: 6443 + protocol: TCP + targetPort: 6443 + selector: + app: controller-manager + sessionAffinity: None + type: ClusterIP diff --git a/roles/openshift_service_catalog/vars/default_images.yml b/roles/openshift_service_catalog/vars/default_images.yml new file mode 100644 index 000000000..6fb9d1b86 --- /dev/null +++ b/roles/openshift_service_catalog/vars/default_images.yml @@ -0,0 +1,3 @@ +--- +__openshift_service_catalog_image_prefix: "docker.io/openshift/origin-" +__openshift_service_catalog_image_version: "latest" diff --git a/roles/openshift_service_catalog/vars/openshift-enterprise.yml b/roles/openshift_service_catalog/vars/openshift-enterprise.yml new 
file mode 100644 index 000000000..8c3f14485 --- /dev/null +++ b/roles/openshift_service_catalog/vars/openshift-enterprise.yml @@ -0,0 +1,3 @@ +--- +__openshift_service_catalog_image_prefix: "registry.access.redhat.com/openshift3/" +__openshift_service_catalog_image_version: "3.6.0" diff --git a/roles/openshift_storage_glusterfs/README.md b/roles/openshift_storage_glusterfs/README.md index 7b310dbf8..da4e348b4 100644 --- a/roles/openshift_storage_glusterfs/README.md +++ b/roles/openshift_storage_glusterfs/README.md @@ -1,7 +1,31 @@ OpenShift GlusterFS Cluster =========================== -OpenShift GlusterFS Cluster Installation +OpenShift GlusterFS Cluster Configuration + +This role handles the configuration of GlusterFS clusters. It can handle +two primary configuration scenarios: + +* Configuring a new, natively-hosted GlusterFS cluster. In this scenario, + GlusterFS pods are deployed on nodes in the OpenShift cluster which are + configured to provide storage. +* Configuring a new, external GlusterFS cluster. In this scenario, the + cluster nodes have the GlusterFS software pre-installed but have not + been configured yet. The installer will take care of configuring the + cluster(s) for use by OpenShift applications. +* Using existing GlusterFS clusters. In this scenario, one or more + GlusterFS clusters are assumed to be already setup. These clusters can + be either natively-hosted or external, but must be managed by a + [heketi service](https://github.com/heketi/heketi). + +As part of the configuration, a particular GlusterFS cluster may be +specified to provide backend storage for a natively-hosted Docker +registry. + +Unless configured otherwise, a StorageClass will be automatically +created for each non-registry GlusterFS cluster. This will allow +applications which can mount PersistentVolumes to request +dynamically-provisioned GlusterFS volumes. Requirements ------------ @@ -21,28 +45,53 @@ hosted Docker registry: * `[glusterfs_registry]` +Host Variables +-------------- + +For configuring new clusters, the following role variables are available. + +Each host in either of the above groups must have the following variable +defined: + +| Name | Default value | Description | +|-------------------|---------------|-----------------------------------------| +| glusterfs_devices | None | A list of block devices that will be completely managed as part of a GlusterFS cluster. There must be at least one device listed. Each device must be bare, e.g. no partitions or LVM PVs. **Example:** '[ "/dev/sdb" ]' + +In addition, each host may specify the following variables to further control +their configuration as GlusterFS nodes: + +| Name | Default value | Description | +|--------------------|---------------------------|-----------------------------------------| +| glusterfs_cluster | 1 | The ID of the cluster this node should belong to. This is useful when a single heketi service is expected to manage multiple distinct clusters. **NOTE:** For natively-hosted clusters, all pods will be in the same OpenShift namespace +| glusterfs_hostname | openshift.common.hostname | A hostname (or IP address) that will be used for internal GlusterFS communication +| glusterfs_ip | openshift.common.ip | An IP address that will be used by pods to communicate with the GlusterFS node +| glusterfs_zone | 1 | A zone number for the node. Zones are used within the cluster for determining how to distribute the bricks of GlusterFS volumes. 
heketi will try to spread each volumes' bricks as evenly as possible across all zones + Role Variables -------------- This role has the following variables that control the integration of a GlusterFS cluster into a new or existing OpenShift cluster: -| Name | Default value | | +| Name | Default value | Description | |--------------------------------------------------|-------------------------|-----------------------------------------| | openshift_storage_glusterfs_timeout | 300 | Seconds to wait for pods to become ready | openshift_storage_glusterfs_namespace | 'default' | Namespace in which to create GlusterFS resources | openshift_storage_glusterfs_is_native | True | GlusterFS should be containerized -| openshift_storage_glusterfs_nodeselector | 'storagenode=glusterfs' | Selector to determine which nodes will host GlusterFS pods in native mode +| openshift_storage_glusterfs_name | 'storage' | A name to identify the GlusterFS cluster, which will be used in resource names +| openshift_storage_glusterfs_nodeselector | 'glusterfs=storage-host'| Selector to determine which nodes will host GlusterFS pods in native mode. **NOTE:** The label value is taken from the cluster name +| openshift_storage_glusterfs_storageclass | True | Automatically create a StorageClass for each GlusterFS cluster | openshift_storage_glusterfs_image | 'gluster/gluster-centos'| Container image to use for GlusterFS pods, enterprise default is 'rhgs3/rhgs-server-rhel7' | openshift_storage_glusterfs_version | 'latest' | Container image version to use for GlusterFS pods | openshift_storage_glusterfs_wipe | False | Destroy any existing GlusterFS resources and wipe storage devices. **WARNING: THIS WILL DESTROY ANY DATA ON THOSE DEVICES.** | openshift_storage_glusterfs_heketi_is_native | True | heketi should be containerized | openshift_storage_glusterfs_heketi_image | 'heketi/heketi' | Container image to use for heketi pods, enterprise default is 'rhgs3/rhgs-volmanager-rhel7' | openshift_storage_glusterfs_heketi_version | 'latest' | Container image version to use for heketi pods -| openshift_storage_glusterfs_heketi_admin_key | '' | String to use as secret key for performing heketi commands as admin -| openshift_storage_glusterfs_heketi_user_key | '' | String to use as secret key for performing heketi commands as user that can only view or modify volumes +| openshift_storage_glusterfs_heketi_admin_key | auto-generated | String to use as secret key for performing heketi commands as admin +| openshift_storage_glusterfs_heketi_user_key | auto-generated | String to use as secret key for performing heketi commands as user that can only view or modify volumes | openshift_storage_glusterfs_heketi_topology_load | True | Load the GlusterFS topology information into heketi -| openshift_storage_glusterfs_heketi_url | Undefined | URL for the heketi REST API, dynamically determined in native mode +| openshift_storage_glusterfs_heketi_url | Undefined | When heketi is native, this sets the hostname portion of the final heketi route URL. When heketi is external, this is the full URL to the heketi service. +| openshift_storage_glusterfs_heketi_port | 8080 | TCP port for external heketi service **NOTE:** This has no effect in native mode | openshift_storage_glusterfs_heketi_wipe | False | Destroy any existing heketi resources, defaults to the value of `openshift_storage_glusterfs_wipe` Each role variable also has a corresponding variable to optionally configure a @@ -52,17 +101,24 @@ registry. 
These variables start with the prefix values in their corresponding non-registry variables. The following variables are an exception: -| Name | Default value | | -|---------------------------------------------------|-----------------------|-----------------------------------------| -| openshift_storage_glusterfs_registry_namespace | registry namespace | Default is to use the hosted registry's namespace, otherwise 'default' -| openshift_storage_glusterfs_registry_nodeselector | 'storagenode=registry'| This allows for the logical separation of the registry GlusterFS cluster from any regular-use GlusterFS clusters +| Name | Default value | Description | +|-------------------------------------------------------|-----------------------|-----------------------------------------| +| openshift_storage_glusterfs_registry_namespace | registry namespace | Default is to use the hosted registry's namespace, otherwise 'default' +| openshift_storage_glusterfs_registry_name | 'registry' | This allows for the logical separation of the registry GlusterFS cluster from other GlusterFS clusters +| openshift_storage_glusterfs_registry_storageclass | False | It is recommended to not create a StorageClass for GlusterFS clusters serving registry storage, so as to avoid performance penalties +| openshift_storage_glusterfs_registry_heketi_admin_key | auto-generated | Separate from the above +| openshift_storage_glusterfs_registry_heketi_user_key | auto-generated | Separate from the above Additionally, this role's behavior responds to the following registry-specific -variable: - -| Name | Default value | Description | -|----------------------------------------------|---------------|------------------------------------------------------------------------------| -| openshift_hosted_registry_glusterfs_swap | False | Whether to swap an existing registry's storage volume for a GlusterFS volume | +variables: + +| Name | Default value | Description | +|-----------------------------------------------|------------------------------|-----------------------------------------| +| openshift_hosted_registry_glusterfs_endpoints | glusterfs-registry-endpoints | The name for the Endpoints resource that will point the registry to the GlusterFS nodes +| openshift_hosted_registry_glusterfs_path | glusterfs-registry-volume | The name for the GlusterFS volume that will provide registry storage +| openshift_hosted_registry_glusterfs_readonly | False | Whether the GlusterFS volume should be read-only +| openshift_hosted_registry_glusterfs_swap | False | Whether to swap an existing registry's storage volume for a GlusterFS volume +| openshift_hosted_registry_glusterfs_swapcopy | True | If swapping, copy the contents of the pre-existing registry storage to the new GlusterFS volume Dependencies ------------ diff --git a/roles/openshift_storage_glusterfs/defaults/main.yml b/roles/openshift_storage_glusterfs/defaults/main.yml index ebe9ca30b..4ff56af9e 100644 --- a/roles/openshift_storage_glusterfs/defaults/main.yml +++ b/roles/openshift_storage_glusterfs/defaults/main.yml @@ -2,7 +2,9 @@ openshift_storage_glusterfs_timeout: 300 openshift_storage_glusterfs_namespace: 'default' openshift_storage_glusterfs_is_native: True -openshift_storage_glusterfs_nodeselector: 'storagenode=glusterfs' +openshift_storage_glusterfs_name: 'storage' +openshift_storage_glusterfs_nodeselector: "glusterfs={{ openshift_storage_glusterfs_name }}-host" +openshift_storage_glusterfs_storageclass: True openshift_storage_glusterfs_image: "{{ 'rhgs3/rhgs-server-rhel7' | quote 
if deployment_type == 'openshift-enterprise' else 'gluster/gluster-centos' | quote }}" openshift_storage_glusterfs_version: 'latest' openshift_storage_glusterfs_wipe: False @@ -11,16 +13,19 @@ openshift_storage_glusterfs_heketi_is_missing: True openshift_storage_glusterfs_heketi_deploy_is_missing: True openshift_storage_glusterfs_heketi_image: "{{ 'rhgs3/rhgs-volmanager-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'heketi/heketi' | quote }}" openshift_storage_glusterfs_heketi_version: 'latest' -openshift_storage_glusterfs_heketi_admin_key: '' -openshift_storage_glusterfs_heketi_user_key: '' +openshift_storage_glusterfs_heketi_admin_key: "{{ omit }}" +openshift_storage_glusterfs_heketi_user_key: "{{ omit }}" openshift_storage_glusterfs_heketi_topology_load: True openshift_storage_glusterfs_heketi_wipe: "{{ openshift_storage_glusterfs_wipe }}" openshift_storage_glusterfs_heketi_url: "{{ omit }}" +openshift_storage_glusterfs_heketi_port: 8080 openshift_storage_glusterfs_registry_timeout: "{{ openshift_storage_glusterfs_timeout }}" openshift_storage_glusterfs_registry_namespace: "{{ openshift.hosted.registry.namespace | default('default') }}" openshift_storage_glusterfs_registry_is_native: "{{ openshift_storage_glusterfs_is_native }}" -openshift_storage_glusterfs_registry_nodeselector: 'storagenode=registry' +openshift_storage_glusterfs_registry_name: 'registry' +openshift_storage_glusterfs_registry_nodeselector: "glusterfs={{ openshift_storage_glusterfs_registry_name }}-host" +openshift_storage_glusterfs_registry_storageclass: False openshift_storage_glusterfs_registry_image: "{{ openshift_storage_glusterfs_image }}" openshift_storage_glusterfs_registry_version: "{{ openshift_storage_glusterfs_version }}" openshift_storage_glusterfs_registry_wipe: "{{ openshift_storage_glusterfs_wipe }}" @@ -29,8 +34,9 @@ openshift_storage_glusterfs_registry_heketi_is_missing: "{{ openshift_storage_gl openshift_storage_glusterfs_registry_heketi_deploy_is_missing: "{{ openshift_storage_glusterfs_heketi_deploy_is_missing }}" openshift_storage_glusterfs_registry_heketi_image: "{{ openshift_storage_glusterfs_heketi_image }}" openshift_storage_glusterfs_registry_heketi_version: "{{ openshift_storage_glusterfs_heketi_version }}" -openshift_storage_glusterfs_registry_heketi_admin_key: "{{ openshift_storage_glusterfs_heketi_admin_key }}" -openshift_storage_glusterfs_registry_heketi_user_key: "{{ openshift_storage_glusterfs_heketi_user_key }}" +openshift_storage_glusterfs_registry_heketi_admin_key: "{{ omit }}" +openshift_storage_glusterfs_registry_heketi_user_key: "{{ omit }}" openshift_storage_glusterfs_registry_heketi_topology_load: "{{ openshift_storage_glusterfs_heketi_topology_load }}" openshift_storage_glusterfs_registry_heketi_wipe: "{{ openshift_storage_glusterfs_heketi_wipe }}" openshift_storage_glusterfs_registry_heketi_url: "{{ openshift_storage_glusterfs_heketi_url | default(omit) }}" +openshift_storage_glusterfs_registry_heketi_port: 8080 diff --git a/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml index c9945be13..4434f750c 100644 --- a/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml +++ b/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml @@ -9,49 +9,47 @@ metadata: annotations: description: Bootstrap Heketi installation tags: glusterfs,heketi,installation -labels: - template: deploy-heketi objects: - kind: Service apiVersion: v1 metadata: - 
name: deploy-heketi + name: deploy-heketi-${CLUSTER_NAME} labels: - glusterfs: deploy-heketi-service + glusterfs: deploy-heketi-${CLUSTER_NAME}-service deploy-heketi: support annotations: description: Exposes Heketi service spec: ports: - - name: deploy-heketi + - name: deploy-heketi-${CLUSTER_NAME} port: 8080 targetPort: 8080 selector: - name: deploy-heketi + glusterfs: deploy-heketi-${CLUSTER_NAME}-pod - kind: Route apiVersion: v1 metadata: - name: deploy-heketi + name: ${HEKETI_ROUTE} labels: - glusterfs: deploy-heketi-route + glusterfs: deploy-heketi-${CLUSTER_NAME}-route deploy-heketi: support spec: to: kind: Service - name: deploy-heketi + name: deploy-heketi-${CLUSTER_NAME} - kind: DeploymentConfig apiVersion: v1 metadata: - name: deploy-heketi + name: deploy-heketi-${CLUSTER_NAME} labels: - glusterfs: deploy-heketi-dc + glusterfs: deploy-heketi-${CLUSTER_NAME}-dc deploy-heketi: support annotations: description: Defines how to deploy Heketi spec: replicas: 1 selector: - name: deploy-heketi + glusterfs: deploy-heketi-${CLUSTER_NAME}-pod triggers: - type: ConfigChange strategy: @@ -60,13 +58,12 @@ objects: metadata: name: deploy-heketi labels: - name: deploy-heketi - glusterfs: deploy-heketi-pod + glusterfs: deploy-heketi-${CLUSTER_NAME}-pod deploy-heketi: support spec: - serviceAccountName: heketi-service-account + serviceAccountName: heketi-${CLUSTER_NAME}-service-account containers: - - name: deploy-heketi + - name: heketi image: ${IMAGE_NAME}:${IMAGE_VERSION} env: - name: HEKETI_USER_KEY @@ -81,11 +78,15 @@ objects: value: '14' - name: HEKETI_KUBE_GLUSTER_DAEMONSET value: '1' + - name: HEKETI_KUBE_NAMESPACE + value: ${HEKETI_KUBE_NAMESPACE} ports: - containerPort: 8080 volumeMounts: - name: db mountPath: /var/lib/heketi + - name: topology + mountPath: ${TOPOLOGY_PATH} readinessProbe: timeoutSeconds: 3 initialDelaySeconds: 3 @@ -100,6 +101,9 @@ objects: port: 8080 volumes: - name: db + - name: topology + secret: + secretName: heketi-${CLUSTER_NAME}-topology-secret parameters: - name: HEKETI_USER_KEY displayName: Heketi User Secret @@ -107,9 +111,24 @@ parameters: - name: HEKETI_ADMIN_KEY displayName: Heketi Administrator Secret description: Set secret for administration of the Heketi service as user _admin_ +- name: HEKETI_KUBE_NAMESPACE + displayName: Namespace + description: Set the namespace where the GlusterFS pods reside + value: default +- name: HEKETI_ROUTE + displayName: heketi route name + description: Set the hostname for the route URL + value: "heketi-glusterfs" - name: IMAGE_NAME - displayName: GlusterFS container name + displayName: heketi container image name required: True - name: IMAGE_VERSION - displayName: GlusterFS container versiona + displayName: heketi container image version + required: True +- name: CLUSTER_NAME + displayName: GlusterFS cluster name + description: A unique name to identify this heketi service, useful for running multiple heketi instances + value: glusterfs +- name: TOPOLOGY_PATH + displayName: heketi topology file location required: True diff --git a/roles/openshift_storage_glusterfs/files/v3.6/glusterfs-template.yml b/roles/openshift_storage_glusterfs/files/v3.6/glusterfs-template.yml index c66705752..8c5e1ded3 100644 --- a/roles/openshift_storage_glusterfs/files/v3.6/glusterfs-template.yml +++ b/roles/openshift_storage_glusterfs/files/v3.6/glusterfs-template.yml @@ -12,24 +12,24 @@ objects: - kind: DaemonSet apiVersion: extensions/v1beta1 metadata: - name: glusterfs + name: glusterfs-${CLUSTER_NAME} labels: - glusterfs: daemonset + 
glusterfs: ${CLUSTER_NAME}-daemonset annotations: description: GlusterFS DaemonSet tags: glusterfs spec: selector: matchLabels: - glusterfs-node: pod + glusterfs: ${CLUSTER_NAME}-pod template: metadata: - name: glusterfs + name: glusterfs-${CLUSTER_NAME} labels: + glusterfs: ${CLUSTER_NAME}-pod glusterfs-node: pod spec: - nodeSelector: - storagenode: glusterfs + nodeSelector: "${{NODE_LABELS}}" hostNetwork: true containers: - name: glusterfs @@ -63,26 +63,26 @@ objects: privileged: true readinessProbe: timeoutSeconds: 3 - initialDelaySeconds: 100 + initialDelaySeconds: 40 exec: command: - "/bin/bash" - "-c" - systemctl status glusterd.service - periodSeconds: 10 + periodSeconds: 25 successThreshold: 1 - failureThreshold: 3 + failureThreshold: 15 livenessProbe: timeoutSeconds: 3 - initialDelaySeconds: 100 + initialDelaySeconds: 40 exec: command: - "/bin/bash" - "-c" - systemctl status glusterd.service - periodSeconds: 10 + periodSeconds: 25 successThreshold: 1 - failureThreshold: 3 + failureThreshold: 15 resources: {} terminationMessagePath: "/dev/termination-log" volumes: @@ -120,9 +120,17 @@ objects: dnsPolicy: ClusterFirst securityContext: {} parameters: +- name: NODE_LABELS + displayName: Daemonset Node Labels + description: Labels which define the daemonset node selector. Must contain at least one label of the format \'glusterfs=<CLUSTER_NAME>-host\' + value: '{ "glusterfs": "storage-host" }' - name: IMAGE_NAME - displayName: GlusterFS container name + displayName: GlusterFS container image name required: True - name: IMAGE_VERSION - displayName: GlusterFS container versiona + displayName: GlusterFS container image version required: True +- name: CLUSTER_NAME + displayName: GlusterFS cluster name + description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances + value: storage diff --git a/roles/openshift_storage_glusterfs/files/v3.6/heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.6/heketi-template.yml index df045c170..e3fa0a9fb 100644 --- a/roles/openshift_storage_glusterfs/files/v3.6/heketi-template.yml +++ b/roles/openshift_storage_glusterfs/files/v3.6/heketi-template.yml @@ -8,15 +8,13 @@ metadata: annotations: description: Heketi service deployment template tags: glusterfs,heketi -labels: - template: heketi objects: - kind: Service apiVersion: v1 metadata: - name: heketi + name: heketi-${CLUSTER_NAME} labels: - glusterfs: heketi-service + glusterfs: heketi-${CLUSTER_NAME}-service annotations: description: Exposes Heketi service spec: @@ -25,40 +23,40 @@ objects: port: 8080 targetPort: 8080 selector: - glusterfs: heketi-pod + glusterfs: heketi-${CLUSTER_NAME}-pod - kind: Route apiVersion: v1 metadata: - name: heketi + name: ${HEKETI_ROUTE} labels: - glusterfs: heketi-route + glusterfs: heketi-${CLUSTER_NAME}-route spec: to: kind: Service - name: heketi + name: heketi-${CLUSTER_NAME} - kind: DeploymentConfig apiVersion: v1 metadata: - name: heketi + name: heketi-${CLUSTER_NAME} labels: - glusterfs: heketi-dc + glusterfs: heketi-${CLUSTER_NAME}-dc annotations: description: Defines how to deploy Heketi spec: replicas: 1 selector: - glusterfs: heketi-pod + glusterfs: heketi-${CLUSTER_NAME}-pod triggers: - type: ConfigChange strategy: type: Recreate template: metadata: - name: heketi + name: heketi-${CLUSTER_NAME} labels: - glusterfs: heketi-pod + glusterfs: heketi-${CLUSTER_NAME}-pod spec: - serviceAccountName: heketi-service-account + serviceAccountName: heketi-${CLUSTER_NAME}-service-account containers: 
- name: heketi image: ${IMAGE_NAME}:${IMAGE_VERSION} @@ -76,6 +74,8 @@ objects: value: '14' - name: HEKETI_KUBE_GLUSTER_DAEMONSET value: '1' + - name: HEKETI_KUBE_NAMESPACE + value: ${HEKETI_KUBE_NAMESPACE} ports: - containerPort: 8080 volumeMounts: @@ -96,7 +96,7 @@ objects: volumes: - name: db glusterfs: - endpoints: heketi-storage-endpoints + endpoints: heketi-db-${CLUSTER_NAME}-endpoints path: heketidbstorage parameters: - name: HEKETI_USER_KEY @@ -105,9 +105,21 @@ parameters: - name: HEKETI_ADMIN_KEY displayName: Heketi Administrator Secret description: Set secret for administration of the Heketi service as user _admin_ +- name: HEKETI_KUBE_NAMESPACE + displayName: Namespace + description: Set the namespace where the GlusterFS pods reside + value: default +- name: HEKETI_ROUTE + displayName: heketi route name + description: Set the hostname for the route URL + value: "heketi-glusterfs" - name: IMAGE_NAME - displayName: GlusterFS container name + displayName: heketi container image name required: True - name: IMAGE_VERSION - displayName: GlusterFS container versiona + displayName: heketi container image version required: True +- name: CLUSTER_NAME + displayName: GlusterFS cluster name + description: A unique name to identify this heketi service, useful for running multiple heketi instances + value: glusterfs diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml index fa5fa2cb0..4406ef28b 100644 --- a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml +++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml @@ -5,12 +5,6 @@ name: "{{ glusterfs_namespace }}" when: glusterfs_is_native or glusterfs_heketi_is_native -- include: glusterfs_deploy.yml - when: glusterfs_is_native - -- name: Make sure heketi-client is installed - package: name=heketi-client state=present - - name: Delete pre-existing heketi resources oc_obj: namespace: "{{ glusterfs_namespace }}" @@ -21,12 +15,18 @@ with_items: - kind: "template,route,service,dc,jobs,secret" selector: "deploy-heketi" - - kind: "template,route,service,dc" - name: "heketi" - - kind: "svc,ep" + - kind: "svc" name: "heketi-storage-endpoints" + - kind: "secret" + name: "heketi-{{ glusterfs_name }}-topology-secret" + - kind: "template,route,service,dc" + name: "heketi-{{ glusterfs_name }}" + - kind: "svc" + name: "heketi-db-{{ glusterfs_name }}-endpoints" - kind: "sa" - name: "heketi-service-account" + name: "heketi-{{ glusterfs_name }}-service-account" + - kind: "secret" + name: "heketi-{{ glusterfs_name }}-user-secret" failed_when: False when: glusterfs_heketi_wipe @@ -35,11 +35,11 @@ namespace: "{{ glusterfs_namespace }}" kind: pod state: list - selector: "glusterfs=deploy-heketi-pod" + selector: "glusterfs=deploy-heketi-{{ glusterfs_name }}-pod" register: heketi_pod until: "heketi_pod.results.results[0]['items'] | count == 0" delay: 10 - retries: "{{ (glusterfs_timeout / 10) | int }}" + retries: "{{ (glusterfs_timeout | int / 10) | int }}" when: glusterfs_heketi_wipe - name: Wait for heketi pods to terminate @@ -47,23 +47,26 @@ namespace: "{{ glusterfs_namespace }}" kind: pod state: list - selector: "glusterfs=heketi-pod" + selector: "glusterfs=heketi-{{ glusterfs_name }}-pod" register: heketi_pod until: "heketi_pod.results.results[0]['items'] | count == 0" delay: 10 - retries: "{{ (glusterfs_timeout / 10) | int }}" + retries: "{{ (glusterfs_timeout | int / 10) | int }}" when: glusterfs_heketi_wipe +- include: glusterfs_deploy.yml + when: 
glusterfs_is_native + - name: Create heketi service account oc_serviceaccount: namespace: "{{ glusterfs_namespace }}" - name: heketi-service-account + name: "heketi-{{ glusterfs_name }}-service-account" state: present when: glusterfs_heketi_is_native - name: Add heketi service account to privileged SCC oc_adm_policy_user: - user: "system:serviceaccount:{{ glusterfs_namespace }}:heketi-service-account" + user: "system:serviceaccount:{{ glusterfs_namespace }}:heketi-{{ glusterfs_name }}-service-account" resource_kind: scc resource_name: privileged state: present @@ -71,7 +74,7 @@ - name: Allow heketi service account to view/edit pods oc_adm_policy_user: - user: "system:serviceaccount:{{ glusterfs_namespace }}:heketi-service-account" + user: "system:serviceaccount:{{ glusterfs_namespace }}:heketi-{{ glusterfs_name }}-service-account" resource_kind: role resource_name: edit state: present @@ -82,7 +85,7 @@ namespace: "{{ glusterfs_namespace }}" state: list kind: pod - selector: "glusterfs=deploy-heketi-pod,deploy-heketi=support" + selector: "glusterfs=deploy-heketi-{{ glusterfs_name }}-pod" register: heketi_pod when: glusterfs_heketi_is_native @@ -100,7 +103,7 @@ namespace: "{{ glusterfs_namespace }}" state: list kind: pod - selector: "glusterfs=heketi-pod" + selector: "glusterfs=heketi-{{ glusterfs_name }}-pod" register: heketi_pod when: glusterfs_heketi_is_native @@ -113,48 +116,46 @@ # heketi is not missing when there are one or more pods with matching labels whose 'Ready' status is True - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0" -- include: heketi_deploy_part1.yml +- name: Generate topology file + template: + src: "{{ openshift.common.examples_content_version }}/topology.json.j2" + dest: "{{ mktemp.stdout }}/topology.json" when: - - glusterfs_heketi_is_native - - glusterfs_heketi_deploy_is_missing - - glusterfs_heketi_is_missing + - glusterfs_heketi_topology_load -- name: Determine heketi URL - oc_obj: - namespace: "{{ glusterfs_namespace }}" - state: list - kind: ep - selector: "glusterfs in (deploy-heketi-service, heketi-service)" - register: heketi_url - until: - - "heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip != ''" - - "heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port != ''" - delay: 10 - retries: "{{ (glusterfs_timeout / 10) | int }}" +- name: Generate heketi admin key + set_fact: + glusterfs_heketi_admin_key: "{{ 32 | oo_generate_secret }}" when: - glusterfs_heketi_is_native - - glusterfs_heketi_url is undefined + - glusterfs_heketi_admin_key is undefined -- name: Set heketi URL +- name: Generate heketi user key set_fact: - glusterfs_heketi_url: "{{ heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip }}:{{ heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port }}" + glusterfs_heketi_user_key: "{{ 32 | oo_generate_secret }}" + until: "glusterfs_heketi_user_key != glusterfs_heketi_admin_key" + delay: 1 + retries: 10 when: - glusterfs_heketi_is_native - - glusterfs_heketi_url is undefined + - glusterfs_heketi_user_key is undefined + +- include: heketi_deploy_part1.yml + when: + - glusterfs_heketi_is_native + - glusterfs_heketi_deploy_is_missing + - glusterfs_heketi_is_missing + +- name: Set heketi-cli command + set_fact: + glusterfs_heketi_client: "{% if glusterfs_heketi_is_native %}oc rsh {{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} {% endif 
%}heketi-cli -s http://{% if glusterfs_heketi_is_native %}localhost:8080{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %} --user admin --secret '{{ glusterfs_heketi_admin_key }}'" - name: Verify heketi service - command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' cluster list" + command: "{{ glusterfs_heketi_client }} cluster list" changed_when: False -- name: Generate topology file - template: - src: "{{ openshift.common.examples_content_version }}/topology.json.j2" - dest: "{{ mktemp.stdout }}/topology.json" - when: - - glusterfs_heketi_topology_load - - name: Load heketi topology - command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' topology load --json={{ mktemp.stdout }}/topology.json 2>&1" + command: "{{ glusterfs_heketi_client }} topology load --json={{ mktemp.stdout }}/topology.json 2>&1" register: topology_load failed_when: "topology_load.rc != 0 or 'Unable' in topology_load.stdout" when: @@ -164,3 +165,51 @@ when: - glusterfs_heketi_is_native - glusterfs_heketi_is_missing + +- name: Create heketi secret + oc_secret: + namespace: "{{ glusterfs_namespace }}" + state: present + name: "heketi-{{ glusterfs_name }}-secret" + type: "kubernetes.io/glusterfs" + force: True + contents: + - path: key + data: "{{ glusterfs_heketi_admin_key }}" + when: + - glusterfs_storageclass + +- name: Get heketi route + oc_obj: + namespace: "{{ glusterfs_namespace }}" + kind: route + state: list + name: "heketi-{{ glusterfs_name }}" + register: heketi_route + when: + - glusterfs_storageclass + - glusterfs_heketi_is_native + +- name: Determine StorageClass heketi URL + set_fact: + glusterfs_heketi_route: "{{ heketi_route.results.results[0]['spec']['host'] }}" + when: + - glusterfs_storageclass + - glusterfs_heketi_is_native + +- name: Generate GlusterFS StorageClass file + template: + src: "{{ openshift.common.examples_content_version }}/glusterfs-storageclass.yml.j2" + dest: "{{ mktemp.stdout }}/glusterfs-storageclass.yml" + when: + - glusterfs_storageclass + +- name: Create GlusterFS StorageClass + oc_obj: + state: present + kind: storageclass + name: "glusterfs-{{ glusterfs_name }}" + files: + - "{{ mktemp.stdout }}/glusterfs-storageclass.yml" + when: + - glusterfs_storageclass diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml index 451990240..dbfe126a4 100644 --- a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml +++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml @@ -3,7 +3,9 @@ glusterfs_timeout: "{{ openshift_storage_glusterfs_timeout }}" glusterfs_namespace: "{{ openshift_storage_glusterfs_namespace }}" glusterfs_is_native: "{{ openshift_storage_glusterfs_is_native }}" - glusterfs_nodeselector: "{{ openshift_storage_glusterfs_nodeselector | map_from_pairs }}" + glusterfs_name: "{{ openshift_storage_glusterfs_name }}" + glusterfs_nodeselector: "{{ openshift_storage_glusterfs_nodeselector | default(['storagenode', openshift_storage_glusterfs_name] | join('=')) | map_from_pairs }}" + glusterfs_storageclass: "{{ openshift_storage_glusterfs_storageclass }}" glusterfs_image: "{{ openshift_storage_glusterfs_image }}" glusterfs_version: "{{ openshift_storage_glusterfs_version }}" glusterfs_wipe: "{{ openshift_storage_glusterfs_wipe }}" @@ -17,6 +19,7 @@ glusterfs_heketi_topology_load: "{{ openshift_storage_glusterfs_heketi_topology_load }}" 
glusterfs_heketi_wipe: "{{ openshift_storage_glusterfs_heketi_wipe }}" glusterfs_heketi_url: "{{ openshift_storage_glusterfs_heketi_url }}" - glusterfs_nodes: "{{ g_glusterfs_hosts }}" + glusterfs_heketi_port: "{{ openshift_storage_glusterfs_heketi_port }}" + glusterfs_nodes: "{{ groups.glusterfs }}" - include: glusterfs_common.yml diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml index 579112349..ea4dcc510 100644 --- a/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml +++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml @@ -1,23 +1,24 @@ --- - assert: - that: "glusterfs_nodeselector.keys() | count == 1" - msg: Only one GlusterFS nodeselector key pair should be provided - -- assert: that: "glusterfs_nodes | count >= 3" msg: There must be at least three GlusterFS nodes specified - name: Delete pre-existing GlusterFS resources oc_obj: namespace: "{{ glusterfs_namespace }}" - kind: "template,daemonset" - name: glusterfs + kind: "{{ item.kind }}" + name: "{{ item.name }}" state: absent + with_items: + - kind: template + name: glusterfs + - kind: daemonset + name: "glusterfs-{{ glusterfs_name }}" when: glusterfs_wipe - name: Unlabel any existing GlusterFS nodes oc_label: - name: "{{ item }}" + name: "{{ hostvars[item].openshift.common.hostname }}" kind: node state: absent labels: "{{ glusterfs_nodeselector | oo_dict_to_list_of_dict }}" @@ -40,11 +41,16 @@ failed_when: False when: glusterfs_wipe - # Runs "vgremove -fy <vg>; pvremove -fy <pv>" for every device found to be a physical volume. + # Runs "lvremove -ff <vg>; vgremove -fy <vg>; pvremove -fy <pv>" for every device found to be a physical volume. - name: Clear GlusterFS storage device contents - shell: "{% for line in item.stdout_lines %}{% set fields = line.split() %}{% if fields | count > 1 %}vgremove -fy {{ fields[1] }}; {% endif %}pvremove -fy {{ fields[0] }}; {% endfor %}" + shell: "{% for line in item.stdout_lines %}{% set fields = line.split() %}{% if fields | count > 1 %}lvremove -ff {{ fields[1] }}; vgremove -fy {{ fields[1] }}; {% endif %}pvremove -fy {{ fields[0] }}; {% endfor %}" delegate_to: "{{ item.item }}" with_items: "{{ devices_info.results }}" + register: clear_devices + until: + - "'contains a filesystem in use' not in clear_devices.stderr" + delay: 1 + retries: 30 when: - glusterfs_wipe - item.stdout_lines | count > 0 @@ -61,13 +67,11 @@ - name: Label GlusterFS nodes oc_label: - name: "{{ glusterfs_host }}" + name: "{{ hostvars[item].openshift.common.hostname }}" kind: node state: add labels: "{{ glusterfs_nodeselector | oo_dict_to_list_of_dict }}" with_items: "{{ glusterfs_nodes | default([]) }}" - loop_control: - loop_var: glusterfs_host - name: Copy GlusterFS DaemonSet template copy: @@ -78,7 +82,7 @@ oc_obj: namespace: "{{ glusterfs_namespace }}" kind: template - name: glusterfs + name: "glusterfs" state: present files: - "{{ mktemp.stdout }}/glusterfs-template.yml" @@ -91,17 +95,19 @@ params: IMAGE_NAME: "{{ glusterfs_image }}" IMAGE_VERSION: "{{ glusterfs_version }}" + NODE_LABELS: "{{ glusterfs_nodeselector }}" + CLUSTER_NAME: "{{ glusterfs_name }}" - name: Wait for GlusterFS pods oc_obj: namespace: "{{ glusterfs_namespace }}" kind: pod state: list - selector: "glusterfs-node=pod" + selector: "glusterfs={{ glusterfs_name }}-pod" register: glusterfs_pods until: - "glusterfs_pods.results.results[0]['items'] | count > 0" # There must be as many pods with 'Ready' staus True as there are nodes expecting 
those pods - "glusterfs_pods.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == glusterfs_nodes | count" delay: 10 - retries: "{{ (glusterfs_timeout / 10) | int }}" + retries: "{{ (glusterfs_timeout | int / 10) | int }}" diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml index 392f4b65b..0849f2a2e 100644 --- a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml +++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml @@ -3,7 +3,9 @@ glusterfs_timeout: "{{ openshift_storage_glusterfs_registry_timeout }}" glusterfs_namespace: "{{ openshift_storage_glusterfs_registry_namespace }}" glusterfs_is_native: "{{ openshift_storage_glusterfs_registry_is_native }}" - glusterfs_nodeselector: "{{ openshift_storage_glusterfs_registry_nodeselector | map_from_pairs }}" + glusterfs_name: "{{ openshift_storage_glusterfs_registry_name }}" + glusterfs_nodeselector: "{{ openshift_storage_glusterfs_registry_nodeselector | default(['storagenode', openshift_storage_glusterfs_registry_name] | join('=')) | map_from_pairs }}" + glusterfs_storageclass: "{{ openshift_storage_glusterfs_registry_storageclass }}" glusterfs_image: "{{ openshift_storage_glusterfs_registry_image }}" glusterfs_version: "{{ openshift_storage_glusterfs_registry_version }}" glusterfs_wipe: "{{ openshift_storage_glusterfs_registry_wipe }}" @@ -17,21 +19,23 @@ glusterfs_heketi_topology_load: "{{ openshift_storage_glusterfs_registry_heketi_topology_load }}" glusterfs_heketi_wipe: "{{ openshift_storage_glusterfs_registry_heketi_wipe }}" glusterfs_heketi_url: "{{ openshift_storage_glusterfs_registry_heketi_url }}" - glusterfs_nodes: "{{ g_glusterfs_registry_hosts }}" + glusterfs_heketi_port: "{{ openshift_storage_glusterfs_registry_heketi_port }}" + glusterfs_nodes: "{{ groups.glusterfs_registry | default(groups.glusterfs) }}" - include: glusterfs_common.yml - when: g_glusterfs_registry_hosts != g_glusterfs_hosts + when: + - glusterfs_nodes | default([]) | count > 0 + - "'glusterfs' not in groups or glusterfs_nodes != groups.glusterfs" - name: Delete pre-existing GlusterFS registry resources oc_obj: namespace: "{{ glusterfs_namespace }}" kind: "{{ item.kind }}" - name: "{{ item.name | default(omit) }}" - selector: "{{ item.selector | default(omit) }}" + name: "{{ item.name }}" state: absent with_items: - - kind: "svc,ep" - name: "glusterfs-registry-endpoints" + - kind: "svc" + name: "glusterfs-{{ glusterfs_name }}-endpoints" failed_when: False - name: Generate GlusterFS registry endpoints @@ -40,8 +44,8 @@ dest: "{{ mktemp.stdout }}/glusterfs-registry-endpoints.yml" - name: Copy GlusterFS registry service - copy: - src: "{{ openshift.common.examples_content_version }}/glusterfs-registry-service.yml" + template: + src: "{{ openshift.common.examples_content_version }}/glusterfs-registry-service.yml.j2" dest: "{{ mktemp.stdout }}/glusterfs-registry-service.yml" - name: Create GlusterFS registry endpoints @@ -49,7 +53,7 @@ namespace: "{{ glusterfs_namespace }}" state: present kind: endpoints - name: glusterfs-registry-endpoints + name: "glusterfs-{{ glusterfs_name }}-endpoints" files: - "{{ mktemp.stdout }}/glusterfs-registry-endpoints.yml" @@ -58,14 +62,14 @@ namespace: "{{ glusterfs_namespace }}" state: present kind: service - name: glusterfs-registry-endpoints + name: "glusterfs-{{ glusterfs_name }}-endpoints" files: - 
"{{ mktemp.stdout }}/glusterfs-registry-service.yml" - name: Check if GlusterFS registry volume exists - command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' volume list" + command: "{{ glusterfs_heketi_client }} volume list" register: registry_volume - name: Create GlusterFS registry volume - command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' volume create --size={{ openshift.hosted.registry.storage.volume.size | replace('Gi','') }} --name={{ openshift.hosted.registry.storage.glusterfs.path }}" + command: "{{ glusterfs_heketi_client }} volume create --size={{ openshift.hosted.registry.storage.volume.size | replace('Gi','') }} --name={{ openshift.hosted.registry.storage.glusterfs.path }}" when: "openshift.hosted.registry.storage.glusterfs.path not in registry_volume.stdout" diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml index c14fcfb15..ea9b1fe1f 100644 --- a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml +++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml @@ -6,11 +6,21 @@ with_items: - "deploy-heketi-template.yml" -- name: Create deploy-heketi resources +- name: Create heketi topology secret + oc_secret: + namespace: "{{ glusterfs_namespace }}" + state: present + name: "heketi-{{ glusterfs_name }}-topology-secret" + force: True + files: + - name: topology.json + path: "{{ mktemp.stdout }}/topology.json" + +- name: Create deploy-heketi template oc_obj: namespace: "{{ glusterfs_namespace }}" kind: template - name: deploy-heketi + name: "deploy-heketi" state: present files: - "{{ mktemp.stdout }}/deploy-heketi-template.yml" @@ -23,19 +33,23 @@ params: IMAGE_NAME: "{{ glusterfs_heketi_image }}" IMAGE_VERSION: "{{ glusterfs_heketi_version }}" + HEKETI_ROUTE: "{{ glusterfs_heketi_url | default(['heketi-',glusterfs_name]|join) }}" HEKETI_USER_KEY: "{{ glusterfs_heketi_user_key }}" HEKETI_ADMIN_KEY: "{{ glusterfs_heketi_admin_key }}" + HEKETI_KUBE_NAMESPACE: "{{ glusterfs_namespace }}" + CLUSTER_NAME: "{{ glusterfs_name }}" + TOPOLOGY_PATH: "{{ mktemp.stdout }}" - name: Wait for deploy-heketi pod oc_obj: namespace: "{{ glusterfs_namespace }}" kind: pod state: list - selector: "glusterfs=deploy-heketi-pod,deploy-heketi=support" + selector: "glusterfs=deploy-heketi-{{ glusterfs_name }}-pod" register: heketi_pod until: - "heketi_pod.results.results[0]['items'] | count > 0" # Pod's 'Ready' status must be True - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1" delay: 10 - retries: "{{ (glusterfs_timeout / 10) | int }}" + retries: "{{ (glusterfs_timeout | int / 10) | int }}" diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml index 64410a9ab..26343b909 100644 --- a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml +++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml @@ -1,8 +1,10 @@ --- - name: Create heketi DB volume - command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' setup-openshift-heketi-storage --listfile {{ mktemp.stdout }}/heketi-storage.json" + command: "{{ glusterfs_heketi_client }} setup-openshift-heketi-storage 
--listfile /tmp/heketi-storage.json" register: setup_storage - failed_when: False + +- name: Copy heketi-storage list + shell: "{{ openshift.common.client_binary }} rsh {{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} cat /tmp/heketi-storage.json > {{ mktemp.stdout }}/heketi-storage.json" # This is used in the subsequent task - name: Copy the admin client config @@ -28,7 +30,7 @@ # Pod's 'Complete' status must be True - "heketi_job.results.results | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Complete'}) | map('bool') | select | list | count == 1" delay: 10 - retries: "{{ (glusterfs_timeout / 10) | int }}" + retries: "{{ (glusterfs_timeout | int / 10) | int }}" failed_when: - "'results' in heketi_job.results" - "heketi_job.results.results | count > 0" @@ -46,14 +48,45 @@ with_items: - kind: "template,route,service,jobs,dc,secret" selector: "deploy-heketi" - failed_when: False + - kind: "svc" + name: "heketi-storage-endpoints" + - kind: "secret" + name: "heketi-{{ glusterfs_name }}-topology-secret" + +- name: Generate heketi endpoints + template: + src: "{{ openshift.common.examples_content_version }}/heketi-endpoints.yml.j2" + dest: "{{ mktemp.stdout }}/heketi-endpoints.yml" + +- name: Generate heketi service + template: + src: "{{ openshift.common.examples_content_version }}/heketi-service.yml.j2" + dest: "{{ mktemp.stdout }}/heketi-service.yml" + +- name: Create heketi endpoints + oc_obj: + namespace: "{{ glusterfs_namespace }}" + state: present + kind: endpoints + name: "heketi-db-{{ glusterfs_name }}-endpoints" + files: + - "{{ mktemp.stdout }}/heketi-endpoints.yml" + +- name: Create heketi service + oc_obj: + namespace: "{{ glusterfs_namespace }}" + state: present + kind: service + name: "heketi-db-{{ glusterfs_name }}-endpoints" + files: + - "{{ mktemp.stdout }}/heketi-service.yml" - name: Copy heketi template copy: src: "{{ openshift.common.examples_content_version }}/heketi-template.yml" dest: "{{ mktemp.stdout }}/heketi-template.yml" -- name: Create heketi resources +- name: Create heketi template oc_obj: namespace: "{{ glusterfs_namespace }}" kind: template @@ -70,40 +103,30 @@ params: IMAGE_NAME: "{{ glusterfs_heketi_image }}" IMAGE_VERSION: "{{ glusterfs_heketi_version }}" + HEKETI_ROUTE: "{{ glusterfs_heketi_url | default(['heketi-',glusterfs_name]|join) }}" HEKETI_USER_KEY: "{{ glusterfs_heketi_user_key }}" HEKETI_ADMIN_KEY: "{{ glusterfs_heketi_admin_key }}" + HEKETI_KUBE_NAMESPACE: "{{ glusterfs_namespace }}" + CLUSTER_NAME: "{{ glusterfs_name }}" - name: Wait for heketi pod oc_obj: namespace: "{{ glusterfs_namespace }}" kind: pod state: list - selector: "glusterfs=heketi-pod" + selector: "glusterfs=heketi-{{ glusterfs_name }}-pod" register: heketi_pod until: - "heketi_pod.results.results[0]['items'] | count > 0" # Pod's 'Ready' status must be True - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1" delay: 10 - retries: "{{ (glusterfs_timeout / 10) | int }}" - -- name: Determine heketi URL - oc_obj: - namespace: "{{ glusterfs_namespace }}" - state: list - kind: ep - selector: "glusterfs=heketi-service" - register: heketi_url - until: - - "heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip != ''" - - "heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port != ''" - delay: 10 - retries: "{{ (glusterfs_timeout / 10) | int }}" + retries: "{{ 
(glusterfs_timeout | int / 10) | int }}" -- name: Set heketi URL +- name: Set heketi-cli command set_fact: - glusterfs_heketi_url: "{{ heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip }}:{{ heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port }}" + glusterfs_heketi_client: "oc rsh {{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} heketi-cli -s http://localhost:8080 --user admin --secret '{{ glusterfs_heketi_admin_key }}'" - name: Verify heketi service - command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' cluster list" + command: "{{ glusterfs_heketi_client }} cluster list" changed_when: False diff --git a/roles/openshift_storage_glusterfs/tasks/main.yml b/roles/openshift_storage_glusterfs/tasks/main.yml index ebd8db453..d2d8c6c10 100644 --- a/roles/openshift_storage_glusterfs/tasks/main.yml +++ b/roles/openshift_storage_glusterfs/tasks/main.yml @@ -7,12 +7,11 @@ - include: glusterfs_config.yml when: - - g_glusterfs_hosts | default([]) | count > 0 + - groups.glusterfs | default([]) | count > 0 - include: glusterfs_registry.yml when: - - g_glusterfs_registry_hosts | default([]) | count > 0 - - "openshift.hosted.registry.storage.kind == 'glusterfs' or openshift.hosted.registry.glusterfs.swap" + - "groups.glusterfs_registry | default([]) | count > 0 or openshift.hosted.registry.storage.kind == 'glusterfs' or openshift.hosted.registry.storage.glusterfs.swap" - name: Delete temp directory file: diff --git a/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-endpoints.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-endpoints.yml.j2 index 605627ab5..11c9195bb 100644 --- a/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-endpoints.yml.j2 +++ b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-endpoints.yml.j2 @@ -1,7 +1,8 @@ +--- apiVersion: v1 kind: Endpoints metadata: - name: glusterfs-registry-endpoints + name: glusterfs-{{ glusterfs_name }}-endpoints subsets: - addresses: {% for node in glusterfs_nodes %} diff --git a/roles/openshift_storage_glusterfs/files/v3.6/glusterfs-registry-service.yml b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-service.yml.j2 index 3f8d8f507..3f869d2b7 100644 --- a/roles/openshift_storage_glusterfs/files/v3.6/glusterfs-registry-service.yml +++ b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-service.yml.j2 @@ -2,7 +2,7 @@ apiVersion: v1 kind: Service metadata: - name: glusterfs-registry-endpoints + name: glusterfs-{{ glusterfs_name }}-endpoints spec: ports: - port: 1 diff --git a/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2 new file mode 100644 index 000000000..5ea801e60 --- /dev/null +++ b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2 @@ -0,0 +1,11 @@ +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: glusterfs-{{ glusterfs_name }} +provisioner: kubernetes.io/glusterfs +parameters: + resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}" + restuser: "admin" + secretNamespace: "{{ glusterfs_namespace }}" + secretName: "heketi-{{ glusterfs_name }}-secret" diff --git a/roles/openshift_storage_glusterfs/templates/v3.6/heketi-endpoints.yml.j2 
b/roles/openshift_storage_glusterfs/templates/v3.6/heketi-endpoints.yml.j2 new file mode 100644 index 000000000..99cbdf748 --- /dev/null +++ b/roles/openshift_storage_glusterfs/templates/v3.6/heketi-endpoints.yml.j2 @@ -0,0 +1,12 @@ +--- +apiVersion: v1 +kind: Endpoints +metadata: + name: heketi-db-{{ glusterfs_name }}-endpoints +subsets: +- addresses: +{% for node in glusterfs_nodes %} + - ip: {{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }} +{% endfor %} + ports: + - port: 1 diff --git a/roles/openshift_storage_glusterfs/templates/v3.6/heketi-service.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.6/heketi-service.yml.j2 new file mode 100644 index 000000000..dcb896441 --- /dev/null +++ b/roles/openshift_storage_glusterfs/templates/v3.6/heketi-service.yml.j2 @@ -0,0 +1,10 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: heketi-db-{{ glusterfs_name }}-endpoints +spec: + ports: + - port: 1 +status: + loadBalancer: {} diff --git a/roles/openshift_version/tasks/main.yml b/roles/openshift_version/tasks/main.yml index 16792388f..f4cb8ddb2 100644 --- a/roles/openshift_version/tasks/main.yml +++ b/roles/openshift_version/tasks/main.yml @@ -84,115 +84,119 @@ - openshift_version is not defined - openshift_protect_installed_version | bool -- name: Set openshift_version for rpm installation - include: set_version_rpm.yml - when: not is_containerized | bool - -- name: Set openshift_version for containerized installation - include: set_version_containerized.yml - when: is_containerized | bool - -- block: - - name: Get available {{ openshift.common.service_type}} version - repoquery: - name: "{{ openshift.common.service_type}}" - ignore_excluders: true - register: rpm_results - - fail: - msg: "Package {{ openshift.common.service_type}} not found" - when: not rpm_results.results.package_found - - set_fact: - openshift_rpm_version: "{{ rpm_results.results.versions.available_versions.0 | default('0.0', True) }}" - - name: Fail if rpm version and docker image version are different - fail: - msg: "OCP rpm version {{ openshift_rpm_version }} is different from OCP image version {{ openshift_version }}" - # Both versions have the same string representation +# The rest of these tasks should only execute on +# masters and nodes as we can verify they have subscriptions +- when: + - inventory_hostname in groups['oo_masters_to_config'] or inventory_hostname in groups['oo_nodes_to_config'] + block: + - name: Set openshift_version for rpm installation + include: set_version_rpm.yml + when: not is_containerized | bool + + - name: Set openshift_version for containerized installation + include: set_version_containerized.yml + when: is_containerized | bool + + - block: + - name: Get available {{ openshift.common.service_type}} version + repoquery: + name: "{{ openshift.common.service_type}}" + ignore_excluders: true + register: rpm_results + - fail: + msg: "Package {{ openshift.common.service_type}} not found" + when: not rpm_results.results.package_found + - set_fact: + openshift_rpm_version: "{{ rpm_results.results.versions.available_versions.0 | default('0.0', True) }}" + - name: Fail if rpm version and docker image version are different + fail: + msg: "OCP rpm version {{ openshift_rpm_version }} is different from OCP image version {{ openshift_version }}" + # Both versions have the same string representation + when: + - openshift_rpm_version != openshift_version + # if openshift_pkg_version or openshift_image_tag is defined, user gives a permission the rpm and docker 
image versions can differ + - openshift_pkg_version is not defined + - openshift_image_tag is not defined when: - - openshift_rpm_version != openshift_version - # if openshift_pkg_version or openshift_image_tag is defined, user gives a permission the rpm and docker image versions can differ - - openshift_pkg_version is not defined - - openshift_image_tag is not defined - when: - - is_containerized | bool - - not is_atomic | bool - -# Warn if the user has provided an openshift_image_tag but is not doing a containerized install -# NOTE: This will need to be modified/removed for future container + rpm installations work. -- name: Warn if openshift_image_tag is defined when not doing a containerized install - debug: - msg: > - openshift_image_tag is used for containerized installs. If you are trying to - specify an image for a non-container install see oreg_url or oreg_url_master or oreg_url_node. - when: - - not is_containerized | bool - - openshift_image_tag is defined - + - is_containerized | bool + - not is_atomic | bool + + # Warn if the user has provided an openshift_image_tag but is not doing a containerized install + # NOTE: This will need to be modified/removed for future container + rpm installations work. + - name: Warn if openshift_image_tag is defined when not doing a containerized install + debug: + msg: > + openshift_image_tag is used for containerized installs. If you are trying to + specify an image for a non-container install see oreg_url or oreg_url_master or oreg_url_node. + when: + - not is_containerized | bool + - openshift_image_tag is defined -# At this point we know openshift_version is set appropriately. Now we set -# openshift_image_tag and openshift_pkg_version, so all roles can always assume -# each of this variables *will* be set correctly and can use them per their -# intended purpose. + # At this point we know openshift_version is set appropriately. Now we set + # openshift_image_tag and openshift_pkg_version, so all roles can always assume + # each of this variables *will* be set correctly and can use them per their + # intended purpose. -- block: - - debug: - msg: "openshift_image_tag was not defined. Falling back to v{{ openshift_version }}" + - block: + - debug: + msg: "openshift_image_tag was not defined. Falling back to v{{ openshift_version }}" - - set_fact: - openshift_image_tag: v{{ openshift_version }} + - set_fact: + openshift_image_tag: v{{ openshift_version }} - when: openshift_image_tag is not defined + when: openshift_image_tag is not defined -- block: - - debug: - msg: "openshift_pkg_version was not defined. Falling back to -{{ openshift_version }}" + - block: + - debug: + msg: "openshift_pkg_version was not defined. 
Falling back to -{{ openshift_version }}" - - set_fact: - openshift_pkg_version: -{{ openshift_version }} + - set_fact: + openshift_pkg_version: -{{ openshift_version }} - when: openshift_pkg_version is not defined + when: openshift_pkg_version is not defined -- fail: - msg: openshift_version role was unable to set openshift_version - name: Abort if openshift_version was not set - when: openshift_version is not defined + - fail: + msg: openshift_version role was unable to set openshift_version + name: Abort if openshift_version was not set + when: openshift_version is not defined -- fail: - msg: openshift_version role was unable to set openshift_image_tag - name: Abort if openshift_image_tag was not set - when: openshift_image_tag is not defined + - fail: + msg: openshift_version role was unable to set openshift_image_tag + name: Abort if openshift_image_tag was not set + when: openshift_image_tag is not defined -- fail: - msg: openshift_version role was unable to set openshift_pkg_version - name: Abort if openshift_pkg_version was not set - when: openshift_pkg_version is not defined + - fail: + msg: openshift_version role was unable to set openshift_pkg_version + name: Abort if openshift_pkg_version was not set + when: openshift_pkg_version is not defined -- fail: - msg: "No OpenShift version available; please ensure your systems are fully registered and have access to appropriate yum repositories." - name: Abort if openshift_pkg_version was not set - when: - - not is_containerized | bool - - openshift_version == '0.0' + - fail: + msg: "No OpenShift version available; please ensure your systems are fully registered and have access to appropriate yum repositories." + name: Abort if openshift_pkg_version was not set + when: + - not is_containerized | bool + - openshift_version == '0.0' -# We can't map an openshift_release to full rpm version like we can with containers; make sure -# the rpm version we looked up matches the release requested and error out if not. -- name: For an RPM install, abort when the release requested does not match the available version. - when: - - not is_containerized | bool - - openshift_release is defined - assert: - that: - - openshift_version.startswith(openshift_release) | bool - msg: |- - You requested openshift_release {{ openshift_release }}, which is not matched by - the latest OpenShift RPM we detected as {{ openshift.common.service_type }}-{{ openshift_version }} - on host {{ inventory_hostname }}. - We will only install the latest RPMs, so please ensure you are getting the release - you expect. You may need to adjust your Ansible inventory, modify the repositories - available on the host, or run the appropriate OpenShift upgrade playbook. + # We can't map an openshift_release to full rpm version like we can with containers; make sure + # the rpm version we looked up matches the release requested and error out if not. + - name: For an RPM install, abort when the release requested does not match the available version. + when: + - not is_containerized | bool + - openshift_release is defined + assert: + that: + - openshift_version.startswith(openshift_release) | bool + msg: |- + You requested openshift_release {{ openshift_release }}, which is not matched by + the latest OpenShift RPM we detected as {{ openshift.common.service_type }}-{{ openshift_version }} + on host {{ inventory_hostname }}. + We will only install the latest RPMs, so please ensure you are getting the release + you expect. 
You may need to adjust your Ansible inventory, modify the repositories + available on the host, or run the appropriate OpenShift upgrade playbook. -# The end result of these three variables is quite important so make sure they are displayed and logged: -- debug: var=openshift_release + # The end result of these three variables is quite important so make sure they are displayed and logged: + - debug: var=openshift_release -- debug: var=openshift_image_tag + - debug: var=openshift_image_tag -- debug: var=openshift_pkg_version + - debug: var=openshift_pkg_version diff --git a/test/integration/openshift_health_checker/setup_container.yml b/test/integration/openshift_health_checker/setup_container.yml index 8793d954e..33e94cf1f 100644 --- a/test/integration/openshift_health_checker/setup_container.yml +++ b/test/integration/openshift_health_checker/setup_container.yml @@ -43,3 +43,6 @@ delegate_facts: True delegate_to: "{{ container_name }}" with_dict: "{{ l_host_vars | default({}) }}" + +- include: ../../../playbooks/byo/openshift-cluster/initialize_groups.yml +- include: ../../../playbooks/common/openshift-cluster/evaluate_groups.yml
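A closing illustration of the dynamic-provisioning path described in the GlusterFS README above: with the role's defaults, a StorageClass named `glusterfs-storage` ("glusterfs-" plus the cluster name) is created, and an application could then request a dynamically provisioned volume with a claim along these lines. The claim name and size are placeholders, and exact field support depends on the cluster version:

```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: example-app-data                 # placeholder claim name
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: glusterfs-storage    # "glusterfs-" + openshift_storage_glusterfs_name
  resources:
    requests:
      storage: 5Gi                       # placeholder size
```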