From 2bf65cdab4aa88f160d005d3b7649b22a6dceba8 Mon Sep 17 00:00:00 2001 From: Russell Teague Date: Fri, 8 Dec 2017 08:42:32 -0500 Subject: Cleanup byo references --- .papr.sh | 2 +- ansible.cfg | 4 +- images/installer/root/exports/manifest.json | 2 +- inventory/.gitignore | 1 + inventory/README.md | 6 +- inventory/byo/.gitignore | 1 - inventory/byo/hosts.byo.glusterfs.external.example | 60 -- inventory/byo/hosts.byo.glusterfs.mixed.example | 63 -- inventory/byo/hosts.byo.glusterfs.native.example | 50 - .../byo/hosts.byo.glusterfs.registry-only.example | 56 - ...osts.byo.glusterfs.storage-and-registry.example | 67 -- inventory/byo/hosts.example | 1089 -------------------- inventory/byo/hosts.openstack | 37 - inventory/hosts.example | 1089 ++++++++++++++++++++ inventory/hosts.glusterfs.external.example | 60 ++ inventory/hosts.glusterfs.mixed.example | 63 ++ inventory/hosts.glusterfs.native.example | 50 + inventory/hosts.glusterfs.registry-only.example | 56 + .../hosts.glusterfs.storage-and-registry.example | 67 ++ inventory/hosts.openstack | 37 + openshift-ansible.spec | 104 +- playbooks/aws/README.md | 4 +- playbooks/byo/config.yml | 3 - playbooks/openshift-logging/config.yml | 2 +- .../private/redeploy-openshift-ca.yml | 2 +- playbooks/openstack/README.md | 2 +- playbooks/openstack/advanced-configuration.md | 4 +- roles/openshift_health_checker/HOWTO_CHECKS.md | 2 +- roles/openshift_management/README.md | 10 +- roles/openshift_management/defaults/main.yml | 2 +- setup.py | 50 +- 31 files changed, 1516 insertions(+), 1529 deletions(-) create mode 100644 inventory/.gitignore delete mode 100644 inventory/byo/.gitignore delete mode 100644 inventory/byo/hosts.byo.glusterfs.external.example delete mode 100644 inventory/byo/hosts.byo.glusterfs.mixed.example delete mode 100644 inventory/byo/hosts.byo.glusterfs.native.example delete mode 100644 inventory/byo/hosts.byo.glusterfs.registry-only.example delete mode 100644 inventory/byo/hosts.byo.glusterfs.storage-and-registry.example delete mode 100644 inventory/byo/hosts.example delete mode 100644 inventory/byo/hosts.openstack create mode 100644 inventory/hosts.example create mode 100644 inventory/hosts.glusterfs.external.example create mode 100644 inventory/hosts.glusterfs.mixed.example create mode 100644 inventory/hosts.glusterfs.native.example create mode 100644 inventory/hosts.glusterfs.registry-only.example create mode 100644 inventory/hosts.glusterfs.storage-and-registry.example create mode 100644 inventory/hosts.openstack delete mode 100644 playbooks/byo/config.yml diff --git a/.papr.sh b/.papr.sh index 80453d4b2..c7b06f059 100755 --- a/.papr.sh +++ b/.papr.sh @@ -35,7 +35,7 @@ trap upload_journals ERR # run the actual installer # FIXME: override openshift_image_tag defined in the inventory until # https://github.com/openshift/openshift-ansible/issues/4478 is fixed. 
-ansible-playbook -vvv -i .papr.inventory playbooks/byo/config.yml -e "openshift_image_tag=$OPENSHIFT_IMAGE_TAG" +ansible-playbook -vvv -i .papr.inventory playbooks/deploy_cluster.yml -e "openshift_image_tag=$OPENSHIFT_IMAGE_TAG" ### DISABLING TESTS FOR NOW, SEE: ### https://github.com/openshift/openshift-ansible/pull/6132 diff --git a/ansible.cfg b/ansible.cfg index 9900d28f8..e4d72553e 100644 --- a/ansible.cfg +++ b/ansible.cfg @@ -30,8 +30,8 @@ inventory_ignore_extensions = secrets.py, .pyc, .cfg, .crt # work around privilege escalation timeouts in ansible: timeout = 30 -# Uncomment to use the provided BYO inventory -#inventory = inventory/byo/hosts.example +# Uncomment to use the provided example inventory +#inventory = inventory/hosts.example [inventory] # fail more helpfully when the inventory file does not parse (Ansible 2.4+) diff --git a/images/installer/root/exports/manifest.json b/images/installer/root/exports/manifest.json index 8b984d7a3..53696b03e 100644 --- a/images/installer/root/exports/manifest.json +++ b/images/installer/root/exports/manifest.json @@ -4,7 +4,7 @@ "OPTS": "", "VAR_LIB_OPENSHIFT_INSTALLER" : "/var/lib/openshift-installer", "VAR_LOG_OPENSHIFT_LOG": "/var/log/ansible.log", - "PLAYBOOK_FILE": "/usr/share/ansible/openshift-ansible/playbooks/byo/config.yml", + "PLAYBOOK_FILE": "/usr/share/ansible/openshift-ansible/playbooks/deploy_cluster.yml", "HOME_ROOT": "/root", "ANSIBLE_CONFIG": "/usr/share/atomic-openshift-utils/ansible.cfg", "INVENTORY_FILE": "/dev/null" diff --git a/inventory/.gitignore b/inventory/.gitignore new file mode 100644 index 000000000..6ff331c7e --- /dev/null +++ b/inventory/.gitignore @@ -0,0 +1 @@ +hosts diff --git a/inventory/README.md b/inventory/README.md index 5e26e3c32..2e348194f 100644 --- a/inventory/README.md +++ b/inventory/README.md @@ -1,5 +1 @@ -# OpenShift Ansible inventory config files - -You can install OpenShift on: - -* [BYO](byo/) (Bring your own), use this inventory config file to install OpenShift on your pre-existing hosts +# OpenShift Ansible example inventory config files diff --git a/inventory/byo/.gitignore b/inventory/byo/.gitignore deleted file mode 100644 index 6ff331c7e..000000000 --- a/inventory/byo/.gitignore +++ /dev/null @@ -1 +0,0 @@ -hosts diff --git a/inventory/byo/hosts.byo.glusterfs.external.example b/inventory/byo/hosts.byo.glusterfs.external.example deleted file mode 100644 index acf68266e..000000000 --- a/inventory/byo/hosts.byo.glusterfs.external.example +++ /dev/null @@ -1,60 +0,0 @@ -# This is an example of a bring your own (byo) host inventory for a cluster -# with natively hosted, containerized GlusterFS storage. -# -# This inventory may be used with the byo/config.yml playbook to deploy a new -# cluster with GlusterFS storage, which will use that storage to create a -# volume that will provide backend storage for a hosted Docker registry. -# -# This inventory may also be used with byo/openshift-glusterfs/config.yml to -# deploy GlusterFS storage on an existing cluster. With this playbook, the -# registry backend volume will be created but the administrator must then -# either deploy a hosted registry or change an existing hosted registry to use -# that volume. -# -# There are additional configuration parameters that can be specified to -# control the deployment and state of a GlusterFS cluster. Please see the -# documentation in playbooks/byo/openshift-glusterfs/README.md and -# roles/openshift_storage_glusterfs/README.md for additional details. 
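As a rough usage sketch (the inventory/hosts copy destination is only the convention suggested by ansible.cfg and the new inventory/.gitignore above, and paths assume you run from a checkout of this repository), an inventory derived from one of these examples is copied into place and then passed to the renamed entry-point playbook that this change switches the CI script to:

    cp inventory/hosts.glusterfs.external.example inventory/hosts
    ansible-playbook -i inventory/hosts playbooks/deploy_cluster.yml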
- -[OSEv3:children] -masters -nodes -etcd -# Specify there will be GlusterFS nodes -glusterfs - -[OSEv3:vars] -ansible_ssh_user=root -openshift_deployment_type=origin -# Specify that we want to use an external GlusterFS cluster -openshift_storage_glusterfs_is_native=False -# Specify the IP address or hostname of the external heketi service -openshift_storage_glusterfs_heketi_url=172.0.0.1 - -[masters] -master - -[nodes] -master openshift_schedulable=False -node0 openshift_schedulable=True -node1 openshift_schedulable=True -node2 openshift_schedulable=True - -[etcd] -master - -# Specify the glusterfs group, which contains the nodes of the external -# GlusterFS cluster. At a minimum, each node must have "glusterfs_hostname" -# and "glusterfs_devices" variables defined. -# -# The first variable indicates the hostname of the external GLusterFS node, -# and must be reachable by the external heketi service. -# -# The second variable is a list of block devices the node will have access to -# that are intended solely for use as GlusterFS storage. These block devices -# must be bare (e.g. have no data, not be marked as LVM PVs), and will be -# formatted. -[glusterfs] -node0.local glusterfs_ip='172.0.0.10' glusterfs_devices='[ "/dev/vdb" ]' -node1.local glusterfs_ip='172.0.0.11' glusterfs_devices='[ "/dev/vdb", "/dev/vdc" ]' -node2.local glusterfs_ip='172.0.0.11' glusterfs_devices='[ "/dev/vdd" ]' diff --git a/inventory/byo/hosts.byo.glusterfs.mixed.example b/inventory/byo/hosts.byo.glusterfs.mixed.example deleted file mode 100644 index a559dc377..000000000 --- a/inventory/byo/hosts.byo.glusterfs.mixed.example +++ /dev/null @@ -1,63 +0,0 @@ -# This is an example of a bring your own (byo) host inventory for a cluster -# with natively hosted, containerized GlusterFS storage. -# -# This inventory may be used with the byo/config.yml playbook to deploy a new -# cluster with GlusterFS storage, which will use that storage to create a -# volume that will provide backend storage for a hosted Docker registry. -# -# This inventory may also be used with byo/openshift-glusterfs/config.yml to -# deploy GlusterFS storage on an existing cluster. With this playbook, the -# registry backend volume will be created but the administrator must then -# either deploy a hosted registry or change an existing hosted registry to use -# that volume. -# -# There are additional configuration parameters that can be specified to -# control the deployment and state of a GlusterFS cluster. Please see the -# documentation in playbooks/byo/openshift-glusterfs/README.md and -# roles/openshift_storage_glusterfs/README.md for additional details. - -[OSEv3:children] -masters -nodes -etcd -# Specify there will be GlusterFS nodes -glusterfs - -[OSEv3:vars] -ansible_ssh_user=root -openshift_deployment_type=origin -# Specify that we want to use an external GlusterFS cluster and a native -# heketi service -openshift_storage_glusterfs_is_native=False -openshift_storage_glusterfs_heketi_is_native=True -# Specify that heketi will use SSH to communicate to the GlusterFS nodes and -# the private key file it will use for authentication -openshift_storage_glusterfs_heketi_executor=ssh -openshift_storage_glusterfs_heketi_ssh_keyfile=/root/id_rsa -[masters] -master - -[nodes] -master openshift_schedulable=False -node0 openshift_schedulable=True -node1 openshift_schedulable=True -node2 openshift_schedulable=True - -[etcd] -master - -# Specify the glusterfs group, which contains the nodes of the external -# GlusterFS cluster. 
At a minimum, each node must have "glusterfs_hostname"
-# and "glusterfs_devices" variables defined.
-#
-# The first variable indicates the hostname of the external GlusterFS node,
-# and must be reachable by the external heketi service.
-#
-# The second variable is a list of block devices the node will have access to
-# that are intended solely for use as GlusterFS storage. These block devices
-# must be bare (e.g. have no data, not be marked as LVM PVs), and will be
-# formatted.
-[glusterfs]
-node0.local glusterfs_ip='172.0.0.10' glusterfs_devices='[ "/dev/vdb" ]'
-node1.local glusterfs_ip='172.0.0.11' glusterfs_devices='[ "/dev/vdb", "/dev/vdc" ]'
-node2.local glusterfs_ip='172.0.0.11' glusterfs_devices='[ "/dev/vdd" ]'
diff --git a/inventory/byo/hosts.byo.glusterfs.native.example b/inventory/byo/hosts.byo.glusterfs.native.example
deleted file mode 100644
index ca4765c53..000000000
--- a/inventory/byo/hosts.byo.glusterfs.native.example
+++ /dev/null
@@ -1,50 +0,0 @@
-# This is an example of a bring your own (byo) host inventory for a cluster
-# with natively hosted, containerized GlusterFS storage for applications. It
-# will also automatically create a StorageClass for this purpose.
-#
-# This inventory may be used with the byo/config.yml playbook to deploy a new
-# cluster with GlusterFS storage.
-#
-# This inventory may also be used with byo/openshift-glusterfs/config.yml to
-# deploy GlusterFS storage on an existing cluster.
-#
-# There are additional configuration parameters that can be specified to
-# control the deployment and state of a GlusterFS cluster. Please see the
-# documentation in playbooks/byo/openshift-glusterfs/README.md and
-# roles/openshift_storage_glusterfs/README.md for additional details.
-
-[OSEv3:children]
-masters
-nodes
-etcd
-# Specify there will be GlusterFS nodes
-glusterfs
-
-[OSEv3:vars]
-ansible_ssh_user=root
-openshift_deployment_type=origin
-
-[masters]
-master
-
-[nodes]
-master openshift_schedulable=False
-# A hosted registry, by default, will only be deployed on nodes labeled
-# "region=infra".
-node0 openshift_schedulable=True
-node1 openshift_schedulable=True
-node2 openshift_schedulable=True
-
-[etcd]
-master
-
-# Specify the glusterfs group, which contains the nodes that will host
-# GlusterFS storage pods. At a minimum, each node must have a
-# "glusterfs_devices" variable defined. This variable is a list of block
-# devices the node will have access to that is intended solely for use as
-# GlusterFS storage. These block devices must be bare (e.g. have no data, not
-# be marked as LVM PVs), and will be formatted.
-[glusterfs]
-node0 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
-node1 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
-node2 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
diff --git a/inventory/byo/hosts.byo.glusterfs.registry-only.example b/inventory/byo/hosts.byo.glusterfs.registry-only.example
deleted file mode 100644
index 32040f593..000000000
--- a/inventory/byo/hosts.byo.glusterfs.registry-only.example
+++ /dev/null
@@ -1,56 +0,0 @@
-# This is an example of a bring your own (byo) host inventory for a cluster
-# with natively hosted, containerized GlusterFS storage for exclusive use
-# as storage for a natively hosted Docker registry.
-#
-# This inventory may be used with the byo/config.yml playbook to deploy a new
-# cluster with GlusterFS storage, which will use that storage to create a
-# volume that will provide backend storage for a hosted Docker registry.
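After such an install, a quick sanity check along these lines can confirm the hosted registry picked up the GlusterFS-backed volume (a sketch only; the exact name of the claim the playbook creates varies by release):

    oc get pvc -n default
    oc describe dc docker-registry -n default | grep -A3 -i volumes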
-# -# This inventory may also be used with byo/openshift-glusterfs/registry.yml to -# deploy GlusterFS storage on an existing cluster. With this playbook, the -# registry backend volume will be created but the administrator must then -# either deploy a hosted registry or change an existing hosted registry to use -# that volume. -# -# There are additional configuration parameters that can be specified to -# control the deployment and state of a GlusterFS cluster. Please see the -# documentation in playbooks/byo/openshift-glusterfs/README.md and -# roles/openshift_storage_glusterfs/README.md for additional details. - -[OSEv3:children] -masters -nodes -etcd -# Specify there will be GlusterFS nodes -glusterfs_registry - -[OSEv3:vars] -ansible_ssh_user=root -openshift_deployment_type=origin -# Specify that we want to use GlusterFS storage for a hosted registry -openshift_hosted_registry_storage_kind=glusterfs - -[masters] -master - -[nodes] -master openshift_schedulable=False -# A hosted registry, by default, will only be deployed on nodes labeled -# "region=infra". -node0 openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True -node1 openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True -node2 openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True - -[etcd] -master - -# Specify the glusterfs group, which contains the nodes that will host -# GlusterFS storage pods. At a minimum, each node must have a -# "glusterfs_devices" variable defined. This variable is a list of block -# devices the node will have access to that is intended solely for use as -# GlusterFS storage. These block devices must be bare (e.g. have no data, not -# be marked as LVM PVs), and will be formatted. -[glusterfs_registry] -node0 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]' -node1 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]' -node2 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]' diff --git a/inventory/byo/hosts.byo.glusterfs.storage-and-registry.example b/inventory/byo/hosts.byo.glusterfs.storage-and-registry.example deleted file mode 100644 index 9bd37cbf6..000000000 --- a/inventory/byo/hosts.byo.glusterfs.storage-and-registry.example +++ /dev/null @@ -1,67 +0,0 @@ -# This is an example of a bring your own (byo) host inventory for a cluster -# with natively hosted, containerized GlusterFS storage for both general -# application use and a natively hosted Docker registry. It will also create a -# StorageClass for the general storage. -# -# This inventory may be used with the byo/config.yml playbook to deploy a new -# cluster with GlusterFS storage. -# -# This inventory may also be used with byo/openshift-glusterfs/config.yml to -# deploy GlusterFS storage on an existing cluster. With this playbook, the -# registry backend volume will be created but the administrator must then -# either deploy a hosted registry or change an existing hosted registry to use -# that volume. -# -# There are additional configuration parameters that can be specified to -# control the deployment and state of a GlusterFS cluster. Please see the -# documentation in playbooks/byo/openshift-glusterfs/README.md and -# roles/openshift_storage_glusterfs/README.md for additional details. 
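If you take the second route described above and point an already-deployed registry at the newly created volume, the usual pattern is something like the following (a sketch; substitute the actual PVC name created by the playbook for <claim-name>):

    oc set volume dc/docker-registry -n default --add --overwrite \
        --name=registry-storage -t pvc --claim-name=<claim-name>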
- -[OSEv3:children] -masters -nodes -etcd -# Specify there will be GlusterFS nodes -glusterfs -glusterfs_registry - -[OSEv3:vars] -ansible_ssh_user=root -openshift_deployment_type=origin -# Specify that we want to use GlusterFS storage for a hosted registry -openshift_hosted_registry_storage_kind=glusterfs - -[masters] -master - -[nodes] -master openshift_schedulable=False -# It is recommended to not use a single cluster for both general and registry -# storage, so two three-node clusters will be required. -node0 openshift_schedulable=True -node1 openshift_schedulable=True -node2 openshift_schedulable=True -# A hosted registry, by default, will only be deployed on nodes labeled -# "region=infra". -node3 openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True -node4 openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True -node5 openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True - -[etcd] -master - -# Specify the glusterfs group, which contains the nodes that will host -# GlusterFS storage pods. At a minimum, each node must have a -# "glusterfs_devices" variable defined. This variable is a list of block -# devices the node will have access to that is intended solely for use as -# GlusterFS storage. These block devices must be bare (e.g. have no data, not -# be marked as LVM PVs), and will be formatted. -[glusterfs] -node0 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]' -node1 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]' -node2 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]' - -[glusterfs_registry] -node3 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]' -node4 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]' -node5 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]' diff --git a/inventory/byo/hosts.example b/inventory/byo/hosts.example deleted file mode 100644 index e3b56d7a1..000000000 --- a/inventory/byo/hosts.example +++ /dev/null @@ -1,1089 +0,0 @@ -# This is an example of a bring your own (byo) host inventory - -# Create an OSEv3 group that contains the masters and nodes groups -[OSEv3:children] -masters -nodes -etcd -lb -nfs - -# Set variables common for all OSEv3 hosts -[OSEv3:vars] -# Enable unsupported configurations, things that will yield a partially -# functioning cluster but would not be supported for production use -#openshift_enable_unsupported_configurations=false - -# SSH user, this user should allow ssh based auth without requiring a -# password. If using ssh key based auth, then the key should be managed by an -# ssh agent. -ansible_user=root - -# If ansible_user is not root, ansible_become must be set to true and the -# user must be configured for passwordless sudo -#ansible_become=yes - -# Debug level for all OpenShift components (Defaults to 2) -debug_level=2 - -# Specify the deployment type. Valid values are origin and openshift-enterprise. -openshift_deployment_type=origin -#openshift_deployment_type=openshift-enterprise - -# Specify the generic release of OpenShift to install. This is used mainly just during installation, after which we -# rely on the version running on the first master. Works best for containerized installs where we can usually -# use this to lookup the latest exact version of the container images, which is the tag actually used to configure -# the cluster. For RPM installations we just verify the version detected in your configured repos matches this -# release. 
-openshift_release=v3.7 - -# Specify an exact container image tag to install or configure. -# WARNING: This value will be used for all hosts in containerized environments, even those that have another version installed. -# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up. -#openshift_image_tag=v3.7.0 - -# Specify an exact rpm version to install or configure. -# WARNING: This value will be used for all hosts in RPM based environments, even those that have another version installed. -# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up. -#openshift_pkg_version=-3.7.0 - -# This enables all the system containers except for docker: -#openshift_use_system_containers=False -# -# But you can choose separately each component that must be a -# system container: -# -#openshift_use_openvswitch_system_container=False -#openshift_use_node_system_container=False -#openshift_use_master_system_container=False -#openshift_use_etcd_system_container=False -# -# In either case, system_images_registry must be specified to be able to find the system images -#system_images_registry="docker.io" -# when openshift_deployment_type=='openshift-enterprise' -#system_images_registry="registry.access.redhat.com" - -# Manage openshift example imagestreams and templates during install and upgrade -#openshift_install_examples=true - -# Configure logoutURL in the master config for console customization -# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#changing-the-logout-url -#openshift_master_logout_url=http://example.com - -# Configure extensionScripts in the master config for console customization -# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#loading-custom-scripts-and-stylesheets -#openshift_master_extension_scripts=['/path/to/script1.js','/path/to/script2.js'] - -# Configure extensionStylesheets in the master config for console customization -# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#loading-custom-scripts-and-stylesheets -#openshift_master_extension_stylesheets=['/path/to/stylesheet1.css','/path/to/stylesheet2.css'] - -# Configure extensions in the master config for console customization -# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#serving-static-files -#openshift_master_extensions=[{'name': 'images', 'sourceDirectory': '/path/to/my_images'}] - -# Configure extensions in the master config for console customization -# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#serving-static-files -#openshift_master_oauth_template=/path/to/login-template.html - -# Configure imagePolicyConfig in the master config -# See: https://godoc.org/github.com/openshift/origin/pkg/cmd/server/api#ImagePolicyConfig -#openshift_master_image_policy_config={"maxImagesBulkImportedPerRepository": 3, "disableScheduledImport": true} - -# Configure master API rate limits for external clients -#openshift_master_external_ratelimit_qps=200 -#openshift_master_external_ratelimit_burst=400 -# Configure master API rate limits for loopback clients -#openshift_master_loopback_ratelimit_qps=300 -#openshift_master_loopback_ratelimit_burst=600 - -# Docker Configuration -# Add additional, insecure, and blocked registries to global docker configuration -# For enterprise deployment types we ensure that 
registry.access.redhat.com is
-# included if you do not include it
-#openshift_docker_additional_registries=registry.example.com
-#openshift_docker_insecure_registries=registry.example.com
-#openshift_docker_blocked_registries=registry.hacker.com
-# Disable pushing to dockerhub
-#openshift_docker_disable_push_dockerhub=True
-# Use Docker inside a System Container. Note that this is a tech preview and should
-# not be used to upgrade!
-# The following options for docker are ignored:
-# - docker_version
-# - docker_upgrade
-# The following options must not be used
-# - openshift_docker_options
-#openshift_docker_use_system_container=False
-# Install and run cri-o alongside docker
-# NOTE: This uses openshift_docker_systemcontainer_image_registry_override as its override
-# just as container-engine does.
-#openshift_use_crio=False
-# Force the registry to use for the container-engine/crio system container. By default the registry
-# will be built off of the deployment type and ansible_distribution. Only
-# use this option if you are sure you know what you are doing!
-#openshift_docker_systemcontainer_image_override="registry.example.com/container-engine:latest"
-#openshift_crio_systemcontainer_image_override="registry.example.com/cri-o:latest"
-# NOTE: The following crio docker-gc items are tech preview and likely shouldn't be used
-# unless you know what you are doing!!
-# The following two variables are used when openshift_use_crio is True
-# and clean up after builds that pass through docker.
-# Enable docker garbage collection when using cri-o
-#openshift_crio_enable_docker_gc=false
-# Node Selectors to run the garbage collection
-#openshift_crio_docker_gc_node_selector: {'runtime': 'cri-o'}
-
-# Items added, as is, to end of /etc/sysconfig/docker OPTIONS
-# Default value: "--log-driver=journald"
-#openshift_docker_options="-l warn --ipv6=false"
-
-# Specify exact version of Docker to configure or upgrade to.
-# Downgrades are not supported and will error out. Be careful when upgrading docker from < 1.10 to > 1.10.
-# docker_version="1.12.1"
-
-# Specify whether to run Docker daemon with SELinux enabled in containers. Default is True.
-# Uncomment below to disable; for example if your kernel does not support the
-# Docker overlay/overlay2 storage drivers with SELinux enabled.
-#openshift_docker_selinux_enabled=False
-
-# Skip upgrading Docker during an OpenShift upgrade, leaving the current Docker version alone.
-# docker_upgrade=False
-
-# Specify exact version of etcd to configure or upgrade to.
-# etcd_version="3.1.0"
-# Enable etcd debug logging, defaults to false
-# etcd_debug=true
-# Set etcd log levels by package
-# etcd_log_package_levels="etcdserver=WARNING,security=DEBUG"
-
-# Upgrade Hooks
-#
-# Hooks are available to run custom tasks at various points during a cluster
-# upgrade. Each hook should point to a file with Ansible tasks defined. Suggest using
-# absolute paths; if not, the path will be treated as relative to the file where the
-# hook is actually used.
-#
-# Tasks to run before each master is upgraded.
-# openshift_master_upgrade_pre_hook=/usr/share/custom/pre_master.yml
-#
-# Tasks to run to upgrade the master. These tasks run after the main openshift-ansible
-# upgrade steps, but before we restart system/services.
-# openshift_master_upgrade_hook=/usr/share/custom/master.yml
-#
-# Tasks to run after each master is upgraded and system/services have been restarted.
-# openshift_master_upgrade_post_hook=/usr/share/custom/post_master.yml - -# Alternate image format string, useful if you've got your own registry mirror -# Configure this setting just on node or master -#oreg_url_master=example.com/openshift3/ose-${component}:${version} -#oreg_url_node=example.com/openshift3/ose-${component}:${version} -# For setting the configuration globally -#oreg_url=example.com/openshift3/ose-${component}:${version} -# If oreg_url points to a registry other than registry.access.redhat.com we can -# modify image streams to point at that registry by setting the following to true -#openshift_examples_modify_imagestreams=true - -# If oreg_url points to a registry requiring authentication, provide the following: -#oreg_auth_user=some_user -#oreg_auth_password='my-pass' -# NOTE: oreg_url must be defined by the user for oreg_auth_* to have any affect. -# oreg_auth_pass should be generated from running docker login. -# To update registry auth credentials, uncomment the following: -#oreg_auth_credentials_replace: True - -# OpenShift repository configuration -#openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name': 'OpenShift Origin COPR', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/', 'enabled': 1, 'gpgcheck': 1, 'gpgkey': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}] -#openshift_repos_enable_testing=false - -# htpasswd auth -openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}] -# Defining htpasswd users -#openshift_master_htpasswd_users={'user1': '', 'user2': ''} -# or -#openshift_master_htpasswd_file= - -# Allow all auth -#openshift_master_identity_providers=[{'name': 'allow_all', 'login': 'true', 'challenge': 'true', 'kind': 'AllowAllPasswordIdentityProvider'}] - -# LDAP auth -#openshift_master_identity_providers=[{'name': 'my_ldap_provider', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider', 'attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': '', 'bindPassword': '', 'ca': 'my-ldap-ca.crt', 'insecure': 'false', 'url': 'ldap://ldap.example.com:389/ou=users,dc=example,dc=com?uid'}] -# -# Configure LDAP CA certificate -# Specify either the ASCII contents of the certificate or the path to -# the local file that will be copied to the remote host. CA -# certificate contents will be copied to master systems and saved -# within /etc/origin/master/ with a filename matching the "ca" key set -# within the LDAPPasswordIdentityProvider. -# -#openshift_master_ldap_ca= -# or -#openshift_master_ldap_ca_file= - -# OpenID auth -#openshift_master_identity_providers=[{"name": "openid_auth", "login": "true", "challenge": "false", "kind": "OpenIDIdentityProvider", "client_id": "my_client_id", "client_secret": "my_client_secret", "claims": {"id": ["sub"], "preferredUsername": ["preferred_username"], "name": ["name"], "email": ["email"]}, "urls": {"authorize": "https://myidp.example.com/oauth2/authorize", "token": "https://myidp.example.com/oauth2/token"}, "ca": "my-openid-ca-bundle.crt"}] -# -# Configure OpenID CA certificate -# Specify either the ASCII contents of the certificate or the path to -# the local file that will be copied to the remote host. 
CA -# certificate contents will be copied to master systems and saved -# within /etc/origin/master/ with a filename matching the "ca" key set -# within the OpenIDIdentityProvider. -# -#openshift_master_openid_ca= -# or -#openshift_master_openid_ca_file= - -# Request header auth -#openshift_master_identity_providers=[{"name": "my_request_header_provider", "challenge": "true", "login": "true", "kind": "RequestHeaderIdentityProvider", "challengeURL": "https://www.example.com/challenging-proxy/oauth/authorize?${query}", "loginURL": "https://www.example.com/login-proxy/oauth/authorize?${query}", "clientCA": "my-request-header-ca.crt", "clientCommonNames": ["my-auth-proxy"], "headers": ["X-Remote-User", "SSO-User"], "emailHeaders": ["X-Remote-User-Email"], "nameHeaders": ["X-Remote-User-Display-Name"], "preferredUsernameHeaders": ["X-Remote-User-Login"]}] -# -# Configure request header CA certificate -# Specify either the ASCII contents of the certificate or the path to -# the local file that will be copied to the remote host. CA -# certificate contents will be copied to master systems and saved -# within /etc/origin/master/ with a filename matching the "clientCA" -# key set within the RequestHeaderIdentityProvider. -# -#openshift_master_request_header_ca= -# or -#openshift_master_request_header_ca_file= - -# CloudForms Management Engine (ManageIQ) App Install -# -# Enables installation of MIQ server. Recommended for dedicated -# clusters only. See roles/openshift_management/README.md for instructions -# and requirements. -#openshift_management_install_management=False - -# Cloud Provider Configuration -# -# Note: You may make use of environment variables rather than store -# sensitive configuration within the ansible inventory. -# For example: -#openshift_cloudprovider_aws_access_key="{{ lookup('env','AWS_ACCESS_KEY_ID') }}" -#openshift_cloudprovider_aws_secret_key="{{ lookup('env','AWS_SECRET_ACCESS_KEY') }}" -# -# AWS -#openshift_cloudprovider_kind=aws -# Note: IAM profiles may be used instead of storing API credentials on disk. -#openshift_cloudprovider_aws_access_key=aws_access_key_id -#openshift_cloudprovider_aws_secret_key=aws_secret_access_key -# -# Openstack -#openshift_cloudprovider_kind=openstack -#openshift_cloudprovider_openstack_auth_url=http://openstack.example.com:35357/v2.0/ -#openshift_cloudprovider_openstack_username=username -#openshift_cloudprovider_openstack_password=password -#openshift_cloudprovider_openstack_domain_id=domain_id -#openshift_cloudprovider_openstack_domain_name=domain_name -#openshift_cloudprovider_openstack_tenant_id=tenant_id -#openshift_cloudprovider_openstack_tenant_name=tenant_name -#openshift_cloudprovider_openstack_region=region -#openshift_cloudprovider_openstack_lb_subnet_id=subnet_id -# -# GCE -#openshift_cloudprovider_kind=gce - -# Project Configuration -#osm_project_request_message='' -#osm_project_request_template='' -#osm_mcs_allocator_range='s0:/2' -#osm_mcs_labels_per_project=5 -#osm_uid_allocator_range='1000000000-1999999999/10000' - -# Configure additional projects -#openshift_additional_projects={'my-project': {'default_node_selector': 'label=value'}} - -# Enable cockpit -#osm_use_cockpit=true -# -# Set cockpit plugins -#osm_cockpit_plugins=['cockpit-kubernetes'] - -# Native high availability (default cluster method) -# If no lb group is defined, the installer assumes that a load balancer has -# been preconfigured. 
For installation the value of -# openshift_master_cluster_hostname must resolve to the load balancer -# or to one or all of the masters defined in the inventory if no load -# balancer is present. -#openshift_master_cluster_hostname=openshift-ansible.test.example.com -#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com - -# Configure controller arguments -#osm_controller_args={'resource-quota-sync-period': ['10s']} - -# Configure api server arguments -#osm_api_server_args={'max-requests-inflight': ['400']} - -# default subdomain to use for exposed routes -#openshift_master_default_subdomain=apps.test.example.com - -# additional cors origins -#osm_custom_cors_origins=['foo.example.com', 'bar.example.com'] - -# default project node selector -#osm_default_node_selector='region=primary' - -# Override the default pod eviction timeout -#openshift_master_pod_eviction_timeout=5m - -# Override the default oauth tokenConfig settings: -# openshift_master_access_token_max_seconds=86400 -# openshift_master_auth_token_max_seconds=500 - -# Override master servingInfo.maxRequestsInFlight -#openshift_master_max_requests_inflight=500 - -# Override master and node servingInfo.minTLSVersion and .cipherSuites -# valid TLS versions are VersionTLS10, VersionTLS11, VersionTLS12 -# example cipher suites override, valid cipher suites are https://golang.org/pkg/crypto/tls/#pkg-constants -#openshift_master_min_tls_version=VersionTLS12 -#openshift_master_cipher_suites=['TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256', '...'] -# -#openshift_node_min_tls_version=VersionTLS12 -#openshift_node_cipher_suites=['TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256', '...'] - -# default storage plugin dependencies to install, by default the ceph and -# glusterfs plugin dependencies will be installed, if available. -#osn_storage_plugin_deps=['ceph','glusterfs','iscsi'] - -# OpenShift Router Options -# -# An OpenShift router will be created during install if there are -# nodes present with labels matching the default router selector, -# "region=infra". Set openshift_node_labels per node as needed in -# order to label nodes. -# -# Example: -# [nodes] -# node.example.com openshift_node_labels="{'region': 'infra'}" -# -# Router selector (optional) -# Router will only be created if nodes matching this label are present. -# Default value: 'region=infra' -#openshift_hosted_router_selector='region=infra' -# -# Router replicas (optional) -# Unless specified, openshift-ansible will calculate the replica count -# based on the number of nodes matching the openshift router selector. -#openshift_hosted_router_replicas=2 -# -# Router force subdomain (optional) -# A router path format to force on all routes used by this router -# (will ignore the route host value) -#openshift_hosted_router_force_subdomain='${name}-${namespace}.apps.example.com' -# -# Router certificate (optional) -# Provide local certificate paths which will be configured as the -# router's default certificate. -#openshift_hosted_router_certificate={"certfile": "/path/to/router.crt", "keyfile": "/path/to/router.key", "cafile": "/path/to/router-ca.crt"} -# -# Manage the OpenShift Router (optional) -#openshift_hosted_manage_router=true -# -# Router sharding support has been added and can be achieved by supplying the correct -# data to the inventory. The variable to house the data is openshift_hosted_routers -# and is in the form of a list. If no data is passed then a default router will be -# created. There are multiple combinations of router sharding. 
The one described -# below supports routers on separate nodes. -# -#openshift_hosted_routers=[{'name': 'router1', 'certificate': {'certfile': '/path/to/certificate/abc.crt', 'keyfile': '/path/to/certificate/abc.key', 'cafile': '/path/to/certificate/ca.crt'}, 'replicas': 1, 'serviceaccount': 'router', 'namespace': 'default', 'stats_port': 1936, 'edits': [], 'images': 'openshift3/ose-${component}:${version}', 'selector': 'type=router1', 'ports': ['80:80', '443:443']}, {'name': 'router2', 'certificate': {'certfile': '/path/to/certificate/xyz.crt', 'keyfile': '/path/to/certificate/xyz.key', 'cafile': '/path/to/certificate/ca.crt'}, 'replicas': 1, 'serviceaccount': 'router', 'namespace': 'default', 'stats_port': 1936, 'edits': [{'action': 'append', 'key': 'spec.template.spec.containers[0].env', 'value': {'name': 'ROUTE_LABELS', 'value': 'route=external'}}], 'images': 'openshift3/ose-${component}:${version}', 'selector': 'type=router2', 'ports': ['80:80', '443:443']}] - -# OpenShift Registry Console Options -# Override the console image prefix: -# origin default is "cockpit/", enterprise default is "openshift3/" -#openshift_cockpit_deployer_prefix=registry.example.com/myrepo/ -# origin default is "kubernetes", enterprise default is "registry-console" -#openshift_cockpit_deployer_basename=my-console -# Override image version, defaults to latest for origin, vX.Y product version for enterprise -#openshift_cockpit_deployer_version=1.4.1 - -# Openshift Registry Options -# -# An OpenShift registry will be created during install if there are -# nodes present with labels matching the default registry selector, -# "region=infra". Set openshift_node_labels per node as needed in -# order to label nodes. -# -# Example: -# [nodes] -# node.example.com openshift_node_labels="{'region': 'infra'}" -# -# Registry selector (optional) -# Registry will only be created if nodes matching this label are present. -# Default value: 'region=infra' -#openshift_hosted_registry_selector='region=infra' -# -# Registry replicas (optional) -# Unless specified, openshift-ansible will calculate the replica count -# based on the number of nodes matching the openshift registry selector. -#openshift_hosted_registry_replicas=2 -# -# Validity of the auto-generated certificate in days (optional) -#openshift_hosted_registry_cert_expire_days=730 -# -# Manage the OpenShift Registry (optional) -#openshift_hosted_manage_registry=true - -# Registry Storage Options -# -# NFS Host Group -# An NFS volume will be created with path "nfs_directory/volume_name" -# on the host within the [nfs] host group. For example, the volume -# path using these options would be "/exports/registry". "exports" is -# is the name of the export served by the nfs server. "registry" is -# the name of a directory inside of "/exports". -#openshift_hosted_registry_storage_kind=nfs -#openshift_hosted_registry_storage_access_modes=['ReadWriteMany'] -# nfs_directory must conform to DNS-1123 subdomain must consist of lower case -# alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character -#openshift_hosted_registry_storage_nfs_directory=/exports -#openshift_hosted_registry_storage_nfs_options='*(rw,root_squash)' -#openshift_hosted_registry_storage_volume_name=registry -#openshift_hosted_registry_storage_volume_size=10Gi -# -# External NFS Host -# NFS volume must already exist with path "nfs_directory/_volume_name" on -# the storage_host. For example, the remote volume path using these -# options would be "nfs.example.com:/exports/registry". 
"exports" is -# is the name of the export served by the nfs server. "registry" is -# the name of a directory inside of "/exports". -#openshift_hosted_registry_storage_kind=nfs -#openshift_hosted_registry_storage_access_modes=['ReadWriteMany'] -#openshift_hosted_registry_storage_host=nfs.example.com -# nfs_directory must conform to DNS-1123 subdomain must consist of lower case -# alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character -#openshift_hosted_registry_storage_nfs_directory=/exports -#openshift_hosted_registry_storage_volume_name=registry -#openshift_hosted_registry_storage_volume_size=10Gi -# -# Openstack -# Volume must already exist. -#openshift_hosted_registry_storage_kind=openstack -#openshift_hosted_registry_storage_access_modes=['ReadWriteOnce'] -#openshift_hosted_registry_storage_openstack_filesystem=ext4 -#openshift_hosted_registry_storage_openstack_volumeID=3a650b4f-c8c5-4e0a-8ca5-eaee11f16c57 -#openshift_hosted_registry_storage_volume_size=10Gi -# -# AWS S3 -# S3 bucket must already exist. -#openshift_hosted_registry_storage_kind=object -#openshift_hosted_registry_storage_provider=s3 -#openshift_hosted_registry_storage_s3_encrypt=false -#openshift_hosted_registry_storage_s3_kmskeyid=aws_kms_key_id -#openshift_hosted_registry_storage_s3_accesskey=aws_access_key_id -#openshift_hosted_registry_storage_s3_secretkey=aws_secret_access_key -#openshift_hosted_registry_storage_s3_bucket=bucket_name -#openshift_hosted_registry_storage_s3_region=bucket_region -#openshift_hosted_registry_storage_s3_chunksize=26214400 -#openshift_hosted_registry_storage_s3_rootdirectory=/registry -#openshift_hosted_registry_pullthrough=true -#openshift_hosted_registry_acceptschema2=true -#openshift_hosted_registry_enforcequota=true -# -# Any S3 service (Minio, ExoScale, ...): Basically the same as above -# but with regionendpoint configured -# S3 bucket must already exist. -#openshift_hosted_registry_storage_kind=object -#openshift_hosted_registry_storage_provider=s3 -#openshift_hosted_registry_storage_s3_accesskey=access_key_id -#openshift_hosted_registry_storage_s3_secretkey=secret_access_key -#openshift_hosted_registry_storage_s3_regionendpoint=https://myendpoint.example.com/ -#openshift_hosted_registry_storage_s3_bucket=bucket_name -#openshift_hosted_registry_storage_s3_region=bucket_region -#openshift_hosted_registry_storage_s3_chunksize=26214400 -#openshift_hosted_registry_storage_s3_rootdirectory=/registry -#openshift_hosted_registry_pullthrough=true -#openshift_hosted_registry_acceptschema2=true -#openshift_hosted_registry_enforcequota=true -# -# Additional CloudFront Options. When using CloudFront all three -# of the followingg variables must be defined. -#openshift_hosted_registry_storage_s3_cloudfront_baseurl=https://myendpoint.cloudfront.net/ -#openshift_hosted_registry_storage_s3_cloudfront_privatekeyfile=/full/path/to/secret.pem -#openshift_hosted_registry_storage_s3_cloudfront_keypairid=yourpairid - -# Metrics deployment -# See: https://docs.openshift.com/enterprise/latest/install_config/cluster_metrics.html -# -# By default metrics are not automatically deployed, set this to enable them -#openshift_metrics_install_metrics=true -# -# Storage Options -# If openshift_metrics_storage_kind is unset then metrics will be stored -# in an EmptyDir volume and will be deleted when the cassandra pod terminates. -# Storage options A & B currently support only one cassandra pod which is -# generally enough for up to 1000 pods. 
Additional volumes can be created
-# manually after the fact and metrics scaled per the docs.
-#
-# Option A - NFS Host Group
-# An NFS volume will be created with path "nfs_directory/volume_name"
-# on the host within the [nfs] host group. For example, the volume
-# path using these options would be "/exports/metrics". "exports"
-# is the name of the export served by the nfs server. "metrics" is
-# the name of a directory inside of "/exports".
-#openshift_metrics_storage_kind=nfs
-#openshift_metrics_storage_access_modes=['ReadWriteOnce']
-#openshift_metrics_storage_nfs_directory=/exports
-#openshift_metrics_storage_nfs_options='*(rw,root_squash)'
-#openshift_metrics_storage_volume_name=metrics
-#openshift_metrics_storage_volume_size=10Gi
-#openshift_metrics_storage_labels={'storage': 'metrics'}
-#
-# Option B - External NFS Host
-# NFS volume must already exist with path "nfs_directory/_volume_name" on
-# the storage_host. For example, the remote volume path using these
-# options would be "nfs.example.com:/exports/metrics". "exports"
-# is the name of the export served by the nfs server. "metrics" is
-# the name of a directory inside of "/exports".
-#openshift_metrics_storage_kind=nfs
-#openshift_metrics_storage_access_modes=['ReadWriteOnce']
-#openshift_metrics_storage_host=nfs.example.com
-#openshift_metrics_storage_nfs_directory=/exports
-#openshift_metrics_storage_volume_name=metrics
-#openshift_metrics_storage_volume_size=10Gi
-#openshift_metrics_storage_labels={'storage': 'metrics'}
-#
-# Option C - Dynamic -- If openshift supports dynamic volume provisioning for
-# your cloud platform, use this.
-#openshift_metrics_storage_kind=dynamic
-#
-# Other Metrics Options -- Common items you may wish to reconfigure, for the complete
-# list of options please see roles/openshift_metrics/README.md
-#
-# Override metricsPublicURL in the master config for cluster metrics
-# Defaults to https://hawkular-metrics.{{openshift_master_default_subdomain}}/hawkular/metrics
-# Currently, you may only alter the hostname portion of the url; altering the
-# `/hawkular/metrics` path will break installation of metrics.
-#openshift_metrics_hawkular_hostname=hawkular-metrics.example.com
-# Configure the prefix and version for the component images
-#openshift_metrics_image_prefix=docker.io/openshift/origin-
-#openshift_metrics_image_version=v3.7
-# when openshift_deployment_type=='openshift-enterprise'
-#openshift_metrics_image_prefix=registry.access.redhat.com/openshift3/
-#openshift_metrics_image_version=v3.7
-#
-# StorageClass
-# openshift_storageclass_name=gp2
-# openshift_storageclass_parameters={'type': 'gp2', 'encrypted': 'false'}
-#
-
-# Logging deployment
-#
-# Currently logging deployment is disabled by default; enable it by setting this
-#openshift_logging_install_logging=true
-#
-# Logging storage config
-# Option A - NFS Host Group
-# An NFS volume will be created with path "nfs_directory/volume_name"
-# on the host within the [nfs] host group. For example, the volume
-# path using these options would be "/exports/logging". "exports"
-# is the name of the export served by the nfs server. "logging" is
-# the name of a directory inside of "/exports".
-#openshift_logging_storage_kind=nfs -#openshift_logging_storage_access_modes=['ReadWriteOnce'] -#openshift_logging_storage_nfs_directory=/exports -#openshift_logging_storage_nfs_options='*(rw,root_squash)' -#openshift_logging_storage_volume_name=logging -#openshift_logging_storage_volume_size=10Gi -#openshift_logging_storage_labels={'storage': 'logging'} -# -# Option B - External NFS Host -# NFS volume must already exist with path "nfs_directory/_volume_name" on -# the storage_host. For example, the remote volume path using these -# options would be "nfs.example.com:/exports/logging". "exports" is -# is the name of the export served by the nfs server. "logging" is -# the name of a directory inside of "/exports". -#openshift_logging_storage_kind=nfs -#openshift_logging_storage_access_modes=['ReadWriteOnce'] -#openshift_logging_storage_host=nfs.example.com -#openshift_logging_storage_nfs_directory=/exports -#openshift_logging_storage_volume_name=logging -#openshift_logging_storage_volume_size=10Gi -#openshift_logging_storage_labels={'storage': 'logging'} -# -# Option C - Dynamic -- If openshift supports dynamic volume provisioning for -# your cloud platform use this. -#openshift_logging_storage_kind=dynamic -# -# Option D - none -- Logging will use emptydir volumes which are destroyed when -# pods are deleted -# -# Other Logging Options -- Common items you may wish to reconfigure, for the complete -# list of options please see roles/openshift_logging/README.md -# -# Configure loggingPublicURL in the master config for aggregate logging, defaults -# to kibana.{{ openshift_master_default_subdomain }} -#openshift_logging_kibana_hostname=logging.apps.example.com -# Configure the number of elastic search nodes, unless you're using dynamic provisioning -# this value must be 1 -#openshift_logging_es_cluster_size=1 -# Configure the prefix and version for the component images -#openshift_logging_image_prefix=docker.io/openshift/origin- -#openshift_logging_image_version=v3.7.0 -# when openshift_deployment_type=='openshift-enterprise' -#openshift_logging_image_prefix=registry.access.redhat.com/openshift3/ -#openshift_logging_image_version=3.7.0 - -# Prometheus deployment -# -# Currently prometheus deployment is disabled by default, enable it by setting this -#openshift_hosted_prometheus_deploy=true -# -# Prometheus storage config -# Option A - NFS Host Group -# An NFS volume will be created with path "nfs_directory/volume_name" -# on the host within the [nfs] host group. 
For example, the volume -# path using these options would be "/exports/prometheus" -#openshift_prometheus_storage_kind=nfs -#openshift_prometheus_storage_access_modes=['ReadWriteOnce'] -#openshift_prometheus_storage_nfs_directory=/exports -#openshift_prometheus_storage_nfs_options='*(rw,root_squash)' -#openshift_prometheus_storage_volume_name=prometheus -#openshift_prometheus_storage_volume_size=10Gi -#openshift_prometheus_storage_labels={'storage': 'prometheus'} -#openshift_prometheus_storage_type='pvc' -# For prometheus-alertmanager -#openshift_prometheus_alertmanager_storage_kind=nfs -#openshift_prometheus_alertmanager_storage_access_modes=['ReadWriteOnce'] -#openshift_prometheus_alertmanager_storage_nfs_directory=/exports -#openshift_prometheus_alertmanager_storage_nfs_options='*(rw,root_squash)' -#openshift_prometheus_alertmanager_storage_volume_name=prometheus-alertmanager -#openshift_prometheus_alertmanager_storage_volume_size=10Gi -#openshift_prometheus_alertmanager_storage_labels={'storage': 'prometheus-alertmanager'} -#openshift_prometheus_alertmanager_storage_type='pvc' -# For prometheus-alertbuffer -#openshift_prometheus_alertbuffer_storage_kind=nfs -#openshift_prometheus_alertbuffer_storage_access_modes=['ReadWriteOnce'] -#openshift_prometheus_alertbuffer_storage_nfs_directory=/exports -#openshift_prometheus_alertbuffer_storage_nfs_options='*(rw,root_squash)' -#openshift_prometheus_alertbuffer_storage_volume_name=prometheus-alertbuffer -#openshift_prometheus_alertbuffer_storage_volume_size=10Gi -#openshift_prometheus_alertbuffer_storage_labels={'storage': 'prometheus-alertbuffer'} -#openshift_prometheus_alertbuffer_storage_type='pvc' -# -# Option B - External NFS Host -# NFS volume must already exist with path "nfs_directory/_volume_name" on -# the storage_host. 
For example, the remote volume path using these -# options would be "nfs.example.com:/exports/prometheus" -#openshift_prometheus_storage_kind=nfs -#openshift_prometheus_storage_access_modes=['ReadWriteOnce'] -#openshift_prometheus_storage_host=nfs.example.com -#openshift_prometheus_storage_nfs_directory=/exports -#openshift_prometheus_storage_volume_name=prometheus -#openshift_prometheus_storage_volume_size=10Gi -#openshift_prometheus_storage_labels={'storage': 'prometheus'} -#openshift_prometheus_storage_type='pvc' -# For prometheus-alertmanager -#openshift_prometheus_alertmanager_storage_kind=nfs -#openshift_prometheus_alertmanager_storage_access_modes=['ReadWriteOnce'] -#openshift_prometheus_alertmanager_storage_host=nfs.example.com -#openshift_prometheus_alertmanager_storage_nfs_directory=/exports -#openshift_prometheus_alertmanager_storage_volume_name=prometheus-alertmanager -#openshift_prometheus_alertmanager_storage_volume_size=10Gi -#openshift_prometheus_alertmanager_storage_labels={'storage': 'prometheus-alertmanager'} -#openshift_prometheus_alertmanager_storage_type='pvc' -# For prometheus-alertbuffer -#openshift_prometheus_alertbuffer_storage_kind=nfs -#openshift_prometheus_alertbuffer_storage_access_modes=['ReadWriteOnce'] -#openshift_prometheus_alertbuffer_storage_host=nfs.example.com -#openshift_prometheus_alertbuffer_storage_nfs_directory=/exports -#openshift_prometheus_alertbuffer_storage_volume_name=prometheus-alertbuffer -#openshift_prometheus_alertbuffer_storage_volume_size=10Gi -#openshift_prometheus_alertbuffer_storage_labels={'storage': 'prometheus-alertbuffer'} -#openshift_prometheus_alertbuffer_storage_type='pvc' -# -# Option C - none -- Prometheus, alertmanager and alertbuffer will use emptydir volumes -# which are destroyed when pods are deleted - -# Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet') -# os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant' - -# Disable the OpenShift SDN plugin -# openshift_use_openshift_sdn=False - -# Configure SDN cluster network and kubernetes service CIDR blocks. These -# network blocks should be private and should not conflict with network blocks -# in your infrastructure that pods may require access to. Can not be changed -# after deployment. -# -# WARNING : Do not pick subnets that overlap with the default Docker bridge subnet of -# 172.17.0.0/16. Your installation will fail and/or your configuration change will -# cause the Pod SDN or Cluster SDN to fail. -# -# WORKAROUND : If you must use an overlapping subnet, you can configure a non conflicting -# docker0 CIDR range by adding '--bip=192.168.2.1/24' to DOCKER_NETWORK_OPTIONS -# environment variable located in /etc/sysconfig/docker-network. -# When upgrading or scaling up the following must match whats in your master config! -# Inventory: master yaml field -# osm_cluster_network_cidr: clusterNetworkCIDR -# openshift_portal_net: serviceNetworkCIDR -# When installing osm_cluster_network_cidr and openshift_portal_net must be set. -# Sane examples are provided below. -#osm_cluster_network_cidr=10.128.0.0/14 -#openshift_portal_net=172.30.0.0/16 - -# ExternalIPNetworkCIDRs controls what values are acceptable for the -# service external IP field. If empty, no externalIP may be set. It -# may contain a list of CIDRs which are checked for access. If a CIDR -# is prefixed with !, IPs in that CIDR will be rejected. Rejections -# will be applied first, then the IP checked against one of the -# allowed CIDRs. 
You should ensure this range does not overlap with -# your nodes, pods, or service CIDRs for security reasons. -#openshift_master_external_ip_network_cidrs=['0.0.0.0/0'] - -# IngressIPNetworkCIDR controls the range to assign ingress IPs from for -# services of type LoadBalancer on bare metal. If empty, ingress IPs will not -# be assigned. It may contain a single CIDR that will be allocated from. For -# security reasons, you should ensure that this range does not overlap with -# the CIDRs reserved for external IPs, nodes, pods, or services. -#openshift_master_ingress_ip_network_cidr=172.46.0.0/16 - -# Configure number of bits to allocate to each host's subnet e.g. 9 -# would mean a /23 network on the host. -# When upgrading or scaling up the following must match whats in your master config! -# Inventory: master yaml field -# osm_host_subnet_length: hostSubnetLength -# When installing osm_host_subnet_length must be set. A sane example is provided below. -#osm_host_subnet_length=9 - -# Configure master API and console ports. -#openshift_master_api_port=8443 -#openshift_master_console_port=8443 - -# set exact RPM version (include - prefix) -#openshift_pkg_version=-3.6.0 -# you may also specify version and release, ie: -#openshift_pkg_version=-3.7.0-0.126.0.git.0.9351aae.el7 - -# Configure custom ca certificate -#openshift_master_ca_certificate={'certfile': '/path/to/ca.crt', 'keyfile': '/path/to/ca.key'} -# -# NOTE: CA certificate will not be replaced with existing clusters. -# This option may only be specified when creating a new cluster or -# when redeploying cluster certificates with the redeploy-certificates -# playbook. - -# Configure custom named certificates (SNI certificates) -# -# https://docs.openshift.org/latest/install_config/certificate_customization.html -# https://docs.openshift.com/enterprise/latest/install_config/certificate_customization.html -# -# NOTE: openshift_master_named_certificates is cached on masters and is an -# additive fact, meaning that each run with a different set of certificates -# will add the newly provided certificates to the cached set of certificates. -# -# An optional CA may be specified for each named certificate. CAs will -# be added to the OpenShift CA bundle which allows for the named -# certificate to be served for internal cluster communication. -# -# If you would like openshift_master_named_certificates to be overwritten with -# the provided value, specify openshift_master_overwrite_named_certificates. -#openshift_master_overwrite_named_certificates=true -# -# Provide local certificate paths which will be deployed to masters -#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "cafile": "/path/to/custom-ca1.crt"}] -# -# Detected names may be overridden by specifying the "names" key -#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "names": ["public-master-host.com"], "cafile": "/path/to/custom-ca1.crt"}] - -# Session options -#openshift_master_session_name=ssn -#openshift_master_session_max_seconds=3600 - -# An authentication and encryption secret will be generated if secrets -# are not provided. If provided, openshift_master_session_auth_secrets -# and openshift_master_encryption_secrets must be equal length. -# -# Signing secrets, used to authenticate sessions using -# HMAC. Recommended to use secrets with 32 or 64 bytes. 
-#openshift_master_session_auth_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO'] -# -# Encrypting secrets, used to encrypt sessions. Must be 16, 24, or 32 -# characters long, to select AES-128, AES-192, or AES-256. -#openshift_master_session_encryption_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO'] - -# configure how often node iptables rules are refreshed -#openshift_node_iptables_sync_period=5s - -# Configure nodeIP in the node config -# This is needed in cases where node traffic is desired to go over an -# interface other than the default network interface. -#openshift_set_node_ip=True - -# Configure dnsIP in the node config -#openshift_dns_ip=172.30.0.1 - -# Configure node kubelet arguments. pods-per-core is valid in OpenShift Origin 1.3 or OpenShift Container Platform 3.3 and later. -#openshift_node_kubelet_args={'pods-per-core': ['10'], 'max-pods': ['250'], 'image-gc-high-threshold': ['85'], 'image-gc-low-threshold': ['80']} - -# Configure logrotate scripts -# See: https://github.com/nickhammond/ansible-logrotate -#logrotate_scripts=[{"name": "syslog", "path": "/var/log/cron\n/var/log/maillog\n/var/log/messages\n/var/log/secure\n/var/log/spooler\n", "options": ["daily", "rotate 7", "compress", "sharedscripts", "missingok"], "scripts": {"postrotate": "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"}}] - -# openshift-ansible will wait indefinitely for your input when it detects that the -# value of openshift_hostname resolves to an IP address not bound to any local -# interfaces. This mis-configuration is problematic for any pod leveraging host -# networking and liveness or readiness probes. -# Setting this variable to true will override that check. -#openshift_override_hostname_check=true - -# openshift_use_dnsmasq is deprecated. This must be true, or installs will fail -# in versions >= 3.6 -#openshift_use_dnsmasq=False - -# Define an additional dnsmasq.conf file to deploy to /etc/dnsmasq.d/openshift-ansible.conf -# This is useful for POC environments where DNS may not actually be available yet or to set -# options like 'strict-order' to alter dnsmasq configuration. -#openshift_node_dnsmasq_additional_config_file=/home/bob/ose-dnsmasq.conf - -# Global Proxy Configuration -# These options configure HTTP_PROXY, HTTPS_PROXY, and NOPROXY environment -# variables for docker and master services. -# -# Hosts in the openshift_no_proxy list will NOT use any globally -# configured HTTP(S)_PROXYs. openshift_no_proxy accepts domains -# (.example.com), hosts (example.com), and IP addresses. -#openshift_http_proxy=http://USER:PASSWORD@IPADDR:PORT -#openshift_https_proxy=https://USER:PASSWORD@IPADDR:PORT -#openshift_no_proxy='.hosts.example.com,some-host.com' -# -# Most environments don't require a proxy between openshift masters, nodes, and -# etcd hosts. So automatically add those hostnames to the openshift_no_proxy list. -# If all of your hosts share a common domain you may wish to disable this and -# specify that domain above instead. -# -# For example, having hosts with FQDNs: m1.ex.com, n1.ex.com, and -# n2.ex.com, one would simply add '.ex.com' to the openshift_no_proxy -# variable (above) and set this value to False -#openshift_generate_no_proxy_hosts=True -# -# These options configure the BuildDefaults admission controller which injects -# configuration into Builds. Proxy related values will default to the global proxy -# config values. You only need to set these if they differ from the global proxy settings. 
-# See BuildDefaults documentation at -# https://docs.openshift.org/latest/admin_guide/build_defaults_overrides.html -#openshift_builddefaults_http_proxy=http://USER:PASSWORD@HOST:PORT -#openshift_builddefaults_https_proxy=https://USER:PASSWORD@HOST:PORT -#openshift_builddefaults_no_proxy=mycorp.com -#openshift_builddefaults_git_http_proxy=http://USER:PASSWORD@HOST:PORT -#openshift_builddefaults_git_https_proxy=https://USER:PASSWORD@HOST:PORT -#openshift_builddefaults_git_no_proxy=mycorp.com -#openshift_builddefaults_image_labels=[{'name':'imagelabelname1','value':'imagelabelvalue1'}] -#openshift_builddefaults_nodeselectors={'nodelabel1':'nodelabelvalue1'} -#openshift_builddefaults_annotations={'annotationkey1':'annotationvalue1'} -#openshift_builddefaults_resources_requests_cpu=100m -#openshift_builddefaults_resources_requests_memory=256Mi -#openshift_builddefaults_resources_limits_cpu=1000m -#openshift_builddefaults_resources_limits_memory=512Mi - -# Or you may optionally define your own build defaults configuration serialized as json -#openshift_builddefaults_json='{"BuildDefaults":{"configuration":{"apiVersion":"v1","env":[{"name":"HTTP_PROXY","value":"http://proxy.example.com.redhat.com:3128"},{"name":"NO_PROXY","value":"ose3-master.example.com"}],"gitHTTPProxy":"http://proxy.example.com:3128","gitNoProxy":"ose3-master.example.com","kind":"BuildDefaultsConfig"}}}' - -# These options configure the BuildOverrides admission controller which injects -# configuration into Builds. -# See BuildOverrides documentation at -# https://docs.openshift.org/latest/admin_guide/build_defaults_overrides.html -#openshift_buildoverrides_force_pull=true -#openshift_buildoverrides_image_labels=[{'name':'imagelabelname1','value':'imagelabelvalue1'}] -#openshift_buildoverrides_nodeselectors={'nodelabel1':'nodelabelvalue1'} -#openshift_buildoverrides_annotations={'annotationkey1':'annotationvalue1'} - -# Or you may optionally define your own build overrides configuration serialized as json -#openshift_buildoverrides_json='{"BuildOverrides":{"configuration":{"apiVersion":"v1","kind":"BuildDefaultsConfig","forcePull":"true"}}}' - -# Enable service catalog -#openshift_enable_service_catalog=true - -# Enable template service broker (requires service catalog to be enabled, above) -#template_service_broker_install=true - -# Force a specific prefix (IE: registry) to use when pulling the service catalog image -# NOTE: The registry all the way up to the start of the image name must be provided. Two examples -# below are provided. -#openshift_service_catalog_image_prefix=docker.io/openshift/origin- -#openshift_service_catalog_image_prefix=registry.access.redhat.com/openshift3/ose- -# Force a specific image version to use when pulling the service catalog image -#openshift_service_catalog_image_version=v3.7 - -# Configure one of more namespaces whose templates will be served by the TSB -#openshift_template_service_broker_namespaces=['openshift'] - -# masterConfig.volumeConfig.dynamicProvisioningEnabled, configurable as of 1.2/3.2, enabled by default -#openshift_master_dynamic_provisioning_enabled=False - -# Admission plugin config -#openshift_master_admission_plugin_config={"ProjectRequestLimit":{"configuration":{"apiVersion":"v1","kind":"ProjectRequestLimitConfig","limits":[{"selector":{"admin":"true"}},{"maxProjects":"1"}]}},"PodNodeConstraints":{"configuration":{"apiVersion":"v1","kind":"PodNodeConstraintsConfig"}}} - -# Configure usage of openshift_clock role. 
-#openshift_clock_enabled=true - -# OpenShift Per-Service Environment Variables -# Environment variables are added to /etc/sysconfig files for -# each OpenShift service: node, master (api and controllers). -# API and controllers environment variables are merged in single -# master environments. -#openshift_master_api_env_vars={"ENABLE_HTTP2": "true"} -#openshift_master_controllers_env_vars={"ENABLE_HTTP2": "true"} -#openshift_node_env_vars={"ENABLE_HTTP2": "true"} - -# Enable API service auditing -#openshift_master_audit_config={"enabled": true} -# -# In case you want more advanced setup for the auditlog you can -# use this line. -# The directory in "auditFilePath" will be created if it's not -# exist -#openshift_master_audit_config={"enabled": true, "auditFilePath": "/var/log/openpaas-oscp-audit/openpaas-oscp-audit.log", "maximumFileRetentionDays": 14, "maximumFileSizeMegabytes": 500, "maximumRetainedFiles": 5} - -# Enable origin repos that point at Centos PAAS SIG, defaults to true, only used -# by deployment_type=origin -#openshift_enable_origin_repo=false - -# Validity of the auto-generated OpenShift certificates in days. -# See also openshift_hosted_registry_cert_expire_days above. -# -#openshift_ca_cert_expire_days=1825 -#openshift_node_cert_expire_days=730 -#openshift_master_cert_expire_days=730 - -# Validity of the auto-generated external etcd certificates in days. -# Controls validity for etcd CA, peer, server and client certificates. -# -#etcd_ca_default_days=1825 -# -# ServiceAccountConfig:LimitSecretRefences rejects pods that reference secrets their service accounts do not reference -# openshift_master_saconfig_limitsecretreferences=false - -# Upgrade Control -# -# By default nodes are upgraded in a serial manner one at a time and all failures -# are fatal, one set of variables for normal nodes, one set of variables for -# nodes that are part of control plane as the number of hosts may be different -# in those two groups. -#openshift_upgrade_nodes_serial=1 -#openshift_upgrade_nodes_max_fail_percentage=0 -#openshift_upgrade_control_plane_nodes_serial=1 -#openshift_upgrade_control_plane_nodes_max_fail_percentage=0 -# -# You can specify the number of nodes to upgrade at once. We do not currently -# attempt to verify that you have capacity to drain this many nodes at once -# so please be careful when specifying these values. You should also verify that -# the expected number of nodes are all schedulable and ready before starting an -# upgrade. If it's not possible to drain the requested nodes the upgrade will -# stall indefinitely until the drain is successful. -# -# If you're upgrading more than one node at a time you can specify the maximum -# percentage of failure within the batch before the upgrade is aborted. Any -# nodes that do fail are ignored for the rest of the playbook run and you should -# take care to investigate the failure and return the node to service so that -# your cluster. 
-# -# The percentage must exceed the value, this would fail on two failures -# openshift_upgrade_nodes_serial=4 openshift_upgrade_nodes_max_fail_percentage=49 -# where as this would not -# openshift_upgrade_nodes_serial=4 openshift_upgrade_nodes_max_fail_percentage=50 -# -# Multiple data migrations take place and if they fail they will fail the upgrade -# You may wish to disable these or make them non fatal -# -# openshift_upgrade_pre_storage_migration_enabled=true -# openshift_upgrade_pre_storage_migration_fatal=true -# openshift_upgrade_post_storage_migration_enabled=true -# openshift_upgrade_post_storage_migration_fatal=false - -###################################################################### -# CloudForms/ManageIQ (CFME/MIQ) Configuration - -# See the readme for full descriptions and getting started -# instructions: ../../roles/openshift_management/README.md or go directly to -# their definitions: ../../roles/openshift_management/defaults/main.yml -# ../../roles/openshift_management/vars/main.yml -# -# Namespace for the CFME project -#openshift_management_project: openshift-management - -# Namespace/project description -#openshift_management_project_description: CloudForms Management Engine - -# Choose 'miq-template' for a podified database install -# Choose 'miq-template-ext-db' for an external database install -# -# If you are using the miq-template-ext-db template then you must add -# the required database parameters to the -# openshift_management_template_parameters variable. -#openshift_management_app_template: miq-template - -# Allowed options: nfs, nfs_external, preconfigured, cloudprovider. -#openshift_management_storage_class: nfs - -# [OPTIONAL] - If you are using an EXTERNAL NFS server, such as a -# netapp appliance, then you must set the hostname here. Leave the -# value as 'false' if you are not using external NFS. -#openshift_management_storage_nfs_external_hostname: false - -# [OPTIONAL] - If you are using external NFS then you must set the base -# path to the exports location here. -# -# Additionally: EXTERNAL NFS REQUIRES that YOU CREATE the nfs exports -# that will back the application PV and optionally the database -# pv. Export path definitions, relative to -# {{ openshift_management_storage_nfs_base_dir }} -# -# LOCAL NFS NOTE: -# -# You may may also change this value if you want to change the default -# path used for local NFS exports. -#openshift_management_storage_nfs_base_dir: /exports - -# LOCAL NFS NOTE: -# -# You may override the automatically selected LOCAL NFS server by -# setting this variable. Useful for testing specific task files. -#openshift_management_storage_nfs_local_hostname: false - -# These are the default values for the username and password of the -# management app. Changing these values in your inventory will not -# change your username or password. You should only need to change -# these values in your inventory if you already changed the actual -# name and password AND are trying to use integration scripts. -# -# For example, adding this cluster as a container provider, -# playbooks/byo/openshift-management/add_container_provider.yml -#openshift_management_username: admin -#openshift_management_password: smartvm - -# A hash of parameters you want to override or set in the -# miq-template.yaml or miq-template-ext-db.yaml templates. Set this in -# your inventory file as a simple hash. 
Acceptable values are defined -# under the .parameters list in files/miq-template{-ext-db}.yaml -# Example: -# -# openshift_management_template_parameters={'APPLICATION_MEM_REQ': '512Mi'} -#openshift_management_template_parameters: {} - -# Firewall configuration -# You can open additional firewall ports by defining them as a list. of service -# names and ports/port ranges for either masters or nodes. -#openshift_master_open_ports=[{"service":"svc1","port":"11/tcp"}] -#openshift_node_open_ports=[{"service":"svc2","port":"12-13/tcp"},{"service":"svc3","port":"14/udp"}] - -# host group for masters -[masters] -ose3-master[1:3]-ansible.test.example.com - -[etcd] -ose3-etcd[1:3]-ansible.test.example.com - -# NOTE: Containerized load balancer hosts are not yet supported, if using a global -# containerized=true host variable we must set to false. -[lb] -ose3-lb-ansible.test.example.com containerized=false - -# NOTE: Currently we require that masters be part of the SDN which requires that they also be nodes -# However, in order to ensure that your masters are not burdened with running pods you should -# make them unschedulable by adding openshift_schedulable=False any node that's also a master. -[nodes] -ose3-master[1:3]-ansible.test.example.com -ose3-node[1:2]-ansible.test.example.com openshift_node_labels="{'region': 'primary', 'zone': 'default'}" - -[nfs] -ose3-nfs-ansible.test.example.com diff --git a/inventory/byo/hosts.openstack b/inventory/byo/hosts.openstack deleted file mode 100644 index c648078c4..000000000 --- a/inventory/byo/hosts.openstack +++ /dev/null @@ -1,37 +0,0 @@ -# This is an example of a bring your own (byo) host inventory - -# Create an OSEv3 group that contains the masters and nodes groups -[OSEv3:children] -masters -nodes -etcd -lb - -# Set variables common for all OSEv3 hosts -[OSEv3:vars] -ansible_ssh_user=cloud-user -ansible_become=yes - -# Debug level for all OpenShift components (Defaults to 2) -debug_level=2 - -openshift_deployment_type=openshift-enterprise - -openshift_additional_repos=[{'id': 'ose-3.1', 'name': 'ose-3.1', 'baseurl': 'http://pulp.dist.prod.ext.phx2.redhat.com/content/dist/rhel/server/7/7Server/x86_64/ose/3.1/os', 'enabled': 1, 'gpgcheck': 0}] - -openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '{{ openshift.common.config_base }}/htpasswd'}] - -#openshift_pkg_version=-3.0.0.0 - -[masters] -jdetiber-master.usersys.redhat.com openshift_public_hostname="{{ inventory_hostname }}" openshift_hostname="{{ ansible_default_ipv4.address }}" - -[etcd] -jdetiber-etcd.usersys.redhat.com - -[lb] -#ose3-lb-ansible.test.example.com - -[nodes] -jdetiber-master.usersys.redhat.com openshift_public_hostname="{{ inventory_hostname }}" openshift_hostname="{{ ansible_default_ipv4.address }}" -jdetiber-node[1:2].usersys.redhat.com openshift_public_hostname="{{ inventory_hostname }}" openshift_hostname="{{ ansible_default_ipv4.address }}" openshift_node_labels="{'region': 'primary', 'zone': 'default'}" diff --git a/inventory/hosts.example b/inventory/hosts.example new file mode 100644 index 000000000..c18a53671 --- /dev/null +++ b/inventory/hosts.example @@ -0,0 +1,1089 @@ +# This is an example of an OpenShift-Ansible host inventory + +# Create an OSEv3 group that contains the masters and nodes groups +[OSEv3:children] +masters +nodes +etcd +lb +nfs + +# Set variables common for all OSEv3 hosts +[OSEv3:vars] +# Enable unsupported configurations, things that will yield a 
partially
+# functioning cluster but would not be supported for production use
+#openshift_enable_unsupported_configurations=false
+
+# SSH user. This user should allow ssh-based auth without requiring a
+# password. If using ssh key-based auth, then the key should be managed by an
+# ssh agent.
+ansible_user=root
+
+# If ansible_user is not root, ansible_become must be set to true and the
+# user must be configured for passwordless sudo.
+#ansible_become=yes
+
+# Debug level for all OpenShift components (Defaults to 2)
+debug_level=2
+
+# Specify the deployment type. Valid values are origin and openshift-enterprise.
+openshift_deployment_type=origin
+#openshift_deployment_type=openshift-enterprise
+
+# Specify the generic release of OpenShift to install. This is used mainly just during installation, after which we
+# rely on the version running on the first master. Works best for containerized installs where we can usually
+# use this to look up the latest exact version of the container images, which is the tag actually used to configure
+# the cluster. For RPM installations we just verify the version detected in your configured repos matches this
+# release.
+openshift_release=v3.7
+
+# Specify an exact container image tag to install or configure.
+# WARNING: This value will be used for all hosts in containerized environments, even those that have another version installed.
+# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
+#openshift_image_tag=v3.7.0
+
+# Specify an exact rpm version to install or configure.
+# WARNING: This value will be used for all hosts in RPM based environments, even those that have another version installed.
+# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
+#openshift_pkg_version=-3.7.0 + +# This enables all the system containers except for docker: +#openshift_use_system_containers=False +# +# But you can choose separately each component that must be a +# system container: +# +#openshift_use_openvswitch_system_container=False +#openshift_use_node_system_container=False +#openshift_use_master_system_container=False +#openshift_use_etcd_system_container=False +# +# In either case, system_images_registry must be specified to be able to find the system images +#system_images_registry="docker.io" +# when openshift_deployment_type=='openshift-enterprise' +#system_images_registry="registry.access.redhat.com" + +# Manage openshift example imagestreams and templates during install and upgrade +#openshift_install_examples=true + +# Configure logoutURL in the master config for console customization +# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#changing-the-logout-url +#openshift_master_logout_url=http://example.com + +# Configure extensionScripts in the master config for console customization +# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#loading-custom-scripts-and-stylesheets +#openshift_master_extension_scripts=['/path/to/script1.js','/path/to/script2.js'] + +# Configure extensionStylesheets in the master config for console customization +# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#loading-custom-scripts-and-stylesheets +#openshift_master_extension_stylesheets=['/path/to/stylesheet1.css','/path/to/stylesheet2.css'] + +# Configure extensions in the master config for console customization +# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#serving-static-files +#openshift_master_extensions=[{'name': 'images', 'sourceDirectory': '/path/to/my_images'}] + +# Configure extensions in the master config for console customization +# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#serving-static-files +#openshift_master_oauth_template=/path/to/login-template.html + +# Configure imagePolicyConfig in the master config +# See: https://godoc.org/github.com/openshift/origin/pkg/cmd/server/api#ImagePolicyConfig +#openshift_master_image_policy_config={"maxImagesBulkImportedPerRepository": 3, "disableScheduledImport": true} + +# Configure master API rate limits for external clients +#openshift_master_external_ratelimit_qps=200 +#openshift_master_external_ratelimit_burst=400 +# Configure master API rate limits for loopback clients +#openshift_master_loopback_ratelimit_qps=300 +#openshift_master_loopback_ratelimit_burst=600 + +# Docker Configuration +# Add additional, insecure, and blocked registries to global docker configuration +# For enterprise deployment types we ensure that registry.access.redhat.com is +# included if you do not include it +#openshift_docker_additional_registries=registry.example.com +#openshift_docker_insecure_registries=registry.example.com +#openshift_docker_blocked_registries=registry.hacker.com +# Disable pushing to dockerhub +#openshift_docker_disable_push_dockerhub=True +# Use Docker inside a System Container. Note that this is a tech preview and should +# not be used to upgrade! 
+# The following options for docker are ignored:
+# - docker_version
+# - docker_upgrade
+# The following options must not be used
+# - openshift_docker_options
+#openshift_docker_use_system_container=False
+# Install and run cri-o alongside docker
+# NOTE: This uses openshift_docker_systemcontainer_image_registry_override as its override
+# just as container-engine does.
+#openshift_use_crio=False
+# Force the registry to use for the container-engine/crio system container. By default the registry
+# will be built off of the deployment type and ansible_distribution. Only
+# use this option if you are sure you know what you are doing!
+#openshift_docker_systemcontainer_image_override="registry.example.com/container-engine:latest"
+#openshift_crio_systemcontainer_image_override="registry.example.com/cri-o:latest"
+# NOTE: The following crio docker-gc items are tech preview and likely shouldn't be used
+# unless you know what you are doing!!
+# The following two variables are used when openshift_use_crio is True
+# and clean up after builds that pass through docker.
+# Enable docker garbage collection when using cri-o
+#openshift_crio_enable_docker_gc=false
+# Node Selectors to run the garbage collection
+#openshift_crio_docker_gc_node_selector: {'runtime': 'cri-o'}
+
+# Items added, as is, to end of /etc/sysconfig/docker OPTIONS
+# Default value: "--log-driver=journald"
+#openshift_docker_options="-l warn --ipv6=false"
+
+# Specify exact version of Docker to configure or upgrade to.
+# Downgrades are not supported and will error out. Be careful when upgrading docker from < 1.10 to > 1.10.
+# docker_version="1.12.1"
+
+# Specify whether to run Docker daemon with SELinux enabled in containers. Default is True.
+# Uncomment below to disable; for example if your kernel does not support the
+# Docker overlay/overlay2 storage drivers with SELinux enabled.
+#openshift_docker_selinux_enabled=False
+
+# Skip upgrading Docker during an OpenShift upgrade; this leaves the current Docker version alone.
+# docker_upgrade=False
+
+# Specify exact version of etcd to configure or upgrade to.
+# etcd_version="3.1.0"
+# Enable etcd debug logging, defaults to false
+# etcd_debug=true
+# Set etcd log levels by package
+# etcd_log_package_levels="etcdserver=WARNING,security=DEBUG"
+
+# Upgrade Hooks
+#
+# Hooks are available to run custom tasks at various points during a cluster
+# upgrade. Each hook should point to a file with Ansible tasks defined. Suggest using
+# absolute paths; if not, the path will be treated as relative to the file where the
+# hook is actually used.
+#
+# Tasks to run before each master is upgraded.
+# openshift_master_upgrade_pre_hook=/usr/share/custom/pre_master.yml
+#
+# Tasks to run to upgrade the master. These tasks run after the main openshift-ansible
+# upgrade steps, but before we restart system/services.
+# openshift_master_upgrade_hook=/usr/share/custom/master.yml
+#
+# Tasks to run after each master is upgraded and system/services have been restarted.
+# openshift_master_upgrade_post_hook=/usr/share/custom/post_master.yml + +# Alternate image format string, useful if you've got your own registry mirror +# Configure this setting just on node or master +#oreg_url_master=example.com/openshift3/ose-${component}:${version} +#oreg_url_node=example.com/openshift3/ose-${component}:${version} +# For setting the configuration globally +#oreg_url=example.com/openshift3/ose-${component}:${version} +# If oreg_url points to a registry other than registry.access.redhat.com we can +# modify image streams to point at that registry by setting the following to true +#openshift_examples_modify_imagestreams=true + +# If oreg_url points to a registry requiring authentication, provide the following: +#oreg_auth_user=some_user +#oreg_auth_password='my-pass' +# NOTE: oreg_url must be defined by the user for oreg_auth_* to have any affect. +# oreg_auth_pass should be generated from running docker login. +# To update registry auth credentials, uncomment the following: +#oreg_auth_credentials_replace: True + +# OpenShift repository configuration +#openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name': 'OpenShift Origin COPR', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/', 'enabled': 1, 'gpgcheck': 1, 'gpgkey': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}] +#openshift_repos_enable_testing=false + +# htpasswd auth +openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}] +# Defining htpasswd users +#openshift_master_htpasswd_users={'user1': '', 'user2': ''} +# or +#openshift_master_htpasswd_file= + +# Allow all auth +#openshift_master_identity_providers=[{'name': 'allow_all', 'login': 'true', 'challenge': 'true', 'kind': 'AllowAllPasswordIdentityProvider'}] + +# LDAP auth +#openshift_master_identity_providers=[{'name': 'my_ldap_provider', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider', 'attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': '', 'bindPassword': '', 'ca': 'my-ldap-ca.crt', 'insecure': 'false', 'url': 'ldap://ldap.example.com:389/ou=users,dc=example,dc=com?uid'}] +# +# Configure LDAP CA certificate +# Specify either the ASCII contents of the certificate or the path to +# the local file that will be copied to the remote host. CA +# certificate contents will be copied to master systems and saved +# within /etc/origin/master/ with a filename matching the "ca" key set +# within the LDAPPasswordIdentityProvider. +# +#openshift_master_ldap_ca= +# or +#openshift_master_ldap_ca_file= + +# OpenID auth +#openshift_master_identity_providers=[{"name": "openid_auth", "login": "true", "challenge": "false", "kind": "OpenIDIdentityProvider", "client_id": "my_client_id", "client_secret": "my_client_secret", "claims": {"id": ["sub"], "preferredUsername": ["preferred_username"], "name": ["name"], "email": ["email"]}, "urls": {"authorize": "https://myidp.example.com/oauth2/authorize", "token": "https://myidp.example.com/oauth2/token"}, "ca": "my-openid-ca-bundle.crt"}] +# +# Configure OpenID CA certificate +# Specify either the ASCII contents of the certificate or the path to +# the local file that will be copied to the remote host. 
CA +# certificate contents will be copied to master systems and saved +# within /etc/origin/master/ with a filename matching the "ca" key set +# within the OpenIDIdentityProvider. +# +#openshift_master_openid_ca= +# or +#openshift_master_openid_ca_file= + +# Request header auth +#openshift_master_identity_providers=[{"name": "my_request_header_provider", "challenge": "true", "login": "true", "kind": "RequestHeaderIdentityProvider", "challengeURL": "https://www.example.com/challenging-proxy/oauth/authorize?${query}", "loginURL": "https://www.example.com/login-proxy/oauth/authorize?${query}", "clientCA": "my-request-header-ca.crt", "clientCommonNames": ["my-auth-proxy"], "headers": ["X-Remote-User", "SSO-User"], "emailHeaders": ["X-Remote-User-Email"], "nameHeaders": ["X-Remote-User-Display-Name"], "preferredUsernameHeaders": ["X-Remote-User-Login"]}] +# +# Configure request header CA certificate +# Specify either the ASCII contents of the certificate or the path to +# the local file that will be copied to the remote host. CA +# certificate contents will be copied to master systems and saved +# within /etc/origin/master/ with a filename matching the "clientCA" +# key set within the RequestHeaderIdentityProvider. +# +#openshift_master_request_header_ca= +# or +#openshift_master_request_header_ca_file= + +# CloudForms Management Engine (ManageIQ) App Install +# +# Enables installation of MIQ server. Recommended for dedicated +# clusters only. See roles/openshift_management/README.md for instructions +# and requirements. +#openshift_management_install_management=False + +# Cloud Provider Configuration +# +# Note: You may make use of environment variables rather than store +# sensitive configuration within the ansible inventory. +# For example: +#openshift_cloudprovider_aws_access_key="{{ lookup('env','AWS_ACCESS_KEY_ID') }}" +#openshift_cloudprovider_aws_secret_key="{{ lookup('env','AWS_SECRET_ACCESS_KEY') }}" +# +# AWS +#openshift_cloudprovider_kind=aws +# Note: IAM profiles may be used instead of storing API credentials on disk. +#openshift_cloudprovider_aws_access_key=aws_access_key_id +#openshift_cloudprovider_aws_secret_key=aws_secret_access_key +# +# Openstack +#openshift_cloudprovider_kind=openstack +#openshift_cloudprovider_openstack_auth_url=http://openstack.example.com:35357/v2.0/ +#openshift_cloudprovider_openstack_username=username +#openshift_cloudprovider_openstack_password=password +#openshift_cloudprovider_openstack_domain_id=domain_id +#openshift_cloudprovider_openstack_domain_name=domain_name +#openshift_cloudprovider_openstack_tenant_id=tenant_id +#openshift_cloudprovider_openstack_tenant_name=tenant_name +#openshift_cloudprovider_openstack_region=region +#openshift_cloudprovider_openstack_lb_subnet_id=subnet_id +# +# GCE +#openshift_cloudprovider_kind=gce + +# Project Configuration +#osm_project_request_message='' +#osm_project_request_template='' +#osm_mcs_allocator_range='s0:/2' +#osm_mcs_labels_per_project=5 +#osm_uid_allocator_range='1000000000-1999999999/10000' + +# Configure additional projects +#openshift_additional_projects={'my-project': {'default_node_selector': 'label=value'}} + +# Enable cockpit +#osm_use_cockpit=true +# +# Set cockpit plugins +#osm_cockpit_plugins=['cockpit-kubernetes'] + +# Native high availability (default cluster method) +# If no lb group is defined, the installer assumes that a load balancer has +# been preconfigured. 
For installation the value of +# openshift_master_cluster_hostname must resolve to the load balancer +# or to one or all of the masters defined in the inventory if no load +# balancer is present. +#openshift_master_cluster_hostname=openshift-ansible.test.example.com +#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com + +# Configure controller arguments +#osm_controller_args={'resource-quota-sync-period': ['10s']} + +# Configure api server arguments +#osm_api_server_args={'max-requests-inflight': ['400']} + +# default subdomain to use for exposed routes +#openshift_master_default_subdomain=apps.test.example.com + +# additional cors origins +#osm_custom_cors_origins=['foo.example.com', 'bar.example.com'] + +# default project node selector +#osm_default_node_selector='region=primary' + +# Override the default pod eviction timeout +#openshift_master_pod_eviction_timeout=5m + +# Override the default oauth tokenConfig settings: +# openshift_master_access_token_max_seconds=86400 +# openshift_master_auth_token_max_seconds=500 + +# Override master servingInfo.maxRequestsInFlight +#openshift_master_max_requests_inflight=500 + +# Override master and node servingInfo.minTLSVersion and .cipherSuites +# valid TLS versions are VersionTLS10, VersionTLS11, VersionTLS12 +# example cipher suites override, valid cipher suites are https://golang.org/pkg/crypto/tls/#pkg-constants +#openshift_master_min_tls_version=VersionTLS12 +#openshift_master_cipher_suites=['TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256', '...'] +# +#openshift_node_min_tls_version=VersionTLS12 +#openshift_node_cipher_suites=['TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256', '...'] + +# default storage plugin dependencies to install, by default the ceph and +# glusterfs plugin dependencies will be installed, if available. +#osn_storage_plugin_deps=['ceph','glusterfs','iscsi'] + +# OpenShift Router Options +# +# An OpenShift router will be created during install if there are +# nodes present with labels matching the default router selector, +# "region=infra". Set openshift_node_labels per node as needed in +# order to label nodes. +# +# Example: +# [nodes] +# node.example.com openshift_node_labels="{'region': 'infra'}" +# +# Router selector (optional) +# Router will only be created if nodes matching this label are present. +# Default value: 'region=infra' +#openshift_hosted_router_selector='region=infra' +# +# Router replicas (optional) +# Unless specified, openshift-ansible will calculate the replica count +# based on the number of nodes matching the openshift router selector. +#openshift_hosted_router_replicas=2 +# +# Router force subdomain (optional) +# A router path format to force on all routes used by this router +# (will ignore the route host value) +#openshift_hosted_router_force_subdomain='${name}-${namespace}.apps.example.com' +# +# Router certificate (optional) +# Provide local certificate paths which will be configured as the +# router's default certificate. +#openshift_hosted_router_certificate={"certfile": "/path/to/router.crt", "keyfile": "/path/to/router.key", "cafile": "/path/to/router-ca.crt"} +# +# Manage the OpenShift Router (optional) +#openshift_hosted_manage_router=true +# +# Router sharding support has been added and can be achieved by supplying the correct +# data to the inventory. The variable to house the data is openshift_hosted_routers +# and is in the form of a list. If no data is passed then a default router will be +# created. There are multiple combinations of router sharding. 
The one described +# below supports routers on separate nodes. +# +#openshift_hosted_routers=[{'name': 'router1', 'certificate': {'certfile': '/path/to/certificate/abc.crt', 'keyfile': '/path/to/certificate/abc.key', 'cafile': '/path/to/certificate/ca.crt'}, 'replicas': 1, 'serviceaccount': 'router', 'namespace': 'default', 'stats_port': 1936, 'edits': [], 'images': 'openshift3/ose-${component}:${version}', 'selector': 'type=router1', 'ports': ['80:80', '443:443']}, {'name': 'router2', 'certificate': {'certfile': '/path/to/certificate/xyz.crt', 'keyfile': '/path/to/certificate/xyz.key', 'cafile': '/path/to/certificate/ca.crt'}, 'replicas': 1, 'serviceaccount': 'router', 'namespace': 'default', 'stats_port': 1936, 'edits': [{'action': 'append', 'key': 'spec.template.spec.containers[0].env', 'value': {'name': 'ROUTE_LABELS', 'value': 'route=external'}}], 'images': 'openshift3/ose-${component}:${version}', 'selector': 'type=router2', 'ports': ['80:80', '443:443']}] + +# OpenShift Registry Console Options +# Override the console image prefix: +# origin default is "cockpit/", enterprise default is "openshift3/" +#openshift_cockpit_deployer_prefix=registry.example.com/myrepo/ +# origin default is "kubernetes", enterprise default is "registry-console" +#openshift_cockpit_deployer_basename=my-console +# Override image version, defaults to latest for origin, vX.Y product version for enterprise +#openshift_cockpit_deployer_version=1.4.1 + +# Openshift Registry Options +# +# An OpenShift registry will be created during install if there are +# nodes present with labels matching the default registry selector, +# "region=infra". Set openshift_node_labels per node as needed in +# order to label nodes. +# +# Example: +# [nodes] +# node.example.com openshift_node_labels="{'region': 'infra'}" +# +# Registry selector (optional) +# Registry will only be created if nodes matching this label are present. +# Default value: 'region=infra' +#openshift_hosted_registry_selector='region=infra' +# +# Registry replicas (optional) +# Unless specified, openshift-ansible will calculate the replica count +# based on the number of nodes matching the openshift registry selector. +#openshift_hosted_registry_replicas=2 +# +# Validity of the auto-generated certificate in days (optional) +#openshift_hosted_registry_cert_expire_days=730 +# +# Manage the OpenShift Registry (optional) +#openshift_hosted_manage_registry=true + +# Registry Storage Options +# +# NFS Host Group +# An NFS volume will be created with path "nfs_directory/volume_name" +# on the host within the [nfs] host group. For example, the volume +# path using these options would be "/exports/registry". "exports" is +# is the name of the export served by the nfs server. "registry" is +# the name of a directory inside of "/exports". +#openshift_hosted_registry_storage_kind=nfs +#openshift_hosted_registry_storage_access_modes=['ReadWriteMany'] +# nfs_directory must conform to DNS-1123 subdomain must consist of lower case +# alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character +#openshift_hosted_registry_storage_nfs_directory=/exports +#openshift_hosted_registry_storage_nfs_options='*(rw,root_squash)' +#openshift_hosted_registry_storage_volume_name=registry +#openshift_hosted_registry_storage_volume_size=10Gi +# +# External NFS Host +# NFS volume must already exist with path "nfs_directory/_volume_name" on +# the storage_host. For example, the remote volume path using these +# options would be "nfs.example.com:/exports/registry". 
"exports" is +# is the name of the export served by the nfs server. "registry" is +# the name of a directory inside of "/exports". +#openshift_hosted_registry_storage_kind=nfs +#openshift_hosted_registry_storage_access_modes=['ReadWriteMany'] +#openshift_hosted_registry_storage_host=nfs.example.com +# nfs_directory must conform to DNS-1123 subdomain must consist of lower case +# alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character +#openshift_hosted_registry_storage_nfs_directory=/exports +#openshift_hosted_registry_storage_volume_name=registry +#openshift_hosted_registry_storage_volume_size=10Gi +# +# Openstack +# Volume must already exist. +#openshift_hosted_registry_storage_kind=openstack +#openshift_hosted_registry_storage_access_modes=['ReadWriteOnce'] +#openshift_hosted_registry_storage_openstack_filesystem=ext4 +#openshift_hosted_registry_storage_openstack_volumeID=3a650b4f-c8c5-4e0a-8ca5-eaee11f16c57 +#openshift_hosted_registry_storage_volume_size=10Gi +# +# AWS S3 +# S3 bucket must already exist. +#openshift_hosted_registry_storage_kind=object +#openshift_hosted_registry_storage_provider=s3 +#openshift_hosted_registry_storage_s3_encrypt=false +#openshift_hosted_registry_storage_s3_kmskeyid=aws_kms_key_id +#openshift_hosted_registry_storage_s3_accesskey=aws_access_key_id +#openshift_hosted_registry_storage_s3_secretkey=aws_secret_access_key +#openshift_hosted_registry_storage_s3_bucket=bucket_name +#openshift_hosted_registry_storage_s3_region=bucket_region +#openshift_hosted_registry_storage_s3_chunksize=26214400 +#openshift_hosted_registry_storage_s3_rootdirectory=/registry +#openshift_hosted_registry_pullthrough=true +#openshift_hosted_registry_acceptschema2=true +#openshift_hosted_registry_enforcequota=true +# +# Any S3 service (Minio, ExoScale, ...): Basically the same as above +# but with regionendpoint configured +# S3 bucket must already exist. +#openshift_hosted_registry_storage_kind=object +#openshift_hosted_registry_storage_provider=s3 +#openshift_hosted_registry_storage_s3_accesskey=access_key_id +#openshift_hosted_registry_storage_s3_secretkey=secret_access_key +#openshift_hosted_registry_storage_s3_regionendpoint=https://myendpoint.example.com/ +#openshift_hosted_registry_storage_s3_bucket=bucket_name +#openshift_hosted_registry_storage_s3_region=bucket_region +#openshift_hosted_registry_storage_s3_chunksize=26214400 +#openshift_hosted_registry_storage_s3_rootdirectory=/registry +#openshift_hosted_registry_pullthrough=true +#openshift_hosted_registry_acceptschema2=true +#openshift_hosted_registry_enforcequota=true +# +# Additional CloudFront Options. When using CloudFront all three +# of the followingg variables must be defined. +#openshift_hosted_registry_storage_s3_cloudfront_baseurl=https://myendpoint.cloudfront.net/ +#openshift_hosted_registry_storage_s3_cloudfront_privatekeyfile=/full/path/to/secret.pem +#openshift_hosted_registry_storage_s3_cloudfront_keypairid=yourpairid + +# Metrics deployment +# See: https://docs.openshift.com/enterprise/latest/install_config/cluster_metrics.html +# +# By default metrics are not automatically deployed, set this to enable them +#openshift_metrics_install_metrics=true +# +# Storage Options +# If openshift_metrics_storage_kind is unset then metrics will be stored +# in an EmptyDir volume and will be deleted when the cassandra pod terminates. +# Storage options A & B currently support only one cassandra pod which is +# generally enough for up to 1000 pods. 
Additional volumes can be created
+# manually after the fact and metrics scaled per the docs.
+#
+# Option A - NFS Host Group
+# An NFS volume will be created with path "nfs_directory/volume_name"
+# on the host within the [nfs] host group. For example, the volume
+# path using these options would be "/exports/metrics". "exports" is
+# the name of the export served by the nfs server. "metrics" is
+# the name of a directory inside of "/exports".
+#openshift_metrics_storage_kind=nfs
+#openshift_metrics_storage_access_modes=['ReadWriteOnce']
+#openshift_metrics_storage_nfs_directory=/exports
+#openshift_metrics_storage_nfs_options='*(rw,root_squash)'
+#openshift_metrics_storage_volume_name=metrics
+#openshift_metrics_storage_volume_size=10Gi
+#openshift_metrics_storage_labels={'storage': 'metrics'}
+#
+# Option B - External NFS Host
+# NFS volume must already exist with path "nfs_directory/_volume_name" on
+# the storage_host. For example, the remote volume path using these
+# options would be "nfs.example.com:/exports/metrics". "exports" is
+# the name of the export served by the nfs server. "metrics" is
+# the name of a directory inside of "/exports".
+#openshift_metrics_storage_kind=nfs
+#openshift_metrics_storage_access_modes=['ReadWriteOnce']
+#openshift_metrics_storage_host=nfs.example.com
+#openshift_metrics_storage_nfs_directory=/exports
+#openshift_metrics_storage_volume_name=metrics
+#openshift_metrics_storage_volume_size=10Gi
+#openshift_metrics_storage_labels={'storage': 'metrics'}
+#
+# Option C - Dynamic -- If openshift supports dynamic volume provisioning for
+# your cloud platform, use this.
+#openshift_metrics_storage_kind=dynamic
+#
+# Other Metrics Options -- Common items you may wish to reconfigure, for the complete
+# list of options please see roles/openshift_metrics/README.md
+#
+# Override metricsPublicURL in the master config for cluster metrics
+# Defaults to https://hawkular-metrics.{{openshift_master_default_subdomain}}/hawkular/metrics
+# Currently, you may only alter the hostname portion of the url; altering the
+# `/hawkular/metrics` path will break installation of metrics.
+#openshift_metrics_hawkular_hostname=hawkular-metrics.example.com
+# Configure the prefix and version for the component images
+#openshift_metrics_image_prefix=docker.io/openshift/origin-
+#openshift_metrics_image_version=v3.7
+# when openshift_deployment_type=='openshift-enterprise'
+#openshift_metrics_image_prefix=registry.access.redhat.com/openshift3/
+#openshift_metrics_image_version=v3.7
+#
+# StorageClass
+# openshift_storageclass_name=gp2
+# openshift_storageclass_parameters={'type': 'gp2', 'encrypted': 'false'}
+#
+
+# Logging deployment
+#
+# Currently logging deployment is disabled by default; enable it by setting this
+#openshift_logging_install_logging=true
+#
+# Logging storage config
+# Option A - NFS Host Group
+# An NFS volume will be created with path "nfs_directory/volume_name"
+# on the host within the [nfs] host group. For example, the volume
+# path using these options would be "/exports/logging". "exports" is
+# the name of the export served by the nfs server. "logging" is
+# the name of a directory inside of "/exports".
+#openshift_logging_storage_kind=nfs +#openshift_logging_storage_access_modes=['ReadWriteOnce'] +#openshift_logging_storage_nfs_directory=/exports +#openshift_logging_storage_nfs_options='*(rw,root_squash)' +#openshift_logging_storage_volume_name=logging +#openshift_logging_storage_volume_size=10Gi +#openshift_logging_storage_labels={'storage': 'logging'} +# +# Option B - External NFS Host +# NFS volume must already exist with path "nfs_directory/_volume_name" on +# the storage_host. For example, the remote volume path using these +# options would be "nfs.example.com:/exports/logging". "exports" is +# is the name of the export served by the nfs server. "logging" is +# the name of a directory inside of "/exports". +#openshift_logging_storage_kind=nfs +#openshift_logging_storage_access_modes=['ReadWriteOnce'] +#openshift_logging_storage_host=nfs.example.com +#openshift_logging_storage_nfs_directory=/exports +#openshift_logging_storage_volume_name=logging +#openshift_logging_storage_volume_size=10Gi +#openshift_logging_storage_labels={'storage': 'logging'} +# +# Option C - Dynamic -- If openshift supports dynamic volume provisioning for +# your cloud platform use this. +#openshift_logging_storage_kind=dynamic +# +# Option D - none -- Logging will use emptydir volumes which are destroyed when +# pods are deleted +# +# Other Logging Options -- Common items you may wish to reconfigure, for the complete +# list of options please see roles/openshift_logging/README.md +# +# Configure loggingPublicURL in the master config for aggregate logging, defaults +# to kibana.{{ openshift_master_default_subdomain }} +#openshift_logging_kibana_hostname=logging.apps.example.com +# Configure the number of elastic search nodes, unless you're using dynamic provisioning +# this value must be 1 +#openshift_logging_es_cluster_size=1 +# Configure the prefix and version for the component images +#openshift_logging_image_prefix=docker.io/openshift/origin- +#openshift_logging_image_version=v3.7.0 +# when openshift_deployment_type=='openshift-enterprise' +#openshift_logging_image_prefix=registry.access.redhat.com/openshift3/ +#openshift_logging_image_version=3.7.0 + +# Prometheus deployment +# +# Currently prometheus deployment is disabled by default, enable it by setting this +#openshift_hosted_prometheus_deploy=true +# +# Prometheus storage config +# Option A - NFS Host Group +# An NFS volume will be created with path "nfs_directory/volume_name" +# on the host within the [nfs] host group. 
For example, the volume +# path using these options would be "/exports/prometheus" +#openshift_prometheus_storage_kind=nfs +#openshift_prometheus_storage_access_modes=['ReadWriteOnce'] +#openshift_prometheus_storage_nfs_directory=/exports +#openshift_prometheus_storage_nfs_options='*(rw,root_squash)' +#openshift_prometheus_storage_volume_name=prometheus +#openshift_prometheus_storage_volume_size=10Gi +#openshift_prometheus_storage_labels={'storage': 'prometheus'} +#openshift_prometheus_storage_type='pvc' +# For prometheus-alertmanager +#openshift_prometheus_alertmanager_storage_kind=nfs +#openshift_prometheus_alertmanager_storage_access_modes=['ReadWriteOnce'] +#openshift_prometheus_alertmanager_storage_nfs_directory=/exports +#openshift_prometheus_alertmanager_storage_nfs_options='*(rw,root_squash)' +#openshift_prometheus_alertmanager_storage_volume_name=prometheus-alertmanager +#openshift_prometheus_alertmanager_storage_volume_size=10Gi +#openshift_prometheus_alertmanager_storage_labels={'storage': 'prometheus-alertmanager'} +#openshift_prometheus_alertmanager_storage_type='pvc' +# For prometheus-alertbuffer +#openshift_prometheus_alertbuffer_storage_kind=nfs +#openshift_prometheus_alertbuffer_storage_access_modes=['ReadWriteOnce'] +#openshift_prometheus_alertbuffer_storage_nfs_directory=/exports +#openshift_prometheus_alertbuffer_storage_nfs_options='*(rw,root_squash)' +#openshift_prometheus_alertbuffer_storage_volume_name=prometheus-alertbuffer +#openshift_prometheus_alertbuffer_storage_volume_size=10Gi +#openshift_prometheus_alertbuffer_storage_labels={'storage': 'prometheus-alertbuffer'} +#openshift_prometheus_alertbuffer_storage_type='pvc' +# +# Option B - External NFS Host +# NFS volume must already exist with path "nfs_directory/_volume_name" on +# the storage_host. 
For example, the remote volume path using these
+# options would be "nfs.example.com:/exports/prometheus"
+#openshift_prometheus_storage_kind=nfs
+#openshift_prometheus_storage_access_modes=['ReadWriteOnce']
+#openshift_prometheus_storage_host=nfs.example.com
+#openshift_prometheus_storage_nfs_directory=/exports
+#openshift_prometheus_storage_volume_name=prometheus
+#openshift_prometheus_storage_volume_size=10Gi
+#openshift_prometheus_storage_labels={'storage': 'prometheus'}
+#openshift_prometheus_storage_type='pvc'
+# For prometheus-alertmanager
+#openshift_prometheus_alertmanager_storage_kind=nfs
+#openshift_prometheus_alertmanager_storage_access_modes=['ReadWriteOnce']
+#openshift_prometheus_alertmanager_storage_host=nfs.example.com
+#openshift_prometheus_alertmanager_storage_nfs_directory=/exports
+#openshift_prometheus_alertmanager_storage_volume_name=prometheus-alertmanager
+#openshift_prometheus_alertmanager_storage_volume_size=10Gi
+#openshift_prometheus_alertmanager_storage_labels={'storage': 'prometheus-alertmanager'}
+#openshift_prometheus_alertmanager_storage_type='pvc'
+# For prometheus-alertbuffer
+#openshift_prometheus_alertbuffer_storage_kind=nfs
+#openshift_prometheus_alertbuffer_storage_access_modes=['ReadWriteOnce']
+#openshift_prometheus_alertbuffer_storage_host=nfs.example.com
+#openshift_prometheus_alertbuffer_storage_nfs_directory=/exports
+#openshift_prometheus_alertbuffer_storage_volume_name=prometheus-alertbuffer
+#openshift_prometheus_alertbuffer_storage_volume_size=10Gi
+#openshift_prometheus_alertbuffer_storage_labels={'storage': 'prometheus-alertbuffer'}
+#openshift_prometheus_alertbuffer_storage_type='pvc'
+#
+# Option C - none -- Prometheus, alertmanager and alertbuffer will use emptydir volumes
+# which are destroyed when pods are deleted
+
+# Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet')
+# os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'
+
+# Disable the OpenShift SDN plugin
+# openshift_use_openshift_sdn=False
+
+# Configure SDN cluster network and kubernetes service CIDR blocks. These
+# network blocks should be private and should not conflict with network blocks
+# in your infrastructure that pods may require access to. Cannot be changed
+# after deployment.
+#
+# WARNING : Do not pick subnets that overlap with the default Docker bridge subnet of
+# 172.17.0.0/16. Your installation will fail and/or your configuration change will
+# cause the Pod SDN or Cluster SDN to fail.
+#
+# WORKAROUND : If you must use an overlapping subnet, you can configure a non-conflicting
+# docker0 CIDR range by adding '--bip=192.168.2.1/24' to the DOCKER_NETWORK_OPTIONS
+# environment variable located in /etc/sysconfig/docker-network.
+# When upgrading or scaling up the following must match what's in your master config!
+# Inventory: master yaml field
+# osm_cluster_network_cidr: clusterNetworkCIDR
+# openshift_portal_net: serviceNetworkCIDR
+# When installing, osm_cluster_network_cidr and openshift_portal_net must be set.
+# Sane examples are provided below.
+#osm_cluster_network_cidr=10.128.0.0/14
+#openshift_portal_net=172.30.0.0/16
+
+# ExternalIPNetworkCIDRs controls what values are acceptable for the
+# service external IP field. If empty, no externalIP may be set. It
+# may contain a list of CIDRs which are checked for access. If a CIDR
+# is prefixed with !, IPs in that CIDR will be rejected. Rejections
+# will be applied first, then the IP checked against one of the
+# allowed CIDRs. 
You should ensure this range does not overlap with
+# your nodes, pods, or service CIDRs for security reasons.
+#openshift_master_external_ip_network_cidrs=['0.0.0.0/0']
+
+# IngressIPNetworkCIDR controls the range to assign ingress IPs from for
+# services of type LoadBalancer on bare metal. If empty, ingress IPs will not
+# be assigned. It may contain a single CIDR that will be allocated from. For
+# security reasons, you should ensure that this range does not overlap with
+# the CIDRs reserved for external IPs, nodes, pods, or services.
+#openshift_master_ingress_ip_network_cidr=172.46.0.0/16
+
+# Configure number of bits to allocate to each host's subnet, e.g. 9
+# would mean a /23 network on the host.
+# When upgrading or scaling up the following must match what's in your master config!
+# Inventory: master yaml field
+# osm_host_subnet_length: hostSubnetLength
+# When installing, osm_host_subnet_length must be set. A sane example is provided below.
+#osm_host_subnet_length=9
+
+# Configure master API and console ports.
+#openshift_master_api_port=8443
+#openshift_master_console_port=8443
+
+# set exact RPM version (include - prefix)
+#openshift_pkg_version=-3.6.0
+# you may also specify version and release, ie:
+#openshift_pkg_version=-3.7.0-0.126.0.git.0.9351aae.el7
+
+# Configure custom ca certificate
+#openshift_master_ca_certificate={'certfile': '/path/to/ca.crt', 'keyfile': '/path/to/ca.key'}
+#
+# NOTE: CA certificate will not be replaced on existing clusters.
+# This option may only be specified when creating a new cluster or
+# when redeploying cluster certificates with the redeploy-certificates
+# playbook.
+
+# Configure custom named certificates (SNI certificates)
+#
+# https://docs.openshift.org/latest/install_config/certificate_customization.html
+# https://docs.openshift.com/enterprise/latest/install_config/certificate_customization.html
+#
+# NOTE: openshift_master_named_certificates is cached on masters and is an
+# additive fact, meaning that each run with a different set of certificates
+# will add the newly provided certificates to the cached set of certificates.
+#
+# An optional CA may be specified for each named certificate. CAs will
+# be added to the OpenShift CA bundle which allows for the named
+# certificate to be served for internal cluster communication.
+#
+# If you would like openshift_master_named_certificates to be overwritten with
+# the provided value, specify openshift_master_overwrite_named_certificates.
+#openshift_master_overwrite_named_certificates=true
+#
+# Provide local certificate paths which will be deployed to masters
+#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "cafile": "/path/to/custom-ca1.crt"}]
+#
+# Detected names may be overridden by specifying the "names" key
+#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "names": ["public-master-host.com"], "cafile": "/path/to/custom-ca1.crt"}]
+
+# Session options
+#openshift_master_session_name=ssn
+#openshift_master_session_max_seconds=3600
+
+# An authentication and encryption secret will be generated if secrets
+# are not provided. If provided, openshift_master_session_auth_secrets
+# and openshift_master_session_encryption_secrets must be of equal length.
+#
+# Signing secrets, used to authenticate sessions using
+# HMAC. Recommended to use secrets with 32 or 64 bytes.
+#openshift_master_session_auth_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO'] +# +# Encrypting secrets, used to encrypt sessions. Must be 16, 24, or 32 +# characters long, to select AES-128, AES-192, or AES-256. +#openshift_master_session_encryption_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO'] + +# configure how often node iptables rules are refreshed +#openshift_node_iptables_sync_period=5s + +# Configure nodeIP in the node config +# This is needed in cases where node traffic is desired to go over an +# interface other than the default network interface. +#openshift_set_node_ip=True + +# Configure dnsIP in the node config +#openshift_dns_ip=172.30.0.1 + +# Configure node kubelet arguments. pods-per-core is valid in OpenShift Origin 1.3 or OpenShift Container Platform 3.3 and later. +#openshift_node_kubelet_args={'pods-per-core': ['10'], 'max-pods': ['250'], 'image-gc-high-threshold': ['85'], 'image-gc-low-threshold': ['80']} + +# Configure logrotate scripts +# See: https://github.com/nickhammond/ansible-logrotate +#logrotate_scripts=[{"name": "syslog", "path": "/var/log/cron\n/var/log/maillog\n/var/log/messages\n/var/log/secure\n/var/log/spooler\n", "options": ["daily", "rotate 7", "compress", "sharedscripts", "missingok"], "scripts": {"postrotate": "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"}}] + +# openshift-ansible will wait indefinitely for your input when it detects that the +# value of openshift_hostname resolves to an IP address not bound to any local +# interfaces. This mis-configuration is problematic for any pod leveraging host +# networking and liveness or readiness probes. +# Setting this variable to true will override that check. +#openshift_override_hostname_check=true + +# openshift_use_dnsmasq is deprecated. This must be true, or installs will fail +# in versions >= 3.6 +#openshift_use_dnsmasq=False + +# Define an additional dnsmasq.conf file to deploy to /etc/dnsmasq.d/openshift-ansible.conf +# This is useful for POC environments where DNS may not actually be available yet or to set +# options like 'strict-order' to alter dnsmasq configuration. +#openshift_node_dnsmasq_additional_config_file=/home/bob/ose-dnsmasq.conf + +# Global Proxy Configuration +# These options configure HTTP_PROXY, HTTPS_PROXY, and NOPROXY environment +# variables for docker and master services. +# +# Hosts in the openshift_no_proxy list will NOT use any globally +# configured HTTP(S)_PROXYs. openshift_no_proxy accepts domains +# (.example.com), hosts (example.com), and IP addresses. +#openshift_http_proxy=http://USER:PASSWORD@IPADDR:PORT +#openshift_https_proxy=https://USER:PASSWORD@IPADDR:PORT +#openshift_no_proxy='.hosts.example.com,some-host.com' +# +# Most environments don't require a proxy between openshift masters, nodes, and +# etcd hosts. So automatically add those hostnames to the openshift_no_proxy list. +# If all of your hosts share a common domain you may wish to disable this and +# specify that domain above instead. +# +# For example, having hosts with FQDNs: m1.ex.com, n1.ex.com, and +# n2.ex.com, one would simply add '.ex.com' to the openshift_no_proxy +# variable (above) and set this value to False +#openshift_generate_no_proxy_hosts=True +# +# These options configure the BuildDefaults admission controller which injects +# configuration into Builds. Proxy related values will default to the global proxy +# config values. You only need to set these if they differ from the global proxy settings. 
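To tie the global proxy options above together, here is a hedged sketch of one coherent combination; the proxy host, port, and domain are placeholders introduced for illustration, not values taken from this repository.

```
# Sketch only: send master, node, and docker traffic through a corporate
# proxy while keeping corporate-internal hosts direct. With
# openshift_generate_no_proxy_hosts=True the installer also adds the
# cluster's own master/node/etcd hostnames to NO_PROXY.
#openshift_http_proxy=http://proxy.corp.example.com:3128
#openshift_https_proxy=https://proxy.corp.example.com:3128
#openshift_no_proxy='.corp.example.com,internal-mirror.corp.example.com'
#openshift_generate_no_proxy_hosts=True
```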
+# See BuildDefaults documentation at +# https://docs.openshift.org/latest/admin_guide/build_defaults_overrides.html +#openshift_builddefaults_http_proxy=http://USER:PASSWORD@HOST:PORT +#openshift_builddefaults_https_proxy=https://USER:PASSWORD@HOST:PORT +#openshift_builddefaults_no_proxy=mycorp.com +#openshift_builddefaults_git_http_proxy=http://USER:PASSWORD@HOST:PORT +#openshift_builddefaults_git_https_proxy=https://USER:PASSWORD@HOST:PORT +#openshift_builddefaults_git_no_proxy=mycorp.com +#openshift_builddefaults_image_labels=[{'name':'imagelabelname1','value':'imagelabelvalue1'}] +#openshift_builddefaults_nodeselectors={'nodelabel1':'nodelabelvalue1'} +#openshift_builddefaults_annotations={'annotationkey1':'annotationvalue1'} +#openshift_builddefaults_resources_requests_cpu=100m +#openshift_builddefaults_resources_requests_memory=256Mi +#openshift_builddefaults_resources_limits_cpu=1000m +#openshift_builddefaults_resources_limits_memory=512Mi + +# Or you may optionally define your own build defaults configuration serialized as json +#openshift_builddefaults_json='{"BuildDefaults":{"configuration":{"apiVersion":"v1","env":[{"name":"HTTP_PROXY","value":"http://proxy.example.com.redhat.com:3128"},{"name":"NO_PROXY","value":"ose3-master.example.com"}],"gitHTTPProxy":"http://proxy.example.com:3128","gitNoProxy":"ose3-master.example.com","kind":"BuildDefaultsConfig"}}}' + +# These options configure the BuildOverrides admission controller which injects +# configuration into Builds. +# See BuildOverrides documentation at +# https://docs.openshift.org/latest/admin_guide/build_defaults_overrides.html +#openshift_buildoverrides_force_pull=true +#openshift_buildoverrides_image_labels=[{'name':'imagelabelname1','value':'imagelabelvalue1'}] +#openshift_buildoverrides_nodeselectors={'nodelabel1':'nodelabelvalue1'} +#openshift_buildoverrides_annotations={'annotationkey1':'annotationvalue1'} + +# Or you may optionally define your own build overrides configuration serialized as json +#openshift_buildoverrides_json='{"BuildOverrides":{"configuration":{"apiVersion":"v1","kind":"BuildDefaultsConfig","forcePull":"true"}}}' + +# Enable service catalog +#openshift_enable_service_catalog=true + +# Enable template service broker (requires service catalog to be enabled, above) +#template_service_broker_install=true + +# Force a specific prefix (IE: registry) to use when pulling the service catalog image +# NOTE: The registry all the way up to the start of the image name must be provided. Two examples +# below are provided. +#openshift_service_catalog_image_prefix=docker.io/openshift/origin- +#openshift_service_catalog_image_prefix=registry.access.redhat.com/openshift3/ose- +# Force a specific image version to use when pulling the service catalog image +#openshift_service_catalog_image_version=v3.7 + +# Configure one of more namespaces whose templates will be served by the TSB +#openshift_template_service_broker_namespaces=['openshift'] + +# masterConfig.volumeConfig.dynamicProvisioningEnabled, configurable as of 1.2/3.2, enabled by default +#openshift_master_dynamic_provisioning_enabled=False + +# Admission plugin config +#openshift_master_admission_plugin_config={"ProjectRequestLimit":{"configuration":{"apiVersion":"v1","kind":"ProjectRequestLimitConfig","limits":[{"selector":{"admin":"true"}},{"maxProjects":"1"}]}},"PodNodeConstraints":{"configuration":{"apiVersion":"v1","kind":"PodNodeConstraintsConfig"}}} + +# Configure usage of openshift_clock role. 
+#openshift_clock_enabled=true + +# OpenShift Per-Service Environment Variables +# Environment variables are added to /etc/sysconfig files for +# each OpenShift service: node, master (api and controllers). +# API and controllers environment variables are merged in single +# master environments. +#openshift_master_api_env_vars={"ENABLE_HTTP2": "true"} +#openshift_master_controllers_env_vars={"ENABLE_HTTP2": "true"} +#openshift_node_env_vars={"ENABLE_HTTP2": "true"} + +# Enable API service auditing +#openshift_master_audit_config={"enabled": true} +# +# In case you want a more advanced setup for the audit log you can +# use this line. +# The directory in "auditFilePath" will be created if it does not +# exist. +#openshift_master_audit_config={"enabled": true, "auditFilePath": "/var/log/openpaas-oscp-audit/openpaas-oscp-audit.log", "maximumFileRetentionDays": 14, "maximumFileSizeMegabytes": 500, "maximumRetainedFiles": 5} + +# Enable origin repos that point at CentOS PaaS SIG, defaults to true, only used +# by deployment_type=origin +#openshift_enable_origin_repo=false + +# Validity of the auto-generated OpenShift certificates in days. +# See also openshift_hosted_registry_cert_expire_days above. +# +#openshift_ca_cert_expire_days=1825 +#openshift_node_cert_expire_days=730 +#openshift_master_cert_expire_days=730 + +# Validity of the auto-generated external etcd certificates in days. +# Controls validity for etcd CA, peer, server and client certificates. +# +#etcd_ca_default_days=1825 +# +# ServiceAccountConfig:LimitSecretReferences rejects pods that reference secrets their service accounts do not reference +# openshift_master_saconfig_limitsecretreferences=false + +# Upgrade Control +# +# By default nodes are upgraded in a serial manner, one at a time, and all failures +# are fatal. There is one set of variables for normal nodes and one set for +# nodes that are part of the control plane, as the number of hosts may be different +# in those two groups. +#openshift_upgrade_nodes_serial=1 +#openshift_upgrade_nodes_max_fail_percentage=0 +#openshift_upgrade_control_plane_nodes_serial=1 +#openshift_upgrade_control_plane_nodes_max_fail_percentage=0 +# +# You can specify the number of nodes to upgrade at once. We do not currently +# attempt to verify that you have capacity to drain this many nodes at once +# so please be careful when specifying these values. You should also verify that +# the expected number of nodes are all schedulable and ready before starting an +# upgrade. If it's not possible to drain the requested nodes, the upgrade will +# stall indefinitely until the drain is successful. +# +# If you're upgrading more than one node at a time you can specify the maximum +# percentage of failure within the batch before the upgrade is aborted. Any +# nodes that do fail are ignored for the rest of the playbook run and you should +# take care to investigate the failure and return the node to service so that +# your cluster can return to full capacity.
+# +# The failure percentage must exceed the configured value for the upgrade to abort; this would fail on two failures +# openshift_upgrade_nodes_serial=4 openshift_upgrade_nodes_max_fail_percentage=49 +# whereas this would not +# openshift_upgrade_nodes_serial=4 openshift_upgrade_nodes_max_fail_percentage=50 +# (a short worked restatement of this rule follows after the CFME/MIQ section below) +# +# Multiple data migrations take place and if they fail they will fail the upgrade. +# You may wish to disable these or make them non-fatal. +# +# openshift_upgrade_pre_storage_migration_enabled=true +# openshift_upgrade_pre_storage_migration_fatal=true +# openshift_upgrade_post_storage_migration_enabled=true +# openshift_upgrade_post_storage_migration_fatal=false + +###################################################################### +# CloudForms/ManageIQ (CFME/MIQ) Configuration + +# See the readme for full descriptions and getting started +# instructions: ../../roles/openshift_management/README.md or go directly to +# their definitions: ../../roles/openshift_management/defaults/main.yml +# ../../roles/openshift_management/vars/main.yml +# +# Namespace for the CFME project +#openshift_management_project: openshift-management + +# Namespace/project description +#openshift_management_project_description: CloudForms Management Engine + +# Choose 'miq-template' for a podified database install +# Choose 'miq-template-ext-db' for an external database install +# +# If you are using the miq-template-ext-db template then you must add +# the required database parameters to the +# openshift_management_template_parameters variable. +#openshift_management_app_template: miq-template + +# Allowed options: nfs, nfs_external, preconfigured, cloudprovider. +#openshift_management_storage_class: nfs + +# [OPTIONAL] - If you are using an EXTERNAL NFS server, such as a +# NetApp appliance, then you must set the hostname here. Leave the +# value as 'false' if you are not using external NFS. +#openshift_management_storage_nfs_external_hostname: false + +# [OPTIONAL] - If you are using external NFS then you must set the base +# path to the exports location here. +# +# Additionally: EXTERNAL NFS REQUIRES that YOU CREATE the nfs exports +# that will back the application PV and optionally the database +# pv. Export path definitions are relative to +# {{ openshift_management_storage_nfs_base_dir }}. +# +# LOCAL NFS NOTE: +# +# You may also change this value if you want to change the default +# path used for local NFS exports. +#openshift_management_storage_nfs_base_dir: /exports + +# LOCAL NFS NOTE: +# +# You may override the automatically selected LOCAL NFS server by +# setting this variable. Useful for testing specific task files. +#openshift_management_storage_nfs_local_hostname: false + +# These are the default values for the username and password of the +# management app. Changing these values in your inventory will not +# change your username or password. You should only need to change +# these values in your inventory if you already changed the actual +# name and password AND are trying to use integration scripts. +# +# For example, when adding this cluster as a container provider via +# playbooks/openshift-management/add_container_provider.yml. +#openshift_management_username: admin +#openshift_management_password: smartvm + +# A hash of parameters you want to override or set in the +# miq-template.yaml or miq-template-ext-db.yaml templates. Set this in +# your inventory file as a simple hash.
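Referring back to the Upgrade Control section above (as noted there), this is a short worked restatement of the failure-percentage rule using the same sample numbers; it is a sketch, not additional configuration.

```
# Sketch only: with openshift_upgrade_nodes_serial=4, each batch has 4 nodes.
#   2 failures out of 4 = 50%; 50 > 49, so max_fail_percentage=49 aborts the run.
#   2 failures out of 4 = 50%; 50 is not > 50, so max_fail_percentage=50 continues.
# The batch aborts only when the observed failure percentage strictly exceeds
# openshift_upgrade_nodes_max_fail_percentage.
```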
Acceptable values are defined +# under the .parameters list in files/miq-template{-ext-db}.yaml +# Example: +# +# openshift_management_template_parameters={'APPLICATION_MEM_REQ': '512Mi'} +#openshift_management_template_parameters: {} + +# Firewall configuration +# You can open additional firewall ports by defining them as a list of service +# names and ports/port ranges for either masters or nodes. +#openshift_master_open_ports=[{"service":"svc1","port":"11/tcp"}] +#openshift_node_open_ports=[{"service":"svc2","port":"12-13/tcp"},{"service":"svc3","port":"14/udp"}] + +# host group for masters +[masters] +ose3-master[1:3]-ansible.test.example.com + +[etcd] +ose3-etcd[1:3]-ansible.test.example.com + +# NOTE: Containerized load balancer hosts are not yet supported. If using a global +# containerized=true host variable, it must be set to false for these hosts. +[lb] +ose3-lb-ansible.test.example.com containerized=false + +# NOTE: Currently we require that masters be part of the SDN which requires that they also be nodes. +# However, in order to ensure that your masters are not burdened with running pods you should +# make them unschedulable by adding openshift_schedulable=False to any node that's also a master. +[nodes] +ose3-master[1:3]-ansible.test.example.com +ose3-node[1:2]-ansible.test.example.com openshift_node_labels="{'region': 'primary', 'zone': 'default'}" + +[nfs] +ose3-nfs-ansible.test.example.com diff --git a/inventory/hosts.glusterfs.external.example b/inventory/hosts.glusterfs.external.example new file mode 100644 index 000000000..bf2557cf0 --- /dev/null +++ b/inventory/hosts.glusterfs.external.example @@ -0,0 +1,60 @@ +# This is an example of an OpenShift-Ansible host inventory for a cluster +# with natively hosted, containerized GlusterFS storage. +# +# This inventory may be used with the deploy_cluster.yml playbook to deploy a new +# cluster with GlusterFS storage, which will use that storage to create a +# volume that will provide backend storage for a hosted Docker registry. +# +# This inventory may also be used with openshift-glusterfs/config.yml to +# deploy GlusterFS storage on an existing cluster. With this playbook, the +# registry backend volume will be created but the administrator must then +# either deploy a hosted registry or change an existing hosted registry to use +# that volume. +# +# There are additional configuration parameters that can be specified to +# control the deployment and state of a GlusterFS cluster. Please see the +# documentation in playbooks/openshift-glusterfs/README.md and +# roles/openshift_storage_glusterfs/README.md for additional details. + +[OSEv3:children] +masters +nodes +etcd +# Specify there will be GlusterFS nodes +glusterfs + +[OSEv3:vars] +ansible_ssh_user=root +openshift_deployment_type=origin +# Specify that we want to use an external GlusterFS cluster +openshift_storage_glusterfs_is_native=False +# Specify the IP address or hostname of the external heketi service +openshift_storage_glusterfs_heketi_url=172.0.0.1 + +[masters] +master + +[nodes] +master openshift_schedulable=False +node0 openshift_schedulable=True +node1 openshift_schedulable=True +node2 openshift_schedulable=True + +[etcd] +master + +# Specify the glusterfs group, which contains the nodes of the external +# GlusterFS cluster. At a minimum, each node must have "glusterfs_hostname" +# and "glusterfs_devices" variables defined. +# +# The first variable indicates the hostname of the external GlusterFS node, +# and must be reachable by the external heketi service.
+# +# The second variable is a list of block devices the node will have access to +# that are intended solely for use as GlusterFS storage. These block devices +# must be bare (e.g. have no data, not be marked as LVM PVs), and will be +# formatted. +[glusterfs] +node0.local glusterfs_ip='172.0.0.10' glusterfs_devices='[ "/dev/vdb" ]' +node1.local glusterfs_ip='172.0.0.11' glusterfs_devices='[ "/dev/vdb", "/dev/vdc" ]' +node2.local glusterfs_ip='172.0.0.11' glusterfs_devices='[ "/dev/vdd" ]' diff --git a/inventory/hosts.glusterfs.mixed.example b/inventory/hosts.glusterfs.mixed.example new file mode 100644 index 000000000..8a20a037e --- /dev/null +++ b/inventory/hosts.glusterfs.mixed.example @@ -0,0 +1,63 @@ +# This is an example of an OpenShift-Ansible host inventory for a cluster +# with natively hosted, containerized GlusterFS storage. +# +# This inventory may be used with the deploy_cluster.yml playbook to deploy a new +# cluster with GlusterFS storage, which will use that storage to create a +# volume that will provide backend storage for a hosted Docker registry. +# +# This inventory may also be used with openshift-glusterfs/config.yml to +# deploy GlusterFS storage on an existing cluster. With this playbook, the +# registry backend volume will be created but the administrator must then +# either deploy a hosted registry or change an existing hosted registry to use +# that volume. +# +# There are additional configuration parameters that can be specified to +# control the deployment and state of a GlusterFS cluster. Please see the +# documentation in playbooks/openshift-glusterfs/README.md and +# roles/openshift_storage_glusterfs/README.md for additional details. + +[OSEv3:children] +masters +nodes +etcd +# Specify there will be GlusterFS nodes +glusterfs + +[OSEv3:vars] +ansible_ssh_user=root +openshift_deployment_type=origin +# Specify that we want to use an external GlusterFS cluster and a native +# heketi service +openshift_storage_glusterfs_is_native=False +openshift_storage_glusterfs_heketi_is_native=True +# Specify that heketi will use SSH to communicate to the GlusterFS nodes and +# the private key file it will use for authentication +openshift_storage_glusterfs_heketi_executor=ssh +openshift_storage_glusterfs_heketi_ssh_keyfile=/root/id_rsa +[masters] +master + +[nodes] +master openshift_schedulable=False +node0 openshift_schedulable=True +node1 openshift_schedulable=True +node2 openshift_schedulable=True + +[etcd] +master + +# Specify the glusterfs group, which contains the nodes of the external +# GlusterFS cluster. At a minimum, each node must have "glusterfs_hostname" +# and "glusterfs_devices" variables defined. +# +# The first variable indicates the hostname of the external GLusterFS node, +# and must be reachable by the external heketi service. +# +# The second variable is a list of block devices the node will have access to +# that are intended solely for use as GlusterFS storage. These block devices +# must be bare (e.g. have no data, not be marked as LVM PVs), and will be +# formatted. 
+[glusterfs] +node0.local glusterfs_ip='172.0.0.10' glusterfs_devices='[ "/dev/vdb" ]' +node1.local glusterfs_ip='172.0.0.11' glusterfs_devices='[ "/dev/vdb", "/dev/vdc" ]' +node2.local glusterfs_ip='172.0.0.11' glusterfs_devices='[ "/dev/vdd" ]' diff --git a/inventory/hosts.glusterfs.native.example b/inventory/hosts.glusterfs.native.example new file mode 100644 index 000000000..59acf1194 --- /dev/null +++ b/inventory/hosts.glusterfs.native.example @@ -0,0 +1,50 @@ +# This is an example of an OpenShift-Ansible host inventory for a cluster +# with natively hosted, containerized GlusterFS storage for applications. It +# will also automatically create a StorageClass for this purpose. +# +# This inventory may be used with the deploy_cluster.yml playbook to deploy a new +# cluster with GlusterFS storage. +# +# This inventory may also be used with openshift-glusterfs/config.yml to +# deploy GlusterFS storage on an existing cluster. +# +# There are additional configuration parameters that can be specified to +# control the deployment and state of a GlusterFS cluster. Please see the +# documentation in playbooks/openshift-glusterfs/README.md and +# roles/openshift_storage_glusterfs/README.md for additional details. + +[OSEv3:children] +masters +nodes +etcd +# Specify there will be GlusterFS nodes +glusterfs + +[OSEv3:vars] +ansible_ssh_user=root +openshift_deployment_type=origin + +[masters] +master + +[nodes] +master openshift_schedulable=False +# A hosted registry, by default, will only be deployed on nodes labeled +# "region=infra". +node0 openshift_schedulable=True +node1 openshift_schedulable=True +node2 openshift_schedulable=True + +[etcd] +master + +# Specify the glusterfs group, which contains the nodes that will host +# GlusterFS storage pods. At a minimum, each node must have a +# "glusterfs_devices" variable defined. This variable is a list of block +# devices the node will have access to that is intended solely for use as +# GlusterFS storage. These block devices must be bare (e.g. have no data, not +# be marked as LVM PVs), and will be formatted. +[glusterfs] +node0 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]' +node1 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]' +node2 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]' diff --git a/inventory/hosts.glusterfs.registry-only.example b/inventory/hosts.glusterfs.registry-only.example new file mode 100644 index 000000000..6f33e9f6d --- /dev/null +++ b/inventory/hosts.glusterfs.registry-only.example @@ -0,0 +1,56 @@ +# This is an example of an OpenShift-Ansible host inventory for a cluster +# with natively hosted, containerized GlusterFS storage for exclusive use +# as storage for a natively hosted Docker registry. +# +# This inventory may be used with the deploy_cluster.yml playbook to deploy a new +# cluster with GlusterFS storage, which will use that storage to create a +# volume that will provide backend storage for a hosted Docker registry. +# +# This inventory may also be used with openshift-glusterfs/registry.yml to +# deploy GlusterFS storage on an existing cluster. With this playbook, the +# registry backend volume will be created but the administrator must then +# either deploy a hosted registry or change an existing hosted registry to use +# that volume. +# +# There are additional configuration parameters that can be specified to +# control the deployment and state of a GlusterFS cluster. 
Please see the +# documentation in playbooks/openshift-glusterfs/README.md and +# roles/openshift_storage_glusterfs/README.md for additional details. + +[OSEv3:children] +masters +nodes +etcd +# Specify there will be GlusterFS nodes +glusterfs_registry + +[OSEv3:vars] +ansible_ssh_user=root +openshift_deployment_type=origin +# Specify that we want to use GlusterFS storage for a hosted registry +openshift_hosted_registry_storage_kind=glusterfs + +[masters] +master + +[nodes] +master openshift_schedulable=False +# A hosted registry, by default, will only be deployed on nodes labeled +# "region=infra". +node0 openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True +node1 openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True +node2 openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True + +[etcd] +master + +# Specify the glusterfs group, which contains the nodes that will host +# GlusterFS storage pods. At a minimum, each node must have a +# "glusterfs_devices" variable defined. This variable is a list of block +# devices the node will have access to that is intended solely for use as +# GlusterFS storage. These block devices must be bare (e.g. have no data, not +# be marked as LVM PVs), and will be formatted. +[glusterfs_registry] +node0 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]' +node1 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]' +node2 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]' diff --git a/inventory/hosts.glusterfs.storage-and-registry.example b/inventory/hosts.glusterfs.storage-and-registry.example new file mode 100644 index 000000000..1f3a4282a --- /dev/null +++ b/inventory/hosts.glusterfs.storage-and-registry.example @@ -0,0 +1,67 @@ +# This is an example of an OpenShift-Ansible host inventory for a cluster +# with natively hosted, containerized GlusterFS storage for both general +# application use and a natively hosted Docker registry. It will also create a +# StorageClass for the general storage. +# +# This inventory may be used with the deploy_cluster.yml playbook to deploy a new +# cluster with GlusterFS storage. +# +# This inventory may also be used with openshift-glusterfs/config.yml to +# deploy GlusterFS storage on an existing cluster. With this playbook, the +# registry backend volume will be created but the administrator must then +# either deploy a hosted registry or change an existing hosted registry to use +# that volume. +# +# There are additional configuration parameters that can be specified to +# control the deployment and state of a GlusterFS cluster. Please see the +# documentation in playbooks/openshift-glusterfs/README.md and +# roles/openshift_storage_glusterfs/README.md for additional details. + +[OSEv3:children] +masters +nodes +etcd +# Specify there will be GlusterFS nodes +glusterfs +glusterfs_registry + +[OSEv3:vars] +ansible_ssh_user=root +openshift_deployment_type=origin +# Specify that we want to use GlusterFS storage for a hosted registry +openshift_hosted_registry_storage_kind=glusterfs + +[masters] +master + +[nodes] +master openshift_schedulable=False +# It is recommended to not use a single cluster for both general and registry +# storage, so two three-node clusters will be required. +node0 openshift_schedulable=True +node1 openshift_schedulable=True +node2 openshift_schedulable=True +# A hosted registry, by default, will only be deployed on nodes labeled +# "region=infra". 
+node3 openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True +node4 openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True +node5 openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True + +[etcd] +master + +# Specify the glusterfs group, which contains the nodes that will host +# GlusterFS storage pods. At a minimum, each node must have a +# "glusterfs_devices" variable defined. This variable is a list of block +# devices the node will have access to that is intended solely for use as +# GlusterFS storage. These block devices must be bare (e.g. have no data, not +# be marked as LVM PVs), and will be formatted. +[glusterfs] +node0 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]' +node1 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]' +node2 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]' + +[glusterfs_registry] +node3 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]' +node4 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]' +node5 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]' diff --git a/inventory/hosts.openstack b/inventory/hosts.openstack new file mode 100644 index 000000000..d928c2b86 --- /dev/null +++ b/inventory/hosts.openstack @@ -0,0 +1,37 @@ +# This is an example of an OpenShift-Ansible host inventory + +# Create an OSEv3 group that contains the masters and nodes groups +[OSEv3:children] +masters +nodes +etcd +lb + +# Set variables common for all OSEv3 hosts +[OSEv3:vars] +ansible_ssh_user=cloud-user +ansible_become=yes + +# Debug level for all OpenShift components (Defaults to 2) +debug_level=2 + +openshift_deployment_type=openshift-enterprise + +openshift_additional_repos=[{'id': 'ose-3.1', 'name': 'ose-3.1', 'baseurl': 'http://pulp.dist.prod.ext.phx2.redhat.com/content/dist/rhel/server/7/7Server/x86_64/ose/3.1/os', 'enabled': 1, 'gpgcheck': 0}] + +openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '{{ openshift.common.config_base }}/htpasswd'}] + +#openshift_pkg_version=-3.0.0.0 + +[masters] +jdetiber-master.usersys.redhat.com openshift_public_hostname="{{ inventory_hostname }}" openshift_hostname="{{ ansible_default_ipv4.address }}" + +[etcd] +jdetiber-etcd.usersys.redhat.com + +[lb] +#ose3-lb-ansible.test.example.com + +[nodes] +jdetiber-master.usersys.redhat.com openshift_public_hostname="{{ inventory_hostname }}" openshift_hostname="{{ ansible_default_ipv4.address }}" +jdetiber-node[1:2].usersys.redhat.com openshift_public_hostname="{{ inventory_hostname }}" openshift_hostname="{{ ansible_default_ipv4.address }}" openshift_node_labels="{'region': 'primary', 'zone': 'default'}" diff --git a/openshift-ansible.spec b/openshift-ansible.spec index 7d543afdd..187405cd2 100644 --- a/openshift-ansible.spec +++ b/openshift-ansible.spec @@ -67,7 +67,7 @@ rm -f %{buildroot}%{python_sitelib}/openshift_ansible/gce # openshift-ansible-docs install # Install example inventory into docs/examples mkdir -p docs/example-inventories -cp inventory/byo/* docs/example-inventories/ +cp inventory/* docs/example-inventories/ # openshift-ansible-files install cp -rp files %{buildroot}%{_datadir}/ansible/%{name}/ @@ -286,13 +286,13 @@ Atomic OpenShift Utilities includes %changelog * Thu Nov 23 2017 Jenkins CD Merge Bot 3.8.0-0.13.0 -- +- * Thu Nov 23 2017 Jenkins CD Merge Bot 3.8.0-0.12.0 -- +- * Thu Nov 23 2017 Jenkins CD Merge Bot 3.8.0-0.11.0 -- +- * Thu Nov 23 2017 Jenkins CD Merge Bot 
3.8.0-0.10.0 - tox.ini: simplify unit test reqs (lmeyer@redhat.com) @@ -341,16 +341,16 @@ Atomic OpenShift Utilities includes - Include Deprecation - Init Playbook Paths (rteague@redhat.com) * Mon Nov 20 2017 Jenkins CD Merge Bot 3.8.0-0.8.0 -- +- * Mon Nov 20 2017 Jenkins CD Merge Bot 3.8.0-0.7.0 -- +- * Mon Nov 20 2017 Jenkins CD Merge Bot 3.8.0-0.6.0 -- +- * Sun Nov 19 2017 Jenkins CD Merge Bot 3.8.0-0.5.0 -- +- * Sun Nov 19 2017 Jenkins CD Merge Bot 3.8.0-0.4.0 - bug 1498398. Enclose content between store tag (rromerom@redhat.com) @@ -643,10 +643,10 @@ Atomic OpenShift Utilities includes - Allow cluster IP for docker-registry service to be set (hansmi@vshn.ch) * Thu Nov 09 2017 Jenkins CD Merge Bot 3.7.5-1 -- +- * Wed Nov 08 2017 Jenkins CD Merge Bot 3.7.4-1 -- +- * Wed Nov 08 2017 Jenkins CD Merge Bot 3.7.3-1 - Adding configuration for keeping transient namespace on error. @@ -816,10 +816,10 @@ Atomic OpenShift Utilities includes - GlusterFS: Remove image option from heketi command (jarrpa@redhat.com) * Mon Oct 30 2017 Jenkins CD Merge Bot 3.7.0-0.187.0 -- +- * Sun Oct 29 2017 Jenkins CD Merge Bot 3.7.0-0.186.0 -- +- * Sat Oct 28 2017 Jenkins CD Merge Bot 3.7.0-0.185.0 - bug 1506073. Lower cpu request for logging when it exceeds limit @@ -849,7 +849,7 @@ Atomic OpenShift Utilities includes - Refactor health check playbooks (rteague@redhat.com) * Fri Oct 27 2017 Jenkins CD Merge Bot 3.7.0-0.183.0 -- +- * Thu Oct 26 2017 Jenkins CD Merge Bot 3.7.0-0.182.0 - Fixing documentation for the cert_key_path variable name. @@ -923,16 +923,16 @@ Atomic OpenShift Utilities includes (hansmi@vshn.ch) * Mon Oct 23 2017 Jenkins CD Merge Bot 3.7.0-0.175.0 -- +- * Sun Oct 22 2017 Jenkins CD Merge Bot 3.7.0-0.174.0 -- +- * Sun Oct 22 2017 Jenkins CD Merge Bot 3.7.0-0.173.0 -- +- * Sun Oct 22 2017 Jenkins CD Merge Bot 3.7.0-0.172.0 -- +- * Sat Oct 21 2017 Jenkins CD Merge Bot 3.7.0-0.171.0 - Use "requests" for CPU resources instead of limits @@ -956,16 +956,16 @@ Atomic OpenShift Utilities includes (dymurray@redhat.com) * Fri Oct 20 2017 Jenkins CD Merge Bot 3.7.0-0.168.0 -- +- * Thu Oct 19 2017 Jenkins CD Merge Bot 3.7.0-0.167.0 -- +- * Thu Oct 19 2017 Jenkins CD Merge Bot 3.7.0-0.166.0 -- +- * Thu Oct 19 2017 Jenkins CD Merge Bot 3.7.0-0.165.0 -- +- * Thu Oct 19 2017 Jenkins CD Merge Bot 3.7.0-0.164.0 - Change to service-signer.crt for template_service_broker CA_BUNDLE @@ -988,7 +988,7 @@ Atomic OpenShift Utilities includes - Remove unneeded master config updates during upgrades (mgugino@redhat.com) * Wed Oct 18 2017 Jenkins CD Merge Bot 3.7.0-0.161.0 -- +- * Wed Oct 18 2017 Jenkins CD Merge Bot 3.7.0-0.160.0 - Fix pvc selector default to be empty dict instead of string @@ -1030,16 +1030,16 @@ Atomic OpenShift Utilities includes (jchaloup@redhat.com) * Sun Oct 15 2017 Jenkins CD Merge Bot 3.7.0-0.155.0 -- +- * Sat Oct 14 2017 Jenkins CD Merge Bot 3.7.0-0.154.0 -- +- * Fri Oct 13 2017 Jenkins CD Merge Bot 3.7.0-0.153.0 - default groups.oo_new_etcd_to_config to an empty list (jchaloup@redhat.com) * Fri Oct 13 2017 Jenkins CD Merge Bot 3.7.0-0.152.0 -- +- * Fri Oct 13 2017 Jenkins CD Merge Bot 3.7.0-0.151.0 - updated dynamic provision section for openshift metrics to support storage @@ -1448,7 +1448,7 @@ Atomic OpenShift Utilities includes - oc_atomic_container: support Skopeo output (gscrivan@redhat.com) * Tue Sep 05 2017 Jenkins CD Merge Bot 3.7.0-0.125.0 -- +- * Tue Sep 05 2017 Jenkins CD Merge Bot 3.7.0-0.124.0 - Fix ansible_syntax check (rteague@redhat.com) @@ -1475,7 +1475,7 @@ Atomic OpenShift 
Utilities includes (miciah.masters@gmail.com) * Wed Aug 30 2017 Jenkins CD Merge Bot 3.7.0-0.123.0 -- +- * Wed Aug 30 2017 Jenkins CD Merge Bot 3.7.0-0.122.0 - Update openshift_hosted_routers example to be in ini format. @@ -1537,10 +1537,10 @@ Atomic OpenShift Utilities includes - Add missing hostnames to registry cert (sdodson@redhat.com) * Fri Aug 25 2017 Jenkins CD Merge Bot 3.7.0-0.115.0 -- +- * Fri Aug 25 2017 Jenkins CD Merge Bot 3.7.0-0.114.0 -- +- * Fri Aug 25 2017 Jenkins CD Merge Bot 3.7.0-0.113.0 - openshift_version: enterprise accepts new style pre-release @@ -1558,13 +1558,13 @@ Atomic OpenShift Utilities includes - Setup tuned profiles in /etc/tuned (jmencak@redhat.com) * Thu Aug 24 2017 Jenkins CD Merge Bot 3.7.0-0.109.0 -- +- * Thu Aug 24 2017 Jenkins CD Merge Bot 3.7.0-0.108.0 -- +- * Thu Aug 24 2017 Jenkins CD Merge Bot 3.7.0-0.107.0 -- +- * Thu Aug 24 2017 Jenkins CD Merge Bot 3.7.0-0.106.0 - Add dotnet 2.0 to v3.6 (sdodson@redhat.com) @@ -1601,13 +1601,13 @@ Atomic OpenShift Utilities includes (sdodson@redhat.com) * Sat Aug 19 2017 Jenkins CD Merge Bot 3.7.0-0.103.0 -- +- * Fri Aug 18 2017 Jenkins CD Merge Bot 3.7.0-0.102.0 -- +- * Fri Aug 18 2017 Jenkins CD Merge Bot 3.7.0-0.101.0 -- +- * Fri Aug 18 2017 Jenkins CD Merge Bot 3.7.0-0.100.0 - Change memory requests and limits units (mak@redhat.com) @@ -1906,13 +1906,13 @@ Atomic OpenShift Utilities includes (kwoodson@redhat.com) * Mon Jul 17 2017 Jenkins CD Merge Bot 3.6.152-1 -- +- * Sun Jul 16 2017 Jenkins CD Merge Bot 3.6.151-1 -- +- * Sun Jul 16 2017 Jenkins CD Merge Bot 3.6.150-1 -- +- * Sat Jul 15 2017 Jenkins CD Merge Bot 3.6.149-1 - Config was missed before replace. (jkaur@redhat.com) @@ -1935,7 +1935,7 @@ Atomic OpenShift Utilities includes - GlusterFS: Fix SSH-based heketi configuration (jarrpa@redhat.com) * Wed Jul 12 2017 Jenkins CD Merge Bot 3.6.143-1 -- +- * Wed Jul 12 2017 Jenkins CD Merge Bot 3.6.142-1 - add scheduled pods check (jvallejo@redhat.com) @@ -1960,7 +1960,7 @@ Atomic OpenShift Utilities includes - updating fetch tasks to be flat paths (ewolinet@redhat.com) * Mon Jul 10 2017 Jenkins CD Merge Bot 3.6.140-1 -- +- * Sat Jul 08 2017 Jenkins CD Merge Bot 3.6.139-1 - increase implicit 300s default timeout to explicit 600s (jchaloup@redhat.com) @@ -2008,7 +2008,7 @@ Atomic OpenShift Utilities includes - Fully qualify ocp ansible_service_broker_image_prefix (sdodson@redhat.com) * Wed Jul 05 2017 Jenkins CD Merge Bot 3.6.134-1 -- +- * Tue Jul 04 2017 Jenkins CD Merge Bot 3.6.133-1 - etcd, syscontainer: fix copy of existing datastore (gscrivan@redhat.com) @@ -2020,7 +2020,7 @@ Atomic OpenShift Utilities includes - Fixes to storage migration (sdodson@redhat.com) * Mon Jul 03 2017 Jenkins CD Merge Bot 3.6.132-1 -- +- * Sun Jul 02 2017 Jenkins CD Merge Bot 3.6.131-1 - Fix upgrade (sdodson@redhat.com) @@ -2161,7 +2161,7 @@ Atomic OpenShift Utilities includes - bug 1457642. 
Use same SG index to avoid seeding timeout (jcantril@redhat.com) * Wed Jun 21 2017 Jenkins CD Merge Bot 3.6.122-1 -- +- * Tue Jun 20 2017 Jenkins CD Merge Bot 3.6.121-1 - Updating default from null to "" (ewolinet@redhat.com) @@ -2205,7 +2205,7 @@ Atomic OpenShift Utilities includes - CloudForms 4.5 templates (simaishi@redhat.com) * Fri Jun 16 2017 Jenkins CD Merge Bot 3.6.114-1 -- +- * Fri Jun 16 2017 Jenkins CD Merge Bot 3.6.113-1 - Make rollout status check best-effort, add poll (skuznets@redhat.com) @@ -2267,7 +2267,7 @@ Atomic OpenShift Utilities includes - singletonize some role tasks that repeat a lot (lmeyer@redhat.com) * Wed Jun 14 2017 Jenkins CD Merge Bot 3.6.109-1 -- +- * Wed Jun 14 2017 Jenkins CD Merge Bot 3.6.108-1 - Upgraded Calico to 2.2.1 Release (vincent.schwarzer@yahoo.de) @@ -2323,7 +2323,7 @@ Atomic OpenShift Utilities includes - Install default storageclass in AWS & GCE envs (hekumar@redhat.com) * Fri Jun 09 2017 Jenkins CD Merge Bot 3.6.98-1 -- +- * Fri Jun 09 2017 Scott Dodson 3.6.97-1 - Updated to using oo_random_word for secret gen (ewolinet@redhat.com) @@ -2355,7 +2355,7 @@ Atomic OpenShift Utilities includes loopback kubeconfigs. (abutcher@redhat.com) * Tue Jun 06 2017 Jenkins CD Merge Bot 3.6.89.2-1 -- +- * Tue Jun 06 2017 Jenkins CD Merge Bot 3.6.89.1-1 - Updating image for registry_console (ewolinet@redhat.com) @@ -2602,13 +2602,13 @@ Atomic OpenShift Utilities includes - Fix additional master cert & client config creation. (abutcher@redhat.com) * Tue May 09 2017 Jenkins CD Merge Bot 3.6.62-1 -- +- * Tue May 09 2017 Jenkins CD Merge Bot 3.6.61-1 -- +- * Mon May 08 2017 Jenkins CD Merge Bot 3.6.60-1 -- +- * Mon May 08 2017 Jenkins CD Merge Bot 3.6.59-1 - Updating logging and metrics to restart api, ha and controllers when updating @@ -2621,10 +2621,10 @@ Atomic OpenShift Utilities includes - Moving Dockerfile content to images dir (jupierce@redhat.com) * Mon May 08 2017 Jenkins CD Merge Bot 3.6.57-1 -- +- * Sun May 07 2017 Jenkins CD Merge Bot 3.6.56-1 -- +- * Sat May 06 2017 Jenkins CD Merge Bot 3.6.55-1 - Fix 1448368, and some other minors issues (ghuang@redhat.com) diff --git a/playbooks/aws/README.md b/playbooks/aws/README.md index 417fb539a..c6fc75a50 100644 --- a/playbooks/aws/README.md +++ b/playbooks/aws/README.md @@ -75,7 +75,7 @@ If customization is required for the instances, scale groups, or any other confi In order to create the bootstrap-able AMI we need to create a basic openshift-ansible inventory. This enables us to create the AMI using the openshift-ansible node roles. This inventory should not include any hosts, but certain variables should be defined in the appropriate groups, just as deploying a cluster using the normal openshift-ansible method. See provisioning-inventory.example.ini for an example. -There are more examples of cluster inventory settings [`here`](../../inventory/byo/). +There are more examples of cluster inventory settings [`here`](../../inventory/). #### Step 0 (optional) @@ -138,7 +138,7 @@ $ ansible-playbook -i inventory.yml install.yml @provisioning_vars.yml ``` This playbook accomplishes the following: 1. Builds a dynamic inventory file by querying AWS. -2. Runs the [`byo`](../../common/openshift-cluster/config.yml) +2. Runs the [`deploy_cluster.yml`](../deploy_cluster.yml) Once this playbook completes, the cluster masters should be installed and configured. 
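For readers following the inventory references above, here is a minimal, hedged sketch of what a cluster inventory under inventory/ might look like when run with playbooks/deploy_cluster.yml; the hostnames and deployment type are placeholders, and the full set of supported variables is documented in inventory/hosts.example.

```
# Sketch only: a minimal inventory for playbooks/deploy_cluster.yml.
[OSEv3:children]
masters
nodes
etcd

[OSEv3:vars]
ansible_ssh_user=root
openshift_deployment_type=origin

[masters]
master.example.com

[etcd]
master.example.com

[nodes]
master.example.com openshift_schedulable=False
node1.example.com openshift_node_labels="{'region': 'infra', 'zone': 'default'}"
node2.example.com openshift_node_labels="{'region': 'primary', 'zone': 'default'}"
```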
diff --git a/playbooks/byo/config.yml b/playbooks/byo/config.yml deleted file mode 100644 index 4b74e5bce..000000000 --- a/playbooks/byo/config.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -# TODO (rteague): Temporarily leaving this playbook to allow CI tests to operate until CI jobs are updated. -- import_playbook: ../deploy_cluster.yml diff --git a/playbooks/openshift-logging/config.yml b/playbooks/openshift-logging/config.yml index d71b4f1c5..83d330284 100644 --- a/playbooks/openshift-logging/config.yml +++ b/playbooks/openshift-logging/config.yml @@ -1,7 +1,7 @@ --- # # This playbook is a preview of upcoming changes for installing -# Hosted logging on. See inventory/byo/hosts.*.example for the +# Hosted logging on. See inventory/hosts.example for the # currently supported method. # - import_playbook: ../init/main.yml diff --git a/playbooks/openshift-master/private/redeploy-openshift-ca.yml b/playbooks/openshift-master/private/redeploy-openshift-ca.yml index 2a190935e..9f5502141 100644 --- a/playbooks/openshift-master/private/redeploy-openshift-ca.yml +++ b/playbooks/openshift-master/private/redeploy-openshift-ca.yml @@ -56,7 +56,7 @@ - groups.oo_etcd_to_config | default([]) | length == 0 - (g_master_config_output.content|b64decode|from_yaml).etcdConfig.servingInfo.clientCA != 'ca-bundle.crt' # Set servingInfo.clientCA to client-ca-bundle.crt in order to roll the CA certificate. - # This change will be reverted in playbooks/byo/openshift-cluster/redeploy-certificates.yml + # This change will be reverted in playbooks/redeploy-certificates.yml - modify_yaml: dest: "{{ openshift.common.config_base }}/master/master-config.yaml" yaml_key: servingInfo.clientCA diff --git a/playbooks/openstack/README.md b/playbooks/openstack/README.md index f567242cd..d361d6278 100644 --- a/playbooks/openstack/README.md +++ b/playbooks/openstack/README.md @@ -226,7 +226,7 @@ advanced configuration: [hardware-requirements]: https://docs.openshift.org/latest/install_config/install/prerequisites.html#hardware [origin]: https://www.openshift.org/ [centos7]: https://www.centos.org/ -[sample-openshift-inventory]: https://github.com/openshift/openshift-ansible/blob/master/inventory/byo/hosts.example +[sample-openshift-inventory]: https://github.com/openshift/openshift-ansible/blob/master/inventory/hosts.example [advanced-configuration]: ./advanced-configuration.md [accessing-openshift]: ./advanced-configuration.md#accessing-the-openshift-cluster [uninstall-openshift]: ./advanced-configuration.md#removing-the-openshift-cluster diff --git a/playbooks/openstack/advanced-configuration.md b/playbooks/openstack/advanced-configuration.md index db2a13d38..403e0e1a7 100644 --- a/playbooks/openstack/advanced-configuration.md +++ b/playbooks/openstack/advanced-configuration.md @@ -343,7 +343,7 @@ installation for example by specifying the authentication. 
The full list of options is available in this sample inventory: -https://github.com/openshift/openshift-ansible/blob/master/inventory/byo/hosts.ose.example +https://github.com/openshift/openshift-ansible/blob/master/inventory/hosts.example Note, that in order to deploy OpenShift origin, you should update the following variables for the `inventory/group_vars/OSEv3.yml`, `all.yml`: @@ -604,7 +604,7 @@ A library of custom post-provision actions exists in `openshift-ansible-contrib/ Once it succeeds, you can install openshift by running: - ansible-playbook openshift-ansible/playbooks/byo/config.yml + ansible-playbook openshift-ansible/playbooks/deploy_cluster.yml ## Access UI diff --git a/roles/openshift_health_checker/HOWTO_CHECKS.md b/roles/openshift_health_checker/HOWTO_CHECKS.md index 6c5662a4e..94961f2d4 100644 --- a/roles/openshift_health_checker/HOWTO_CHECKS.md +++ b/roles/openshift_health_checker/HOWTO_CHECKS.md @@ -12,7 +12,7 @@ Checks are typically implemented as two parts: The checks are called from Ansible playbooks via the `openshift_health_check` action plugin. See -[playbooks/byo/openshift-preflight/check.yml](../../playbooks/byo/openshift-preflight/check.yml) +[playbooks/openshift-checks/pre-install.yml](../../playbooks/openshift-checks/pre-install.yml) for an example. The action plugin dynamically discovers all checks and executes only those diff --git a/roles/openshift_management/README.md b/roles/openshift_management/README.md index 96de82669..974d9781a 100644 --- a/roles/openshift_management/README.md +++ b/roles/openshift_management/README.md @@ -164,14 +164,14 @@ away. If you want to install CFME/MIQ at the same time you install your OCP/Origin cluster, ensure that `openshift_management_install_management` is set to `true` in your inventory. Call the standard -`playbooks/byo/config.yml` playbook to begin the cluster and CFME/MIQ +`playbooks/deploy_cluster.yml` playbook to begin the cluster and CFME/MIQ installation. If you are installing CFME/MIQ on an *already provisioned cluster* then you can call the CFME/MIQ playbook directly: ``` -$ ansible-playbook -v -i playbooks/byo/openshift-management/config.yml +$ ansible-playbook -v -i playbooks/openshift-management/config.yml ``` *Note: Use `miq-template` in the following examples for ManageIQ installs* @@ -489,7 +489,7 @@ This playbook will: ``` -$ ansible-playbook -v -i playbooks/byo/openshift-management/add_container_provider.yml +$ ansible-playbook -v -i playbooks/openshift-management/add_container_provider.yml ``` ## Multiple Providers @@ -567,7 +567,7 @@ the config file path. ``` $ ansible-playbook -v -e container_providers_config=/tmp/cp.yml \ - playbooks/byo/openshift-management/add_many_container_providers.yml + playbooks/openshift-management/add_many_container_providers.yml ``` Afterwards you will find two new container providers in your @@ -579,7 +579,7 @@ to see an overview. This role includes a playbook to uninstall and erase the CFME/MIQ installation: -* `playbooks/byo/openshift-management/uninstall.yml` +* `playbooks/openshift-management/uninstall.yml` NFS export definitions and data stored on NFS exports are not automatically removed. 
You are urged to manually erase any data from diff --git a/roles/openshift_management/defaults/main.yml b/roles/openshift_management/defaults/main.yml index e768961ce..b5e234b7f 100644 --- a/roles/openshift_management/defaults/main.yml +++ b/roles/openshift_management/defaults/main.yml @@ -88,7 +88,7 @@ openshift_management_storage_nfs_local_hostname: false # name and password AND are trying to use integration scripts. # # For example, adding this cluster as a container provider, -# playbooks/byo/openshift-management/add_container_provider.yml +# playbooks/openshift-management/add_container_provider.yml openshift_management_username: admin openshift_management_password: smartvm diff --git a/setup.py b/setup.py index 4a25d0bce..5ba050b83 100644 --- a/setup.py +++ b/setup.py @@ -345,35 +345,29 @@ class OpenShiftAnsibleSyntaxCheck(Command): print('-' * 60) print('Syntax checking playbook: {}'.format(playbook)) - # Error on any entry points in 'common' - if 'common' in playbook: - print('{}Invalid entry point playbook. All playbooks must' - ' start in playbooks/byo{}'.format(self.FAIL, self.ENDC)) - has_errors = True # --syntax-check each entry point playbook - else: - try: - # Create a host group list to avoid WARNING on unmatched host patterns - host_group_list = [ - 'etcd,masters,nodes,OSEv3', - 'oo_all_hosts', - 'oo_etcd_to_config,oo_new_etcd_to_config,oo_first_etcd,oo_etcd_hosts_to_backup,' - 'oo_etcd_hosts_to_upgrade,oo_etcd_to_migrate', - 'oo_masters,oo_masters_to_config,oo_first_master,oo_containerized_master_nodes', - 'oo_nodes_to_config,oo_nodes_to_upgrade', - 'oo_nodes_use_kuryr,oo_nodes_use_flannel', - 'oo_nodes_use_calico,oo_nodes_use_nuage,oo_nodes_use_contiv', - 'oo_lb_to_config', - 'oo_nfs_to_config', - 'glusterfs,glusterfs_registry,'] - subprocess.check_output( - ['ansible-playbook', '-i ' + ','.join(host_group_list), - '--syntax-check', playbook] - ) - except subprocess.CalledProcessError as cpe: - print('{}Execution failed: {}{}'.format( - self.FAIL, cpe, self.ENDC)) - has_errors = True + try: + # Create a host group list to avoid WARNING on unmatched host patterns + host_group_list = [ + 'etcd,masters,nodes,OSEv3', + 'oo_all_hosts', + 'oo_etcd_to_config,oo_new_etcd_to_config,oo_first_etcd,oo_etcd_hosts_to_backup,' + 'oo_etcd_hosts_to_upgrade,oo_etcd_to_migrate', + 'oo_masters,oo_masters_to_config,oo_first_master,oo_containerized_master_nodes', + 'oo_nodes_to_config,oo_nodes_to_upgrade', + 'oo_nodes_use_kuryr,oo_nodes_use_flannel', + 'oo_nodes_use_calico,oo_nodes_use_nuage,oo_nodes_use_contiv', + 'oo_lb_to_config', + 'oo_nfs_to_config', + 'glusterfs,glusterfs_registry,'] + subprocess.check_output( + ['ansible-playbook', '-i ' + ','.join(host_group_list), + '--syntax-check', playbook] + ) + except subprocess.CalledProcessError as cpe: + print('{}Execution failed: {}{}'.format( + self.FAIL, cpe, self.ENDC)) + has_errors = True if has_errors: raise SystemExit(1) -- cgit v1.2.3