Diffstat (limited to 'inventory/byo')
-rw-r--r--  inventory/byo/hosts.byo.glusterfs.external.example             |  56
-rw-r--r--  inventory/byo/hosts.byo.glusterfs.mixed.example                |  59
-rw-r--r--  inventory/byo/hosts.byo.glusterfs.native.example               |  46
-rw-r--r--  inventory/byo/hosts.byo.glusterfs.registry-only.example        |  19   (renamed from inventory/byo/hosts.byo.native-glusterfs.example)
-rw-r--r--  inventory/byo/hosts.byo.glusterfs.storage-and-registry.example |  63
-rw-r--r--  inventory/byo/hosts.origin.example                             | 208
-rw-r--r--  inventory/byo/hosts.ose.example                                | 217
7 files changed, 462 insertions(+), 206 deletions(-)
diff --git a/inventory/byo/hosts.byo.glusterfs.external.example b/inventory/byo/hosts.byo.glusterfs.external.example
new file mode 100644
index 000000000..5a284ce97
--- /dev/null
+++ b/inventory/byo/hosts.byo.glusterfs.external.example
@@ -0,0 +1,56 @@
+# This is an example of a bring your own (byo) host inventory for a cluster
+# using an external, stand-alone GlusterFS cluster for storage.
+#
+# This inventory may be used with the byo/config.yml playbook to deploy a new
+# cluster with GlusterFS storage, which will use that storage to create a
+# volume that will provide backend storage for a hosted Docker registry.
+#
+# This inventory may also be used with byo/openshift-glusterfs/config.yml to
+# deploy GlusterFS storage on an existing cluster. With this playbook, the
+# registry backend volume will be created but the administrator must then
+# either deploy a hosted registry or change an existing hosted registry to use
+# that volume.
+#
+# There are additional configuration parameters that can be specified to
+# control the deployment and state of a GlusterFS cluster. Please see the
+# documentation in playbooks/byo/openshift-glusterfs/README.md and
+# roles/openshift_storage_glusterfs/README.md for additional details.
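+#
+# A typical invocation with this inventory might look like the following
+# (paths are relative to an openshift-ansible checkout and are assumptions;
+# adjust to your environment):
+#
+#   ansible-playbook -i inventory/byo/hosts.byo.glusterfs.external.example \
+#       playbooks/byo/openshift-glusterfs/config.yml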
+
+[OSEv3:children]
+masters
+nodes
+# Specify there will be GlusterFS nodes
+glusterfs
+
+[OSEv3:vars]
+ansible_ssh_user=root
+openshift_deployment_type=origin
+# Specify that we want to use an external GlusterFS cluster
+openshift_storage_glusterfs_is_native=False
+# Specify the IP address or hostname of the external heketi service
+openshift_storage_glusterfs_heketi_url=172.0.0.1
+
+[masters]
+master
+
+[nodes]
+master openshift_schedulable=False
+node0 openshift_schedulable=True
+node1 openshift_schedulable=True
+node2 openshift_schedulable=True
+
+# Specify the glusterfs group, which contains the nodes of the external
+# GlusterFS cluster. At a minimum, each node must have "glusterfs_ip"
+# and "glusterfs_devices" variables defined.
+#
+# The first variable indicates the IP address of the external GlusterFS node,
+# which must be reachable by the external heketi service.
+#
+# The second variable is a list of block devices the node will have access to
+# that are intended solely for use as GlusterFS storage. These block devices
+# must be bare (e.g. have no data, not be marked as LVM PVs), and will be
+# formatted.
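+#
+# A sketch of checking that a device is bare (run on the GlusterFS node
+# itself; the device name is an example):
+#
+#   lsblk /dev/vdb          # should show no partitions or filesystems
+#   wipefs --all /dev/vdb   # clears any leftover signatures if needed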
+[glusterfs]
+node0.local glusterfs_ip='172.0.0.10' glusterfs_devices='[ "/dev/vdb" ]'
+node1.local glusterfs_ip='172.0.0.11' glusterfs_devices='[ "/dev/vdb", "/dev/vdc" ]'
+node2.local glusterfs_ip='172.0.0.12' glusterfs_devices='[ "/dev/vdd" ]'
diff --git a/inventory/byo/hosts.byo.glusterfs.mixed.example b/inventory/byo/hosts.byo.glusterfs.mixed.example
new file mode 100644
index 000000000..d16df6470
--- /dev/null
+++ b/inventory/byo/hosts.byo.glusterfs.mixed.example
@@ -0,0 +1,59 @@
+# This is an example of a bring your own (byo) host inventory for a cluster
+# using an external GlusterFS cluster and a natively hosted, containerized
+# heketi service.
+#
+# This inventory may be used with the byo/config.yml playbook to deploy a new
+# cluster with GlusterFS storage, which will use that storage to create a
+# volume that will provide backend storage for a hosted Docker registry.
+#
+# This inventory may also be used with byo/openshift-glusterfs/config.yml to
+# deploy GlusterFS storage on an existing cluster. With this playbook, the
+# registry backend volume will be created but the administrator must then
+# either deploy a hosted registry or change an existing hosted registry to use
+# that volume.
+#
+# There are additional configuration parameters that can be specified to
+# control the deployment and state of a GlusterFS cluster. Please see the
+# documentation in playbooks/byo/openshift-glusterfs/README.md and
+# roles/openshift_storage_glusterfs/README.md for additional details.
+
+[OSEv3:children]
+masters
+nodes
+# Specify there will be GlusterFS nodes
+glusterfs
+
+[OSEv3:vars]
+ansible_ssh_user=root
+openshift_deployment_type=origin
+# Specify that we want to use an external GlusterFS cluster and a native
+# heketi service
+openshift_storage_glusterfs_is_native=False
+openshift_storage_glusterfs_heketi_is_native=True
+# Specify that heketi will use SSH to communicate to the GlusterFS nodes and
+# the private key file it will use for authentication
+openshift_storage_glusterfs_heketi_executor=ssh
+openshift_storage_glusterfs_heketi_ssh_keyfile=/root/id_rsa
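+#
+# A minimal sketch of preparing that key (hostnames here are assumptions;
+# the key must grant root access on each GlusterFS node):
+#
+#   ssh-keygen -t rsa -N '' -f /root/id_rsa
+#   ssh-copy-id -i /root/id_rsa.pub root@node0.local
+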
+[masters]
+master
+
+[nodes]
+master openshift_schedulable=False
+node0 openshift_schedulable=True
+node1 openshift_schedulable=True
+node2 openshift_schedulable=True
+
+# Specify the glusterfs group, which contains the nodes of the external
+# GlusterFS cluster. At a minimum, each node must have "glusterfs_ip"
+# and "glusterfs_devices" variables defined.
+#
+# The first variable indicates the IP address of the external GlusterFS node,
+# which must be reachable by the heketi service.
+#
+# The second variable is a list of block devices the node will have access to
+# that are intended solely for use as GlusterFS storage. These block devices
+# must be bare (e.g. have no data, not be marked as LVM PVs), and will be
+# formatted.
+[glusterfs]
+node0.local glusterfs_ip='172.0.0.10' glusterfs_devices='[ "/dev/vdb" ]'
+node1.local glusterfs_ip='172.0.0.11' glusterfs_devices='[ "/dev/vdb", "/dev/vdc" ]'
+node2.local glusterfs_ip='172.0.0.12' glusterfs_devices='[ "/dev/vdd" ]'
diff --git a/inventory/byo/hosts.byo.glusterfs.native.example b/inventory/byo/hosts.byo.glusterfs.native.example
new file mode 100644
index 000000000..c1a1f6f84
--- /dev/null
+++ b/inventory/byo/hosts.byo.glusterfs.native.example
@@ -0,0 +1,46 @@
+# This is an example of a bring your own (byo) host inventory for a cluster
+# with natively hosted, containerized GlusterFS storage for applications. It
+# will also automatically create a StorageClass for this purpose.
+#
+# This inventory may be used with the byo/config.yml playbook to deploy a new
+# cluster with GlusterFS storage.
+#
+# This inventory may also be used with byo/openshift-glusterfs/config.yml to
+# deploy GlusterFS storage on an existing cluster.
+#
+# There are additional configuration parameters that can be specified to
+# control the deployment and state of a GlusterFS cluster. Please see the
+# documentation in playbooks/byo/openshift-glusterfs/README.md and
+# roles/openshift_storage_glusterfs/README.md for additional details.
+
+[OSEv3:children]
+masters
+nodes
+# Specify there will be GlusterFS nodes
+glusterfs
+
+[OSEv3:vars]
+ansible_ssh_user=root
+openshift_deployment_type=origin
+
+[masters]
+master
+
+[nodes]
+master openshift_schedulable=False
+node0 openshift_schedulable=True
+node1 openshift_schedulable=True
+node2 openshift_schedulable=True
+
+# Specify the glusterfs group, which contains the nodes that will host
+# GlusterFS storage pods. At a minimum, each node must have a
+# "glusterfs_devices" variable defined. This variable is a list of block
+# devices the node will have access to that is intended solely for use as
+# GlusterFS storage. These block devices must be bare (e.g. have no data, not
+# be marked as LVM PVs), and will be formatted.
+[glusterfs]
+node0 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
+node1 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
+node2 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
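+
+# Once installed, applications can request storage through the created
+# StorageClass. A minimal PersistentVolumeClaim sketch (the StorageClass
+# name below is an assumption; check `oc get storageclass` after install):
+#
+#   apiVersion: v1
+#   kind: PersistentVolumeClaim
+#   metadata:
+#     name: my-app-storage
+#   spec:
+#     storageClassName: glusterfs-storage
+#     accessModes: [ "ReadWriteOnce" ]
+#     resources:
+#       requests:
+#         storage: 1Gi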
diff --git a/inventory/byo/hosts.byo.native-glusterfs.example b/inventory/byo/hosts.byo.glusterfs.registry-only.example
index dc847a5b2..31a85ee42 100644
--- a/inventory/byo/hosts.byo.native-glusterfs.example
+++ b/inventory/byo/hosts.byo.glusterfs.registry-only.example
@@ -1,11 +1,12 @@
# This is an example of a bring your own (byo) host inventory for a cluster
-# with natively hosted, containerized GlusterFS storage.
+# with natively hosted, containerized GlusterFS storage for exclusive use
+# as storage for a natively hosted Docker registry.
#
# This inventory may be used with the byo/config.yml playbook to deploy a new
# cluster with GlusterFS storage, which will use that storage to create a
# volume that will provide backend storage for a hosted Docker registry.
#
-# This inventory may also be used with byo/openshift-glusterfs/config.yml to
+# This inventory may also be used with byo/openshift-glusterfs/registry.yml to
# deploy GlusterFS storage on an existing cluster. With this playbook, the
# registry backend volume will be created but the administrator must then
# either deploy a hosted registry or change an existing hosted registry to use
@@ -20,7 +21,7 @@
masters
nodes
# Specify there will be GlusterFS nodes
-glusterfs
+glusterfs_registry
[OSEv3:vars]
ansible_ssh_user=root
@@ -29,15 +30,15 @@ openshift_deployment_type=origin
openshift_hosted_registry_storage_kind=glusterfs
[masters]
-master node=True storage=True master=True
+master
[nodes]
-master node=True storage=True master=True openshift_schedulable=False
+master openshift_schedulable=False
# A hosted registry, by default, will only be deployed on nodes labeled
# "region=infra".
-node0 node=True openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
-node1 node=True openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
-node2 node=True openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
+node0 openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
+node1 openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
+node2 openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
# Specify the glusterfs group, which contains the nodes that will host
# GlusterFS storage pods. At a minimum, each node must have a
@@ -45,7 +46,7 @@ node2 node=True openshift_node_labels="{'region': 'infra'}" openshift_schedula
# devices the node will have access to that is intended solely for use as
# GlusterFS storage. These block devices must be bare (e.g. have no data, not
# be marked as LVM PVs), and will be formatted.
-[glusterfs]
+[glusterfs_registry]
node0 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
node1 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
node2 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
diff --git a/inventory/byo/hosts.byo.glusterfs.storage-and-registry.example b/inventory/byo/hosts.byo.glusterfs.storage-and-registry.example
new file mode 100644
index 000000000..54bd89ddc
--- /dev/null
+++ b/inventory/byo/hosts.byo.glusterfs.storage-and-registry.example
@@ -0,0 +1,63 @@
+# This is an example of a bring your own (byo) host inventory for a cluster
+# with natively hosted, containerized GlusterFS storage for both general
+# application use and a natively hosted Docker registry. It will also create a
+# StorageClass for the general storage.
+#
+# This inventory may be used with the byo/config.yml playbook to deploy a new
+# cluster with GlusterFS storage.
+#
+# This inventory may also be used with byo/openshift-glusterfs/config.yml to
+# deploy GlusterFS storage on an existing cluster. With this playbook, the
+# registry backend volume will be created but the administrator must then
+# either deploy a hosted registry or change an existing hosted registry to use
+# that volume.
+#
+# There are additional configuration parameters that can be specified to
+# control the deployment and state of a GlusterFS cluster. Please see the
+# documentation in playbooks/byo/openshift-glusterfs/README.md and
+# roles/openshift_storage_glusterfs/README.md for additional details.
+
+[OSEv3:children]
+masters
+nodes
+# Specify there will be GlusterFS nodes
+glusterfs
+glusterfs_registry
+
+[OSEv3:vars]
+ansible_ssh_user=root
+openshift_deployment_type=origin
+# Specify that we want to use GlusterFS storage for a hosted registry
+openshift_hosted_registry_storage_kind=glusterfs
+
+[masters]
+master
+
+[nodes]
+master openshift_schedulable=False
+# Using a single GlusterFS cluster for both general and registry storage is
+# not recommended, so this example uses two separate three-node clusters.
+node0 openshift_schedulable=True
+node1 openshift_schedulable=True
+node2 openshift_schedulable=True
+# A hosted registry, by default, will only be deployed on nodes labeled
+# "region=infra".
+node3 openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
+node4 openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
+node5 openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
+
+# Specify the glusterfs group, which contains the nodes that will host
+# GlusterFS storage pods. At a minimum, each node must have a
+# "glusterfs_devices" variable defined. This variable is a list of block
+# devices the node will have access to that is intended solely for use as
+# GlusterFS storage. These block devices must be bare (e.g. have no data, not
+# be marked as LVM PVs), and will be formatted.
+[glusterfs]
+node0 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
+node1 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
+node2 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
+
+[glusterfs_registry]
+node3 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
+node4 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
+node5 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
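+
+# After installation, one way to sanity-check the deployments (the
+# "glusterfs" namespace is the role's default and an assumption here):
+#
+#   oc get pods -n glusterfs -o wide
+#   oc get storageclass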
diff --git a/inventory/byo/hosts.origin.example b/inventory/byo/hosts.origin.example
index e6bc6c829..4a0630a69 100644
--- a/inventory/byo/hosts.origin.example
+++ b/inventory/byo/hosts.origin.example
@@ -34,17 +34,17 @@ openshift_deployment_type=origin
# use this to lookup the latest exact version of the container images, which is the tag actually used to configure
# the cluster. For RPM installations we just verify the version detected in your configured repos matches this
# release.
-openshift_release=v3.6
+openshift_release=v3.7
# Specify an exact container image tag to install or configure.
# WARNING: This value will be used for all hosts in containerized environments, even those that have another version installed.
# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
-#openshift_image_tag=v3.6.0
+#openshift_image_tag=v3.7.0
# Specify an exact rpm version to install or configure.
# WARNING: This value will be used for all hosts in RPM based environments, even those that have another version installed.
# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
-#openshift_pkg_version=-3.6.0
+#openshift_pkg_version=-3.7.0
# This enables all the system containers except for docker:
#openshift_use_system_containers=False
@@ -56,6 +56,9 @@ openshift_release=v3.6
#openshift_use_node_system_container=False
#openshift_use_master_system_container=False
#openshift_use_etcd_system_container=False
+#
+# In either case, system_images_registry must be specified so that the system images can be found
+#system_images_registry="docker.io"
# Install the openshift examples
#openshift_install_examples=true
@@ -84,6 +87,13 @@ openshift_release=v3.6
# See: https://godoc.org/github.com/openshift/origin/pkg/cmd/server/api#ImagePolicyConfig
#openshift_master_image_policy_config={"maxImagesBulkImportedPerRepository": 3, "disableScheduledImport": true}
+# Configure master API rate limits for external clients
+#openshift_master_external_ratelimit_qps=200
+#openshift_master_external_ratelimit_burst=400
+# Configure master API rate limits for loopback clients
+#openshift_master_loopback_ratelimit_qps=300
+#openshift_master_loopback_ratelimit_burst=600
+
# Docker Configuration
# Add additional, insecure, and blocked registries to global docker configuration
# For enterprise deployment types we ensure that registry.access.redhat.com is
@@ -101,10 +111,15 @@ openshift_release=v3.6
# The following options must not be used
# - openshift_docker_options
#openshift_docker_use_system_container=False
-# Force the registry to use for the system container. By default the registry
+# Instead of using docker, replace it with cri-o
+# NOTE: This uses openshift_docker_systemcontainer_image_registry_override as its override,
+# just as container-engine does.
+#openshift_use_crio=False
+# Force the registry to use for the docker/crio system container. By default the registry
# will be built off of the deployment type and ansible_distribution. Only
# use this option if you are sure you know what you are doing!
-#openshift_docker_systemcontainer_image_registry_override="registry.example.com"
+#openshift_docker_systemcontainer_image_override="registry.example.com/container-engine:latest"
+#openshift_crio_systemcontainer_image_override="registry.example.com/cri-o:latest"
# Items added, as is, to end of /etc/sysconfig/docker OPTIONS
# Default value: "--log-driver=journald"
#openshift_docker_options="-l warn --ipv6=false"
@@ -113,6 +128,11 @@ openshift_release=v3.6
# Downgrades are not supported and will error out. Be careful when upgrading docker from < 1.10 to > 1.10.
# docker_version="1.12.1"
+# Specify whether to run Docker daemon with SELinux enabled in containers. Default is True.
+# Uncomment below to disable; for example if your kernel does not support the
+# Docker overlay/overlay2 storage drivers with SELinux enabled.
+#openshift_docker_selinux_enabled=False
+
# Skip upgrading Docker during an OpenShift upgrade, leaves the current Docker version alone.
# docker_upgrade=False
@@ -151,8 +171,9 @@ openshift_release=v3.6
# modify image streams to point at that registry by setting the following to true
#openshift_examples_modify_imagestreams=true
-# Origin copr repo
+# OpenShift repository configuration
#openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name': 'OpenShift Origin COPR', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/', 'enabled': 1, 'gpgcheck': 1, 'gpgkey': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}]
+#openshift_repos_enable_testing=false
# htpasswd auth
openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}]
@@ -359,45 +380,9 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# and is in the form of a list. If no data is passed then a default router will be
# created. There are multiple combinations of router sharding. The one described
# below supports routers on separate nodes.
-#openshift_hosted_routers:
-#- name: router1
-# stats_port: 1936
-# ports:
-# - 80:80
-# - 443:443
-# replicas: 1
-# namespace: default
-# serviceaccount: router
-# selector: type=router1
-# images: "openshift3/ose-${component}:${version}"
-# edits: []
-# certificate:
-# certfile: /path/to/certificate/abc.crt
-# keyfile: /path/to/certificate/abc.key
-# cafile: /path/to/certificate/ca.crt
-#- name: router2
-# stats_port: 1936
-# ports:
-# - 80:80
-# - 443:443
-# replicas: 1
-# namespace: default
-# serviceaccount: router
-# selector: type=router2
-# images: "openshift3/ose-${component}:${version}"
-# certificate:
-# certfile: /path/to/certificate/xyz.crt
-# keyfile: /path/to/certificate/xyz.key
-# cafile: /path/to/certificate/ca.crt
-# edits:
-# # ROUTE_LABELS sets the router to listen for routes
-# # tagged with the provided values
-# - key: spec.template.spec.containers[0].env
-# value:
-# name: ROUTE_LABELS
-# value: "route=external"
-# action: append
#
+#openshift_hosted_routers=[{'name': 'router1', 'certificate': {'certfile': '/path/to/certificate/abc.crt', 'keyfile': '/path/to/certificate/abc.key', 'cafile': '/path/to/certificate/ca.crt'}, 'replicas': 1, 'serviceaccount': 'router', 'namespace': 'default', 'stats_port': 1936, 'edits': [], 'images': 'openshift3/ose-${component}:${version}', 'selector': 'type=router1', 'ports': ['80:80', '443:443']}, {'name': 'router2', 'certificate': {'certfile': '/path/to/certificate/xyz.crt', 'keyfile': '/path/to/certificate/xyz.key', 'cafile': '/path/to/certificate/ca.crt'}, 'replicas': 1, 'serviceaccount': 'router', 'namespace': 'default', 'stats_port': 1936, 'edits': [{'action': 'append', 'key': 'spec.template.spec.containers[0].env', 'value': {'name': 'ROUTE_LABELS', 'value': 'route=external'}}], 'images': 'openshift3/ose-${component}:${version}', 'selector': 'type=router2', 'ports': ['80:80', '443:443']}]
+
# OpenShift Registry Console Options
# Override the console image prefix for enterprise deployments, not used in origin
# default is "registry.access.redhat.com/openshift3/" and the image appended is "registry-console"
@@ -468,6 +453,8 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# S3 bucket must already exist.
#openshift_hosted_registry_storage_kind=object
#openshift_hosted_registry_storage_provider=s3
+#openshift_hosted_registry_storage_s3_encrypt=false
+#openshift_hosted_registry_storage_s3_kmskeyid=aws_kms_key_id
#openshift_hosted_registry_storage_s3_accesskey=aws_access_key_id
#openshift_hosted_registry_storage_s3_secretkey=aws_secret_access_key
#openshift_hosted_registry_storage_s3_bucket=bucket_name
@@ -504,10 +491,10 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# See: https://docs.openshift.com/enterprise/latest/install_config/cluster_metrics.html
#
# By default metrics are not automatically deployed, set this to enable them
-# openshift_hosted_metrics_deploy=true
+#openshift_metrics_install_metrics=true
#
# Storage Options
-# If openshift_hosted_metrics_storage_kind is unset then metrics will be stored
+# If openshift_metrics_storage_kind is unset then metrics will be stored
# in an EmptyDir volume and will be deleted when the cassandra pod terminates.
# Storage options A & B currently support only one cassandra pod which is
# generally enough for up to 1000 pods. Additional volumes can be created
@@ -517,29 +504,29 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# An NFS volume will be created with path "nfs_directory/volume_name"
# on the host within the [nfs] host group. For example, the volume
# path using these options would be "/exports/metrics"
-#openshift_hosted_metrics_storage_kind=nfs
-#openshift_hosted_metrics_storage_access_modes=['ReadWriteOnce']
-#openshift_hosted_metrics_storage_nfs_directory=/exports
-#openshift_hosted_metrics_storage_nfs_options='*(rw,root_squash)'
-#openshift_hosted_metrics_storage_volume_name=metrics
-#openshift_hosted_metrics_storage_volume_size=10Gi
-#openshift_hosted_metrics_storage_labels={'storage': 'metrics'}
+#openshift_metrics_storage_kind=nfs
+#openshift_metrics_storage_access_modes=['ReadWriteOnce']
+#openshift_metrics_storage_nfs_directory=/exports
+#openshift_metrics_storage_nfs_options='*(rw,root_squash)'
+#openshift_metrics_storage_volume_name=metrics
+#openshift_metrics_storage_volume_size=10Gi
+#openshift_metrics_storage_labels={'storage': 'metrics'}
#
# Option B - External NFS Host
# NFS volume must already exist with path "nfs_directory/_volume_name" on
# the storage_host. For example, the remote volume path using these
# options would be "nfs.example.com:/exports/metrics"
-#openshift_hosted_metrics_storage_kind=nfs
-#openshift_hosted_metrics_storage_access_modes=['ReadWriteOnce']
-#openshift_hosted_metrics_storage_host=nfs.example.com
-#openshift_hosted_metrics_storage_nfs_directory=/exports
-#openshift_hosted_metrics_storage_volume_name=metrics
-#openshift_hosted_metrics_storage_volume_size=10Gi
-#openshift_hosted_metrics_storage_labels={'storage': 'metrics'}
+#openshift_metrics_storage_kind=nfs
+#openshift_metrics_storage_access_modes=['ReadWriteOnce']
+#openshift_metrics_storage_host=nfs.example.com
+#openshift_metrics_storage_nfs_directory=/exports
+#openshift_metrics_storage_volume_name=metrics
+#openshift_metrics_storage_volume_size=10Gi
+#openshift_metrics_storage_labels={'storage': 'metrics'}
#
# Option C - Dynamic -- If openshift supports dynamic volume provisioning for
# your cloud platform use this.
-#openshift_hosted_metrics_storage_kind=dynamic
+#openshift_metrics_storage_kind=dynamic
#
# Other Metrics Options -- Common items you may wish to reconfigure, for the complete
# list of options please see roles/openshift_metrics/README.md
@@ -548,44 +535,49 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Defaults to https://hawkular-metrics.{{openshift_master_default_subdomain}}/hawkular/metrics
# Currently, you may only alter the hostname portion of the URL; altering the
# `/hawkular/metrics` path will break installation of metrics.
-#openshift_hosted_metrics_public_url=https://hawkular-metrics.example.com/hawkular/metrics
+#openshift_metrics_hawkular_hostname=https://hawkular-metrics.example.com/hawkular/metrics
# Configure the prefix and version for the component images
-#openshift_hosted_metrics_deployer_prefix=docker.io/openshift/origin-
-#openshift_hosted_metrics_deployer_version=3.6.0
+#openshift_metrics_image_prefix=docker.io/openshift/origin-
+#openshift_metrics_image_version=v3.7.0
+#
+# StorageClass
+# openshift_storageclass_name=gp2
+# openshift_storageclass_parameters={'type': 'gp2', 'encrypted': 'false'}
+#
# Logging deployment
#
# Currently logging deployment is disabled by default, enable it by setting this
-#openshift_hosted_logging_deploy=true
+#openshift_logging_install_logging=true
#
# Logging storage config
# Option A - NFS Host Group
# An NFS volume will be created with path "nfs_directory/volume_name"
# on the host within the [nfs] host group. For example, the volume
# path using these options would be "/exports/logging"
-#openshift_hosted_logging_storage_kind=nfs
-#openshift_hosted_logging_storage_access_modes=['ReadWriteOnce']
-#openshift_hosted_logging_storage_nfs_directory=/exports
-#openshift_hosted_logging_storage_nfs_options='*(rw,root_squash)'
-#openshift_hosted_logging_storage_volume_name=logging
-#openshift_hosted_logging_storage_volume_size=10Gi
-#openshift_hosted_logging_storage_labels={'storage': 'logging'}
+#openshift_logging_storage_kind=nfs
+#openshift_logging_storage_access_modes=['ReadWriteOnce']
+#openshift_logging_storage_nfs_directory=/exports
+#openshift_logging_storage_nfs_options='*(rw,root_squash)'
+#openshift_logging_storage_volume_name=logging
+#openshift_logging_storage_volume_size=10Gi
+#openshift_logging_storage_labels={'storage': 'logging'}
#
# Option B - External NFS Host
# NFS volume must already exist with path "nfs_directory/_volume_name" on
# the storage_host. For example, the remote volume path using these
# options would be "nfs.example.com:/exports/logging"
-#openshift_hosted_logging_storage_kind=nfs
-#openshift_hosted_logging_storage_access_modes=['ReadWriteOnce']
-#openshift_hosted_logging_storage_host=nfs.example.com
-#openshift_hosted_logging_storage_nfs_directory=/exports
-#openshift_hosted_logging_storage_volume_name=logging
-#openshift_hosted_logging_storage_volume_size=10Gi
-#openshift_hosted_logging_storage_labels={'storage': 'logging'}
+#openshift_logging_storage_kind=nfs
+#openshift_logging_storage_access_modes=['ReadWriteOnce']
+#openshift_logging_storage_host=nfs.example.com
+#openshift_logging_storage_nfs_directory=/exports
+#openshift_logging_storage_volume_name=logging
+#openshift_logging_storage_volume_size=10Gi
+#openshift_logging_storage_labels={'storage': 'logging'}
#
# Option C - Dynamic -- If openshift supports dynamic volume provisioning for
# your cloud platform use this.
-#openshift_hosted_logging_storage_kind=dynamic
+#openshift_logging_storage_kind=dynamic
#
# Option D - none -- Logging will use emptydir volumes which are destroyed when
# pods are deleted
@@ -595,13 +587,13 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#
# Configure loggingPublicURL in the master config for aggregate logging, defaults
# to kibana.{{ openshift_master_default_subdomain }}
-#openshift_hosted_logging_hostname=logging.apps.example.com
+#openshift_logging_kibana_hostname=logging.apps.example.com
# Configure the number of elastic search nodes, unless you're using dynamic provisioning
# this value must be 1
-#openshift_hosted_logging_elasticsearch_cluster_size=1
+#openshift_logging_es_cluster_size=1
# Configure the prefix and version for the component images
-#openshift_hosted_logging_deployer_prefix=docker.io/openshift/origin-
-#openshift_hosted_logging_deployer_version=3.6.0
+#openshift_logging_image_prefix=docker.io/openshift/origin-
+#openshift_logging_image_version=v3.7.0
# Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet')
# os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'
@@ -621,6 +613,12 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# WORKAROUND : If you must use an overlapping subnet, you can configure a non conflicting
# docker0 CIDR range by adding '--bip=192.168.2.1/24' to DOCKER_NETWORK_OPTIONS
# environment variable located in /etc/sysconfig/docker-network.
+# When upgrading or scaling up, the following must match what's in your master config!
+#   Inventory variable:        master yaml field:
+#   osm_cluster_network_cidr   clusterNetworkCIDR
+#   openshift_portal_net       serviceNetworkCIDR
+# When installing, osm_cluster_network_cidr and openshift_portal_net must be set.
+# Sane examples are provided below.
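+#
+# As a sketch, these map to fields under networkConfig in the master
+# configuration (the path /etc/origin/master/master-config.yaml is the
+# usual default, but an assumption here):
+#
+#   networkConfig:
+#     clusterNetworkCIDR: 10.128.0.0/14
+#     serviceNetworkCIDR: 172.30.0.0/16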
#osm_cluster_network_cidr=10.128.0.0/14
#openshift_portal_net=172.30.0.0/16
@@ -640,8 +638,12 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# the CIDRs reserved for external IPs, nodes, pods, or services.
#openshift_master_ingress_ip_network_cidr=172.46.0.0/16
-# Configure number of bits to allocate to each host’s subnet e.g. 9
+# Configure number of bits to allocate to each host's subnet e.g. 9
# would mean a /23 network on the host.
+# When upgrading or scaling up, the following must match what's in your master config!
+#   Inventory variable:      master yaml field:
+#   osm_host_subnet_length   hostSubnetLength
+# When installing, osm_host_subnet_length must be set. A sane example is provided below.
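+#
+# Corresponding master-config.yaml sketch (same assumptions as above):
+#
+#   networkConfig:
+#     hostSubnetLength: 9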
#osm_host_subnet_length=9
# Configure master API and console ports.
@@ -705,16 +707,11 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# interface other than the default network interface.
#openshift_set_node_ip=True
-# Force setting of system hostname when configuring OpenShift
-# This works around issues related to installations that do not have valid dns
-# entries for the interfaces attached to the host.
-#openshift_set_hostname=True
-
# Configure dnsIP in the node config
#openshift_dns_ip=172.30.0.1
# Configure node kubelet arguments. pods-per-core is valid in OpenShift Origin 1.3 or OpenShift Container Platform 3.3 and later.
-#openshift_node_kubelet_args={'pods-per-core': ['10'], 'max-pods': ['250'], 'image-gc-high-threshold': ['90'], 'image-gc-low-threshold': ['80']}
+#openshift_node_kubelet_args={'pods-per-core': ['10'], 'max-pods': ['250'], 'image-gc-high-threshold': ['85'], 'image-gc-low-threshold': ['80']}
# Configure logrotate scripts
# See: https://github.com/nickhammond/ansible-logrotate
@@ -727,11 +724,10 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Setting this variable to true will override that check.
#openshift_override_hostname_check=true
-# Configure dnsmasq for cluster dns, switch the host's local resolver to use dnsmasq
-# and configure node's dnsIP to point at the node's local dnsmasq instance. Defaults
-# to True for Origin 1.2 and OSE 3.2. False for 1.1 / 3.1 installs, this cannot
-# be used with 1.0 and 3.0.
+# openshift_use_dnsmasq is deprecated. In versions >= 3.6 this must be true,
+# or installs will fail.
#openshift_use_dnsmasq=False
+
# Define an additional dnsmasq.conf file to deploy to /etc/dnsmasq.d/openshift-ansible.conf
# This is useful for POC environments where DNS may not actually be available yet or to set
# options like 'strict-order' to alter dnsmasq configuration.
@@ -740,6 +736,10 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Global Proxy Configuration
# These options configure HTTP_PROXY, HTTPS_PROXY, and NOPROXY environment
# variables for docker and master services.
+#
+# Hosts in the openshift_no_proxy list will NOT use any globally
+# configured HTTP(S)_PROXYs. openshift_no_proxy accepts domains
+# (.example.com), hosts (example.com), and IP addresses.
#openshift_http_proxy=http://USER:PASSWORD@IPADDR:PORT
#openshift_https_proxy=https://USER:PASSWORD@IPADDR:PORT
#openshift_no_proxy='.hosts.example.com,some-host.com'
@@ -747,7 +747,11 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Most environments don't require a proxy between openshift masters, nodes, and
# etcd hosts. So automatically add those hostnames to the openshift_no_proxy list.
# If all of your hosts share a common domain you may wish to disable this and
-# specify that domain above.
+# specify that domain above instead.
+#
+# For example, if your hosts have the FQDNs m1.ex.com, n1.ex.com, and
+# n2.ex.com, you would simply add '.ex.com' to the openshift_no_proxy
+# variable (above) and set this value to False.
#openshift_generate_no_proxy_hosts=True
#
# These options configure the BuildDefaults admission controller which injects
@@ -765,9 +769,9 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_builddefaults_nodeselectors={'nodelabel1':'nodelabelvalue1'}
#openshift_builddefaults_annotations={'annotationkey1':'annotationvalue1'}
#openshift_builddefaults_resources_requests_cpu=100m
-#openshift_builddefaults_resources_requests_memory=256m
+#openshift_builddefaults_resources_requests_memory=256Mi
#openshift_builddefaults_resources_limits_cpu=1000m
-#openshift_builddefaults_resources_limits_memory=512m
+#openshift_builddefaults_resources_limits_memory=512Mi
# Or you may optionally define your own build defaults configuration serialized as json
#openshift_builddefaults_json='{"BuildDefaults":{"configuration":{"apiVersion":"v1","env":[{"name":"HTTP_PROXY","value":"http://proxy.example.com.redhat.com:3128"},{"name":"NO_PROXY","value":"ose3-master.example.com"}],"gitHTTPProxy":"http://proxy.example.com:3128","gitNoProxy":"ose3-master.example.com","kind":"BuildDefaultsConfig"}}}'
@@ -862,6 +866,14 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# openshift_upgrade_nodes_serial=4 openshift_upgrade_nodes_max_fail_percentage=49
# whereas this would not
# openshift_upgrade_nodes_serial=4 openshift_upgrade_nodes_max_fail_percentage=50
+#
+# Multiple data migrations take place during the upgrade and, if they fail,
+# they will fail the upgrade. You may wish to disable these or make them
+# non-fatal.
+#
+# openshift_upgrade_pre_storage_migration_enabled=true
+# openshift_upgrade_pre_storage_migration_fatal=true
+# openshift_upgrade_post_storage_migration_enabled=true
+# openshift_upgrade_post_storage_migration_fatal=false
# host group for masters
[masters]
diff --git a/inventory/byo/hosts.ose.example b/inventory/byo/hosts.ose.example
index 928da40fa..03fbcc63c 100644
--- a/inventory/byo/hosts.ose.example
+++ b/inventory/byo/hosts.ose.example
@@ -34,17 +34,17 @@ openshift_deployment_type=openshift-enterprise
# use this to lookup the latest exact version of the container images, which is the tag actually used to configure
# the cluster. For RPM installations we just verify the version detected in your configured repos matches this
# release.
-openshift_release=v3.6
+openshift_release=v3.7
# Specify an exact container image tag to install or configure.
# WARNING: This value will be used for all hosts in containerized environments, even those that have another version installed.
# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
-#openshift_image_tag=v3.6.0
+#openshift_image_tag=v3.7.0
# Specify an exact rpm version to install or configure.
# WARNING: This value will be used for all hosts in RPM based environments, even those that have another version installed.
# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
-#openshift_pkg_version=-3.6.0
+#openshift_pkg_version=-3.7.0
# This enables all the system containers except for docker:
#openshift_use_system_containers=False
@@ -56,8 +56,11 @@ openshift_release=v3.6
#openshift_use_node_system_container=False
#openshift_use_master_system_container=False
#openshift_use_etcd_system_container=False
+#
+# In either case, system_images_registry must be specified so that the system images can be found
+#system_images_registry="registry.access.redhat.com"
-# Install the openshift examples
+# Manage openshift example imagestreams and templates during install and upgrade
#openshift_install_examples=true
# Configure logoutURL in the master config for console customization
@@ -84,6 +87,13 @@ openshift_release=v3.6
# See: https://godoc.org/github.com/openshift/origin/pkg/cmd/server/api#ImagePolicyConfig
#openshift_master_image_policy_config={"maxImagesBulkImportedPerRepository": 3, "disableScheduledImport": true}
+# Configure master API rate limits for external clients
+#openshift_master_external_ratelimit_qps=200
+#openshift_master_external_ratelimit_burst=400
+# Configure master API rate limits for loopback clients
+#openshift_master_loopback_ratelimit_qps=300
+#openshift_master_loopback_ratelimit_burst=600
+
# Docker Configuration
# Add additional, insecure, and blocked registries to global docker configuration
# For enterprise deployment types we ensure that registry.access.redhat.com is
@@ -101,14 +111,24 @@ openshift_release=v3.6
# The following options must not be used
# - openshift_docker_options
#openshift_docker_use_system_container=False
-# Force the registry to use for the system container. By default the registry
+# Install and run cri-o alongside docker
+# NOTE: This uses openshift_docker_systemcontainer_image_registry_override as its override,
+# just as container-engine does.
+#openshift_use_crio=False
+# Force the registry to use for the container-engine/crio system container. By default the registry
# will be built off of the deployment type and ansible_distribution. Only
# use this option if you are sure you know what you are doing!
-#openshift_docker_systemcontainer_image_registry_override="registry.example.com"
+#openshift_docker_systemcontainer_image_override="registry.example.com/container-engine:latest"
+#openshift_crio_systemcontainer_image_override="registry.example.com/cri-o:latest"
# Items added, as is, to end of /etc/sysconfig/docker OPTIONS
# Default value: "--log-driver=journald"
#openshift_docker_options="-l warn --ipv6=false"
+# Specify whether to run Docker daemon with SELinux enabled in containers. Default is True.
+# Uncomment below to disable; for example if your kernel does not support the
+# Docker overlay/overlay2 storage drivers with SELinux enabled.
+#openshift_docker_selinux_enabled=False
+
# Specify exact version of Docker to configure or upgrade to.
# Downgrades are not supported and will error out. Be careful when upgrading docker from < 1.10 to > 1.10.
# docker_version="1.12.1"
@@ -150,8 +170,17 @@ openshift_release=v3.6
# modify image streams to point at that registry by setting the following to true
#openshift_examples_modify_imagestreams=true
-# Additional yum repos to install
+# If oreg_url points to a registry requiring authentication, provide the following:
+#oreg_auth_user=some_user
+#oreg_auth_password='my-pass'
+# NOTE: oreg_url must be defined by the user for oreg_auth_* to have any effect.
+# oreg_auth_password should be generated by running docker login.
+# To update registry auth credentials, uncomment the following:
+#oreg_auth_credentials_replace=True
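+# A sketch of generating them (registry hostname is an assumption):
+#   docker login -u some_user -p 'my-pass' registry.example.com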
+
+# OpenShift repository configuration
#openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://example.com/puddle/build/AtomicOpenShift/3.1/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}]
+#openshift_repos_enable_testing=false
# htpasswd auth
openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}]
@@ -358,44 +387,8 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# and is in the form of a list. If no data is passed then a default router will be
# created. There are multiple combinations of router sharding. The one described
# below supports routers on separate nodes.
-#openshift_hosted_routers:
-#- name: router1
-# stats_port: 1936
-# ports:
-# - 80:80
-# - 443:443
-# replicas: 1
-# namespace: default
-# serviceaccount: router
-# selector: type=router1
-# images: "openshift3/ose-${component}:${version}"
-# edits: []
-# certificate:
-# certfile: /path/to/certificate/abc.crt
-# keyfile: /path/to/certificate/abc.key
-# cafile: /path/to/certificate/ca.crt
-#- name: router2
-# stats_port: 1936
-# ports:
-# - 80:80
-# - 443:443
-# replicas: 1
-# namespace: default
-# serviceaccount: router
-# selector: type=router2
-# images: "openshift3/ose-${component}:${version}"
-# certificate:
-# certfile: /path/to/certificate/xyz.crt
-# keyfile: /path/to/certificate/xyz.key
-# cafile: /path/to/certificate/ca.crt
-# edits:
-# # ROUTE_LABELS sets the router to listen for routes
-# # tagged with the provided values
-# - key: spec.template.spec.containers[0].env
-# value:
-# name: ROUTE_LABELS
-# value: "route=external"
-# action: append
+#
+#openshift_hosted_routers=[{'name': 'router1', 'certificate': {'certfile': '/path/to/certificate/abc.crt', 'keyfile': '/path/to/certificate/abc.key', 'cafile': '/path/to/certificate/ca.crt'}, 'replicas': 1, 'serviceaccount': 'router', 'namespace': 'default', 'stats_port': 1936, 'edits': [], 'images': 'openshift3/ose-${component}:${version}', 'selector': 'type=router1', 'ports': ['80:80', '443:443']}, {'name': 'router2', 'certificate': {'certfile': '/path/to/certificate/xyz.crt', 'keyfile': '/path/to/certificate/xyz.key', 'cafile': '/path/to/certificate/ca.crt'}, 'replicas': 1, 'serviceaccount': 'router', 'namespace': 'default', 'stats_port': 1936, 'edits': [{'action': 'append', 'key': 'spec.template.spec.containers[0].env', 'value': {'name': 'ROUTE_LABELS', 'value': 'route=external'}}], 'images': 'openshift3/ose-${component}:${version}', 'selector': 'type=router2', 'ports': ['80:80', '443:443']}]
# OpenShift Registry Console Options
# Override the console image prefix for enterprise deployments, not used in origin
@@ -468,6 +461,8 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# S3 bucket must already exist.
#openshift_hosted_registry_storage_kind=object
#openshift_hosted_registry_storage_provider=s3
+#openshift_hosted_registry_storage_s3_encrypt=false
+#openshift_hosted_registry_storage_s3_kmskeyid=aws_kms_key_id
#openshift_hosted_registry_storage_s3_accesskey=aws_access_key_id
#openshift_hosted_registry_storage_s3_secretkey=aws_secret_access_key
#openshift_hosted_registry_storage_s3_bucket=bucket_name
@@ -504,10 +499,10 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# See: https://docs.openshift.com/enterprise/latest/install_config/cluster_metrics.html
#
# By default metrics are not automatically deployed, set this to enable them
-# openshift_hosted_metrics_deploy=true
+#openshift_metrics_install_metrics=true
#
# Storage Options
-# If openshift_hosted_metrics_storage_kind is unset then metrics will be stored
+# If openshift_metrics_storage_kind is unset then metrics will be stored
# in an EmptyDir volume and will be deleted when the cassandra pod terminates.
# Storage options A & B currently support only one cassandra pod which is
# generally enough for up to 1000 pods. Additional volumes can be created
@@ -517,29 +512,29 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# An NFS volume will be created with path "nfs_directory/volume_name"
# on the host within the [nfs] host group. For example, the volume
# path using these options would be "/exports/metrics"
-#openshift_hosted_metrics_storage_kind=nfs
-#openshift_hosted_metrics_storage_access_modes=['ReadWriteOnce']
-#openshift_hosted_metrics_storage_nfs_directory=/exports
-#openshift_hosted_metrics_storage_nfs_options='*(rw,root_squash)'
-#openshift_hosted_metrics_storage_volume_name=metrics
-#openshift_hosted_metrics_storage_volume_size=10Gi
-#openshift_hosted_metrics_storage_labels={'storage': 'metrics'}
+#openshift_metrics_storage_kind=nfs
+#openshift_metrics_storage_access_modes=['ReadWriteOnce']
+#openshift_metrics_storage_nfs_directory=/exports
+#openshift_metrics_storage_nfs_options='*(rw,root_squash)'
+#openshift_metrics_storage_volume_name=metrics
+#openshift_metrics_storage_volume_size=10Gi
+#openshift_metrics_storage_labels={'storage': 'metrics'}
#
# Option B - External NFS Host
# NFS volume must already exist with path "nfs_directory/_volume_name" on
# the storage_host. For example, the remote volume path using these
# options would be "nfs.example.com:/exports/metrics"
-#openshift_hosted_metrics_storage_kind=nfs
-#openshift_hosted_metrics_storage_access_modes=['ReadWriteOnce']
-#openshift_hosted_metrics_storage_host=nfs.example.com
-#openshift_hosted_metrics_storage_nfs_directory=/exports
-#openshift_hosted_metrics_storage_volume_name=metrics
-#openshift_hosted_metrics_storage_volume_size=10Gi
-#openshift_hosted_metrics_storage_labels={'storage': 'metrics'}
+#openshift_metrics_storage_kind=nfs
+#openshift_metrics_storage_access_modes=['ReadWriteOnce']
+#openshift_metrics_storage_host=nfs.example.com
+#openshift_metrics_storage_nfs_directory=/exports
+#openshift_metrics_storage_volume_name=metrics
+#openshift_metrics_storage_volume_size=10Gi
+#openshift_metrics_storage_labels={'storage': 'metrics'}
#
# Option C - Dynamic -- If openshift supports dynamic volume provisioning for
# your cloud platform use this.
-#openshift_hosted_metrics_storage_kind=dynamic
+#openshift_metrics_storage_kind=dynamic
#
# Other Metrics Options -- Common items you may wish to reconfigure, for the complete
# list of options please see roles/openshift_metrics/README.md
@@ -548,44 +543,49 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Defaults to https://hawkular-metrics.{{openshift_master_default_subdomain}}/hawkular/metrics
# Currently, you may only alter the hostname portion of the URL; altering the
# `/hawkular/metrics` path will break installation of metrics.
-#openshift_hosted_metrics_public_url=https://hawkular-metrics.example.com/hawkular/metrics
+#openshift_metrics_hawkular_hostname=https://hawkular-metrics.example.com/hawkular/metrics
# Configure the prefix and version for the component images
-#openshift_hosted_metrics_deployer_prefix=registry.example.com:8888/openshift3/
-#openshift_hosted_metrics_deployer_version=3.6.0
+#openshift_metrics_image_prefix=registry.example.com:8888/openshift3/
+#openshift_metrics_image_version=3.7.0
+#
+# StorageClass
+# openshift_storageclass_name=gp2
+# openshift_storageclass_parameters={'type': 'gp2', 'encrypted': 'false'}
+#
# Logging deployment
#
# Currently logging deployment is disabled by default, enable it by setting this
-#openshift_hosted_logging_deploy=true
+#openshift_logging_install_logging=true
#
# Logging storage config
# Option A - NFS Host Group
# An NFS volume will be created with path "nfs_directory/volume_name"
# on the host within the [nfs] host group. For example, the volume
# path using these options would be "/exports/logging"
-#openshift_hosted_logging_storage_kind=nfs
-#openshift_hosted_logging_storage_access_modes=['ReadWriteOnce']
-#openshift_hosted_logging_storage_nfs_directory=/exports
-#openshift_hosted_logging_storage_nfs_options='*(rw,root_squash)'
-#openshift_hosted_logging_storage_volume_name=logging
-#openshift_hosted_logging_storage_volume_size=10Gi
-#openshift_hosted_logging_storage_labels={'storage': 'logging'}
+#openshift_logging_storage_kind=nfs
+#openshift_logging_storage_access_modes=['ReadWriteOnce']
+#openshift_logging_storage_nfs_directory=/exports
+#openshift_logging_storage_nfs_options='*(rw,root_squash)'
+#openshift_logging_storage_volume_name=logging
+#openshift_logging_storage_volume_size=10Gi
+#openshift_logging_storage_labels={'storage': 'logging'}
#
# Option B - External NFS Host
# NFS volume must already exist with path "nfs_directory/_volume_name" on
# the storage_host. For example, the remote volume path using these
# options would be "nfs.example.com:/exports/logging"
-#openshift_hosted_logging_storage_kind=nfs
-#openshift_hosted_logging_storage_access_modes=['ReadWriteOnce']
-#openshift_hosted_logging_storage_host=nfs.example.com
-#openshift_hosted_logging_storage_nfs_directory=/exports
-#openshift_hosted_logging_storage_volume_name=logging
-#openshift_hosted_logging_storage_volume_size=10Gi
-#openshift_hosted_logging_storage_labels={'storage': 'logging'}
+#openshift_logging_storage_kind=nfs
+#openshift_logging_storage_access_modes=['ReadWriteOnce']
+#openshift_logging_storage_host=nfs.example.com
+#openshift_logging_storage_nfs_directory=/exports
+#openshift_logging_storage_volume_name=logging
+#openshift_logging_storage_volume_size=10Gi
+#openshift_logging_storage_labels={'storage': 'logging'}
#
# Option C - Dynamic -- If openshift supports dynamic volume provisioning for
# your cloud platform use this.
-#openshift_hosted_logging_storage_kind=dynamic
+#openshift_logging_storage_kind=dynamic
#
# Option D - none -- Logging will use emptydir volumes which are destroyed when
# pods are deleted
@@ -595,13 +595,13 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#
# Configure loggingPublicURL in the master config for aggregate logging, defaults
# to kibana.{{ openshift_master_default_subdomain }}
-#openshift_hosted_logging_hostname=logging.apps.example.com
+#openshift_logging_kibana_hostname=logging.apps.example.com
# Configure the number of elastic search nodes, unless you're using dynamic provisioning
# this value must be 1
-#openshift_hosted_logging_elasticsearch_cluster_size=1
+#openshift_logging_es_cluster_size=1
# Configure the prefix and version for the component images
-#openshift_hosted_logging_deployer_prefix=registry.example.com:8888/openshift3/
-#openshift_hosted_logging_deployer_version=3.6.0
+#openshift_logging_image_prefix=registry.example.com:8888/openshift3/
+#openshift_logging_image_version=3.7.0
# Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet')
# os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'
@@ -621,6 +621,12 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# WORKAROUND : If you must use an overlapping subnet, you can configure a non conflicting
# docker0 CIDR range by adding '--bip=192.168.2.1/24' to DOCKER_NETWORK_OPTIONS
# environment variable located in /etc/sysconfig/docker-network.
+# When upgrading or scaling up, the following must match what's in your master config!
+#   Inventory variable:        master yaml field:
+#   osm_cluster_network_cidr   clusterNetworkCIDR
+#   openshift_portal_net       serviceNetworkCIDR
+# When installing, osm_cluster_network_cidr and openshift_portal_net must be set.
+# Sane examples are provided below.
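+#
+# As a sketch, these map to fields under networkConfig in the master
+# configuration (the path /etc/origin/master/master-config.yaml is the
+# usual default, but an assumption here):
+#
+#   networkConfig:
+#     clusterNetworkCIDR: 10.128.0.0/14
+#     serviceNetworkCIDR: 172.30.0.0/16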
#osm_cluster_network_cidr=10.128.0.0/14
#openshift_portal_net=172.30.0.0/16
@@ -640,8 +646,12 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# the CIDRs reserved for external IPs, nodes, pods, or services.
#openshift_master_ingress_ip_network_cidr=172.46.0.0/16
-# Configure number of bits to allocate to each host’s subnet e.g. 9
+# Configure number of bits to allocate to each host's subnet e.g. 9
# would mean a /23 network on the host.
+# When upgrading or scaling up, the following must match what's in your master config!
+#   Inventory variable:      master yaml field:
+#   osm_host_subnet_length   hostSubnetLength
+# When installing, osm_host_subnet_length must be set. A sane example is provided below.
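+#
+# Corresponding master-config.yaml sketch (same assumptions as above):
+#
+#   networkConfig:
+#     hostSubnetLength: 9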
#osm_host_subnet_length=9
# Configure master API and console ports.
@@ -705,16 +715,11 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# interface other than the default network interface.
#openshift_set_node_ip=True
-# Force setting of system hostname when configuring OpenShift
-# This works around issues related to installations that do not have valid dns
-# entries for the interfaces attached to the host.
-#openshift_set_hostname=True
-
# Configure dnsIP in the node config
#openshift_dns_ip=172.30.0.1
# Configure node kubelet arguments. pods-per-core is valid in OpenShift Origin 1.3 or OpenShift Container Platform 3.3 and later.
-#openshift_node_kubelet_args={'pods-per-core': ['10'], 'max-pods': ['250'], 'image-gc-high-threshold': ['90'], 'image-gc-low-threshold': ['80']}
+#openshift_node_kubelet_args={'pods-per-core': ['10'], 'max-pods': ['250'], 'image-gc-high-threshold': ['85'], 'image-gc-low-threshold': ['80']}
# Configure logrotate scripts
# See: https://github.com/nickhammond/ansible-logrotate
@@ -727,10 +732,8 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Setting this variable to true will override that check.
#openshift_override_hostname_check=true
-# Configure dnsmasq for cluster dns, switch the host's local resolver to use dnsmasq
-# and configure node's dnsIP to point at the node's local dnsmasq instance. Defaults
-# to True for Origin 1.2 and OSE 3.2. False for 1.1 / 3.1 installs, this cannot
-# be used with 1.0 and 3.0.
+# openshift_use_dnsmasq is deprecated. In versions >= 3.6 this must be true,
+# or installs will fail.
#openshift_use_dnsmasq=False
# Define an additional dnsmasq.conf file to deploy to /etc/dnsmasq.d/openshift-ansible.conf
# This is useful for POC environments where DNS may not actually be available yet or to set
@@ -740,6 +743,10 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Global Proxy Configuration
# These options configure HTTP_PROXY, HTTPS_PROXY, and NOPROXY environment
# variables for docker and master services.
+#
+# Hosts in the openshift_no_proxy list will NOT use any globally
+# configured HTTP(S)_PROXYs. openshift_no_proxy accepts domains
+# (.example.com), hosts (example.com), and IP addresses.
#openshift_http_proxy=http://USER:PASSWORD@IPADDR:PORT
#openshift_https_proxy=https://USER:PASSWORD@IPADDR:PORT
#openshift_no_proxy='.hosts.example.com,some-host.com'
@@ -747,7 +754,11 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Most environments don't require a proxy between openshift masters, nodes, and
# etcd hosts. So automatically add those hostnames to the openshift_no_proxy list.
# If all of your hosts share a common domain you may wish to disable this and
-# specify that domain above.
+# specify that domain above instead.
+#
+# For example, if your hosts have the FQDNs m1.ex.com, n1.ex.com, and
+# n2.ex.com, you would simply add '.ex.com' to the openshift_no_proxy
+# variable (above) and set this value to False.
#openshift_generate_no_proxy_hosts=True
#
# These options configure the BuildDefaults admission controller which injects
@@ -765,9 +776,9 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_builddefaults_nodeselectors={'nodelabel1':'nodelabelvalue1'}
#openshift_builddefaults_annotations={'annotationkey1':'annotationvalue1'}
#openshift_builddefaults_resources_requests_cpu=100m
-#openshift_builddefaults_resources_requests_memory=256m
+#openshift_builddefaults_resources_requests_memory=256Mi
#openshift_builddefaults_resources_limits_cpu=1000m
-#openshift_builddefaults_resources_limits_memory=512m
+#openshift_builddefaults_resources_limits_memory=512Mi
# Or you may optionally define your own build defaults configuration serialized as json
#openshift_builddefaults_json='{"BuildDefaults":{"configuration":{"apiVersion":"v1","env":[{"name":"HTTP_PROXY","value":"http://proxy.example.com.redhat.com:3128"},{"name":"NO_PROXY","value":"ose3-master.example.com"}],"gitHTTPProxy":"http://proxy.example.com:3128","gitNoProxy":"ose3-master.example.com","kind":"BuildDefaultsConfig"}}}'
@@ -858,6 +869,14 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# openshift_upgrade_nodes_serial=4 openshift_upgrade_nodes_max_fail_percentage=49
# whereas this would not
# openshift_upgrade_nodes_serial=4 openshift_upgrade_nodes_max_fail_percentage=50
+#
+# Multiple data migrations take place during the upgrade and, if they fail,
+# they will fail the upgrade. You may wish to disable these or make them
+# non-fatal.
+#
+# openshift_upgrade_pre_storage_migration_enabled=true
+# openshift_upgrade_pre_storage_migration_fatal=true
+# openshift_upgrade_post_storage_migration_enabled=true
+# openshift_upgrade_post_storage_migration_fatal=false
# host group for masters
[masters]