Diffstat (limited to 'inventory')
-rw-r--r--  inventory/byo/hosts.byo.glusterfs.external.example               56
-rw-r--r--  inventory/byo/hosts.byo.glusterfs.mixed.example                  59
-rw-r--r--  inventory/byo/hosts.byo.glusterfs.native.example                 46
-rw-r--r--  inventory/byo/hosts.byo.glusterfs.registry-only.example (renamed from inventory/byo/hosts.byo.native-glusterfs.example)  11
-rw-r--r--  inventory/byo/hosts.byo.glusterfs.storage-and-registry.example   63
-rw-r--r--  inventory/byo/hosts.origin.example                               59
-rw-r--r--  inventory/byo/hosts.ose.example                                  59
7 files changed, 344 insertions, 9 deletions
diff --git a/inventory/byo/hosts.byo.glusterfs.external.example b/inventory/byo/hosts.byo.glusterfs.external.example
new file mode 100644
index 000000000..628d3a3f7
--- /dev/null
+++ b/inventory/byo/hosts.byo.glusterfs.external.example
@@ -0,0 +1,56 @@
+# This is an example of a bring your own (byo) host inventory for a cluster
+# using an external (non-native) GlusterFS cluster for its storage.
+#
+# This inventory may be used with the byo/config.yml playbook to deploy a new
+# cluster with GlusterFS storage, which will use that storage to create a
+# volume that will provide backend storage for a hosted Docker registry.
+#
+# This inventory may also be used with byo/openshift-glusterfs/config.yml to
+# deploy GlusterFS storage on an existing cluster. With this playbook, the
+# registry backend volume will be created but the administrator must then
+# either deploy a hosted registry or change an existing hosted registry to use
+# that volume.
+#
+# There are additional configuration parameters that can be specified to
+# control the deployment and state of a GlusterFS cluster. Please see the
+# documentation in playbooks/byo/openshift-glusterfs/README.md and
+# roles/openshift_storage_glusterfs/README.md for additional details.
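+#
+# For example, a new-cluster deployment could be run like this (a hypothetical
+# invocation; adjust the inventory path and playbook layout to your checkout):
+#
+#   ansible-playbook -i inventory/byo/hosts.byo.glusterfs.external.example \
+#       playbooks/byo/config.yml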
+
+[OSEv3:children]
+masters
+nodes
+# Specify there will be GlusterFS nodes
+glusterfs
+
+[OSEv3:vars]
+ansible_ssh_user=root
+openshift_deployment_type=origin
+# Specify that we want to use an external GlusterFS cluster
+openshift_storage_glusterfs_is_native=False
+# Specify the IP address or hostname of the external heketi service
+openshift_storage_glusterfs_heketi_url=172.0.0.1
+
+[masters]
+master node=True storage=True master=True
+
+[nodes]
+master node=True storage=True master=True openshift_schedulable=False
+node0 node=True openshift_schedulable=True
+node1 node=True openshift_schedulable=True
+node2 node=True openshift_schedulable=True
+
+# Specify the glusterfs group, which contains the nodes of the external
+# GlusterFS cluster. At a minimum, each node must have "glusterfs_ip"
+# and "glusterfs_devices" variables defined.
+#
+# The first variable indicates the IP address or hostname of the external
+# GlusterFS node, and must be reachable by the external heketi service.
+#
+# The second variable is a list of block devices the node will have access to
+# that are intended solely for use as GlusterFS storage. These block devices
+# must be bare (i.e. have no data and not be marked as LVM PVs), and will be
+# formatted.
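+#
+# As a sanity check (a hedged sketch; device names are illustrative), you can
+# confirm a device is bare before handing it to GlusterFS, and wipe any
+# leftover signatures:
+#
+#   lsblk /dev/vdb            # no partitions or mountpoints should be listed
+#   pvs | grep vdb            # should print nothing (not an LVM PV)
+#   wipefs --all /dev/vdb     # destroys any existing signatures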
+[glusterfs]
+node0.local glusterfs_ip='172.0.0.10' glusterfs_devices='[ "/dev/vdb" ]'
+node1.local glusterfs_ip='172.0.0.11' glusterfs_devices='[ "/dev/vdb", "/dev/vdc" ]'
+node2.local glusterfs_ip='172.0.0.12' glusterfs_devices='[ "/dev/vdd" ]'
diff --git a/inventory/byo/hosts.byo.glusterfs.mixed.example b/inventory/byo/hosts.byo.glusterfs.mixed.example
new file mode 100644
index 000000000..fd47cb9d5
--- /dev/null
+++ b/inventory/byo/hosts.byo.glusterfs.mixed.example
@@ -0,0 +1,59 @@
+# This is an example of a bring your own (byo) host inventory for a cluster
+# using an external GlusterFS cluster together with a natively hosted,
+# containerized heketi service.
+#
+# This inventory may be used with the byo/config.yml playbook to deploy a new
+# cluster with GlusterFS storage, which will use that storage to create a
+# volume that will provide backend storage for a hosted Docker registry.
+#
+# This inventory may also be used with byo/openshift-glusterfs/config.yml to
+# deploy GlusterFS storage on an existing cluster. With this playbook, the
+# registry backend volume will be created but the administrator must then
+# either deploy a hosted registry or change an existing hosted registry to use
+# that volume.
+#
+# There are additional configuration parameters that can be specified to
+# control the deployment and state of a GlusterFS cluster. Please see the
+# documentation in playbooks/byo/openshift-glusterfs/README.md and
+# roles/openshift_storage_glusterfs/README.md for additional details.
+
+[OSEv3:children]
+masters
+nodes
+# Specify there will be GlusterFS nodes
+glusterfs
+
+[OSEv3:vars]
+ansible_ssh_user=root
+openshift_deployment_type=origin
+# Specify that we want to use an external GlusterFS cluster and a native
+# heketi service
+openshift_storage_glusterfs_is_native=False
+openshift_storage_glusterfs_heketi_is_native=True
+# Specify that heketi will use SSH to communicate to the GlusterFS nodes and
+# the private key file it will use for authentication
+openshift_storage_glusterfs_heketi_executor=ssh
+openshift_storage_glusterfs_heketi_ssh_keyfile=/root/id_rsa
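+#
+# A minimal sketch of preparing that key (hypothetical; hostnames and paths
+# are illustrative): generate a passphrase-less keypair and install the
+# public key for root on each GlusterFS node:
+#
+#   ssh-keygen -t rsa -N '' -f /root/id_rsa
+#   ssh-copy-id -i /root/id_rsa.pub root@node0.local
+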
+[masters]
+master node=True storage=True master=True
+
+[nodes]
+master node=True storage=True master=True openshift_schedulable=False
+node0 node=True openshift_schedulable=True
+node1 node=True openshift_schedulable=True
+node2 node=True openshift_schedulable=True
+
+# Specify the glusterfs group, which contains the nodes of the external
+# GlusterFS cluster. At a minimum, each node must have "glusterfs_ip"
+# and "glusterfs_devices" variables defined.
+#
+# The first variable indicates the IP address or hostname of the external
+# GlusterFS node, and must be reachable by the heketi service pod.
+#
+# The second variable is a list of block devices the node will have access to
+# that are intended solely for use as GlusterFS storage. These block devices
+# must be bare (i.e. have no data and not be marked as LVM PVs), and will be
+# formatted.
+[glusterfs]
+node0.local glusterfs_ip='172.0.0.10' glusterfs_devices='[ "/dev/vdb" ]'
+node1.local glusterfs_ip='172.0.0.11' glusterfs_devices='[ "/dev/vdb", "/dev/vdc" ]'
+node2.local glusterfs_ip='172.0.0.12' glusterfs_devices='[ "/dev/vdd" ]'
diff --git a/inventory/byo/hosts.byo.glusterfs.native.example b/inventory/byo/hosts.byo.glusterfs.native.example
new file mode 100644
index 000000000..a3e2570c9
--- /dev/null
+++ b/inventory/byo/hosts.byo.glusterfs.native.example
@@ -0,0 +1,46 @@
+# This is an example of a bring your own (byo) host inventory for a cluster
+# with natively hosted, containerized GlusterFS storage for applications. It
+# will also automatically create a StorageClass for this purpose.
+#
+# This inventory may be used with the byo/config.yml playbook to deploy a new
+# cluster with GlusterFS storage.
+#
+# This inventory may also be used with byo/openshift-glusterfs/config.yml to
+# deploy GlusterFS storage on an existing cluster.
+#
+# There are additional configuration parameters that can be specified to
+# control the deployment and state of a GlusterFS cluster. Please see the
+# documentation in playbooks/byo/openshift-glusterfs/README.md and
+# roles/openshift_storage_glusterfs/README.md for additional details.
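+#
+# For example, deploying onto an existing cluster could look like this (a
+# hypothetical invocation; adjust paths to your checkout):
+#
+#   ansible-playbook -i inventory/byo/hosts.byo.glusterfs.native.example \
+#       playbooks/byo/openshift-glusterfs/config.yml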
+
+[OSEv3:children]
+masters
+nodes
+# Specify there will be GlusterFS nodes
+glusterfs
+
+[OSEv3:vars]
+ansible_ssh_user=root
+openshift_deployment_type=origin
+
+[masters]
+master node=True storage=True master=True
+
+[nodes]
+master node=True storage=True master=True openshift_schedulable=False
+node0 node=True openshift_schedulable=True
+node1 node=True openshift_schedulable=True
+node2 node=True openshift_schedulable=True
+
+# Specify the glusterfs group, which contains the nodes that will host
+# GlusterFS storage pods. At a minimum, each node must have a
+# "glusterfs_devices" variable defined. This variable is a list of block
+# devices the node will have access to that are intended solely for use as
+# GlusterFS storage. These block devices must be bare (i.e. have no data and
+# not be marked as LVM PVs), and will be formatted.
+[glusterfs]
+node0 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
+node1 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
+node2 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
diff --git a/inventory/byo/hosts.byo.native-glusterfs.example b/inventory/byo/hosts.byo.glusterfs.registry-only.example
index 2dbb57d40..999518abe 100644
--- a/inventory/byo/hosts.byo.native-glusterfs.example
+++ b/inventory/byo/hosts.byo.glusterfs.registry-only.example
@@ -1,11 +1,12 @@
# This is an example of a bring your own (byo) host inventory for a cluster
-# with natively hosted, containerized GlusterFS storage.
+# with natively hosted, containerized GlusterFS storage for exclusive use
+# as storage for a natively hosted Docker registry.
#
# This inventory may be used with the byo/config.yml playbook to deploy a new
# cluster with GlusterFS storage, which will use that storage to create a
# volume that will provide backend storage for a hosted Docker registry.
#
-# This inventory may also be used with byo/openshift-glusterfs/config.yml to
+# This inventory may also be used with byo/openshift-glusterfs/registry.yml to
# deploy GlusterFS storage on an existing cluster. With this playbook, the
# registry backend volume will be created but the administrator must then
# either deploy a hosted registry or change an existing hosted registry to use
@@ -20,11 +21,11 @@
masters
nodes
# Specify there will be GlusterFS nodes
-glusterfs
+glusterfs_registry
[OSEv3:vars]
ansible_ssh_user=root
-deployment_type=origin
+openshift_deployment_type=origin
# Specify that we want to use GlusterFS storage for a hosted registry
openshift_hosted_registry_storage_kind=glusterfs
@@ -45,7 +46,7 @@ node2 node=True openshift_node_labels="{'region': 'infra'}" openshift_schedula
# devices the node will have access to that is intended solely for use as
# GlusterFS storage. These block devices must be bare (e.g. have no data, not
# be marked as LVM PVs), and will be formatted.
-[glusterfs]
+[glusterfs_registry]
node0 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
node1 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
node2 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
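+
+# For example, a registry-only storage deployment on an existing cluster
+# could look like this (a hypothetical invocation; adjust paths to your
+# checkout):
+#
+#   ansible-playbook -i inventory/byo/hosts.byo.glusterfs.registry-only.example \
+#       playbooks/byo/openshift-glusterfs/registry.yml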
diff --git a/inventory/byo/hosts.byo.glusterfs.storage-and-registry.example b/inventory/byo/hosts.byo.glusterfs.storage-and-registry.example
new file mode 100644
index 000000000..1df79301a
--- /dev/null
+++ b/inventory/byo/hosts.byo.glusterfs.storage-and-registry.example
@@ -0,0 +1,63 @@
+# This is an example of a bring your own (byo) host inventory for a cluster
+# with natively hosted, containerized GlusterFS storage for both general
+# application use and a natively hosted Docker registry. It will also create a
+# StorageClass for the general storage.
+#
+# This inventory may be used with the byo/config.yml playbook to deploy a new
+# cluster with GlusterFS storage.
+#
+# This inventory may also be used with byo/openshift-glusterfs/config.yml to
+# deploy GlusterFS storage on an existing cluster. With this playbook, the
+# registry backend volume will be created but the administrator must then
+# either deploy a hosted registry or change an existing hosted registry to use
+# that volume.
+#
+# There are additional configuration parameters that can be specified to
+# control the deployment and state of a GlusterFS cluster. Please see the
+# documentation in playbooks/byo/openshift-glusterfs/README.md and
+# roles/openshift_storage_glusterfs/README.md for additional details.
+
+[OSEv3:children]
+masters
+nodes
+# Specify there will be GlusterFS nodes
+glusterfs
+glusterfs_registry
+
+[OSEv3:vars]
+ansible_ssh_user=root
+openshift_deployment_type=origin
+# Specify that we want to use GlusterFS storage for a hosted registry
+openshift_hosted_registry_storage_kind=glusterfs
+
+[masters]
+master node=True storage=True master=True
+
+[nodes]
+master node=True storage=True master=True openshift_schedulable=False
+# It is recommended not to use a single cluster for both general and registry
+# storage, so two three-node clusters will be required.
+node0 node=True openshift_schedulable=True
+node1 node=True openshift_schedulable=True
+node2 node=True openshift_schedulable=True
+# A hosted registry, by default, will only be deployed on nodes labeled
+# "region=infra".
+node3 node=True openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
+node4 node=True openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
+node5 node=True openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
+
+# Specify the glusterfs and glusterfs_registry groups, which contain the
+# nodes that will host GlusterFS storage pods. At a minimum, each node must
+# have a "glusterfs_devices" variable defined. This variable is a list of
+# block devices the node will have access to that are intended solely for use
+# as GlusterFS storage. These block devices must be bare (i.e. have no data
+# and not be marked as LVM PVs), and will be formatted.
+[glusterfs]
+node0 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
+node1 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
+node2 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
+
+[glusterfs_registry]
+node3 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
+node4 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
+node5 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
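+
+# Once deployment completes, the automatically created StorageClass can be
+# checked with (a hedged verification step; names depend on configuration):
+#
+#   oc get storageclass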
diff --git a/inventory/byo/hosts.origin.example b/inventory/byo/hosts.origin.example
index b2490638b..6e53b4fd9 100644
--- a/inventory/byo/hosts.origin.example
+++ b/inventory/byo/hosts.origin.example
@@ -10,6 +10,10 @@ nfs
# Set variables common for all OSEv3 hosts
[OSEv3:vars]
+# Enable unsupported configurations: settings that will yield a partially
+# functioning cluster but are not supported for production use.
+#openshift_enable_unsupported_configurations=false
+
# SSH user, this user should allow ssh based auth without requiring a
# password. If using ssh key based auth, then the key should be managed by an
# ssh agent.
@@ -42,6 +46,17 @@ openshift_release=v3.6
# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
#openshift_pkg_version=-3.6.0
+# This enables all the system containers except for docker:
+#openshift_use_system_containers=False
+#
+# Alternatively, you can choose individually which components run as
+# system containers:
+#
+#openshift_use_openvswitch_system_container=False
+#openshift_use_node_system_container=False
+#openshift_use_master_system_container=False
+#openshift_use_etcd_system_container=False
+
# Install the openshift examples
#openshift_install_examples=true
@@ -98,6 +113,11 @@ openshift_release=v3.6
# Downgrades are not supported and will error out. Be careful when upgrading docker from < 1.10 to > 1.10.
# docker_version="1.12.1"
+# Specify whether to run Docker daemon with SELinux enabled in containers. Default is True.
+# Uncomment below to disable; for example if your kernel does not support the
+# Docker overlay/overlay2 storage drivers with SELinux enabled.
+#openshift_docker_selinux_enabled=False
+
# Skip upgrading Docker during an OpenShift upgrade, leaves the current Docker version alone.
# docker_upgrade=False
@@ -191,6 +211,13 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# or
#openshift_master_request_header_ca_file=<path to local ca file to use>
+# CloudForms Management Engine (ManageIQ) App Install
+#
+# Enables installation of MIQ server. Recommended for dedicated
+# clusters only. See roles/openshift_cfme/README.md for instructions
+# and requirements.
+#openshift_cfme_install_app=False
+
# Cloud Provider Configuration
#
# Note: You may make use of environment variables rather than store
@@ -446,6 +473,8 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# S3 bucket must already exist.
#openshift_hosted_registry_storage_kind=object
#openshift_hosted_registry_storage_provider=s3
+#openshift_hosted_registry_storage_s3_encrypt=false
+#openshift_hosted_registry_storage_s3_kmskeyid=aws_kms_key_id
#openshift_hosted_registry_storage_s3_accesskey=aws_access_key_id
#openshift_hosted_registry_storage_s3_secretkey=aws_secret_access_key
#openshift_hosted_registry_storage_s3_bucket=bucket_name
@@ -501,6 +530,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_hosted_metrics_storage_nfs_options='*(rw,root_squash)'
#openshift_hosted_metrics_storage_volume_name=metrics
#openshift_hosted_metrics_storage_volume_size=10Gi
+#openshift_hosted_metrics_storage_labels={'storage': 'metrics'}
#
# Option B - External NFS Host
# NFS volume must already exist with path "nfs_directory/_volume_name" on
@@ -512,6 +542,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_hosted_metrics_storage_nfs_directory=/exports
#openshift_hosted_metrics_storage_volume_name=metrics
#openshift_hosted_metrics_storage_volume_size=10Gi
+#openshift_hosted_metrics_storage_labels={'storage': 'metrics'}
#
# Option C - Dynamic -- If openshift supports dynamic volume provisioning for
# your cloud platform use this.
@@ -528,6 +559,11 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Configure the prefix and version for the component images
#openshift_hosted_metrics_deployer_prefix=docker.io/openshift/origin-
#openshift_hosted_metrics_deployer_version=3.6.0
+#
+# StorageClass
+# openshift_storageclass_name=gp2
+# openshift_storageclass_parameters={'type': 'gp2', 'encrypted': false}
+#
# Logging deployment
#
@@ -545,6 +581,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_hosted_logging_storage_nfs_options='*(rw,root_squash)'
#openshift_hosted_logging_storage_volume_name=logging
#openshift_hosted_logging_storage_volume_size=10Gi
+#openshift_hosted_logging_storage_labels={'storage': 'logging'}
#
# Option B - External NFS Host
# NFS volume must already exist with path "nfs_directory/_volume_name" on
@@ -556,6 +593,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_hosted_logging_storage_nfs_directory=/exports
#openshift_hosted_logging_storage_volume_name=logging
#openshift_hosted_logging_storage_volume_size=10Gi
+#openshift_hosted_logging_storage_labels={'storage': 'logging'}
#
# Option C - Dynamic -- If openshift supports dynamic volume provisioning for
# your cloud platform use this.
@@ -688,7 +726,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_dns_ip=172.30.0.1
# Configure node kubelet arguments. pods-per-core is valid in OpenShift Origin 1.3 or OpenShift Container Platform 3.3 and later.
-#openshift_node_kubelet_args={'pods-per-core': ['10'], 'max-pods': ['250'], 'image-gc-high-threshold': ['90'], 'image-gc-low-threshold': ['80']}
+#openshift_node_kubelet_args={'pods-per-core': ['10'], 'max-pods': ['250'], 'image-gc-high-threshold': ['85'], 'image-gc-low-threshold': ['80']}
# Configure logrotate scripts
# See: https://github.com/nickhammond/ansible-logrotate
@@ -714,6 +752,10 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Global Proxy Configuration
# These options configure HTTP_PROXY, HTTPS_PROXY, and NOPROXY environment
# variables for docker and master services.
+#
+# Hosts in the openshift_no_proxy list will NOT use any globally
+# configured HTTP(S)_PROXYs. openshift_no_proxy accepts domains
+# (.example.com), hosts (example.com), and IP addresses.
#openshift_http_proxy=http://USER:PASSWORD@IPADDR:PORT
#openshift_https_proxy=https://USER:PASSWORD@IPADDR:PORT
#openshift_no_proxy='.hosts.example.com,some-host.com'
@@ -721,7 +763,11 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Most environments don't require a proxy between openshift masters, nodes, and
# etcd hosts. So automatically add those hostnames to the openshift_no_proxy list.
# If all of your hosts share a common domain you may wish to disable this and
-# specify that domain above.
+# specify that domain above instead.
+#
+# For example, if your hosts have the FQDNs m1.ex.com, n1.ex.com, and
+# n2.ex.com, you could simply add '.ex.com' to the openshift_no_proxy
+# variable (above) and set this value to False.
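+#
+# e.g. (hypothetical values):
+#   openshift_no_proxy='.ex.com'
+#   openshift_generate_no_proxy_hosts=False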
#openshift_generate_no_proxy_hosts=True
#
# These options configure the BuildDefaults admission controller which injects
@@ -782,6 +828,12 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Enable API service auditing, available as of 1.3
#openshift_master_audit_config={"enabled": true}
+#
+# If you want a more advanced setup for the audit log, you can use the
+# following line instead.
+# The directory in "auditFilePath" will be created if it does not
+# exist.
+#openshift_master_audit_config={"enabled": true, "auditFilePath": "/var/log/openpaas-oscp-audit/openpaas-oscp-audit.log", "maximumFileRetentionDays": 14, "maximumFileSizeMegabytes": 500, "maximumRetainedFiles": 5}
# Enable origin repos that point at Centos PAAS SIG, defaults to true, only used
# by deployment_type=origin
@@ -798,6 +850,9 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Controls validity for etcd CA, peer, server and client certificates.
#
#etcd_ca_default_days=1825
+#
+# ServiceAccountConfig:LimitSecretReferences rejects pods that reference secrets their service accounts do not reference
+# openshift_master_saconfig_limitsecretreferences=false
# Upgrade Control
#
diff --git a/inventory/byo/hosts.ose.example b/inventory/byo/hosts.ose.example
index 67d53b22d..e3e9220fc 100644
--- a/inventory/byo/hosts.ose.example
+++ b/inventory/byo/hosts.ose.example
@@ -10,6 +10,10 @@ nfs
# Set variables common for all OSEv3 hosts
[OSEv3:vars]
+# Enable unsupported configurations: settings that will yield a partially
+# functioning cluster but are not supported for production use.
+#openshift_enable_unsupported_configurations=false
+
# SSH user, this user should allow ssh based auth without requiring a
# password. If using ssh key based auth, then the key should be managed by an
# ssh agent.
@@ -42,6 +46,17 @@ openshift_release=v3.6
# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
#openshift_pkg_version=-3.6.0
+# This enables all the system containers except for docker:
+#openshift_use_system_containers=False
+#
+# Alternatively, you can choose individually which components run as
+# system containers:
+#
+#openshift_use_openvswitch_system_container=False
+#openshift_use_node_system_container=False
+#openshift_use_master_system_container=False
+#openshift_use_etcd_system_container=False
+
# Install the openshift examples
#openshift_install_examples=true
@@ -94,6 +109,11 @@ openshift_release=v3.6
# Default value: "--log-driver=journald"
#openshift_docker_options="-l warn --ipv6=false"
+# Specify whether to run Docker daemon with SELinux enabled in containers. Default is True.
+# Uncomment below to disable; for example if your kernel does not support the
+# Docker overlay/overlay2 storage drivers with SELinux enabled.
+#openshift_docker_selinux_enabled=False
+
# Specify exact version of Docker to configure or upgrade to.
# Downgrades are not supported and will error out. Be careful when upgrading docker from < 1.10 to > 1.10.
# docker_version="1.12.1"
@@ -190,6 +210,13 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# or
#openshift_master_request_header_ca_file=<path to local ca file to use>
+# CloudForms Management Engine (ManageIQ) App Install
+#
+# Enables installation of MIQ server. Recommended for dedicated
+# clusters only. See roles/openshift_cfme/README.md for instructions
+# and requirements.
+#openshift_cfme_install_app=False
+
# Cloud Provider Configuration
#
# Note: You may make use of environment variables rather than store
@@ -446,6 +473,8 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# S3 bucket must already exist.
#openshift_hosted_registry_storage_kind=object
#openshift_hosted_registry_storage_provider=s3
+#openshift_hosted_registry_storage_s3_encrypt=false
+#openshift_hosted_registry_storage_s3_kmskeyid=aws_kms_key_id
#openshift_hosted_registry_storage_s3_accesskey=aws_access_key_id
#openshift_hosted_registry_storage_s3_secretkey=aws_secret_access_key
#openshift_hosted_registry_storage_s3_bucket=bucket_name
@@ -501,6 +530,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_hosted_metrics_storage_nfs_options='*(rw,root_squash)'
#openshift_hosted_metrics_storage_volume_name=metrics
#openshift_hosted_metrics_storage_volume_size=10Gi
+#openshift_hosted_metrics_storage_labels={'storage': 'metrics'}
#
# Option B - External NFS Host
# NFS volume must already exist with path "nfs_directory/_volume_name" on
@@ -512,6 +542,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_hosted_metrics_storage_nfs_directory=/exports
#openshift_hosted_metrics_storage_volume_name=metrics
#openshift_hosted_metrics_storage_volume_size=10Gi
+#openshift_hosted_metrics_storage_labels={'storage': 'metrics'}
#
# Option C - Dynamic -- If openshift supports dynamic volume provisioning for
# your cloud platform use this.
@@ -528,6 +559,11 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Configure the prefix and version for the component images
#openshift_hosted_metrics_deployer_prefix=registry.example.com:8888/openshift3/
#openshift_hosted_metrics_deployer_version=3.6.0
+#
+# StorageClass
+# openshift_storageclass_name=gp2
+# openshift_storageclass_parameters={'type': 'gp2', 'encrypted': false}
+#
# Logging deployment
#
@@ -545,6 +581,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_hosted_logging_storage_nfs_options='*(rw,root_squash)'
#openshift_hosted_logging_storage_volume_name=logging
#openshift_hosted_logging_storage_volume_size=10Gi
+#openshift_hosted_logging_storage_labels={'storage': 'logging'}
#
# Option B - External NFS Host
# NFS volume must already exist with path "nfs_directory/_volume_name" on
@@ -556,6 +593,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_hosted_logging_storage_nfs_directory=/exports
#openshift_hosted_logging_storage_volume_name=logging
#openshift_hosted_logging_storage_volume_size=10Gi
+#openshift_hosted_logging_storage_labels={'storage': 'logging'}
#
# Option C - Dynamic -- If openshift supports dynamic volume provisioning for
# your cloud platform use this.
@@ -688,7 +726,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_dns_ip=172.30.0.1
# Configure node kubelet arguments. pods-per-core is valid in OpenShift Origin 1.3 or OpenShift Container Platform 3.3 and later.
-#openshift_node_kubelet_args={'pods-per-core': ['10'], 'max-pods': ['250'], 'image-gc-high-threshold': ['90'], 'image-gc-low-threshold': ['80']}
+#openshift_node_kubelet_args={'pods-per-core': ['10'], 'max-pods': ['250'], 'image-gc-high-threshold': ['85'], 'image-gc-low-threshold': ['80']}
# Configure logrotate scripts
# See: https://github.com/nickhammond/ansible-logrotate
@@ -714,6 +752,10 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Global Proxy Configuration
# These options configure HTTP_PROXY, HTTPS_PROXY, and NOPROXY environment
# variables for docker and master services.
+#
+# Hosts in the openshift_no_proxy list will NOT use any globally
+# configured HTTP(S)_PROXYs. openshift_no_proxy accepts domains
+# (.example.com), hosts (example.com), and IP addresses.
#openshift_http_proxy=http://USER:PASSWORD@IPADDR:PORT
#openshift_https_proxy=https://USER:PASSWORD@IPADDR:PORT
#openshift_no_proxy='.hosts.example.com,some-host.com'
@@ -721,7 +763,11 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Most environments don't require a proxy between openshift masters, nodes, and
# etcd hosts. So automatically add those hostnames to the openshift_no_proxy list.
# If all of your hosts share a common domain you may wish to disable this and
-# specify that domain above.
+# specify that domain above instead.
+#
+# For example, if your hosts have the FQDNs m1.ex.com, n1.ex.com, and
+# n2.ex.com, you could simply add '.ex.com' to the openshift_no_proxy
+# variable (above) and set this value to False.
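+#
+# e.g. (hypothetical values):
+#   openshift_no_proxy='.ex.com'
+#   openshift_generate_no_proxy_hosts=False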
#openshift_generate_no_proxy_hosts=True
#
# These options configure the BuildDefaults admission controller which injects
@@ -782,6 +828,12 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Enable API service auditing, available as of 3.2
#openshift_master_audit_config={"enabled": true}
+#
+# If you want a more advanced setup for the audit log, you can use the
+# following line instead.
+# The directory in "auditFilePath" will be created if it does not
+# exist.
+#openshift_master_audit_config={"enabled": true, "auditFilePath": "/var/log/openpaas-oscp-audit/openpaas-oscp-audit.log", "maximumFileRetentionDays": 14, "maximumFileSizeMegabytes": 500, "maximumRetainedFiles": 5}
# Validity of the auto-generated OpenShift certificates in days.
# See also openshift_hosted_registry_cert_expire_days above.
@@ -794,6 +846,9 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Controls validity for etcd CA, peer, server and client certificates.
#
#etcd_ca_default_days=1825
+#
+# ServiceAccountConfig:LimitSecretReferences rejects pods that reference secrets their service accounts do not reference
+# openshift_master_saconfig_limitsecretreferences=false
# Upgrade Control
#