From 2bf65cdab4aa88f160d005d3b7649b22a6dceba8 Mon Sep 17 00:00:00 2001
From: Russell Teague
Date: Fri, 8 Dec 2017 08:42:32 -0500
Subject: Cleanup byo references

---
 ...osts.byo.glusterfs.storage-and-registry.example | 67 ----------------------
 1 file changed, 67 deletions(-)
 delete mode 100644 inventory/byo/hosts.byo.glusterfs.storage-and-registry.example

diff --git a/inventory/byo/hosts.byo.glusterfs.storage-and-registry.example b/inventory/byo/hosts.byo.glusterfs.storage-and-registry.example
deleted file mode 100644
index 9bd37cbf6..000000000
--- a/inventory/byo/hosts.byo.glusterfs.storage-and-registry.example
+++ /dev/null
@@ -1,67 +0,0 @@
-# This is an example of a bring your own (byo) host inventory for a cluster
-# with natively hosted, containerized GlusterFS storage for both general
-# application use and a natively hosted Docker registry. It will also create a
-# StorageClass for the general storage.
-#
-# This inventory may be used with the byo/config.yml playbook to deploy a new
-# cluster with GlusterFS storage.
-#
-# This inventory may also be used with byo/openshift-glusterfs/config.yml to
-# deploy GlusterFS storage on an existing cluster. With this playbook, the
-# registry backend volume will be created but the administrator must then
-# either deploy a hosted registry or change an existing hosted registry to use
-# that volume.
-#
-# There are additional configuration parameters that can be specified to
-# control the deployment and state of a GlusterFS cluster. Please see the
-# documentation in playbooks/byo/openshift-glusterfs/README.md and
-# roles/openshift_storage_glusterfs/README.md for additional details.
-
-[OSEv3:children]
-masters
-nodes
-etcd
-# Specify there will be GlusterFS nodes
-glusterfs
-glusterfs_registry
-
-[OSEv3:vars]
-ansible_ssh_user=root
-openshift_deployment_type=origin
-# Specify that we want to use GlusterFS storage for a hosted registry
-openshift_hosted_registry_storage_kind=glusterfs
-
-[masters]
-master
-
-[nodes]
-master openshift_schedulable=False
-# It is recommended to not use a single cluster for both general and registry
-# storage, so two three-node clusters will be required.
-node0 openshift_schedulable=True
-node1 openshift_schedulable=True
-node2 openshift_schedulable=True
-# A hosted registry, by default, will only be deployed on nodes labeled
-# "region=infra".
-node3 openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
-node4 openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
-node5 openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
-
-[etcd]
-master
-
-# Specify the glusterfs group, which contains the nodes that will host
-# GlusterFS storage pods. At a minimum, each node must have a
-# "glusterfs_devices" variable defined. This variable is a list of block
-# devices the node will have access to that is intended solely for use as
-# GlusterFS storage. These block devices must be bare (e.g. have no data, not
-# be marked as LVM PVs), and will be formatted.
-[glusterfs]
-node0 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
-node1 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
-node2 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
-
-[glusterfs_registry]
-node3 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
-node4 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
-node5 glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
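
For context, the removed example documents its own intended usage in its header
comments. A minimal sketch of the corresponding invocations, assuming the playbook
paths implied by those comments (playbooks/byo/config.yml and
playbooks/byo/openshift-glusterfs/config.yml) and the inventory path shown in the
diff above:

    # Deploy a new cluster with GlusterFS storage (paths assumed from the
    # example file's comments, not from this commit)
    ansible-playbook -i inventory/byo/hosts.byo.glusterfs.storage-and-registry.example \
        playbooks/byo/config.yml

    # Add GlusterFS storage to an existing cluster; the registry backend volume
    # is created, but the hosted registry must then be pointed at it manually
    ansible-playbook -i inventory/byo/hosts.byo.glusterfs.storage-and-registry.example \
        playbooks/byo/openshift-glusterfs/config.yml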