Diffstat (limited to 'roles')
-rw-r--r--  roles/kube_nfs_volumes/library/partitionpool.py                                      |   8
-rw-r--r--  roles/openshift_certificate_expiry/library/openshift_cert_expiry.py                  |  12
-rw-r--r--  roles/openshift_examples/files/examples/v1.4/cfme-templates/cfme-pv-app-example.yaml |  13
-rw-r--r--  roles/openshift_examples/files/examples/v1.4/cfme-templates/cfme-pv-example.yaml     |  13
-rw-r--r--  roles/openshift_examples/files/examples/v1.4/cfme-templates/cfme-template.yaml       | 479
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py                                     |  27
-rw-r--r--  roles/openshift_hosted_templates/files/v1.3/enterprise/logging-deployer.yaml         |   2
-rw-r--r--  roles/openshift_hosted_templates/files/v1.4/enterprise/logging-deployer.yaml         |   3
-rw-r--r--  roles/openshift_repos/tasks/main.yaml                                                |   4
-rw-r--r--  roles/openshift_repos/templates/yum_repo.j2                                          |   4
-rw-r--r--  roles/openshift_storage_nfs_lvm/README.md                                            |   9
-rw-r--r--  roles/openshift_storage_nfs_lvm/defaults/main.yml                                    |   7
-rw-r--r--  roles/openshift_storage_nfs_lvm/templates/nfs.json.j2                                |   2
13 files changed, 553 insertions(+), 30 deletions(-)
diff --git a/roles/kube_nfs_volumes/library/partitionpool.py b/roles/kube_nfs_volumes/library/partitionpool.py
index 2cd454274..1857433c7 100644
--- a/roles/kube_nfs_volumes/library/partitionpool.py
+++ b/roles/kube_nfs_volumes/library/partitionpool.py
@@ -3,6 +3,8 @@
Ansible module for partitioning.
"""
+from __future__ import print_function
+
# There is no pyparted on our Jenkins worker
# pylint: disable=import-error
import parted
@@ -131,7 +133,7 @@ def partition(diskname, specs, force=False, check_mode=False):
disk = None
if disk and len(disk.partitions) > 0 and not force:
- print "skipping", diskname
+ print("skipping", diskname)
return 0
# create new partition table, wiping all existing data
@@ -220,7 +222,7 @@ def main():
try:
specs = parse_spec(sizes)
- except ValueError, ex:
+ except ValueError as ex:
err = "Error parsing sizes=" + sizes + ": " + str(ex)
module.fail_json(msg=err)
@@ -229,7 +231,7 @@ def main():
for disk in disks.split(","):
try:
changed_count += partition(disk, specs, force, module.check_mode)
- except Exception, ex:
+ except Exception as ex:
err = "Error creating partitions on " + disk + ": " + str(ex)
raise
# module.fail_json(msg=err)
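
The hunks above move partitionpool.py onto syntax that works under both Python 2 and Python 3: `print` becomes a function via the `__future__` import, and the old `except ValueError, ex` comma form becomes `except ValueError as ex`. A minimal sketch of the same two patterns, using a hypothetical helper rather than the module's own functions:

```
from __future__ import print_function


def parse_size(value):
    """Parse a partition size in GiB; runs unchanged on Python 2 and 3."""
    try:
        return float(value)
    except ValueError as ex:                     # 'as' replaces the removed comma syntax
        print("invalid size:", value, str(ex))   # print() behaves the same on both interpreters
        raise
```
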
diff --git a/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py b/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py
index 1fac284f2..7161b5277 100644
--- a/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py
+++ b/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py
@@ -371,7 +371,7 @@ an OpenShift Container Platform cluster
######################################################################
# Load the certificate and the CA, parse their expiration dates into
# datetime objects so we can manipulate them later
- for _, v in cert_meta.iteritems():
+ for _, v in cert_meta.items():
with open(v, 'r') as fp:
cert = fp.read()
cert_subject, cert_expiry_date, time_remaining = load_and_handle_cert(cert, now)
@@ -654,9 +654,13 @@ an OpenShift Container Platform cluster
# will be at the front of the list and certificates which will
# expire later are at the end. Router and registry certs should be
# limited to just 1 result, so don't bother sorting those.
- check_results['ocp_certs'] = sorted(check_results['ocp_certs'], cmp=lambda x, y: cmp(x['days_remaining'], y['days_remaining']))
- check_results['kubeconfigs'] = sorted(check_results['kubeconfigs'], cmp=lambda x, y: cmp(x['days_remaining'], y['days_remaining']))
- check_results['etcd'] = sorted(check_results['etcd'], cmp=lambda x, y: cmp(x['days_remaining'], y['days_remaining']))
+ def cert_key(item):
+ ''' return the days_remaining key '''
+ return item['days_remaining']
+
+ check_results['ocp_certs'] = sorted(check_results['ocp_certs'], key=cert_key)
+ check_results['kubeconfigs'] = sorted(check_results['kubeconfigs'], key=cert_key)
+ check_results['etcd'] = sorted(check_results['etcd'], key=cert_key)
# This module will never change anything, but we might want to
# change the return code parameter if there is some catastrophic
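
Python 3 drops both the `cmp()` builtin and the `cmp=` keyword of `sorted()`, which is why the sort above switches to a `key` function. A short sketch of the equivalent behaviour on made-up result dicts (the real entries carry more fields):

```
results = [
    {'path': '/etc/origin/master/ca.crt', 'days_remaining': 730},
    {'path': '/etc/origin/master/admin.kubeconfig', 'days_remaining': 42},
]


def cert_key(item):
    ''' return the days_remaining key '''
    return item['days_remaining']


# Certificates closest to expiry sort to the front, matching the old cmp-based call.
results = sorted(results, key=cert_key)
```
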
diff --git a/roles/openshift_examples/files/examples/v1.4/cfme-templates/cfme-pv-app-example.yaml b/roles/openshift_examples/files/examples/v1.4/cfme-templates/cfme-pv-app-example.yaml
new file mode 100644
index 000000000..14bdd1dca
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/cfme-templates/cfme-pv-app-example.yaml
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: cloudforms
+spec:
+ capacity:
+ storage: 2Gi
+ accessModes:
+ - ReadWriteOnce
+ nfs:
+ path: /opt/nfs/volumes-app
+ server: 10.19.0.216
+ persistentVolumeReclaimPolicy: Recycle
diff --git a/roles/openshift_examples/files/examples/v1.4/cfme-templates/cfme-pv-example.yaml b/roles/openshift_examples/files/examples/v1.4/cfme-templates/cfme-pv-example.yaml
new file mode 100644
index 000000000..709d8d976
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/cfme-templates/cfme-pv-example.yaml
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: nfs-pv01
+spec:
+ capacity:
+ storage: 2Gi
+ accessModes:
+ - ReadWriteOnce
+ nfs:
+ path: /opt/nfs/volumes
+ server: 10.19.0.216
+ persistentVolumeReclaimPolicy: Recycle
diff --git a/roles/openshift_examples/files/examples/v1.4/cfme-templates/cfme-template.yaml b/roles/openshift_examples/files/examples/v1.4/cfme-templates/cfme-template.yaml
new file mode 100644
index 000000000..c8e3d4083
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/cfme-templates/cfme-template.yaml
@@ -0,0 +1,479 @@
+apiVersion: v1
+kind: Template
+labels:
+ template: cloudforms
+metadata:
+ name: cloudforms
+ annotations:
+ description: "CloudForms appliance with persistent storage"
+ tags: "instant-app,cloudforms,cfme"
+ iconClass: "icon-rails"
+objects:
+- apiVersion: v1
+ kind: Service
+ metadata:
+ annotations:
+ description: "Exposes and load balances CloudForms pods"
+ service.alpha.openshift.io/dependencies: '[{"name":"${DATABASE_SERVICE_NAME}","namespace":"","kind":"Service"},{"name":"${MEMCACHED_SERVICE_NAME}","namespace":"","kind":"Service"}]'
+ name: ${NAME}
+ spec:
+ ports:
+ - name: http
+ port: 80
+ protocol: TCP
+ targetPort: 80
+ - name: https
+ port: 443
+ protocol: TCP
+ targetPort: 443
+ selector:
+ name: ${NAME}
+- apiVersion: v1
+ kind: Route
+ metadata:
+ name: ${NAME}
+ spec:
+ host: ${APPLICATION_DOMAIN}
+ port:
+ targetPort: https
+ tls:
+ termination: passthrough
+ to:
+ kind: Service
+ name: ${NAME}
+- apiVersion: v1
+ kind: ImageStream
+ metadata:
+ name: cfme-openshift-app
+ annotations:
+ description: "Keeps track of changes in the CloudForms app image"
+ spec:
+ dockerImageRepository: registry.access.redhat.com/cloudforms/cfme-openshift-app
+- apiVersion: v1
+ kind: PersistentVolumeClaim
+ metadata:
+ name: ${DATABASE_SERVICE_NAME}
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: ${DATABASE_VOLUME_CAPACITY}
+- apiVersion: v1
+ kind: PersistentVolumeClaim
+ metadata:
+ name: ${NAME}
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: ${APPLICATION_VOLUME_CAPACITY}
+- apiVersion: v1
+ kind: "DeploymentConfig"
+ metadata:
+ name: ${NAME}
+ annotations:
+ description: "Defines how to deploy the CloudForms appliance"
+ spec:
+ template:
+ metadata:
+ labels:
+ name: ${NAME}
+ name: ${NAME}
+ spec:
+ volumes:
+ -
+ name: "cfme-app-volume"
+ persistentVolumeClaim:
+ claimName: ${NAME}
+ containers:
+ - image: cloudforms/cfme-openshift-app:${APPLICATION_IMG_TAG}
+ imagePullPolicy: IfNotPresent
+ name: cloudforms
+ livenessProbe:
+ httpGet:
+ path: /
+ port: 80
+ initialDelaySeconds: 480
+ timeoutSeconds: 3
+ readinessProbe:
+ httpGet:
+ path: /
+ port: 80
+ initialDelaySeconds: 200
+ timeoutSeconds: 3
+ ports:
+ - containerPort: 80
+ protocol: TCP
+ - containerPort: 443
+ protocol: TCP
+ securityContext:
+ privileged: true
+ volumeMounts:
+ -
+ name: "cfme-app-volume"
+ mountPath: "/persistent"
+ env:
+ -
+ name: "APPLICATION_INIT_DELAY"
+ value: "${APPLICATION_INIT_DELAY}"
+ -
+ name: "DATABASE_SERVICE_NAME"
+ value: "${DATABASE_SERVICE_NAME}"
+ -
+ name: "DATABASE_REGION"
+ value: "${DATABASE_REGION}"
+ -
+ name: "MEMCACHED_SERVICE_NAME"
+ value: "${MEMCACHED_SERVICE_NAME}"
+ -
+ name: "POSTGRESQL_USER"
+ value: "${DATABASE_USER}"
+ -
+ name: "POSTGRESQL_PASSWORD"
+ value: "${DATABASE_PASSWORD}"
+ -
+ name: "POSTGRESQL_DATABASE"
+ value: "${DATABASE_NAME}"
+ -
+ name: "POSTGRESQL_MAX_CONNECTIONS"
+ value: "${POSTGRESQL_MAX_CONNECTIONS}"
+ -
+ name: "POSTGRESQL_SHARED_BUFFERS"
+ value: "${POSTGRESQL_SHARED_BUFFERS}"
+ resources:
+ requests:
+ memory: "${MEMORY_APPLICATION_MIN}"
+ lifecycle:
+ preStop:
+ exec:
+ command:
+ - /opt/rh/cfme-container-scripts/sync-pv-data
+ replicas: 1
+ selector:
+ name: ${NAME}
+ triggers:
+ - type: "ConfigChange"
+ - type: "ImageChange"
+ imageChangeParams:
+ automatic: false
+ containerNames:
+ - "cloudforms"
+ from:
+ kind: "ImageStreamTag"
+ name: "cfme-openshift-app:${APPLICATION_IMG_TAG}"
+ strategy:
+ type: "Recreate"
+ recreateParams:
+ timeoutSeconds: 1200
+- apiVersion: v1
+ kind: "Service"
+ metadata:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ annotations:
+ description: "Exposes the memcached server"
+ spec:
+ ports:
+ -
+ name: "memcached"
+ port: 11211
+ targetPort: 11211
+ selector:
+ name: "${MEMCACHED_SERVICE_NAME}"
+- apiVersion: v1
+ kind: ImageStream
+ metadata:
+ name: cfme-openshift-memcached
+ annotations:
+ description: "Keeps track of changes in the CloudForms memcached image"
+ spec:
+ dockerImageRepository: registry.access.redhat.com/cloudforms/cfme-openshift-memcached
+- apiVersion: v1
+ kind: "DeploymentConfig"
+ metadata:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ annotations:
+ description: "Defines how to deploy memcached"
+ spec:
+ strategy:
+ type: "Recreate"
+ triggers:
+ -
+ type: "ImageChange"
+ imageChangeParams:
+ automatic: false
+ containerNames:
+ - "memcached"
+ from:
+ kind: "ImageStreamTag"
+ name: "cfme-openshift-memcached:${MEMCACHED_IMG_TAG}"
+ -
+ type: "ConfigChange"
+ replicas: 1
+ selector:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ template:
+ metadata:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ labels:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ spec:
+ volumes: []
+ containers:
+ -
+ name: "memcached"
+ image: "cloudforms/cfme-openshift-memcached:${MEMCACHED_IMG_TAG}"
+ ports:
+ -
+ containerPort: 11211
+ readinessProbe:
+ timeoutSeconds: 1
+ initialDelaySeconds: 5
+ tcpSocket:
+ port: 11211
+ livenessProbe:
+ timeoutSeconds: 1
+ initialDelaySeconds: 30
+ tcpSocket:
+ port: 11211
+ volumeMounts: []
+ env:
+ -
+ name: "MEMCACHED_MAX_MEMORY"
+ value: "${MEMCACHED_MAX_MEMORY}"
+ -
+ name: "MEMCACHED_MAX_CONNECTIONS"
+ value: "${MEMCACHED_MAX_CONNECTIONS}"
+ -
+ name: "MEMCACHED_SLAB_PAGE_SIZE"
+ value: "${MEMCACHED_SLAB_PAGE_SIZE}"
+ resources:
+ limits:
+ memory: "${MEMORY_MEMCACHED_LIMIT}"
+- apiVersion: v1
+ kind: "Service"
+ metadata:
+ name: "${DATABASE_SERVICE_NAME}"
+ annotations:
+ description: "Exposes the database server"
+ spec:
+ ports:
+ -
+ name: "postgresql"
+ port: 5432
+ targetPort: 5432
+ selector:
+ name: "${DATABASE_SERVICE_NAME}"
+- apiVersion: v1
+ kind: ImageStream
+ metadata:
+ name: cfme-openshift-postgresql
+ annotations:
+ description: "Keeps track of changes in the CloudForms postgresql image"
+ spec:
+ dockerImageRepository: registry.access.redhat.com/cloudforms/cfme-openshift-postgresql
+- apiVersion: v1
+ kind: "DeploymentConfig"
+ metadata:
+ name: "${DATABASE_SERVICE_NAME}"
+ annotations:
+ description: "Defines how to deploy the database"
+ spec:
+ strategy:
+ type: "Recreate"
+ triggers:
+ -
+ type: "ImageChange"
+ imageChangeParams:
+ automatic: false
+ containerNames:
+ - "postgresql"
+ from:
+ kind: "ImageStreamTag"
+ name: "cfme-openshift-postgresql:${POSTGRESQL_IMG_TAG}"
+ -
+ type: "ConfigChange"
+ replicas: 1
+ selector:
+ name: "${DATABASE_SERVICE_NAME}"
+ template:
+ metadata:
+ name: "${DATABASE_SERVICE_NAME}"
+ labels:
+ name: "${DATABASE_SERVICE_NAME}"
+ spec:
+ volumes:
+ -
+ name: "cfme-pgdb-volume"
+ persistentVolumeClaim:
+ claimName: ${DATABASE_SERVICE_NAME}
+ containers:
+ -
+ name: "postgresql"
+ image: "cloudforms/cfme-openshift-postgresql:${POSTGRESQL_IMG_TAG}"
+ ports:
+ -
+ containerPort: 5432
+ readinessProbe:
+ timeoutSeconds: 1
+ initialDelaySeconds: 15
+ exec:
+ command:
+ - "/bin/sh"
+ - "-i"
+ - "-c"
+ - "psql -h 127.0.0.1 -U ${POSTGRESQL_USER} -q -d ${POSTGRESQL_DATABASE} -c 'SELECT 1'"
+ livenessProbe:
+ timeoutSeconds: 1
+ initialDelaySeconds: 60
+ tcpSocket:
+ port: 5432
+ volumeMounts:
+ -
+ name: "cfme-pgdb-volume"
+ mountPath: "/var/lib/pgsql/data"
+ env:
+ -
+ name: "POSTGRESQL_USER"
+ value: "${DATABASE_USER}"
+ -
+ name: "POSTGRESQL_PASSWORD"
+ value: "${DATABASE_PASSWORD}"
+ -
+ name: "POSTGRESQL_DATABASE"
+ value: "${DATABASE_NAME}"
+ -
+ name: "POSTGRESQL_MAX_CONNECTIONS"
+ value: "${POSTGRESQL_MAX_CONNECTIONS}"
+ -
+ name: "POSTGRESQL_SHARED_BUFFERS"
+ value: "${POSTGRESQL_SHARED_BUFFERS}"
+ resources:
+ limits:
+ memory: "${MEMORY_POSTGRESQL_LIMIT}"
+
+parameters:
+ -
+ name: "NAME"
+ displayName: Name
+ required: true
+ description: "The name assigned to all of the frontend objects defined in this template."
+ value: cloudforms
+ -
+ name: "DATABASE_SERVICE_NAME"
+ displayName: "PostgreSQL Service Name"
+ required: true
+ description: "The name of the OpenShift Service exposed for the PostgreSQL container."
+ value: "postgresql"
+ -
+ name: "DATABASE_USER"
+ displayName: "PostgreSQL User"
+ required: true
+ description: "PostgreSQL user that will access the database."
+ value: "root"
+ -
+ name: "DATABASE_PASSWORD"
+ displayName: "PostgreSQL Password"
+ required: true
+ description: "Password for the PostgreSQL user."
+ value: "smartvm"
+ -
+ name: "DATABASE_NAME"
+ required: true
+ displayName: "PostgreSQL Database Name"
+ description: "Name of the PostgreSQL database accessed."
+ value: "vmdb_production"
+ -
+ name: "DATABASE_REGION"
+ required: true
+ displayName: "Application Database Region"
+ description: "Database region that will be used for application."
+ value: "0"
+ -
+ name: "MEMCACHED_SERVICE_NAME"
+ required: true
+ displayName: "Memcached Service Name"
+ description: "The name of the OpenShift Service exposed for the Memcached container."
+ value: "memcached"
+ -
+ name: "MEMCACHED_MAX_MEMORY"
+ displayName: "Memcached Max Memory"
+ description: "Memcached maximum memory for memcached object storage in MB."
+ value: "64"
+ -
+ name: "MEMCACHED_MAX_CONNECTIONS"
+ displayName: "Memcached Max Connections"
+ description: "Memcached maximum number of connections allowed."
+ value: "1024"
+ -
+ name: "MEMCACHED_SLAB_PAGE_SIZE"
+ displayName: "Memcached Slab Page Size"
+ description: "Memcached size of each slab page."
+ value: "1m"
+ -
+ name: "POSTGRESQL_MAX_CONNECTIONS"
+ displayName: "PostgreSQL Max Connections"
+ description: "PostgreSQL maximum number of database connections allowed."
+ value: "100"
+ -
+ name: "POSTGRESQL_SHARED_BUFFERS"
+ displayName: "PostgreSQL Shared Buffer Amount"
+ description: "Amount of memory dedicated for PostgreSQL shared memory buffers."
+ value: "64MB"
+ -
+ name: "MEMORY_APPLICATION_MIN"
+ displayName: "Application Memory Minimum"
+ required: true
+ description: "Minimum amount of memory the Application container will need."
+ value: "4096Mi"
+ -
+ name: "MEMORY_POSTGRESQL_LIMIT"
+ displayName: "PostgreSQL Memory Limit"
+ required: true
+ description: "Maximum amount of memory the PostgreSQL container can use."
+ value: "2048Mi"
+ -
+ name: "MEMORY_MEMCACHED_LIMIT"
+ displayName: "Memcached Memory Limit"
+ required: true
+ description: "Maximum amount of memory the Memcached container can use."
+ value: "256Mi"
+ -
+ name: "POSTGRESQL_IMG_TAG"
+ displayName: "PostgreSQL Image Tag"
+ description: "This is the PostgreSQL image tag/version requested to deploy."
+ value: "latest"
+ -
+ name: "MEMCACHED_IMG_TAG"
+ displayName: "Memcached Image Tag"
+ description: "This is the Memcached image tag/version requested to deploy."
+ value: "latest"
+ -
+ name: "APPLICATION_IMG_TAG"
+ displayName: "Application Image Tag"
+ description: "This is the Application image tag/version requested to deploy."
+ value: "latest"
+ -
+ name: "APPLICATION_DOMAIN"
+ displayName: "Application Hostname"
+ description: "The exposed hostname that will route to the application service, if left blank a value will be defaulted."
+ value: ""
+ -
+ name: "APPLICATION_INIT_DELAY"
+ displayName: "Application Init Delay"
+ required: true
+ description: "Delay in seconds before we attempt to initialize the application."
+ value: "30"
+ -
+ name: "APPLICATION_VOLUME_CAPACITY"
+ displayName: "Application Volume Capacity"
+ required: true
+ description: "Volume space available for application data."
+ value: "1Gi"
+ -
+ name: "DATABASE_VOLUME_CAPACITY"
+ displayName: "Database Volume Capacity"
+ required: true
+ description: "Volume space available for database."
+ value: "1Gi"
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 41ae07a48..05b0377bc 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -26,6 +26,8 @@ import struct
import socket
from distutils.util import strtobool
from distutils.version import LooseVersion
+from six import string_types
+from six import text_type
# ignore pylint errors related to the module_utils import
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
@@ -87,7 +89,7 @@ def migrate_docker_facts(facts):
# log_options was originally meant to be a comma separated string, but
# we now prefer an actual list, with backward compatibility:
if 'log_options' in facts['docker'] and \
- isinstance(facts['docker']['log_options'], basestring):
+ isinstance(facts['docker']['log_options'], string_types):
facts['docker']['log_options'] = facts['docker']['log_options'].split(",")
return facts
@@ -226,7 +228,7 @@ def choose_hostname(hostnames=None, fallback=''):
return hostname
ip_regex = r'\A\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\Z'
- ips = [i for i in hostnames if i is not None and isinstance(i, basestring) and re.match(ip_regex, i)]
+ ips = [i for i in hostnames if i is not None and isinstance(i, string_types) and re.match(ip_regex, i)]
hosts = [i for i in hostnames if i is not None and i != '' and i not in ips]
for host_list in (hosts, ips):
@@ -363,7 +365,7 @@ def normalize_aws_facts(metadata, facts):
var_map = {'ips': 'local-ipv4s', 'public_ips': 'public-ipv4s'}
for ips_var, int_var in iteritems(var_map):
ips = interface.get(int_var)
- if isinstance(ips, basestring):
+ if isinstance(ips, string_types):
int_info[ips_var] = [ips]
else:
int_info[ips_var] = ips
@@ -772,7 +774,7 @@ def set_etcd_facts_if_unset(facts):
# Read ETCD_DATA_DIR from /etc/etcd/etcd.conf:
try:
# Add a fake section for parsing:
- ini_str = unicode('[root]\n' + open('/etc/etcd/etcd.conf', 'r').read(), 'utf-8')
+ ini_str = text_type('[root]\n' + open('/etc/etcd/etcd.conf', 'r').read(), 'utf-8')
ini_fp = io.StringIO(ini_str)
config = ConfigParser.RawConfigParser()
config.readfp(ini_fp)
@@ -1280,15 +1282,14 @@ def get_hosted_registry_insecure():
hosted_registry_insecure = None
if os.path.exists('/etc/sysconfig/docker'):
try:
- ini_str = unicode('[root]\n' + open('/etc/sysconfig/docker', 'r').read(), 'utf-8')
+ ini_str = text_type('[root]\n' + open('/etc/sysconfig/docker', 'r').read(), 'utf-8')
ini_fp = io.StringIO(ini_str)
config = ConfigParser.RawConfigParser()
config.readfp(ini_fp)
options = config.get('root', 'OPTIONS')
if 'insecure-registry' in options:
hosted_registry_insecure = True
- # pylint: disable=bare-except
- except:
+ except Exception: # pylint: disable=broad-except
pass
return hosted_registry_insecure
@@ -1449,7 +1450,7 @@ def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overw
if key in inventory_json_facts:
# Watchout for JSON facts that sometimes load as strings.
# (can happen if the JSON contains a boolean)
- if isinstance(new[key], basestring):
+ if isinstance(new[key], string_types):
facts[key] = yaml.safe_load(new[key])
else:
facts[key] = copy.deepcopy(new[key])
@@ -1511,7 +1512,7 @@ def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overw
for key in new_keys:
# Watchout for JSON facts that sometimes load as strings.
# (can happen if the JSON contains a boolean)
- if key in inventory_json_facts and isinstance(new[key], basestring):
+ if key in inventory_json_facts and isinstance(new[key], string_types):
facts[key] = yaml.safe_load(new[key])
else:
facts[key] = copy.deepcopy(new[key])
@@ -1614,7 +1615,7 @@ def set_proxy_facts(facts):
if 'common' in facts:
common = facts['common']
if 'http_proxy' in common or 'https_proxy' in common:
- if 'no_proxy' in common and isinstance(common['no_proxy'], basestring):
+ if 'no_proxy' in common and isinstance(common['no_proxy'], string_types):
common['no_proxy'] = common['no_proxy'].split(",")
elif 'no_proxy' not in common:
common['no_proxy'] = []
@@ -1636,7 +1637,7 @@ def set_proxy_facts(facts):
if 'https_proxy' not in builddefaults and 'https_proxy' in common:
builddefaults['https_proxy'] = common['https_proxy']
# make no_proxy into a list if it's not
- if 'no_proxy' in builddefaults and isinstance(builddefaults['no_proxy'], basestring):
+ if 'no_proxy' in builddefaults and isinstance(builddefaults['no_proxy'], string_types):
builddefaults['no_proxy'] = builddefaults['no_proxy'].split(",")
if 'no_proxy' not in builddefaults and 'no_proxy' in common:
builddefaults['no_proxy'] = common['no_proxy']
@@ -2220,12 +2221,12 @@ class OpenShiftFacts(object):
key = '{0}_registries'.format(cat)
if key in new_local_facts['docker']:
val = new_local_facts['docker'][key]
- if isinstance(val, basestring):
+ if isinstance(val, string_types):
val = [x.strip() for x in val.split(',')]
new_local_facts['docker'][key] = list(set(val) - set(['']))
# Convert legacy log_options comma sep string to a list if present:
if 'log_options' in new_local_facts['docker'] and \
- isinstance(new_local_facts['docker']['log_options'], basestring):
+ isinstance(new_local_facts['docker']['log_options'], string_types):
new_local_facts['docker']['log_options'] = new_local_facts['docker']['log_options'].split(',')
new_local_facts = self.remove_empty_facts(new_local_facts)
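
Because `basestring` and `unicode` do not exist on Python 3, the facts module now routes these checks through `six`, which the diff imports near the top of the file. A brief sketch of what the two replacements do, assuming `six` is available:

```
from six import string_types, text_type

# string_types is (str, unicode) on Python 2 and (str,) on Python 3, so the
# isinstance checks keep accepting whatever counts as a string locally.
no_proxy = "10.0.0.1,10.0.0.2"
if isinstance(no_proxy, string_types):
    no_proxy = no_proxy.split(",")

# text_type is unicode on Python 2 and str on Python 3; decoding bytes with it
# mirrors the old unicode(..., 'utf-8') calls used when parsing config files.
raw = b"[root]\nOPTIONS='--insecure-registry 172.30.0.0/16'\n"
ini_str = text_type(raw, "utf-8")
```
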
diff --git a/roles/openshift_hosted_templates/files/v1.3/enterprise/logging-deployer.yaml b/roles/openshift_hosted_templates/files/v1.3/enterprise/logging-deployer.yaml
index 13cef2d66..c47d5361d 100644
--- a/roles/openshift_hosted_templates/files/v1.3/enterprise/logging-deployer.yaml
+++ b/roles/openshift_hosted_templates/files/v1.3/enterprise/logging-deployer.yaml
@@ -72,7 +72,6 @@ items:
metadata:
name: logging-deployer-edit-role
roleRef:
- kind: ClusterRole
name: edit
subjects:
- kind: ServiceAccount
@@ -83,7 +82,6 @@ items:
metadata:
name: logging-deployer-dsadmin-role
roleRef:
- kind: ClusterRole
name: daemonset-admin
subjects:
- kind: ServiceAccount
diff --git a/roles/openshift_hosted_templates/files/v1.4/enterprise/logging-deployer.yaml b/roles/openshift_hosted_templates/files/v1.4/enterprise/logging-deployer.yaml
index ddfda1272..c67058696 100644
--- a/roles/openshift_hosted_templates/files/v1.4/enterprise/logging-deployer.yaml
+++ b/roles/openshift_hosted_templates/files/v1.4/enterprise/logging-deployer.yaml
@@ -81,7 +81,6 @@ items:
metadata:
name: logging-deployer-edit-role
roleRef:
- kind: ClusterRole
name: edit
subjects:
- kind: ServiceAccount
@@ -92,7 +91,6 @@ items:
metadata:
name: logging-deployer-dsadmin-role
roleRef:
- kind: ClusterRole
name: daemonset-admin
subjects:
- kind: ServiceAccount
@@ -103,7 +101,6 @@ items:
metadata:
name: logging-elasticsearch-view-role
roleRef:
- kind: ClusterRole
name: view
subjects:
- kind: ServiceAccount
diff --git a/roles/openshift_repos/tasks/main.yaml b/roles/openshift_repos/tasks/main.yaml
index d5ed9c09d..23dcd0440 100644
--- a/roles/openshift_repos/tasks/main.yaml
+++ b/roles/openshift_repos/tasks/main.yaml
@@ -37,7 +37,7 @@
when: ansible_os_family == "RedHat" and ansible_distribution != "Fedora"
and openshift_deployment_type == 'origin'
and not openshift.common.is_containerized | bool
- and openshift_enable_origin_repo | default(true)
+ and openshift_enable_origin_repo | default(true) | bool
- name: Configure origin yum repositories RHEL/CentOS
copy:
@@ -47,4 +47,4 @@
when: ansible_os_family == "RedHat" and ansible_distribution != "Fedora"
and openshift_deployment_type == 'origin'
and not openshift.common.is_containerized | bool
- and openshift_enable_origin_repo | default(true)
+ and openshift_enable_origin_repo | default(true) | bool
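
The added `| bool` matters because inventory variables often arrive as strings: `openshift_enable_origin_repo=false` is the non-empty string "false", which is truthy in a bare Jinja2 `when:` test, so the tasks would still run. The filter coerces the value to a real boolean first. The same pitfall in plain Python, using `strtobool` (which openshift_facts.py already imports) as a stand-in for Ansible's `bool` filter:

```
from distutils.util import strtobool

value = "false"                    # how the flag arrives from an inventory or -e override

print(bool(value))                 # True  -- any non-empty string passes a bare truth test
print(bool(strtobool(value)))      # False -- roughly what '| default(true) | bool' evaluates to
```
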
diff --git a/roles/openshift_repos/templates/yum_repo.j2 b/roles/openshift_repos/templates/yum_repo.j2
index 2d9243545..0ec0045eb 100644
--- a/roles/openshift_repos/templates/yum_repo.j2
+++ b/roles/openshift_repos/templates/yum_repo.j2
@@ -2,9 +2,9 @@
[{{ repo.id }}]
name={{ repo.name | default(repo.id) }}
baseurl={{ repo.baseurl }}
-{% set enable_repo = repo.enabled | default('1') %}
+{% set enable_repo = repo.enabled | default(1,True) %}
enabled={{ 1 if ( enable_repo == 1 or enable_repo == True ) else 0 }}
-{% set enable_gpg_check = repo.gpgcheck | default('1') %}
+{% set enable_gpg_check = repo.gpgcheck | default(1,True) %}
gpgcheck={{ 1 if ( enable_gpg_check == 1 or enable_gpg_check == True ) else 0 }}
{% for key, value in repo.iteritems() %}
{% if key not in ['id', 'name', 'baseurl', 'enabled', 'gpgcheck'] and value is defined %}
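
The template fix hinges on two details of Jinja2's `default` filter: the old `default('1')` produced the string `'1'`, which never compares equal to the integer `1` on the following line, so a repo without an explicit `enabled`/`gpgcheck` key rendered as disabled; and the new second argument (`boolean=True`) also applies the default when the value is present but falsy, such as an empty string. A quick check with the jinja2 library on a made-up repo dict:

```
from jinja2 import Template

# Same two-line pattern as the template, with the fixed filter arguments.
tmpl = Template(
    "{% set enable_repo = repo.enabled | default(1, True) %}"
    "enabled={{ 1 if (enable_repo == 1 or enable_repo == True) else 0 }}"
)

print(tmpl.render(repo={}))                 # enabled=1  (key missing -> default 1)
print(tmpl.render(repo={"enabled": ""}))    # enabled=1  (empty value -> default 1)
print(tmpl.render(repo={"enabled": "0"}))   # enabled=0  (explicitly disabled)

# With the old default('1'), the first case set enable_repo to the string '1',
# and '1' == 1 is False, so the repo rendered as enabled=0.
```
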
diff --git a/roles/openshift_storage_nfs_lvm/README.md b/roles/openshift_storage_nfs_lvm/README.md
index 8b8471745..cc674d3fd 100644
--- a/roles/openshift_storage_nfs_lvm/README.md
+++ b/roles/openshift_storage_nfs_lvm/README.md
@@ -48,6 +48,13 @@ osnl_volume_num_start: 3
# How many volumes/partitions to build, with the size we stated.
osnl_number_of_volumes: 2
+# osnl_volume_reclaim_policy
+# Volume reclaim policy of a PersistentVolume tells the cluster
+# what to do with the volume after it is released.
+#
+# Valid values are "Retain" or "Recycle" (default).
+osnl_volume_reclaim_policy: "Recycle"
+
```
## Dependencies
@@ -71,6 +78,7 @@ exported via NFS. json files are created in /root.
osnl_volume_size: 5
osnl_volume_num_start: 3
osnl_number_of_volumes: 2
+ osnl_volume_reclaim_policy: "Recycle"
## Full example
@@ -96,6 +104,7 @@ exported via NFS. json files are created in /root.
osnl_volume_size: 5
osnl_volume_num_start: 3
osnl_number_of_volumes: 2
+ osnl_volume_reclaim_policy: "Recycle"
* Run the playbook:
```
diff --git a/roles/openshift_storage_nfs_lvm/defaults/main.yml b/roles/openshift_storage_nfs_lvm/defaults/main.yml
index f81cdc724..48352187c 100644
--- a/roles/openshift_storage_nfs_lvm/defaults/main.yml
+++ b/roles/openshift_storage_nfs_lvm/defaults/main.yml
@@ -8,3 +8,10 @@ osnl_mount_dir: /exports/openshift
# Volume Group to use.
osnl_volume_group: openshiftvg
+
+# Volume reclaim policy of a PersistentVolume tells the cluster
+# what to do with the volume after it is released.
+#
+# Valid values are "Retain" or "Recycle".
+# See https://docs.openshift.com/enterprise/3.0/architecture/additional_concepts/storage.html#pv-recycling-policy
+osnl_volume_reclaim_policy: "Recycle"
diff --git a/roles/openshift_storage_nfs_lvm/templates/nfs.json.j2 b/roles/openshift_storage_nfs_lvm/templates/nfs.json.j2
index 3c4d2f56c..19e150f7d 100644
--- a/roles/openshift_storage_nfs_lvm/templates/nfs.json.j2
+++ b/roles/openshift_storage_nfs_lvm/templates/nfs.json.j2
@@ -12,7 +12,7 @@
"storage": "{{ osnl_volume_size }}Gi"
},
"accessModes": [ "ReadWriteOnce", "ReadWriteMany" ],
- "persistentVolumeReclaimPolicy": "Recycle",
+ "persistentVolumeReclaimPolicy": "{{ osnl_volume_reclaim_policy }}",
"nfs": {
"Server": "{{ inventory_hostname }}",
"Path": "{{ osnl_mount_dir }}/{{ item }}"