Diffstat (limited to 'roles/lib_openshift/src/class')
 -rw-r--r--  roles/lib_openshift/src/class/oc_adm_ca_server_cert.py | 135
 -rw-r--r--  roles/lib_openshift/src/class/oc_adm_registry.py       | 421
 -rw-r--r--  roles/lib_openshift/src/class/oc_adm_router.py         | 500
 -rw-r--r--  roles/lib_openshift/src/class/oc_obj.py                |   1
 -rw-r--r--  roles/lib_openshift/src/class/oc_route.py              |   8
 -rw-r--r--  roles/lib_openshift/src/class/oc_sdnvalidator.py       |  58
 -rw-r--r--  roles/lib_openshift/src/class/oc_secret.py             |   2
 -rw-r--r--  roles/lib_openshift/src/class/oc_service.py            |   6
 8 files changed, 1124 insertions(+), 7 deletions(-)
diff --git a/roles/lib_openshift/src/class/oc_adm_ca_server_cert.py b/roles/lib_openshift/src/class/oc_adm_ca_server_cert.py
new file mode 100644
index 000000000..6ed1f2f35
--- /dev/null
+++ b/roles/lib_openshift/src/class/oc_adm_ca_server_cert.py
@@ -0,0 +1,135 @@
+# pylint: skip-file
+# flake8: noqa
+
+class CAServerCertConfig(OpenShiftCLIConfig):
+ ''' CAServerCertConfig is a DTO for the oc adm ca command '''
+ def __init__(self, kubeconfig, verbose, ca_options):
+ super(CAServerCertConfig, self).__init__('ca', None, kubeconfig, ca_options)
+ self.kubeconfig = kubeconfig
+ self.verbose = verbose
+ self._ca = ca_options
+
+
+class CAServerCert(OpenShiftCLI):
+ ''' Class to wrap the oc adm ca create-server-cert command line'''
+ def __init__(self,
+ config,
+ verbose=False):
+ ''' Constructor for oadm ca '''
+ super(CAServerCert, self).__init__(None, config.kubeconfig, verbose)
+ self.config = config
+ self.verbose = verbose
+
+ def get(self):
+ '''get the current cert file
+
+        If a file already exists at the configured cert path, the cert is considered to exist
+ '''
+ cert = self.config.config_options['cert']['value']
+ if cert and os.path.exists(cert):
+ return open(cert).read()
+
+ return None
+
+ def create(self):
+ '''run openshift oc adm ca create-server-cert cmd'''
+
+        # Added here as a safeguard against stomping on the
+        # cert and key files if they already exist
+ if self.config.config_options['backup']['value']:
+ import time
+ ext = time.strftime("%Y-%m-%d@%H:%M:%S", time.localtime(time.time()))
+ date_str = "%s_" + "%s" % ext
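+            # The backup copy gets the timestamp appended to the original path,
+            # e.g. /etc/origin/master/registry.key_2017-02-01@12:30:05 (path illustrative)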
+
+ if os.path.exists(self.config.config_options['key']['value']):
+ shutil.copy(self.config.config_options['key']['value'],
+ date_str % self.config.config_options['key']['value'])
+ if os.path.exists(self.config.config_options['cert']['value']):
+ shutil.copy(self.config.config_options['cert']['value'],
+ date_str % self.config.config_options['cert']['value'])
+
+ options = self.config.to_option_list()
+
+ cmd = ['ca', 'create-server-cert']
+ cmd.extend(options)
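+        # Roughly equivalent to running (flags derived from the config options; illustrative):
+        #   oc adm ca create-server-cert --cert=... --key=... --hostnames=... \
+        #       --signer-cert=... --signer-key=... --signer-serial=... --overwrite=True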
+
+ return self.openshift_cmd(cmd, oadm=True)
+
+ def exists(self):
+        ''' check whether the certificate exists and covers the requested hostnames '''
+
+ cert_path = self.config.config_options['cert']['value']
+ if not os.path.exists(cert_path):
+ return False
+
+        # Would prefer pyopenssl, but it is not installed.
+        # Once we can rely on it being available, switch this code.
+        # The equivalent openssl command to dump the subject and the SAN is:
+        # openssl x509 -text -noout -certopt \
+        #  no_header,no_version,no_serial,no_signame,no_validity,no_issuer,no_pubkey,no_sigdump,no_aux \
+        #     -in /etc/origin/master/registry.crt
+        # Instead of that, we parse the openssl text output with a regex.
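+        # The regex below matches openssl text output of the form (hostnames illustrative):
+        #     X509v3 Subject Alternative Name:
+        #         DNS:docker-registry.default.svc.cluster.local, IP Address:172.30.12.34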
+ cert_names = []
+ hostnames = self.config.config_options['hostnames']['value'].split(',')
+ proc = subprocess.Popen(['openssl', 'x509', '-noout', '-text', '-in', cert_path],
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+
+ x509output, _ = proc.communicate()
+ if proc.returncode == 0:
+ regex = re.compile(r"^\s*X509v3 Subject Alternative Name:\s*?\n\s*(.*)\s*\n", re.MULTILINE)
+ match = regex.search(x509output) # E501
+ for entry in re.split(r", *", match.group(1)):
+ if entry.startswith('DNS') or entry.startswith('IP Address'):
+ cert_names.append(entry.split(':')[1])
+ # now that we have cert names let's compare
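+            # note: the mutual issubset() checks below are equivalent to cert_set == hname_set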
+ cert_set = set(cert_names)
+ hname_set = set(hostnames)
+ if cert_set.issubset(hname_set) and hname_set.issubset(cert_set):
+ return True
+
+ return False
+
+ @staticmethod
+ def run_ansible(params, check_mode):
+ '''run the idempotent ansible code'''
+
+ config = CAServerCertConfig(params['kubeconfig'],
+ params['debug'],
+ {'cert': {'value': params['cert'], 'include': True},
+ 'hostnames': {'value': ','.join(params['hostnames']), 'include': True},
+ 'overwrite': {'value': True, 'include': True},
+ 'key': {'value': params['key'], 'include': True},
+ 'signer_cert': {'value': params['signer_cert'], 'include': True},
+ 'signer_key': {'value': params['signer_key'], 'include': True},
+ 'signer_serial': {'value': params['signer_serial'], 'include': True},
+ 'backup': {'value': params['backup'], 'include': False},
+ })
+
+ server_cert = CAServerCert(config)
+
+ state = params['state']
+
+ if state == 'present':
+ ########
+ # Create
+ ########
+ if not server_cert.exists() or params['force']:
+
+ if check_mode:
+ return {'changed': True,
+ 'msg': "CHECK_MODE: Would have created the certificate.",
+ 'state': state}
+
+ api_rval = server_cert.create()
+
+ return {'changed': True, 'results': api_rval, 'state': state}
+
+ ########
+ # Exists
+ ########
+ api_rval = server_cert.get()
+ return {'changed': False, 'results': api_rval, 'state': state}
+
+ return {'failed': True,
+ 'msg': 'Unknown state passed. %s' % state}
+
diff --git a/roles/lib_openshift/src/class/oc_adm_registry.py b/roles/lib_openshift/src/class/oc_adm_registry.py
new file mode 100644
index 000000000..37904c43f
--- /dev/null
+++ b/roles/lib_openshift/src/class/oc_adm_registry.py
@@ -0,0 +1,421 @@
+# pylint: skip-file
+# flake8: noqa
+
+class RegistryException(Exception):
+ ''' Registry Exception Class '''
+ pass
+
+
+class RegistryConfig(OpenShiftCLIConfig):
+ ''' RegistryConfig is a DTO for the registry. '''
+ def __init__(self, rname, namespace, kubeconfig, registry_options):
+ super(RegistryConfig, self).__init__(rname, namespace, kubeconfig, registry_options)
+
+
+class Registry(OpenShiftCLI):
+ ''' Class to wrap the oc command line tools '''
+
+ volume_mount_path = 'spec.template.spec.containers[0].volumeMounts'
+ volume_path = 'spec.template.spec.volumes'
+ env_path = 'spec.template.spec.containers[0].env'
+
+ def __init__(self,
+ registry_config,
+ verbose=False):
+ ''' Constructor for Registry
+
+            a registry consists of the following parts:
+ - dc/docker-registry
+ - svc/docker-registry
+
+ Parameters:
+ :registry_config:
+ :verbose:
+ '''
+ super(Registry, self).__init__(registry_config.namespace, registry_config.kubeconfig, verbose)
+ self.version = OCVersion(registry_config.kubeconfig, verbose)
+ self.svc_ip = None
+ self.portal_ip = None
+ self.config = registry_config
+ self.verbose = verbose
+ self.registry_parts = [{'kind': 'dc', 'name': self.config.name},
+ {'kind': 'svc', 'name': self.config.name},
+ ]
+
+ self.__prepared_registry = None
+ self.volume_mounts = []
+ self.volumes = []
+ if self.config.config_options['volume_mounts']['value']:
+ for volume in self.config.config_options['volume_mounts']['value']:
+ volume_info = {'secret_name': volume.get('secret_name', None),
+ 'name': volume.get('name', None),
+ 'type': volume.get('type', None),
+ 'path': volume.get('path', None),
+ 'claimName': volume.get('claim_name', None),
+ 'claimSize': volume.get('claim_size', None),
+ }
+
+ vol, vol_mount = Volume.create_volume_structure(volume_info)
+ self.volumes.append(vol)
+ self.volume_mounts.append(vol_mount)
+
+ self.dconfig = None
+ self.svc = None
+
+ @property
+ def deploymentconfig(self):
+ ''' deploymentconfig property '''
+ return self.dconfig
+
+ @deploymentconfig.setter
+ def deploymentconfig(self, config):
+ ''' setter for deploymentconfig property '''
+ self.dconfig = config
+
+ @property
+ def service(self):
+ ''' service property '''
+ return self.svc
+
+ @service.setter
+ def service(self, config):
+ ''' setter for service property '''
+ self.svc = config
+
+ @property
+ def prepared_registry(self):
+ ''' prepared_registry property '''
+ if not self.__prepared_registry:
+ results = self.prepare_registry()
+ if not results:
+ raise RegistryException('Could not perform registry preparation.')
+ self.__prepared_registry = results
+
+ return self.__prepared_registry
+
+ @prepared_registry.setter
+ def prepared_registry(self, data):
+ ''' setter method for prepared_registry attribute '''
+ self.__prepared_registry = data
+
+ def get(self):
+        ''' fetch the current registry objects (dc and svc) '''
+ self.deploymentconfig = None
+ self.service = None
+
+ rval = 0
+ for part in self.registry_parts:
+ result = self._get(part['kind'], rname=part['name'])
+ if result['returncode'] == 0 and part['kind'] == 'dc':
+ self.deploymentconfig = DeploymentConfig(result['results'][0])
+ elif result['returncode'] == 0 and part['kind'] == 'svc':
+ self.service = Service(result['results'][0])
+
+ if result['returncode'] != 0:
+ rval = result['returncode']
+
+
+ return {'returncode': rval, 'deploymentconfig': self.deploymentconfig, 'service': self.service}
+
+ def exists(self):
+ '''does the object exist?'''
+ self.get()
+ if self.deploymentconfig and self.service:
+ return True
+
+ return False
+
+ def delete(self, complete=True):
+        '''delete the registry objects; keep the service when complete is False'''
+ parts = []
+ for part in self.registry_parts:
+ if not complete and part['kind'] == 'svc':
+ continue
+ parts.append(self._delete(part['kind'], part['name']))
+
+ # Clean up returned results
+ rval = 0
+ for part in parts:
+ # pylint: disable=invalid-sequence-index
+ if 'returncode' in part and part['returncode'] != 0:
+ rval = part['returncode']
+
+ return {'returncode': rval, 'results': parts}
+
+ def prepare_registry(self):
+ ''' prepare a registry for instantiation '''
+ options = self.config.to_option_list()
+
+ cmd = ['registry', '-n', self.config.namespace]
+ cmd.extend(options)
+ cmd.extend(['--dry-run=True', '-o', 'json'])
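+        # Roughly: oc adm registry -n <namespace> <options> --dry-run=True -o json,
+        # which emits the generated objects as JSON instead of creating them.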
+
+ results = self.openshift_cmd(cmd, oadm=True, output=True, output_type='json')
+        # if the dry-run failed or did not return any items, hand back the raw results
+        # pylint thinks results is a string
+        # pylint: disable=no-member
+        if results['returncode'] != 0 or 'items' not in results['results']:
+ return results
+
+ service = None
+ deploymentconfig = None
+ # pylint: disable=invalid-sequence-index
+ for res in results['results']['items']:
+ if res['kind'] == 'DeploymentConfig':
+ deploymentconfig = DeploymentConfig(res)
+ elif res['kind'] == 'Service':
+ service = Service(res)
+
+ # Verify we got a service and a deploymentconfig
+ if not service or not deploymentconfig:
+ return results
+
+        # apply the requested modifications to the generated deploymentconfig
+ deploymentconfig = DeploymentConfig(self.add_modifications(deploymentconfig))
+
+ # modify service ip
+ if self.svc_ip:
+ service.put('spec.clusterIP', self.svc_ip)
+ if self.portal_ip:
+ service.put('spec.portalIP', self.portal_ip)
+
+ # the dry-run doesn't apply the selector correctly
+ service.put('spec.selector', self.service.get_selector())
+
+ # need to create the service and the deploymentconfig
+ service_file = Utils.create_tmp_file_from_contents('service', service.yaml_dict)
+ deployment_file = Utils.create_tmp_file_from_contents('deploymentconfig', deploymentconfig.yaml_dict)
+
+ return {"service": service,
+ "service_file": service_file,
+ "service_update": False,
+ "deployment": deploymentconfig,
+ "deployment_file": deployment_file,
+ "deployment_update": False}
+
+ def create(self):
+ '''Create a registry'''
+ results = []
+ self.needs_update()
+ # if the object is none, then we need to create it
+ # if the object needs an update, then we should call replace
+ # Handle the deploymentconfig
+ if self.deploymentconfig is None:
+ results.append(self._create(self.prepared_registry['deployment_file']))
+ elif self.prepared_registry['deployment_update']:
+ results.append(self._replace(self.prepared_registry['deployment_file']))
+
+ # Handle the service
+ if self.service is None:
+ results.append(self._create(self.prepared_registry['service_file']))
+ elif self.prepared_registry['service_update']:
+ results.append(self._replace(self.prepared_registry['service_file']))
+
+ # Clean up returned results
+ rval = 0
+ for result in results:
+ # pylint: disable=invalid-sequence-index
+ if 'returncode' in result and result['returncode'] != 0:
+ rval = result['returncode']
+
+ return {'returncode': rval, 'results': results}
+
+ def update(self):
+ '''run update for the registry. This performs a replace if required'''
+ # Store the current service IP
+ if self.service:
+ svcip = self.service.get('spec.clusterIP')
+ if svcip:
+ self.svc_ip = svcip
+ portip = self.service.get('spec.portalIP')
+ if portip:
+ self.portal_ip = portip
+
+ results = []
+ if self.prepared_registry['deployment_update']:
+ results.append(self._replace(self.prepared_registry['deployment_file']))
+ if self.prepared_registry['service_update']:
+ results.append(self._replace(self.prepared_registry['service_file']))
+
+ # Clean up returned results
+ rval = 0
+ for result in results:
+ if result['returncode'] != 0:
+ rval = result['returncode']
+
+ return {'returncode': rval, 'results': results}
+
+ def add_modifications(self, deploymentconfig):
+ ''' update a deployment config with changes '''
+        # The REGISTRY_HTTP_SECRET environment variable is autogenerated on every dry-run,
+        # so carry the existing in-memory value over to the generated deploymentconfig;
+        # the modifications below will overwrite it if needed
+ if self.deploymentconfig:
+ result = self.deploymentconfig.get_env_var('REGISTRY_HTTP_SECRET')
+ if result:
+ deploymentconfig.update_env_var('REGISTRY_HTTP_SECRET', result['value'])
+
+ # Currently we know that our deployment of a registry requires a few extra modifications
+ # Modification 1
+ # we need specific environment variables to be set
+ for key, value in self.config.config_options['env_vars'].get('value', {}).items():
+ if not deploymentconfig.exists_env_key(key):
+ deploymentconfig.add_env_value(key, value)
+ else:
+ deploymentconfig.update_env_var(key, value)
+
+ # Modification 2
+ # we need specific volume variables to be set
+ for volume in self.volumes:
+ deploymentconfig.update_volume(volume)
+
+ for vol_mount in self.volume_mounts:
+ deploymentconfig.update_volume_mount(vol_mount)
+
+ # Modification 3
+ # Edits
+ edit_results = []
+ for edit in self.config.config_options['edits'].get('value', []):
+ if edit['action'] == 'put':
+ edit_results.append(deploymentconfig.put(edit['key'],
+ edit['value']))
+ if edit['action'] == 'update':
+ edit_results.append(deploymentconfig.update(edit['key'],
+ edit['value'],
+ edit.get('index', None),
+ edit.get('curr_value', None)))
+ if edit['action'] == 'append':
+ edit_results.append(deploymentconfig.append(edit['key'],
+ edit['value']))
+
+ if edit_results and not any([res[0] for res in edit_results]):
+ return None
+
+ return deploymentconfig.yaml_dict
+
+ def needs_update(self):
+ ''' check to see if we need to update '''
+ exclude_list = ['clusterIP', 'portalIP', 'type', 'protocol']
+ if self.service is None or \
+ not Utils.check_def_equal(self.prepared_registry['service'].yaml_dict,
+ self.service.yaml_dict,
+ exclude_list,
+ debug=self.verbose):
+ self.prepared_registry['service_update'] = True
+
+ exclude_list = ['dnsPolicy',
+ 'terminationGracePeriodSeconds',
+ 'restartPolicy', 'timeoutSeconds',
+ 'livenessProbe', 'readinessProbe',
+ 'terminationMessagePath',
+ 'securityContext',
+ 'imagePullPolicy',
+ 'protocol', # ports.portocol: TCP
+ 'type', # strategy: {'type': 'rolling'}
+ 'defaultMode', # added on secrets
+ 'activeDeadlineSeconds', # added in 1.5 for timeouts
+ ]
+
+ if self.deploymentconfig is None or \
+ not Utils.check_def_equal(self.prepared_registry['deployment'].yaml_dict,
+ self.deploymentconfig.yaml_dict,
+ exclude_list,
+ debug=self.verbose):
+ self.prepared_registry['deployment_update'] = True
+
+ return self.prepared_registry['deployment_update'] or self.prepared_registry['service_update'] or False
+
+ # In the future, we would like to break out each ansible state into a function.
+ # pylint: disable=too-many-branches,too-many-return-statements
+ @staticmethod
+ def run_ansible(params, check_mode):
+ '''run idempotent ansible code'''
+
+ rconfig = RegistryConfig(params['name'],
+ params['namespace'],
+ params['kubeconfig'],
+ {'images': {'value': params['images'], 'include': True},
+ 'latest_images': {'value': params['latest_images'], 'include': True},
+ 'labels': {'value': params['labels'], 'include': True},
+ 'ports': {'value': ','.join(params['ports']), 'include': True},
+ 'replicas': {'value': params['replicas'], 'include': True},
+ 'selector': {'value': params['selector'], 'include': True},
+ 'service_account': {'value': params['service_account'], 'include': True},
+ 'mount_host': {'value': params['mount_host'], 'include': True},
+ 'env_vars': {'value': params['env_vars'], 'include': False},
+ 'volume_mounts': {'value': params['volume_mounts'], 'include': False},
+ 'edits': {'value': params['edits'], 'include': False},
+ 'enforce_quota': {'value': params['enforce_quota'], 'include': True},
+ 'daemonset': {'value': params['daemonset'], 'include': True},
+ 'tls_key': {'value': params['tls_key'], 'include': True},
+ 'tls_certificate': {'value': params['tls_certificate'], 'include': True},
+ })
+
+
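+        # Options flagged include=False (env_vars, volume_mounts, edits) are not passed to
+        # `oc adm registry`; they are applied afterwards via add_modifications()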
+ ocregistry = Registry(rconfig, params['debug'])
+
+ api_rval = ocregistry.get()
+
+ state = params['state']
+ ########
+ # get
+ ########
+ if state == 'list':
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': False, 'results': api_rval, 'state': state}
+
+ ########
+ # Delete
+ ########
+ if state == 'absent':
+ if not ocregistry.exists():
+ return {'changed': False, 'state': state}
+
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a delete.'}
+
+ # Unsure as to why this is angry with the return type.
+ # pylint: disable=redefined-variable-type
+ api_rval = ocregistry.delete()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': state}
+
+ if state == 'present':
+ ########
+ # Create
+ ########
+ if not ocregistry.exists():
+
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a create.'}
+
+ api_rval = ocregistry.create()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': state}
+
+ ########
+ # Update
+ ########
+ if not params['force'] and not ocregistry.needs_update():
+ return {'changed': False, 'state': state}
+
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed an update.'}
+
+ api_rval = ocregistry.update()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': state}
+
+ return {'failed': True, 'msg': 'Unknown state passed. %s' % state}
diff --git a/roles/lib_openshift/src/class/oc_adm_router.py b/roles/lib_openshift/src/class/oc_adm_router.py
new file mode 100644
index 000000000..7b163b120
--- /dev/null
+++ b/roles/lib_openshift/src/class/oc_adm_router.py
@@ -0,0 +1,500 @@
+# pylint: skip-file
+# flake8: noqa
+
+
+class RouterException(Exception):
+ ''' Router exception'''
+ pass
+
+
+class RouterConfig(OpenShiftCLIConfig):
+ ''' RouterConfig is a DTO for the router. '''
+ def __init__(self, rname, namespace, kubeconfig, router_options):
+ super(RouterConfig, self).__init__(rname, namespace, kubeconfig, router_options)
+
+
+class Router(OpenShiftCLI):
+ ''' Class to wrap the oc command line tools '''
+ def __init__(self,
+ router_config,
+ verbose=False):
+        ''' Constructor for Router
+
+           a router consists of the following parts:
+ - dc/router
+ - svc/router
+ - sa/router
+ - secret/router-certs
+ - clusterrolebinding/router-router-role
+ '''
+ super(Router, self).__init__('default', router_config.kubeconfig, verbose)
+ self.config = router_config
+ self.verbose = verbose
+ self.router_parts = [{'kind': 'dc', 'name': self.config.name},
+ {'kind': 'svc', 'name': self.config.name},
+ {'kind': 'sa', 'name': self.config.config_options['service_account']['value']},
+ {'kind': 'secret', 'name': self.config.name + '-certs'},
+ {'kind': 'clusterrolebinding', 'name': 'router-' + self.config.name + '-role'},
+ ]
+
+ self.__prepared_router = None
+ self.dconfig = None
+ self.svc = None
+ self._secret = None
+ self._serviceaccount = None
+ self._rolebinding = None
+
+ @property
+ def prepared_router(self):
+ ''' property for the prepared router'''
+ if self.__prepared_router is None:
+ results = self._prepare_router()
+ if not results or 'returncode' in results and results['returncode'] != 0:
+ if 'stderr' in results:
+ raise RouterException('Could not perform router preparation: %s' % results['stderr'])
+
+ raise RouterException('Could not perform router preparation.')
+ self.__prepared_router = results
+
+ return self.__prepared_router
+
+ @prepared_router.setter
+ def prepared_router(self, obj):
+ '''setter for the prepared_router'''
+ self.__prepared_router = obj
+
+ @property
+ def deploymentconfig(self):
+ ''' property deploymentconfig'''
+ return self.dconfig
+
+ @deploymentconfig.setter
+ def deploymentconfig(self, config):
+ ''' setter for property deploymentconfig '''
+ self.dconfig = config
+
+ @property
+ def service(self):
+ ''' property for service '''
+ return self.svc
+
+ @service.setter
+ def service(self, config):
+ ''' setter for property service '''
+ self.svc = config
+
+ @property
+ def secret(self):
+ ''' property secret '''
+ return self._secret
+
+ @secret.setter
+ def secret(self, config):
+ ''' setter for property secret '''
+ self._secret = config
+
+ @property
+ def serviceaccount(self):
+ ''' property for serviceaccount '''
+ return self._serviceaccount
+
+ @serviceaccount.setter
+ def serviceaccount(self, config):
+ ''' setter for property serviceaccount '''
+ self._serviceaccount = config
+
+ @property
+ def rolebinding(self):
+ ''' property rolebinding '''
+ return self._rolebinding
+
+ @rolebinding.setter
+ def rolebinding(self, config):
+ ''' setter for property rolebinding '''
+ self._rolebinding = config
+
+ def get_object_by_kind(self, kind):
+        '''return the stored object that matches the given kind'''
+ if re.match("^(dc|deploymentconfig)$", kind, flags=re.IGNORECASE):
+ return self.deploymentconfig
+ elif re.match("^(svc|service)$", kind, flags=re.IGNORECASE):
+ return self.service
+ elif re.match("^(sa|serviceaccount)$", kind, flags=re.IGNORECASE):
+ return self.serviceaccount
+ elif re.match("secret", kind, flags=re.IGNORECASE):
+ return self.secret
+ elif re.match("clusterrolebinding", kind, flags=re.IGNORECASE):
+ return self.rolebinding
+
+ return None
+
+ def get(self):
+ ''' return the self.router_parts '''
+ self.service = None
+ self.deploymentconfig = None
+ self.serviceaccount = None
+ self.secret = None
+ self.rolebinding = None
+ for part in self.router_parts:
+ result = self._get(part['kind'], rname=part['name'])
+ if result['returncode'] == 0 and part['kind'] == 'dc':
+ self.deploymentconfig = DeploymentConfig(result['results'][0])
+ elif result['returncode'] == 0 and part['kind'] == 'svc':
+ self.service = Service(content=result['results'][0])
+ elif result['returncode'] == 0 and part['kind'] == 'sa':
+ self.serviceaccount = ServiceAccount(content=result['results'][0])
+ elif result['returncode'] == 0 and part['kind'] == 'secret':
+ self.secret = Secret(content=result['results'][0])
+ elif result['returncode'] == 0 and part['kind'] == 'clusterrolebinding':
+ self.rolebinding = RoleBinding(content=result['results'][0])
+
+ return {'deploymentconfig': self.deploymentconfig,
+ 'service': self.service,
+ 'serviceaccount': self.serviceaccount,
+ 'secret': self.secret,
+ 'clusterrolebinding': self.rolebinding,
+ }
+
+ def exists(self):
+        '''return whether the dc, svc, secret, and serviceaccount all exist'''
+ if self.deploymentconfig and self.service and self.secret and self.serviceaccount:
+ return True
+
+ return False
+
+ def delete(self):
+        '''delete all of the router objects'''
+ parts = []
+ for part in self.router_parts:
+ parts.append(self._delete(part['kind'], part['name']))
+
+ rval = 0
+ for part in parts:
+ if part['returncode'] != 0 and not 'already exist' in part['stderr']:
+ rval = part['returncode']
+
+ return {'returncode': rval, 'results': parts}
+
+ def add_modifications(self, deploymentconfig):
+ '''modify the deployment config'''
+ # We want modifications in the form of edits coming in from the module.
+ # Let's apply these here
+ edit_results = []
+ for edit in self.config.config_options['edits'].get('value', []):
+ if edit['action'] == 'put':
+ edit_results.append(deploymentconfig.put(edit['key'],
+ edit['value']))
+ if edit['action'] == 'update':
+ edit_results.append(deploymentconfig.update(edit['key'],
+ edit['value'],
+ edit.get('index', None),
+ edit.get('curr_value', None)))
+ if edit['action'] == 'append':
+ edit_results.append(deploymentconfig.append(edit['key'],
+ edit['value']))
+
+ if edit_results and not any([res[0] for res in edit_results]):
+ return None
+
+ return deploymentconfig
+
+ # pylint: disable=too-many-branches
+ def _prepare_router(self):
+ '''prepare router for instantiation'''
+ # if cacert, key, and cert were passed, combine them into a pem file
+ if (self.config.config_options['cacert_file']['value'] and
+ self.config.config_options['cert_file']['value'] and
+ self.config.config_options['key_file']['value']):
+
+ router_pem = '/tmp/router.pem'
+ with open(router_pem, 'w') as rfd:
+ rfd.write(open(self.config.config_options['cert_file']['value']).read())
+ rfd.write(open(self.config.config_options['key_file']['value']).read())
+ if self.config.config_options['cacert_file']['value'] and \
+ os.path.exists(self.config.config_options['cacert_file']['value']):
+ rfd.write(open(self.config.config_options['cacert_file']['value']).read())
+
+ atexit.register(Utils.cleanup, [router_pem])
+
+ self.config.config_options['default_cert']['value'] = router_pem
+
+ elif self.config.config_options['default_cert']['value'] is None:
+            # No certificate was passed to us; do not pass one to oc adm router
+ self.config.config_options['default_cert']['include'] = False
+
+ options = self.config.to_option_list()
+
+ cmd = ['router', self.config.name, '-n', self.config.namespace]
+ cmd.extend(options)
+ cmd.extend(['--dry-run=True', '-o', 'json'])
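+        # Roughly: oc adm router <name> -n <namespace> <options> --dry-run=True -o json,
+        # returning the dc/svc/sa/secret/clusterrolebinding definitions without creating them.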
+
+ results = self.openshift_cmd(cmd, oadm=True, output=True, output_type='json')
+
+ # pylint: disable=maybe-no-member
+ if results['returncode'] != 0 or 'items' not in results['results']:
+ return results
+
+ oc_objects = {'DeploymentConfig': {'obj': None, 'path': None, 'update': False},
+ 'Secret': {'obj': None, 'path': None, 'update': False},
+ 'ServiceAccount': {'obj': None, 'path': None, 'update': False},
+ 'ClusterRoleBinding': {'obj': None, 'path': None, 'update': False},
+ 'Service': {'obj': None, 'path': None, 'update': False},
+ }
+ # pylint: disable=invalid-sequence-index
+ for res in results['results']['items']:
+ if res['kind'] == 'DeploymentConfig':
+ oc_objects['DeploymentConfig']['obj'] = DeploymentConfig(res)
+ elif res['kind'] == 'Service':
+ oc_objects['Service']['obj'] = Service(res)
+ elif res['kind'] == 'ServiceAccount':
+ oc_objects['ServiceAccount']['obj'] = ServiceAccount(res)
+ elif res['kind'] == 'Secret':
+ oc_objects['Secret']['obj'] = Secret(res)
+ elif res['kind'] == 'ClusterRoleBinding':
+ oc_objects['ClusterRoleBinding']['obj'] = RoleBinding(res)
+
+ # Currently only deploymentconfig needs updating
+ # Verify we got a deploymentconfig
+ if not oc_objects['DeploymentConfig']['obj']:
+ return results
+
+        # apply the requested modifications (edits) to the generated deploymentconfig
+ oc_objects['DeploymentConfig']['obj'] = self.add_modifications(oc_objects['DeploymentConfig']['obj'])
+
+ for oc_type, oc_data in oc_objects.items():
+ if oc_data['obj'] is not None:
+ oc_data['path'] = Utils.create_tmp_file_from_contents(oc_type, oc_data['obj'].yaml_dict)
+
+ return oc_objects
+
+ def create(self):
+ '''Create a router
+
+ This includes the different parts:
+ - deploymentconfig
+ - service
+ - serviceaccount
+ - secrets
+ - clusterrolebinding
+ '''
+ results = []
+ self.needs_update()
+
+ import time
+ # pylint: disable=maybe-no-member
+ for kind, oc_data in self.prepared_router.items():
+ if oc_data['obj'] is not None:
+ time.sleep(1)
+ if self.get_object_by_kind(kind) is None:
+ results.append(self._create(oc_data['path']))
+
+ elif oc_data['update']:
+ results.append(self._replace(oc_data['path']))
+
+
+ rval = 0
+ for result in results:
+ if result['returncode'] != 0 and not 'already exist' in result['stderr']:
+ rval = result['returncode']
+
+ return {'returncode': rval, 'results': results}
+
+ def update(self):
+ '''run update for the router. This performs a replace'''
+ results = []
+
+ # pylint: disable=maybe-no-member
+ for _, oc_data in self.prepared_router.items():
+ if oc_data['update']:
+ results.append(self._replace(oc_data['path']))
+
+ rval = 0
+ for result in results:
+ if result['returncode'] != 0:
+ rval = result['returncode']
+
+ return {'returncode': rval, 'results': results}
+
+ # pylint: disable=too-many-return-statements,too-many-branches
+ def needs_update(self):
+ ''' check to see if we need to update '''
+ # ServiceAccount:
+        # Compare the generated serviceaccount against the existing one.
+        # secrets and imagePullSecrets are auto-generated, so skip them.
+ skip = ['secrets', 'imagePullSecrets']
+ if self.serviceaccount is None or \
+ not Utils.check_def_equal(self.prepared_router['ServiceAccount']['obj'].yaml_dict,
+ self.serviceaccount.yaml_dict,
+ skip_keys=skip,
+ debug=self.verbose):
+ self.prepared_router['ServiceAccount']['update'] = True
+
+ # Secret:
+ # See if one was generated from our dry-run and verify it if needed
+ if self.prepared_router['Secret']['obj']:
+ if not self.secret:
+ self.prepared_router['Secret']['update'] = True
+
+ if self.secret is None or \
+ not Utils.check_def_equal(self.prepared_router['Secret']['obj'].yaml_dict,
+ self.secret.yaml_dict,
+ skip_keys=skip,
+ debug=self.verbose):
+ self.prepared_router['Secret']['update'] = True
+
+ # Service:
+ # Fix the ports to have protocol=TCP
+ for port in self.prepared_router['Service']['obj'].get('spec.ports'):
+ port['protocol'] = 'TCP'
+
+ skip = ['portalIP', 'clusterIP', 'sessionAffinity', 'type']
+ if self.service is None or \
+ not Utils.check_def_equal(self.prepared_router['Service']['obj'].yaml_dict,
+ self.service.yaml_dict,
+ skip_keys=skip,
+ debug=self.verbose):
+ self.prepared_router['Service']['update'] = True
+
+ # DeploymentConfig:
+ # Router needs some exceptions.
+ # We do not want to check the autogenerated password for stats admin
+ if self.deploymentconfig is not None:
+ if not self.config.config_options['stats_password']['value']:
+ for idx, env_var in enumerate(self.prepared_router['DeploymentConfig']['obj'].get(\
+ 'spec.template.spec.containers[0].env') or []):
+ if env_var['name'] == 'STATS_PASSWORD':
+ env_var['value'] = \
+ self.deploymentconfig.get('spec.template.spec.containers[0].env[%s].value' % idx)
+ break
+
+ # dry-run doesn't add the protocol to the ports section. We will manually do that.
+ for idx, port in enumerate(self.prepared_router['DeploymentConfig']['obj'].get(\
+ 'spec.template.spec.containers[0].ports') or []):
+ if not 'protocol' in port:
+ port['protocol'] = 'TCP'
+
+ # These are different when generating
+ skip = ['dnsPolicy',
+ 'terminationGracePeriodSeconds',
+ 'restartPolicy', 'timeoutSeconds',
+ 'livenessProbe', 'readinessProbe',
+ 'terminationMessagePath', 'hostPort',
+ 'defaultMode',
+ ]
+
+ if self.deploymentconfig is None or \
+ not Utils.check_def_equal(self.prepared_router['DeploymentConfig']['obj'].yaml_dict,
+ self.deploymentconfig.yaml_dict,
+ skip_keys=skip,
+ debug=self.verbose):
+ self.prepared_router['DeploymentConfig']['update'] = True
+
+ # Check if any of the parts need updating, if so, return True
+ # else, no need to update
+ # pylint: disable=no-member
+ return any([self.prepared_router[oc_type]['update'] for oc_type in self.prepared_router.keys()])
+
+ @staticmethod
+ def run_ansible(params, check_mode):
+ '''run ansible idempotent code'''
+
+ rconfig = RouterConfig(params['name'],
+ params['namespace'],
+ params['kubeconfig'],
+ {'default_cert': {'value': params['default_cert'], 'include': True},
+ 'cert_file': {'value': params['cert_file'], 'include': False},
+ 'key_file': {'value': params['key_file'], 'include': False},
+ 'images': {'value': params['images'], 'include': True},
+ 'latest_images': {'value': params['latest_images'], 'include': True},
+ 'labels': {'value': params['labels'], 'include': True},
+ 'ports': {'value': ','.join(params['ports']), 'include': True},
+ 'replicas': {'value': params['replicas'], 'include': True},
+ 'selector': {'value': params['selector'], 'include': True},
+ 'service_account': {'value': params['service_account'], 'include': True},
+ 'router_type': {'value': params['router_type'], 'include': False},
+ 'host_network': {'value': params['host_network'], 'include': True},
+ 'external_host': {'value': params['external_host'], 'include': True},
+ 'external_host_vserver': {'value': params['external_host_vserver'],
+ 'include': True},
+ 'external_host_insecure': {'value': params['external_host_insecure'],
+ 'include': True},
+ 'external_host_partition_path': {'value': params['external_host_partition_path'],
+ 'include': True},
+ 'external_host_username': {'value': params['external_host_username'],
+ 'include': True},
+ 'external_host_password': {'value': params['external_host_password'],
+ 'include': True},
+ 'external_host_private_key': {'value': params['external_host_private_key'],
+ 'include': True},
+ 'expose_metrics': {'value': params['expose_metrics'], 'include': True},
+ 'metrics_image': {'value': params['metrics_image'], 'include': True},
+ 'stats_user': {'value': params['stats_user'], 'include': True},
+ 'stats_password': {'value': params['stats_password'], 'include': True},
+ 'stats_port': {'value': params['stats_port'], 'include': True},
+ # extra
+ 'cacert_file': {'value': params['cacert_file'], 'include': False},
+ # edits
+ 'edits': {'value': params['edits'], 'include': False},
+ })
+
+
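+        # cert_file/key_file/cacert_file above are include=False: they are only used to build
+        # the combined default_cert PEM in _prepare_router(); edits are applied in add_modifications()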
+ state = params['state']
+
+ ocrouter = Router(rconfig, verbose=params['debug'])
+
+ api_rval = ocrouter.get()
+
+ ########
+ # get
+ ########
+ if state == 'list':
+ return {'changed': False, 'results': api_rval, 'state': state}
+
+ ########
+ # Delete
+ ########
+ if state == 'absent':
+ if not ocrouter.exists():
+ return {'changed': False, 'state': state}
+
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a delete.'}
+
+            # For a delete we return the result for each object
+            # that makes up the router
+ # pylint: disable=redefined-variable-type
+ api_rval = ocrouter.delete()
+
+ return {'changed': True, 'results': api_rval, 'state': state}
+
+ if state == 'present':
+ ########
+ # Create
+ ########
+ if not ocrouter.exists():
+
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a create.'}
+
+ api_rval = ocrouter.create()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': state}
+
+ ########
+ # Update
+ ########
+ if not ocrouter.needs_update():
+ return {'changed': False, 'state': state}
+
+ if check_mode:
+            return {'changed': True, 'msg': 'CHECK_MODE: Would have performed an update.'}
+
+ api_rval = ocrouter.update()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': state}
diff --git a/roles/lib_openshift/src/class/oc_obj.py b/roles/lib_openshift/src/class/oc_obj.py
index 21129a50c..51d3ce996 100644
--- a/roles/lib_openshift/src/class/oc_obj.py
+++ b/roles/lib_openshift/src/class/oc_obj.py
@@ -77,7 +77,6 @@ class OCObject(OpenShiftCLI):
if objects['returncode'] != 0:
return objects
- # pylint: disable=no-member
data = None
if files:
data = Utils.get_resource_file(files[0], content_type)
diff --git a/roles/lib_openshift/src/class/oc_route.py b/roles/lib_openshift/src/class/oc_route.py
index cb743e19d..3935525f1 100644
--- a/roles/lib_openshift/src/class/oc_route.py
+++ b/roles/lib_openshift/src/class/oc_route.py
@@ -55,13 +55,15 @@ class OCRoute(OpenShiftCLI):
def update(self):
'''update the object'''
- # need to update the tls information and the service name
- return self._replace_content(self.kind, self.config.name, self.config.data)
+ return self._replace_content(self.kind,
+ self.config.name,
+ self.config.data,
+ force=(self.config.host != self.route.get_host()))
def needs_update(self):
''' verify an update is needed '''
skip = []
- return not Utils.check_def_equal(self.config.data, self.route.yaml_dict, skip_keys=skip, debug=True)
+ return not Utils.check_def_equal(self.config.data, self.route.yaml_dict, skip_keys=skip, debug=self.verbose)
@staticmethod
def get_cert_data(path, content):
diff --git a/roles/lib_openshift/src/class/oc_sdnvalidator.py b/roles/lib_openshift/src/class/oc_sdnvalidator.py
new file mode 100644
index 000000000..da923337b
--- /dev/null
+++ b/roles/lib_openshift/src/class/oc_sdnvalidator.py
@@ -0,0 +1,58 @@
+# pylint: skip-file
+# flake8: noqa
+
+# pylint: disable=too-many-instance-attributes
+class OCSDNValidator(OpenShiftCLI):
+ ''' Class to wrap the oc command line tools '''
+
+ def __init__(self, kubeconfig):
+ ''' Constructor for OCSDNValidator '''
+ # namespace has no meaning for SDN validation, hardcode to 'default'
+ super(OCSDNValidator, self).__init__('default', kubeconfig)
+
+ def get(self, kind, invalid_filter):
+ ''' return SDN information '''
+
+ rval = self._get(kind)
+ if rval['returncode'] != 0:
+ return False, rval, []
+
+        return True, rval, list(filter(invalid_filter, rval['results'][0]['items']))
+
+ # pylint: disable=too-many-return-statements
+ @staticmethod
+ def run_ansible(params):
+ ''' run the idempotent ansible code
+
+ params comes from the ansible portion of this module
+ '''
+
+ sdnvalidator = OCSDNValidator(params['kubeconfig'])
+ all_invalid = {}
+ failed = False
+
+ checks = (
+ (
+ 'hostsubnet',
+ lambda x: x['metadata']['name'] != x['host'],
+ u'hostsubnets where metadata.name != host',
+ ),
+ (
+ 'netnamespace',
+ lambda x: x['metadata']['name'] != x['netname'],
+ u'netnamespaces where metadata.name != netname',
+ ),
+ )
+
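+        # Example (illustrative): a hostsubnet named 'node1.example.com' whose 'host' field
+        # reads 'node2.example.com' would be flagged by the first check above.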
+ for resource, invalid_filter, invalid_msg in checks:
+ success, rval, invalid = sdnvalidator.get(resource, invalid_filter)
+ if not success:
+ return {'failed': True, 'msg': 'Failed to GET {}.'.format(resource), 'state': 'list', 'results': rval}
+ if invalid:
+ failed = True
+ all_invalid[invalid_msg] = invalid
+
+ if failed:
+            return {'failed': True, 'msg': 'One or more SDN objects are not valid.', 'state': 'list', 'results': all_invalid}
+
+ return {'msg': 'All SDN objects are valid.'}
diff --git a/roles/lib_openshift/src/class/oc_secret.py b/roles/lib_openshift/src/class/oc_secret.py
index 5eac27572..deb36a9fa 100644
--- a/roles/lib_openshift/src/class/oc_secret.py
+++ b/roles/lib_openshift/src/class/oc_secret.py
@@ -29,7 +29,7 @@ class OCSecret(OpenShiftCLI):
if results['returncode'] == 0 and results['results'][0]:
results['exists'] = True
if self.decode:
- if results['results'][0].has_key('data'):
+ if 'data' in results['results'][0]:
for sname, value in results['results'][0]['data'].items():
results['decoded'][sname] = base64.b64decode(value)
diff --git a/roles/lib_openshift/src/class/oc_service.py b/roles/lib_openshift/src/class/oc_service.py
index d4cc83a59..20cf23df5 100644
--- a/roles/lib_openshift/src/class/oc_service.py
+++ b/roles/lib_openshift/src/class/oc_service.py
@@ -22,7 +22,7 @@ class OCService(OpenShiftCLI):
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False):
''' Constructor for OCVolume '''
- super(OCService, self).__init__(namespace, kubeconfig)
+ super(OCService, self).__init__(namespace, kubeconfig, verbose)
self.namespace = namespace
self.config = ServiceConfig(sname, namespace, ports, selector, labels,
cluster_ip, portal_ip, session_affinity, service_type)
@@ -93,7 +93,9 @@ class OCService(OpenShiftCLI):
params['portalip'],
params['ports'],
params['session_affinity'],
- params['service_type'])
+ params['service_type'],
+ params['kubeconfig'],
+ params['debug'])
state = params['state']