182 files changed, 8416 insertions, 2668 deletions
diff --git a/.gitignore b/.gitignore index cacc711a1..8f46c269f 100644 --- a/.gitignore +++ b/.gitignore @@ -15,4 +15,5 @@ .DS_Store gce.ini multi_ec2.yaml +multi_inventory.yaml .vagrant diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible index 92f545b25..c2f5784ce 100644 --- a/.tito/packages/openshift-ansible +++ b/.tito/packages/openshift-ansible @@ -1 +1 @@ -3.0.6-1 ./ +3.0.12-1 ./ @@ -6,7 +6,7 @@ This repo contains Ansible code for OpenShift and Atomic Enterprise. - Install base dependencies: - Fedora: ``` - yum install -y ansible rubygem-thor rubygem-parseconfig util-linux + dnf install -y ansible rubygem-thor rubygem-parseconfig util-linux pyOpenSSL libffi-devel python-cryptography ``` - OSX: ``` diff --git a/README_AWS.md b/README_AWS.md index 6757e2892..d9e2ac5a9 100644 --- a/README_AWS.md +++ b/README_AWS.md @@ -105,7 +105,7 @@ Install Dependencies 1. Ansible requires python-boto for aws operations: RHEL/CentOS/Fedora ``` - yum install -y ansible python-boto + yum install -y ansible python-boto pyOpenSSL ``` OSX: ``` diff --git a/README_vagrant.md b/README_vagrant.md index 5f87d6633..73fd31476 100644 --- a/README_vagrant.md +++ b/README_vagrant.md @@ -1,8 +1,8 @@ Requirements ------------ +- ansible (the latest 1.9 release is preferred, but any version greater than 1.9.1 should be sufficient). - vagrant (tested against version 1.7.2) - vagrant-hostmanager plugin (tested against version 1.5.0) -- vagrant-registration plugin (only required for enterprise deployment type) - vagrant-libvirt (tested against version 0.0.26) - Only required if using libvirt instead of virtualbox @@ -43,7 +43,8 @@ The following environment variables can be overriden: - ``OPENSHIFT_DEPLOYMENT_TYPE`` (defaults to origin, choices: origin, enterprise, online) - ``OPENSHIFT_NUM_NODES`` (the number of nodes to create, defaults to 2) -For ``enterprise`` deployment types these env variables should also be specified: +Note that if ``OPENSHIFT_DEPLOYMENT_TYPE`` is ``enterprise`` you should also specify environment variables related to ``subscription-manager`` which are used by the ``rhel_subscribe`` role: + - ``rhel_subscription_user``: rhsm user - ``rhel_subscription_pass``: rhsm password - (optional) ``rhel_subscription_pool``: poolID to attach a specific subscription besides what auto-attach detects diff --git a/Vagrantfile b/Vagrantfile index 33532cd63..362e1ff48 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -16,27 +16,6 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| config.hostmanager.include_offline = true config.ssh.insert_key = false - if deployment_type === 'enterprise' - unless Vagrant.has_plugin?('vagrant-registration') - raise 'vagrant-registration-plugin is required for enterprise deployment' - end - username = ENV['rhel_subscription_user'] - password = ENV['rhel_subscription_pass'] - unless username and password - raise 'rhel_subscription_user and rhel_subscription_pass are required' - end - config.registration.username = username - config.registration.password = password - # FIXME this is temporary until vagrant/ansible registration modules - # are capable of handling specific subscription pools - if not ENV['rhel_subscription_pool'].nil? 
- config.vm.provision "shell" do |s| - s.inline = "subscription-manager attach --pool=$1 || true" - s.args = "#{ENV['rhel_subscription_pool']}" - end - end - end - config.vm.provider "virtualbox" do |vbox, override| override.vm.box = "centos/7" vbox.memory = 1024 diff --git a/bin/README_SHELL_COMPLETION b/bin/README_SHELL_COMPLETION index 5f05df7fc..49bba3acc 100644 --- a/bin/README_SHELL_COMPLETION +++ b/bin/README_SHELL_COMPLETION @@ -14,7 +14,7 @@ will populate the cache file and the completions should become available. This script will look at the cached version of your -multi_ec2 results in ~/.ansible/tmp/multi_ec2_inventory.cache. +multi_inventory results in ~/.ansible/tmp/multi_inventory.cache. It will then parse a few {host}.{env} out of the json and return them to be completable. diff --git a/bin/cluster b/bin/cluster index 59a6755d3..220f11d49 100755 --- a/bin/cluster +++ b/bin/cluster @@ -57,7 +57,7 @@ class Cluster(object): """ env = {'cluster_id': args.cluster_id, 'deployment_type': self.get_deployment_type(args)} - playbook = "playbooks/{}/openshift-cluster/launch.yml".format(args.provider) + playbook = "playbooks/{0}/openshift-cluster/launch.yml".format(args.provider) inventory = self.setup_provider(args.provider) env['num_masters'] = args.masters @@ -74,7 +74,7 @@ class Cluster(object): """ env = {'cluster_id': args.cluster_id, 'deployment_type': self.get_deployment_type(args)} - playbook = "playbooks/{}/openshift-cluster/terminate.yml".format(args.provider) + playbook = "playbooks/{0}/openshift-cluster/terminate.yml".format(args.provider) inventory = self.setup_provider(args.provider) self.action(args, inventory, env, playbook) @@ -86,7 +86,7 @@ class Cluster(object): """ env = {'cluster_id': args.cluster_id, 'deployment_type': self.get_deployment_type(args)} - playbook = "playbooks/{}/openshift-cluster/list.yml".format(args.provider) + playbook = "playbooks/{0}/openshift-cluster/list.yml".format(args.provider) inventory = self.setup_provider(args.provider) self.action(args, inventory, env, playbook) @@ -98,7 +98,7 @@ class Cluster(object): """ env = {'cluster_id': args.cluster_id, 'deployment_type': self.get_deployment_type(args)} - playbook = "playbooks/{}/openshift-cluster/config.yml".format(args.provider) + playbook = "playbooks/{0}/openshift-cluster/config.yml".format(args.provider) inventory = self.setup_provider(args.provider) self.action(args, inventory, env, playbook) @@ -110,7 +110,7 @@ class Cluster(object): """ env = {'cluster_id': args.cluster_id, 'deployment_type': self.get_deployment_type(args)} - playbook = "playbooks/{}/openshift-cluster/update.yml".format(args.provider) + playbook = "playbooks/{0}/openshift-cluster/update.yml".format(args.provider) inventory = self.setup_provider(args.provider) self.action(args, inventory, env, playbook) @@ -124,7 +124,7 @@ class Cluster(object): 'deployment_type': self.get_deployment_type(args), 'new_cluster_state': args.state} - playbook = "playbooks/{}/openshift-cluster/service.yml".format(args.provider) + playbook = "playbooks/{0}/openshift-cluster/service.yml".format(args.provider) inventory = self.setup_provider(args.provider) self.action(args, inventory, env, playbook) diff --git a/bin/openshift_ansible.conf.example b/bin/openshift_ansible.conf.example index e891b855a..8786dfc13 100644 --- a/bin/openshift_ansible.conf.example +++ b/bin/openshift_ansible.conf.example @@ -1,5 +1,5 @@ #[main] -#inventory = /usr/share/ansible/inventory/multi_ec2.py +#inventory = /usr/share/ansible/inventory/multi_inventory.py 
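The bin/cluster hunks above replace auto-numbered `{}` placeholders with explicit `{0}` field indices. A plausible motivation, not stated in the diff, is compatibility with Python 2.6, where auto-numbered format fields are not supported; a minimal sketch:

```python
# Sketch only: illustrates why "{0}" works where "{}" does not on Python 2.6.
provider = "aws"  # hypothetical provider name

# "playbooks/{}/openshift-cluster/launch.yml".format(provider)
#   -> raises ValueError on Python 2.6 ("zero length field name in format")

# Explicit field index works on Python 2.6 and later:
playbook = "playbooks/{0}/openshift-cluster/launch.yml".format(provider)
print(playbook)  # playbooks/aws/openshift-cluster/launch.yml
```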
#[host_type_aliases] #host-type-one = aliasa,aliasb diff --git a/bin/openshift_ansible/awsutil.py b/bin/openshift_ansible/awsutil.py index 9df034f57..45345007c 100644 --- a/bin/openshift_ansible/awsutil.py +++ b/bin/openshift_ansible/awsutil.py @@ -4,7 +4,10 @@ import os import re -from openshift_ansible import multi_ec2 + +# Buildbot does not have multi_inventory installed +#pylint: disable=no-name-in-module +from openshift_ansible import multi_inventory class ArgumentError(Exception): """This class is raised when improper arguments are passed.""" @@ -49,9 +52,9 @@ class AwsUtil(object): Keyword arguments: args -- optional arguments to pass to the inventory script """ - mec2 = multi_ec2.MultiEc2(args) - mec2.run() - return mec2.result + minv = multi_inventory.MultiInventory(args) + minv.run() + return minv.result def get_environments(self): """Searches for env tags in the inventory and returns all of the envs found.""" diff --git a/bin/openshift_ansible/multi_ec2.py b/bin/openshift_ansible/multi_ec2.py deleted file mode 120000 index 660a0418e..000000000 --- a/bin/openshift_ansible/multi_ec2.py +++ /dev/null @@ -1 +0,0 @@ -../../inventory/multi_ec2.py
\ No newline at end of file diff --git a/bin/openshift_ansible/multi_inventory.py b/bin/openshift_ansible/multi_inventory.py new file mode 120000 index 000000000..b40feec07 --- /dev/null +++ b/bin/openshift_ansible/multi_inventory.py @@ -0,0 +1 @@ +../../inventory/multi_inventory.py
\ No newline at end of file diff --git a/bin/ossh_bash_completion b/bin/ossh_bash_completion index 5072161f0..997ff0f9c 100755 --- a/bin/ossh_bash_completion +++ b/bin/ossh_bash_completion @@ -1,12 +1,12 @@ __ossh_known_hosts(){ if python -c 'import openshift_ansible' &>/dev/null; then - /usr/bin/python -c 'from openshift_ansible import multi_ec2; m=multi_ec2.MultiEc2(); m.run(); z=m.result; print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])' + /usr/bin/python -c 'from openshift_ansible import multi_inventory; m=multi_inventory.MultiInventory(); m.run(); z=m.result; print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])' - elif [[ -f /dev/shm/.ansible/tmp/multi_ec2_inventory.cache ]]; then - /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_ec2_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])' + elif [[ -f /dev/shm/.ansible/tmp/multi_inventory.cache ]]; then + /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])' - elif [[ -f ~/.ansible/tmp/multi_ec2_inventory.cache ]]; then - /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_ec2_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])' + elif [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then + /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])' fi } @@ -26,13 +26,13 @@ complete -F _ossh ossh oscp __opssh_known_hosts(){ if python -c 'import openshift_ansible' &>/dev/null; then - /usr/bin/python -c 'from openshift_ansible.multi_ec2 import MultiEc2; m=MultiEc2(); m.run(); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in m.result["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])' + /usr/bin/python -c 'from openshift_ansible.multi_inventory import MultiInventory; m=MultiInventory(); m.run(); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in m.result["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])' - elif [[ -f /dev/shm/.ansible/tmp/multi_ec2_inventory.cache ]]; then - /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_ec2_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in z["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])' + elif [[ -f /dev/shm/.ansible/tmp/multi_inventory.cache ]]; then + /usr/bin/python -c 'import json; 
loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in z["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])' - elif [[ -f ~/.ansible/tmp/multi_ec2_inventory.cache ]]; then - /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("/dev/shm/.ansible/tmp/multi_ec2_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in z["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])' + elif [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then + /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("/dev/shm/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in z["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])' fi } diff --git a/bin/ossh_zsh_completion b/bin/ossh_zsh_completion index 44500c618..3c4018636 100644 --- a/bin/ossh_zsh_completion +++ b/bin/ossh_zsh_completion @@ -2,13 +2,13 @@ _ossh_known_hosts(){ if python -c 'import openshift_ansible' &>/dev/null; then - print $(/usr/bin/python -c 'from openshift_ansible import multi_ec2; m=multi_ec2.MultiEc2(); m.run(); z=m.result; print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])') + print $(/usr/bin/python -c 'from openshift_ansible import multi_inventory; m=multi_inventory.MultiInventory(); m.run(); z=m.result; print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])') - elif [[ -f /dev/shm/.ansible/tmp/multi_ec2_inventory.cache ]]; then - print $(/usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_ec2_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])') + elif [[ -f /dev/shm/.ansible/tmp/multi_inventory.cache ]]; then + print $(/usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])') - elif [[ -f ~/.ansible/tmp/multi_ec2_inventory.cache ]]; then - print $(/usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_ec2_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])') + elif [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then + print $(/usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])') fi diff --git a/bin/zsh_functions/_ossh b/bin/zsh_functions/_ossh index 7c6cb7b0b..d205e1055 100644 --- a/bin/zsh_functions/_ossh +++ 
b/bin/zsh_functions/_ossh @@ -1,8 +1,8 @@ #compdef ossh oscp _ossh_known_hosts(){ - if [[ -f ~/.ansible/tmp/multi_ec2_inventory.cache ]]; then - print $(/usr/bin/python -c 'import json,os; z = json.loads(open("%s"%os.path.expanduser("~/.ansible/tmp/multi_ec2_inventory.cache")).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items()])') + if [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then + print $(/usr/bin/python -c 'import json,os; z = json.loads(open("%s"%os.path.expanduser("~/.ansible/tmp/multi_inventory.cache")).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items()])') fi } diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py index a57b0f895..2386b5878 100644 --- a/filter_plugins/oo_filters.py +++ b/filter_plugins/oo_filters.py @@ -7,6 +7,8 @@ Custom filters for use in openshift-ansible from ansible import errors from operator import itemgetter +import OpenSSL.crypto +import os.path import pdb import re import json @@ -241,6 +243,21 @@ class FilterModule(object): return string.split(separator) @staticmethod + def oo_haproxy_backend_masters(hosts): + ''' This takes an array of dicts and returns an array of dicts + to be used as a backend for the haproxy role + ''' + servers = [] + for idx, host_info in enumerate(hosts): + server = dict(name="master%s" % idx) + server_ip = host_info['openshift']['common']['ip'] + server_port = host_info['openshift']['master']['api_port'] + server['address'] = "%s:%s" % (server_ip, server_port) + server['opts'] = 'check' + servers.append(server) + return servers + + @staticmethod def oo_filter_list(data, filter_attr=None): ''' This returns a list, which contains all items where filter_attr evaluates to true @@ -258,7 +275,7 @@ class FilterModule(object): raise errors.AnsibleFilterError("|failed expects filter_attr is a str") # Gather up the values for the list of keys passed in - return [x for x in data if x[filter_attr]] + return [x for x in data if x.has_key(filter_attr) and x[filter_attr]] @staticmethod def oo_parse_heat_stack_outputs(data): @@ -327,6 +344,120 @@ class FilterModule(object): return revamped_outputs + @staticmethod + # pylint: disable=too-many-branches + def oo_parse_certificate_names(certificates, data_dir, internal_hostnames): + ''' Parses names from list of certificate hashes. 
+ + Ex: certificates = [{ "certfile": "/etc/origin/master/custom1.crt", + "keyfile": "/etc/origin/master/custom1.key" }, + { "certfile": "custom2.crt", + "keyfile": "custom2.key" }] + + returns [{ "certfile": "/etc/origin/master/custom1.crt", + "keyfile": "/etc/origin/master/custom1.key", + "names": [ "public-master-host.com", + "other-master-host.com" ] }, + { "certfile": "/etc/origin/master/custom2.crt", + "keyfile": "/etc/origin/master/custom2.key", + "names": [ "some-hostname.com" ] }] + ''' + if not issubclass(type(certificates), list): + raise errors.AnsibleFilterError("|failed expects certificates is a list") + + if not issubclass(type(data_dir), unicode): + raise errors.AnsibleFilterError("|failed expects data_dir is unicode") + + if not issubclass(type(internal_hostnames), list): + raise errors.AnsibleFilterError("|failed expects internal_hostnames is list") + + for certificate in certificates: + if 'names' in certificate.keys(): + continue + else: + certificate['names'] = [] + + if not os.path.isfile(certificate['certfile']) or not os.path.isfile(certificate['keyfile']): + raise errors.AnsibleFilterError("|certificate and/or key does not exist '%s', '%s'" % + (certificate['certfile'], certificate['keyfile'])) + + try: + st_cert = open(certificate['certfile'], 'rt').read() + cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, st_cert) + certificate['names'].append(str(cert.get_subject().commonName.decode())) + for i in range(cert.get_extension_count()): + if cert.get_extension(i).get_short_name() == 'subjectAltName': + for name in str(cert.get_extension(i)).replace('DNS:', '').split(', '): + certificate['names'].append(name) + except: + raise errors.AnsibleFilterError(("|failed to parse certificate '%s', " % certificate['certfile'] + + "please specify certificate names in host inventory")) + + certificate['names'] = [name for name in certificate['names'] if name not in internal_hostnames] + certificate['names'] = list(set(certificate['names'])) + if not certificate['names']: + raise errors.AnsibleFilterError(("|failed to parse certificate '%s' or " % certificate['certfile'] + + "detected a collision with internal hostname, please specify " + + "certificate names in host inventory")) + return certificates + + @staticmethod + def oo_pretty_print_cluster(data): + ''' Read a subset of hostvars and build a summary of the cluster + in the following layout: + +"c_id": { + "master": [ + { "name": "c_id-master-12345", "public IP": "172.16.0.1", "private IP": "192.168.0.1", "subtype": "default" }] + "node": [ + { "name": "c_id-node-infra-23456", "public IP": "172.16.0.2", "private IP": "192.168.0.2", "subtype": "infra" }, + { "name": "c_id-node-compute-23456", "public IP": "172.16.0.3", "private IP": "192.168.0.3", "subtype": "compute" }, + ... + ]} + ''' + + def _get_tag_value(tags, key): + ''' Extract values of a map implemented as a set. 
+ Ex: tags = { 'tag_foo_value1', 'tag_bar_value2', 'tag_baz_value3' } + key = 'bar' + returns 'value2' + ''' + for tag in tags: + # Skip tag_env-host-type to avoid ambiguity with tag_env + if tag[:17] == 'tag_env-host-type': + continue + if tag[:len(key)+4] == 'tag_' + key: + return tag[len(key)+5:] + raise KeyError(key) + + def _add_host(clusters, + env, + host_type, + sub_host_type, + host): + ''' Add a new host in the clusters data structure ''' + if env not in clusters: + clusters[env] = {} + if host_type not in clusters[env]: + clusters[env][host_type] = {} + if sub_host_type not in clusters[env][host_type]: + clusters[env][host_type][sub_host_type] = [] + clusters[env][host_type][sub_host_type].append(host) + + clusters = {} + for host in data: + try: + _add_host(clusters=clusters, + env=_get_tag_value(host['group_names'], 'env'), + host_type=_get_tag_value(host['group_names'], 'host-type'), + sub_host_type=_get_tag_value(host['group_names'], 'sub-host-type'), + host={'name': host['inventory_hostname'], + 'public IP': host['ansible_ssh_host'], + 'private IP': host['ansible_default_ipv4']['address']}) + except KeyError: + pass + return clusters + def filters(self): ''' returns a mapping of filters to methods ''' return { @@ -342,5 +473,8 @@ class FilterModule(object): "oo_combine_dict": self.oo_combine_dict, "oo_split": self.oo_split, "oo_filter_list": self.oo_filter_list, - "oo_parse_heat_stack_outputs": self.oo_parse_heat_stack_outputs + "oo_parse_heat_stack_outputs": self.oo_parse_heat_stack_outputs, + "oo_parse_certificate_names": self.oo_parse_certificate_names, + "oo_haproxy_backend_masters": self.oo_haproxy_backend_masters, + "oo_pretty_print_cluster": self.oo_pretty_print_cluster } diff --git a/filter_plugins/oo_zabbix_filters.py b/filter_plugins/oo_zabbix_filters.py index c44b874e8..fcfe43777 100644 --- a/filter_plugins/oo_zabbix_filters.py +++ b/filter_plugins/oo_zabbix_filters.py @@ -95,6 +95,54 @@ class FilterModule(object): return data + @staticmethod + def itservice_results_builder(data, clusters, keys): + '''Take a list of dict results, + loop through each results and create a hash + of: + [{clusterid: cluster1, key: 111 }] + ''' + r_list = [] + for cluster in clusters: + for results in data: + if cluster == results['item'][0]: + results = results['results'] + if results and len(results) > 0 and all([results[0].has_key(_key) for _key in keys]): + tmp = {} + tmp['clusterid'] = cluster + for key in keys: + tmp[key] = results[0][key] + r_list.append(tmp) + + return r_list + + @staticmethod + def itservice_dependency_builder(data, cluster): + '''Take a list of dict results, + loop through each results and create a hash + of: + [{clusterid: cluster1, key: 111 }] + ''' + r_list = [] + for dep in data: + if cluster == dep['clusterid']: + r_list.append({'name': '%s - %s' % (dep['clusterid'], dep['description']), 'dep_type': 'hard'}) + + return r_list + + @staticmethod + def itservice_dep_builder_list(data): + '''Take a list of dict results, + loop through each results and create a hash + of: + [{clusterid: cluster1, key: 111 }] + ''' + r_list = [] + for dep in data: + r_list.append({'name': '%s' % dep, 'dep_type': 'hard'}) + + return r_list + def filters(self): ''' returns a mapping of filters to methods ''' return { @@ -105,4 +153,7 @@ class FilterModule(object): "create_data": self.create_data, "oo_build_zabbix_collect": self.oo_build_zabbix_collect, "oo_remove_attr_from_list_dict": self.oo_remove_attr_from_list_dict, + "itservice_results_builder": 
self.itservice_results_builder, + "itservice_dependency_builder": self.itservice_dependency_builder, + "itservice_dep_builder_list": self.itservice_dep_builder_list, } diff --git a/git/pylint.sh b/git/pylint.sh index 55e8b6131..f29c055dc 100755 --- a/git/pylint.sh +++ b/git/pylint.sh @@ -40,6 +40,8 @@ for PY_FILE in $PY_DIFF; do fi done +export PYTHONPATH=${WORKSPACE}/utils/src/:${WORKSPACE}/utils/test/ + if [ "${FILES_TO_TEST}" != "" ]; then echo "Testing files: ${FILES_TO_TEST}" exec ${PYTHON} -m pylint --rcfile ${WORKSPACE}/git/.pylintrc ${FILES_TO_TEST} diff --git a/inventory/byo/hosts.example b/inventory/byo/hosts.example index ad19fe116..56bbb9612 100644 --- a/inventory/byo/hosts.example +++ b/inventory/byo/hosts.example @@ -5,6 +5,7 @@ masters nodes etcd +lb # Set variables common for all OSEv3 hosts [OSEv3:vars] @@ -41,6 +42,9 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # Allow all auth #openshift_master_identity_providers=[{'name': 'allow_all', 'login': 'true', 'challenge': 'true', 'kind': 'AllowAllPasswordIdentityProvider'}] +# LDAP auth +#openshift_master_identity_providers=[{'name': 'my_ldap_provider', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider', 'attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': '', 'bindPassword': '', 'ca': '', 'insecure': 'false', 'url': 'ldap://ldap.example.com:389/ou=users,dc=example,dc=com?uid'}] + # Project Configuration #osm_project_request_message='' #osm_project_request_template='' @@ -57,21 +61,29 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # Set cockpit plugins #osm_cockpit_plugins=['cockpit-kubernetes'] -# master cluster ha variables using pacemaker or RHEL HA +# Native high availbility cluster method with optional load balancer. +# If no lb group is defined installer assumes that a load balancer has +# been preconfigured. For installation the value of +# openshift_master_cluster_hostname must resolve to the load balancer +# or to one or all of the masters defined in the inventory if no load +# balancer is present. +#openshift_master_cluster_method=native +#openshift_master_cluster_hostname=openshift-ansible.test.example.com +#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com + +# Pacemaker high availability cluster method. +# Pacemaker HA environment must be able to self provision the +# configured VIP. For installation openshift_master_cluster_hostname +# must resolve to the configured VIP. +#openshift_master_cluster_method=pacemaker #openshift_master_cluster_password=openshift_cluster #openshift_master_cluster_vip=192.168.133.25 #openshift_master_cluster_public_vip=192.168.133.25 #openshift_master_cluster_hostname=openshift-ansible.test.example.com #openshift_master_cluster_public_hostname=openshift-ansible.test.example.com -# master cluster ha variables when using a different HA solution -# For installation the value of openshift_master_cluster_hostname must resolve -# to the first master defined in the inventory. -# The HA solution must be manually configured after installation and must ensure -# that the master is running on a single master host. 
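The new `oo_haproxy_backend_masters` filter shown earlier in `filter_plugins/oo_filters.py` builds an haproxy backend list from per-master facts. A standalone sketch of the same logic, using hypothetical hostvars:

```python
# Hypothetical input: the subset of per-host facts the filter reads.
hosts = [
    {'openshift': {'common': {'ip': '192.168.0.1'}, 'master': {'api_port': 8443}}},
    {'openshift': {'common': {'ip': '192.168.0.2'}, 'master': {'api_port': 8443}}},
]

servers = []
for idx, host_info in enumerate(hosts):
    server = dict(name="master%s" % idx)          # master0, master1, ...
    server_ip = host_info['openshift']['common']['ip']
    server_port = host_info['openshift']['master']['api_port']
    server['address'] = "%s:%s" % (server_ip, server_port)
    server['opts'] = 'check'                      # enable haproxy health checks
    servers.append(server)

# servers == [{'name': 'master0', 'address': '192.168.0.1:8443', 'opts': 'check'},
#             {'name': 'master1', 'address': '192.168.0.2:8443', 'opts': 'check'}]
```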
-#openshift_master_cluster_hostname=openshift-ansible.test.example.com -#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com -#openshift_master_cluster_defer_ha=True +# Override the default controller lease ttl +#osm_controller_lease_ttl=30 # default subdomain to use for exposed routes #osm_default_subdomain=apps.test.example.com @@ -99,6 +111,30 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # set RPM version for debugging purposes #openshift_pkg_version=-3.0.0.0 +# Configure custom master certificates +#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key"}] +# Detected names may be overridden by specifying the "names" key +#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "names": ["public-master-host.com"]}] + +# Session options +#openshift_master_session_name=ssn +#openshift_master_session_max_seconds=3600 + +# An authentication and encryption secret will be generated if secrets +# are not provided. If provided, openshift_master_session_auth_secrets +# and openshift_master_encryption_secrets must be equal length. +# +# Signing secrets, used to authenticate sessions using +# HMAC. Recommended to use secrets with 32 or 64 bytes. +#openshift_master_session_auth_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO'] +# +# Encrypting secrets, used to encrypt sessions. Must be 16, 24, or 32 +# characters long, to select AES-128, AES-192, or AES-256. +#openshift_master_session_encryption_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO'] + +# configure how often node iptables rules are refreshed +#openshift_node_iptables_sync_period=5s + # host group for masters [masters] ose3-master[1:3]-ansible.test.example.com @@ -106,6 +142,9 @@ ose3-master[1:3]-ansible.test.example.com [etcd] ose3-etcd[1:3]-ansible.test.example.com +[lb] +ose3-lb-ansible.test.example.com + # NOTE: Currently we require that masters be part of the SDN which requires that they also be nodes # However, in order to ensure that your masters are not burdened with running pods you should # make them unschedulable by adding openshift_scheduleable=False any node that's also a master. diff --git a/inventory/gce/hosts/gce.py b/inventory/gce/hosts/gce.py index 6ed12e011..99746cdbf 100755 --- a/inventory/gce/hosts/gce.py +++ b/inventory/gce/hosts/gce.py @@ -66,12 +66,22 @@ Examples: $ ansible -i gce.py us-central1-a -m shell -a "/bin/uname -a" Use the GCE inventory script to print out instance specific information - $ plugins/inventory/gce.py --host my_instance + $ contrib/inventory/gce.py --host my_instance Author: Eric Johnson <erjohnso@google.com> Version: 0.0.1 ''' +__requires__ = ['pycrypto>=2.6'] +try: + import pkg_resources +except ImportError: + # Use pkg_resources to find the correct versions of libraries and set + # sys.path appropriately when there are multiversion installs. We don't + # fail here as there is code that better expresses the errors where the + # library is used. 
+ pass + USER_AGENT_PRODUCT="Ansible-gce_inventory_plugin" USER_AGENT_VERSION="v1" @@ -102,9 +112,9 @@ class GceInventory(object): # Just display data for specific host if self.args.host: - print self.json_format_dict(self.node_to_dict( + print(self.json_format_dict(self.node_to_dict( self.get_instance(self.args.host)), - pretty=self.args.pretty) + pretty=self.args.pretty)) sys.exit(0) # Otherwise, assume user wants all instances grouped @@ -120,7 +130,6 @@ class GceInventory(object): os.path.dirname(os.path.realpath(__file__)), "gce.ini") gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path) - # Create a ConfigParser. # This provides empty defaults to each key, so that environment # variable configuration (as opposed to INI configuration) is able @@ -174,7 +183,6 @@ class GceInventory(object): args[1] = os.environ.get('GCE_PEM_FILE_PATH', args[1]) kwargs['project'] = os.environ.get('GCE_PROJECT', kwargs['project']) - # Retrieve and return the GCE driver. gce = get_driver(Provider.GCE)(*args, **kwargs) gce.connection.user_agent_append( @@ -213,8 +221,7 @@ class GceInventory(object): 'gce_image': inst.image, 'gce_machine_type': inst.size, 'gce_private_ip': inst.private_ips[0], - # Hosts don't always have a public IP name - #'gce_public_ip': inst.public_ips[0], + 'gce_public_ip': inst.public_ips[0] if len(inst.public_ips) >= 1 else None, 'gce_name': inst.name, 'gce_description': inst.extra['description'], 'gce_status': inst.extra['status'], @@ -222,15 +229,15 @@ class GceInventory(object): 'gce_tags': inst.extra['tags'], 'gce_metadata': md, 'gce_network': net, - # Hosts don't always have a public IP name - #'ansible_ssh_host': inst.public_ips[0] + # Hosts don't have a public name, so we add an IP + 'ansible_ssh_host': inst.public_ips[0] if len(inst.public_ips) >= 1 else inst.private_ips[0] } def get_instance(self, instance_name): '''Gets details about a specific instance ''' try: return self.driver.ex_get_node(instance_name) - except Exception, e: + except Exception as e: return None def group_instances(self): @@ -250,7 +257,10 @@ class GceInventory(object): tags = node.extra['tags'] for t in tags: - tag = 'tag_%s' % t + if t.startswith('group-'): + tag = t[6:] + else: + tag = 'tag_%s' % t if groups.has_key(tag): groups[tag].append(name) else: groups[tag] = [name] diff --git a/inventory/multi_ec2.yaml.example b/inventory/multi_ec2.yaml.example deleted file mode 100644 index bbd81ad20..000000000 --- a/inventory/multi_ec2.yaml.example +++ /dev/null @@ -1,32 +0,0 @@ -# multi ec2 inventory configs -# -cache_location: ~/.ansible/tmp/multi_ec2_inventory.cache - -accounts: - - name: aws1 - provider: aws/hosts/ec2.py - provider_config: - ec2: - regions: all - regions_exclude: us-gov-west-1,cn-north-1 - destination_variable: public_dns_name - route53: False - cache_path: ~/.ansible/tmp - cache_max_age: 300 - vpc_destination_variable: ip_address - env_vars: - AWS_ACCESS_KEY_ID: XXXXXXXXXXXXXXXXXXXX - AWS_SECRET_ACCESS_KEY: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX - all_group: ec2 - extra_vars: - cloud: aws - account: aws1 - -- name: aws2 - provider: aws/hosts/ec2.py - env_vars: - AWS_ACCESS_KEY_ID: XXXXXXXXXXXXXXXXXXXX - AWS_SECRET_ACCESS_KEY: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX - EC2_INI_PATH: /etc/ansible/ec2.ini - -cache_max_age: 60 diff --git a/inventory/multi_ec2.py b/inventory/multi_inventory.py index 98dde3f3c..354a8c10c 100755 --- a/inventory/multi_ec2.py +++ b/inventory/multi_inventory.py @@ -1,6 +1,6 @@ #!/usr/bin/env python2 ''' - Fetch and combine multiple ec2 account 
settings into a single + Fetch and combine multiple inventory account settings into a single json hash. ''' # vim: expandtab:tabstop=4:shiftwidth=4 @@ -15,13 +15,19 @@ import errno import fcntl import tempfile import copy +from string import Template +import shutil -CONFIG_FILE_NAME = 'multi_ec2.yaml' -DEFAULT_CACHE_PATH = os.path.expanduser('~/.ansible/tmp/multi_ec2_inventory.cache') +CONFIG_FILE_NAME = 'multi_inventory.yaml' +DEFAULT_CACHE_PATH = os.path.expanduser('~/.ansible/tmp/multi_inventory.cache') -class MultiEc2(object): +class MultiInventoryException(Exception): + '''Exceptions for MultiInventory class''' + pass + +class MultiInventory(object): ''' - MultiEc2 class: + MultiInventory class: Opens a yaml config file and reads aws credentials. Stores a json hash of resources in result. ''' @@ -35,7 +41,7 @@ class MultiEc2(object): self.cache_path = DEFAULT_CACHE_PATH self.config = None - self.all_ec2_results = {} + self.all_inventory_results = {} self.result = {} self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__))) @@ -56,7 +62,7 @@ class MultiEc2(object): cache is valid for the inventory. if the cache is valid; return cache - else the credentials are loaded from multi_ec2.yaml or from the env + else the credentials are loaded from multi_inventory.yaml or from the env and we attempt to get the inventory from the provider specified. ''' # load yaml @@ -111,6 +117,10 @@ class MultiEc2(object): with open(conf_file) as conf: config = yaml.safe_load(conf) + # Provide a check for unique account names + if len(set([acc['name'] for acc in config['accounts']])) != len(config['accounts']): + raise MultiInventoryException('Duplicate account names in config file') + return config def get_provider_tags(self, provider, env=None): @@ -136,23 +146,25 @@ class MultiEc2(object): else: cmds.append('--list') - cmds.append('--refresh-cache') + if 'aws' in provider.lower(): + cmds.append('--refresh-cache') return subprocess.Popen(cmds, stderr=subprocess.PIPE, \ stdout=subprocess.PIPE, env=env) @staticmethod - def generate_config(config_data): - """Generate the ec2.ini file in as a secure temp file. - Once generated, pass it to the ec2.py as an environment variable. + def generate_config(provider_files): + """Generate the provider_files in a temporary directory. """ - fildes, tmp_file_path = tempfile.mkstemp(prefix='multi_ec2.ini.') - for section, values in config_data.items(): - os.write(fildes, "[%s]\n" % section) - for option, value in values.items(): - os.write(fildes, "%s = %s\n" % (option, value)) - os.close(fildes) - return tmp_file_path + prefix = 'multi_inventory.' 
+ tmp_dir_path = tempfile.mkdtemp(prefix=prefix) + for provider_file in provider_files: + filedes = open(os.path.join(tmp_dir_path, provider_file['name']), 'w+') + content = Template(provider_file['contents']).substitute(tmpdir=tmp_dir_path) + filedes.write(content) + filedes.close() + + return tmp_dir_path def run_provider(self): '''Setup the provider call with proper variables @@ -160,13 +172,21 @@ class MultiEc2(object): ''' try: all_results = [] - tmp_file_paths = [] + tmp_dir_paths = [] processes = {} for account in self.config['accounts']: - env = account['env_vars'] - if account.has_key('provider_config'): - tmp_file_paths.append(MultiEc2.generate_config(account['provider_config'])) - env['EC2_INI_PATH'] = tmp_file_paths[-1] + tmp_dir = None + if account.has_key('provider_files'): + tmp_dir = MultiInventory.generate_config(account['provider_files']) + tmp_dir_paths.append(tmp_dir) + + # Update env vars after creating provider_config_files + # so that we can grab the tmp_dir if it exists + env = account.get('env_vars', {}) + if env and tmp_dir: + for key, value in env.items(): + env[key] = Template(value).substitute(tmpdir=tmp_dir) + name = account['name'] provider = account['provider'] processes[name] = self.get_provider_tags(provider, env) @@ -182,9 +202,9 @@ class MultiEc2(object): }) finally: - # Clean up the mkstemp file - for tmp_file in tmp_file_paths: - os.unlink(tmp_file) + # Clean up the mkdtemp dirs + for tmp_dir in tmp_dir_paths: + shutil.rmtree(tmp_dir) return all_results @@ -223,7 +243,7 @@ class MultiEc2(object): ] raise RuntimeError('\n'.join(err_msg).format(**result)) else: - self.all_ec2_results[result['name']] = json.loads(result['out']) + self.all_inventory_results[result['name']] = json.loads(result['out']) # Check if user wants extra vars in yaml by # having hostvars and all_group defined @@ -231,29 +251,52 @@ class MultiEc2(object): self.apply_account_config(acc_config) # Build results by merging all dictionaries - values = self.all_ec2_results.values() + values = self.all_inventory_results.values() values.insert(0, self.result) for result in values: - MultiEc2.merge_destructively(self.result, result) + MultiInventory.merge_destructively(self.result, result) + + def add_entry(self, data, keys, item): + ''' Add an item to a dictionary with key notation a.b.c + d = {'a': {'b': 'c'}}} + keys = a.b + item = c + ''' + if "." in keys: + key, rest = keys.split(".", 1) + if key not in data: + data[key] = {} + self.add_entry(data[key], rest, item) + else: + data[keys] = item + + def get_entry(self, data, keys): + ''' Get an item from a dictionary with key notation a.b.c + d = {'a': {'b': 'c'}}} + keys = a.b + return c + ''' + if keys and "." 
in keys: + key, rest = keys.split(".", 1) + return self.get_entry(data[key], rest) + else: + return data.get(keys, None) def apply_account_config(self, acc_config): ''' Apply account config settings ''' - results = self.all_ec2_results[acc_config['name']] + results = self.all_inventory_results[acc_config['name']] + results['all_hosts'] = results['_meta']['hostvars'].keys() # Update each hostvar with the newly desired key: value from extra_* - for _extra in ['extra_groups', 'extra_vars']: + for _extra in ['extra_vars', 'extra_groups']: for new_var, value in acc_config.get(_extra, {}).items(): - # Verify the account results look sane - # by checking for these keys ('_meta' and 'hostvars' exist) - if results.has_key('_meta') and results['_meta'].has_key('hostvars'): - for data in results['_meta']['hostvars'].values(): - data[str(new_var)] = str(value) + for data in results['_meta']['hostvars'].values(): + self.add_entry(data, new_var, value) # Add this group - if _extra == 'extra_groups' and results.has_key(acc_config['all_group']): - results["%s_%s" % (new_var, value)] = \ - copy.copy(results[acc_config['all_group']]) + if _extra == 'extra_groups': + results["%s_%s" % (new_var, value)] = copy.copy(results['all_hosts']) # Clone groups goes here for to_name, from_name in acc_config.get('clone_groups', {}).items(): @@ -262,14 +305,11 @@ class MultiEc2(object): # Clone vars goes here for to_name, from_name in acc_config.get('clone_vars', {}).items(): - # Verify the account results look sane - # by checking for these keys ('_meta' and 'hostvars' exist) - if results.has_key('_meta') and results['_meta'].has_key('hostvars'): - for data in results['_meta']['hostvars'].values(): - data[str(to_name)] = data.get(str(from_name), 'nil') + for data in results['_meta']['hostvars'].values(): + self.add_entry(data, to_name, self.get_entry(data, from_name)) - # store the results back into all_ec2_results - self.all_ec2_results[acc_config['name']] = results + # store the results back into all_inventory_results + self.all_inventory_results[acc_config['name']] = results @staticmethod def merge_destructively(input_a, input_b): @@ -277,7 +317,7 @@ class MultiEc2(object): for key in input_b: if key in input_a: if isinstance(input_a[key], dict) and isinstance(input_b[key], dict): - MultiEc2.merge_destructively(input_a[key], input_b[key]) + MultiInventory.merge_destructively(input_a[key], input_b[key]) elif input_a[key] == input_b[key]: pass # same leaf value # both lists so add each element in b to a if it does ! 
exist @@ -333,7 +373,7 @@ class MultiEc2(object): if exc.errno != errno.EEXIST or not os.path.isdir(path): raise - json_data = MultiEc2.json_format_dict(self.result, True) + json_data = MultiInventory.json_format_dict(self.result, True) with open(self.cache_path, 'w') as cache: try: fcntl.flock(cache, fcntl.LOCK_EX) @@ -369,7 +409,7 @@ class MultiEc2(object): if __name__ == "__main__": - MEC2 = MultiEc2() - MEC2.parse_cli_args() - MEC2.run() - print MEC2.result_str() + MI2 = MultiInventory() + MI2.parse_cli_args() + MI2.run() + print MI2.result_str() diff --git a/inventory/multi_inventory.yaml.example b/inventory/multi_inventory.yaml.example new file mode 100644 index 000000000..0f0788d18 --- /dev/null +++ b/inventory/multi_inventory.yaml.example @@ -0,0 +1,51 @@ +# multi ec2 inventory configs +# +cache_location: ~/.ansible/tmp/multi_inventory.cache + +accounts: + - name: aws1 + provider: aws/ec2.py + provider_files: + - name: ec2.ini + content: |- + [ec2] + regions = all + regions_exclude = us-gov-west-1,cn-north-1 + destination_variable = public_dns_name + route53 = False + cache_path = ~/.ansible/tmp + cache_max_age = 300 + vpc_destination_variable = ip_address + env_vars: + AWS_ACCESS_KEY_ID: XXXXXXXXXXXXXXXXXXXX + AWS_SECRET_ACCESS_KEY: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX + EC2_INI_PATH: ${tmpdir}/ec2.ini # we replace ${tmpdir} with the temporary directory that we've created for the provider. + extra_vars: + cloud: aws + account: aws1 + +- name: mygce + extra_vars: + cloud: gce + account: gce1 + env_vars: + GCE_INI_PATH: ${tmpdir}/gce.ini # we replace ${tmpdir} with the temporary directory that we've created for the provider. + provider: gce/gce.py + provider_files: + - name: priv_key.pem + contents: |- + -----BEGIN PRIVATE KEY----- + yourprivatekeydatahere + -----END PRIVATE KEY----- + - name: gce.ini + contents: |- + [gce] + gce_service_account_email_address = <uuid>@developer.gserviceaccount.com + gce_service_account_pem_file_path = ${tmpdir}/priv_key.pem # we replace ${tmpdir} with the temporary directory that we've created for the provider. 
+ gce_project_id = gce-project + zone = us-central1-a + network = default + gce_machine_type = n1-standard-2 + gce_machine_image = rhel7 + +cache_max_age: 600 diff --git a/openshift-ansible.spec b/openshift-ansible.spec index d034e6d84..21f624400 100644 --- a/openshift-ansible.spec +++ b/openshift-ansible.spec @@ -5,7 +5,7 @@ } Name: openshift-ansible -Version: 3.0.6 +Version: 3.0.12 Release: 1%{?dist} Summary: Openshift and Atomic Enterprise Ansible License: ASL 2.0 @@ -47,9 +47,9 @@ cp -pP bin/openshift_ansible/* %{buildroot}%{python_sitelib}/openshift_ansible cp -p bin/ossh_bash_completion %{buildroot}/etc/bash_completion.d cp -p bin/openshift_ansible.conf.example %{buildroot}/etc/openshift_ansible/openshift_ansible.conf # Fix links -rm -f %{buildroot}%{python_sitelib}/openshift_ansible/multi_ec2.py +rm -f %{buildroot}%{python_sitelib}/openshift_ansible/multi_inventory.py rm -f %{buildroot}%{python_sitelib}/openshift_ansible/aws -ln -sf %{_datadir}/ansible/inventory/multi_ec2.py %{buildroot}%{python_sitelib}/openshift_ansible/multi_ec2.py +ln -sf %{_datadir}/ansible/inventory/multi_inventory.py %{buildroot}%{python_sitelib}/openshift_ansible/multi_inventory.py ln -sf %{_datadir}/ansible/inventory/aws %{buildroot}%{python_sitelib}/openshift_ansible/aws # openshift-ansible-docs install @@ -60,8 +60,8 @@ mkdir -p %{buildroot}/etc/ansible mkdir -p %{buildroot}%{_datadir}/ansible/inventory mkdir -p %{buildroot}%{_datadir}/ansible/inventory/aws mkdir -p %{buildroot}%{_datadir}/ansible/inventory/gce -cp -p inventory/multi_ec2.py %{buildroot}%{_datadir}/ansible/inventory -cp -p inventory/multi_ec2.yaml.example %{buildroot}/etc/ansible/multi_ec2.yaml +cp -p inventory/multi_inventory.py %{buildroot}%{_datadir}/ansible/inventory +cp -p inventory/multi_inventory.yaml.example %{buildroot}/etc/ansible/multi_inventory.yaml cp -p inventory/aws/hosts/ec2.py %{buildroot}%{_datadir}/ansible/inventory/aws cp -p inventory/gce/hosts/gce.py %{buildroot}%{_datadir}/ansible/inventory/gce @@ -82,6 +82,8 @@ pushd utils %{__python} setup.py install --skip-build --root %{buildroot} # Remove this line once the name change has happened mv -f %{buildroot}%{_bindir}/oo-install %{buildroot}%{_bindir}/atomic-openshift-installer +mkdir -p %{buildroot}%{_datadir}/atomic-openshift-utils/ +cp etc/ansible.cfg %{buildroot}%{_datadir}/atomic-openshift-utils/ansible.cfg popd # Base openshift-ansible files @@ -104,6 +106,7 @@ Scripts to make it nicer when working with hosts that are defined only by metada %files bin %{_bindir}/* +%exclude %{_bindir}/atomic-openshift-installer %{python_sitelib}/openshift_ansible/ /etc/bash_completion.d/* %config(noreplace) /etc/openshift_ansible/ @@ -137,7 +140,7 @@ Ansible Inventories used with the openshift-ansible scripts and playbooks. 
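The `${tmpdir}` placeholders in `multi_inventory.yaml.example` above are expanded by `MultiInventory.generate_config`, which writes each provider file into a temporary directory and substitutes the directory path via `string.Template`. A minimal sketch of that substitution, with hypothetical values:

```python
from string import Template

# Hypothetical provider-file contents, as in multi_inventory.yaml.example.
contents = "gce_service_account_pem_file_path = ${tmpdir}/priv_key.pem"

# Assumption: a tempfile.mkdtemp(prefix='multi_inventory.') result.
tmp_dir_path = "/tmp/multi_inventory.abc123"

print(Template(contents).substitute(tmpdir=tmp_dir_path))
# gce_service_account_pem_file_path = /tmp/multi_inventory.abc123/priv_key.pem
```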
%files inventory %config(noreplace) /etc/ansible/* %dir %{_datadir}/ansible/inventory -%{_datadir}/ansible/inventory/multi_ec2.py* +%{_datadir}/ansible/inventory/multi_inventory.py* %package inventory-aws Summary: Openshift and Atomic Enterprise Ansible Inventories for AWS @@ -170,6 +173,9 @@ Ansible Inventories for GCE used with the openshift-ansible scripts and playbook %package playbooks Summary: Openshift and Atomic Enterprise Ansible Playbooks Requires: %{name} +Requires: %{name}-roles +Requires: %{name}-lookup-plugins +Requires: %{name}-filter-plugins BuildArch: noarch %description playbooks @@ -185,6 +191,8 @@ BuildArch: noarch %package roles Summary: Openshift and Atomic Enterprise Ansible roles Requires: %{name} +Requires: %{name}-lookup-plugins +Requires: %{name}-filter-plugins BuildArch: noarch %description roles @@ -246,9 +254,226 @@ Atomic OpenShift Utilities includes %files -n atomic-openshift-utils %{python_sitelib}/ooinstall* %{_bindir}/atomic-openshift-installer +%{_datadir}/atomic-openshift-utils/ansible.cfg %changelog +* Wed Nov 11 2015 Brenton Leanhardt <bleanhar@redhat.com> 3.0.12-1 +- Sync with the latest image streams (sdodson@redhat.com) + +* Wed Nov 11 2015 Brenton Leanhardt <bleanhar@redhat.com> 3.0.11-1 +- Migrate xpaas content from pre v1.1.0 (sdodson@redhat.com) +- Import latest xpaas templates and image streams (sdodson@redhat.com) + +* Wed Nov 11 2015 Brenton Leanhardt <bleanhar@redhat.com> 3.0.10-1 +- Fix update error for templates that didn't previously exist + (jdetiber@redhat.com) +- General cleanup of v3_0_to_v3_1/upgrade.yml (jdetiber@redhat.com) +- Add zabbix pieces to hold AWS S3 bucket stats (jdiaz@redhat.com) +- add ansible dep to vagrant doc (jdetiber@redhat.com) +- oo_filter: don't fail when attribute is not defined (tob@butter.sh) + +* Wed Nov 11 2015 Brenton Leanhardt <bleanhar@redhat.com> 3.0.9-1 +- Refactor upgrade playbook(s) (jdetiber@redhat.com) + +* Tue Nov 10 2015 Scott Dodson <sdodson@redhat.com> 3.0.8-1 +- Add origin-clients to uninstall playbook. (abutcher@redhat.com) +- examples: include logging and metrics infrastructure (lmeyer@redhat.com) +- Add separate step to enable services during upgrade. (dgoodwin@redhat.com) +- Update tests now that cli is not asking for rpm/container install + (smunilla@redhat.com) +- atomic-openshift-installer: Remove question for container install + (smunilla@redhat.com) +- Remove references to multi_ec2.py (jdetiber@redhat.com) +- 1279746: Fix leftover disabled features line in config template. + (dgoodwin@redhat.com) +- 1279734: Ensure services are enabled after upgrade. (dgoodwin@redhat.com) +- Fix missing etcd_data_dir bug. (dgoodwin@redhat.com) +- Package the default ansible.cfg with atomic-openshift-utils. + (dgoodwin@redhat.com) +- Add ldap auth identity provider to example inventory. (abutcher@redhat.com) +- Read etcd data dir from appropriate config file. (dgoodwin@redhat.com) +- atomic-openshift-installer: Generate inventory off hosts_to_run_on + (smunilla@redhat.com) +- Various fixes related to connect_to (bleanhar@redhat.com) +- Remove upgrade playbook restriction on 3.0.2. (dgoodwin@redhat.com) +- Conditionals for flannel etcd client certs. (abutcher@redhat.com) +- New `iptablesSyncPeriod` field in node configuration (abutcher@redhat.com) +- Fix indentation on when (jdetiber@redhat.com) +- Bug 1278863 - Error using openshift_pkg_version (jdetiber@redhat.com) +- more cleanup of names (mwoodson@redhat.com) +- Missing conditionals for api/controller sysconfig. 
(abutcher@redhat.com) +- Updating the atomic-openshift-isntaller local connection logic for the + connect_to addition. (bleanhar@redhat.com) +- cleaned up network checks (mwoodson@redhat.com) +- Minor upgrade improvements. (dgoodwin@redhat.com) +- Wait for cluster to recover after pcs resource restart. (abutcher@redhat.com) +- Bug 1278245 - Failed to add node to existing env using atomic-openshift- + installer (bleanhar@redhat.com) +- remove debug statement (jdetiber@redhat.com) +- Fix removal of kubernetesMasterConfig.apiLevels (jdetiber@redhat.com) +- atomic-openshift-installer: Better specification of ansible connection point + (smunilla@redhat.com) +- Fix issues related to upgrade packages being unavailable + (jdetiber@redhat.com) +- added network checks. also updated item prototype code to support more + (mwoodson@redhat.com) +- Fix data_dir for 3.0 deployments (jdetiber@redhat.com) +- Fix apiLevels modifications (jdetiber@redhat.com) +- Fix creation of origin symlink when dir already exists. (dgoodwin@redhat.com) +- apiLevel changes (jdetiber@redhat.com) +- Write new config to disk after successful upgrade. (dgoodwin@redhat.com) +- Fix pylint errors with getting hosts to run on. (dgoodwin@redhat.com) +- Remove v1beta3 by default for kube_nfs_volumes (jdetiber@redhat.com) +- Add pre-upgrade script to be run on first master. (dgoodwin@redhat.com) +- Start to handle pacemaker ha during upgrade (abutcher@redhat.com) +- Fix lb group related errors (jdetiber@redhat.com) +- Fix file check conditional. (abutcher@redhat.com) +- Don't check for certs in data_dir just raise when they can't be found. Fix + typo. (abutcher@redhat.com) +- exclude atomic-openshift-installer from bin subpackage (tdawson@redhat.com) +- add master_hostnames definition for upgrade (jdetiber@redhat.com) +- Additional upgrade enhancements (jdetiber@redhat.com) +- Handle backups for separate etcd hosts if necessary. (dgoodwin@redhat.com) +- Further upgrade improvements (jdetiber@redhat.com) +- Upgrade improvements (dgoodwin@redhat.com) +- Bug 1278243 - Confusing prompt from atomic-openshift-installer + (bleanhar@redhat.com) +- Bug 1278244 - Previously there was no way to add a node in unattended mode + (bleanhar@redhat.com) +- Revert to defaults (abutcher@redhat.com) +- Bug 1278244 - Incorrect node information gathered by atomic-openshift- + installer (bleanhar@redhat.com) +- atomic-openshift-installer's unattended mode wasn't work with --force for all + cases (bleanhar@redhat.com) +- Making it easier to use pre-release content (bleanhar@redhat.com) +- The uninstall playbook needs to remove /run/openshift-sdn + (bleanhar@redhat.com) +- Various HA changes for pacemaker and native methods. 
(abutcher@redhat.com) +- Bug 1274201 - Fixing non-root installations if using a local connection + (bleanhar@redhat.com) +- Bug 1274201 - Fixing sudo non-interactive test (bleanhar@redhat.com) +- Bug 1277592 - SDN MTU has hardcoded default (jdetiber@redhat.com) +- Atomic Enterprise/OpenShift Enterprise merge update (jdetiber@redhat.com) +- fix dueling controllers - without controllerLeaseTTL set in config, multiple + controllers will attempt to start (jdetiber@redhat.com) +- default to source persistence for haproxy (jdetiber@redhat.com) +- hardcode openshift binaries for now (jdetiber@redhat.com) +- more tweaks (jdetiber@redhat.com) +- more tweaks (jdetiber@redhat.com) +- additional ha related updates (jdetiber@redhat.com) +- additional native ha changes (abutcher@redhat.com) +- Start of true master ha (jdetiber@redhat.com) +- Atomic Enterprise related changes. (avagarwa@redhat.com) +- Remove pacemaker bits. (abutcher@redhat.com) +- Override hosts deployment_type fact for version we're upgrading to. + (dgoodwin@redhat.com) +- Pylint fixes for config upgrade module. (dgoodwin@redhat.com) +- Disable proxy cert config upgrade until certs being generated. + (dgoodwin@redhat.com) +- remove debug line (florian.lambert@enovance.com) +- [roles/openshift_master_certificates/tasks/main.yml] Fix variable + openshift.master.all_hostnames to openshift.common.all_hostnames + (florian.lambert@enovance.com) +- Fix bug with not upgrading openshift-master to atomic-openshift-master. + (dgoodwin@redhat.com) +- Adding aws and gce packages to ansible-inventory (kwoodson@redhat.com) +- Fix subpackage dependencies (jdetiber@redhat.com) +- Refactor common group evaluation to avoid duplication (jdetiber@redhat.com) +- common/openshift-cluster: Scaleup playbook (smunilla@redhat.com) +- Fix bug from module rename. (dgoodwin@redhat.com) +- Fix bug with default ansible playbook dir. (dgoodwin@redhat.com) +- Use the base package upgrade version so we can check things earlier. + (dgoodwin@redhat.com) +- Skip fail if enterprise deployment type depending on version. + (dgoodwin@redhat.com) +- Add debug output for location of etcd backup. (dgoodwin@redhat.com) +- Filter internal hostnames from the list of parsed names. + (abutcher@redhat.com) +- Move config upgrade to correct place, fix node facts. (dgoodwin@redhat.com) +- Add custom certificates to serving info in master configuration. + (abutcher@redhat.com) +- Add in proxyClientInfo if missing during config upgrade. + (dgoodwin@redhat.com) +- Implement master-config.yaml upgrade for v1beta3 apiLevel removal. + (dgoodwin@redhat.com) +- Fix installer upgrade bug following pylint fix. (dgoodwin@redhat.com) +- Document the new version field for installer config. (dgoodwin@redhat.com) +- Remove my username from some test data. (dgoodwin@redhat.com) +- Add a simple version for the installer config file. (dgoodwin@redhat.com) +- Pylint fix. (dgoodwin@redhat.com) +- Fix issue with master.proxy-client.{crt,key} and omit. (abutcher@redhat.com) +- initial module framework (jdetiber@redhat.com) +- Better info prior to initiating upgrade. (dgoodwin@redhat.com) +- Fix etcd backup bug with not-yet-created /var/lib/origin symlink + (dgoodwin@redhat.com) +- Print info after upgrade completes. (dgoodwin@redhat.com) +- Automatically upgrade legacy config files. (dgoodwin@redhat.com) +- Remove devel fail and let upgrade proceed. (dgoodwin@redhat.com) +- Add utils subpackage missing dep on openshift-ansible-roles. + (dgoodwin@redhat.com) +- Generate timestamped etcd backups. 
(dgoodwin@redhat.com) +- Add etcd_data_dir fact. (dgoodwin@redhat.com) +- Functional disk space checking for etcd backup. (dgoodwin@redhat.com) +- First cut at checking available disk space for etcd backup. + (dgoodwin@redhat.com) +- Block upgrade if targetting enterprise deployment type. (dgoodwin@redhat.com) +- Change flannel registration default values (sbaubeau@redhat.com) +- Remove empty notify section (sbaubeau@redhat.com) +- Check etcd certs exist for flannel when its support is enabled + (sbaubeau@redhat.com) +- Fix when neither use_openshift_sdn nor use_flannel are specified + (sbaubeau@redhat.com) +- Generate etcd certificats for flannel when is not embedded + (sbaubeau@redhat.com) +- Add missing 2nd true parameters to default Jinja filter (sbaubeau@redhat.com) +- Use 'command' module instead of 'shell' (sbaubeau@redhat.com) +- Add flannel modules documentation (sbaubeau@redhat.com) +- Only remove IPv4 address from docker bridge (sbaubeau@redhat.com) +- Remove multiple use_flannel fact definition (sbaubeau@redhat.com) +- Ensure openshift-sdn and flannel can't be used at the same time + (sbaubeau@redhat.com) +- Add flannel support (sbaubeau@redhat.com) + +* Wed Nov 04 2015 Kenny Woodson <kwoodson@redhat.com> 3.0.7-1 +- added the %%util in zabbix (mwoodson@redhat.com) +- atomic-openshift-installer: Correct default playbook directory + (smunilla@redhat.com) +- Support for gce (kwoodson@redhat.com) +- fixed a dumb naming mistake (mwoodson@redhat.com) +- added disk tps checks to zabbix (mwoodson@redhat.com) +- atomic-openshift-installer: Correct inaccurate prompt (smunilla@redhat.com) +- atomic-openshift-installer: Add default openshift-ansible-playbook + (smunilla@redhat.com) +- ooinstall: Add check for nopwd sudo (smunilla@redhat.com) +- ooinstall: Update local install check (smunilla@redhat.com) +- oo-install: Support running on the host to be deployed (smunilla@redhat.com) +- Moving to Openshift Etcd application (mmahut@redhat.com) +- Add all the possible servicenames to openshift_all_hostnames for masters + (sdodson@redhat.com) +- Adding openshift.node.etcd items (mmahut@redhat.com) +- Fix etcd cert generation when etcd_interface is defined (jdetiber@redhat.com) +- get zabbix ready to start tracking status of pcp (jdiaz@redhat.com) +- split inventory into subpackages (tdawson@redhat.com) +- changed the cpu alert to only alert if cpu idle more than 5x. Change alert to + warning (mwoodson@redhat.com) +- Rename install_transactions module to openshift_ansible. + (dgoodwin@redhat.com) +- atomic-openshift-installer: Text improvements (smunilla@redhat.com) +- Add utils subpackage missing dep on openshift-ansible-roles. + (dgoodwin@redhat.com) +- Disable requiretty for only the openshift user (error@ioerror.us) +- Don't require tty to run sudo (error@ioerror.us) +- Attempt to remove the various interfaces left over from an install + (bleanhar@redhat.com) +- Pulling latest gce.py module from ansible (kwoodson@redhat.com) +- Disable OpenShift features if installing Atomic Enterprise + (jdetiber@redhat.com) +- Use default playbooks if available. (dgoodwin@redhat.com) +- Add uninstall subcommand. (dgoodwin@redhat.com) +- Add subcommands to CLI. 
(dgoodwin@redhat.com) +- Remove images options in oadm command (nakayamakenjiro@gmail.com) + * Fri Oct 30 2015 Kenny Woodson <kwoodson@redhat.com> 3.0.6-1 - Adding python-boto and python-libcloud to openshift-ansible-inventory dependency (kwoodson@redhat.com) diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml index 0503b7cd4..e0dbad900 100644 --- a/playbooks/adhoc/uninstall.yml +++ b/playbooks/adhoc/uninstall.yml @@ -45,6 +45,7 @@ - origin-master-api - origin-master-controllers - origin-node + - pcsd - yum: name={{ item }} state=absent when: not is_atomic | bool @@ -58,6 +59,7 @@ - atomic-openshift-master - atomic-openshift-node - atomic-openshift-sdn-ovs + - corosync - etcd - openshift - openshift-master @@ -66,9 +68,12 @@ - openshift-sdn-ovs - openvswitch - origin + - origin-clients - origin-master - origin-node - origin-sdn-ovs + - pacemaker + - pcs - tuned-profiles-atomic-enterprise-node - tuned-profiles-atomic-openshift-node - tuned-profiles-openshift-node @@ -136,8 +141,10 @@ - file: path={{ item }} state=absent with_items: + - "~{{ ansible_ssh_user }}/.kube" - /etc/ansible/facts.d/openshift.fact - /etc/atomic-enterprise + - /etc/corosync - /etc/etcd - /etc/openshift - /etc/openshift-sdn @@ -151,9 +158,13 @@ - /etc/sysconfig/origin-master - /etc/sysconfig/origin-node - /root/.kube - - "~{{ ansible_ssh_user }}/.kube" + - /run/openshift-sdn - /usr/share/openshift/examples - /var/lib/atomic-enterprise - /var/lib/etcd - /var/lib/openshift - /var/lib/origin + - /var/lib/pacemaker + + - name: restart docker + service: name=docker state=restarted diff --git a/playbooks/adhoc/upgrades/filter_plugins b/playbooks/adhoc/upgrades/filter_plugins deleted file mode 120000 index b0b7a3414..000000000 --- a/playbooks/adhoc/upgrades/filter_plugins +++ /dev/null @@ -1 +0,0 @@ -../../../filter_plugins/
\ No newline at end of file diff --git a/playbooks/adhoc/upgrades/lookup_plugins b/playbooks/adhoc/upgrades/lookup_plugins deleted file mode 120000 index 73cafffe5..000000000 --- a/playbooks/adhoc/upgrades/lookup_plugins +++ /dev/null @@ -1 +0,0 @@ -../../../lookup_plugins/
\ No newline at end of file diff --git a/playbooks/adhoc/upgrades/roles b/playbooks/adhoc/upgrades/roles deleted file mode 120000 index e2b799b9d..000000000 --- a/playbooks/adhoc/upgrades/roles +++ /dev/null @@ -1 +0,0 @@ -../../../roles/
\ No newline at end of file diff --git a/playbooks/aws/openshift-cluster/config.yml b/playbooks/aws/openshift-cluster/config.yml index a8e3e27bb..5aa6b0f9b 100644 --- a/playbooks/aws/openshift-cluster/config.yml +++ b/playbooks/aws/openshift-cluster/config.yml @@ -11,6 +11,7 @@ - include: ../../common/openshift-cluster/config.yml vars: g_etcd_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-etcd' }}" + g_lb_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-lb' }}" g_masters_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-master' }}" g_nodes_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-node' }}" g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}" diff --git a/playbooks/aws/openshift-cluster/launch.yml b/playbooks/aws/openshift-cluster/launch.yml index 786918929..09bf34666 100644 --- a/playbooks/aws/openshift-cluster/launch.yml +++ b/playbooks/aws/openshift-cluster/launch.yml @@ -11,7 +11,7 @@ msg: Deployment type not supported for aws provider yet when: deployment_type == 'enterprise' - - include: ../../common/openshift-cluster/set_etcd_launch_facts_tasks.yml + - include: ../../common/openshift-cluster/tasks/set_etcd_launch_facts.yml - include: tasks/launch_instances.yml vars: instances: "{{ etcd_names }}" @@ -19,7 +19,7 @@ type: "{{ k8s_type }}" g_sub_host_type: "default" - - include: ../../common/openshift-cluster/set_master_launch_facts_tasks.yml + - include: ../../common/openshift-cluster/tasks/set_master_launch_facts.yml - include: tasks/launch_instances.yml vars: instances: "{{ master_names }}" @@ -27,7 +27,7 @@ type: "{{ k8s_type }}" g_sub_host_type: "default" - - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml + - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml vars: type: "compute" count: "{{ num_nodes }}" @@ -38,7 +38,7 @@ type: "{{ k8s_type }}" g_sub_host_type: "{{ sub_host_type }}" - - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml + - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml vars: type: "infra" count: "{{ num_infra }}" diff --git a/playbooks/byo/openshift-cluster/config.yml b/playbooks/byo/openshift-cluster/config.yml index 9e50a4a18..411c7e660 100644 --- a/playbooks/byo/openshift-cluster/config.yml +++ b/playbooks/byo/openshift-cluster/config.yml @@ -4,6 +4,7 @@ g_etcd_group: "{{ 'etcd' }}" g_masters_group: "{{ 'masters' }}" g_nodes_group: "{{ 'nodes' }}" + g_lb_group: "{{ 'lb' }}" openshift_cluster_id: "{{ cluster_id | default('default') }}" openshift_debug_level: 2 openshift_deployment_type: "{{ deployment_type }}" diff --git a/playbooks/byo/openshift-cluster/upgrades/README.md b/playbooks/byo/openshift-cluster/upgrades/README.md new file mode 100644 index 000000000..ce7aebf8e --- /dev/null +++ b/playbooks/byo/openshift-cluster/upgrades/README.md @@ -0,0 +1,8 @@ +# Upgrade playbooks +The playbooks provided in this directory can be used for upgrading an existing +environment. Additional notes for the associated upgrade playbooks are +provided in their respective directories. 
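For reference, the byo upgrade wrappers introduced in this patch (playbooks/byo/openshift-cluster/upgrades/*/upgrade.yml) hard-code the generic group variables to the fixed inventory group names `etcd`, `masters`, `nodes` and `lb`, which `evaluate_groups.yml` then expands into the `oo_*_to_config` host groups. A minimal inventory sketch for running them (hostnames are placeholders, not part of this change):

```
# Illustrative byo inventory only -- hostnames are placeholders
[masters]
master1.example.com

[etcd]
master1.example.com

[nodes]
node1.example.com
node2.example.com

# lb hosts are only configured (haproxy) when more than one master is defined
[lb]
lb1.example.com
```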
+ +# Upgrades available +- [OpenShift Enterprise 3.0 to latest minor release](v3_0_minor/README.md) +- [OpenShift Enterprise 3.0 to 3.1](v3_0_to_v3_1/README.md) diff --git a/playbooks/adhoc/upgrades/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_0_minor/README.md index 6de8a970f..c91a6cb96 100644 --- a/playbooks/adhoc/upgrades/README.md +++ b/playbooks/byo/openshift-cluster/upgrades/v3_0_minor/README.md @@ -1,11 +1,11 @@ -# [NOTE] -This playbook will re-run installation steps overwriting any local +# v3.0 minor upgrade playbook +**Note:** This playbook will re-run installation steps overwriting any local modifications. You should ensure that your inventory has been updated with any modifications you've made after your initial installation. If you find any items that cannot be configured via ansible please open an issue at https://github.com/openshift/openshift-ansible -# Overview +## Overview This playbook is available as a technical preview. It currently performs the following steps. @@ -17,5 +17,5 @@ following steps. * Updates the default registry if one exists * Updates image streams and quickstarts -# Usage -ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/adhoc/upgrades/upgrade.yml +## Usage +ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_0_minor/upgrade.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_0_minor/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_0_minor/upgrade.yml new file mode 100644 index 000000000..76fa9ba22 --- /dev/null +++ b/playbooks/byo/openshift-cluster/upgrades/v3_0_minor/upgrade.yml @@ -0,0 +1,9 @@ +--- +- include: ../../../../common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml + vars: + g_etcd_group: "{{ 'etcd' }}" + g_masters_group: "{{ 'masters' }}" + g_nodes_group: "{{ 'nodes' }}" + g_lb_group: "{{ 'lb' }}" + openshift_cluster_id: "{{ cluster_id | default('default') }}" + openshift_deployment_type: "{{ deployment_type }}" diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/README.md new file mode 100644 index 000000000..c434be5b7 --- /dev/null +++ b/playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/README.md @@ -0,0 +1,17 @@ +# v3.0 to v3.1 upgrade playbook + +## Overview +This playbook currently performs the +following steps. 
+ +**TODO: update for current steps** + * Upgrade and restart master services + * Upgrade and restart node services + * Modifies the subset of the configuration necessary + * Applies the latest cluster policies + * Updates the default router if one exists + * Updates the default registry if one exists + * Updates image streams and quickstarts + +## Usage +ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml new file mode 100644 index 000000000..b06442366 --- /dev/null +++ b/playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml @@ -0,0 +1,9 @@ +--- +- include: ../../../../common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml + vars: + g_etcd_group: "{{ 'etcd' }}" + g_masters_group: "{{ 'masters' }}" + g_nodes_group: "{{ 'nodes' }}" + g_lb_group: "{{ 'lb' }}" + openshift_cluster_id: "{{ cluster_id | default('default') }}" + openshift_deployment_type: "{{ deployment_type }}" diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml index 4c74f96db..a8bd634d3 100644 --- a/playbooks/common/openshift-cluster/config.yml +++ b/playbooks/common/openshift-cluster/config.yml @@ -1,68 +1,5 @@ --- -- name: Populate config host groups - hosts: localhost - gather_facts: no - tasks: - - fail: - msg: This playbook rquires g_etcd_group to be set - when: g_etcd_group is not defined - - - fail: - msg: This playbook rquires g_masters_group to be set - when: g_masters_group is not defined - - - fail: - msg: This playbook rquires g_nodes_group to be set - when: g_nodes_group is not defined - - - name: Evaluate oo_etcd_to_config - add_host: - name: "{{ item }}" - groups: oo_etcd_to_config - ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" - ansible_sudo: "{{ g_sudo | default(omit) }}" - with_items: groups[g_etcd_group] | default([]) - - - name: Evaluate oo_masters_to_config - add_host: - name: "{{ item }}" - groups: oo_masters_to_config - ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" - ansible_sudo: "{{ g_sudo | default(omit) }}" - with_items: groups[g_masters_group] | default([]) - - - name: Evaluate oo_nodes_to_config - add_host: - name: "{{ item }}" - groups: oo_nodes_to_config - ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" - ansible_sudo: "{{ g_sudo | default(omit) }}" - with_items: groups[g_nodes_group] | default([]) - - - name: Evaluate oo_nodes_to_config - add_host: - name: "{{ item }}" - groups: oo_nodes_to_config - ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" - ansible_sudo: "{{ g_sudo | default(omit) }}" - with_items: groups[g_masters_group] | default([]) - when: g_nodeonmaster is defined and g_nodeonmaster == true - - - name: Evaluate oo_first_etcd - add_host: - name: "{{ groups[g_etcd_group][0] }}" - groups: oo_first_etcd - ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" - ansible_sudo: "{{ g_sudo | default(omit) }}" - when: g_etcd_group in groups and (groups[g_etcd_group] | length) > 0 - - - name: Evaluate oo_first_master - add_host: - name: "{{ groups[g_masters_group][0] }}" - groups: oo_first_master - ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" - ansible_sudo: "{{ g_sudo | default(omit) }}" - when: g_masters_group in groups and (groups[g_masters_group] | length) > 0 +- include: evaluate_groups.yml - include: ../openshift-etcd/config.yml @@ -71,4 +8,4 @@ - include: 
../openshift-node/config.yml vars: osn_cluster_dns_domain: "{{ hostvars[groups.oo_first_master.0].openshift.dns.domain }}" - osn_cluster_dns_ip: "{{ hostvars[groups.oo_first_master.0].openshift.dns.ip }}" + osn_cluster_dns_ip: "{{ hostvars[groups.oo_first_master.0].cluster_dns_ip }}" diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml new file mode 100644 index 000000000..2bb69614f --- /dev/null +++ b/playbooks/common/openshift-cluster/evaluate_groups.yml @@ -0,0 +1,76 @@ +--- +- name: Populate config host groups + hosts: localhost + gather_facts: no + tasks: + - fail: + msg: This playbook requires g_etcd_group to be set + when: g_etcd_group is not defined + + - fail: + msg: This playbook requires g_masters_group to be set + when: g_masters_group is not defined + + - fail: + msg: This playbook requires g_nodes_group to be set + when: g_nodes_group is not defined + + - fail: + msg: This playbook requires g_lb_group to be set + when: g_lb_group is not defined + + - name: Evaluate oo_etcd_to_config + add_host: + name: "{{ item }}" + groups: oo_etcd_to_config + ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" + ansible_sudo: "{{ g_sudo | default(omit) }}" + with_items: groups[g_etcd_group] | default([]) + + - name: Evaluate oo_masters_to_config + add_host: + name: "{{ item }}" + groups: oo_masters_to_config + ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" + ansible_sudo: "{{ g_sudo | default(omit) }}" + with_items: groups[g_masters_group] | default([]) + + - name: Evaluate oo_nodes_to_config + add_host: + name: "{{ item }}" + groups: oo_nodes_to_config + ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" + ansible_sudo: "{{ g_sudo | default(omit) }}" + with_items: groups[g_nodes_group] | default([]) + + - name: Evaluate oo_nodes_to_config + add_host: + name: "{{ item }}" + groups: oo_nodes_to_config + ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" + ansible_sudo: "{{ g_sudo | default(omit) }}" + with_items: groups[g_masters_group] | default([]) + when: g_nodeonmaster is defined and g_nodeonmaster == true + + - name: Evaluate oo_first_etcd + add_host: + name: "{{ groups[g_etcd_group][0] }}" + groups: oo_first_etcd + ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" + when: g_etcd_group in groups and (groups[g_etcd_group] | length) > 0 + + - name: Evaluate oo_first_master + add_host: + name: "{{ groups[g_masters_group][0] }}" + groups: oo_first_master + ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" + ansible_sudo: "{{ g_sudo | default(omit) }}" + when: g_masters_group in groups and (groups[g_masters_group] | length) > 0 + + - name: Evaluate oo_lb_to_config + add_host: + name: "{{ item }}" + groups: oo_lb_to_config + ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" + ansible_sudo: "{{ g_sudo | default(omit) }}" + with_items: groups[g_lb_group] | default([]) diff --git a/playbooks/common/openshift-cluster/scaleup.yml b/playbooks/common/openshift-cluster/scaleup.yml new file mode 100644 index 000000000..6d2777732 --- /dev/null +++ b/playbooks/common/openshift-cluster/scaleup.yml @@ -0,0 +1,16 @@ +--- +- include: evaluate_groups.yml + vars: + g_etcd_group: "{{ 'etcd' }}" + g_masters_group: "{{ 'masters' }}" + g_nodes_group: "{{ 'nodes' }}" + g_lb_group: "{{ 'lb' }}" + openshift_cluster_id: "{{ cluster_id | default('default') }}" + openshift_debug_level: 2 + openshift_deployment_type: "{{ deployment_type }}" + +- include: ../openshift-node/config.yml + vars: + osn_cluster_dns_domain: "{{ 
hostvars[groups.oo_first_master.0].openshift.dns.domain }}" + osn_cluster_dns_ip: "{{ hostvars[groups.oo_first_master.0].openshift.dns.ip }}" + openshift_deployment_type: "{{ deployment_type }}" diff --git a/playbooks/common/openshift-cluster/set_etcd_launch_facts_tasks.yml b/playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml index 1a6580795..1a6580795 100644 --- a/playbooks/common/openshift-cluster/set_etcd_launch_facts_tasks.yml +++ b/playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml diff --git a/playbooks/common/openshift-cluster/set_master_launch_facts_tasks.yml b/playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml index 36d7b7870..36d7b7870 100644 --- a/playbooks/common/openshift-cluster/set_master_launch_facts_tasks.yml +++ b/playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml diff --git a/playbooks/common/openshift-cluster/set_node_launch_facts_tasks.yml b/playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml index 278942f8b..278942f8b 100644 --- a/playbooks/common/openshift-cluster/set_node_launch_facts_tasks.yml +++ b/playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml diff --git a/playbooks/common/openshift-cluster/upgrades/files/pre-upgrade-check b/playbooks/common/openshift-cluster/upgrades/files/pre-upgrade-check new file mode 100644 index 000000000..b5459f312 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/files/pre-upgrade-check @@ -0,0 +1,190 @@ +#!/usr/bin/env python +""" +Pre-upgrade checks that must be run on a master before proceeding with upgrade. +""" +# This is a script not a python module: +# pylint: disable=invalid-name + +# NOTE: This script should not require any python libs other than what is +# in the standard library. + +__license__ = "ASL 2.0" + +import json +import os +import subprocess +import re + +# The maximum length of container.ports.name +ALLOWED_LENGTH = 15 +# The valid structure of container.ports.name +ALLOWED_CHARS = re.compile('^[a-z0-9][a-z0-9\\-]*[a-z0-9]$') +AT_LEAST_ONE_LETTER = re.compile('[a-z]') +# look at OS_PATH for the full path. 
Default ot 'oc' +OC_PATH = os.getenv('OC_PATH', 'oc') + + +def validate(value): + """ + validate verifies that value matches required conventions + + Rules of container.ports.name validation: + + * must be less that 16 chars + * at least one letter + * only a-z0-9- + * hyphens can not be leading or trailing or next to each other + + :Parameters: + - `value`: Value to validate + """ + if len(value) > ALLOWED_LENGTH: + return False + + if '--' in value: + return False + + # We search since it can be anywhere + if not AT_LEAST_ONE_LETTER.search(value): + return False + + # We match because it must start at the beginning + if not ALLOWED_CHARS.match(value): + return False + return True + + +def list_items(kind): + """ + list_items returns a list of items from the api + + :Parameters: + - `kind`: Kind of item to access + """ + response = subprocess.check_output([OC_PATH, 'get', '--all-namespaces', '-o', 'json', kind]) + items = json.loads(response) + return items.get("items", []) + + +def get(obj, *paths): + """ + Gets an object + + :Parameters: + - `obj`: A dictionary structure + - `path`: All other non-keyword arguments + """ + ret_obj = obj + for path in paths: + if ret_obj.get(path, None) is None: + return [] + ret_obj = ret_obj[path] + return ret_obj + + +# pylint: disable=too-many-arguments +def pretty_print_errors(namespace, kind, item_name, container_name, invalid_label, port_name, valid): + """ + Prints out results in human friendly way. + + :Parameters: + - `namespace`: Namespace of the resource + - `kind`: Kind of the resource + - `item_name`: Name of the resource + - `container_name`: Name of the container. May be "" when kind=Service. + - `port_name`: Name of the port + - `invalid_label`: The label of the invalid port. Port.name/targetPort + - `valid`: True if the port is valid + """ + if not valid: + if len(container_name) > 0: + print('%s/%s -n %s (Container="%s" %s="%s")' % ( + kind, item_name, namespace, container_name, invalid_label, port_name)) + else: + print('%s/%s -n %s (%s="%s")' % ( + kind, item_name, namespace, invalid_label, port_name)) + + +def print_validation_header(): + """ + Prints the error header. Should run on the first error to avoid + overwhelming the user. + """ + print """\ +At least one port name does not validate. Valid port names: + + * must be less that 16 chars + * have at least one letter + * only a-z0-9- + * do not start or end with - + * Dashes may not be next to eachother ('--') +""" + + +def main(): + """ + main is the main entry point to this script + """ + try: + # the comma at the end suppresses the newline + print "Checking for oc ...", + subprocess.check_output([OC_PATH, 'whoami']) + print "found" + except: + print( + 'Unable to run "%s whoami"\n' + 'Please ensure OpenShift is running, and "oc" is on your system ' + 'path.\n' + 'You can override the path with the OC_PATH environment variable.' 
+ % OC_PATH) + raise SystemExit(1) + + # Where the magic happens + first_error = True + for kind, path in [ + ('replicationcontrollers', ("spec", "template", "spec", "containers")), + ('pods', ("spec", "containers")), + ('deploymentconfigs', ("spec", "template", "spec", "containers"))]: + for item in list_items(kind): + namespace = item["metadata"]["namespace"] + item_name = item["metadata"]["name"] + for container in get(item, *path): + container_name = container["name"] + for port in get(container, "ports"): + port_name = port.get("name", None) + if not port_name: + # Unnamed ports are OK + continue + valid = validate(port_name) + if not valid and first_error: + first_error = False + print_validation_header() + pretty_print_errors( + namespace, kind, item_name, + container_name, "Port.name", port_name, valid) + + # Services follow a different flow + for item in list_items('services'): + namespace = item["metadata"]["namespace"] + item_name = item["metadata"]["name"] + for port in get(item, "spec", "ports"): + port_name = port.get("targetPort", None) + if isinstance(port_name, int) or port_name is None: + # Integer only or unnamed ports are OK + continue + valid = validate(port_name) + if not valid and first_error: + first_error = False + print_validation_header() + pretty_print_errors( + namespace, "services", item_name, "", + "targetPort", port_name, valid) + + # If we had at least 1 error then exit with 1 + if not first_error: + raise SystemExit(1) + + +if __name__ == '__main__': + main() + diff --git a/playbooks/common/openshift-cluster/upgrades/files/versions.sh b/playbooks/common/openshift-cluster/upgrades/files/versions.sh new file mode 100644 index 000000000..f90719cab --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/files/versions.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +yum_installed=$(yum list installed "$@" 2>&1 | tail -n +2 | grep -v 'Installed Packages' | grep -v 'Red Hat Subscription Management' | grep -v 'Error:' | awk '{ print $2 }' | tr '\n' ' ') + +yum_available=$(yum list available "$@" 2>&1 | tail -n +2 | grep -v 'Available Packages' | grep -v 'Red Hat Subscription Management' | grep -v 'el7ose' | grep -v 'Error:' | awk '{ print $2 }' | tr '\n' ' ') + + +echo "---" +echo "curr_version: ${yum_installed}" +echo "avail_version: ${yum_available}" diff --git a/playbooks/common/openshift-cluster/upgrades/filter_plugins b/playbooks/common/openshift-cluster/upgrades/filter_plugins new file mode 120000 index 000000000..b1213dedb --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/filter_plugins @@ -0,0 +1 @@ +../../../../filter_plugins
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py b/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py new file mode 100755 index 000000000..a6721bb92 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py @@ -0,0 +1,154 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# vim: expandtab:tabstop=4:shiftwidth=4 + +"""Ansible module for modifying OpenShift configs during an upgrade""" + +import os +import yaml + +DOCUMENTATION = ''' +--- +module: openshift_upgrade_config +short_description: OpenShift Upgrade Config +author: Jason DeTiberus +requirements: [ ] +''' +EXAMPLES = ''' +''' + +def modify_api_levels(level_list, remove, ensure, msg_prepend='', + msg_append=''): + """ modify_api_levels """ + changed = False + changes = [] + + if not isinstance(remove, list): + remove = [] + + if not isinstance(ensure, list): + ensure = [] + + if not isinstance(level_list, list): + new_list = [] + changed = True + changes.append("%s created missing %s" % (msg_prepend, msg_append)) + else: + new_list = level_list + for level in remove: + if level in new_list: + new_list.remove(level) + changed = True + changes.append("%s removed %s %s" % (msg_prepend, level, msg_append)) + + for level in ensure: + if level not in new_list: + new_list.append(level) + changed = True + changes.append("%s added %s %s" % (msg_prepend, level, msg_append)) + + return {'new_list': new_list, 'changed': changed, 'changes': changes} + + +def upgrade_master_3_0_to_3_1(ansible_module, config_base, backup): + """Main upgrade method for 3.0 to 3.1.""" + changes = [] + + # Facts do not get transferred to the hosts where custom modules run, + # need to make some assumptions here. 
+ master_config = os.path.join(config_base, 'master/master-config.yaml') + + master_cfg_file = open(master_config, 'r') + config = yaml.safe_load(master_cfg_file.read()) + master_cfg_file.close() + + + # Remove unsupported api versions and ensure supported api versions from + # master config + unsupported_levels = ['v1beta1', 'v1beta2', 'v1beta3'] + supported_levels = ['v1'] + + result = modify_api_levels(config.get('apiLevels'), unsupported_levels, + supported_levels, 'master-config.yaml:', 'from apiLevels') + if result['changed']: + config['apiLevels'] = result['new_list'] + changes.append(result['changes']) + + if 'kubernetesMasterConfig' in config and 'apiLevels' in config['kubernetesMasterConfig']: + config['kubernetesMasterConfig'].pop('apiLevels') + changes.append('master-config.yaml: removed kubernetesMasterConfig.apiLevels') + + # Add proxyClientInfo to master-config + if 'proxyClientInfo' not in config['kubernetesMasterConfig']: + config['kubernetesMasterConfig']['proxyClientInfo'] = { + 'certFile': 'master.proxy-client.crt', + 'keyFile': 'master.proxy-client.key' + } + changes.append("master-config.yaml: added proxyClientInfo") + + if len(changes) > 0: + if backup: + # TODO: Check success: + ansible_module.backup_local(master_config) + + # Write the modified config: + out_file = open(master_config, 'w') + out_file.write(yaml.safe_dump(config, default_flow_style=False)) + out_file.close() + + return changes + + +def upgrade_master(ansible_module, config_base, from_version, to_version, backup): + """Upgrade entry point.""" + if from_version == '3.0': + if to_version == '3.1': + return upgrade_master_3_0_to_3_1(ansible_module, config_base, backup) + + +def main(): + """ main """ + # disabling pylint errors for global-variable-undefined and invalid-name + # for 'global module' usage, since it is required to use ansible_facts + # pylint: disable=global-variable-undefined, invalid-name, + # redefined-outer-name + global module + + module = AnsibleModule( + argument_spec=dict( + config_base=dict(required=True), + from_version=dict(required=True, choices=['3.0']), + to_version=dict(required=True, choices=['3.1']), + role=dict(required=True, choices=['master']), + backup=dict(required=False, default=True, type='bool') + ), + supports_check_mode=True, + ) + + from_version = module.params['from_version'] + to_version = module.params['to_version'] + role = module.params['role'] + backup = module.params['backup'] + config_base = module.params['config_base'] + + try: + changes = [] + if role == 'master': + changes = upgrade_master(module, config_base, from_version, + to_version, backup) + + changed = len(changes) > 0 + return module.exit_json(changed=changed, changes=changes) + + # ignore broad-except error to avoid stack trace to ansible user + # pylint: disable=broad-except + except Exception, e: + return module.fail_json(msg=str(e)) + +# ignore pylint errors related to the module_utils import +# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import +# import module snippets +from ansible.module_utils.basic import * + +if __name__ == '__main__': + main() diff --git a/playbooks/common/openshift-cluster/upgrades/lookup_plugins b/playbooks/common/openshift-cluster/upgrades/lookup_plugins new file mode 120000 index 000000000..aff753026 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/lookup_plugins @@ -0,0 +1 @@ +../../../../lookup_plugins
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/roles b/playbooks/common/openshift-cluster/upgrades/roles new file mode 120000 index 000000000..4bdbcbad3 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/roles @@ -0,0 +1 @@ +../../../../roles
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_minor/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/filter_plugins new file mode 120000 index 000000000..27ddaa18b --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/filter_plugins @@ -0,0 +1 @@ +../../../../../filter_plugins
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_minor/library b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/library new file mode 120000 index 000000000..53bed9684 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/library @@ -0,0 +1 @@ +../library
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_minor/lookup_plugins b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/lookup_plugins new file mode 120000 index 000000000..cf407f69b --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/lookup_plugins @@ -0,0 +1 @@ +../../../../../lookup_plugins
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_minor/roles b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/roles new file mode 120000 index 000000000..6bc1a7aef --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/roles @@ -0,0 +1 @@ +../../../../../roles
\ No newline at end of file diff --git a/playbooks/adhoc/upgrades/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml index ae1d0127c..9f7e49b93 100644 --- a/playbooks/adhoc/upgrades/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml @@ -1,25 +1,12 @@ --- -- name: Upgrade base package on masters - hosts: masters - roles: - - openshift_facts - vars: - openshift_version: "{{ openshift_pkg_version | default('') }}" - tasks: - - name: Upgrade base package - yum: pkg={{ openshift.common.service_type }}{{ openshift_version }} state=latest +- name: Evaluate groups + include: ../../evaluate_groups.yml - name: Re-Run cluster configuration to apply latest configuration changes - include: ../../common/openshift-cluster/config.yml - vars: - g_etcd_group: "{{ 'etcd' }}" - g_masters_group: "{{ 'masters' }}" - g_nodes_group: "{{ 'nodes' }}" - openshift_cluster_id: "{{ cluster_id | default('default') }}" - openshift_deployment_type: "{{ deployment_type }}" + include: ../../config.yml - name: Upgrade masters - hosts: masters + hosts: oo_masters_to_config vars: openshift_version: "{{ openshift_pkg_version | default('') }}" tasks: @@ -29,7 +16,7 @@ service: name="{{ openshift.common.service_type}}-master" state=restarted - name: Upgrade nodes - hosts: nodes + hosts: oo_nodes_to_config vars: openshift_version: "{{ openshift_pkg_version | default('') }}" tasks: @@ -60,19 +47,6 @@ {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig policy reconcile-cluster-roles --confirm -- name: Update cluster policy bindings - hosts: oo_first_master - tasks: - - name: oadm policy reconcile-cluster-role-bindings --confirm - command: > - {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig - policy reconcile-cluster-role-bindings - --exclude-groups=system:authenticated - --exclude-groups=system:unauthenticated - --exclude-users=system:anonymous - --additive-only=true --confirm - when: ( _new_version.stdout | version_compare('1.0.6', '>') and _new_version.stdout | version_compare('3.0','<') ) or _new_version.stdout | version_compare('3.0.2','>') - - name: Upgrade default router hosts: oo_first_master vars: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/filter_plugins new file mode 120000 index 000000000..27ddaa18b --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/filter_plugins @@ -0,0 +1 @@ +../../../../../filter_plugins
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/library b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/library new file mode 120000 index 000000000..53bed9684 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/library @@ -0,0 +1 @@ +../library
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/lookup_plugins b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/lookup_plugins new file mode 120000 index 000000000..cf407f69b --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/lookup_plugins @@ -0,0 +1 @@ +../../../../../lookup_plugins
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/roles b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/roles new file mode 120000 index 000000000..6bc1a7aef --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/roles @@ -0,0 +1 @@ +../../../../../roles
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml new file mode 100644 index 000000000..eea147229 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml @@ -0,0 +1,550 @@ +--- +############################################################################### +# Evaluate host groups and gather facts +############################################################################### +- name: Evaluate host groups + include: ../../evaluate_groups.yml + +- name: Load openshift_facts + hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config:oo_lb_to_config + roles: + - openshift_facts + +- name: Evaluate additional groups for upgrade + hosts: localhost + tasks: + - name: Evaluate etcd_hosts_to_backup + add_host: + name: "{{ item }}" + groups: etcd_hosts_to_backup + with_items: groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master + + +############################################################################### +# Pre-upgrade checks +############################################################################### +- name: Verify upgrade can proceed + hosts: oo_first_master + vars: + openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}" + gather_facts: no + tasks: + # Pacemaker is currently the only supported upgrade path for multiple masters + - fail: + msg: "openshift_master_cluster_method must be set to 'pacemaker'" + when: openshift_master_ha | bool and ((openshift_master_cluster_method is not defined) or (openshift_master_cluster_method is defined and openshift_master_cluster_method != "pacemaker")) + + - fail: + msg: > + This upgrade is only supported for origin and openshift-enterprise + deployment types + when: deployment_type not in ['origin','openshift-enterprise'] + + - fail: + msg: > + openshift_pkg_version is {{ openshift_pkg_version }} which is not a + valid version for a 3.1 upgrade + when: openshift_pkg_version is defined and openshift_pkg_version.split('-',1).1 | version_compare('3.0.2.900','<') + + # If this script errors out ansible will show the default stdout/stderr + # which contains details for the user: + - script: ../files/pre-upgrade-check + + +- name: Verify upgrade can proceed + hosts: oo_masters_to_config:oo_nodes_to_config + tasks: + - name: Clean yum cache + command: yum clean all + + - set_fact: + g_new_service_name: "{{ 'origin' if deployment_type =='origin' else 'atomic-openshift' }}" + + - name: Determine available versions + script: ../files/versions.sh {{ g_new_service_name }} openshift + register: g_versions_result + + - set_fact: + g_aos_versions: "{{ g_versions_result.stdout | from_yaml }}" + + - set_fact: + g_new_version: "{{ g_aos_versions.curr_version.split('-', 1).0 if g_aos_versions.avail_version is none else g_aos_versions.avail_version.split('-', 1).0 }}" + + - fail: + msg: This playbook requires Origin 1.0.6 or later + when: deployment_type == 'origin' and g_aos_versions.curr_version | version_compare('1.0.6','<') + + - fail: + msg: Atomic OpenShift 3.1 packages not found + when: g_aos_versions.curr_version | version_compare('3.0.2.900','<') and (g_aos_versions.avail_version is none or g_aos_versions.avail_version | version_compare('3.0.2.900','<')) + + - set_fact: + pre_upgrade_complete: True + + +############################################################################## +# Gate on 
pre-upgrade checks +############################################################################## +- name: Gate on pre-upgrade checks + hosts: localhost + vars: + pre_upgrade_hosts: "{{ groups.oo_masters_to_config | union(groups.oo_nodes_to_config) }}" + tasks: + - set_fact: + pre_upgrade_completed: "{{ hostvars + | oo_select_keys(pre_upgrade_hosts) + | oo_collect('inventory_hostname', {'pre_upgrade_complete': true}) }}" + - set_fact: + pre_upgrade_failed: "{{ pre_upgrade_hosts | difference(pre_upgrade_completed) }}" + - fail: + msg: "Upgrade cannot continue. The following hosts did not complete pre-upgrade checks: {{ pre_upgrade_failed | join(',') }}" + when: pre_upgrade_failed | length > 0 + + + +############################################################################### +# Backup etcd +############################################################################### +- name: Backup etcd + hosts: etcd_hosts_to_backup + vars: + embedded_etcd: "{{ openshift.master.embedded_etcd }}" + timestamp: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}" + roles: + - openshift_facts + tasks: + # Ensure we persist the etcd role for this host in openshift_facts + - openshift_facts: + role: etcd + local_facts: {} + when: "'etcd' not in openshift" + + - stat: path=/var/lib/openshift + register: var_lib_openshift + + - stat: path=/var/lib/origin + register: var_lib_origin + + - name: Create origin symlink if necessary + file: src=/var/lib/openshift/ dest=/var/lib/origin state=link + when: var_lib_openshift.stat.exists == True and var_lib_origin.stat.exists == False + + # TODO: replace shell module with command and update later checks + # We assume to be using the data dir for all backups. + - name: Check available disk space for etcd backup + shell: df --output=avail -k {{ openshift.common.data_dir }} | tail -n 1 + register: avail_disk + + # TODO: replace shell module with command and update later checks + - name: Check current embedded etcd disk usage + shell: du -k {{ openshift.etcd.etcd_data_dir }} | tail -n 1 | cut -f1 + register: etcd_disk_usage + when: embedded_etcd | bool + + - name: Abort if insufficient disk space for etcd backup + fail: + msg: > + {{ etcd_disk_usage.stdout }} Kb disk space required for etcd backup, + {{ avail_disk.stdout }} Kb available. + when: (embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int) + + - name: Install etcd (for etcdctl) + yum: + pkg: etcd + state: latest + + - name: Generate etcd backup + command: > + etcdctl backup --data-dir={{ openshift.etcd.etcd_data_dir }} + --backup-dir={{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }} + + - set_fact: + etcd_backup_complete: True + + - name: Display location of etcd backup + debug: + msg: "Etcd backup created in {{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}" + + +############################################################################## +# Gate on etcd backup +############################################################################## +- name: Gate on etcd backup + hosts: localhost + tasks: + - set_fact: + etcd_backup_completed: "{{ hostvars + | oo_select_keys(groups.etcd_hosts_to_backup) + | oo_collect('inventory_hostname', {'etcd_backup_complete': true}) }}" + - set_fact: + etcd_backup_failed: "{{ groups.etcd_hosts_to_backup | difference(etcd_backup_completed) }}" + - fail: + msg: "Upgrade cannot continue. 
The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}" + when: etcd_backup_failed | length > 0 + + + +############################################################################### +# Upgrade Masters +############################################################################### +- name: Create temp directory for syncing certs + hosts: localhost + gather_facts: no + tasks: + - name: Create local temp directory for syncing certs + local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX + register: g_master_mktemp + changed_when: False + +- name: Update deployment type + hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config + roles: + - openshift_facts + post_tasks: + - openshift_facts: + role: common + local_facts: + deployment_type: "{{ deployment_type }}" + +- name: Update master facts + hosts: oo_masters_to_config + roles: + - openshift_facts + post_tasks: + - openshift_facts: + role: master + local_facts: + cluster_method: "{{ openshift_master_cluster_method | default(None) }}" + +- name: Upgrade master packages and configuration + hosts: oo_masters_to_config + vars: + openshift_version: "{{ openshift_pkg_version | default('') }}" + tasks: + - name: Upgrade to latest available kernel + yum: + pkg: kernel + state: latest + + - name: Upgrade master packages + command: yum update -y {{ openshift.common.service_type }}-master{{ openshift_version }} + + - name: Ensure python-yaml present for config upgrade + yum: + pkg: PyYAML + state: installed + + - name: Upgrade master configuration + openshift_upgrade_config: + from_version: '3.0' + to_version: '3.1' + role: master + config_base: "{{ hostvars[inventory_hostname].openshift.common.config_base }}" + + - set_fact: + master_certs_missing: True + master_cert_subdir: master-{{ openshift.common.hostname }} + master_cert_config_dir: "{{ openshift.common.config_base }}/master" + + +- name: Generate missing master certificates + hosts: oo_first_master + vars: + master_hostnames: "{{ hostvars + | oo_select_keys(groups.oo_masters_to_config) + | oo_collect('openshift.common.all_hostnames') + | oo_flatten | unique }}" + master_generated_certs_dir: "{{ openshift.common.config_base }}/generated-configs" + masters_needing_certs: "{{ hostvars + | oo_select_keys(groups.oo_masters_to_config) + | difference([groups.oo_first_master.0]) }}" + sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}" + openshift_deployment_type: "{{ deployment_type }}" + roles: + - openshift_master_certificates + post_tasks: + - name: Remove generated etcd client certs when using external etcd + file: + path: "{{ master_generated_certs_dir }}/{{ item.0.master_cert_subdir }}/{{ item.1 }}" + state: absent + when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config + with_nested: + - masters_needing_certs + - - master.etcd-client.crt + - master.etcd-client.key + + - name: Create a tarball of the master certs + command: > + tar -czvf {{ master_generated_certs_dir }}/{{ item.master_cert_subdir }}.tgz + -C {{ master_generated_certs_dir }}/{{ item.master_cert_subdir }} . 
+ with_items: masters_needing_certs + + - name: Retrieve the master cert tarball from the master + fetch: + src: "{{ master_generated_certs_dir }}/{{ item.master_cert_subdir }}.tgz" + dest: "{{ sync_tmpdir }}/" + flat: yes + fail_on_missing: yes + validate_checksum: yes + with_items: masters_needing_certs + + +- name: Sync generated certs, update service config and restart master services + hosts: oo_masters_to_config + vars: + sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}" + openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}" + openshift_deployment_type: "{{ deployment_type }}" + tasks: + - name: Unarchive the tarball on the master + unarchive: + src: "{{ sync_tmpdir }}/{{ master_cert_subdir }}.tgz" + dest: "{{ master_cert_config_dir }}" + when: inventory_hostname != groups.oo_first_master.0 + + - name: Restart master service + service: name="{{ openshift.common.service_type}}-master" state=restarted + when: not openshift_master_ha | bool + + - name: Ensure the master service is enabled + service: name="{{ openshift.common.service_type}}-master" state=started enabled=yes + when: not openshift_master_ha | bool + + - name: Check for configured cluster + stat: + path: /etc/corosync/corosync.conf + register: corosync_conf + when: openshift_master_ha | bool + + - name: Destroy cluster + command: pcs cluster destroy --all + when: openshift_master_ha | bool and corosync_conf.stat.exists == true + run_once: true + + - name: Start pcsd + service: name=pcsd enabled=yes state=started + when: openshift_master_ha | bool + + +- name: Re-create cluster + hosts: oo_first_master + vars: + openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}" + openshift_deployment_type: "{{ deployment_type }}" + omc_cluster_hosts: "{{ groups.oo_masters_to_config | join(' ') }}" + roles: + - role: openshift_master_cluster + when: openshift_master_ha | bool + + +- name: Delete temporary directory on localhost + hosts: localhost + gather_facts: no + tasks: + - file: name={{ g_master_mktemp.stdout }} state=absent + changed_when: False + + +- name: Set master update status to complete + hosts: oo_masters_to_config + tasks: + - set_fact: + master_update_complete: True + + +############################################################################## +# Gate on master update complete +############################################################################## +- name: Gate on master update + hosts: localhost + tasks: + - set_fact: + master_update_completed: "{{ hostvars + | oo_select_keys(groups.oo_masters_to_config) + | oo_collect('inventory_hostname', {'master_update_complete': true}) }}" + - set_fact: + master_update_failed: "{{ groups.oo_masters_to_config | difference(master_update_completed) }}" + - fail: + msg: "Upgrade cannot continue. 
The following masters did not finish updating: {{ master_update_failed | join(',') }}" + when: master_update_failed | length > 0 + + +############################################################################### +# Upgrade Nodes +############################################################################### +- name: Upgrade nodes + hosts: oo_nodes_to_config + vars: + openshift_version: "{{ openshift_pkg_version | default('') }}" + roles: + - openshift_facts + tasks: + - name: Upgrade node packages + command: yum update -y {{ openshift.common.service_type }}-node{{ openshift_version }} + + - name: Restart node service + service: name="{{ openshift.common.service_type }}-node" state=restarted + + - name: Ensure node service enabled + service: name="{{ openshift.common.service_type }}-node" state=started enabled=yes + + - set_fact: + node_update_complete: True + + +############################################################################## +# Gate on nodes update +############################################################################## +- name: Gate on nodes update + hosts: localhost + tasks: + - set_fact: + node_update_completed: "{{ hostvars + | oo_select_keys(groups.oo_nodes_to_config) + | oo_collect('inventory_hostname', {'node_update_complete': true}) }}" + - set_fact: + node_update_failed: "{{ groups.oo_nodes_to_config | difference(node_update_completed) }}" + - fail: + msg: "Upgrade cannot continue. The following nodes did not finish updating: {{ node_update_failed | join(',') }}" + when: node_update_failed | length > 0 + + +############################################################################### +# Post upgrade - Reconcile Cluster Roles and Cluster Role Bindings +############################################################################### +- name: Reconcile Cluster Roles and Cluster Role Bindings + hosts: oo_masters_to_config + vars: + origin_reconcile_bindings: "{{ deployment_type == 'origin' and g_new_version | version_compare('1.0.6', '>') }}" + ent_reconcile_bindings: true + openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}" + tasks: + - name: Reconcile Cluster Roles + command: > + {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig + policy reconcile-cluster-roles --confirm + run_once: true + + - name: Reconcile Cluster Role Bindings + command: > + {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig + policy reconcile-cluster-role-bindings + --exclude-groups=system:authenticated + --exclude-groups=system:unauthenticated + --exclude-users=system:anonymous + --additive-only=true --confirm + when: origin_reconcile_bindings | bool or ent_reconcile_bindings | bool + run_once: true + + - name: Restart master services + service: name="{{ openshift.common.service_type}}-master" state=restarted + when: not openshift_master_ha | bool + + - name: Restart master cluster + command: pcs resource restart master + when: openshift_master_ha | bool + run_once: true + + - name: Wait for the clustered master service to be available + wait_for: + host: "{{ openshift_master_cluster_vip }}" + port: 8443 + state: started + timeout: 180 + delay: 90 + when: openshift_master_ha | bool + run_once: true + + - set_fact: + reconcile_complete: True + + +############################################################################## +# Gate on reconcile +############################################################################## +- name: Gate on reconcile + hosts: 
localhost + tasks: + - set_fact: + reconcile_completed: "{{ hostvars + | oo_select_keys(groups.oo_masters_to_config) + | oo_collect('inventory_hostname', {'reconcile_complete': true}) }}" + - set_fact: + reconcile_failed: "{{ groups.oo_masters_to_config | difference(reconcile_completed) }}" + - fail: + msg: "Upgrade cannot continue. The following masters did not finish reconciling: {{ reconcile_failed | join(',') }}" + when: reconcile_failed | length > 0 + + + + +############################################################################### +# Post upgrade - Upgrade default router, default registry and examples +############################################################################### +- name: Upgrade default router and default registry + hosts: oo_first_master + vars: + openshift_deployment_type: "{{ deployment_type }}" + registry_image: "{{ openshift.master.registry_url | replace( '${component}', 'docker-registry' ) | replace ( '${version}', 'v' + g_new_version ) }}" + router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', 'v' + g_new_version ) }}" + oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig" + roles: + # Create the new templates shipped in 3.1, existing templates are left + # unmodified. This prevents the subsequent role definition for + # openshift_examples from failing when trying to replace templates that do + # not already exist. We could have potentially done a replace --force to + # create and update in one step. + - openshift_examples + # Update the existing templates + - role: openshift_examples + openshift_examples_import_command: replace + pre_tasks: + - name: Check for default router + command: > + {{ oc_cmd }} get -n default dc/router + register: _default_router + failed_when: false + changed_when: false + + - name: Check for allowHostNetwork and allowHostPorts + when: _default_router.rc == 0 + shell: > + {{ oc_cmd }} get -o yaml scc/privileged | /usr/bin/grep -e allowHostPorts -e allowHostNetwork + register: _scc + + - name: Grant allowHostNetwork and allowHostPorts + when: + - _default_router.rc == 0 + - "'false' in _scc.stdout" + command: > + {{ oc_cmd }} patch scc/privileged -p '{"allowHostPorts":true,"allowHostNetwork":true}' --loglevel=9 + + - name: Update deployment config to 1.0.4/3.0.1 spec + when: _default_router.rc == 0 + command: > + {{ oc_cmd }} patch dc/router -p + '{"spec":{"strategy":{"rollingParams":{"updatePercent":-10},"spec":{"serviceAccount":"router","serviceAccountName":"router"}}}}' + + - name: Switch to hostNetwork=true + when: _default_router.rc == 0 + command: > + {{ oc_cmd }} patch dc/router -p '{"spec":{"template":{"spec":{"hostNetwork":true}}}}' + + - name: Update router image to current version + when: _default_router.rc == 0 + command: > + {{ oc_cmd }} patch dc/router -p + '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}"}]}}}}' + + - name: Check for default registry + command: > + {{ oc_cmd }} get -n default dc/docker-registry + register: _default_registry + failed_when: false + changed_when: false + + - name: Update registry image to current version + when: _default_registry.rc == 0 + command: > + {{ oc_cmd }} patch dc/docker-registry -p + '{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}' diff --git a/playbooks/common/openshift-etcd/config.yml b/playbooks/common/openshift-etcd/config.yml index 
952960652..ed23ada88 100644 --- a/playbooks/common/openshift-etcd/config.yml +++ b/playbooks/common/openshift-etcd/config.yml @@ -13,6 +13,8 @@ hostname: "{{ openshift_hostname | default(None) }}" public_hostname: "{{ openshift_public_hostname | default(None) }}" deployment_type: "{{ openshift_deployment_type }}" + - role: etcd + local_facts: {} - name: Check status of etcd certificates stat: path: "{{ item }}" diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml index 1dec923fc..b1da85d5d 100644 --- a/playbooks/common/openshift-master/config.yml +++ b/playbooks/common/openshift-master/config.yml @@ -34,7 +34,9 @@ - role: common local_facts: hostname: "{{ openshift_hostname | default(None) }}" + ip: "{{ openshift_ip | default(None) }}" public_hostname: "{{ openshift_public_hostname | default(None) }}" + public_ip: "{{ openshift_public_ip | default(None) }}" deployment_type: "{{ openshift_deployment_type }}" - role: master local_facts: @@ -44,7 +46,6 @@ public_api_url: "{{ openshift_master_public_api_url | default(None) }}" cluster_hostname: "{{ openshift_master_cluster_hostname | default(None) }}" cluster_public_hostname: "{{ openshift_master_cluster_public_hostname | default(None) }}" - cluster_defer_ha: "{{ openshift_master_cluster_defer_ha | default(None) }}" console_path: "{{ openshift_master_console_path | default(None) }}" console_port: "{{ openshift_master_console_port | default(None) }}" console_url: "{{ openshift_master_console_url | default(None) }}" @@ -168,6 +169,10 @@ masters_needing_certs: "{{ hostvars | oo_select_keys(groups['oo_masters_to_config'] | difference(groups['oo_first_master'])) | oo_filter_list(filter_attr='master_certs_missing') }}" + master_hostnames: "{{ hostvars + | oo_select_keys(groups['oo_masters_to_config']) + | oo_collect('openshift.common.all_hostnames') + | oo_flatten | unique }}" sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}" roles: - openshift_master_certificates @@ -199,12 +204,84 @@ validate_checksum: yes with_items: masters_needing_certs +- name: Inspect named certificates + hosts: oo_first_master + tasks: + - name: Collect certificate names + set_fact: + parsed_named_certificates: "{{ openshift_master_named_certificates | oo_parse_certificate_names(master_cert_config_dir, openshift.common.internal_hostnames) }}" + when: openshift_master_named_certificates is defined + +- name: Compute haproxy_backend_servers + hosts: localhost + connection: local + sudo: false + gather_facts: no + tasks: + - set_fact: + haproxy_backend_servers: "{{ hostvars | oo_select_keys(groups['oo_masters_to_config']) | oo_haproxy_backend_masters }}" + +- name: Configure load balancers + hosts: oo_lb_to_config + vars: + sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}" + haproxy_frontends: + - name: atomic-openshift-api + mode: tcp + options: + - tcplog + binds: + - "*:{{ hostvars[groups.oo_first_master.0].openshift.master.api_port }}" + default_backend: atomic-openshift-api + haproxy_backends: + - name: atomic-openshift-api + mode: tcp + option: tcplog + balance: source + servers: "{{ hostvars.localhost.haproxy_backend_servers }}" + roles: + - role: haproxy + when: groups.oo_masters_to_config | length > 1 + +- name: Generate master session keys + hosts: oo_first_master + tasks: + - fail: + msg: "Both openshift_master_session_auth_secrets and openshift_master_session_encryption_secrets must be provided if either variable is set" + when: (openshift_master_session_auth_secrets is defined and 
openshift_master_session_encryption_secrets is not defined) or (openshift_master_session_encryption_secrets is defined and openshift_master_session_auth_secrets is not defined) + - fail: + msg: "openshift_master_session_auth_secrets and openshift_master_encryption_secrets must be equal length" + when: (openshift_master_session_auth_secrets is defined and openshift_master_session_encryption_secrets is defined) and (openshift_master_session_auth_secrets | length != openshift_master_session_encryption_secrets | length) + - name: Generate session authentication key + command: /usr/bin/openssl rand -base64 24 + register: session_auth_output + with_sequence: count=1 + when: openshift_master_session_auth_secrets is undefined + - name: Generate session encryption key + command: /usr/bin/openssl rand -base64 24 + register: session_encryption_output + with_sequence: count=1 + when: openshift_master_session_encryption_secrets is undefined + - set_fact: + session_auth_secret: "{{ openshift_master_session_auth_secrets + | default(session_auth_output.results + | map(attribute='stdout') + | list) }}" + session_encryption_secret: "{{ openshift_master_session_encryption_secrets + | default(session_encryption_output.results + | map(attribute='stdout') + | list) }}" + - name: Configure master instances hosts: oo_masters_to_config + serial: 1 vars: + named_certificates: "{{ hostvars[groups['oo_first_master'][0]]['parsed_named_certificates'] | default([])}}" sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}" openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}" - embedded_etcd: "{{ openshift.master.embedded_etcd }}" + openshift_master_count: "{{ groups.oo_masters_to_config | length }}" + openshift_master_session_auth_secrets: "{{ hostvars[groups['oo_first_master'][0]]['session_auth_secret'] }}" + openshift_master_session_encryption_secrets: "{{ hostvars[groups['oo_first_master'][0]]['session_encryption_secret'] }}" pre_tasks: - name: Ensure certificate directory exists file: @@ -233,11 +310,25 @@ omc_cluster_hosts: "{{ groups.oo_masters_to_config | join(' ')}}" roles: - role: openshift_master_cluster - when: openshift_master_ha | bool + when: openshift_master_ha | bool and openshift.master.cluster_method == "pacemaker" - openshift_examples - role: openshift_cluster_metrics when: openshift.common.use_cluster_metrics | bool +- name: Determine cluster dns ip + hosts: oo_first_master + tasks: + - name: Get master service ip + command: "{{ openshift.common.client_binary }} get -o template svc kubernetes --template=\\{\\{.spec.clusterIP\\}\\}" + register: master_service_ip_output + when: openshift.common.version_greater_than_3_1_or_1_1 | bool + - set_fact: + cluster_dns_ip: "{{ hostvars[groups.oo_first_master.0].openshift.dns.ip }}" + when: not openshift.common.version_greater_than_3_1_or_1_1 | bool + - set_fact: + cluster_dns_ip: "{{ master_service_ip_output.stdout }}" + when: openshift.common.version_greater_than_3_1_or_1_1 | bool + - name: Enable cockpit hosts: oo_first_master vars: @@ -247,6 +338,14 @@ when: ( deployment_type in ['atomic-enterprise','openshift-enterprise'] ) and (osm_use_cockpit | bool or osm_use_cockpit is undefined ) +- name: Configure flannel + hosts: oo_first_master + vars: + etcd_urls: "{{ openshift.master.etcd_urls }}" + roles: + - role: flannel_register + when: openshift.common.use_flannel | bool + # Additional instance config for online deployments - name: Additional instance config hosts: oo_masters_deployment_type_online diff --git 
a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml index a14ca8e11..8da9e231f 100644 --- a/playbooks/common/openshift-node/config.yml +++ b/playbooks/common/openshift-node/config.yml @@ -38,6 +38,22 @@ node_subdir: node-{{ openshift.common.hostname }} config_dir: "{{ openshift.common.config_base }}/generated-configs/node-{{ openshift.common.hostname }}" node_cert_dir: "{{ openshift.common.config_base }}/node" + - name: Check status of flannel external etcd certificates + stat: + path: "{{ openshift.common.config_base }}/node/{{ item }}" + with_items: + - node.etcd-client.crt + - node.etcd-ca.crt + register: g_external_etcd_flannel_cert_stat_result + when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config and (openshift.common.use_flannel | bool) + - set_fact: + etcd_client_flannel_certs_missing: "{{ g_external_etcd_flannel_cert_stat_result.results + | map(attribute='stat.exists') + | list | intersect([false])}}" + etcd_cert_subdir: openshift-node-{{ openshift.common.hostname }} + etcd_cert_config_dir: "{{ openshift.common.config_base }}/node" + etcd_cert_prefix: node.etcd- + when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config and (openshift.common.use_flannel | bool) - name: Create temp directory for syncing certs hosts: localhost @@ -50,6 +66,64 @@ register: mktemp changed_when: False +- name: Configure flannel etcd certificates + hosts: oo_first_etcd + vars: + etcd_generated_certs_dir: /etc/etcd/generated_certs + sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}" + pre_tasks: + - set_fact: + etcd_needing_client_certs: "{{ hostvars + | oo_select_keys(groups['oo_nodes_to_config']) + | oo_filter_list(filter_attr='etcd_client_flannel_certs_missing') | default([]) }}" + when: etcd_client_flannel_certs_missing is defined and etcd_client_flannel_certs_missing + roles: + - role: etcd_certificates + post_tasks: + - name: Create a tarball of the etcd flannel certs + command: > + tar -czvf {{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz + -C {{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }} . 
+ args: + creates: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz" + with_items: etcd_needing_client_certs + when: etcd_client_flannel_certs_missing is defined and etcd_client_flannel_certs_missing + - name: Retrieve the etcd cert tarballs + fetch: + src: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz" + dest: "{{ sync_tmpdir }}/" + flat: yes + fail_on_missing: yes + validate_checksum: yes + with_items: etcd_needing_client_certs + when: etcd_client_flannel_certs_missing is defined and etcd_client_flannel_certs_missing + +- name: Copy the external etcd flannel certs to the nodes + hosts: oo_nodes_to_config + vars: + sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}" + tasks: + - name: Ensure certificate directory exists + file: + path: "{{ openshift.common.config_base }}/node" + state: directory + when: etcd_client_flannel_certs_missing is defined and etcd_client_flannel_certs_missing + - name: Unarchive the tarball on the master + unarchive: + src: "{{ sync_tmpdir }}/{{ etcd_cert_subdir }}.tgz" + dest: "{{ etcd_cert_config_dir }}" + when: etcd_client_flannel_certs_missing is defined and etcd_client_flannel_certs_missing + - file: + path: "{{ etcd_cert_config_dir }}/{{ item }}" + owner: root + group: root + mode: 0600 + with_items: + - node.etcd-client.crt + - node.etcd-client.key + - node.etcd-ca.crt + when: etcd_client_flannel_certs_missing is defined and etcd_client_flannel_certs_missing + - name: Create node certificates hosts: oo_first_master vars: @@ -84,6 +158,8 @@ vars: sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}" openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}" + etcd_urls: "{{ hostvars[groups.oo_first_master.0].openshift.master.etcd_urls }}" + embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}" pre_tasks: - name: Ensure certificate directory exists file: @@ -100,6 +176,8 @@ when: certs_missing roles: - openshift_node + - role: flannel + when: openshift.common.use_flannel | bool - role: nickhammond.logrotate - role: fluentd_node when: openshift.common.use_fluentd | bool diff --git a/playbooks/gce/openshift-cluster/config.yml b/playbooks/gce/openshift-cluster/config.yml index 6ca4f7395..745161bcb 100644 --- a/playbooks/gce/openshift-cluster/config.yml +++ b/playbooks/gce/openshift-cluster/config.yml @@ -16,6 +16,7 @@ - include: ../../common/openshift-cluster/config.yml vars: g_etcd_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-etcd' }}" + g_lb_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-lb' }}" g_masters_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-master' }}" g_nodes_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-node' }}" g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}" diff --git a/playbooks/gce/openshift-cluster/join_node.yml b/playbooks/gce/openshift-cluster/join_node.yml index 0dfa3e9d7..c8f6065cd 100644 --- a/playbooks/gce/openshift-cluster/join_node.yml +++ b/playbooks/gce/openshift-cluster/join_node.yml @@ -46,4 +46,4 @@ openshift_node_labels: "{{ lookup('oo_option', 'openshift_node_labels') }} " os_sdn_network_plugin_name: "redhat/openshift-ovs-subnet" osn_cluster_dns_domain: "{{ hostvars[groups.oo_first_master.0].openshift.dns.domain }}" - osn_cluster_dns_ip: "{{ hostvars[groups.oo_first_master.0].openshift.dns.ip }}" + osn_cluster_dns_ip: "{{ hostvars[groups.oo_first_master.0].cluster_dns_ip }}" diff --git a/playbooks/gce/openshift-cluster/launch.yml 
b/playbooks/gce/openshift-cluster/launch.yml index c22b897d5..8be5d53e7 100644 --- a/playbooks/gce/openshift-cluster/launch.yml +++ b/playbooks/gce/openshift-cluster/launch.yml @@ -9,7 +9,7 @@ - fail: msg="Deployment type not supported for gce provider yet" when: deployment_type == 'enterprise' - - include: ../../common/openshift-cluster/set_master_launch_facts_tasks.yml + - include: ../../common/openshift-cluster/tasks/set_master_launch_facts.yml - include: tasks/launch_instances.yml vars: instances: "{{ master_names }}" @@ -17,7 +17,7 @@ type: "{{ k8s_type }}" g_sub_host_type: "default" - - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml + - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml vars: type: "compute" count: "{{ num_nodes }}" @@ -28,7 +28,7 @@ type: "{{ k8s_type }}" g_sub_host_type: "{{ sub_host_type }}" - - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml + - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml vars: type: "infra" count: "{{ num_infra }}" diff --git a/playbooks/libvirt/openshift-cluster/config.yml b/playbooks/libvirt/openshift-cluster/config.yml index c208eee81..4d1ae22ff 100644 --- a/playbooks/libvirt/openshift-cluster/config.yml +++ b/playbooks/libvirt/openshift-cluster/config.yml @@ -15,6 +15,7 @@ - include: ../../common/openshift-cluster/config.yml vars: g_etcd_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-etcd' }}" + g_lb_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-lb' }}" g_masters_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-master' }}" g_nodes_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-node' }}" g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}" diff --git a/playbooks/libvirt/openshift-cluster/launch.yml b/playbooks/libvirt/openshift-cluster/launch.yml index d3e768de5..8d7949dd1 100644 --- a/playbooks/libvirt/openshift-cluster/launch.yml +++ b/playbooks/libvirt/openshift-cluster/launch.yml @@ -17,7 +17,7 @@ - include: tasks/configure_libvirt.yml - - include: ../../common/openshift-cluster/set_etcd_launch_facts_tasks.yml + - include: ../../common/openshift-cluster/tasks/set_etcd_launch_facts.yml - include: tasks/launch_instances.yml vars: instances: "{{ etcd_names }}" @@ -25,7 +25,7 @@ type: "{{ k8s_type }}" g_sub_host_type: "default" - - include: ../../common/openshift-cluster/set_master_launch_facts_tasks.yml + - include: ../../common/openshift-cluster/tasks/set_master_launch_facts.yml - include: tasks/launch_instances.yml vars: instances: "{{ master_names }}" @@ -33,7 +33,7 @@ type: "{{ k8s_type }}" g_sub_host_type: "default" - - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml + - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml vars: type: "compute" count: "{{ num_nodes }}" @@ -44,7 +44,7 @@ type: "{{ k8s_type }}" g_sub_host_type: "{{ sub_host_type }}" - - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml + - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml vars: type: "infra" count: "{{ num_infra }}" diff --git a/playbooks/libvirt/openshift-cluster/list.yml b/playbooks/libvirt/openshift-cluster/list.yml index eaedc4d0d..5954bb01e 100644 --- a/playbooks/libvirt/openshift-cluster/list.yml +++ b/playbooks/libvirt/openshift-cluster/list.yml @@ -18,6 +18,12 @@ - name: List Hosts hosts: oo_list_hosts + +- name: List Hosts + hosts: localhost + gather_facts: no + vars_files: + - vars.yml tasks: - debug: - msg: 
'public:{{ansible_default_ipv4.address}} private:{{ansible_default_ipv4.address}}' + msg: "{{ hostvars | oo_select_keys(groups[scratch_group] | default([])) | oo_pretty_print_cluster }}" diff --git a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml index 4b91c6da8..4825207c9 100644 --- a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml +++ b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml @@ -81,7 +81,7 @@ ansible_ssh_host: '{{ item.1 }}' ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" - groups: 'tag_env-{{ cluster }}, tag_host-type-{{ type }}, tag_env-host-type-{{ cluster }}-openshift-{{ type }}' + groups: 'tag_env-{{ cluster }}, tag_host-type-{{ type }}, tag_env-host-type-{{ cluster }}-openshift-{{ type }}, tag_sub-host-type-{{ g_sub_host_type }}' with_together: - instances - ips diff --git a/playbooks/libvirt/openshift-cluster/templates/domain.xml b/playbooks/libvirt/openshift-cluster/templates/domain.xml index df200e374..870bcf2a6 100644 --- a/playbooks/libvirt/openshift-cluster/templates/domain.xml +++ b/playbooks/libvirt/openshift-cluster/templates/domain.xml @@ -6,6 +6,7 @@ <ansible:tag>env-{{ cluster }}</ansible:tag> <ansible:tag>env-host-type-{{ cluster }}-openshift-{{ type }}</ansible:tag> <ansible:tag>host-type-{{ type }}</ansible:tag> + <ansible:tag>sub-host-type-{{ g_sub_host_type }}</ansible:tag> </ansible:tags> </metadata> <currentMemory unit='GiB'>1</currentMemory> diff --git a/playbooks/libvirt/openshift-cluster/templates/user-data b/playbooks/libvirt/openshift-cluster/templates/user-data index eacae7c7e..e0c966e45 100644 --- a/playbooks/libvirt/openshift-cluster/templates/user-data +++ b/playbooks/libvirt/openshift-cluster/templates/user-data @@ -19,5 +19,11 @@ system_info: ssh_authorized_keys: - {{ lookup('file', '~/.ssh/id_rsa.pub') }} +write_files: + - path: /etc/sudoers.d/00-openshift-no-requiretty + permissions: 440 + content: | + Defaults:openshift !requiretty + runcmd: - NETWORK_CONFIG=/etc/sysconfig/network-scripts/ifcfg-eth0; if ! 
grep DHCP_HOSTNAME ${NETWORK_CONFIG}; then echo 'DHCP_HOSTNAME="{{ item[0] }}.example.com"' >> ${NETWORK_CONFIG}; fi; pkill -9 dhclient; service network restart diff --git a/playbooks/openstack/openshift-cluster/config.yml b/playbooks/openstack/openshift-cluster/config.yml index a5ee2d6a5..888804e28 100644 --- a/playbooks/openstack/openshift-cluster/config.yml +++ b/playbooks/openstack/openshift-cluster/config.yml @@ -10,6 +10,7 @@ - include: ../../common/openshift-cluster/config.yml vars: g_etcd_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-etcd' }}" + g_lb_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-lb' }}" g_masters_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-master' }}" g_nodes_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-node' }}" g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}" diff --git a/playbooks/openstack/openshift-cluster/list.yml b/playbooks/openstack/openshift-cluster/list.yml index a75e350c7..fa194b072 100644 --- a/playbooks/openstack/openshift-cluster/list.yml +++ b/playbooks/openstack/openshift-cluster/list.yml @@ -19,6 +19,12 @@ - name: List Hosts hosts: oo_list_hosts + +- name: List Hosts + hosts: localhost + gather_facts: no + vars_files: + - vars.yml tasks: - debug: - msg: 'public:{{ansible_ssh_host}} private:{{ansible_default_ipv4.address}}' + msg: "{{ hostvars | oo_select_keys(groups[scratch_group] | default([])) | oo_pretty_print_cluster }}" diff --git a/roles/copr_cli/README.md b/roles/copr_cli/README.md new file mode 100644 index 000000000..edc68454e --- /dev/null +++ b/roles/copr_cli/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +This role manages Copr CLI. + +https://apps.fedoraproject.org/packages/copr-cli/ + +Requirements +------------ + +None + +Role Variables +-------------- + +None + +Dependencies +------------ + +None + +Example Playbook +---------------- + + - hosts: servers + roles: + - role: copr_cli + +License +------- + +Apache License, Version 2.0 + +Author Information +------------------ + +Thomas Wiest diff --git a/roles/copr_cli/defaults/main.yml b/roles/copr_cli/defaults/main.yml new file mode 100644 index 000000000..3b8adf910 --- /dev/null +++ b/roles/copr_cli/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for copr_cli diff --git a/roles/copr_cli/handlers/main.yml b/roles/copr_cli/handlers/main.yml new file mode 100644 index 000000000..c3dec5a4c --- /dev/null +++ b/roles/copr_cli/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for copr_cli diff --git a/roles/copr_cli/meta/main.yml b/roles/copr_cli/meta/main.yml new file mode 100644 index 000000000..f050281fd --- /dev/null +++ b/roles/copr_cli/meta/main.yml @@ -0,0 +1,14 @@ +--- +galaxy_info: + author: Thomas Wiest + description: Manages Copr CLI + company: Red Hat + license: Apache License, Version 2.0 + min_ansible_version: 1.2 + platforms: + - name: EL + versions: + - 7 + categories: + - packaging +dependencies: [] diff --git a/roles/copr_cli/tasks/main.yml b/roles/copr_cli/tasks/main.yml new file mode 100644 index 000000000..f7ef1c26e --- /dev/null +++ b/roles/copr_cli/tasks/main.yml @@ -0,0 +1,4 @@ +--- +- yum: + name: copr-cli + state: present diff --git a/roles/copr_cli/vars/main.yml b/roles/copr_cli/vars/main.yml new file mode 100644 index 000000000..1522c94d9 --- /dev/null +++ b/roles/copr_cli/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for copr_cli diff --git a/roles/flannel/README.md b/roles/flannel/README.md new file mode 100644 index 000000000..b8aa830ac --- /dev/null +++ b/roles/flannel/README.md 
@@ -0,0 +1,45 @@ +Role Name +========= + +Configure flannel on openshift nodes + +Requirements +------------ + +This role assumes it's being deployed on a RHEL/Fedora based host with package +named 'flannel' available via yum, in version superior to 0.3. + +Role Variables +-------------- + +| Name | Default value | Description | +|---------------------|-----------------------------------------|-----------------------------------------------| +| flannel_interface | ansible_default_ipv4.interface | interface to use for inter-host communication | +| flannel_etcd_key | /openshift.com/network | etcd prefix | +| etcd_hosts | etcd_urls | a list of etcd endpoints | +| etcd_conf_dir | {{ openshift.common.config_base }}/node | SSL certificates directory | +| etcd_peer_ca_file | {{ etcd_conf_dir }}/ca.crt | SSL CA to use for etcd | +| etcd_peer_cert_file | Openshift SSL cert | SSL cert to use for etcd | +| etcd_peer_key_file | Openshift SSL key | SSL key to use for etcd | + +Dependencies +------------ + +openshift_facts + +Example Playbook +---------------- + + - hosts: openshift_node + roles: + - { role: flannel, etcd_urls: ['https://127.0.0.1:2379'] } + +License +------- + +Apache License, Version 2.0 + +Author Information +------------------ + +Sylvain Baubeau <sbaubeau@redhat.com> diff --git a/roles/flannel/defaults/main.yaml b/roles/flannel/defaults/main.yaml new file mode 100644 index 000000000..34cebda9c --- /dev/null +++ b/roles/flannel/defaults/main.yaml @@ -0,0 +1,8 @@ +--- +flannel_interface: "{{ ansible_default_ipv4.interface }}" +flannel_etcd_key: /openshift.com/network +etcd_hosts: "{{ etcd_urls }}" +etcd_conf_dir: "{{ openshift.common.config_base }}/node" +etcd_peer_ca_file: "{{ etcd_conf_dir }}/{{ 'ca' if (embedded_etcd | bool) else 'node.etcd-ca' }}.crt" +etcd_peer_cert_file: "{{ etcd_conf_dir }}/{{ 'system:node:' + openshift.common.hostname if (embedded_etcd | bool) else 'node.etcd-client' }}.crt" +etcd_peer_key_file: "{{ etcd_conf_dir }}/{{ 'system:node:' + openshift.common.hostname if (embedded_etcd | bool) else 'node.etcd-client' }}.key" diff --git a/roles/flannel/handlers/main.yml b/roles/flannel/handlers/main.yml new file mode 100644 index 000000000..f9b9ae7f1 --- /dev/null +++ b/roles/flannel/handlers/main.yml @@ -0,0 +1,8 @@ +--- +- name: restart flanneld + sudo: true + service: name=flanneld state=restarted + +- name: restart docker + sudo: true + service: name=docker state=restarted diff --git a/roles/flannel/meta/main.yml b/roles/flannel/meta/main.yml new file mode 100644 index 000000000..909bdbfa4 --- /dev/null +++ b/roles/flannel/meta/main.yml @@ -0,0 +1,16 @@ +--- +galaxy_info: + author: Sylvain + description: flannel management + company: Red Hat, Inc. 
+ license: Apache License, Version 2.0 + min_ansible_version: 1.2 + platforms: + - name: EL + versions: + - 7 + categories: + - cloud + - system +dependencies: +- { role: openshift_facts } diff --git a/roles/flannel/tasks/main.yml b/roles/flannel/tasks/main.yml new file mode 100644 index 000000000..acfb009ec --- /dev/null +++ b/roles/flannel/tasks/main.yml @@ -0,0 +1,43 @@ +--- +- name: Install flannel + sudo: true + yum: pkg=flannel state=present + +- name: Set flannel etcd url + sudo: true + lineinfile: + dest: /etc/sysconfig/flanneld + backrefs: yes + regexp: "^(FLANNEL_ETCD=)" + line: '\1{{ etcd_hosts|join(",") }}' + +- name: Set flannel etcd key + sudo: true + lineinfile: + dest: /etc/sysconfig/flanneld + backrefs: yes + regexp: "^(FLANNEL_ETCD_KEY=)" + line: '\1{{ flannel_etcd_key }}' + +- name: Set flannel options + sudo: true + lineinfile: + dest: /etc/sysconfig/flanneld + backrefs: yes + regexp: "^#?(FLANNEL_OPTIONS=)" + line: '\1--iface {{ flannel_interface }} --etcd-cafile={{ etcd_peer_ca_file }} --etcd-keyfile={{ etcd_peer_key_file }} --etcd-certfile={{ etcd_peer_cert_file }}' + +- name: Enable flanneld + sudo: true + service: + name: flanneld + state: started + enabled: yes + register: start_result + +- name: Remove docker bridge ip + sudo: true + shell: ip a del `ip a show docker0 | grep "inet[[:space:]]" | awk '{print $2}'` dev docker0 + notify: + - restart docker + - restart node diff --git a/roles/flannel_register/README.md b/roles/flannel_register/README.md new file mode 100644 index 000000000..ba7541ab1 --- /dev/null +++ b/roles/flannel_register/README.md @@ -0,0 +1,47 @@ +Role Name +========= + +Register flannel configuration into etcd + +Requirements +------------ + +This role assumes it's being deployed on a RHEL/Fedora based host with package +named 'flannel' available via yum, in version superior to 0.3. 
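For reference, this role ends up writing a flannel configuration document into etcd at `{{ flannel_etcd_key }}/config` (see the task and template added further below). A minimal sketch of that document, assuming the role defaults from `defaults/main.yaml` (with `openshift.master.portal_net` unset, so the network falls back to `172.30.0.0/16`) and no overrides:

```json
{
  "Network": "172.30.0.0/16",
  "SubnetLen": 24,
  "SubnetMin": "172.30.5.0",
  "Backend": {
    "Type": "host-gw"
  }
}
```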
+ +Role Variables +-------------- + +| Name | Default value | Description | +|---------------------|-----------------------------------------------------|--------------------------------------------------| +| flannel_network | {{ openshift.master.portal_net }} or 172.30.0.0/16 | network to use for the flannel overlay (CIDR) | +| flannel_min_network | 172.30.5.0 | beginning of IP range for the subnet allocation | +| flannel_subnet_len | 24 | size of the subnet allocated to each host | +| flannel_etcd_key | /openshift.com/network | etcd prefix | +| etcd_hosts | etcd_urls | a list of etcd endpoints | +| etcd_conf_dir | {{ openshift.common.config_base }}/master | SSL certificates directory | +| etcd_peer_ca_file | {{ etcd_conf_dir }}/ca.crt | SSL CA to use for etcd | +| etcd_peer_cert_file | {{ etcd_conf_dir }}/master.etcd-client.crt | SSL cert to use for etcd | +| etcd_peer_key_file | {{ etcd_conf_dir }}/master.etcd-client.key | SSL key to use for etcd | + +Dependencies +------------ + +openshift_facts + +Example Playbook +---------------- + + - hosts: openshift_master + roles: + - { role: flannel_register } + +License +------- + +Apache License, Version 2.0 + +Author Information +------------------ + +Sylvain Baubeau <sbaubeau@redhat.com> diff --git a/roles/flannel_register/defaults/main.yaml b/roles/flannel_register/defaults/main.yaml new file mode 100644 index 000000000..269d1a17c --- /dev/null +++ b/roles/flannel_register/defaults/main.yaml @@ -0,0 +1,11 @@ +--- +flannel_network: "{{ openshift.master.portal_net | default('172.30.0.0/16', true) }}" +flannel_min_network: 172.30.5.0 +flannel_subnet_len: 24 +flannel_etcd_key: /openshift.com/network +etcd_hosts: "{{ etcd_urls }}" +etcd_conf_dir: "{{ openshift.common.config_base }}/master" +etcd_peer_ca_file: "{{ etcd_conf_dir + '/ca.crt' if (openshift.master.embedded_etcd | bool) else etcd_conf_dir + '/master.etcd-ca.crt' }}" +etcd_peer_cert_file: "{{ etcd_conf_dir }}/master.etcd-client.crt" +etcd_peer_key_file: "{{ etcd_conf_dir }}/master.etcd-client.key" + diff --git a/roles/flannel_register/meta/main.yml b/roles/flannel_register/meta/main.yml new file mode 100644 index 000000000..73bddcca4 --- /dev/null +++ b/roles/flannel_register/meta/main.yml @@ -0,0 +1,16 @@ +--- +galaxy_info: + author: Sylvain + description: register flannel configuration into etcd + company: Red Hat, Inc.
+ license: Apache License, Version 2.0 + min_ansible_version: 1.2 + platforms: + - name: EL + versions: + - 7 + categories: + - cloud + - system +dependencies: +- { role: openshift_facts } diff --git a/roles/flannel_register/tasks/main.yml b/roles/flannel_register/tasks/main.yml new file mode 100644 index 000000000..1629157c8 --- /dev/null +++ b/roles/flannel_register/tasks/main.yml @@ -0,0 +1,14 @@ +--- +- name: Assures /etc/flannel dir exists + sudo: true + file: path=/etc/flannel state=directory + +- name: Generate etcd configuration for etcd + sudo: true + template: + src: "flannel-config.json" + dest: "/etc/flannel/config.json" + +- name: Insert flannel configuration into etcd + sudo: true + command: 'curl -L --cacert "{{ etcd_peer_ca_file }}" --cert "{{ etcd_peer_cert_file }}" --key "{{ etcd_peer_key_file }}" "{{ etcd_hosts[0] }}/v2/keys{{ flannel_etcd_key }}/config" -XPUT --data-urlencode value@/etc/flannel/config.json' diff --git a/roles/flannel_register/templates/flannel-config.json b/roles/flannel_register/templates/flannel-config.json new file mode 100644 index 000000000..89ce4c30b --- /dev/null +++ b/roles/flannel_register/templates/flannel-config.json @@ -0,0 +1,8 @@ +{ + "Network": "{{ flannel_network }}", + "SubnetLen": {{ flannel_subnet_len }}, + "SubnetMin": "{{ flannel_min_network }}", + "Backend": { + "Type": "host-gw" + } +} diff --git a/roles/haproxy/README.md b/roles/haproxy/README.md new file mode 100644 index 000000000..5bc415066 --- /dev/null +++ b/roles/haproxy/README.md @@ -0,0 +1,34 @@ +HAProxy +======= + +TODO + +Requirements +------------ + +TODO + +Role Variables +-------------- + +TODO + +Dependencies +------------ + +TODO + +Example Playbook +---------------- + +TODO + +License +------- + +Apache License, Version 2.0 + +Author Information +------------------ + +Jason DeTiberus (jdetiber@redhat.com) diff --git a/roles/haproxy/defaults/main.yml b/roles/haproxy/defaults/main.yml new file mode 100644 index 000000000..7ba5bd485 --- /dev/null +++ b/roles/haproxy/defaults/main.yml @@ -0,0 +1,21 @@ +--- +haproxy_frontends: +- name: main + binds: + - "*:80" + default_backend: default + +haproxy_backends: +- name: default + balance: roundrobin + servers: + - name: web01 + address: 127.0.0.1:9000 + opts: check + +os_firewall_use_firewalld: False +os_firewall_allow: +- service: haproxy stats + port: "9000/tcp" +- service: haproxy balance + port: "8443/tcp" diff --git a/roles/haproxy/handlers/main.yml b/roles/haproxy/handlers/main.yml new file mode 100644 index 000000000..ee60adcab --- /dev/null +++ b/roles/haproxy/handlers/main.yml @@ -0,0 +1,5 @@ +--- +- name: restart haproxy + service: + name: haproxy + state: restarted diff --git a/roles/haproxy/meta/main.yml b/roles/haproxy/meta/main.yml new file mode 100644 index 000000000..0fad106a9 --- /dev/null +++ b/roles/haproxy/meta/main.yml @@ -0,0 +1,14 @@ +--- +galaxy_info: + author: Jason DeTiberus + description: HAProxy + company: Red Hat, Inc. 
+ license: Apache License, Version 2.0 + min_ansible_version: 1.9 + platforms: + - name: EL + versions: + - 7 +dependencies: +- { role: os_firewall } +- { role: openshift_repos } diff --git a/roles/haproxy/tasks/main.yml b/roles/haproxy/tasks/main.yml new file mode 100644 index 000000000..5638b7313 --- /dev/null +++ b/roles/haproxy/tasks/main.yml @@ -0,0 +1,25 @@ +--- +- name: Install haproxy + yum: + pkg: haproxy + state: present + +- name: Configure haproxy + template: + src: haproxy.cfg.j2 + dest: /etc/haproxy/haproxy.cfg + owner: root + group: root + mode: 0644 + notify: restart haproxy + +- name: Enable and start haproxy + service: + name: haproxy + state: started + enabled: yes + register: start_result + +- name: Pause 30 seconds if haproxy was just started + pause: seconds=30 + when: start_result | changed diff --git a/roles/haproxy/templates/haproxy.cfg.j2 b/roles/haproxy/templates/haproxy.cfg.j2 new file mode 100644 index 000000000..c932af72f --- /dev/null +++ b/roles/haproxy/templates/haproxy.cfg.j2 @@ -0,0 +1,76 @@ +# Global settings +#--------------------------------------------------------------------- +global + chroot /var/lib/haproxy + pidfile /var/run/haproxy.pid + maxconn 4000 + user haproxy + group haproxy + daemon + + # turn on stats unix socket + stats socket /var/lib/haproxy/stats + +#--------------------------------------------------------------------- +# common defaults that all the 'listen' and 'backend' sections will +# use if not designated in their block +#--------------------------------------------------------------------- +defaults + mode http + log global + option httplog + option dontlognull + option http-server-close + option forwardfor except 127.0.0.0/8 + option redispatch + retries 3 + timeout http-request 10s + timeout queue 1m + timeout connect 10s + timeout client 300s + timeout server 300s + timeout http-keep-alive 10s + timeout check 10s + maxconn 3000 + +listen stats :9000 + mode http + stats enable + stats uri / + +{% for frontend in haproxy_frontends %} +frontend {{ frontend.name }} +{% for bind in frontend.binds %} + bind {{ bind }} +{% endfor %} + default_backend {{ frontend.default_backend }} +{% if 'mode' in frontend %} + mode {{ frontend.mode }} +{% endif %} +{% if 'options' in frontend %} +{% for option in frontend.options %} + option {{ option }} +{% endfor %} +{% endif %} +{% if 'redirects' in frontend %} +{% for redirect in frontend.redirects %} + redirect {{ redirect }} +{% endfor %} +{% endif %} +{% endfor %} + +{% for backend in haproxy_backends %} +backend {{ backend.name }} + balance {{ backend.balance }} +{% if 'mode' in backend %} + mode {{ backend.mode }} +{% endif %} +{% if 'options' in backend %} +{% for option in backend.options %} + option {{ option }} +{% endfor %} +{% endif %} +{% for server in backend.servers %} + server {{ server.name }} {{ server.address }} {{ server.opts }} +{% endfor %} +{% endfor %} diff --git a/roles/kube_nfs_volumes/README.md b/roles/kube_nfs_volumes/README.md index 56c69c286..1520f79b2 100644 --- a/roles/kube_nfs_volumes/README.md +++ b/roles/kube_nfs_volumes/README.md @@ -44,6 +44,9 @@ kubernetes_url: https://10.245.1.2:6443 # Token to use for authentication to the API server kubernetes_token: tJdce6Fn3cL1112YoIJ5m2exzAbzcPZX + +# API Version to use for kubernetes +kube_api_version: v1 ``` ## Dependencies diff --git a/roles/kube_nfs_volumes/defaults/main.yml b/roles/kube_nfs_volumes/defaults/main.yml index e296492f9..bdd994d07 100644 --- a/roles/kube_nfs_volumes/defaults/main.yml +++ 
b/roles/kube_nfs_volumes/defaults/main.yml @@ -1,4 +1,10 @@ --- +kubernetes_url: https://172.30.0.1:443 + +kube_api_version: v1 + +kube_req_template: "../templates/{{ kube_api_version }}/nfs.json.j2" + # Options of NFS exports. nfs_export_options: "*(rw,no_root_squash,insecure,no_subtree_check)" diff --git a/roles/kube_nfs_volumes/tasks/main.yml b/roles/kube_nfs_volumes/tasks/main.yml index f4a506234..d1dcf261a 100644 --- a/roles/kube_nfs_volumes/tasks/main.yml +++ b/roles/kube_nfs_volumes/tasks/main.yml @@ -16,10 +16,11 @@ - include: nfs.yml - name: export physical volumes - uri: url={{ kubernetes_url }}/api/v1beta3/persistentvolumes - method=POST - body='{{ lookup("template", "../templates/nfs.json.j2") }}' - body_format=json - status_code=201 - HEADER_Authorization="Bearer {{ kubernetes_token }}" + uri: + url: "{{ kubernetes_url }}/api/{{ kube_api_version }}/persistentvolumes" + method: POST + body: "{{ lookup('template', kube_req_template) }}" + body_format: json + status_code: 201 + HEADER_Authorization: "Bearer {{ kubernetes_token }}" with_items: partition_pool diff --git a/roles/kube_nfs_volumes/templates/v1/nfs.json.j2 b/roles/kube_nfs_volumes/templates/v1/nfs.json.j2 new file mode 120000 index 000000000..49c1191bc --- /dev/null +++ b/roles/kube_nfs_volumes/templates/v1/nfs.json.j2 @@ -0,0 +1 @@ +../v1beta3/nfs.json.j2
\ No newline at end of file diff --git a/roles/kube_nfs_volumes/templates/nfs.json.j2 b/roles/kube_nfs_volumes/templates/v1beta3/nfs.json.j2 index b42886ef1..b42886ef1 100644 --- a/roles/kube_nfs_volumes/templates/nfs.json.j2 +++ b/roles/kube_nfs_volumes/templates/v1beta3/nfs.json.j2 diff --git a/roles/lib_zabbix/library/zbx_itemprototype.py b/roles/lib_zabbix/library/zbx_itemprototype.py index e7fd6fa21..43498c015 100644 --- a/roles/lib_zabbix/library/zbx_itemprototype.py +++ b/roles/lib_zabbix/library/zbx_itemprototype.py @@ -67,7 +67,24 @@ def get_template(zapi, template_name): return None return content['result'][0] -def get_type(ztype): +def get_multiplier(inval): + ''' Determine the multiplier + ''' + if inval == None or inval == '': + return None, 0 + + rval = None + try: + rval = int(inval) + except ValueError: + pass + + if rval: + return rval, 1 + + return rval, 0 + +def get_zabbix_type(ztype): ''' Determine which type of discoverrule this is ''' @@ -87,6 +104,7 @@ def get_type(ztype): 'telnet': 14, 'calculated': 15, 'JMX': 16, + 'SNMP trap': 17, } for typ in _types.keys(): @@ -153,16 +171,21 @@ def main(): name=dict(default=None, type='str'), key=dict(default=None, type='str'), description=dict(default=None, type='str'), + template_name=dict(default=None, type='str'), interfaceid=dict(default=None, type='int'), - ztype=dict(default='trapper', type='str'), + zabbix_type=dict(default='trapper', type='str'), value_type=dict(default='float', type='str'), delay=dict(default=60, type='int'), lifetime=dict(default=30, type='int'), state=dict(default='present', type='str'), status=dict(default='enabled', type='str'), applications=dict(default=[], type='list'), - template_name=dict(default=None, type='str'), discoveryrule_key=dict(default=None, type='str'), + interval=dict(default=60, type='int'), + delta=dict(default=0, type='int'), + multiplier=dict(default=None, type='str'), + units=dict(default=None, type='str'), + ), #supports_check_mode=True ) @@ -205,15 +228,23 @@ def main(): # Create and Update if state == 'present': + + formula, use_multiplier = get_multiplier(module.params['multiplier']) + params = {'name': module.params['name'], 'key_': module.params['key'], 'hostid': template['templateid'], 'interfaceid': module.params['interfaceid'], 'ruleid': get_rule_id(zapi, module.params['discoveryrule_key'], template['templateid']), - 'type': get_type(module.params['ztype']), + 'type': get_zabbix_type(module.params['zabbix_type']), 'value_type': get_value_type(module.params['value_type']), 'applications': get_app_ids(zapi, module.params['applications'], template['templateid']), + 'formula': formula, + 'multiplier': use_multiplier, 'description': module.params['description'], + 'units': module.params['units'], + 'delay': module.params['interval'], + 'delta': module.params['delta'], } if params['type'] in [2, 5, 7, 8, 11, 15]: diff --git a/roles/lib_zabbix/library/zbx_itservice.py b/roles/lib_zabbix/library/zbx_itservice.py new file mode 100644 index 000000000..a5ee97e15 --- /dev/null +++ b/roles/lib_zabbix/library/zbx_itservice.py @@ -0,0 +1,263 @@ +#!/usr/bin/env python +''' + Ansible module for zabbix itservices +''' +# vim: expandtab:tabstop=4:shiftwidth=4 +# +# Zabbix itservice ansible module +# +# +# Copyright 2015 Red Hat Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# This is in place because each module looks similar to each other. +# These need duplicate code as their behavior is very similar +# but different for each zabbix class. +# pylint: disable=duplicate-code + +# pylint: disable=import-error +from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection + +def exists(content, key='result'): + ''' Check if key exists in content or the size of content[key] > 0 + ''' + if not content.has_key(key): + return False + + if not content[key]: + return False + + return True + +def get_parent(dependencies): + '''Put dependencies into the proper update format''' + rval = None + for dep in dependencies: + if dep['relationship'] == 'parent': + return dep + return rval + +def format_dependencies(dependencies): + '''Put dependencies into the proper update format''' + rval = [] + for dep in dependencies: + rval.append({'dependsOnServiceid': dep['serviceid'], + 'soft': get_dependency_type(dep['dep_type']), + }) + + return rval + +def get_dependency_type(dep_type): + '''Determine the dependency type''' + rval = 0 + if 'soft' == dep_type: + rval = 1 + + return rval + +def get_service_id_by_name(zapi, dependencies): + '''Fetch the service id for an itservice''' + deps = [] + for dep in dependencies: + if dep['name'] == 'root': + deps.append(dep) + continue + + content = zapi.get_content('service', + 'get', + {'filter': {'name': dep['name']}, + 'selectDependencies': 'extend', + }) + if content.has_key('result') and content['result']: + dep['serviceid'] = content['result'][0]['serviceid'] + deps.append(dep) + + return deps + +def add_dependencies(zapi, service_name, dependencies): + '''Fetch the service id for an itservice + + Add a dependency on the parent for this current service item. 
+ ''' + + results = get_service_id_by_name(zapi, [{'name': service_name}]) + + content = {} + for dep in dependencies: + content = zapi.get_content('service', + 'adddependencies', + {'serviceid': results[0]['serviceid'], + 'dependsOnServiceid': dep['serviceid'], + 'soft': get_dependency_type(dep['dep_type']), + }) + if content.has_key('result') and content['result']: + continue + else: + break + + return content + +def get_show_sla(inc_sla): + ''' Determine the showsla parameter + ''' + rval = 1 + if 'do not calculate' in inc_sla: + rval = 0 + return rval + +def get_algorithm(inc_algorithm_str): + ''' + Determine which type of algorithm + ''' + rval = 0 + if 'at least one' in inc_algorithm_str: + rval = 1 + elif 'all' in inc_algorithm_str: + rval = 2 + + return rval + +# The branches are needed for CRUD and error handling +# pylint: disable=too-many-branches +def main(): + ''' + ansible zabbix module for zbx_itservice + ''' + + module = AnsibleModule( + argument_spec=dict( + zbx_server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), + zbx_user=dict(default=os.environ.get('ZABBIX_USER', None), type='str'), + zbx_password=dict(default=os.environ.get('ZABBIX_PASSWORD', None), type='str'), + zbx_debug=dict(default=False, type='bool'), + name=dict(default=None, type='str'), + algorithm=dict(default='do not calculate', choices=['do not calculate', 'at least one', 'all'], type='str'), + show_sla=dict(default='calculate', choices=['do not calculate', 'calculate'], type='str'), + good_sla=dict(default='99.9', type='float'), + sort_order=dict(default=1, type='int'), + state=dict(default='present', type='str'), + trigger_id=dict(default=None, type='int'), + dependencies=dict(default=[], type='list'), + dep_type=dict(default='hard', choices=['hard', 'soft'], type='str'), + ), + #supports_check_mode=True + ) + + zapi = ZabbixAPI(ZabbixConnection(module.params['zbx_server'], + module.params['zbx_user'], + module.params['zbx_password'], + module.params['zbx_debug'])) + + #Set the instance and the template for the rest of the calls + zbx_class_name = 'service' + state = module.params['state'] + + content = zapi.get_content(zbx_class_name, + 'get', + {'filter': {'name': module.params['name']}, + 'selectDependencies': 'extend', + }) + + #******# + # GET + #******# + if state == 'list': + module.exit_json(changed=False, results=content['result'], state="list") + + #******# + # DELETE + #******# + if state == 'absent': + if not exists(content): + module.exit_json(changed=False, state="absent") + + content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0]['serviceid']]) + module.exit_json(changed=True, results=content['result'], state="absent") + + # Create and Update + if state == 'present': + + dependencies = get_service_id_by_name(zapi, module.params['dependencies']) + params = {'name': module.params['name'], + 'algorithm': get_algorithm(module.params['algorithm']), + 'showsla': get_show_sla(module.params['show_sla']), + 'goodsla': module.params['good_sla'], + 'sortorder': module.params['sort_order'], + 'triggerid': module.params['trigger_id'] + } + + # Remove any None valued params + _ = [params.pop(key, None) for key in params.keys() if params[key] is None] + + #******# + # CREATE + #******# + if not exists(content): + content = zapi.get_content(zbx_class_name, 'create', params) + + if content.has_key('error'): + module.exit_json(failed=True, changed=True, results=content['error'], state="present") + + if dependencies: + content = add_dependencies(zapi,
module.params['name'], dependencies) + + if content.has_key('error'): + module.exit_json(failed=True, changed=True, results=content['error'], state="present") + + module.exit_json(changed=True, results=content['result'], state='present') + + + ######## + # UPDATE + ######## + params['dependencies'] = dependencies + differences = {} + zab_results = content['result'][0] + for key, value in params.items(): + + if key == 'goodsla': + if float(value) != float(zab_results[key]): + differences[key] = value + + elif key == 'dependencies': + zab_dep_ids = [item['serviceid'] for item in zab_results[key]] + user_dep_ids = [item['serviceid'] for item in dependencies] + if set(zab_dep_ids) != set(user_dep_ids): + differences[key] = format_dependencies(dependencies) + + elif zab_results[key] != value and zab_results[key] != str(value): + differences[key] = value + + if not differences: + module.exit_json(changed=False, results=zab_results, state="present") + + differences['serviceid'] = zab_results['serviceid'] + content = zapi.get_content(zbx_class_name, 'update', differences) + + if content.has_key('error'): + module.exit_json(failed=True, changed=False, results=content['error'], state="present") + + module.exit_json(changed=True, results=content['result'], state="present") + + module.exit_json(failed=True, + changed=False, + results='Unknown state passed. %s' % state, + state="unknown") + +# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled +# import module snippets. This are required +from ansible.module_utils.basic import * + +main() diff --git a/roles/lib_zabbix/library/zbx_trigger.py b/roles/lib_zabbix/library/zbx_trigger.py index ab7731faa..b5faefa70 100644 --- a/roles/lib_zabbix/library/zbx_trigger.py +++ b/roles/lib_zabbix/library/zbx_trigger.py @@ -136,6 +136,8 @@ def main(): status=dict(default=None, type='str'), state=dict(default='present', type='str'), template_name=dict(default=None, type='str'), + hostgroup_name=dict(default=None, type='str'), + query_type=dict(default='filter', choices=['filter', 'search'], type='str'), ), #supports_check_mode=True ) @@ -157,10 +159,11 @@ def main(): content = zapi.get_content(zbx_class_name, 'get', - {'filter': {'description': tname}, + {module.params['query_type']: {'description': tname}, 'expandExpression': True, 'selectDependencies': 'triggerid', 'templateids': templateid, + 'group': module.params['hostgroup_name'], }) # Get diff --git a/roles/lib_zabbix/tasks/create_template.yml b/roles/lib_zabbix/tasks/create_template.yml index ac9cf756b..44c4e6766 100644 --- a/roles/lib_zabbix/tasks/create_template.yml +++ b/roles/lib_zabbix/tasks/create_template.yml @@ -84,6 +84,10 @@ template_name: "{{ template.name }}" applications: "{{ item.applications }}" description: "{{ item.description | default('', True) }}" + multiplier: "{{ item.multiplier | default('', True) }}" + units: "{{ item.units | default('', True) }}" + interval: "{{ item.interval | default(60, True) }}" + delta: "{{ item.delta | default(0, True) }}" with_items: template.zitemprototypes when: template.zitemprototypes is defined diff --git a/roles/openshift_ansible_inventory/tasks/main.yml b/roles/openshift_ansible_inventory/tasks/main.yml index 9cc15c0a8..f6919dada 100644 --- a/roles/openshift_ansible_inventory/tasks/main.yml +++ b/roles/openshift_ansible_inventory/tasks/main.yml @@ -1,12 +1,16 @@ --- - yum: - name: openshift-ansible-inventory + name: "{{ item }}" state: present + with_items: + - openshift-ansible-inventory + - 
openshift-ansible-inventory-aws + - openshift-ansible-inventory-gce - name: copy: content: "{{ oo_inventory_accounts | to_nice_yaml }}" - dest: /etc/ansible/multi_ec2.yaml + dest: /etc/ansible/multi_inventory.yaml group: "{{ oo_inventory_group }}" owner: "{{ oo_inventory_owner }}" mode: "0640" @@ -20,17 +24,17 @@ - file: state: link - src: /usr/share/ansible/inventory/multi_ec2.py - dest: /etc/ansible/inventory/multi_ec2.py + src: /usr/share/ansible/inventory/multi_inventory.py + dest: /etc/ansible/inventory/multi_inventory.py owner: root group: libra_ops # This cron uses the above location to call its job - name: Cron to keep cache fresh cron: - name: 'multi_ec2_inventory' + name: 'multi_inventory' minute: '*/10' - job: '/usr/share/ansible/inventory/multi_ec2.py --refresh-cache &> /dev/null' + job: '/usr/share/ansible/inventory/multi_inventory.py --refresh-cache &> /dev/null' when: oo_cron_refresh_cache is defined and oo_cron_refresh_cache - name: Set cache location diff --git a/roles/openshift_common/tasks/main.yml b/roles/openshift_common/tasks/main.yml index 73bd28630..38d5a08e4 100644 --- a/roles/openshift_common/tasks/main.yml +++ b/roles/openshift_common/tasks/main.yml @@ -1,4 +1,8 @@ --- +- fail: + msg: Flannel can not be used with openshift sdn + when: openshift_use_openshift_sdn | default(false) | bool and openshift_use_flannel | default(false) | bool + - name: Set common Cluster facts openshift_facts: role: common @@ -13,6 +17,7 @@ sdn_network_plugin_name: "{{ os_sdn_network_plugin_name | default(None) }}" deployment_type: "{{ openshift_deployment_type }}" use_fluentd: "{{ openshift_use_fluentd | default(None) }}" + use_flannel: "{{ openshift_use_flannel | default(None) }}" - name: Set hostname hostname: name={{ openshift.common.hostname }} diff --git a/roles/openshift_examples/defaults/main.yml b/roles/openshift_examples/defaults/main.yml index 2043985ec..8e8bc6868 100644 --- a/roles/openshift_examples/defaults/main.yml +++ b/roles/openshift_examples/defaults/main.yml @@ -14,5 +14,7 @@ db_templates_base: "{{ examples_base }}/db-templates" xpaas_image_streams: "{{ examples_base }}/xpaas-streams/jboss-image-streams.json" xpaas_templates_base: "{{ examples_base }}/xpaas-templates" quickstarts_base: "{{ examples_base }}/quickstart-templates" +infrastructure_origin_base: "{{ examples_base }}/infrastructure-templates/origin" +infrastructure_enterprise_base: "{{ examples_base }}/infrastructure-templates/enterprise" openshift_examples_import_command: "create" diff --git a/roles/openshift_examples/examples-sync.sh b/roles/openshift_examples/examples-sync.sh index 21137e31b..a261a6ddd 100755 --- a/roles/openshift_examples/examples-sync.sh +++ b/roles/openshift_examples/examples-sync.sh @@ -5,17 +5,20 @@ # # This script should be run from openshift-ansible/roles/openshift_examples +XPAAS_VERSION=ose-v1.1.0 EXAMPLES_BASE=$(pwd)/files/examples find files/examples -name '*.json' -delete +find files/examples -name '*.yaml' -delete TEMP=`mktemp -d` pushd $TEMP + wget https://github.com/openshift/origin/archive/master.zip -O origin-master.zip wget https://github.com/openshift/django-ex/archive/master.zip -O django-ex-master.zip wget https://github.com/openshift/rails-ex/archive/master.zip -O rails-ex-master.zip wget https://github.com/openshift/nodejs-ex/archive/master.zip -O nodejs-ex-master.zip wget https://github.com/openshift/dancer-ex/archive/master.zip -O dancer-ex-master.zip wget https://github.com/openshift/cakephp-ex/archive/master.zip -O cakephp-ex-master.zip -wget 
https://github.com/jboss-openshift/application-templates/archive/ose-v1.0.2.zip -O application-templates-master.zip +wget https://github.com/jboss-openshift/application-templates/archive/${XPAAS_VERSION}.zip -O application-templates-master.zip unzip origin-master.zip unzip django-ex-master.zip unzip rails-ex-master.zip @@ -31,7 +34,13 @@ cp rails-ex-master/openshift/templates/* ${EXAMPLES_BASE}/quickstart-templates/ cp nodejs-ex-master/openshift/templates/* ${EXAMPLES_BASE}/quickstart-templates/ cp dancer-ex-master/openshift/templates/* ${EXAMPLES_BASE}/quickstart-templates/ cp cakephp-ex-master/openshift/templates/* ${EXAMPLES_BASE}/quickstart-templates/ -mv application-templates-master/jboss-image-streams.json ${EXAMPLES_BASE}/xpaas-streams/ -find application-templates-master/ -name '*.json' ! -wholename '*secret*' -exec mv {} ${EXAMPLES_BASE}/xpaas-templates/ \; +mv application-templates-${XPAAS_VERSION}/jboss-image-streams.json ${EXAMPLES_BASE}/xpaas-streams/ +find application-templates-${XPAAS_VERSION}/ -name '*.json' ! -wholename '*secret*' -exec mv {} ${EXAMPLES_BASE}/xpaas-templates/ \; + +wget https://raw.githubusercontent.com/openshift/origin-metrics/master/metrics.yaml -O ${EXAMPLES_BASE}/infrastructure-templates/origin/metrics-deployer.yaml +cp ${EXAMPLES_BASE}/infrastructure-templates/origin/metrics-*.yaml ${EXAMPLES_BASE}/infrastructure-templates/enterprise/ +wget https://raw.githubusercontent.com/openshift/origin-aggregated-logging/master/deployment/deployer.yaml -O ${EXAMPLES_BASE}/infrastructure-templates/origin/logging-deployer.yaml +wget https://raw.githubusercontent.com/openshift/origin-aggregated-logging/enterprise/deployment/deployer.yaml -O ${EXAMPLES_BASE}/infrastructure-templates/enterprise/logging-deployer.yaml + popd git diff files/examples diff --git a/roles/openshift_examples/files/examples/image-streams/image-streams-centos7.json b/roles/openshift_examples/files/examples/image-streams/image-streams-centos7.json index 268d680f4..1a78b1279 100644 --- a/roles/openshift_examples/files/examples/image-streams/image-streams-centos7.json +++ b/roles/openshift_examples/files/examples/image-streams/image-streams-centos7.json @@ -11,10 +11,13 @@ "creationTimestamp": null }, "spec": { - "dockerImageRepository": "openshift/ruby-20-centos7", "tags": [ { - "name": "latest" + "name": "latest", + "from": { + "Kind": "ImageStreamTag", + "Name": "2.0" + } }, { "name": "2.0", @@ -27,8 +30,23 @@ "sampleRepo": "https://github.com/openshift/ruby-ex.git" }, "from": { - "Kind": "ImageStreamTag", - "Name": "latest" + "Kind": "DockerImage", + "Name": "openshift/ruby-20-centos7:latest" + } + }, + { + "name": "2.2", + "annotations": { + "description": "Build and run Ruby 2.2 applications", + "iconClass": "icon-ruby", + "tags": "builder,ruby", + "supports": "ruby:2.2,ruby", + "version": "2.2", + "sampleRepo": "https://github.com/openshift/ruby-ex.git" + }, + "from": { + "Kind": "DockerImage", + "Name": "centos/ruby-22-centos7:latest" } } ] @@ -42,10 +60,13 @@ "creationTimestamp": null }, "spec": { - "dockerImageRepository": "openshift/nodejs-010-centos7", "tags": [ { - "name": "latest" + "name": "latest", + "from": { + "Kind": "ImageStreamTag", + "Name": "0.10" + } }, { "name": "0.10", @@ -58,8 +79,8 @@ "sampleRepo": "https://github.com/openshift/nodejs-ex.git" }, "from": { - "Kind": "ImageStreamTag", - "Name": "latest" + "Kind": "DockerImage", + "Name": "openshift/nodejs-010-centos7:latest" } } ] @@ -73,10 +94,13 @@ "creationTimestamp": null }, "spec": { - "dockerImageRepository": 
"openshift/perl-516-centos7", "tags": [ { - "name": "latest" + "name": "latest", + "from": { + "Kind": "ImageStreamTag", + "Name": "5.16" + } }, { "name": "5.16", @@ -89,9 +113,25 @@ "sampleRepo": "https://github.com/openshift/dancer-ex.git" }, "from": { - "Kind": "ImageStreamTag", - "Name": "latest" + "Kind": "DockerImage", + "Name": "openshift/perl-516-centos7:latest" } + }, + { + "name": "5.20", + "annotations": { + "description": "Build and run Perl 5.20 applications", + "iconClass": "icon-perl", + "tags": "builder,perl", + "supports":"perl:5.20,perl", + "version": "5.20", + "sampleRepo": "https://github.com/openshift/dancer-ex.git" + }, + "from": { + "Kind": "DockerImage", + "Name": "centos/perl-520-centos7:latest" + } + } ] } @@ -104,10 +144,13 @@ "creationTimestamp": null }, "spec": { - "dockerImageRepository": "openshift/php-55-centos7", "tags": [ { - "name": "latest" + "name": "latest", + "from": { + "Kind": "ImageStreamTag", + "Name": "5.5" + } }, { "name": "5.5", @@ -120,8 +163,23 @@ "sampleRepo": "https://github.com/openshift/cakephp-ex.git" }, "from": { - "Kind": "ImageStreamTag", - "Name": "latest" + "Kind": "DockerImage", + "Name": "openshift/php-55-centos7:latest" + } + }, + { + "name": "5.6", + "annotations": { + "description": "Build and run PHP 5.6 applications", + "iconClass": "icon-php", + "tags": "builder,php", + "supports":"php:5.6,php", + "version": "5.6", + "sampleRepo": "https://github.com/openshift/cakephp-ex.git" + }, + "from": { + "Kind": "DockerImage", + "Name": "centos/php-56-centos7:latest" } } ] @@ -135,10 +193,13 @@ "creationTimestamp": null }, "spec": { - "dockerImageRepository": "openshift/python-33-centos7", "tags": [ { - "name": "latest" + "name": "latest", + "from": { + "Kind": "ImageStreamTag", + "Name": "3.3" + } }, { "name": "3.3", @@ -151,8 +212,38 @@ "sampleRepo": "https://github.com/openshift/django-ex.git" }, "from": { - "Kind": "ImageStreamTag", - "Name": "latest" + "Kind": "DockerImage", + "Name": "openshift/python-33-centos7:latest" + } + }, + { + "name": "2.7", + "annotations": { + "description": "Build and run Python 2.7 applications", + "iconClass": "icon-python", + "tags": "builder,python", + "supports":"python:2.7,python", + "version": "2.7", + "sampleRepo": "https://github.com/openshift/django-ex.git" + }, + "from": { + "Kind": "DockerImage", + "Name": "centos/python-27-centos7:latest" + } + }, + { + "name": "3.4", + "annotations": { + "description": "Build and run Python 3.4 applications", + "iconClass": "icon-python", + "tags": "builder,python", + "supports":"python:3.4,python", + "version": "3.4", + "sampleRepo": "https://github.com/openshift/django-ex.git" + }, + "from": { + "Kind": "DockerImage", + "Name": "centos/python-34-centos7:latest" } } ] @@ -166,10 +257,13 @@ "creationTimestamp": null }, "spec": { - "dockerImageRepository": "openshift/wildfly-81-centos7", "tags": [ { - "name": "latest" + "name": "latest", + "from": { + "Kind": "ImageStreamTag", + "Name": "8.1" + } }, { "name": "8.1", @@ -182,8 +276,8 @@ "sampleRepo": "https://github.com/bparees/openshift-jee-sample.git" }, "from": { - "Kind": "ImageStreamTag", - "Name": "latest" + "Kind": "DockerImage", + "Name": "openshift/wildfly-81-centos7:latest" } } ] @@ -197,16 +291,26 @@ "creationTimestamp": null }, "spec": { - "dockerImageRepository": "openshift/mysql-55-centos7", "tags": [ { - "name": "latest" + "name": "latest", + "from": { + "Kind": "ImageStreamTag", + "Name": "5.5" + } }, { "name": "5.5", "from": { - "Kind": "ImageStreamTag", - "Name": "latest" + "Kind": 
"DockerImage", + "Name": "openshift/mysql-55-centos7:latest" + } + }, + { + "name": "5.6", + "from": { + "Kind": "DockerImage", + "Name": "centos/mysql-56-centos7:latest" } } ] @@ -220,16 +324,26 @@ "creationTimestamp": null }, "spec": { - "dockerImageRepository": "openshift/postgresql-92-centos7", "tags": [ { - "name": "latest" + "name": "latest", + "from": { + "Kind": "ImageStreamTag", + "Name": "9.2" + } }, { "name": "9.2", "from": { - "Kind": "ImageStreamTag", - "Name": "latest" + "Kind": "DockerImage", + "Name": "openshift/postgresql-92-centos7:latest" + } + }, + { + "name": "9.4", + "from": { + "Kind": "DockerImage", + "Name": "centos/postgresql-94-centos7:latest" } } ] @@ -243,16 +357,26 @@ "creationTimestamp": null }, "spec": { - "dockerImageRepository": "openshift/mongodb-24-centos7", "tags": [ { - "name": "latest" + "name": "latest", + "from": { + "Kind": "ImageStreamTag", + "Name": "2.4" + } }, { "name": "2.4", "from": { - "Kind": "ImageStreamTag", - "Name": "latest" + "Kind": "DockerImage", + "Name": "openshift/mongodb-24-centos7:latest" + } + }, + { + "name": "2.6", + "from": { + "Kind": "DockerImage", + "Name": "centos/mongodb-26-centos7:latest" } } ] @@ -266,16 +390,19 @@ "creationTimestamp": null }, "spec": { - "dockerImageRepository": "openshift/jenkins-1-centos7", "tags": [ { - "name": "latest" + "name": "latest", + "from": { + "Kind": "ImageStreamTag", + "Name": "1" + } }, { "name": "1", "from": { - "Kind": "ImageStreamTag", - "Name": "latest" + "Kind": "DockerImage", + "Name": "openshift/jenkins-1-centos7:latest" } } ] diff --git a/roles/openshift_examples/files/examples/image-streams/image-streams-rhel7.json b/roles/openshift_examples/files/examples/image-streams/image-streams-rhel7.json index aa62ebd53..d2a8cfb1d 100644 --- a/roles/openshift_examples/files/examples/image-streams/image-streams-rhel7.json +++ b/roles/openshift_examples/files/examples/image-streams/image-streams-rhel7.json @@ -11,10 +11,13 @@ "creationTimestamp": null }, "spec": { - "dockerImageRepository": "registry.access.redhat.com/openshift3/ruby-20-rhel7", "tags": [ { - "name": "latest" + "name": "latest", + "from": { + "Kind": "ImageStreamTag", + "Name": "2.0" + } }, { "name": "2.0", @@ -27,8 +30,23 @@ "sampleRepo": "https://github.com/openshift/ruby-ex.git" }, "from": { - "Kind": "ImageStreamTag", - "Name": "latest" + "Kind": "DockerImage", + "Name": "registry.access.redhat.com/openshift3/ruby-20-rhel7:latest" + } + }, + { + "name": "2.2", + "annotations": { + "description": "Build and run Ruby 2.2 applications", + "iconClass": "icon-ruby", + "tags": "builder,ruby", + "supports": "ruby:2.2,ruby", + "version": "2.2", + "sampleRepo": "https://github.com/openshift/ruby-ex.git" + }, + "from": { + "Kind": "DockerImage", + "Name": "registry.access.redhat.com/rhscl/ruby-22-rhel7:latest" } } ] @@ -42,10 +60,13 @@ "creationTimestamp": null }, "spec": { - "dockerImageRepository": "registry.access.redhat.com/openshift3/nodejs-010-rhel7", "tags": [ { - "name": "latest" + "name": "latest", + "from": { + "Kind": "ImageStreamTag", + "Name": "0.10" + } }, { "name": "0.10", @@ -58,8 +79,8 @@ "sampleRepo": "https://github.com/openshift/nodejs-ex.git" }, "from": { - "Kind": "ImageStreamTag", - "Name": "latest" + "Kind": "DockerImage", + "Name": "registry.access.redhat.com/openshift3/nodejs-010-rhel7:latest" } } ] @@ -73,10 +94,13 @@ "creationTimestamp": null }, "spec": { - "dockerImageRepository": "registry.access.redhat.com/openshift3/perl-516-rhel7", "tags": [ { - "name": "latest" + "name": "latest", + "from": { + 
"Kind": "ImageStreamTag", + "Name": "5.16" + } }, { "name": "5.16", @@ -89,9 +113,25 @@ "sampleRepo": "https://github.com/openshift/dancer-ex.git" }, "from": { - "Kind": "ImageStreamTag", - "Name": "latest" + "Kind": "DockerImage", + "Name": "registry.access.redhat.com/openshift3/perl-516-rhel7:latest" + } + }, + { + "name": "5.20", + "annotations": { + "description": "Build and run Perl 5.20 applications", + "iconClass": "icon-perl", + "tags": "builder,perl", + "supports":"perl:5.20,perl", + "version": "5.20", + "sampleRepo": "https://github.com/openshift/dancer-ex.git" + }, + "from": { + "Kind": "DockerImage", + "Name": "registry.access.redhat.com/rhscl/perl-520-rhel7:latest" } + } ] } @@ -104,10 +144,13 @@ "creationTimestamp": null }, "spec": { - "dockerImageRepository": "registry.access.redhat.com/openshift3/php-55-rhel7", "tags": [ { - "name": "latest" + "name": "latest", + "from": { + "Kind": "ImageStreamTag", + "Name": "5.5" + } }, { "name": "5.5", @@ -120,8 +163,23 @@ "sampleRepo": "https://github.com/openshift/cakephp-ex.git" }, "from": { - "Kind": "ImageStreamTag", - "Name": "latest" + "Kind": "DockerImage", + "Name": "registry.access.redhat.com/openshift3/php-55-rhel7:latest" + } + }, + { + "name": "5.6", + "annotations": { + "description": "Build and run PHP 5.6 applications", + "iconClass": "icon-php", + "tags": "builder,php", + "supports":"php:5.6,php", + "version": "5.6", + "sampleRepo": "https://github.com/openshift/cakephp-ex.git" + }, + "from": { + "Kind": "DockerImage", + "Name": "registry.access.redhat.com/rhscl/php-56-rhel7:latest" } } ] @@ -135,10 +193,13 @@ "creationTimestamp": null }, "spec": { - "dockerImageRepository": "registry.access.redhat.com/openshift3/python-33-rhel7", "tags": [ { - "name": "latest" + "name": "latest", + "from": { + "Kind": "ImageStreamTag", + "Name": "3.3" + } }, { "name": "3.3", @@ -151,8 +212,38 @@ "sampleRepo": "https://github.com/openshift/django-ex.git" }, "from": { - "Kind": "ImageStreamTag", - "Name": "latest" + "Kind": "DockerImage", + "Name": "registry.access.redhat.com/openshift3/python-33-rhel7:latest" + } + }, + { + "name": "2.7", + "annotations": { + "description": "Build and run Python 2.7 applications", + "iconClass": "icon-python", + "tags": "builder,python", + "supports":"python:2.7,python", + "version": "2.7", + "sampleRepo": "https://github.com/openshift/django-ex.git" + }, + "from": { + "Kind": "DockerImage", + "Name": "registry.access.redhat.com/rhscl/python-27-rhel7:latest" + } + }, + { + "name": "3.4", + "annotations": { + "description": "Build and run Python 3.4 applications", + "iconClass": "icon-python", + "tags": "builder,python", + "supports":"python:3.4,python", + "version": "3.4", + "sampleRepo": "https://github.com/openshift/django-ex.git" + }, + "from": { + "Kind": "DockerImage", + "Name": "registry.access.redhat.com/rhscl/python-34-rhel7:latest" } } ] @@ -166,16 +257,26 @@ "creationTimestamp": null }, "spec": { - "dockerImageRepository": "registry.access.redhat.com/openshift3/mysql-55-rhel7", "tags": [ { - "name": "latest" + "name": "latest", + "from": { + "Kind": "ImageStreamTag", + "Name": "5.5" + } }, { "name": "5.5", "from": { - "Kind": "ImageStreamTag", - "Name": "latest" + "Kind": "DockerImage", + "Name": "registry.access.redhat.com/openshift3/mysql-55-rhel7:latest" + } + }, + { + "name": "5.6", + "from": { + "Kind": "DockerImage", + "Name": "registry.access.redhat.com/rhscl/mysql-56-rhel7:latest" } } ] @@ -189,16 +290,26 @@ "creationTimestamp": null }, "spec": { - "dockerImageRepository": 
"registry.access.redhat.com/openshift3/postgresql-92-rhel7", "tags": [ { - "name": "latest" + "name": "latest", + "from": { + "Kind": "ImageStreamTag", + "Name": "9.2" + } }, { "name": "9.2", "from": { - "Kind": "ImageStreamTag", - "Name": "latest" + "Kind": "DockerImage", + "Name": "registry.access.redhat.com/openshift3/postgresql-92-rhel7:latest" + } + }, + { + "name": "9.4", + "from": { + "Kind": "DockerImage", + "Name": "registry.access.redhat.com/rhscl/postgresql-94-rhel7:latest" } } ] @@ -212,16 +323,26 @@ "creationTimestamp": null }, "spec": { - "dockerImageRepository": "registry.access.redhat.com/openshift3/mongodb-24-rhel7", "tags": [ { - "name": "latest" + "name": "latest", + "from": { + "Kind": "ImageStreamTag", + "Name": "2.4" + } }, { "name": "2.4", "from": { - "Kind": "ImageStreamTag", - "Name": "latest" + "Kind": "DockerImage", + "Name": "registry.access.redhat.com/openshift3/mongodb-24-rhel7:latest" + } + }, + { + "name": "2.6", + "from": { + "Kind": "DockerImage", + "Name": "registry.access.redhat.com/rhscl/mongodb-26-rhel7:latest" } } ] @@ -235,16 +356,19 @@ "creationTimestamp": null }, "spec": { - "dockerImageRepository": "registry.access.redhat.com/openshift3/jenkins-1-rhel7", "tags": [ { - "name": "latest" + "name": "latest", + "from": { + "Kind": "ImageStreamTag", + "Name": "1" + } }, { "name": "1", "from": { - "Kind": "ImageStreamTag", - "Name": "latest" + "Kind": "DockerImage", + "Name": "registry.access.redhat.com/openshift3/jenkins-1-rhel7:latest" } } ] diff --git a/roles/openshift_examples/files/examples/infrastructure-templates/enterprise/logging-deployer.yaml b/roles/openshift_examples/files/examples/infrastructure-templates/enterprise/logging-deployer.yaml new file mode 100644 index 000000000..b3b60bf9b --- /dev/null +++ b/roles/openshift_examples/files/examples/infrastructure-templates/enterprise/logging-deployer.yaml @@ -0,0 +1,151 @@ +apiVersion: "v1" +kind: "Template" +metadata: + name: logging-deployer-template + annotations: + description: "Template for deploying everything needed for aggregated logging. Requires cluster-admin 'logging-deployer' service account and 'logging-deployer' secret." 
+ tags: "infrastructure" +labels: + logging-infra: deployer + provider: openshift + component: deployer +objects: +- + apiVersion: v1 + kind: Pod + metadata: + generateName: logging-deployer- + spec: + containers: + - image: ${IMAGE_PREFIX}logging-deployment:${IMAGE_VERSION} + imagePullPolicy: Always + name: deployer + volumeMounts: + - name: secret + mountPath: /secret + readOnly: true + - name: empty + mountPath: /etc/deploy + env: + - name: PROJECT + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: IMAGE_PREFIX + value: ${IMAGE_PREFIX} + - name: IMAGE_VERSION + value: ${IMAGE_VERSION} + - name: ENABLE_OPS_CLUSTER + value: ${ENABLE_OPS_CLUSTER} + - name: KIBANA_HOSTNAME + value: ${KIBANA_HOSTNAME} + - name: KIBANA_OPS_HOSTNAME + value: ${KIBANA_OPS_HOSTNAME} + - name: PUBLIC_MASTER_URL + value: ${PUBLIC_MASTER_URL} + - name: MASTER_URL + value: ${MASTER_URL} + - name: ES_INSTANCE_RAM + value: ${ES_INSTANCE_RAM} + - name: ES_CLUSTER_SIZE + value: ${ES_CLUSTER_SIZE} + - name: ES_NODE_QUORUM + value: ${ES_NODE_QUORUM} + - name: ES_RECOVER_AFTER_NODES + value: ${ES_RECOVER_AFTER_NODES} + - name: ES_RECOVER_EXPECTED_NODES + value: ${ES_RECOVER_EXPECTED_NODES} + - name: ES_RECOVER_AFTER_TIME + value: ${ES_RECOVER_AFTER_TIME} + - name: ES_OPS_INSTANCE_RAM + value: ${ES_OPS_INSTANCE_RAM} + - name: ES_OPS_CLUSTER_SIZE + value: ${ES_OPS_CLUSTER_SIZE} + - name: ES_OPS_NODE_QUORUM + value: ${ES_OPS_NODE_QUORUM} + - name: ES_OPS_RECOVER_AFTER_NODES + value: ${ES_OPS_RECOVER_AFTER_NODES} + - name: ES_OPS_RECOVER_EXPECTED_NODES + value: ${ES_OPS_RECOVER_EXPECTED_NODES} + - name: ES_OPS_RECOVER_AFTER_TIME + value: ${ES_OPS_RECOVER_AFTER_TIME} + dnsPolicy: ClusterFirst + restartPolicy: Never + serviceAccount: logging-deployer + volumes: + - name: empty + emptyDir: {} + - name: secret + secret: + secretName: logging-deployer +parameters: +- + description: 'Specify prefix for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set prefix "openshift/origin-"' + name: IMAGE_PREFIX + value: "registry.access.redhat.com/openshift3/" +- + description: 'Specify version for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set version "v1.1"' + name: IMAGE_VERSION + value: "3.1.0" +- + description: "If true, set up to use a second ES cluster for ops logs." + name: ENABLE_OPS_CLUSTER + value: "false" +- + description: "External hostname where clients will reach kibana" + name: KIBANA_HOSTNAME + required: true +- + description: "External hostname at which admins will visit the ops Kibana." + name: KIBANA_OPS_HOSTNAME + value: kibana-ops.example.com +- + description: "External URL for the master, for OAuth purposes" + name: PUBLIC_MASTER_URL + required: true +- + description: "Internal URL for the master, for authentication retrieval" + name: MASTER_URL + value: "https://kubernetes.default.svc.cluster.local" +- + description: "Amount of RAM to reserve per ElasticSearch instance." + name: ES_INSTANCE_RAM + value: "8G" +- + description: "How many instances of ElasticSearch to deploy." + name: ES_CLUSTER_SIZE + required: true +- + description: "Number of nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1." + name: ES_NODE_QUORUM +- + description: "Number of nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_CLUSTER_SIZE." 
+ name: ES_RECOVER_AFTER_NODES +- + description: "Number of nodes desired to be present before the cluster will recover from a full restart. By default, ES_CLUSTER_SIZE." + name: ES_RECOVER_EXPECTED_NODES +- + description: "Timeout for *expected* nodes to be present when cluster is recovering from a full restart." + name: ES_RECOVER_AFTER_TIME + value: "5m" +- + description: "Amount of RAM to reserve per ops ElasticSearch instance." + name: ES_OPS_INSTANCE_RAM + value: "8G" +- + description: "How many ops instances of ElasticSearch to deploy. By default, ES_CLUSTER_SIZE." + name: ES_OPS_CLUSTER_SIZE +- + description: "Number of ops nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1." + name: ES_OPS_NODE_QUORUM +- + description: "Number of ops nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_OPS_CLUSTER_SIZE." + name: ES_OPS_RECOVER_AFTER_NODES +- + description: "Number of ops nodes desired to be present before the cluster will recover from a full restart. By default, ES_OPS_CLUSTER_SIZE." + name: ES_OPS_RECOVER_EXPECTED_NODES +- + description: "Timeout for *expected* ops nodes to be present when cluster is recovering from a full restart." + name: ES_OPS_RECOVER_AFTER_TIME + value: "5m" + diff --git a/roles/openshift_examples/files/examples/infrastructure-templates/enterprise/metrics-deployer.yaml b/roles/openshift_examples/files/examples/infrastructure-templates/enterprise/metrics-deployer.yaml new file mode 100644 index 000000000..d823b2587 --- /dev/null +++ b/roles/openshift_examples/files/examples/infrastructure-templates/enterprise/metrics-deployer.yaml @@ -0,0 +1,116 @@ +#!/bin/bash +# +# Copyright 2014-2015 Red Hat, Inc. and/or its affiliates +# and other contributors as indicated by the @author tags. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +apiVersion: "v1" +kind: "Template" +metadata: + name: metrics-deployer-template + annotations: + description: "Template for deploying the required Metrics integration. Requires cluster-admin 'metrics-deployer' service account and 'metrics-deployer' secret." 
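Both deployer templates in this change state the same precondition in their descriptions: a service account and a secret named after the deployer must exist before the template is processed. A hedged preparation sketch for the logging case; the project name, secret contents, and role grant are assumptions, not part of the template:

```
# Assumed project "logging"; substitute your own.
oc project logging

# Placeholder secret; a real deployment would add certificates and keys here.
oc secrets new logging-deployer nothing=/dev/null

# Service account the deployer pod runs as (matches the template's serviceAccount field).
echo '{"apiVersion":"v1","kind":"ServiceAccount","metadata":{"name":"logging-deployer"}}' | oc create -f -

# The description above mentions cluster-admin level access for the deployer; one possible grant,
# adjust to your cluster's policy.
oadm policy add-cluster-role-to-user cluster-admin system:serviceaccount:logging:logging-deployer
```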
+ tags: "infrastructure" +labels: + metrics-infra: deployer + provider: openshift + component: deployer +objects: +- + apiVersion: v1 + kind: Pod + metadata: + generateName: metrics-deployer- + spec: + containers: + - image: ${IMAGE_PREFIX}metrics-deployer:${IMAGE_VERSION} + name: deployer + volumeMounts: + - name: secret + mountPath: /secret + readOnly: true + - name: empty + mountPath: /etc/deploy + env: + - name: PROJECT + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: IMAGE_PREFIX + value: ${IMAGE_PREFIX} + - name: IMAGE_VERSION + value: ${IMAGE_VERSION} + - name: PUBLIC_MASTER_URL + value: ${PUBLIC_MASTER_URL} + - name: MASTER_URL + value: ${MASTER_URL} + - name: REDEPLOY + value: ${REDEPLOY} + - name: USE_PERSISTENT_STORAGE + value: ${USE_PERSISTENT_STORAGE} + - name: HAWKULAR_METRICS_HOSTNAME + value: ${HAWKULAR_METRICS_HOSTNAME} + - name: CASSANDRA_NODES + value: ${CASSANDRA_NODES} + - name: CASSANDRA_PV_SIZE + value: ${CASSANDRA_PV_SIZE} + - name: METRIC_DURATION + value: ${METRIC_DURATION} + dnsPolicy: ClusterFirst + restartPolicy: Never + serviceAccount: metrics-deployer + volumes: + - name: empty + emptyDir: {} + - name: secret + secret: + secretName: metrics-deployer +parameters: +- + description: 'Specify prefix for metrics components; e.g. for "openshift/origin-metrics-deployer:v1.1", set prefix "openshift/origin-"' + name: IMAGE_PREFIX + value: "hawkular/" +- + description: 'Specify version for metrics components; e.g. for "openshift/origin-metrics-deployer:v1.1", set version "v1.1"' + name: IMAGE_VERSION + value: "0.7.0-SNAPSHOT" +- + description: "Internal URL for the master, for authentication retrieval" + name: MASTER_URL + value: "https://kubernetes.default.svc:443" +- + description: "External hostname where clients will reach Hawkular Metrics" + name: HAWKULAR_METRICS_HOSTNAME + required: true +- + description: "If set to true the deployer will try and delete all the existing components before trying to redeploy." + name: REDEPLOY + value: "false" +- + description: "Set to true for persistent storage, set to false to use non persistent storage" + name: USE_PERSISTENT_STORAGE + value: "true" +- + description: "The number of Cassandra Nodes to deploy for the initial cluster" + name: CASSANDRA_NODES + value: "1" +- + description: "The persistent volume size for each of the Cassandra nodes" + name: CASSANDRA_PV_SIZE + value: "1Gi" +- + description: "How many days metrics should be stored for." + name: METRIC_DURATION + value: "7" diff --git a/roles/openshift_examples/files/examples/infrastructure-templates/origin/logging-deployer.yaml b/roles/openshift_examples/files/examples/infrastructure-templates/origin/logging-deployer.yaml new file mode 100644 index 000000000..4c798e148 --- /dev/null +++ b/roles/openshift_examples/files/examples/infrastructure-templates/origin/logging-deployer.yaml @@ -0,0 +1,151 @@ +apiVersion: "v1" +kind: "Template" +metadata: + name: logging-deployer-template + annotations: + description: "Template for deploying everything needed for aggregated logging. Requires cluster-admin 'logging-deployer' service account and 'logging-deployer' secret." 
+ tags: "infrastructure" +labels: + logging-infra: deployer + provider: openshift + component: deployer +objects: +- + apiVersion: v1 + kind: Pod + metadata: + generateName: logging-deployer- + spec: + containers: + - image: ${IMAGE_PREFIX}logging-deployment:${IMAGE_VERSION} + imagePullPolicy: Always + name: deployer + volumeMounts: + - name: secret + mountPath: /secret + readOnly: true + - name: empty + mountPath: /etc/deploy + env: + - name: PROJECT + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: IMAGE_PREFIX + value: ${IMAGE_PREFIX} + - name: IMAGE_VERSION + value: ${IMAGE_VERSION} + - name: ENABLE_OPS_CLUSTER + value: ${ENABLE_OPS_CLUSTER} + - name: KIBANA_HOSTNAME + value: ${KIBANA_HOSTNAME} + - name: KIBANA_OPS_HOSTNAME + value: ${KIBANA_OPS_HOSTNAME} + - name: PUBLIC_MASTER_URL + value: ${PUBLIC_MASTER_URL} + - name: MASTER_URL + value: ${MASTER_URL} + - name: ES_INSTANCE_RAM + value: ${ES_INSTANCE_RAM} + - name: ES_CLUSTER_SIZE + value: ${ES_CLUSTER_SIZE} + - name: ES_NODE_QUORUM + value: ${ES_NODE_QUORUM} + - name: ES_RECOVER_AFTER_NODES + value: ${ES_RECOVER_AFTER_NODES} + - name: ES_RECOVER_EXPECTED_NODES + value: ${ES_RECOVER_EXPECTED_NODES} + - name: ES_RECOVER_AFTER_TIME + value: ${ES_RECOVER_AFTER_TIME} + - name: ES_OPS_INSTANCE_RAM + value: ${ES_OPS_INSTANCE_RAM} + - name: ES_OPS_CLUSTER_SIZE + value: ${ES_OPS_CLUSTER_SIZE} + - name: ES_OPS_NODE_QUORUM + value: ${ES_OPS_NODE_QUORUM} + - name: ES_OPS_RECOVER_AFTER_NODES + value: ${ES_OPS_RECOVER_AFTER_NODES} + - name: ES_OPS_RECOVER_EXPECTED_NODES + value: ${ES_OPS_RECOVER_EXPECTED_NODES} + - name: ES_OPS_RECOVER_AFTER_TIME + value: ${ES_OPS_RECOVER_AFTER_TIME} + dnsPolicy: ClusterFirst + restartPolicy: Never + serviceAccount: logging-deployer + volumes: + - name: empty + emptyDir: {} + - name: secret + secret: + secretName: logging-deployer +parameters: +- + description: 'Specify prefix for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set prefix "openshift/origin-"' + name: IMAGE_PREFIX + value: "docker.io/openshift/origin-" +- + description: 'Specify version for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set version "v1.1"' + name: IMAGE_VERSION + value: "latest" +- + description: "If true, set up to use a second ES cluster for ops logs." + name: ENABLE_OPS_CLUSTER + value: "false" +- + description: "External hostname where clients will reach kibana" + name: KIBANA_HOSTNAME + required: true +- + description: "External hostname at which admins will visit the ops Kibana." + name: KIBANA_OPS_HOSTNAME + value: kibana-ops.example.com +- + description: "External URL for the master, for OAuth purposes" + name: PUBLIC_MASTER_URL + required: true +- + description: "Internal URL for the master, for authentication retrieval" + name: MASTER_URL + value: "https://kubernetes.default.svc.cluster.local" +- + description: "Amount of RAM to reserve per ElasticSearch instance." + name: ES_INSTANCE_RAM + value: "8G" +- + description: "How many instances of ElasticSearch to deploy." + name: ES_CLUSTER_SIZE + required: true +- + description: "Number of nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1." + name: ES_NODE_QUORUM +- + description: "Number of nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_CLUSTER_SIZE." 
+ name: ES_RECOVER_AFTER_NODES +- + description: "Number of nodes desired to be present before the cluster will recover from a full restart. By default, ES_CLUSTER_SIZE." + name: ES_RECOVER_EXPECTED_NODES +- + description: "Timeout for *expected* nodes to be present when cluster is recovering from a full restart." + name: ES_RECOVER_AFTER_TIME + value: "5m" +- + description: "Amount of RAM to reserve per ops ElasticSearch instance." + name: ES_OPS_INSTANCE_RAM + value: "8G" +- + description: "How many ops instances of ElasticSearch to deploy. By default, ES_CLUSTER_SIZE." + name: ES_OPS_CLUSTER_SIZE +- + description: "Number of ops nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1." + name: ES_OPS_NODE_QUORUM +- + description: "Number of ops nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_OPS_CLUSTER_SIZE." + name: ES_OPS_RECOVER_AFTER_NODES +- + description: "Number of ops nodes desired to be present before the cluster will recover from a full restart. By default, ES_OPS_CLUSTER_SIZE." + name: ES_OPS_RECOVER_EXPECTED_NODES +- + description: "Timeout for *expected* ops nodes to be present when cluster is recovering from a full restart." + name: ES_OPS_RECOVER_AFTER_TIME + value: "5m" + diff --git a/roles/openshift_examples/files/examples/infrastructure-templates/origin/metrics-deployer.yaml b/roles/openshift_examples/files/examples/infrastructure-templates/origin/metrics-deployer.yaml new file mode 100644 index 000000000..d823b2587 --- /dev/null +++ b/roles/openshift_examples/files/examples/infrastructure-templates/origin/metrics-deployer.yaml @@ -0,0 +1,116 @@ +#!/bin/bash +# +# Copyright 2014-2015 Red Hat, Inc. and/or its affiliates +# and other contributors as indicated by the @author tags. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +apiVersion: "v1" +kind: "Template" +metadata: + name: metrics-deployer-template + annotations: + description: "Template for deploying the required Metrics integration. Requires cluster-admin 'metrics-deployer' service account and 'metrics-deployer' secret." 
+ tags: "infrastructure" +labels: + metrics-infra: deployer + provider: openshift + component: deployer +objects: +- + apiVersion: v1 + kind: Pod + metadata: + generateName: metrics-deployer- + spec: + containers: + - image: ${IMAGE_PREFIX}metrics-deployer:${IMAGE_VERSION} + name: deployer + volumeMounts: + - name: secret + mountPath: /secret + readOnly: true + - name: empty + mountPath: /etc/deploy + env: + - name: PROJECT + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: IMAGE_PREFIX + value: ${IMAGE_PREFIX} + - name: IMAGE_VERSION + value: ${IMAGE_VERSION} + - name: PUBLIC_MASTER_URL + value: ${PUBLIC_MASTER_URL} + - name: MASTER_URL + value: ${MASTER_URL} + - name: REDEPLOY + value: ${REDEPLOY} + - name: USE_PERSISTENT_STORAGE + value: ${USE_PERSISTENT_STORAGE} + - name: HAWKULAR_METRICS_HOSTNAME + value: ${HAWKULAR_METRICS_HOSTNAME} + - name: CASSANDRA_NODES + value: ${CASSANDRA_NODES} + - name: CASSANDRA_PV_SIZE + value: ${CASSANDRA_PV_SIZE} + - name: METRIC_DURATION + value: ${METRIC_DURATION} + dnsPolicy: ClusterFirst + restartPolicy: Never + serviceAccount: metrics-deployer + volumes: + - name: empty + emptyDir: {} + - name: secret + secret: + secretName: metrics-deployer +parameters: +- + description: 'Specify prefix for metrics components; e.g. for "openshift/origin-metrics-deployer:v1.1", set prefix "openshift/origin-"' + name: IMAGE_PREFIX + value: "hawkular/" +- + description: 'Specify version for metrics components; e.g. for "openshift/origin-metrics-deployer:v1.1", set version "v1.1"' + name: IMAGE_VERSION + value: "0.7.0-SNAPSHOT" +- + description: "Internal URL for the master, for authentication retrieval" + name: MASTER_URL + value: "https://kubernetes.default.svc:443" +- + description: "External hostname where clients will reach Hawkular Metrics" + name: HAWKULAR_METRICS_HOSTNAME + required: true +- + description: "If set to true the deployer will try and delete all the existing components before trying to redeploy." + name: REDEPLOY + value: "false" +- + description: "Set to true for persistent storage, set to false to use non persistent storage" + name: USE_PERSISTENT_STORAGE + value: "true" +- + description: "The number of Cassandra Nodes to deploy for the initial cluster" + name: CASSANDRA_NODES + value: "1" +- + description: "The persistent volume size for each of the Cassandra nodes" + name: CASSANDRA_PV_SIZE + value: "1Gi" +- + description: "How many days metrics should be stored for." 
+ name: METRIC_DURATION + value: "7" diff --git a/roles/openshift_examples/files/examples/xpaas-streams/jboss-image-streams.json b/roles/openshift_examples/files/examples/xpaas-streams/jboss-image-streams.json index 37e6269fe..aaf5569ae 100644 --- a/roles/openshift_examples/files/examples/xpaas-streams/jboss-image-streams.json +++ b/roles/openshift_examples/files/examples/xpaas-streams/jboss-image-streams.json @@ -12,19 +12,21 @@ "kind": "ImageStream", "apiVersion": "v1", "metadata": { - "name": "jboss-webserver3-tomcat7-openshift" + "name": "jboss-webserver30-tomcat7-openshift" }, "spec": { - "dockerImageRepository": "registry.access.redhat.com/jboss-webserver-3/tomcat7-openshift", + "dockerImageRepository": "registry.access.redhat.com/jboss-webserver-3/webserver30-tomcat7-openshift", "tags": [ { - "name": "3.0", + "name": "1.1", "annotations": { - "description": "JBoss Web Server v3 Tomcat 7 STI images.", + "description": "JBoss Web Server 3.0 Tomcat 7 S2I images.", "iconClass": "icon-jboss", - "tags": "java", - "supports":"tomcat7:3.0,java", - "version": "3.0" + "tags": "builder,tomcat,tomcat7,java,jboss,xpaas", + "supports":"tomcat7:3.0,tomcat:7,java:8,xpaas:1.1", + "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git", + "sampleContextDir": "tomcat-websocket-chat", + "version": "1.1" } } ] @@ -34,19 +36,21 @@ "kind": "ImageStream", "apiVersion": "v1", "metadata": { - "name": "jboss-webserver3-tomcat8-openshift" + "name": "jboss-webserver30-tomcat8-openshift" }, "spec": { - "dockerImageRepository": "registry.access.redhat.com/jboss-webserver-3/tomcat8-openshift", + "dockerImageRepository": "registry.access.redhat.com/jboss-webserver-3/webserver30-tomcat8-openshift", "tags": [ { - "name": "3.0", + "name": "1.1", "annotations": { - "description": "JBoss Web Server v3 Tomcat 8 STI images.", + "description": "JBoss Web Server 3.0 Tomcat 8 S2I images.", "iconClass": "icon-jboss", - "tags": "java", - "supports":"tomcat8:3.0,java", - "version": "3.0" + "tags": "builder,tomcat,tomcat8,java,jboss,xpaas", + "supports":"tomcat8:3.0,tomcat:8,java:8,xpaas:1.1", + "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git", + "sampleContextDir": "tomcat-websocket-chat", + "version": "1.1" } } ] @@ -56,19 +60,22 @@ "kind": "ImageStream", "apiVersion": "v1", "metadata": { - "name": "jboss-eap6-openshift" + "name": "jboss-eap64-openshift" }, "spec": { - "dockerImageRepository": "registry.access.redhat.com/jboss-eap-6/eap-openshift", + "dockerImageRepository": "registry.access.redhat.com/jboss-eap-6/eap64-openshift", "tags": [ { - "name": "6.4", + "name": "1.1", "annotations": { - "description": "JBoss EAP 6 STI images.", + "description": "JBoss EAP 6.4 S2I images.", "iconClass": "icon-jboss", - "tags": "javaee", - "supports":"eap:6.4,jee,java", - "version": "6.4" + "tags": "builder,eap,javaee,java,jboss,xpaas", + "supports":"eap:6.4,javaee:6,java:8,xpaas:1.1", + "sampleRepo": "https://github.com/jboss-developer/jboss-eap-quickstarts.git", + "sampleContextDir": "kitchensink", + "sampleRef": "6.4.x", + "version": "1.1" } } ] @@ -78,19 +85,19 @@ "kind": "ImageStream", "apiVersion": "v1", "metadata": { - "name": "jboss-amq-6" + "name": "jboss-amq-62" }, "spec": { - "dockerImageRepository": "registry.access.redhat.com/jboss-amq-6/amq-openshift", + "dockerImageRepository": "registry.access.redhat.com/jboss-amq-6/amq62-openshift", "tags": [ { - "name": "6.2", + "name": "1.1", "annotations": { - "description": "JBoss ActiveMQ 6 broker image.", + "description": "JBoss 
A-MQ 6.2 broker image.", "iconClass": "icon-jboss", - "tags": "javaee", - "supports":"amq:6.2,jee,java", - "version": "6.2" + "tags": "messaging,amq,jboss,xpaas", + "supports":"amq:6.2,messaging,xpaas:1.1", + "version": "1.1" } } ] diff --git a/roles/openshift_examples/files/examples/xpaas-templates/amq62-basic.json b/roles/openshift_examples/files/examples/xpaas-templates/amq62-basic.json new file mode 100644 index 000000000..3fd04c28c --- /dev/null +++ b/roles/openshift_examples/files/examples/xpaas-templates/amq62-basic.json @@ -0,0 +1,325 @@ +{ + "kind": "Template", + "apiVersion": "v1", + "metadata": { + "annotations": { + "description": "Application template for JBoss A-MQ brokers. These can be deployed as standalone or in a mesh. This template doesn't feature SSL support.", + "iconClass": "icon-jboss", + "tags": "messaging,amq,jboss,xpaas", + "version": "1.1.0" + }, + "name": "amq62-basic" + }, + "labels": { + "template": "amq62-basic", + "xpaas": "1.1.0" + }, + "parameters": [ + { + "description": "The name for the application.", + "name": "APPLICATION_NAME", + "value": "broker", + "required": true + }, + { + "description": "Protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`.", + "name": "MQ_PROTOCOL", + "value": "openwire", + "required": false + }, + { + "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. If left empty, queues will be still created dynamically.", + "name": "MQ_QUEUES", + "value": "", + "required": false + }, + { + "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. If left empty, topics will be still created dynamically.", + "name": "MQ_TOPICS", + "value": "", + "required": false + }, + { + "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.", + "name": "MQ_USERNAME", + "from": "user[a-zA-Z0-9]{3}", + "generate": "expression", + "required": false + }, + { + "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.", + "name": "MQ_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": false + }, + { + "description": "User name for admin user. If left empty, it will be generated.", + "name": "AMQ_ADMIN_USERNAME", + "from": "user[a-zA-Z0-9]{3}", + "generate": "expression", + "required": true + }, + { + "description": "Password for admin user. If left empty, it will be generated.", + "name": "AMQ_ADMIN_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.", + "name": "AMQ_MESH_DISCOVERY_TYPE", + "value": "kube", + "required": false + }, + { + "description": "The A-MQ storage usage limit", + "name": "AMQ_STORAGE_USAGE_LIMIT", + "value": "100 gb", + "required": false + }, + { + "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. 
You should only need to modify this if you've installed the ImageStreams in a different namespace/project.", + "name": "IMAGE_STREAM_NAMESPACE", + "value": "openshift", + "required": true + } + ], + "objects": [ + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 5672, + "targetPort": 5672 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-amq" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-amq-amqp", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The broker's AMQP port." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 1883, + "targetPort": 1883 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-amq" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-amq-mqtt", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The broker's MQTT port." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 61613, + "targetPort": 61613 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-amq" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-amq-stomp", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The broker's STOMP port." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 61616, + "targetPort": 61616 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-amq" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-amq-tcp", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The broker's OpenWire port." + } + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}-amq", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}-amq" + ], + "from": { + "kind": "ImageStreamTag", + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "jboss-amq-62:1.1" + } + } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-amq" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}-amq", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}-amq", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "terminationGracePeriodSeconds": 60, + "containers": [ + { + "name": "${APPLICATION_NAME}-amq", + "image": "jboss-amq-62", + "imagePullPolicy": "Always", + "readinessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "curl -s -L -u ${AMQ_ADMIN_USERNAME}:${AMQ_ADMIN_PASSWORD} 'http://localhost:8161/hawtio/jolokia/read/org.apache.activemq:type=Broker,brokerName=*,service=Health/CurrentStatus' | grep -q '\"CurrentStatus\" *: *\"Good\"'" + ] + } + }, + "ports": [ + { + "name": "amqp", + "containerPort": 5672, + "protocol": "TCP" + }, + { + "name": "mqtt", + "containerPort": 1883, + "protocol": "TCP" + }, + { + "name": "stomp", + "containerPort": 61613, + "protocol": "TCP" + }, + { + "name": "tcp", + "containerPort": 61616, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "AMQ_USER", + "value": "${MQ_USERNAME}" + }, + { + "name": "AMQ_PASSWORD", + "value": "${MQ_PASSWORD}" + }, + { + "name": "AMQ_TRANSPORTS", + "value": "${MQ_PROTOCOL}" + }, + { + 
"name": "AMQ_QUEUES", + "value": "${MQ_QUEUES}" + }, + { + "name": "AMQ_TOPICS", + "value": "${MQ_TOPICS}" + }, + { + "name": "AMQ_ADMIN_USERNAME", + "value": "${AMQ_ADMIN_USERNAME}" + }, + { + "name": "AMQ_ADMIN_PASSWORD", + "value": "${AMQ_ADMIN_PASSWORD}" + }, + { + "name": "AMQ_MESH_DISCOVERY_TYPE", + "value": "${AMQ_MESH_DISCOVERY_TYPE}" + }, + { + "name": "AMQ_MESH_SERVICE_NAME", + "value": "${APPLICATION_NAME}-amq-tcp" + }, + { + "name": "AMQ_MESH_SERVICE_NAMESPACE", + "valueFrom": { + "fieldRef": { + "fieldPath": "metadata.namespace" + } + } + }, + { + "name": "AMQ_STORAGE_USAGE_LIMIT", + "value": "${AMQ_STORAGE_USAGE_LIMIT}" + } + ] + } + ] + } + } + } + } + ] +} diff --git a/roles/openshift_examples/files/examples/xpaas-templates/amq6-persistent.json b/roles/openshift_examples/files/examples/xpaas-templates/amq62-persistent-ssl.json index 5cbc7ee7e..aa9e716cf 100644 --- a/roles/openshift_examples/files/examples/xpaas-templates/amq6-persistent.json +++ b/roles/openshift_examples/files/examples/xpaas-templates/amq62-persistent-ssl.json @@ -3,82 +3,117 @@ "apiVersion": "v1", "metadata": { "annotations": { - "description": "Application template for ActiveMQ brokers using persistent storage." + "description": "Application template for JBoss A-MQ brokers. These are deployed as standalone and use persistent storage for saving messages. This template supports SSL and requires usage of OpenShift secrets.", + "iconClass": "icon-jboss", + "tags": "messaging,amq,jboss,xpaas", + "version": "1.1.0" }, - "name": "amq6-persistent" + "name": "amq62-persistent-ssl" }, "labels": { - "template": "amq6-persistent" + "template": "amq62-persistent-ssl", + "xpaas": "1.1.0" }, "parameters": [ { - "description": "ActiveMQ Release version, e.g. 6.2, etc.", - "name": "AMQ_RELEASE", - "value": "6.2" - }, - { "description": "The name for the application.", "name": "APPLICATION_NAME", - "value": "broker" + "value": "broker", + "required": true }, { - "description": "Protocol to configure. Only openwire is supported by EAP. amqp, amqp+ssl, mqtt, stomp, stomp+ssl, and ssl are not supported by EAP", + "description": "Protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. SSL variants of these protocols will be configured automaticaly.", "name": "MQ_PROTOCOL", - "value": "openwire" + "value": "openwire", + "required": false }, { - "description": "Queue names", + "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. If left empty, queues will be still created dynamically.", "name": "MQ_QUEUES", - "value": "" + "value": "", + "required": false }, { - "description": "Topic names", + "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. If left empty, topics will be still created dynamically.", "name": "MQ_TOPICS", - "value": "" + "value": "", + "required": false }, { "description": "Size of persistent storage for database volume.", "name": "VOLUME_CAPACITY", - "value": "512Mi" + "value": "512Mi", + "required": true }, { - "description": "Broker user name", + "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.", "name": "MQ_USERNAME", "from": "user[a-zA-Z0-9]{3}", - "generate": "expression" + "generate": "expression", + "required": false }, { - "description": "Broker user password", + "description": "Password for standard broker user. 
It is required for connecting to the broker. If left empty, it will be generated.", "name": "MQ_PASSWORD", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": false }, { - "description": "ActiveMQ Admin User", + "description": "User name for admin user. If left empty, it will be generated.", "name": "AMQ_ADMIN_USERNAME", "from": "user[a-zA-Z0-9]{3}", - "generate": "expression" + "generate": "expression", + "required": true }, { - "description": "ActiveMQ Admin Password", + "description": "Password for admin user. If left empty, it will be generated.", "name": "AMQ_ADMIN_PASSWORD", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "Name of a secret containing SSL related files", "name": "AMQ_SECRET", - "value": "amq-app-secret" + "value": "amq-app-secret", + "required": true }, { "description": "SSL trust store filename", "name": "AMQ_TRUSTSTORE", - "value": "broker.ts" + "value": "broker.ts", + "required": true + }, + { + "description": "SSL trust store password", + "name": "AMQ_TRUSTSTORE_PASSWORD", + "value": "", + "required": true }, { "description": "SSL key store filename", "name": "AMQ_KEYSTORE", - "value": "broker.ks" + "value": "broker.ks", + "required": true + }, + { + "description": "Password for accessing SSL keystore", + "name": "AMQ_KEYSTORE_PASSWORD", + "value": "", + "required": true + }, + { + "description": "The A-MQ storage usage limit", + "name": "AMQ_STORAGE_USAGE_LIMIT", + "value": "100 gb", + "required": false + }, + { + "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.", + "name": "IMAGE_STREAM_NAMESPACE", + "value": "openshift", + "required": true } ], "objects": [ @@ -102,7 +137,7 @@ "application": "${APPLICATION_NAME}" }, "annotations": { - "description": "The broker's amqp port." + "description": "The broker's AMQP port." } } }, @@ -126,7 +161,7 @@ "application": "${APPLICATION_NAME}" }, "annotations": { - "description": "The broker's amqp ssl port." + "description": "The broker's AMQP SSL port." } } }, @@ -150,7 +185,31 @@ "application": "${APPLICATION_NAME}" }, "annotations": { - "description": "The broker's mqtt port." + "description": "The broker's MQTT port." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8883, + "targetPort": 8883 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-amq" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-amq-mqtt-ssl", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The broker's MQTT SSL port." } } }, @@ -174,7 +233,7 @@ "application": "${APPLICATION_NAME}" }, "annotations": { - "description": "The broker's stomp port." + "description": "The broker's STOMP port." } } }, @@ -198,7 +257,7 @@ "application": "${APPLICATION_NAME}" }, "annotations": { - "description": "The broker's stomp ssl port." + "description": "The broker's STOMP SSL port." } } }, @@ -222,7 +281,7 @@ "application": "${APPLICATION_NAME}" }, "annotations": { - "description": "The broker's tcp (openwire) port." + "description": "The broker's OpenWire port." } } }, @@ -246,7 +305,7 @@ "application": "${APPLICATION_NAME}" }, "annotations": { - "description": "The broker's tcp ssl (openwire) port." 
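The SSL variants of the A-MQ templates expect a pre-existing secret (AMQ_SECRET, "amq-app-secret" by default) that holds the keystore and truststore named by AMQ_KEYSTORE and AMQ_TRUSTSTORE, with their passwords supplied through the new AMQ_KEYSTORE_PASSWORD and AMQ_TRUSTSTORE_PASSWORD parameters. A hedged sketch of building self-signed stores with keytool and wrapping them in that secret; the passwords, alias, and subject are placeholders:

```
# Self-signed keystore and truststore for testing only.
keytool -genkey -alias broker -keyalg RSA -keystore broker.ks \
        -storepass password -keypass password -dname "CN=broker"
keytool -export -alias broker -keystore broker.ks -storepass password -file broker_cert
keytool -import -alias broker -keystore broker.ts -storepass password -file broker_cert -noprompt

# Secret name matches the template's AMQ_SECRET default.
oc secrets new amq-app-secret broker.ks broker.ts
```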
+ "description": "The broker's OpenWire (SSL) port." } } }, @@ -273,10 +332,13 @@ ], "from": { "kind": "ImageStreamTag", - "namespace": "openshift", - "name": "jboss-amq-6:${AMQ_RELEASE}" + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "jboss-amq-62:1.1" } } + }, + { + "type": "ConfigChange" } ], "replicas": 1, @@ -293,10 +355,11 @@ }, "spec": { "serviceAccount": "amq-service-account", + "terminationGracePeriodSeconds": 60, "containers": [ { "name": "${APPLICATION_NAME}-amq", - "image": "jboss-amq-6", + "image": "jboss-amq-62", "imagePullPolicy": "Always", "volumeMounts": [ { @@ -335,6 +398,11 @@ "protocol": "TCP" }, { + "name": "mqtt-ssl", + "containerPort": 8883, + "protocol": "TCP" + }, + { "name": "stomp", "containerPort": 61613, "protocol": "TCP" @@ -365,7 +433,7 @@ "value": "${MQ_PASSWORD}" }, { - "name": "AMQ_PROTOCOLS", + "name": "AMQ_TRANSPORTS", "value": "${MQ_PROTOCOL}" }, { @@ -393,8 +461,20 @@ "value": "${AMQ_TRUSTSTORE}" }, { + "name": "AMQ_TRUSTSTORE_PASSWORD", + "value": "${AMQ_TRUSTSTORE_PASSWORD}" + }, + { "name": "AMQ_KEYSTORE", "value": "${AMQ_KEYSTORE}" + }, + { + "name": "AMQ_KEYSTORE_PASSWORD", + "value": "${AMQ_KEYSTORE_PASSWORD}" + }, + { + "name": "AMQ_STORAGE_USAGE_LIMIT", + "value": "${AMQ_STORAGE_USAGE_LIMIT}" } ] } @@ -427,7 +507,9 @@ } }, "spec": { - "accessModes": [ "ReadWriteOnce" ], + "accessModes": [ + "ReadWriteOnce" + ], "resources": { "requests": { "storage": "${VOLUME_CAPACITY}" diff --git a/roles/openshift_examples/files/examples/xpaas-templates/amq62-persistent.json b/roles/openshift_examples/files/examples/xpaas-templates/amq62-persistent.json new file mode 100644 index 000000000..3a2db3ce9 --- /dev/null +++ b/roles/openshift_examples/files/examples/xpaas-templates/amq62-persistent.json @@ -0,0 +1,343 @@ +{ + "kind": "Template", + "apiVersion": "v1", + "metadata": { + "annotations": { + "description": "Application template for JBoss A-MQ brokers. These can be deployed as standalone and use persistent storage for saving messages. This template doesn't feature SSL support.", + "iconClass": "icon-jboss", + "tags": "messaging,amq,jboss,xpaas", + "version": "1.1.0" + }, + "name": "amq62-persistent" + }, + "labels": { + "template": "amq62-persistent", + "xpaas": "1.1.0" + }, + "parameters": [ + { + "description": "The name for the application.", + "name": "APPLICATION_NAME", + "value": "broker", + "required": true + }, + { + "description": "Protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`.", + "name": "MQ_PROTOCOL", + "value": "openwire", + "required": false + }, + { + "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. If left empty, queues will be still created dynamically.", + "name": "MQ_QUEUES", + "value": "", + "required": false + }, + { + "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. If left empty, topics will be still created dynamically.", + "name": "MQ_TOPICS", + "value": "", + "required": false + }, + { + "description": "Size of persistent storage for database volume.", + "name": "VOLUME_CAPACITY", + "value": "512Mi", + "required": true + }, + { + "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.", + "name": "MQ_USERNAME", + "from": "user[a-zA-Z0-9]{3}", + "generate": "expression", + "required": false + }, + { + "description": "Password for standard broker user. 
It is required for connecting to the broker. If left empty, it will be generated.", + "name": "MQ_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": false + }, + { + "description": "User name for admin user. If left empty, it will be generated.", + "name": "AMQ_ADMIN_USERNAME", + "from": "user[a-zA-Z0-9]{3}", + "generate": "expression", + "required": true + }, + { + "description": "Password for admin user. If left empty, it will be generated.", + "name": "AMQ_ADMIN_PASSWORD", + "from": "[a-zA-Z0-9]{8}", + "generate": "expression", + "required": true + }, + { + "description": "The A-MQ storage usage limit", + "name": "AMQ_STORAGE_USAGE_LIMIT", + "value": "100 gb", + "required": false + }, + { + "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.", + "name": "IMAGE_STREAM_NAMESPACE", + "value": "openshift", + "required": true + } + ], + "objects": [ + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 5672, + "targetPort": 5672 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-amq" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-amq-amqp", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The broker's AMQP port." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 1883, + "targetPort": 1883 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-amq" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-amq-mqtt", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The broker's MQTT port." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 61613, + "targetPort": 61613 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-amq" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-amq-stomp", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The broker's STOMP port." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 61616, + "targetPort": 61616 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-amq" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-amq-tcp", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The broker's OpenWire port." 
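All of these broker templates wire the generated admin credentials into a readiness probe that polls the broker's jolokia endpoint for a "Good" health status. The same check can be run by hand when a pod never becomes ready; a sketch, with a made-up pod name:

```
# Pod name is illustrative; find the real one with "oc get pods".
oc exec broker-amq-1-abcde -- /bin/bash -c "curl -s -L -u \$AMQ_ADMIN_USERNAME:\$AMQ_ADMIN_PASSWORD 'http://localhost:8161/hawtio/jolokia/read/org.apache.activemq:type=Broker,brokerName=*,service=Health/CurrentStatus'"
```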
+ } + } + }, + { + "kind": "DeploymentConfig", + "apiVersion": "v1", + "metadata": { + "name": "${APPLICATION_NAME}-amq", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "strategy": { + "type": "Recreate" + }, + "triggers": [ + { + "type": "ImageChange", + "imageChangeParams": { + "automatic": true, + "containerNames": [ + "${APPLICATION_NAME}-amq" + ], + "from": { + "kind": "ImageStreamTag", + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "jboss-amq-62:1.1" + } + } + }, + { + "type": "ConfigChange" + } + ], + "replicas": 1, + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-amq" + }, + "template": { + "metadata": { + "name": "${APPLICATION_NAME}-amq", + "labels": { + "deploymentConfig": "${APPLICATION_NAME}-amq", + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "terminationGracePeriodSeconds": 60, + "containers": [ + { + "name": "${APPLICATION_NAME}-amq", + "image": "jboss-amq-62", + "imagePullPolicy": "Always", + "volumeMounts": [ + { + "mountPath": "/opt/amq/data/kahadb", + "name": "${APPLICATION_NAME}-amq-pvol" + } + ], + "readinessProbe": { + "exec": { + "command": [ + "/bin/bash", + "-c", + "curl -s -L -u ${AMQ_ADMIN_USERNAME}:${AMQ_ADMIN_PASSWORD} 'http://localhost:8161/hawtio/jolokia/read/org.apache.activemq:type=Broker,brokerName=*,service=Health/CurrentStatus' | grep -q '\"CurrentStatus\" *: *\"Good\"'" + ] + } + }, + "ports": [ + { + "name": "amqp", + "containerPort": 5672, + "protocol": "TCP" + }, + { + "name": "mqtt", + "containerPort": 1883, + "protocol": "TCP" + }, + { + "name": "stomp", + "containerPort": 61613, + "protocol": "TCP" + }, + { + "name": "tcp", + "containerPort": 61616, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "AMQ_USER", + "value": "${MQ_USERNAME}" + }, + { + "name": "AMQ_PASSWORD", + "value": "${MQ_PASSWORD}" + }, + { + "name": "AMQ_TRANSPORTS", + "value": "${MQ_PROTOCOL}" + }, + { + "name": "AMQ_QUEUES", + "value": "${MQ_QUEUES}" + }, + { + "name": "AMQ_TOPICS", + "value": "${MQ_TOPICS}" + }, + { + "name": "AMQ_ADMIN_USERNAME", + "value": "${AMQ_ADMIN_USERNAME}" + }, + { + "name": "AMQ_ADMIN_PASSWORD", + "value": "${AMQ_ADMIN_PASSWORD}" + }, + { + "name": "AMQ_STORAGE_USAGE_LIMIT", + "value": "${AMQ_STORAGE_USAGE_LIMIT}" + } + ] + } + ], + "volumes": [ + { + "name": "${APPLICATION_NAME}-amq-pvol", + "persistentVolumeClaim": { + "claimName": "${APPLICATION_NAME}-amq-claim" + } + } + ] + } + } + } + }, + { + "apiVersion": "v1", + "kind": "PersistentVolumeClaim", + "metadata": { + "name": "${APPLICATION_NAME}-amq-claim", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "accessModes": [ + "ReadWriteOnce" + ], + "resources": { + "requests": { + "storage": "${VOLUME_CAPACITY}" + } + } + } + } + ] +} diff --git a/roles/openshift_examples/files/examples/xpaas-templates/amq6.json b/roles/openshift_examples/files/examples/xpaas-templates/amq62-ssl.json index 7decdfe52..f61fb24c2 100644 --- a/roles/openshift_examples/files/examples/xpaas-templates/amq6.json +++ b/roles/openshift_examples/files/examples/xpaas-templates/amq62-ssl.json @@ -3,77 +3,117 @@ "apiVersion": "v1", "metadata": { "annotations": { - "description": "Application template for ActiveMQ brokers." + "description": "Application template for JBoss A-MQ brokers. These can be deployed as standalone or in a mesh. 
This template supports SSL and requires usage of OpenShift secrets.", + "iconClass": "icon-jboss", + "tags": "messaging,amq,jboss,xpaas", + "version": "1.1.0" }, - "name": "amq6" + "name": "amq62-ssl" }, "labels": { - "template": "amq6" + "template": "amq62-ssl", + "xpaas": "1.1.0" }, "parameters": [ { - "description": "ActiveMQ Release version, e.g. 6.2, etc.", - "name": "AMQ_RELEASE", - "value": "6.2" - }, - { "description": "The name for the application.", "name": "APPLICATION_NAME", - "value": "broker" + "value": "broker", + "required": true }, { - "description": "Protocol to configure. Only openwire is supported by EAP. amqp, amqp+ssl, mqtt, stomp, stomp+ssl, and ssl are not supported by EAP", + "description": "Protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. SSL variants of these protocols will be configured automaticaly.", "name": "MQ_PROTOCOL", - "value": "openwire" + "value": "openwire", + "required": false }, { - "description": "Queue names", + "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. If left empty, queues will be still created dynamically.", "name": "MQ_QUEUES", - "value": "" + "value": "", + "required": false }, { - "description": "Topic names", + "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. If left empty, topics will be still created dynamically.", "name": "MQ_TOPICS", - "value": "" + "value": "", + "required": false }, { - "description": "Broker user name", + "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.", "name": "MQ_USERNAME", "from": "user[a-zA-Z0-9]{3}", - "generate": "expression" + "generate": "expression", + "required": false }, { - "description": "Broker user password", + "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.", "name": "MQ_PASSWORD", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": false }, { - "description": "ActiveMQ Admin User", + "description": "User name for admin user. If left empty, it will be generated.", "name": "AMQ_ADMIN_USERNAME", "from": "user[a-zA-Z0-9]{3}", - "generate": "expression" + "generate": "expression", + "required": true }, { - "description": "ActiveMQ Admin Password", + "description": "Password for admin user. If left empty, it will be generated.", "name": "AMQ_ADMIN_PASSWORD", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "Name of a secret containing SSL related files", "name": "AMQ_SECRET", - "value": "amq-app-secret" + "value": "amq-app-secret", + "required": true }, { "description": "SSL trust store filename", "name": "AMQ_TRUSTSTORE", - "value": "broker.ts" + "value": "broker.ts", + "required": true + }, + { + "description": "SSL trust store password", + "name": "AMQ_TRUSTSTORE_PASSWORD", + "value": "", + "required": true }, { "description": "SSL key store filename", "name": "AMQ_KEYSTORE", - "value": "broker.ks" + "value": "broker.ks", + "required": true + }, + { + "description": "Password for accessing SSL keystore", + "name": "AMQ_KEYSTORE_PASSWORD", + "value": "", + "required": true + }, + { + "description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 
'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.", + "name": "AMQ_MESH_DISCOVERY_TYPE", + "value": "kube", + "required": false + }, + { + "description": "The A-MQ storage usage limit", + "name": "AMQ_STORAGE_USAGE_LIMIT", + "value": "100 gb", + "required": false + }, + { + "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.", + "name": "IMAGE_STREAM_NAMESPACE", + "value": "openshift", + "required": true } ], "objects": [ @@ -97,7 +137,7 @@ "application": "${APPLICATION_NAME}" }, "annotations": { - "description": "The broker's amqp port." + "description": "The broker's AMQP port." } } }, @@ -121,7 +161,7 @@ "application": "${APPLICATION_NAME}" }, "annotations": { - "description": "The broker's amqp ssl port." + "description": "The broker's AMQP SSL port." } } }, @@ -145,7 +185,31 @@ "application": "${APPLICATION_NAME}" }, "annotations": { - "description": "The broker's mqtt port." + "description": "The broker's MQTT port." + } + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "spec": { + "ports": [ + { + "port": 8883, + "targetPort": 8883 + } + ], + "selector": { + "deploymentConfig": "${APPLICATION_NAME}-amq" + } + }, + "metadata": { + "name": "${APPLICATION_NAME}-amq-mqtt-ssl", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "The broker's MQTT SSL port." } } }, @@ -169,7 +233,7 @@ "application": "${APPLICATION_NAME}" }, "annotations": { - "description": "The broker's stomp port." + "description": "The broker's STOMP port." } } }, @@ -193,7 +257,7 @@ "application": "${APPLICATION_NAME}" }, "annotations": { - "description": "The broker's stomp ssl port." + "description": "The broker's STOMP SSL port." } } }, @@ -217,7 +281,7 @@ "application": "${APPLICATION_NAME}" }, "annotations": { - "description": "The broker's tcp (openwire) port." + "description": "The broker's OpenWire port." } } }, @@ -241,7 +305,7 @@ "application": "${APPLICATION_NAME}" }, "annotations": { - "description": "The broker's tcp ssl (openwire) port." + "description": "The broker's OpenWire (SSL) port." 
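As the AMQ_MESH_DISCOVERY_TYPE description above spells out, the default "kube" discovery agent reads service endpoints through the Kubernetes REST API, so the pod's service account needs the "view" role. The command below is the one given in that description, with an illustrative project name substituted for <namespace>:

```
# "amq-demo" is an illustrative project name.
oc policy add-role-to-user view system:serviceaccount:amq-demo:default
```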
} } }, @@ -268,10 +332,13 @@ ], "from": { "kind": "ImageStreamTag", - "namespace": "openshift", - "name": "jboss-amq-6:${AMQ_RELEASE}" + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "jboss-amq-62:1.1" } } + }, + { + "type": "ConfigChange" } ], "replicas": 1, @@ -288,10 +355,11 @@ }, "spec": { "serviceAccount": "amq-service-account", + "terminationGracePeriodSeconds": 60, "containers": [ { "name": "${APPLICATION_NAME}-amq", - "image": "jboss-amq-6", + "image": "jboss-amq-62", "imagePullPolicy": "Always", "volumeMounts": [ { @@ -326,6 +394,11 @@ "protocol": "TCP" }, { + "name": "mqtt-ssl", + "containerPort": 8883, + "protocol": "TCP" + }, + { "name": "stomp", "containerPort": 61613, "protocol": "TCP" @@ -356,7 +429,7 @@ "value": "${MQ_PASSWORD}" }, { - "name": "AMQ_PROTOCOLS", + "name": "AMQ_TRANSPORTS", "value": "${MQ_PROTOCOL}" }, { @@ -376,10 +449,22 @@ "value": "${AMQ_ADMIN_PASSWORD}" }, { + "name": "AMQ_MESH_DISCOVERY_TYPE", + "value": "${AMQ_MESH_DISCOVERY_TYPE}" + }, + { "name": "AMQ_MESH_SERVICE_NAME", "value": "${APPLICATION_NAME}-amq-tcp" }, { + "name": "AMQ_MESH_SERVICE_NAMESPACE", + "valueFrom": { + "fieldRef": { + "fieldPath": "metadata.namespace" + } + } + }, + { "name": "AMQ_KEYSTORE_TRUSTSTORE_DIR", "value": "/etc/amq-secret-volume" }, @@ -388,8 +473,20 @@ "value": "${AMQ_TRUSTSTORE}" }, { + "name": "AMQ_TRUSTSTORE_PASSWORD", + "value": "${AMQ_TRUSTSTORE_PASSWORD}" + }, + { "name": "AMQ_KEYSTORE", "value": "${AMQ_KEYSTORE}" + }, + { + "name": "AMQ_KEYSTORE_PASSWORD", + "value": "${AMQ_KEYSTORE_PASSWORD}" + }, + { + "name": "AMQ_STORAGE_USAGE_LIMIT", + "value": "${AMQ_STORAGE_USAGE_LIMIT}" } ] } diff --git a/roles/openshift_examples/files/examples/xpaas-templates/eap6-amq-persistent-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/eap64-amq-persistent-s2i.json index b64acae8b..2fc3b5b25 100644 --- a/roles/openshift_examples/files/examples/xpaas-templates/eap6-amq-persistent-sti.json +++ b/roles/openshift_examples/files/examples/xpaas-templates/eap64-amq-persistent-s2i.json @@ -3,129 +3,149 @@ "apiVersion": "v1", "metadata": { "annotations": { - "description": "Application template for EAP 6 A-MQ applications with persistent storage built using STI.", - "iconClass" : "icon-jboss" + "description": "Application template for EAP 6 A-MQ applications with persistent storage built using S2I.", + "iconClass": "icon-jboss", + "tags": "eap,amq,javaee,java,messaging,jboss,xpaas", + "version": "1.1.0" }, - "name": "eap6-amq-persistent-sti" + "name": "eap64-amq-persistent-s2i" }, "labels": { - "template": "eap6-amq-persistent-sti" + "template": "eap64-amq-persistent-s2i", + "xpaas": "1.1.0" }, "parameters": [ { - "description": "EAP Release version, e.g. 6.4, etc.", - "name": "EAP_RELEASE", - "value": "6.4" - }, - { - "description": "ActiveMQ Release version, e.g. 6.2, etc.", - "name": "AMQ_RELEASE", - "value": "6.2" - }, - { "description": "The name for the application.", "name": "APPLICATION_NAME", - "value": "eap-app" + "value": "eap-app", + "required": true }, { "description": "Custom hostname for service routes. 
Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>", - "name": "APPLICATION_HOSTNAME", - "value": "" + "name": "APPLICATION_DOMAIN", + "value": "", + "required": false }, { "description": "Git source URI for application", - "name": "GIT_URI" + "name": "SOURCE_REPOSITORY_URL", + "value": "https://github.com/jboss-openshift/openshift-quickstarts.git", + "required": true }, { "description": "Git branch/tag reference", - "name": "GIT_REF", - "value": "master" + "name": "SOURCE_REPOSITORY_REF", + "value": "1.1", + "required": false }, { "description": "Path within Git project to build; empty for root project directory.", - "name": "GIT_CONTEXT_DIR", - "value": "" + "name": "CONTEXT_DIR", + "value": "helloworld-mdb", + "required": false }, { "description": "Size of persistent storage for database volume.", "name": "VOLUME_CAPACITY", - "value": "512Mi" + "value": "512Mi", + "required": true }, { "description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/ConnectionFactory", "name": "MQ_JNDI", - "value": "java:/ConnectionFactory" + "value": "java:/ConnectionFactory", + "required": false }, { - "description": "Protocol to configure. Only openwire is supported by EAP. amqp, amqp+ssl, mqtt, stomp, stomp+ssl, and ssl are not supported by EAP", + "description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.", "name": "MQ_PROTOCOL", - "value": "openwire" + "value": "openwire", + "required": false }, { - "description": "Queue names", + "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.", "name": "MQ_QUEUES", - "value": "" + "value": "HELLOWORLDMDBQueue", + "required": false }, { - "description": "Topic names", + "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.", "name": "MQ_TOPICS", - "value": "" + "value": "HELLOWORLDMDBTopic", + "required": false }, { "description": "The name of the secret containing the keystore file", "name": "EAP_HTTPS_SECRET", - "value": "eap-app-secret" + "value": "eap-app-secret", + "required": false }, { "description": "The name of the keystore file within the secret", "name": "EAP_HTTPS_KEYSTORE", - "value": "keystore.jks" + "value": "keystore.jks", + "required": false }, { "description": "The name associated with the server certificate", "name": "EAP_HTTPS_NAME", - "value": "" + "value": "", + "required": false }, { "description": "The password for the keystore and certificate", "name": "EAP_HTTPS_PASSWORD", - "value": "" + "value": "", + "required": false }, { - "description": "Broker user name", + "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.", "name": "MQ_USERNAME", "from": "user[a-zA-Z0-9]{3}", - "generate": "expression" + "generate": "expression", + "required": false }, { - "description": "Broker user password", + "description": "Password for standard broker user. It is required for connecting to the broker. 
If left empty, it will be generated.", "name": "MQ_PASSWORD", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": false }, { - "description": "ActiveMQ Admin User", + "description": "User name for broker admin. If left empty, it will be generated.", "name": "AMQ_ADMIN_USERNAME", "from": "user[a-zA-Z0-9]{3}", - "generate": "expression" + "generate": "expression", + "required": true }, { - "description": "ActiveMQ Admin Password", + "description": "Password for broker admin. If left empty, it will be generated.", "name": "AMQ_ADMIN_PASSWORD", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { - "description": "Github trigger secret", - "name": "GITHUB_TRIGGER_SECRET", + "description": "GitHub trigger secret", + "name": "GITHUB_WEBHOOK_SECRET", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "Generic build trigger secret", - "name": "GENERIC_TRIGGER_SECRET", + "name": "GENERIC_WEBHOOK_SECRET", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true + }, + { + "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.", + "name": "IMAGE_STREAM_NAMESPACE", + "value": "openshift", + "required": true } ], "objects": [ @@ -149,7 +169,7 @@ "application": "${APPLICATION_NAME}" }, "annotations": { - "description": "The web server's http port." + "description": "The web server's HTTP port." } } }, @@ -173,32 +193,7 @@ "application": "${APPLICATION_NAME}" }, "annotations": { - "description": "The web server's https port." - } - } - }, - { - "kind": "Service", - "apiVersion": "v1", - "spec": { - "ports": [ - { - "port": 8888, - "targetPort": 8888 - } - ], - "portalIP": "None", - "selector": { - "deploymentConfig": "${APPLICATION_NAME}" - } - }, - "metadata": { - "name": "${APPLICATION_NAME}-ping", - "labels": { - "application": "${APPLICATION_NAME}" - }, - "annotations": { - "description": "Ping service for clustered applications." + "description": "The web server's HTTPS port." } } }, @@ -222,25 +217,25 @@ "application": "${APPLICATION_NAME}" }, "annotations": { - "description": "The broker's tcp (openwire) port." + "description": "The broker's OpenWire port." } } }, { "kind": "Route", "apiVersion": "v1", - "id": "${APPLICATION_NAME}-http-route", + "id": "${APPLICATION_NAME}-http", "metadata": { - "name": "${APPLICATION_NAME}-http-route", - "labels": { - "application": "${APPLICATION_NAME}" - }, - "annotations": { - "description": "Route for application's http service." - } + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's HTTP service." + } }, "spec": { - "host": "${APPLICATION_HOSTNAME}", + "host": "${APPLICATION_DOMAIN}", "to": { "name": "${APPLICATION_NAME}" } @@ -249,23 +244,23 @@ { "kind": "Route", "apiVersion": "v1", - "id": "${APPLICATION_NAME}-https-route", + "id": "${APPLICATION_NAME}-https", "metadata": { - "name": "${APPLICATION_NAME}-https-route", + "name": "secure-${APPLICATION_NAME}", "labels": { "application": "${APPLICATION_NAME}" }, "annotations": { - "description": "Route for application's https service." + "description": "Route for application's HTTPS service." 
} }, "spec": { - "host": "${APPLICATION_HOSTNAME}", + "host": "${APPLICATION_DOMAIN}", "to": { "name": "secure-${APPLICATION_NAME}" }, "tls": { - "termination" : "passthrough" + "termination": "passthrough" } } }, @@ -292,18 +287,19 @@ "source": { "type": "Git", "git": { - "uri": "${GIT_URI}", - "ref": "${GIT_REF}" + "uri": "${SOURCE_REPOSITORY_URL}", + "ref": "${SOURCE_REPOSITORY_REF}" }, - "contextDir":"${GIT_CONTEXT_DIR}" + "contextDir": "${CONTEXT_DIR}" }, "strategy": { "type": "Source", "sourceStrategy": { + "forcePull": true, "from": { "kind": "ImageStreamTag", - "namespace": "openshift", - "name": "jboss-eap6-openshift:${EAP_RELEASE}" + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "jboss-eap64-openshift:1.1" } } }, @@ -317,18 +313,21 @@ { "type": "GitHub", "github": { - "secret": "${GITHUB_TRIGGER_SECRET}" + "secret": "${GITHUB_WEBHOOK_SECRET}" } }, { "type": "Generic", "generic": { - "secret": "${GENERIC_TRIGGER_SECRET}" + "secret": "${GENERIC_WEBHOOK_SECRET}" } }, { "type": "ImageChange", "imageChange": {} + }, + { + "type": "ConfigChange" } ] } @@ -359,6 +358,9 @@ "name": "${APPLICATION_NAME}" } } + }, + { + "type": "ConfigChange" } ], "replicas": 1, @@ -375,6 +377,7 @@ }, "spec": { "serviceAccount": "eap-service-account", + "terminationGracePeriodSeconds": 60, "containers": [ { "name": "${APPLICATION_NAME}", @@ -443,12 +446,16 @@ "value": "${MQ_TOPICS}" }, { - "name": "OPENSHIFT_DNS_PING_SERVICE_NAME", - "value": "${APPLICATION_NAME}-ping" + "name": "OPENSHIFT_KUBE_PING_LABELS", + "value": "application=${APPLICATION_NAME}" }, { - "name": "OPENSHIFT_DNS_PING_SERVICE_PORT", - "value": "8888" + "name": "OPENSHIFT_KUBE_PING_NAMESPACE", + "valueFrom": { + "fieldRef": { + "fieldPath": "metadata.namespace" + } + } }, { "name": "EAP_HTTPS_KEYSTORE_DIR", @@ -504,10 +511,13 @@ ], "from": { "kind": "ImageStreamTag", - "namespace": "openshift", - "name": "jboss-amq-6:${AMQ_RELEASE}" + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "jboss-amq-62:1.1" } } + }, + { + "type": "ConfigChange" } ], "replicas": 1, @@ -523,10 +533,11 @@ } }, "spec": { + "terminationGracePeriodSeconds": 60, "containers": [ { "name": "${APPLICATION_NAME}-amq", - "image": "jboss-amq-6", + "image": "jboss-amq-62", "imagePullPolicy": "Always", "readinessProbe": { "exec": { @@ -590,7 +601,7 @@ "value": "${MQ_PASSWORD}" }, { - "name": "AMQ_PROTOCOLS", + "name": "AMQ_TRANSPORTS", "value": "${MQ_PROTOCOL}" }, { @@ -634,7 +645,9 @@ } }, "spec": { - "accessModes": [ "ReadWriteOnce" ], + "accessModes": [ + "ReadWriteOnce" + ], "resources": { "requests": { "storage": "${VOLUME_CAPACITY}" diff --git a/roles/openshift_examples/files/examples/xpaas-templates/eap6-amq-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/eap64-amq-s2i.json index 20b234bd0..a420bb1ea 100644 --- a/roles/openshift_examples/files/examples/xpaas-templates/eap6-amq-sti.json +++ b/roles/openshift_examples/files/examples/xpaas-templates/eap64-amq-s2i.json @@ -3,124 +3,143 @@ "apiVersion": "v1", "metadata": { "annotations": { - "description": "Application template for EAP 6 A-MQ applications built using STI.", - "iconClass" : "icon-jboss" + "description": "Application template for EAP 6 A-MQ applications built using S2I.", + "iconClass": "icon-jboss", + "tags": "eap,amq,javaee,java,messaging,jboss,xpaas", + "version": "1.1.0" }, - "name": "eap6-amq-sti" + "name": "eap64-amq-s2i" }, "labels": { - "template": "eap6-amq-sti" + "template": "eap64-amq-s2i", + "xpaas": "1.1.0" }, "parameters": [ { - "description": "EAP Release version, 
e.g. 6.4, etc.", - "name": "EAP_RELEASE", - "value": "6.4" - }, - { - "description": "ActiveMQ Release version, e.g. 6.2, etc.", - "name": "AMQ_RELEASE", - "value": "6.2" - }, - { "description": "The name for the application.", "name": "APPLICATION_NAME", - "value": "eap-app" + "value": "eap-app", + "required": true }, { "description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>", - "name": "APPLICATION_HOSTNAME", - "value": "" + "name": "APPLICATION_DOMAIN", + "value": "", + "required": false }, { "description": "Git source URI for application", - "name": "GIT_URI" + "name": "SOURCE_REPOSITORY_URL", + "value": "https://github.com/jboss-openshift/openshift-quickstarts.git", + "required": true }, { "description": "Git branch/tag reference", - "name": "GIT_REF", - "value": "master" + "name": "SOURCE_REPOSITORY_REF", + "value": "1.1", + "required": false }, { "description": "Path within Git project to build; empty for root project directory.", - "name": "GIT_CONTEXT_DIR", - "value": "" + "name": "CONTEXT_DIR", + "value": "helloworld-mdb", + "required": false }, { "description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/ConnectionFactory", "name": "MQ_JNDI", - "value": "java:/ConnectionFactory" + "value": "java:/ConnectionFactory", + "required": false }, { - "description": "Protocol to configure. Only openwire is supported by EAP. amqp, amqp+ssl, mqtt, stomp, stomp+ssl, and ssl are not supported by EAP", + "description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.", "name": "MQ_PROTOCOL", - "value": "openwire" + "value": "openwire", + "required": false }, { - "description": "Queue names", + "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.", "name": "MQ_QUEUES", - "value": "" + "value": "HELLOWORLDMDBQueue", + "required": false }, { - "description": "Topic names", + "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.", "name": "MQ_TOPICS", - "value": "" + "value": "HELLOWORLDMDBTopic", + "required": false }, { "description": "The name of the secret containing the keystore file", "name": "EAP_HTTPS_SECRET", - "value": "eap-app-secret" + "value": "eap-app-secret", + "required": false }, { "description": "The name of the keystore file within the secret", "name": "EAP_HTTPS_KEYSTORE", - "value": "keystore.jks" + "value": "keystore.jks", + "required": false }, { "description": "The name associated with the server certificate", "name": "EAP_HTTPS_NAME", - "value": "" + "value": "", + "required": false }, { "description": "The password for the keystore and certificate", "name": "EAP_HTTPS_PASSWORD", - "value": "" + "value": "", + "required": false }, { - "description": "Broker user name", + "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.", "name": "MQ_USERNAME", "from": "user[a-zA-Z0-9]{3}", - "generate": "expression" + "generate": "expression", + "required": false }, { - "description": "Broker user password", + "description": "Password for standard broker user. It is required for connecting to the broker. 
If left empty, it will be generated.", "name": "MQ_PASSWORD", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": false }, { - "description": "ActiveMQ Admin User", + "description": "User name for broker admin. If left empty, it will be generated.", "name": "AMQ_ADMIN_USERNAME", "from": "user[a-zA-Z0-9]{3}", - "generate": "expression" + "generate": "expression", + "required": true }, { - "description": "ActiveMQ Admin Password", + "description": "Password for broker admin. If left empty, it will be generated.", "name": "AMQ_ADMIN_PASSWORD", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { - "description": "Github trigger secret", - "name": "GITHUB_TRIGGER_SECRET", + "description": "GitHub trigger secret", + "name": "GITHUB_WEBHOOK_SECRET", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "Generic build trigger secret", - "name": "GENERIC_TRIGGER_SECRET", + "name": "GENERIC_WEBHOOK_SECRET", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true + }, + { + "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.", + "name": "IMAGE_STREAM_NAMESPACE", + "value": "openshift", + "required": true } ], "objects": [ @@ -144,7 +163,7 @@ "application": "${APPLICATION_NAME}" }, "annotations": { - "description": "The web server's http port." + "description": "The web server's HTTP port." } } }, @@ -168,32 +187,7 @@ "application": "${APPLICATION_NAME}" }, "annotations": { - "description": "The web server's https port." - } - } - }, - { - "kind": "Service", - "apiVersion": "v1", - "spec": { - "ports": [ - { - "port": 8888, - "targetPort": 8888 - } - ], - "portalIP": "None", - "selector": { - "deploymentConfig": "${APPLICATION_NAME}" - } - }, - "metadata": { - "name": "${APPLICATION_NAME}-ping", - "labels": { - "application": "${APPLICATION_NAME}" - }, - "annotations": { - "description": "Ping service for clustered applications." + "description": "The web server's HTTPS port." } } }, @@ -217,25 +211,25 @@ "application": "${APPLICATION_NAME}" }, "annotations": { - "description": "The broker's tcp (openwire) port." + "description": "The broker's OpenWire port." } } }, { "kind": "Route", "apiVersion": "v1", - "id": "${APPLICATION_NAME}-http-route", + "id": "${APPLICATION_NAME}-http", "metadata": { - "name": "${APPLICATION_NAME}-http-route", - "labels": { - "application": "${APPLICATION_NAME}" - }, - "annotations": { - "description": "Route for application's http service." - } + "name": "${APPLICATION_NAME}", + "labels": { + "application": "${APPLICATION_NAME}" + }, + "annotations": { + "description": "Route for application's HTTP service." + } }, "spec": { - "host": "${APPLICATION_HOSTNAME}", + "host": "${APPLICATION_DOMAIN}", "to": { "name": "${APPLICATION_NAME}" } @@ -244,23 +238,23 @@ { "kind": "Route", "apiVersion": "v1", - "id": "${APPLICATION_NAME}-https-route", + "id": "${APPLICATION_NAME}-https", "metadata": { - "name": "${APPLICATION_NAME}-https-route", + "name": "secure-${APPLICATION_NAME}", "labels": { "application": "${APPLICATION_NAME}" }, "annotations": { - "description": "Route for application's https service." + "description": "Route for application's HTTPS service." 
} }, "spec": { - "host": "${APPLICATION_HOSTNAME}", + "host": "${APPLICATION_DOMAIN}", "to": { "name": "secure-${APPLICATION_NAME}" }, "tls": { - "termination" : "passthrough" + "termination": "passthrough" } } }, @@ -287,18 +281,19 @@ "source": { "type": "Git", "git": { - "uri": "${GIT_URI}", - "ref": "${GIT_REF}" + "uri": "${SOURCE_REPOSITORY_URL}", + "ref": "${SOURCE_REPOSITORY_REF}" }, - "contextDir":"${GIT_CONTEXT_DIR}" + "contextDir": "${CONTEXT_DIR}" }, "strategy": { "type": "Source", "sourceStrategy": { + "forcePull": true, "from": { "kind": "ImageStreamTag", - "namespace": "openshift", - "name": "jboss-eap6-openshift:${EAP_RELEASE}" + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "jboss-eap64-openshift:1.1" } } }, @@ -312,18 +307,21 @@ { "type": "GitHub", "github": { - "secret": "${GITHUB_TRIGGER_SECRET}" + "secret": "${GITHUB_WEBHOOK_SECRET}" } }, { "type": "Generic", "generic": { - "secret": "${GENERIC_TRIGGER_SECRET}" + "secret": "${GENERIC_WEBHOOK_SECRET}" } }, { "type": "ImageChange", "imageChange": {} + }, + { + "type": "ConfigChange" } ] } @@ -354,6 +352,9 @@ "name": "${APPLICATION_NAME}" } } + }, + { + "type": "ConfigChange" } ], "replicas": 1, @@ -370,6 +371,7 @@ }, "spec": { "serviceAccount": "eap-service-account", + "terminationGracePeriodSeconds": 60, "containers": [ { "name": "${APPLICATION_NAME}", @@ -438,12 +440,16 @@ "value": "${MQ_TOPICS}" }, { - "name": "OPENSHIFT_DNS_PING_SERVICE_NAME", - "value": "${APPLICATION_NAME}-ping" + "name": "OPENSHIFT_KUBE_PING_LABELS", + "value": "application=${APPLICATION_NAME}" }, { - "name": "OPENSHIFT_DNS_PING_SERVICE_PORT", - "value": "8888" + "name": "OPENSHIFT_KUBE_PING_NAMESPACE", + "valueFrom": { + "fieldRef": { + "fieldPath": "metadata.namespace" + } + } }, { "name": "EAP_HTTPS_KEYSTORE_DIR", @@ -499,10 +505,13 @@ ], "from": { "kind": "ImageStreamTag", - "namespace": "openshift", - "name": "jboss-amq-6:${AMQ_RELEASE}" + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "jboss-amq-62:1.1" } } + }, + { + "type": "ConfigChange" } ], "replicas": 1, @@ -518,10 +527,11 @@ } }, "spec": { + "terminationGracePeriodSeconds": 60, "containers": [ { "name": "${APPLICATION_NAME}-amq", - "image": "jboss-amq-6", + "image": "jboss-amq-62", "imagePullPolicy": "Always", "readinessProbe": { "exec": { @@ -579,7 +589,7 @@ "value": "${MQ_PASSWORD}" }, { - "name": "AMQ_PROTOCOLS", + "name": "AMQ_TRANSPORTS", "value": "${MQ_PROTOCOL}" }, { diff --git a/roles/openshift_examples/files/examples/xpaas-templates/eap6-basic-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/eap64-basic-s2i.json index 146bfb1ee..3f90eb8be 100644 --- a/roles/openshift_examples/files/examples/xpaas-templates/eap6-basic-sti.json +++ b/roles/openshift_examples/files/examples/xpaas-templates/eap64-basic-s2i.json @@ -3,72 +3,86 @@ "apiVersion": "v1", "metadata": { "annotations": { - "iconClass" : "icon-jboss", - "description": "Application template for EAP 6 applications built using STI." + "iconClass": "icon-jboss", + "description": "Application template for EAP 6 applications built using S2I.", + "tags": "eap,javaee,java,jboss,xpaas", + "version": "1.1.0" }, - "name": "eap6-basic-sti" + "name": "eap64-basic-s2i" }, "labels": { - "template": "eap6-basic-sti" + "template": "eap64-basic-s2i", + "xpaas": "1.1.0" }, "parameters": [ { - "description": "EAP Release version, e.g. 
6.4, etc.", - "name": "EAP_RELEASE", - "value": "6.4" - }, - { "description": "The name for the application.", "name": "APPLICATION_NAME", - "value": "eap-app" + "value": "eap-app", + "required": true }, { "description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>", - "name": "APPLICATION_HOSTNAME", - "value": "" + "name": "APPLICATION_DOMAIN", + "value": "", + "required": false }, { "description": "Git source URI for application", - "name": "GIT_URI", - "value": "https://github.com/jboss-developer/jboss-eap-quickstarts" + "name": "SOURCE_REPOSITORY_URL", + "value": "https://github.com/jboss-developer/jboss-eap-quickstarts", + "required": true }, { "description": "Git branch/tag reference", - "name": "GIT_REF", - "value": "6.4.x" + "name": "SOURCE_REPOSITORY_REF", + "value": "6.4.x", + "required": false }, { "description": "Path within Git project to build; empty for root project directory.", - "name": "GIT_CONTEXT_DIR", - "value": "kitchensink" + "name": "CONTEXT_DIR", + "value": "kitchensink", + "required": false }, { "description": "Queue names", "name": "HORNETQ_QUEUES", - "value": "" + "value": "", + "required": false }, { "description": "Topic names", "name": "HORNETQ_TOPICS", - "value": "" + "value": "", + "required": false }, { "description": "HornetQ cluster admin password", "name": "HORNETQ_CLUSTER_PASSWORD", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { - "description": "Github trigger secret", - "name": "GITHUB_TRIGGER_SECRET", + "description": "GitHub trigger secret", + "name": "GITHUB_WEBHOOK_SECRET", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "Generic build trigger secret", - "name": "GENERIC_TRIGGER_SECRET", + "name": "GENERIC_WEBHOOK_SECRET", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true + }, + { + "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.", + "name": "IMAGE_STREAM_NAMESPACE", + "value": "openshift", + "required": true } ], "objects": [ @@ -97,36 +111,11 @@ } }, { - "kind": "Service", - "apiVersion": "v1", - "spec": { - "ports": [ - { - "port": 8888, - "targetPort": 8888 - } - ], - "portalIP": "None", - "selector": { - "deploymentConfig": "${APPLICATION_NAME}" - } - }, - "metadata": { - "name": "${APPLICATION_NAME}-ping", - "labels": { - "application": "${APPLICATION_NAME}" - }, - "annotations": { - "description": "Ping service for clustered applications." 
- } - } - }, - { "kind": "Route", "apiVersion": "v1", - "id": "${APPLICATION_NAME}-http-route", + "id": "${APPLICATION_NAME}-http", "metadata": { - "name": "${APPLICATION_NAME}-http-route", + "name": "${APPLICATION_NAME}", "labels": { "application": "${APPLICATION_NAME}" }, @@ -135,7 +124,7 @@ } }, "spec": { - "host": "${APPLICATION_HOSTNAME}", + "host": "${APPLICATION_DOMAIN}", "to": { "name": "${APPLICATION_NAME}" } @@ -164,18 +153,19 @@ "source": { "type": "Git", "git": { - "uri": "${GIT_URI}", - "ref": "${GIT_REF}" + "uri": "${SOURCE_REPOSITORY_URL}", + "ref": "${SOURCE_REPOSITORY_REF}" }, - "contextDir":"${GIT_CONTEXT_DIR}" + "contextDir": "${CONTEXT_DIR}" }, "strategy": { "type": "Source", "sourceStrategy": { + "forcePull": true, "from": { "kind": "ImageStreamTag", - "namespace": "openshift", - "name": "jboss-eap6-openshift:${EAP_RELEASE}" + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "jboss-eap64-openshift:1.1" } } }, @@ -189,18 +179,21 @@ { "type": "GitHub", "github": { - "secret": "${GITHUB_TRIGGER_SECRET}" + "secret": "${GITHUB_WEBHOOK_SECRET}" } }, { "type": "Generic", "generic": { - "secret": "${GENERIC_TRIGGER_SECRET}" + "secret": "${GENERIC_WEBHOOK_SECRET}" } }, { "type": "ImageChange", "imageChange": {} + }, + { + "type": "ConfigChange" } ] } @@ -231,6 +224,9 @@ "name": "${APPLICATION_NAME}" } } + }, + { + "type": "ConfigChange" } ], "replicas": 1, @@ -246,6 +242,7 @@ } }, "spec": { + "terminationGracePeriodSeconds": 60, "containers": [ { "name": "${APPLICATION_NAME}", @@ -274,12 +271,16 @@ ], "env": [ { - "name": "OPENSHIFT_DNS_PING_SERVICE_NAME", - "value": "${APPLICATION_NAME}-ping" + "name": "OPENSHIFT_KUBE_PING_LABELS", + "value": "application=${APPLICATION_NAME}" }, { - "name": "OPENSHIFT_DNS_PING_SERVICE_PORT", - "value": "8888" + "name": "OPENSHIFT_KUBE_PING_NAMESPACE", + "valueFrom": { + "fieldRef": { + "fieldPath": "metadata.namespace" + } + } }, { "name": "HORNETQ_CLUSTER_PASSWORD", @@ -301,4 +302,4 @@ } } ] -} +}
\ No newline at end of file diff --git a/roles/openshift_examples/files/examples/xpaas-templates/eap6-https-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/eap64-https-s2i.json index 5df36ccc2..220d2f5b9 100644 --- a/roles/openshift_examples/files/examples/xpaas-templates/eap6-https-sti.json +++ b/roles/openshift_examples/files/examples/xpaas-templates/eap64-https-s2i.json @@ -3,92 +3,110 @@ "apiVersion": "v1", "metadata": { "annotations": { - "iconClass" : "icon-jboss", - "description": "Application template for EAP 6 applications built using STI." + "iconClass": "icon-jboss", + "description": "Application template for EAP 6 applications built using S2I.", + "tags": "eap,javaee,java,jboss,xpaas", + "version": "1.1.0" }, - "name": "eap6-https-sti" + "name": "eap64-https-s2i" }, "labels": { - "template": "eap6-https-sti" + "template": "eap64-https-s2i", + "xpaas": "1.1.0" }, "parameters": [ { - "description": "EAP Release version, e.g. 6.4, etc.", - "name": "EAP_RELEASE", - "value": "6.4" - }, - { "description": "The name for the application.", "name": "APPLICATION_NAME", - "value": "eap-app" + "value": "eap-app", + "required": true }, { "description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>", - "name": "APPLICATION_HOSTNAME", - "value": "" + "name": "APPLICATION_DOMAIN", + "value": "", + "required": false }, { "description": "Git source URI for application", - "name": "GIT_URI", - "value": "https://github.com/jboss-developer/jboss-eap-quickstarts" + "name": "SOURCE_REPOSITORY_URL", + "value": "https://github.com/jboss-developer/jboss-eap-quickstarts", + "required": true }, { "description": "Git branch/tag reference", - "name": "GIT_REF", - "value": "6.4.x" + "name": "SOURCE_REPOSITORY_REF", + "value": "6.4.x", + "required": false }, { "description": "Path within Git project to build; empty for root project directory.", - "name": "GIT_CONTEXT_DIR", - "value": "kitchensink" + "name": "CONTEXT_DIR", + "value": "kitchensink", + "required": false }, { "description": "Queue names", "name": "HORNETQ_QUEUES", - "value": "" + "value": "", + "required": false }, { "description": "Topic names", "name": "HORNETQ_TOPICS", - "value": "" + "value": "", + "required": false }, { "description": "The name of the secret containing the keystore file", "name": "EAP_HTTPS_SECRET", - "value": "eap-app-secret" + "value": "eap-app-secret", + "required": true }, { "description": "The name of the keystore file within the secret", "name": "EAP_HTTPS_KEYSTORE", - "value": "keystore.jks" + "value": "keystore.jks", + "required": false }, { "description": "The name associated with the server certificate", "name": "EAP_HTTPS_NAME", - "value": "" + "value": "", + "required": false }, { "description": "The password for the keystore and certificate", "name": "EAP_HTTPS_PASSWORD", - "value": "" + "value": "", + "required": false }, { "description": "HornetQ cluster admin password", "name": "HORNETQ_CLUSTER_PASSWORD", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { - "description": "Github trigger secret", - "name": "GITHUB_TRIGGER_SECRET", + "description": "GitHub trigger secret", + "name": "GITHUB_WEBHOOK_SECRET", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "Generic build trigger secret", - "name": "GENERIC_TRIGGER_SECRET", + "name": "GENERIC_WEBHOOK_SECRET", "from": "[a-zA-Z0-9]{8}", - 
"generate": "expression" + "generate": "expression", + "required": true + }, + { + "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.", + "name": "IMAGE_STREAM_NAMESPACE", + "value": "openshift", + "required": true } ], "objects": [ @@ -141,36 +159,11 @@ } }, { - "kind": "Service", - "apiVersion": "v1", - "spec": { - "ports": [ - { - "port": 8888, - "targetPort": 8888 - } - ], - "portalIP": "None", - "selector": { - "deploymentConfig": "${APPLICATION_NAME}" - } - }, - "metadata": { - "name": "${APPLICATION_NAME}-ping", - "labels": { - "application": "${APPLICATION_NAME}" - }, - "annotations": { - "description": "Ping service for clustered applications." - } - } - }, - { "kind": "Route", "apiVersion": "v1", - "id": "${APPLICATION_NAME}-http-route", + "id": "${APPLICATION_NAME}-http", "metadata": { - "name": "${APPLICATION_NAME}-http-route", + "name": "${APPLICATION_NAME}", "labels": { "application": "${APPLICATION_NAME}" }, @@ -179,7 +172,7 @@ } }, "spec": { - "host": "${APPLICATION_HOSTNAME}", + "host": "${APPLICATION_DOMAIN}", "to": { "name": "${APPLICATION_NAME}" } @@ -188,9 +181,9 @@ { "kind": "Route", "apiVersion": "v1", - "id": "${APPLICATION_NAME}-https-route", + "id": "${APPLICATION_NAME}-https", "metadata": { - "name": "${APPLICATION_NAME}-https-route", + "name": "secure-${APPLICATION_NAME}", "labels": { "application": "${APPLICATION_NAME}" }, @@ -199,12 +192,12 @@ } }, "spec": { - "host": "${APPLICATION_HOSTNAME}", + "host": "${APPLICATION_DOMAIN}", "to": { "name": "secure-${APPLICATION_NAME}" }, "tls": { - "termination" : "passthrough" + "termination": "passthrough" } } }, @@ -231,18 +224,19 @@ "source": { "type": "Git", "git": { - "uri": "${GIT_URI}", - "ref": "${GIT_REF}" + "uri": "${SOURCE_REPOSITORY_URL}", + "ref": "${SOURCE_REPOSITORY_REF}" }, - "contextDir":"${GIT_CONTEXT_DIR}" + "contextDir": "${CONTEXT_DIR}" }, "strategy": { "type": "Source", "sourceStrategy": { + "forcePull": true, "from": { "kind": "ImageStreamTag", - "namespace": "openshift", - "name": "jboss-eap6-openshift:${EAP_RELEASE}" + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "jboss-eap64-openshift:1.1" } } }, @@ -256,18 +250,21 @@ { "type": "GitHub", "github": { - "secret": "${GITHUB_TRIGGER_SECRET}" + "secret": "${GITHUB_WEBHOOK_SECRET}" } }, { "type": "Generic", "generic": { - "secret": "${GENERIC_TRIGGER_SECRET}" + "secret": "${GENERIC_WEBHOOK_SECRET}" } }, { "type": "ImageChange", "imageChange": {} + }, + { + "type": "ConfigChange" } ] } @@ -298,6 +295,9 @@ "name": "${APPLICATION_NAME}" } } + }, + { + "type": "ConfigChange" } ], "replicas": 1, @@ -314,6 +314,7 @@ }, "spec": { "serviceAccount": "eap-service-account", + "terminationGracePeriodSeconds": 60, "containers": [ { "name": "${APPLICATION_NAME}", @@ -354,12 +355,16 @@ ], "env": [ { - "name": "OPENSHIFT_DNS_PING_SERVICE_NAME", - "value": "${APPLICATION_NAME}-ping" + "name": "OPENSHIFT_KUBE_PING_LABELS", + "value": "application=${APPLICATION_NAME}" }, { - "name": "OPENSHIFT_DNS_PING_SERVICE_PORT", - "value": "8888" + "name": "OPENSHIFT_KUBE_PING_NAMESPACE", + "valueFrom": { + "fieldRef": { + "fieldPath": "metadata.namespace" + } + } }, { "name": "EAP_HTTPS_KEYSTORE_DIR", @@ -405,4 +410,4 @@ } } ] -} +}
\ No newline at end of file diff --git a/roles/openshift_examples/files/examples/xpaas-templates/eap6-mongodb-persistent-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/eap64-mongodb-persistent-s2i.json index 289ab284f..a1a3a9f2c 100644 --- a/roles/openshift_examples/files/examples/xpaas-templates/eap6-mongodb-persistent-sti.json +++ b/roles/openshift_examples/files/examples/xpaas-templates/eap64-mongodb-persistent-s2i.json @@ -3,148 +3,179 @@ "apiVersion": "v1", "metadata": { "annotations": { - "description": "Application template for EAP 6 MongDB applications with persistent storage built using STI.", - "iconClass" : "icon-jboss" + "description": "Application template for EAP 6 MongDB applications with persistent storage built using S2I.", + "iconClass": "icon-jboss", + "tags": "eap,mongodb,javaee,java,database,jboss,xpaas", + "version": "1.1.0" }, - "name": "eap6-mongodb-persistent-sti" + "name": "eap64-mongodb-persistent-s2i" }, "labels": { - "template": "eap6-mongodb-persistent-sti" + "template": "eap64-mongodb-persistent-s2i", + "xpaas": "1.1.0" }, "parameters": [ { - "description": "EAP Release version, e.g. 6.4, etc.", - "name": "EAP_RELEASE", - "value": "6.4" - }, - { "description": "The name for the application.", "name": "APPLICATION_NAME", - "value": "eap-app" + "value": "eap-app", + "required": true }, { "description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>", - "name": "APPLICATION_HOSTNAME", - "value": "" + "name": "APPLICATION_DOMAIN", + "value": "", + "required": false }, { "description": "Git source URI for application", - "name": "GIT_URI" + "name": "SOURCE_REPOSITORY_URL", + "value": "https://github.com/jboss-openshift/openshift-quickstarts", + "required": true }, { "description": "Git branch/tag reference", - "name": "GIT_REF", - "value": "master" + "name": "SOURCE_REPOSITORY_REF", + "value": "1.1", + "required": false }, { "description": "Path within Git project to build; empty for root project directory.", - "name": "GIT_CONTEXT_DIR", - "value": "" + "name": "CONTEXT_DIR", + "value": "todolist/todolist-mongodb", + "required": false }, { "description": "Database JNDI name used by application to resolve the datasource, e.g. 
java:/jboss/datasources/mongodb", "name": "DB_JNDI", - "value": "" + "value": "", + "required": false }, { "description": "Database name", "name": "DB_DATABASE", - "value": "root" + "value": "root", + "required": true }, { "description": "Size of persistent storage for database volume.", "name": "VOLUME_CAPACITY", - "value": "512Mi" + "value": "512Mi", + "required": true }, { "description": "Queue names", "name": "HORNETQ_QUEUES", - "value": "" + "value": "", + "required": false }, { "description": "Topic names", "name": "HORNETQ_TOPICS", - "value": "" + "value": "", + "required": false }, { "description": "The name of the secret containing the keystore file", "name": "EAP_HTTPS_SECRET", - "value": "eap-app-secret" + "value": "eap-app-secret", + "required": false }, { "description": "The name of the keystore file within the secret", "name": "EAP_HTTPS_KEYSTORE", - "value": "keystore.jks" + "value": "keystore.jks", + "required": false }, { "description": "The name associated with the server certificate", "name": "EAP_HTTPS_NAME", - "value": "" + "value": "", + "required": false }, { "description": "The password for the keystore and certificate", "name": "EAP_HTTPS_PASSWORD", - "value": "" + "value": "", + "required": false }, { "description": "Sets xa-pool/min-pool-size for the configured datasource.", - "name": "DB_MIN_POOL_SIZE" + "name": "DB_MIN_POOL_SIZE", + "required": false }, { "description": "Sets xa-pool/max-pool-size for the configured datasource.", - "name": "DB_MAX_POOL_SIZE" + "name": "DB_MAX_POOL_SIZE", + "required": false }, { "description": "Sets transaction-isolation for the configured datasource.", - "name": "DB_TX_ISOLATION" + "name": "DB_TX_ISOLATION", + "required": false }, { "description": "Disable data file preallocation.", - "name": "MONGODB_NOPREALLOC" + "name": "MONGODB_NOPREALLOC", + "required": false }, { "description": "Set MongoDB to use a smaller default data file size.", - "name": "MONGODB_SMALLFILES" + "name": "MONGODB_SMALLFILES", + "required": false }, { "description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.", - "name": "MONGODB_QUIET" + "name": "MONGODB_QUIET", + "required": false }, { "description": "HornetQ cluster admin password", "name": "HORNETQ_CLUSTER_PASSWORD", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "Database user name", "name": "DB_USERNAME", "from": "user[a-zA-Z0-9]{3}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "Database user password", "name": "DB_PASSWORD", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "Database admin password", "name": "DB_ADMIN_PASSWORD", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { - "description": "Github trigger secret", - "name": "GITHUB_TRIGGER_SECRET", + "description": "GitHub trigger secret", + "name": "GITHUB_WEBHOOK_SECRET", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "Generic build trigger secret", - "name": "GENERIC_TRIGGER_SECRET", + "name": "GENERIC_WEBHOOK_SECRET", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true + }, + { + "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. 
You should only need to modify this if you've installed the ImageStreams in a different namespace/project.", + "name": "IMAGE_STREAM_NAMESPACE", + "value": "openshift", + "required": true } ], "objects": [ @@ -202,31 +233,6 @@ "spec": { "ports": [ { - "port": 8888, - "targetPort": 8888 - } - ], - "portalIP": "None", - "selector": { - "deploymentConfig": "${APPLICATION_NAME}" - } - }, - "metadata": { - "name": "${APPLICATION_NAME}-ping", - "labels": { - "application": "${APPLICATION_NAME}" - }, - "annotations": { - "description": "Ping service for clustered applications." - } - } - }, - { - "kind": "Service", - "apiVersion": "v1", - "spec": { - "ports": [ - { "port": 27017, "targetPort": 27017 } @@ -248,9 +254,9 @@ { "kind": "Route", "apiVersion": "v1", - "id": "${APPLICATION_NAME}-http-route", + "id": "${APPLICATION_NAME}-http", "metadata": { - "name": "${APPLICATION_NAME}-http-route", + "name": "${APPLICATION_NAME}", "labels": { "application": "${APPLICATION_NAME}" }, @@ -259,7 +265,7 @@ } }, "spec": { - "host": "${APPLICATION_HOSTNAME}", + "host": "${APPLICATION_DOMAIN}", "to": { "name": "${APPLICATION_NAME}" } @@ -268,9 +274,9 @@ { "kind": "Route", "apiVersion": "v1", - "id": "${APPLICATION_NAME}-https-route", + "id": "${APPLICATION_NAME}-https", "metadata": { - "name": "${APPLICATION_NAME}-https-route", + "name": "secure-${APPLICATION_NAME}", "labels": { "application": "${APPLICATION_NAME}" }, @@ -279,12 +285,12 @@ } }, "spec": { - "host": "${APPLICATION_HOSTNAME}", + "host": "${APPLICATION_DOMAIN}", "to": { "name": "secure-${APPLICATION_NAME}" }, "tls": { - "termination" : "passthrough" + "termination": "passthrough" } } }, @@ -311,18 +317,19 @@ "source": { "type": "Git", "git": { - "uri": "${GIT_URI}", - "ref": "${GIT_REF}" + "uri": "${SOURCE_REPOSITORY_URL}", + "ref": "${SOURCE_REPOSITORY_REF}" }, - "contextDir":"${GIT_CONTEXT_DIR}" + "contextDir": "${CONTEXT_DIR}" }, "strategy": { "type": "Source", "sourceStrategy": { + "forcePull": true, "from": { "kind": "ImageStreamTag", - "namespace": "openshift", - "name": "jboss-eap6-openshift:${EAP_RELEASE}" + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "jboss-eap64-openshift:1.1" } } }, @@ -336,18 +343,21 @@ { "type": "GitHub", "github": { - "secret": "${GITHUB_TRIGGER_SECRET}" + "secret": "${GITHUB_WEBHOOK_SECRET}" } }, { "type": "Generic", "generic": { - "secret": "${GENERIC_TRIGGER_SECRET}" + "secret": "${GENERIC_WEBHOOK_SECRET}" } }, { "type": "ImageChange", "imageChange": {} + }, + { + "type": "ConfigChange" } ] } @@ -378,6 +388,9 @@ "name": "${APPLICATION_NAME}" } } + }, + { + "type": "ConfigChange" } ], "replicas": 1, @@ -394,6 +407,7 @@ }, "spec": { "serviceAccount": "eap-service-account", + "terminationGracePeriodSeconds": 60, "containers": [ { "name": "${APPLICATION_NAME}", @@ -470,12 +484,16 @@ "value": "${DB_TX_ISOLATION}" }, { - "name": "OPENSHIFT_DNS_PING_SERVICE_NAME", - "value": "${APPLICATION_NAME}-ping" + "name": "OPENSHIFT_KUBE_PING_LABELS", + "value": "application=${APPLICATION_NAME}" }, { - "name": "OPENSHIFT_DNS_PING_SERVICE_PORT", - "value": "8888" + "name": "OPENSHIFT_KUBE_PING_NAMESPACE", + "valueFrom": { + "fieldRef": { + "fieldPath": "metadata.namespace" + } + } }, { "name": "EAP_HTTPS_KEYSTORE_DIR", @@ -543,10 +561,13 @@ ], "from": { "kind": "ImageStreamTag", - "namespace": "openshift", + "namespace": "${IMAGE_STREAM_NAMESPACE}", "name": "mongodb:latest" } } + }, + { + "type": "ConfigChange" } ], "replicas": 1, @@ -562,6 +583,7 @@ } }, "spec": { + "terminationGracePeriodSeconds": 60, "containers": [ { 
"name": "${APPLICATION_NAME}-mongodb", @@ -574,11 +596,11 @@ } ], "volumeMounts": [ - { - "mountPath": "/var/lib/mongodb/data", - "name": "${APPLICATION_NAME}-mongodb-pvol" - } - ], + { + "mountPath": "/var/lib/mongodb/data", + "name": "${APPLICATION_NAME}-mongodb-pvol" + } + ], "env": [ { "name": "MONGODB_USER", @@ -633,7 +655,9 @@ } }, "spec": { - "accessModes": [ "ReadWriteOnce" ], + "accessModes": [ + "ReadWriteOnce" + ], "resources": { "requests": { "storage": "${VOLUME_CAPACITY}" @@ -642,4 +666,4 @@ } } ] -} +}
\ No newline at end of file diff --git a/roles/openshift_examples/files/examples/xpaas-templates/eap6-mongodb-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/eap64-mongodb-s2i.json index 22b301aa9..dfd1443ed 100644 --- a/roles/openshift_examples/files/examples/xpaas-templates/eap6-mongodb-sti.json +++ b/roles/openshift_examples/files/examples/xpaas-templates/eap64-mongodb-s2i.json @@ -3,143 +3,173 @@ "apiVersion": "v1", "metadata": { "annotations": { - "description": "Application template for EAP 6 MongDB applications built using STI.", - "iconClass" : "icon-jboss" + "description": "Application template for EAP 6 MongDB applications built using S2I.", + "iconClass": "icon-jboss", + "tags": "eap,mongodb,javaee,java,database,jboss,xpaas", + "version": "1.1.0" }, - "name": "eap6-mongodb-sti" + "name": "eap64-mongodb-s2i" }, "labels": { - "template": "eap6-mongodb-sti" + "template": "eap64-mongodb-s2i", + "xpaas": "1.1.0" }, "parameters": [ { - "description": "EAP Release version, e.g. 6.4, etc.", - "name": "EAP_RELEASE", - "value": "6.4" - }, - { "description": "The name for the application.", "name": "APPLICATION_NAME", - "value": "eap-app" + "value": "eap-app", + "required": true }, { "description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>", - "name": "APPLICATION_HOSTNAME", - "value": "" + "name": "APPLICATION_DOMAIN", + "value": "", + "required": false }, { "description": "Git source URI for application", - "name": "GIT_URI" + "name": "SOURCE_REPOSITORY_URL", + "value": "https://github.com/jboss-openshift/openshift-quickstarts", + "required": true }, { "description": "Git branch/tag reference", - "name": "GIT_REF", - "value": "master" + "name": "SOURCE_REPOSITORY_REF", + "value": "1.1", + "required": false }, { "description": "Path within Git project to build; empty for root project directory.", - "name": "GIT_CONTEXT_DIR", - "value": "" + "name": "CONTEXT_DIR", + "value": "todolist/todolist-mongodb", + "required": false }, { "description": "Database JNDI name used by application to resolve the datasource, e.g. 
java:/jboss/datasources/mongodb", "name": "DB_JNDI", - "value": "" + "value": "", + "required": false }, { "description": "Database name", "name": "DB_DATABASE", - "value": "root" + "value": "root", + "required": true }, { "description": "Queue names", "name": "HORNETQ_QUEUES", - "value": "" + "value": "", + "required": false }, { "description": "Topic names", "name": "HORNETQ_TOPICS", - "value": "" + "value": "", + "required": false }, { "description": "The name of the secret containing the keystore file", "name": "EAP_HTTPS_SECRET", - "value": "eap-app-secret" + "value": "eap-app-secret", + "required": false }, { "description": "The name of the keystore file within the secret", "name": "EAP_HTTPS_KEYSTORE", - "value": "keystore.jks" + "value": "keystore.jks", + "required": false }, { "description": "The name associated with the server certificate", "name": "EAP_HTTPS_NAME", - "value": "" + "value": "", + "required": false }, { "description": "The password for the keystore and certificate", "name": "EAP_HTTPS_PASSWORD", - "value": "" + "value": "", + "required": false }, { "description": "Sets xa-pool/min-pool-size for the configured datasource.", - "name": "DB_MIN_POOL_SIZE" + "name": "DB_MIN_POOL_SIZE", + "required": false }, { "description": "Sets xa-pool/max-pool-size for the configured datasource.", - "name": "DB_MAX_POOL_SIZE" + "name": "DB_MAX_POOL_SIZE", + "required": false }, { "description": "Sets transaction-isolation for the configured datasource.", - "name": "DB_TX_ISOLATION" + "name": "DB_TX_ISOLATION", + "required": false }, { "description": "Disable data file preallocation.", - "name": "MONGODB_NOPREALLOC" + "name": "MONGODB_NOPREALLOC", + "required": false }, { "description": "Set MongoDB to use a smaller default data file size.", - "name": "MONGODB_SMALLFILES" + "name": "MONGODB_SMALLFILES", + "required": false }, { "description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.", - "name": "MONGODB_QUIET" + "name": "MONGODB_QUIET", + "required": false }, { "description": "HornetQ cluster admin password", "name": "HORNETQ_CLUSTER_PASSWORD", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "Database user name", "name": "DB_USERNAME", "from": "user[a-zA-Z0-9]{3}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "Database user password", "name": "DB_PASSWORD", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "Database admin password", "name": "DB_ADMIN_PASSWORD", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { - "description": "Github trigger secret", - "name": "GITHUB_TRIGGER_SECRET", + "description": "GitHub trigger secret", + "name": "GITHUB_WEBHOOK_SECRET", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "Generic build trigger secret", - "name": "GENERIC_TRIGGER_SECRET", + "name": "GENERIC_WEBHOOK_SECRET", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true + }, + { + "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. 
You should only need to modify this if you've installed the ImageStreams in a different namespace/project.", + "name": "IMAGE_STREAM_NAMESPACE", + "value": "openshift", + "required": true } ], "objects": [ @@ -197,31 +227,6 @@ "spec": { "ports": [ { - "port": 8888, - "targetPort": 8888 - } - ], - "portalIP": "None", - "selector": { - "deploymentConfig": "${APPLICATION_NAME}" - } - }, - "metadata": { - "name": "${APPLICATION_NAME}-ping", - "labels": { - "application": "${APPLICATION_NAME}" - }, - "annotations": { - "description": "Ping service for clustered applications." - } - } - }, - { - "kind": "Service", - "apiVersion": "v1", - "spec": { - "ports": [ - { "port": 27017, "targetPort": 27017 } @@ -243,9 +248,9 @@ { "kind": "Route", "apiVersion": "v1", - "id": "${APPLICATION_NAME}-http-route", + "id": "${APPLICATION_NAME}-http", "metadata": { - "name": "${APPLICATION_NAME}-http-route", + "name": "${APPLICATION_NAME}", "labels": { "application": "${APPLICATION_NAME}" }, @@ -254,7 +259,7 @@ } }, "spec": { - "host": "${APPLICATION_HOSTNAME}", + "host": "${APPLICATION_DOMAIN}", "to": { "name": "${APPLICATION_NAME}" } @@ -263,9 +268,9 @@ { "kind": "Route", "apiVersion": "v1", - "id": "${APPLICATION_NAME}-https-route", + "id": "${APPLICATION_NAME}-https", "metadata": { - "name": "${APPLICATION_NAME}-https-route", + "name": "secure-${APPLICATION_NAME}", "labels": { "application": "${APPLICATION_NAME}" }, @@ -274,12 +279,12 @@ } }, "spec": { - "host": "${APPLICATION_HOSTNAME}", + "host": "${APPLICATION_DOMAIN}", "to": { "name": "secure-${APPLICATION_NAME}" }, "tls": { - "termination" : "passthrough" + "termination": "passthrough" } } }, @@ -306,18 +311,19 @@ "source": { "type": "Git", "git": { - "uri": "${GIT_URI}", - "ref": "${GIT_REF}" + "uri": "${SOURCE_REPOSITORY_URL}", + "ref": "${SOURCE_REPOSITORY_REF}" }, - "contextDir":"${GIT_CONTEXT_DIR}" + "contextDir": "${CONTEXT_DIR}" }, "strategy": { "type": "Source", "sourceStrategy": { + "forcePull": true, "from": { "kind": "ImageStreamTag", - "namespace": "openshift", - "name": "jboss-eap6-openshift:${EAP_RELEASE}" + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "jboss-eap64-openshift:1.1" } } }, @@ -331,18 +337,21 @@ { "type": "GitHub", "github": { - "secret": "${GITHUB_TRIGGER_SECRET}" + "secret": "${GITHUB_WEBHOOK_SECRET}" } }, { "type": "Generic", "generic": { - "secret": "${GENERIC_TRIGGER_SECRET}" + "secret": "${GENERIC_WEBHOOK_SECRET}" } }, { "type": "ImageChange", "imageChange": {} + }, + { + "type": "ConfigChange" } ] } @@ -373,6 +382,9 @@ "name": "${APPLICATION_NAME}" } } + }, + { + "type": "ConfigChange" } ], "replicas": 1, @@ -389,6 +401,7 @@ }, "spec": { "serviceAccount": "eap-service-account", + "terminationGracePeriodSeconds": 60, "containers": [ { "name": "${APPLICATION_NAME}", @@ -465,12 +478,16 @@ "value": "${DB_TX_ISOLATION}" }, { - "name": "OPENSHIFT_DNS_PING_SERVICE_NAME", - "value": "${APPLICATION_NAME}-ping" + "name": "OPENSHIFT_KUBE_PING_LABELS", + "value": "application=${APPLICATION_NAME}" }, { - "name": "OPENSHIFT_DNS_PING_SERVICE_PORT", - "value": "8888" + "name": "OPENSHIFT_KUBE_PING_NAMESPACE", + "valueFrom": { + "fieldRef": { + "fieldPath": "metadata.namespace" + } + } }, { "name": "EAP_HTTPS_KEYSTORE_DIR", @@ -538,10 +555,13 @@ ], "from": { "kind": "ImageStreamTag", - "namespace": "openshift", + "namespace": "${IMAGE_STREAM_NAMESPACE}", "name": "mongodb:latest" } } + }, + { + "type": "ConfigChange" } ], "replicas": 1, @@ -557,6 +577,7 @@ } }, "spec": { + "terminationGracePeriodSeconds": 60, "containers": [ { 
"name": "${APPLICATION_NAME}-mongodb", @@ -605,4 +626,4 @@ } } ] -} +}
\ No newline at end of file diff --git a/roles/openshift_examples/files/examples/xpaas-templates/eap6-mysql-persistent-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/eap64-mysql-persistent-s2i.json index 648a53199..fdd368a5f 100644 --- a/roles/openshift_examples/files/examples/xpaas-templates/eap6-mysql-persistent-sti.json +++ b/roles/openshift_examples/files/examples/xpaas-templates/eap64-mysql-persistent-s2i.json @@ -3,150 +3,182 @@ "apiVersion": "v1", "metadata": { "annotations": { - "description": "Application template for EAP 6 MySQL applications with persistent storage built using STI.", - "iconClass" : "icon-jboss" + "description": "Application template for EAP 6 MySQL applications with persistent storage built using S2I.", + "iconClass": "icon-jboss", + "tags": "eap,mysql,javaee,java,database,jboss,xpaas", + "version": "1.1.0" }, - "name": "eap6-mysql-persistent-sti" + "name": "eap64-mysql-persistent-s2i" }, "labels": { - "template": "eap6-mysql-persistent-sti" + "template": "eap64-mysql-persistent-s2i", + "xpaas": "1.1.0" }, "parameters": [ { - "description": "EAP Release version, e.g. 6.4, etc.", - "name": "EAP_RELEASE", - "value": "6.4" - }, - { "description": "The name for the application.", "name": "APPLICATION_NAME", - "value": "eap-app" + "value": "eap-app", + "required": true }, { "description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>", - "name": "APPLICATION_HOSTNAME", - "value": "" + "name": "APPLICATION_DOMAIN", + "value": "", + "required": false }, { "description": "Git source URI for application", - "name": "GIT_URI" + "name": "SOURCE_REPOSITORY_URL", + "value": "https://github.com/jboss-openshift/openshift-quickstarts", + "required": true }, { "description": "Git branch/tag reference", - "name": "GIT_REF", - "value": "master" + "name": "SOURCE_REPOSITORY_REF", + "value": "1.1", + "required": false }, { "description": "Path within Git project to build; empty for root project directory.", - "name": "GIT_CONTEXT_DIR", - "value": "" + "name": "CONTEXT_DIR", + "value": "todolist/todolist-jdbc", + "required": false }, { "description": "Database JNDI name used by application to resolve the datasource, e.g. 
java:/jboss/datasources/mysql", "name": "DB_JNDI", - "value": "" + "value": "java:jboss/datasources/TodoListDS", + "required": false }, { "description": "Database name", "name": "DB_DATABASE", - "value": "root" + "value": "root", + "required": true }, { "description": "Size of persistent storage for database volume.", "name": "VOLUME_CAPACITY", - "value": "512Mi" + "value": "512Mi", + "required": true }, { "description": "Queue names", "name": "HORNETQ_QUEUES", - "value": "" + "value": "", + "required": false }, { "description": "Topic names", "name": "HORNETQ_TOPICS", - "value": "" + "value": "", + "required": false }, { "description": "The name of the secret containing the keystore file", "name": "EAP_HTTPS_SECRET", - "value": "eap-app-secret" + "value": "eap-app-secret", + "required": false }, { "description": "The name of the keystore file within the secret", "name": "EAP_HTTPS_KEYSTORE", - "value": "keystore.jks" + "value": "keystore.jks", + "required": false }, { "description": "The name associated with the server certificate", "name": "EAP_HTTPS_NAME", - "value": "" + "value": "", + "required": false }, { "description": "The password for the keystore and certificate", "name": "EAP_HTTPS_PASSWORD", - "value": "" + "value": "", + "required": false }, { "description": "Sets xa-pool/min-pool-size for the configured datasource.", - "name": "DB_MIN_POOL_SIZE" + "name": "DB_MIN_POOL_SIZE", + "required": false }, { "description": "Sets xa-pool/max-pool-size for the configured datasource.", - "name": "DB_MAX_POOL_SIZE" + "name": "DB_MAX_POOL_SIZE", + "required": false }, { "description": "Sets transaction-isolation for the configured datasource.", - "name": "DB_TX_ISOLATION" + "name": "DB_TX_ISOLATION", + "required": false }, { "description": "Sets how the table names are stored and compared.", - "name": "MYSQL_LOWER_CASE_TABLE_NAMES" + "name": "MYSQL_LOWER_CASE_TABLE_NAMES", + "required": false }, { "description": "The maximum permitted number of simultaneous client connections.", - "name": "MYSQL_MAX_CONNECTIONS" + "name": "MYSQL_MAX_CONNECTIONS", + "required": false }, { "description": "The minimum length of the word to be included in a FULLTEXT index.", - "name": "MYSQL_FT_MIN_WORD_LEN" + "name": "MYSQL_FT_MIN_WORD_LEN", + "required": false }, { "description": "The maximum length of the word to be included in a FULLTEXT index.", - "name": "MYSQL_FT_MAX_WORD_LEN" + "name": "MYSQL_FT_MAX_WORD_LEN", + "required": false }, { "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.", - "name": "MYSQL_AIO" + "name": "MYSQL_AIO", + "required": false }, { "description": "HornetQ cluster admin password", "name": "HORNETQ_CLUSTER_PASSWORD", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "Database user name", "name": "DB_USERNAME", "from": "user[a-zA-Z0-9]{3}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "Database user password", "name": "DB_PASSWORD", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { - "description": "Github trigger secret", - "name": "GITHUB_TRIGGER_SECRET", + "description": "GitHub trigger secret", + "name": "GITHUB_WEBHOOK_SECRET", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "Generic build trigger secret", - "name": "GENERIC_TRIGGER_SECRET", + "name": "GENERIC_WEBHOOK_SECRET", "from": 
"[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true + }, + { + "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.", + "name": "IMAGE_STREAM_NAMESPACE", + "value": "openshift", + "required": true } ], "objects": [ @@ -204,31 +236,6 @@ "spec": { "ports": [ { - "port": 8888, - "targetPort": 8888 - } - ], - "portalIP": "None", - "selector": { - "deploymentConfig": "${APPLICATION_NAME}" - } - }, - "metadata": { - "name": "${APPLICATION_NAME}-ping", - "labels": { - "application": "${APPLICATION_NAME}" - }, - "annotations": { - "description": "Ping service for clustered applications." - } - } - }, - { - "kind": "Service", - "apiVersion": "v1", - "spec": { - "ports": [ - { "port": 3306, "targetPort": 3306 } @@ -250,9 +257,9 @@ { "kind": "Route", "apiVersion": "v1", - "id": "${APPLICATION_NAME}-http-route", + "id": "${APPLICATION_NAME}-http", "metadata": { - "name": "${APPLICATION_NAME}-http-route", + "name": "${APPLICATION_NAME}", "labels": { "application": "${APPLICATION_NAME}" }, @@ -261,7 +268,7 @@ } }, "spec": { - "host": "${APPLICATION_HOSTNAME}", + "host": "${APPLICATION_DOMAIN}", "to": { "name": "${APPLICATION_NAME}" } @@ -270,9 +277,9 @@ { "kind": "Route", "apiVersion": "v1", - "id": "${APPLICATION_NAME}-https-route", + "id": "${APPLICATION_NAME}-https", "metadata": { - "name": "${APPLICATION_NAME}-https-route", + "name": "secure-${APPLICATION_NAME}", "labels": { "application": "${APPLICATION_NAME}" }, @@ -281,12 +288,12 @@ } }, "spec": { - "host": "${APPLICATION_HOSTNAME}", + "host": "${APPLICATION_DOMAIN}", "to": { "name": "secure-${APPLICATION_NAME}" }, "tls": { - "termination" : "passthrough" + "termination": "passthrough" } } }, @@ -313,18 +320,19 @@ "source": { "type": "Git", "git": { - "uri": "${GIT_URI}", - "ref": "${GIT_REF}" + "uri": "${SOURCE_REPOSITORY_URL}", + "ref": "${SOURCE_REPOSITORY_REF}" }, - "contextDir":"${GIT_CONTEXT_DIR}" + "contextDir": "${CONTEXT_DIR}" }, "strategy": { "type": "Source", "sourceStrategy": { + "forcePull": true, "from": { "kind": "ImageStreamTag", - "namespace": "openshift", - "name": "jboss-eap6-openshift:${EAP_RELEASE}" + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "jboss-eap64-openshift:1.1" } } }, @@ -338,18 +346,21 @@ { "type": "GitHub", "github": { - "secret": "${GITHUB_TRIGGER_SECRET}" + "secret": "${GITHUB_WEBHOOK_SECRET}" } }, { "type": "Generic", "generic": { - "secret": "${GENERIC_TRIGGER_SECRET}" + "secret": "${GENERIC_WEBHOOK_SECRET}" } }, { "type": "ImageChange", "imageChange": {} + }, + { + "type": "ConfigChange" } ] } @@ -380,6 +391,9 @@ "name": "${APPLICATION_NAME}" } } + }, + { + "type": "ConfigChange" } ], "replicas": 1, @@ -396,6 +410,7 @@ }, "spec": { "serviceAccount": "eap-service-account", + "terminationGracePeriodSeconds": 60, "containers": [ { "name": "${APPLICATION_NAME}", @@ -472,12 +487,16 @@ "value": "${DB_TX_ISOLATION}" }, { - "name": "OPENSHIFT_DNS_PING_SERVICE_NAME", - "value": "${APPLICATION_NAME}-ping" + "name": "OPENSHIFT_KUBE_PING_LABELS", + "value": "application=${APPLICATION_NAME}" }, { - "name": "OPENSHIFT_DNS_PING_SERVICE_PORT", - "value": "8888" + "name": "OPENSHIFT_KUBE_PING_NAMESPACE", + "valueFrom": { + "fieldRef": { + "fieldPath": "metadata.namespace" + } + } }, { "name": "EAP_HTTPS_KEYSTORE_DIR", @@ -545,10 +564,13 @@ ], "from": { "kind": 
"ImageStreamTag", - "namespace": "openshift", + "namespace": "${IMAGE_STREAM_NAMESPACE}", "name": "mysql:latest" } } + }, + { + "type": "ConfigChange" } ], "replicas": 1, @@ -564,6 +586,7 @@ } }, "spec": { + "terminationGracePeriodSeconds": 60, "containers": [ { "name": "${APPLICATION_NAME}-mysql", @@ -639,7 +662,9 @@ } }, "spec": { - "accessModes": [ "ReadWriteOnce" ], + "accessModes": [ + "ReadWriteOnce" + ], "resources": { "requests": { "storage": "${VOLUME_CAPACITY}" @@ -648,4 +673,4 @@ } } ] -} +}
\ No newline at end of file diff --git a/roles/openshift_examples/files/examples/xpaas-templates/eap6-mysql-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/eap64-mysql-s2i.json index 83d5c8b18..ff6bdc112 100644 --- a/roles/openshift_examples/files/examples/xpaas-templates/eap6-mysql-sti.json +++ b/roles/openshift_examples/files/examples/xpaas-templates/eap64-mysql-s2i.json @@ -3,145 +3,176 @@ "apiVersion": "v1", "metadata": { "annotations": { - "description": "Application template for EAP 6 MySQL applications built using STI.", - "iconClass" : "icon-jboss" + "description": "Application template for EAP 6 MySQL applications built using S2I.", + "iconClass": "icon-jboss", + "tags": "eap,mysql,javaee,java,database,jboss,xpaas", + "version": "1.1.0" }, - "name": "eap6-mysql-sti" + "name": "eap64-mysql-s2i" }, "labels": { - "template": "eap6-mysql-sti" + "template": "eap64-mysql-s2i", + "xpaas": "1.1.0" }, "parameters": [ { - "description": "EAP Release version, e.g. 6.4, etc.", - "name": "EAP_RELEASE", - "value": "6.4" - }, - { "description": "The name for the application.", "name": "APPLICATION_NAME", - "value": "eap-app" + "value": "eap-app", + "required": true }, { "description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>", - "name": "APPLICATION_HOSTNAME", - "value": "" + "name": "APPLICATION_DOMAIN", + "value": "", + "required": false }, { "description": "Git source URI for application", - "name": "GIT_URI" + "name": "SOURCE_REPOSITORY_URL", + "value": "https://github.com/jboss-openshift/openshift-quickstarts", + "required": true }, { "description": "Git branch/tag reference", - "name": "GIT_REF", - "value": "master" + "name": "SOURCE_REPOSITORY_REF", + "value": "1.1", + "required": false }, { "description": "Path within Git project to build; empty for root project directory.", - "name": "GIT_CONTEXT_DIR", - "value": "" + "name": "CONTEXT_DIR", + "value": "todolist/todolist-jdbc", + "required": false }, { "description": "Database JNDI name used by application to resolve the datasource, e.g. 
java:/jboss/datasources/mysql", "name": "DB_JNDI", - "value": "" + "value": "java:jboss/datasources/TodoListDS", + "required": false }, { "description": "Database name", "name": "DB_DATABASE", - "value": "root" + "value": "root", + "required": true }, { "description": "Queue names", "name": "HORNETQ_QUEUES", - "value": "" + "value": "", + "required": false }, { "description": "Topic names", "name": "HORNETQ_TOPICS", - "value": "" + "value": "", + "required": false }, { "description": "The name of the secret containing the keystore file", "name": "EAP_HTTPS_SECRET", - "value": "eap-app-secret" + "value": "eap-app-secret", + "required": false }, { "description": "The name of the keystore file within the secret", "name": "EAP_HTTPS_KEYSTORE", - "value": "keystore.jks" + "value": "keystore.jks", + "required": false }, { "description": "The name associated with the server certificate", "name": "EAP_HTTPS_NAME", - "value": "" + "value": "", + "required": false }, { "description": "The password for the keystore and certificate", "name": "EAP_HTTPS_PASSWORD", - "value": "" + "value": "", + "required": false }, { "description": "Sets xa-pool/min-pool-size for the configured datasource.", - "name": "DB_MIN_POOL_SIZE" + "name": "DB_MIN_POOL_SIZE", + "required": false }, { "description": "Sets xa-pool/max-pool-size for the configured datasource.", - "name": "DB_MAX_POOL_SIZE" + "name": "DB_MAX_POOL_SIZE", + "required": false }, { "description": "Sets transaction-isolation for the configured datasource.", - "name": "DB_TX_ISOLATION" + "name": "DB_TX_ISOLATION", + "required": false }, { "description": "Sets how the table names are stored and compared.", - "name": "MYSQL_LOWER_CASE_TABLE_NAMES" + "name": "MYSQL_LOWER_CASE_TABLE_NAMES", + "required": false }, { "description": "The maximum permitted number of simultaneous client connections.", - "name": "MYSQL_MAX_CONNECTIONS" + "name": "MYSQL_MAX_CONNECTIONS", + "required": false }, { "description": "The minimum length of the word to be included in a FULLTEXT index.", - "name": "MYSQL_FT_MIN_WORD_LEN" + "name": "MYSQL_FT_MIN_WORD_LEN", + "required": false }, { "description": "The maximum length of the word to be included in a FULLTEXT index.", - "name": "MYSQL_FT_MAX_WORD_LEN" + "name": "MYSQL_FT_MAX_WORD_LEN", + "required": false }, { "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.", - "name": "MYSQL_AIO" + "name": "MYSQL_AIO", + "required": false }, { "description": "HornetQ cluster admin password", "name": "HORNETQ_CLUSTER_PASSWORD", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "Database user name", "name": "DB_USERNAME", "from": "user[a-zA-Z0-9]{3}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "Database user password", "name": "DB_PASSWORD", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { - "description": "Github trigger secret", - "name": "GITHUB_TRIGGER_SECRET", + "description": "GitHub trigger secret", + "name": "GITHUB_WEBHOOK_SECRET", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "Generic build trigger secret", - "name": "GENERIC_TRIGGER_SECRET", + "name": "GENERIC_WEBHOOK_SECRET", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true + }, + { + "description": "Namespace in which the ImageStreams for Red 
Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.", + "name": "IMAGE_STREAM_NAMESPACE", + "value": "openshift", + "required": true } ], "objects": [ @@ -199,31 +230,6 @@ "spec": { "ports": [ { - "port": 8888, - "targetPort": 8888 - } - ], - "portalIP": "None", - "selector": { - "deploymentConfig": "${APPLICATION_NAME}" - } - }, - "metadata": { - "name": "${APPLICATION_NAME}-ping", - "labels": { - "application": "${APPLICATION_NAME}" - }, - "annotations": { - "description": "Ping service for clustered applications." - } - } - }, - { - "kind": "Service", - "apiVersion": "v1", - "spec": { - "ports": [ - { "port": 3306, "targetPort": 3306 } @@ -245,9 +251,9 @@ { "kind": "Route", "apiVersion": "v1", - "id": "${APPLICATION_NAME}-http-route", + "id": "${APPLICATION_NAME}-http", "metadata": { - "name": "${APPLICATION_NAME}-http-route", + "name": "${APPLICATION_NAME}", "labels": { "application": "${APPLICATION_NAME}" }, @@ -256,7 +262,7 @@ } }, "spec": { - "host": "${APPLICATION_HOSTNAME}", + "host": "${APPLICATION_DOMAIN}", "to": { "name": "${APPLICATION_NAME}" } @@ -265,9 +271,9 @@ { "kind": "Route", "apiVersion": "v1", - "id": "${APPLICATION_NAME}-https-route", + "id": "${APPLICATION_NAME}-https", "metadata": { - "name": "${APPLICATION_NAME}-https-route", + "name": "secure-${APPLICATION_NAME}", "labels": { "application": "${APPLICATION_NAME}" }, @@ -276,12 +282,12 @@ } }, "spec": { - "host": "${APPLICATION_HOSTNAME}", + "host": "${APPLICATION_DOMAIN}", "to": { "name": "secure-${APPLICATION_NAME}" }, "tls": { - "termination" : "passthrough" + "termination": "passthrough" } } }, @@ -308,18 +314,19 @@ "source": { "type": "Git", "git": { - "uri": "${GIT_URI}", - "ref": "${GIT_REF}" + "uri": "${SOURCE_REPOSITORY_URL}", + "ref": "${SOURCE_REPOSITORY_REF}" }, - "contextDir":"${GIT_CONTEXT_DIR}" + "contextDir": "${CONTEXT_DIR}" }, "strategy": { "type": "Source", "sourceStrategy": { + "forcePull": true, "from": { "kind": "ImageStreamTag", - "namespace": "openshift", - "name": "jboss-eap6-openshift:${EAP_RELEASE}" + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "jboss-eap64-openshift:1.1" } } }, @@ -333,18 +340,21 @@ { "type": "GitHub", "github": { - "secret": "${GITHUB_TRIGGER_SECRET}" + "secret": "${GITHUB_WEBHOOK_SECRET}" } }, { "type": "Generic", "generic": { - "secret": "${GENERIC_TRIGGER_SECRET}" + "secret": "${GENERIC_WEBHOOK_SECRET}" } }, { "type": "ImageChange", "imageChange": {} + }, + { + "type": "ConfigChange" } ] } @@ -375,6 +385,9 @@ "name": "${APPLICATION_NAME}" } } + }, + { + "type": "ConfigChange" } ], "replicas": 1, @@ -391,6 +404,7 @@ }, "spec": { "serviceAccount": "eap-service-account", + "terminationGracePeriodSeconds": 60, "containers": [ { "name": "${APPLICATION_NAME}", @@ -467,12 +481,16 @@ "value": "${DB_TX_ISOLATION}" }, { - "name": "OPENSHIFT_DNS_PING_SERVICE_NAME", - "value": "${APPLICATION_NAME}-ping" + "name": "OPENSHIFT_KUBE_PING_LABELS", + "value": "application=${APPLICATION_NAME}" }, { - "name": "OPENSHIFT_DNS_PING_SERVICE_PORT", - "value": "8888" + "name": "OPENSHIFT_KUBE_PING_NAMESPACE", + "valueFrom": { + "fieldRef": { + "fieldPath": "metadata.namespace" + } + } }, { "name": "EAP_HTTPS_KEYSTORE_DIR", @@ -540,10 +558,13 @@ ], "from": { "kind": "ImageStreamTag", - "namespace": "openshift", + "namespace": "${IMAGE_STREAM_NAMESPACE}", "name": "mysql:latest" } } + }, + { + "type": "ConfigChange" } ], 
"replicas": 1, @@ -559,6 +580,7 @@ } }, "spec": { + "terminationGracePeriodSeconds": 60, "containers": [ { "name": "${APPLICATION_NAME}-mysql", @@ -611,4 +633,4 @@ } } ] -} +}
\ No newline at end of file diff --git a/roles/openshift_examples/files/examples/xpaas-templates/eap6-postgresql-persistent-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/eap64-postgresql-persistent-s2i.json index 53b953b7e..6443afdb0 100644 --- a/roles/openshift_examples/files/examples/xpaas-templates/eap6-postgresql-persistent-sti.json +++ b/roles/openshift_examples/files/examples/xpaas-templates/eap64-postgresql-persistent-s2i.json @@ -3,138 +3,167 @@ "apiVersion": "v1", "metadata": { "annotations": { - "description": "Application template for EAP 6 PostgreSQL applications with persistent storage built using STI.", - "iconClass" : "icon-jboss" + "description": "Application template for EAP 6 PostgreSQL applications with persistent storage built using S2I.", + "iconClass": "icon-jboss", + "tags": "eap,postgresql,javaee,java,database,jboss,xpaas", + "version": "1.1.0" }, - "name": "eap6-postgresql-persistent-sti" + "name": "eap64-postgresql-persistent-s2i" }, "labels": { - "template": "eap6-postgresql-persistent-sti" + "template": "eap64-postgresql-persistent-s2i", + "xpaas": "1.1.0" }, "parameters": [ { - "description": "EAP Release version, e.g. 6.4, etc.", - "name": "EAP_RELEASE", - "value": "6.4" - }, - { "description": "The name for the application.", "name": "APPLICATION_NAME", - "value": "eap-app" + "value": "eap-app", + "required": true }, { "description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>", - "name": "APPLICATION_HOSTNAME", - "value": "" + "name": "APPLICATION_DOMAIN", + "value": "", + "required": false }, { "description": "Git source URI for application", - "name": "GIT_URI" + "name": "SOURCE_REPOSITORY_URL", + "value": "https://github.com/jboss-openshift/openshift-quickstarts", + "required": true }, { "description": "Git branch/tag reference", - "name": "GIT_REF", - "value": "master" + "name": "SOURCE_REPOSITORY_REF", + "value": "1.1", + "required": false }, { "description": "Path within Git project to build; empty for root project directory.", - "name": "GIT_CONTEXT_DIR", - "value": "" + "name": "CONTEXT_DIR", + "value": "todolist/todolist-jdbc", + "required": false }, { "description": "Database JNDI name used by application to resolve the datasource, e.g. 
java:/jboss/datasources/postgresql", "name": "DB_JNDI", - "value": "" + "value": "java:jboss/datasources/TodoListDS", + "required": false }, { "description": "Database name", "name": "DB_DATABASE", - "value": "root" + "value": "root", + "required": true }, { "description": "Size of persistent storage for database volume.", "name": "VOLUME_CAPACITY", - "value": "512Mi" + "value": "512Mi", + "required": true }, { "description": "Queue names", "name": "HORNETQ_QUEUES", - "value": "" + "value": "", + "required": false }, { "description": "Topic names", "name": "HORNETQ_TOPICS", - "value": "" + "value": "", + "required": false }, { "description": "The name of the secret containing the keystore file", "name": "EAP_HTTPS_SECRET", - "value": "eap-app-secret" + "value": "eap-app-secret", + "required": false }, { "description": "The name of the keystore file within the secret", "name": "EAP_HTTPS_KEYSTORE", - "value": "keystore.jks" + "value": "keystore.jks", + "required": false }, { "description": "The name associated with the server certificate", "name": "EAP_HTTPS_NAME", - "value": "" + "value": "", + "required": false }, { "description": "The password for the keystore and certificate", "name": "EAP_HTTPS_PASSWORD", - "value": "" + "value": "", + "required": false }, { "description": "Sets xa-pool/min-pool-size for the configured datasource.", - "name": "DB_MIN_POOL_SIZE" + "name": "DB_MIN_POOL_SIZE", + "required": false }, { "description": "Sets xa-pool/max-pool-size for the configured datasource.", - "name": "DB_MAX_POOL_SIZE" + "name": "DB_MAX_POOL_SIZE", + "required": false }, { "description": "Sets transaction-isolation for the configured datasource.", - "name": "DB_TX_ISOLATION" + "name": "DB_TX_ISOLATION", + "required": false }, { "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.", - "name": "POSTGRESQL_MAX_CONNECTIONS" + "name": "POSTGRESQL_MAX_CONNECTIONS", + "required": false }, { "description": "Configures how much memory is dedicated to PostgreSQL for caching data.", - "name": "POSTGRESQL_SHARED_BUFFERS" + "name": "POSTGRESQL_SHARED_BUFFERS", + "required": false }, { "description": "HornetQ cluster admin password", "name": "HORNETQ_CLUSTER_PASSWORD", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "Database user name", "name": "DB_USERNAME", "from": "user[a-zA-Z0-9]{3}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "Database user password", "name": "DB_PASSWORD", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { - "description": "Github trigger secret", - "name": "GITHUB_TRIGGER_SECRET", + "description": "GitHub trigger secret", + "name": "GITHUB_WEBHOOK_SECRET", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "Generic build trigger secret", - "name": "GENERIC_TRIGGER_SECRET", + "name": "GENERIC_WEBHOOK_SECRET", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true + }, + { + "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. 
You should only need to modify this if you've installed the ImageStreams in a different namespace/project.", + "name": "IMAGE_STREAM_NAMESPACE", + "value": "openshift", + "required": true } ], "objects": [ @@ -192,31 +221,6 @@ "spec": { "ports": [ { - "port": 8888, - "targetPort": 8888 - } - ], - "portalIP": "None", - "selector": { - "deploymentConfig": "${APPLICATION_NAME}" - } - }, - "metadata": { - "name": "${APPLICATION_NAME}-ping", - "labels": { - "application": "${APPLICATION_NAME}" - }, - "annotations": { - "description": "Ping service for clustered applications." - } - } - }, - { - "kind": "Service", - "apiVersion": "v1", - "spec": { - "ports": [ - { "port": 5432, "targetPort": 5432 } @@ -238,9 +242,9 @@ { "kind": "Route", "apiVersion": "v1", - "id": "${APPLICATION_NAME}-http-route", + "id": "${APPLICATION_NAME}-http", "metadata": { - "name": "${APPLICATION_NAME}-http-route", + "name": "${APPLICATION_NAME}", "labels": { "application": "${APPLICATION_NAME}" }, @@ -249,7 +253,7 @@ } }, "spec": { - "host": "${APPLICATION_HOSTNAME}", + "host": "${APPLICATION_DOMAIN}", "to": { "name": "${APPLICATION_NAME}" } @@ -258,9 +262,9 @@ { "kind": "Route", "apiVersion": "v1", - "id": "${APPLICATION_NAME}-https-route", + "id": "${APPLICATION_NAME}-https", "metadata": { - "name": "${APPLICATION_NAME}-https-route", + "name": "secure-${APPLICATION_NAME}", "labels": { "application": "${APPLICATION_NAME}" }, @@ -269,12 +273,12 @@ } }, "spec": { - "host": "${APPLICATION_HOSTNAME}", + "host": "${APPLICATION_DOMAIN}", "to": { "name": "secure-${APPLICATION_NAME}" }, "tls": { - "termination" : "passthrough" + "termination": "passthrough" } } }, @@ -301,18 +305,19 @@ "source": { "type": "Git", "git": { - "uri": "${GIT_URI}", - "ref": "${GIT_REF}" + "uri": "${SOURCE_REPOSITORY_URL}", + "ref": "${SOURCE_REPOSITORY_REF}" }, - "contextDir":"${GIT_CONTEXT_DIR}" + "contextDir": "${CONTEXT_DIR}" }, "strategy": { "type": "Source", "sourceStrategy": { + "forcePull": true, "from": { "kind": "ImageStreamTag", - "namespace": "openshift", - "name": "jboss-eap6-openshift:${EAP_RELEASE}" + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "jboss-eap64-openshift:1.1" } } }, @@ -326,18 +331,21 @@ { "type": "GitHub", "github": { - "secret": "${GITHUB_TRIGGER_SECRET}" + "secret": "${GITHUB_WEBHOOK_SECRET}" } }, { "type": "Generic", "generic": { - "secret": "${GENERIC_TRIGGER_SECRET}" + "secret": "${GENERIC_WEBHOOK_SECRET}" } }, { "type": "ImageChange", "imageChange": {} + }, + { + "type": "ConfigChange" } ] } @@ -368,6 +376,9 @@ "name": "${APPLICATION_NAME}" } } + }, + { + "type": "ConfigChange" } ], "replicas": 1, @@ -384,6 +395,7 @@ }, "spec": { "serviceAccount": "eap-service-account", + "terminationGracePeriodSeconds": 60, "containers": [ { "name": "${APPLICATION_NAME}", @@ -460,12 +472,16 @@ "value": "${DB_TX_ISOLATION}" }, { - "name": "OPENSHIFT_DNS_PING_SERVICE_NAME", - "value": "${APPLICATION_NAME}-ping" + "name": "OPENSHIFT_KUBE_PING_LABELS", + "value": "application=${APPLICATION_NAME}" }, { - "name": "OPENSHIFT_DNS_PING_SERVICE_PORT", - "value": "8888" + "name": "OPENSHIFT_KUBE_PING_NAMESPACE", + "valueFrom": { + "fieldRef": { + "fieldPath": "metadata.namespace" + } + } }, { "name": "EAP_HTTPS_KEYSTORE_DIR", @@ -533,10 +549,13 @@ ], "from": { "kind": "ImageStreamTag", - "namespace": "openshift", + "namespace": "${IMAGE_STREAM_NAMESPACE}", "name": "postgresql:latest" } } + }, + { + "type": "ConfigChange" } ], "replicas": 1, @@ -552,6 +571,7 @@ } }, "spec": { + "terminationGracePeriodSeconds": 60, "containers": [ { 
"name": "${APPLICATION_NAME}-postgresql", @@ -564,10 +584,10 @@ } ], "volumeMounts": [ - { - "mountPath": "/var/lib/pgsql/data", - "name": "${APPLICATION_NAME}-postgresql-pvol" - } + { + "mountPath": "/var/lib/pgsql/data", + "name": "${APPLICATION_NAME}-postgresql-pvol" + } ], "env": [ { @@ -615,7 +635,9 @@ } }, "spec": { - "accessModes": [ "ReadWriteOnce" ], + "accessModes": [ + "ReadWriteOnce" + ], "resources": { "requests": { "storage": "${VOLUME_CAPACITY}" @@ -624,4 +646,4 @@ } } ] -} +}
\ No newline at end of file diff --git a/roles/openshift_examples/files/examples/xpaas-templates/eap6-postgresql-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/eap64-postgresql-s2i.json index 9d660cb42..e879e51cf 100644 --- a/roles/openshift_examples/files/examples/xpaas-templates/eap6-postgresql-sti.json +++ b/roles/openshift_examples/files/examples/xpaas-templates/eap64-postgresql-s2i.json @@ -3,133 +3,161 @@ "apiVersion": "v1", "metadata": { "annotations": { - "description": "Application template for EAP 6 PostgreSQL applications built using STI.", - "iconClass" : "icon-jboss" + "description": "Application template for EAP 6 PostgreSQL applications built using S2I.", + "iconClass": "icon-jboss", + "tags": "eap,postgresql,javaee,java,database,jboss,xpaas", + "version": "1.1.0" }, - "name": "eap6-postgresql-sti" + "name": "eap64-postgresql-s2i" }, "labels": { - "template": "eap6-postgresql-sti" + "template": "eap64-postgresql-s2i", + "xpaas": "1.1.0" }, "parameters": [ { - "description": "EAP Release version, e.g. 6.4, etc.", - "name": "EAP_RELEASE", - "value": "6.4" - }, - { "description": "The name for the application.", "name": "APPLICATION_NAME", - "value": "eap-app" + "value": "eap-app", + "required": true }, { "description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>", - "name": "APPLICATION_HOSTNAME", - "value": "" + "name": "APPLICATION_DOMAIN", + "value": "", + "required": false }, { "description": "Git source URI for application", - "name": "GIT_URI" + "name": "SOURCE_REPOSITORY_URL", + "value": "https://github.com/jboss-openshift/openshift-quickstarts", + "required": true }, { "description": "Git branch/tag reference", - "name": "GIT_REF", - "value": "master" + "name": "SOURCE_REPOSITORY_REF", + "value": "1.1", + "required": false }, { "description": "Path within Git project to build; empty for root project directory.", - "name": "GIT_CONTEXT_DIR", - "value": "" + "name": "CONTEXT_DIR", + "value": "todolist/todolist-jdbc", + "required": false }, { "description": "Database JNDI name used by application to resolve the datasource, e.g. 
java:/jboss/datasources/postgresql", "name": "DB_JNDI", - "value": "" + "value": "java:jboss/datasources/TodoListDS", + "required": false }, { "description": "Database name", "name": "DB_DATABASE", - "value": "root" + "value": "root", + "required": true }, { "description": "Queue names", "name": "HORNETQ_QUEUES", - "value": "" + "value": "", + "required": false }, { "description": "Topic names", "name": "HORNETQ_TOPICS", - "value": "" + "value": "", + "required": false }, { "description": "The name of the secret containing the keystore file", "name": "EAP_HTTPS_SECRET", - "value": "eap-app-secret" + "value": "eap-app-secret", + "required": false }, { "description": "The name of the keystore file within the secret", "name": "EAP_HTTPS_KEYSTORE", - "value": "keystore.jks" + "value": "keystore.jks", + "required": false }, { "description": "The name associated with the server certificate", "name": "EAP_HTTPS_NAME", - "value": "" + "value": "", + "required": false }, { "description": "The password for the keystore and certificate", "name": "EAP_HTTPS_PASSWORD", - "value": "" + "value": "", + "required": false }, { "description": "Sets xa-pool/min-pool-size for the configured datasource.", - "name": "DB_MIN_POOL_SIZE" + "name": "DB_MIN_POOL_SIZE", + "required": false }, { "description": "Sets xa-pool/max-pool-size for the configured datasource.", - "name": "DB_MAX_POOL_SIZE" + "name": "DB_MAX_POOL_SIZE", + "required": false }, { "description": "Sets transaction-isolation for the configured datasource.", - "name": "DB_TX_ISOLATION" + "name": "DB_TX_ISOLATION", + "required": false }, { "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.", - "name": "POSTGRESQL_MAX_CONNECTIONS" + "name": "POSTGRESQL_MAX_CONNECTIONS", + "required": false }, { "description": "Configures how much memory is dedicated to PostgreSQL for caching data.", - "name": "POSTGRESQL_SHARED_BUFFERS" + "name": "POSTGRESQL_SHARED_BUFFERS", + "required": false }, { "description": "HornetQ cluster admin password", "name": "HORNETQ_CLUSTER_PASSWORD", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "Database user name", "name": "DB_USERNAME", "from": "user[a-zA-Z0-9]{3}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "Database user password", "name": "DB_PASSWORD", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { - "description": "Github trigger secret", - "name": "GITHUB_TRIGGER_SECRET", + "description": "GitHub trigger secret", + "name": "GITHUB_WEBHOOK_SECRET", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "Generic build trigger secret", - "name": "GENERIC_TRIGGER_SECRET", + "name": "GENERIC_WEBHOOK_SECRET", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true + }, + { + "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. 
You should only need to modify this if you've installed the ImageStreams in a different namespace/project.", + "name": "IMAGE_STREAM_NAMESPACE", + "value": "openshift", + "required": true } ], "objects": [ @@ -187,31 +215,6 @@ "spec": { "ports": [ { - "port": 8888, - "targetPort": 8888 - } - ], - "portalIP": "None", - "selector": { - "deploymentConfig": "${APPLICATION_NAME}" - } - }, - "metadata": { - "name": "${APPLICATION_NAME}-ping", - "labels": { - "application": "${APPLICATION_NAME}" - }, - "annotations": { - "description": "Ping service for clustered applications." - } - } - }, - { - "kind": "Service", - "apiVersion": "v1", - "spec": { - "ports": [ - { "port": 5432, "targetPort": 5432 } @@ -233,9 +236,9 @@ { "kind": "Route", "apiVersion": "v1", - "id": "${APPLICATION_NAME}-http-route", + "id": "${APPLICATION_NAME}-http", "metadata": { - "name": "${APPLICATION_NAME}-http-route", + "name": "${APPLICATION_NAME}", "labels": { "application": "${APPLICATION_NAME}" }, @@ -244,7 +247,7 @@ } }, "spec": { - "host": "${APPLICATION_HOSTNAME}", + "host": "${APPLICATION_DOMAIN}", "to": { "name": "${APPLICATION_NAME}" } @@ -253,9 +256,9 @@ { "kind": "Route", "apiVersion": "v1", - "id": "${APPLICATION_NAME}-https-route", + "id": "${APPLICATION_NAME}-https", "metadata": { - "name": "${APPLICATION_NAME}-https-route", + "name": "secure-${APPLICATION_NAME}", "labels": { "application": "${APPLICATION_NAME}" }, @@ -264,12 +267,12 @@ } }, "spec": { - "host": "${APPLICATION_HOSTNAME}", + "host": "${APPLICATION_DOMAIN}", "to": { "name": "secure-${APPLICATION_NAME}" }, "tls": { - "termination" : "passthrough" + "termination": "passthrough" } } }, @@ -296,18 +299,19 @@ "source": { "type": "Git", "git": { - "uri": "${GIT_URI}", - "ref": "${GIT_REF}" + "uri": "${SOURCE_REPOSITORY_URL}", + "ref": "${SOURCE_REPOSITORY_REF}" }, - "contextDir":"${GIT_CONTEXT_DIR}" + "contextDir": "${CONTEXT_DIR}" }, "strategy": { "type": "Source", "sourceStrategy": { + "forcePull": true, "from": { "kind": "ImageStreamTag", - "namespace": "openshift", - "name": "jboss-eap6-openshift:${EAP_RELEASE}" + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "jboss-eap64-openshift:1.1" } } }, @@ -321,18 +325,21 @@ { "type": "GitHub", "github": { - "secret": "${GITHUB_TRIGGER_SECRET}" + "secret": "${GITHUB_WEBHOOK_SECRET}" } }, { "type": "Generic", "generic": { - "secret": "${GENERIC_TRIGGER_SECRET}" + "secret": "${GENERIC_WEBHOOK_SECRET}" } }, { "type": "ImageChange", "imageChange": {} + }, + { + "type": "ConfigChange" } ] } @@ -363,6 +370,9 @@ "name": "${APPLICATION_NAME}" } } + }, + { + "type": "ConfigChange" } ], "replicas": 1, @@ -379,6 +389,7 @@ }, "spec": { "serviceAccount": "eap-service-account", + "terminationGracePeriodSeconds": 60, "containers": [ { "name": "${APPLICATION_NAME}", @@ -455,12 +466,16 @@ "value": "${DB_TX_ISOLATION}" }, { - "name": "OPENSHIFT_DNS_PING_SERVICE_NAME", - "value": "${APPLICATION_NAME}-ping" + "name": "OPENSHIFT_KUBE_PING_LABELS", + "value": "application=${APPLICATION_NAME}" }, { - "name": "OPENSHIFT_DNS_PING_SERVICE_PORT", - "value": "8888" + "name": "OPENSHIFT_KUBE_PING_NAMESPACE", + "valueFrom": { + "fieldRef": { + "fieldPath": "metadata.namespace" + } + } }, { "name": "EAP_HTTPS_KEYSTORE_DIR", @@ -528,10 +543,13 @@ ], "from": { "kind": "ImageStreamTag", - "namespace": "openshift", + "namespace": "${IMAGE_STREAM_NAMESPACE}", "name": "postgresql:latest" } } + }, + { + "type": "ConfigChange" } ], "replicas": 1, @@ -547,6 +565,7 @@ } }, "spec": { + "terminationGracePeriodSeconds": 60, "containers": [ { 
"name": "${APPLICATION_NAME}-postgresql", @@ -587,4 +606,4 @@ } } ] -} +}
\ No newline at end of file diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-basic-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat7-basic-s2i.json index 3c7812b69..729079130 100644 --- a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-basic-sti.json +++ b/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat7-basic-s2i.json @@ -3,67 +3,81 @@ "apiVersion": "v1", "metadata": { "annotations": { - "iconClass" : "icon-tomcat", - "description": "Application template for JWS applications built using STI." + "iconClass": "icon-tomcat", + "description": "Application template for JWS applications built using S2I.", + "tags": "tomcat,tomcat7,java,jboss,xpaas", + "version": "1.1.0" }, - "name": "jws-tomcat8-basic-sti" + "name": "jws30-tomcat7-basic-s2i" }, "labels": { - "template": "jws-tomcat8-basic-sti" + "template": "jws30-tomcat7-basic-s2i", + "xpaas": "1.1.0" }, "parameters": [ { - "description": "JWS Release version, e.g. 3.0, 2.1, etc.", - "name": "JWS_RELEASE", - "value": "3.0" - }, - { "description": "The name for the application.", "name": "APPLICATION_NAME", - "value": "jws-app" + "value": "jws-app", + "required": true }, { "description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>", - "name": "APPLICATION_HOSTNAME", - "value": "" + "name": "APPLICATION_DOMAIN", + "value": "", + "required": false }, { "description": "Git source URI for application", - "name": "GIT_URI" + "name": "SOURCE_REPOSITORY_URL", + "value": "https://github.com/jboss-openshift/openshift-quickstarts.git", + "required": true }, { "description": "Git branch/tag reference", - "name": "GIT_REF", - "value": "master" + "name": "SOURCE_REPOSITORY_REF", + "value": "1.1", + "required": false }, { "description": "Path within Git project to build; empty for root project directory.", - "name": "GIT_CONTEXT_DIR", - "value": "" + "name": "CONTEXT_DIR", + "value": "tomcat-websocket-chat", + "required": false }, { "description": "JWS Admin User", "name": "JWS_ADMIN_USERNAME", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "JWS Admin Password", "name": "JWS_ADMIN_PASSWORD", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { - "description": "Github trigger secret", - "name": "GITHUB_TRIGGER_SECRET", + "description": "GitHub trigger secret", + "name": "GITHUB_WEBHOOK_SECRET", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "Generic build trigger secret", - "name": "GENERIC_TRIGGER_SECRET", + "name": "GENERIC_WEBHOOK_SECRET", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true + }, + { + "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. 
You should only need to modify this if you've installed the ImageStreams in a different namespace/project.", + "name": "IMAGE_STREAM_NAMESPACE", + "value": "openshift", + "required": true } ], "objects": [ @@ -94,9 +108,9 @@ { "kind": "Route", "apiVersion": "v1", - "id": "${APPLICATION_NAME}-http-route", + "id": "${APPLICATION_NAME}-http", "metadata": { - "name": "${APPLICATION_NAME}-http-route", + "name": "${APPLICATION_NAME}", "labels": { "application": "${APPLICATION_NAME}" }, @@ -105,7 +119,7 @@ } }, "spec": { - "host": "${APPLICATION_HOSTNAME}", + "host": "${APPLICATION_DOMAIN}", "to": { "name": "${APPLICATION_NAME}" } @@ -134,18 +148,19 @@ "source": { "type": "Git", "git": { - "uri": "${GIT_URI}", - "ref": "${GIT_REF}" + "uri": "${SOURCE_REPOSITORY_URL}", + "ref": "${SOURCE_REPOSITORY_REF}" }, - "contextDir":"${GIT_CONTEXT_DIR}" + "contextDir": "${CONTEXT_DIR}" }, "strategy": { "type": "Source", "sourceStrategy": { + "forcePull": true, "from": { "kind": "ImageStreamTag", - "namespace": "openshift", - "name": "jboss-webserver3-tomcat8-openshift:${JWS_RELEASE}" + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "jboss-webserver30-tomcat7-openshift:1.1" } } }, @@ -159,18 +174,21 @@ { "type": "GitHub", "github": { - "secret": "${GITHUB_TRIGGER_SECRET}" + "secret": "${GITHUB_WEBHOOK_SECRET}" } }, { "type": "Generic", "generic": { - "secret": "${GENERIC_TRIGGER_SECRET}" + "secret": "${GENERIC_WEBHOOK_SECRET}" } }, { "type": "ImageChange", "imageChange": {} + }, + { + "type": "ConfigChange" } ] } @@ -201,6 +219,9 @@ "name": "${APPLICATION_NAME}" } } + }, + { + "type": "ConfigChange" } ], "replicas": 1, @@ -216,6 +237,7 @@ } }, "spec": { + "terminationGracePeriodSeconds": 60, "containers": [ { "name": "${APPLICATION_NAME}", diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-https-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat7-https-s2i.json index d725e0606..7ce7e7fe2 100644 --- a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-https-sti.json +++ b/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat7-https-s2i.json @@ -3,87 +3,105 @@ "apiVersion": "v1", "metadata": { "annotations": { - "iconClass" : "icon-tomcat", - "description": "Application template for JWS applications built using STI." + "iconClass": "icon-tomcat", + "description": "Application template for JWS applications built using S2I.", + "tags": "tomcat,tomcat7,java,jboss,xpaas", + "version": "1.1.0" }, - "name": "jws-tomcat8-basic-sti" + "name": "jws30-tomcat7-https-s2i" }, "labels": { - "template": "jws-tomcat8-basic-sti" + "template": "jws30-tomcat7-https-s2i", + "xpaas": "1.1.0" }, "parameters": [ { - "description": "JWS Release version, e.g. 3.0, 2.1, etc.", - "name": "JWS_RELEASE", - "value": "3.0" - }, - { "description": "The name for the application.", "name": "APPLICATION_NAME", - "value": "jws-app" + "value": "jws-app", + "required": true }, { "description": "Custom hostname for service routes. 
Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>", - "name": "APPLICATION_HOSTNAME", - "value": "" + "name": "APPLICATION_DOMAIN", + "value": "", + "required": false }, { "description": "Git source URI for application", - "name": "GIT_URI" + "name": "SOURCE_REPOSITORY_URL", + "value": "https://github.com/jboss-openshift/openshift-quickstarts.git", + "required": true }, { "description": "Git branch/tag reference", - "name": "GIT_REF", - "value": "master" + "name": "SOURCE_REPOSITORY_REF", + "value": "1.1", + "required": false }, { "description": "Path within Git project to build; empty for root project directory.", - "name": "GIT_CONTEXT_DIR", - "value": "" + "name": "CONTEXT_DIR", + "value": "tomcat-websocket-chat", + "required": false }, { "description": "The name of the secret containing the certificate files", "name": "JWS_HTTPS_SECRET", - "value": "jws-app-secret" + "value": "jws-app-secret", + "required": true }, { "description": "The name of the certificate file within the secret", "name": "JWS_HTTPS_CERTIFICATE", - "value": "server.crt" + "value": "server.crt", + "required": false }, { "description": "The name of the certificate key file within the secret", "name": "JWS_HTTPS_CERTIFICATE_KEY", - "value": "server.key" + "value": "server.key", + "required": false }, { "description": "The certificate password", "name": "JWS_HTTPS_CERTIFICATE_PASSWORD", - "value": "" + "value": "", + "required": false }, { "description": "JWS Admin User", "name": "JWS_ADMIN_USERNAME", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "JWS Admin Password", "name": "JWS_ADMIN_PASSWORD", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { - "description": "Github trigger secret", - "name": "GITHUB_TRIGGER_SECRET", + "description": "GitHub trigger secret", + "name": "GITHUB_WEBHOOK_SECRET", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "Generic build trigger secret", - "name": "GENERIC_TRIGGER_SECRET", + "name": "GENERIC_WEBHOOK_SECRET", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true + }, + { + "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. 
You should only need to modify this if you've installed the ImageStreams in a different namespace/project.", + "name": "IMAGE_STREAM_NAMESPACE", + "value": "openshift", + "required": true } ], "objects": [ @@ -138,9 +156,9 @@ { "kind": "Route", "apiVersion": "v1", - "id": "${APPLICATION_NAME}-http-route", + "id": "${APPLICATION_NAME}-http", "metadata": { - "name": "${APPLICATION_NAME}-http-route", + "name": "${APPLICATION_NAME}", "labels": { "application": "${APPLICATION_NAME}" }, @@ -149,7 +167,7 @@ } }, "spec": { - "host": "${APPLICATION_HOSTNAME}", + "host": "${APPLICATION_DOMAIN}", "to": { "name": "${APPLICATION_NAME}" } @@ -158,9 +176,9 @@ { "kind": "Route", "apiVersion": "v1", - "id": "${APPLICATION_NAME}-https-route", + "id": "${APPLICATION_NAME}-https", "metadata": { - "name": "${APPLICATION_NAME}-https-route", + "name": "secure-${APPLICATION_NAME}", "labels": { "application": "${APPLICATION_NAME}" }, @@ -169,12 +187,12 @@ } }, "spec": { - "host": "${APPLICATION_HOSTNAME}", + "host": "${APPLICATION_DOMAIN}", "to": { "name": "secure-${APPLICATION_NAME}" }, "tls": { - "termination" : "passthrough" + "termination": "passthrough" } } }, @@ -201,18 +219,19 @@ "source": { "type": "Git", "git": { - "uri": "${GIT_URI}", - "ref": "${GIT_REF}" + "uri": "${SOURCE_REPOSITORY_URL}", + "ref": "${SOURCE_REPOSITORY_REF}" }, - "contextDir":"${GIT_CONTEXT_DIR}" + "contextDir": "${CONTEXT_DIR}" }, "strategy": { "type": "Source", "sourceStrategy": { + "forcePull": true, "from": { "kind": "ImageStreamTag", - "namespace": "openshift", - "name": "jboss-webserver3-tomcat8-openshift:${JWS_RELEASE}" + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "jboss-webserver30-tomcat7-openshift:1.1" } } }, @@ -226,18 +245,21 @@ { "type": "GitHub", "github": { - "secret": "${GITHUB_TRIGGER_SECRET}" + "secret": "${GITHUB_WEBHOOK_SECRET}" } }, { "type": "Generic", "generic": { - "secret": "${GENERIC_TRIGGER_SECRET}" + "secret": "${GENERIC_WEBHOOK_SECRET}" } }, { "type": "ImageChange", "imageChange": {} + }, + { + "type": "ConfigChange" } ] } @@ -268,6 +290,9 @@ "name": "${APPLICATION_NAME}" } } + }, + { + "type": "ConfigChange" } ], "replicas": 1, @@ -284,6 +309,7 @@ }, "spec": { "serviceAccount": "jws-service-account", + "terminationGracePeriodSeconds": 60, "containers": [ { "name": "${APPLICATION_NAME}", diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-mongodb-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat7-mongodb-persistent-s2i.json index a993024f4..9a08ec0b0 100644 --- a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-mongodb-sti.json +++ b/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat7-mongodb-persistent-s2i.json @@ -3,139 +3,174 @@ "apiVersion": "v1", "metadata": { "annotations": { - "iconClass" : "icon-tomcat", - "description": "Application template for JWS MongoDB applications built using STI." + "iconClass": "icon-tomcat", + "description": "Application template for JWS MongoDB applications with persistent storage built using S2I.", + "tags": "tomcat,tomcat7,mongodb,java,database,jboss,xpaas", + "version": "1.1.0" }, - "name": "jws-tomcat8-mongodb-sti" + "name": "jws30-tomcat7-mongodb-persistent-s2i" }, "labels": { - "template": "jws-tomcat8-mongodb-sti" + "template": "jws30-tomcat7-mongodb-persistent-s2i", + "xpaas": "1.1.0" }, "parameters": [ { - "description": "JWS Release version, e.g. 
3.0, 2.1, etc.", - "name": "JWS_RELEASE", - "value": "3.0" - }, - { "description": "The name for the application.", "name": "APPLICATION_NAME", - "value": "jws-app" + "value": "jws-app", + "required": true }, { "description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>", - "name": "APPLICATION_HOSTNAME", - "value": "" + "name": "APPLICATION_DOMAIN", + "value": "", + "required": false }, { "description": "Git source URI for application", - "name": "GIT_URI" + "name": "SOURCE_REPOSITORY_URL", + "value": "https://github.com/jboss-openshift/openshift-quickstarts", + "required": true }, { "description": "Git branch/tag reference", - "name": "GIT_REF", - "value": "master" + "name": "SOURCE_REPOSITORY_REF", + "value": "1.1", + "required": false }, { "description": "Path within Git project to build; empty for root project directory.", - "name": "GIT_CONTEXT_DIR", - "value": "" + "name": "CONTEXT_DIR", + "value": "todolist/todolist-mongodb", + "required": false }, { "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb", "name": "DB_JNDI", - "value": "" + "value": "", + "required": false }, { "description": "Database name", "name": "DB_DATABASE", - "value": "root" + "value": "root", + "required": true + }, + { + "description": "Size of persistent storage for database volume.", + "name": "VOLUME_CAPACITY", + "value": "512Mi", + "required": true }, { "description": "The name of the secret containing the certificate files", "name": "JWS_HTTPS_SECRET", - "value": "jws-app-secret" + "value": "jws-app-secret", + "required": true }, { "description": "The name of the certificate file within the secret", "name": "JWS_HTTPS_CERTIFICATE", - "value": "server.crt" + "value": "server.crt", + "required": false }, { "description": "The name of the certificate key file within the secret", "name": "JWS_HTTPS_CERTIFICATE_KEY", - "value": "server.key" + "value": "server.key", + "required": false }, { "description": "The certificate password", "name": "JWS_HTTPS_CERTIFICATE_PASSWORD", - "value": "" + "value": "", + "required": false }, { "description": "Sets xa-pool/min-pool-size for the configured datasource.", - "name": "DB_MIN_POOL_SIZE" + "name": "DB_MIN_POOL_SIZE", + "required": false }, { "description": "Sets xa-pool/max-pool-size for the configured datasource.", - "name": "DB_MAX_POOL_SIZE" + "name": "DB_MAX_POOL_SIZE", + "required": false }, { "description": "Sets transaction-isolation for the configured datasource.", - "name": "DB_TX_ISOLATION" + "name": "DB_TX_ISOLATION", + "required": false }, { "description": "Disable data file preallocation.", - "name": "MONGODB_NOPREALLOC" + "name": "MONGODB_NOPREALLOC", + "required": false }, { "description": "Set MongoDB to use a smaller default data file size.", - "name": "MONGODB_SMALLFILES" + "name": "MONGODB_SMALLFILES", + "required": false }, { "description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.", - "name": "MONGODB_QUIET" + "name": "MONGODB_QUIET", + "required": false }, { "description": "Database user name", "name": "DB_USERNAME", "from": "user[a-zA-Z0-9]{3}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "Database user password", "name": "DB_PASSWORD", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "Database admin password", "name": "DB_ADMIN_PASSWORD", 
"from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "JWS Admin User", "name": "JWS_ADMIN_USERNAME", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "JWS Admin Password", "name": "JWS_ADMIN_PASSWORD", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { - "description": "Github trigger secret", - "name": "GITHUB_TRIGGER_SECRET", + "description": "GitHub trigger secret", + "name": "GITHUB_WEBHOOK_SECRET", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "Generic build trigger secret", - "name": "GENERIC_TRIGGER_SECRET", + "name": "GENERIC_WEBHOOK_SECRET", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true + }, + { + "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.", + "name": "IMAGE_STREAM_NAMESPACE", + "value": "openshift", + "required": true } ], "objects": [ @@ -214,9 +249,9 @@ { "kind": "Route", "apiVersion": "v1", - "id": "${APPLICATION_NAME}-http-route", + "id": "${APPLICATION_NAME}-http", "metadata": { - "name": "${APPLICATION_NAME}-http-route", + "name": "${APPLICATION_NAME}", "labels": { "application": "${APPLICATION_NAME}" }, @@ -225,7 +260,7 @@ } }, "spec": { - "host": "${APPLICATION_HOSTNAME}", + "host": "${APPLICATION_DOMAIN}", "to": { "name": "${APPLICATION_NAME}" } @@ -234,9 +269,9 @@ { "kind": "Route", "apiVersion": "v1", - "id": "${APPLICATION_NAME}-https-route", + "id": "${APPLICATION_NAME}-https", "metadata": { - "name": "${APPLICATION_NAME}-https-route", + "name": "secure-${APPLICATION_NAME}", "labels": { "application": "${APPLICATION_NAME}" }, @@ -245,12 +280,12 @@ } }, "spec": { - "host": "${APPLICATION_HOSTNAME}", + "host": "${APPLICATION_DOMAIN}", "to": { "name": "secure-${APPLICATION_NAME}" }, "tls": { - "termination" : "passthrough" + "termination": "passthrough" } } }, @@ -277,18 +312,19 @@ "source": { "type": "Git", "git": { - "uri": "${GIT_URI}", - "ref": "${GIT_REF}" + "uri": "${SOURCE_REPOSITORY_URL}", + "ref": "${SOURCE_REPOSITORY_REF}" }, - "contextDir":"${GIT_CONTEXT_DIR}" + "contextDir": "${CONTEXT_DIR}" }, "strategy": { "type": "Source", "sourceStrategy": { + "forcePull": true, "from": { "kind": "ImageStreamTag", - "namespace": "openshift", - "name": "jboss-webserver3-tomcat8-openshift:${JWS_RELEASE}" + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "jboss-webserver30-tomcat7-openshift:1.1" } } }, @@ -302,18 +338,21 @@ { "type": "GitHub", "github": { - "secret": "${GITHUB_TRIGGER_SECRET}" + "secret": "${GITHUB_WEBHOOK_SECRET}" } }, { "type": "Generic", "generic": { - "secret": "${GENERIC_TRIGGER_SECRET}" + "secret": "${GENERIC_WEBHOOK_SECRET}" } }, { "type": "ImageChange", "imageChange": {} + }, + { + "type": "ConfigChange" } ] } @@ -344,6 +383,9 @@ "name": "${APPLICATION_NAME}" } } + }, + { + "type": "ConfigChange" } ], "replicas": 1, @@ -360,6 +402,7 @@ }, "spec": { "serviceAccount": "jws-service-account", + "terminationGracePeriodSeconds": 60, "containers": [ { "name": "${APPLICATION_NAME}", @@ -492,10 +535,13 @@ ], "from": { "kind": "ImageStreamTag", - "namespace": "openshift", + "namespace": "${IMAGE_STREAM_NAMESPACE}", 
"name": "mongodb:latest" } } + }, + { + "type": "ConfigChange" } ], "replicas": 1, @@ -511,6 +557,7 @@ } }, "spec": { + "terminationGracePeriodSeconds": 60, "containers": [ { "name": "${APPLICATION_NAME}-mongodb", @@ -522,6 +569,12 @@ "protocol": "TCP" } ], + "volumeMounts": [ + { + "mountPath": "/var/lib/mongodb/data", + "name": "${APPLICATION_NAME}-mongodb-pvol" + } + ], "env": [ { "name": "MONGODB_USER", @@ -553,10 +606,38 @@ } ] } + ], + "volumes": [ + { + "name": "${APPLICATION_NAME}-mongodb-pvol", + "persistentVolumeClaim": { + "claimName": "${APPLICATION_NAME}-mongodb-claim" + } + } ] } } } + }, + { + "apiVersion": "v1", + "kind": "PersistentVolumeClaim", + "metadata": { + "name": "${APPLICATION_NAME}-mongodb-claim", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "accessModes": [ + "ReadWriteOnce" + ], + "resources": { + "requests": { + "storage": "${VOLUME_CAPACITY}" + } + } + } } ] -} +}
\ No newline at end of file diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-mongodb-persistent-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat7-mongodb-s2i.json index cf35d0024..b8dfb3ad3 100644 --- a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-mongodb-persistent-sti.json +++ b/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat7-mongodb-s2i.json @@ -3,144 +3,168 @@ "apiVersion": "v1", "metadata": { "annotations": { - "iconClass" : "icon-tomcat", - "description": "Application template for JWS MongoDB applications with persistent storage built using STI." + "iconClass": "icon-tomcat", + "description": "Application template for JWS MongoDB applications built using S2I.", + "tags": "tomcat,tomcat7,mongodb,java,database,jboss,xpaas", + "version": "1.1.0" }, - "name": "jws-tomcat8-mongodb-persistent-sti" + "name": "jws30-tomcat7-mongodb-s2i" }, "labels": { - "template": "jws-tomcat8-mongodb-persistent-sti" + "template": "jws30-tomcat7-mongodb-s2i", + "xpaas": "1.1.0" }, "parameters": [ { - "description": "JWS Release version, e.g. 3.0, 2.1, etc.", - "name": "JWS_RELEASE", - "value": "3.0" - }, - { "description": "The name for the application.", "name": "APPLICATION_NAME", - "value": "jws-app" + "value": "jws-app", + "required": true }, { "description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>", - "name": "APPLICATION_HOSTNAME", - "value": "" + "name": "APPLICATION_DOMAIN", + "value": "", + "required": false }, { "description": "Git source URI for application", - "name": "GIT_URI" + "name": "SOURCE_REPOSITORY_URL", + "value": "https://github.com/jboss-openshift/openshift-quickstarts", + "required": true }, { "description": "Git branch/tag reference", - "name": "GIT_REF", - "value": "master" + "name": "SOURCE_REPOSITORY_REF", + "value": "1.1", + "required": false }, { "description": "Path within Git project to build; empty for root project directory.", - "name": "GIT_CONTEXT_DIR", - "value": "" + "name": "CONTEXT_DIR", + "value": "todolist/todolist-mongodb", + "required": false }, { "description": "Database JNDI name used by application to resolve the datasource, e.g. 
java:/jboss/datasources/mongodb", "name": "DB_JNDI", - "value": "" + "value": "", + "required": false }, { "description": "Database name", "name": "DB_DATABASE", - "value": "root" - }, - { - "description": "Size of persistent storage for database volume.", - "name": "VOLUME_CAPACITY", - "value": "512Mi" + "value": "root", + "required": true }, { "description": "The name of the secret containing the certificate files", "name": "JWS_HTTPS_SECRET", - "value": "jws-app-secret" + "value": "jws-app-secret", + "required": true }, { "description": "The name of the certificate file within the secret", "name": "JWS_HTTPS_CERTIFICATE", - "value": "server.crt" + "value": "server.crt", + "required": false }, { "description": "The name of the certificate key file within the secret", "name": "JWS_HTTPS_CERTIFICATE_KEY", - "value": "server.key" + "value": "server.key", + "required": false }, { "description": "The certificate password", "name": "JWS_HTTPS_CERTIFICATE_PASSWORD", - "value": "" + "value": "", + "required": false }, { "description": "Sets xa-pool/min-pool-size for the configured datasource.", - "name": "DB_MIN_POOL_SIZE" + "name": "DB_MIN_POOL_SIZE", + "required": false }, { "description": "Sets xa-pool/max-pool-size for the configured datasource.", - "name": "DB_MAX_POOL_SIZE" + "name": "DB_MAX_POOL_SIZE", + "required": false }, { "description": "Sets transaction-isolation for the configured datasource.", - "name": "DB_TX_ISOLATION" + "name": "DB_TX_ISOLATION", + "required": false }, { "description": "Disable data file preallocation.", - "name": "MONGODB_NOPREALLOC" + "name": "MONGODB_NOPREALLOC", + "required": false }, { "description": "Set MongoDB to use a smaller default data file size.", - "name": "MONGODB_SMALLFILES" + "name": "MONGODB_SMALLFILES", + "required": false }, { "description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.", - "name": "MONGODB_QUIET" + "name": "MONGODB_QUIET", + "required": false }, { "description": "Database user name", "name": "DB_USERNAME", "from": "user[a-zA-Z0-9]{3}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "Database user password", "name": "DB_PASSWORD", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "Database admin password", "name": "DB_ADMIN_PASSWORD", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "JWS Admin User", "name": "JWS_ADMIN_USERNAME", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "JWS Admin Password", "name": "JWS_ADMIN_PASSWORD", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { - "description": "Github trigger secret", - "name": "GITHUB_TRIGGER_SECRET", + "description": "GitHub trigger secret", + "name": "GITHUB_WEBHOOK_SECRET", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "Generic build trigger secret", - "name": "GENERIC_TRIGGER_SECRET", + "name": "GENERIC_WEBHOOK_SECRET", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true + }, + { + "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. 
You should only need to modify this if you've installed the ImageStreams in a different namespace/project.", + "name": "IMAGE_STREAM_NAMESPACE", + "value": "openshift", + "required": true } ], "objects": [ @@ -219,9 +243,9 @@ { "kind": "Route", "apiVersion": "v1", - "id": "${APPLICATION_NAME}-http-route", + "id": "${APPLICATION_NAME}-http", "metadata": { - "name": "${APPLICATION_NAME}-http-route", + "name": "${APPLICATION_NAME}", "labels": { "application": "${APPLICATION_NAME}" }, @@ -230,7 +254,7 @@ } }, "spec": { - "host": "${APPLICATION_HOSTNAME}", + "host": "${APPLICATION_DOMAIN}", "to": { "name": "${APPLICATION_NAME}" } @@ -239,9 +263,9 @@ { "kind": "Route", "apiVersion": "v1", - "id": "${APPLICATION_NAME}-https-route", + "id": "${APPLICATION_NAME}-https", "metadata": { - "name": "${APPLICATION_NAME}-https-route", + "name": "secure-${APPLICATION_NAME}", "labels": { "application": "${APPLICATION_NAME}" }, @@ -250,12 +274,12 @@ } }, "spec": { - "host": "${APPLICATION_HOSTNAME}", + "host": "${APPLICATION_DOMAIN}", "to": { "name": "secure-${APPLICATION_NAME}" }, "tls": { - "termination" : "passthrough" + "termination": "passthrough" } } }, @@ -282,18 +306,19 @@ "source": { "type": "Git", "git": { - "uri": "${GIT_URI}", - "ref": "${GIT_REF}" + "uri": "${SOURCE_REPOSITORY_URL}", + "ref": "${SOURCE_REPOSITORY_REF}" }, - "contextDir":"${GIT_CONTEXT_DIR}" + "contextDir": "${CONTEXT_DIR}" }, "strategy": { "type": "Source", "sourceStrategy": { + "forcePull": true, "from": { "kind": "ImageStreamTag", - "namespace": "openshift", - "name": "jboss-webserver3-tomcat8-openshift:${JWS_RELEASE}" + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "jboss-webserver30-tomcat7-openshift:1.1" } } }, @@ -307,18 +332,21 @@ { "type": "GitHub", "github": { - "secret": "${GITHUB_TRIGGER_SECRET}" + "secret": "${GITHUB_WEBHOOK_SECRET}" } }, { "type": "Generic", "generic": { - "secret": "${GENERIC_TRIGGER_SECRET}" + "secret": "${GENERIC_WEBHOOK_SECRET}" } }, { "type": "ImageChange", "imageChange": {} + }, + { + "type": "ConfigChange" } ] } @@ -349,6 +377,9 @@ "name": "${APPLICATION_NAME}" } } + }, + { + "type": "ConfigChange" } ], "replicas": 1, @@ -365,6 +396,7 @@ }, "spec": { "serviceAccount": "jws-service-account", + "terminationGracePeriodSeconds": 60, "containers": [ { "name": "${APPLICATION_NAME}", @@ -497,10 +529,13 @@ ], "from": { "kind": "ImageStreamTag", - "namespace": "openshift", + "namespace": "${IMAGE_STREAM_NAMESPACE}", "name": "mongodb:latest" } } + }, + { + "type": "ConfigChange" } ], "replicas": 1, @@ -516,6 +551,7 @@ } }, "spec": { + "terminationGracePeriodSeconds": 60, "containers": [ { "name": "${APPLICATION_NAME}-mongodb", @@ -527,12 +563,6 @@ "protocol": "TCP" } ], - "volumeMounts": [ - { - "mountPath": "/var/lib/mongodb/data", - "name": "${APPLICATION_NAME}-mongodb-pvol" - } - ], "env": [ { "name": "MONGODB_USER", @@ -564,36 +594,10 @@ } ] } - ], - "volumes": [ - { - "name": "${APPLICATION_NAME}-mongodb-pvol", - "persistentVolumeClaim": { - "claimName": "${APPLICATION_NAME}-mongodb-claim" - } - } ] } } } - }, - { - "apiVersion": "v1", - "kind": "PersistentVolumeClaim", - "metadata": { - "name": "${APPLICATION_NAME}-mongodb-claim", - "labels": { - "application": "${APPLICATION_NAME}" - } - }, - "spec": { - "accessModes": [ "ReadWriteOnce" ], - "resources": { - "requests": { - "storage": "${VOLUME_CAPACITY}" - } - } - } } ] -} +}
\ No newline at end of file diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-mysql-persistent-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat7-mysql-persistent-s2i.json index 547449010..d36e330d3 100644 --- a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-mysql-persistent-sti.json +++ b/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat7-mysql-persistent-s2i.json @@ -3,146 +3,177 @@ "apiVersion": "v1", "metadata": { "annotations": { - "iconClass" : "icon-tomcat", - "description": "Application template for JWS MySQL applications with persistent storage built using STI." + "iconClass": "icon-tomcat", + "description": "Application template for JWS MySQL applications with persistent storage built using S2I.", + "tags": "tomcat,tomcat7,mysql,java,database,jboss,xpaas", + "version": "1.1.0" }, - "name": "jws-tomcat7-mysql-persistent-sti" + "name": "jws30-tomcat7-mysql-persistent-s2i" }, "labels": { - "template": "jws-tomcat7-mysql-persistent-sti" + "template": "jws30-tomcat7-mysql-persistent-s2i", + "xpaas": "1.1.0" }, "parameters": [ { - "description": "JWS Release version, e.g. 3.0, 2.1, etc.", - "name": "JWS_RELEASE", - "value": "3.0" - }, - { "description": "The name for the application.", "name": "APPLICATION_NAME", - "value": "jws-app" + "value": "jws-app", + "required": true }, { "description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>", - "name": "APPLICATION_HOSTNAME", - "value": "" + "name": "APPLICATION_DOMAIN", + "value": "", + "required": false }, { "description": "Git source URI for application", - "name": "GIT_URI" + "name": "SOURCE_REPOSITORY_URL", + "value": "https://github.com/jboss-openshift/openshift-quickstarts", + "required": true }, { "description": "Git branch/tag reference", - "name": "GIT_REF", - "value": "master" + "name": "SOURCE_REPOSITORY_REF", + "value": "1.1", + "required": false }, { "description": "Path within Git project to build; empty for root project directory.", - "name": "GIT_CONTEXT_DIR", - "value": "" + "name": "CONTEXT_DIR", + "value": "todolist/todolist-jdbc", + "required": false }, { "description": "Database JNDI name used by application to resolve the datasource, e.g. 
java:/jboss/datasources/mongodb", "name": "DB_JNDI", - "value": "" + "value": "java:jboss/datasources/TodoListDS", + "required": false }, { "description": "Database name", "name": "DB_DATABASE", - "value": "root" + "value": "root", + "required": true }, { "description": "Size of persistent storage for database volume.", "name": "VOLUME_CAPACITY", - "value": "512Mi" + "value": "512Mi", + "required": true }, { "description": "The name of the secret containing the certificate files", "name": "JWS_HTTPS_SECRET", - "value": "jws-app-secret" + "value": "jws-app-secret", + "required": true }, { "description": "The name of the certificate file within the secret", "name": "JWS_HTTPS_CERTIFICATE", - "value": "server.crt" + "value": "server.crt", + "required": false }, { "description": "The name of the certificate key file within the secret", "name": "JWS_HTTPS_CERTIFICATE_KEY", - "value": "server.key" + "value": "server.key", + "required": false }, { "description": "The certificate password", "name": "JWS_HTTPS_CERTIFICATE_PASSWORD", - "value": "" + "value": "", + "required": false }, { "description": "Sets xa-pool/min-pool-size for the configured datasource.", - "name": "DB_MIN_POOL_SIZE" + "name": "DB_MIN_POOL_SIZE", + "required": false }, { "description": "Sets xa-pool/max-pool-size for the configured datasource.", - "name": "DB_MAX_POOL_SIZE" + "name": "DB_MAX_POOL_SIZE", + "required": false }, { "description": "Sets transaction-isolation for the configured datasource.", - "name": "DB_TX_ISOLATION" + "name": "DB_TX_ISOLATION", + "required": false }, { "description": "Sets how the table names are stored and compared.", - "name": "MYSQL_LOWER_CASE_TABLE_NAMES" + "name": "MYSQL_LOWER_CASE_TABLE_NAMES", + "required": false }, { "description": "The maximum permitted number of simultaneous client connections.", - "name": "MYSQL_MAX_CONNECTIONS" + "name": "MYSQL_MAX_CONNECTIONS", + "required": false }, { "description": "The minimum length of the word to be included in a FULLTEXT index.", - "name": "MYSQL_FT_MIN_WORD_LEN" + "name": "MYSQL_FT_MIN_WORD_LEN", + "required": false }, { "description": "The maximum length of the word to be included in a FULLTEXT index.", - "name": "MYSQL_FT_MAX_WORD_LEN" + "name": "MYSQL_FT_MAX_WORD_LEN", + "required": false }, { "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.", - "name": "MYSQL_AIO" + "name": "MYSQL_AIO", + "required": false }, { "description": "Database user name", "name": "DB_USERNAME", "from": "user[a-zA-Z0-9]{3}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "Database user password", "name": "DB_PASSWORD", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "JWS Admin User", "name": "JWS_ADMIN_USERNAME", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "JWS Admin Password", "name": "JWS_ADMIN_PASSWORD", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { - "description": "Github trigger secret", - "name": "GITHUB_TRIGGER_SECRET", + "description": "GitHub trigger secret", + "name": "GITHUB_WEBHOOK_SECRET", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "Generic build trigger secret", - "name": "GENERIC_TRIGGER_SECRET", + "name": "GENERIC_WEBHOOK_SECRET", "from": "[a-zA-Z0-9]{8}", - "generate": 
"expression" + "generate": "expression", + "required": true + }, + { + "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.", + "name": "IMAGE_STREAM_NAMESPACE", + "value": "openshift", + "required": true } ], "objects": [ @@ -221,9 +252,9 @@ { "kind": "Route", "apiVersion": "v1", - "id": "${APPLICATION_NAME}-http-route", + "id": "${APPLICATION_NAME}-http", "metadata": { - "name": "${APPLICATION_NAME}-http-route", + "name": "${APPLICATION_NAME}", "labels": { "application": "${APPLICATION_NAME}" }, @@ -232,7 +263,7 @@ } }, "spec": { - "host": "${APPLICATION_HOSTNAME}", + "host": "${APPLICATION_DOMAIN}", "to": { "name": "${APPLICATION_NAME}" } @@ -241,9 +272,9 @@ { "kind": "Route", "apiVersion": "v1", - "id": "${APPLICATION_NAME}-https-route", + "id": "${APPLICATION_NAME}-https", "metadata": { - "name": "${APPLICATION_NAME}-https-route", + "name": "secure-${APPLICATION_NAME}", "labels": { "application": "${APPLICATION_NAME}" }, @@ -252,12 +283,12 @@ } }, "spec": { - "host": "${APPLICATION_HOSTNAME}", + "host": "${APPLICATION_DOMAIN}", "to": { "name": "secure-${APPLICATION_NAME}" }, "tls": { - "termination" : "passthrough" + "termination": "passthrough" } } }, @@ -284,18 +315,19 @@ "source": { "type": "Git", "git": { - "uri": "${GIT_URI}", - "ref": "${GIT_REF}" + "uri": "${SOURCE_REPOSITORY_URL}", + "ref": "${SOURCE_REPOSITORY_REF}" }, - "contextDir":"${GIT_CONTEXT_DIR}" + "contextDir": "${CONTEXT_DIR}" }, "strategy": { "type": "Source", "sourceStrategy": { + "forcePull": true, "from": { "kind": "ImageStreamTag", - "namespace": "openshift", - "name": "jboss-webserver3-tomcat7-openshift:${JWS_RELEASE}" + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "jboss-webserver30-tomcat7-openshift:1.1" } } }, @@ -309,18 +341,21 @@ { "type": "GitHub", "github": { - "secret": "${GITHUB_TRIGGER_SECRET}" + "secret": "${GITHUB_WEBHOOK_SECRET}" } }, { "type": "Generic", "generic": { - "secret": "${GENERIC_TRIGGER_SECRET}" + "secret": "${GENERIC_WEBHOOK_SECRET}" } }, { "type": "ImageChange", "imageChange": {} + }, + { + "type": "ConfigChange" } ] } @@ -351,6 +386,9 @@ "name": "${APPLICATION_NAME}" } } + }, + { + "type": "ConfigChange" } ], "replicas": 1, @@ -367,6 +405,7 @@ }, "spec": { "serviceAccount": "jws-service-account", + "terminationGracePeriodSeconds": 60, "containers": [ { "name": "${APPLICATION_NAME}", @@ -495,10 +534,13 @@ ], "from": { "kind": "ImageStreamTag", - "namespace": "openshift", + "namespace": "${IMAGE_STREAM_NAMESPACE}", "name": "mysql:latest" } } + }, + { + "type": "ConfigChange" } ], "replicas": 1, @@ -514,6 +556,7 @@ } }, "spec": { + "terminationGracePeriodSeconds": 60, "containers": [ { "name": "${APPLICATION_NAME}-mysql", @@ -525,11 +568,11 @@ } ], "volumeMounts": [ - { - "mountPath": "/var/lib/mysql/data", - "name": "${APPLICATION_NAME}-mysql-pvol" - } - ], + { + "mountPath": "/var/lib/mysql/data", + "name": "${APPLICATION_NAME}-mysql-pvol" + } + ], "env": [ { "name": "MYSQL_USER", @@ -588,7 +631,9 @@ } }, "spec": { - "accessModes": [ "ReadWriteOnce" ], + "accessModes": [ + "ReadWriteOnce" + ], "resources": { "requests": { "storage": "${VOLUME_CAPACITY}" @@ -597,4 +642,4 @@ } } ] -} +}
\ No newline at end of file diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-mysql-persistent-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat7-mysql-s2i.json index 0692817bf..f5309db60 100644 --- a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-mysql-persistent-sti.json +++ b/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat7-mysql-s2i.json @@ -3,146 +3,171 @@ "apiVersion": "v1", "metadata": { "annotations": { - "iconClass" : "icon-tomcat", - "description": "Application template for JWS MySQL applications with persistent storage built using STI." + "iconClass": "icon-tomcat", + "description": "Application template for JWS MySQL applications built using S2I.", + "tags": "tomcat,tomcat7,mysql,java,database,jboss,xpaas", + "version": "1.1.0" }, - "name": "jws-tomcat8-mysql-persistent-sti" + "name": "jws30-tomcat7-mysql-s2i" }, "labels": { - "template": "jws-tomcat8-mysql-persistent-sti" + "template": "jws30-tomcat7-mysql-s2i", + "xpaas": "1.1.0" }, "parameters": [ { - "description": "JWS Release version, e.g. 3.0, 2.1, etc.", - "name": "JWS_RELEASE", - "value": "3.0" - }, - { "description": "The name for the application.", "name": "APPLICATION_NAME", - "value": "jws-app" + "value": "jws-app", + "required": true }, { "description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>", - "name": "APPLICATION_HOSTNAME", - "value": "" + "name": "APPLICATION_DOMAIN", + "value": "", + "required": false }, { "description": "Git source URI for application", - "name": "GIT_URI" + "name": "SOURCE_REPOSITORY_URL", + "value": "https://github.com/jboss-openshift/openshift-quickstarts", + "required": true }, { "description": "Git branch/tag reference", - "name": "GIT_REF", - "value": "master" + "name": "SOURCE_REPOSITORY_REF", + "value": "1.1", + "required": false }, { "description": "Path within Git project to build; empty for root project directory.", - "name": "GIT_CONTEXT_DIR", - "value": "" + "name": "CONTEXT_DIR", + "value": "todolist/todolist-jdbc", + "required": false }, { "description": "Database JNDI name used by application to resolve the datasource, e.g. 
java:/jboss/datasources/mongodb", "name": "DB_JNDI", - "value": "" + "value": "java:jboss/datasources/TodoListDS", + "required": false }, { "description": "Database name", "name": "DB_DATABASE", - "value": "root" - }, - { - "description": "Size of persistent storage for database volume.", - "name": "VOLUME_CAPACITY", - "value": "512Mi" + "value": "root", + "required": true }, { "description": "The name of the secret containing the certificate files", "name": "JWS_HTTPS_SECRET", - "value": "jws-app-secret" + "value": "jws-app-secret", + "required": true }, { "description": "The name of the certificate file within the secret", "name": "JWS_HTTPS_CERTIFICATE", - "value": "server.crt" + "value": "server.crt", + "required": false }, { "description": "The name of the certificate key file within the secret", "name": "JWS_HTTPS_CERTIFICATE_KEY", - "value": "server.key" + "value": "server.key", + "required": false }, { "description": "The certificate password", "name": "JWS_HTTPS_CERTIFICATE_PASSWORD", - "value": "" + "value": "", + "required": false }, { "description": "Sets xa-pool/min-pool-size for the configured datasource.", - "name": "DB_MIN_POOL_SIZE" + "name": "DB_MIN_POOL_SIZE", + "required": false }, { "description": "Sets xa-pool/max-pool-size for the configured datasource.", - "name": "DB_MAX_POOL_SIZE" + "name": "DB_MAX_POOL_SIZE", + "required": false }, { "description": "Sets transaction-isolation for the configured datasource.", - "name": "DB_TX_ISOLATION" + "name": "DB_TX_ISOLATION", + "required": false }, { "description": "Sets how the table names are stored and compared.", - "name": "MYSQL_LOWER_CASE_TABLE_NAMES" + "name": "MYSQL_LOWER_CASE_TABLE_NAMES", + "required": false }, { "description": "The maximum permitted number of simultaneous client connections.", - "name": "MYSQL_MAX_CONNECTIONS" + "name": "MYSQL_MAX_CONNECTIONS", + "required": false }, { "description": "The minimum length of the word to be included in a FULLTEXT index.", - "name": "MYSQL_FT_MIN_WORD_LEN" + "name": "MYSQL_FT_MIN_WORD_LEN", + "required": false }, { "description": "The maximum length of the word to be included in a FULLTEXT index.", - "name": "MYSQL_FT_MAX_WORD_LEN" + "name": "MYSQL_FT_MAX_WORD_LEN", + "required": false }, { "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.", - "name": "MYSQL_AIO" + "name": "MYSQL_AIO", + "required": false }, { "description": "Database user name", "name": "DB_USERNAME", "from": "user[a-zA-Z0-9]{3}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "Database user password", "name": "DB_PASSWORD", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "JWS Admin User", "name": "JWS_ADMIN_USERNAME", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "JWS Admin Password", "name": "JWS_ADMIN_PASSWORD", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { - "description": "Github trigger secret", - "name": "GITHUB_TRIGGER_SECRET", + "description": "GitHub trigger secret", + "name": "GITHUB_WEBHOOK_SECRET", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "Generic build trigger secret", - "name": "GENERIC_TRIGGER_SECRET", + "name": "GENERIC_WEBHOOK_SECRET", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", 
+ "required": true + }, + { + "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.", + "name": "IMAGE_STREAM_NAMESPACE", + "value": "openshift", + "required": true } ], "objects": [ @@ -221,9 +246,9 @@ { "kind": "Route", "apiVersion": "v1", - "id": "${APPLICATION_NAME}-http-route", + "id": "${APPLICATION_NAME}-http", "metadata": { - "name": "${APPLICATION_NAME}-http-route", + "name": "${APPLICATION_NAME}", "labels": { "application": "${APPLICATION_NAME}" }, @@ -232,7 +257,7 @@ } }, "spec": { - "host": "${APPLICATION_HOSTNAME}", + "host": "${APPLICATION_DOMAIN}", "to": { "name": "${APPLICATION_NAME}" } @@ -241,9 +266,9 @@ { "kind": "Route", "apiVersion": "v1", - "id": "${APPLICATION_NAME}-https-route", + "id": "${APPLICATION_NAME}-https", "metadata": { - "name": "${APPLICATION_NAME}-https-route", + "name": "secure-${APPLICATION_NAME}", "labels": { "application": "${APPLICATION_NAME}" }, @@ -252,12 +277,12 @@ } }, "spec": { - "host": "${APPLICATION_HOSTNAME}", + "host": "${APPLICATION_DOMAIN}", "to": { "name": "secure-${APPLICATION_NAME}" }, "tls": { - "termination" : "passthrough" + "termination": "passthrough" } } }, @@ -284,18 +309,19 @@ "source": { "type": "Git", "git": { - "uri": "${GIT_URI}", - "ref": "${GIT_REF}" + "uri": "${SOURCE_REPOSITORY_URL}", + "ref": "${SOURCE_REPOSITORY_REF}" }, - "contextDir":"${GIT_CONTEXT_DIR}" + "contextDir": "${CONTEXT_DIR}" }, "strategy": { "type": "Source", "sourceStrategy": { + "forcePull": true, "from": { "kind": "ImageStreamTag", - "namespace": "openshift", - "name": "jboss-webserver3-tomcat8-openshift:${JWS_RELEASE}" + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "jboss-webserver30-tomcat7-openshift:1.1" } } }, @@ -309,18 +335,21 @@ { "type": "GitHub", "github": { - "secret": "${GITHUB_TRIGGER_SECRET}" + "secret": "${GITHUB_WEBHOOK_SECRET}" } }, { "type": "Generic", "generic": { - "secret": "${GENERIC_TRIGGER_SECRET}" + "secret": "${GENERIC_WEBHOOK_SECRET}" } }, { "type": "ImageChange", "imageChange": {} + }, + { + "type": "ConfigChange" } ] } @@ -351,6 +380,9 @@ "name": "${APPLICATION_NAME}" } } + }, + { + "type": "ConfigChange" } ], "replicas": 1, @@ -367,6 +399,7 @@ }, "spec": { "serviceAccount": "jws-service-account", + "terminationGracePeriodSeconds": 60, "containers": [ { "name": "${APPLICATION_NAME}", @@ -495,10 +528,13 @@ ], "from": { "kind": "ImageStreamTag", - "namespace": "openshift", + "namespace": "${IMAGE_STREAM_NAMESPACE}", "name": "mysql:latest" } } + }, + { + "type": "ConfigChange" } ], "replicas": 1, @@ -514,6 +550,7 @@ } }, "spec": { + "terminationGracePeriodSeconds": 60, "containers": [ { "name": "${APPLICATION_NAME}-mysql", @@ -524,12 +561,6 @@ "protocol": "TCP" } ], - "volumeMounts": [ - { - "mountPath": "/var/lib/mysql/data", - "name": "${APPLICATION_NAME}-mysql-pvol" - } - ], "env": [ { "name": "MYSQL_USER", @@ -565,36 +596,10 @@ } ] } - ], - "volumes": [ - { - "name": "${APPLICATION_NAME}-mysql-pvol", - "persistentVolumeClaim": { - "claimName": "${APPLICATION_NAME}-mysql-claim" - } - } ] } } } - }, - { - "apiVersion": "v1", - "kind": "PersistentVolumeClaim", - "metadata": { - "name": "${APPLICATION_NAME}-mysql-claim", - "labels": { - "application": "${APPLICATION_NAME}" - } - }, - "spec": { - "accessModes": [ "ReadWriteOnce" ], - "resources": { - "requests": { - "storage": "${VOLUME_CAPACITY}" - } - 
} - } } ] -} +}
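The hunk above converts the MySQL template to its non-persistent form (volume mounts, volumes, and the PersistentVolumeClaim are dropped) and renames the source and webhook parameters. As a minimal sketch of how the renamed parameters are consumed when instantiating the template, assuming the file has been saved locally and reusing the default values visible in the diff (the parameter flag spelling varies between oc client versions, -v on older clients and -p on newer ones):

```
# Process the renamed S2I template and create the resulting objects.
# File path and flag spelling are assumptions; parameter values mirror the template defaults.
oc process -f jws30-tomcat7-mysql-s2i.json \
  -p APPLICATION_NAME=jws-app \
  -p SOURCE_REPOSITORY_URL=https://github.com/jboss-openshift/openshift-quickstarts \
  -p SOURCE_REPOSITORY_REF=1.1 \
  -p CONTEXT_DIR=todolist/todolist-jdbc \
  | oc create -f -
```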
\ No newline at end of file diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-postgresql-persistent-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat7-postgresql-persistent-s2i.json index b871b48d0..ee88a4c69 100644 --- a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-postgresql-persistent-sti.json +++ b/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat7-postgresql-persistent-s2i.json @@ -3,134 +3,162 @@ "apiVersion": "v1", "metadata": { "annotations": { - "iconClass" : "icon-tomcat", - "description": "Application template for JWS PostgreSQL applications with persistent storage built using STI." + "iconClass": "icon-tomcat", + "description": "Application template for JWS PostgreSQL applications with persistent storage built using S2I.", + "tags": "tomcat,tomcat7,postgresql,java,database,jboss,xpaas", + "version": "1.1.0" }, - "name": "jws-tomcat7-postgresql-persistent-sti" + "name": "jws30-tomcat7-postgresql-persistent-s2i" }, "labels": { - "template": "jws-tomcat7-postgresql-persistent-sti" + "template": "jws30-tomcat7-postgresql-persistent-s2i", + "xpaas": "1.1.0" }, "parameters": [ { - "description": "JWS Release version, e.g. 3.0, 2.1, etc.", - "name": "JWS_RELEASE", - "value": "3.0" - }, - { "description": "The name for the application.", "name": "APPLICATION_NAME", - "value": "jws-app" + "value": "jws-app", + "required": true }, { "description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>", - "name": "APPLICATION_HOSTNAME", - "value": "" + "name": "APPLICATION_DOMAIN", + "value": "", + "required": false }, { "description": "Git source URI for application", - "name": "GIT_URI" + "name": "SOURCE_REPOSITORY_URL", + "value": "https://github.com/jboss-openshift/openshift-quickstarts", + "required": true }, { "description": "Git branch/tag reference", - "name": "GIT_REF", - "value": "master" + "name": "SOURCE_REPOSITORY_REF", + "value": "1.1", + "required": false }, { "description": "Path within Git project to build; empty for root project directory.", - "name": "GIT_CONTEXT_DIR", - "value": "" + "name": "CONTEXT_DIR", + "value": "todolist/todolist-jdbc", + "required": false }, { "description": "Database JNDI name used by application to resolve the datasource, e.g. 
java:/jboss/datasources/mongodb", "name": "DB_JNDI", - "value": "" + "value": "java:jboss/datasources/TodoListDS", + "required": false }, { "description": "Database name", "name": "DB_DATABASE", - "value": "root" + "value": "root", + "required": true }, { "description": "Size of persistent storage for database volume.", "name": "VOLUME_CAPACITY", - "value": "512Mi" + "value": "512Mi", + "required": true }, { "description": "The name of the secret containing the certificate files", "name": "JWS_HTTPS_SECRET", - "value": "jws-app-secret" + "value": "jws-app-secret", + "required": true }, { "description": "The name of the certificate file within the secret", "name": "JWS_HTTPS_CERTIFICATE", - "value": "server.crt" + "value": "server.crt", + "required": false }, { "description": "The name of the certificate key file within the secret", "name": "JWS_HTTPS_CERTIFICATE_KEY", - "value": "server.key" + "value": "server.key", + "required": false }, { "description": "The certificate password", "name": "JWS_HTTPS_CERTIFICATE_PASSWORD", - "value": "" + "value": "", + "required": false }, { "description": "Sets xa-pool/min-pool-size for the configured datasource.", - "name": "DB_MIN_POOL_SIZE" + "name": "DB_MIN_POOL_SIZE", + "required": false }, { "description": "Sets xa-pool/max-pool-size for the configured datasource.", - "name": "DB_MAX_POOL_SIZE" + "name": "DB_MAX_POOL_SIZE", + "required": false }, { "description": "Sets transaction-isolation for the configured datasource.", - "name": "DB_TX_ISOLATION" + "name": "DB_TX_ISOLATION", + "required": false }, { "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.", - "name": "POSTGRESQL_MAX_CONNECTIONS" + "name": "POSTGRESQL_MAX_CONNECTIONS", + "required": false }, { "description": "Configures how much memory is dedicated to PostgreSQL for caching data.", - "name": "POSTGRESQL_SHARED_BUFFERS" + "name": "POSTGRESQL_SHARED_BUFFERS", + "required": false }, { "description": "Database user name", "name": "DB_USERNAME", "from": "user[a-zA-Z0-9]{3}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "Database user password", "name": "DB_PASSWORD", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "JWS Admin User", "name": "JWS_ADMIN_USERNAME", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "JWS Admin Password", "name": "JWS_ADMIN_PASSWORD", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { - "description": "Github trigger secret", - "name": "GITHUB_TRIGGER_SECRET", + "description": "GitHub trigger secret", + "name": "GITHUB_WEBHOOK_SECRET", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "Generic build trigger secret", - "name": "GENERIC_TRIGGER_SECRET", + "name": "GENERIC_WEBHOOK_SECRET", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true + }, + { + "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. 
You should only need to modify this if you've installed the ImageStreams in a different namespace/project.", + "name": "IMAGE_STREAM_NAMESPACE", + "value": "openshift", + "required": true } ], "objects": [ @@ -209,9 +237,9 @@ { "kind": "Route", "apiVersion": "v1", - "id": "${APPLICATION_NAME}-http-route", + "id": "${APPLICATION_NAME}-http", "metadata": { - "name": "${APPLICATION_NAME}-http-route", + "name": "${APPLICATION_NAME}", "labels": { "application": "${APPLICATION_NAME}" }, @@ -220,7 +248,7 @@ } }, "spec": { - "host": "${APPLICATION_HOSTNAME}", + "host": "${APPLICATION_DOMAIN}", "to": { "name": "${APPLICATION_NAME}" } @@ -229,9 +257,9 @@ { "kind": "Route", "apiVersion": "v1", - "id": "${APPLICATION_NAME}-https-route", + "id": "${APPLICATION_NAME}-https", "metadata": { - "name": "${APPLICATION_NAME}-https-route", + "name": "secure-${APPLICATION_NAME}", "labels": { "application": "${APPLICATION_NAME}" }, @@ -240,12 +268,12 @@ } }, "spec": { - "host": "${APPLICATION_HOSTNAME}", + "host": "${APPLICATION_DOMAIN}", "to": { "name": "secure-${APPLICATION_NAME}" }, "tls": { - "termination" : "passthrough" + "termination": "passthrough" } } }, @@ -272,18 +300,19 @@ "source": { "type": "Git", "git": { - "uri": "${GIT_URI}", - "ref": "${GIT_REF}" + "uri": "${SOURCE_REPOSITORY_URL}", + "ref": "${SOURCE_REPOSITORY_REF}" }, - "contextDir":"${GIT_CONTEXT_DIR}" + "contextDir": "${CONTEXT_DIR}" }, "strategy": { "type": "Source", "sourceStrategy": { + "forcePull": true, "from": { "kind": "ImageStreamTag", - "namespace": "openshift", - "name": "jboss-webserver3-tomcat7-openshift:${JWS_RELEASE}" + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "jboss-webserver30-tomcat7-openshift:1.1" } } }, @@ -297,18 +326,21 @@ { "type": "GitHub", "github": { - "secret": "${GITHUB_TRIGGER_SECRET}" + "secret": "${GITHUB_WEBHOOK_SECRET}" } }, { "type": "Generic", "generic": { - "secret": "${GENERIC_TRIGGER_SECRET}" + "secret": "${GENERIC_WEBHOOK_SECRET}" } }, { "type": "ImageChange", "imageChange": {} + }, + { + "type": "ConfigChange" } ] } @@ -339,6 +371,9 @@ "name": "${APPLICATION_NAME}" } } + }, + { + "type": "ConfigChange" } ], "replicas": 1, @@ -355,6 +390,7 @@ }, "spec": { "serviceAccount": "jws-service-account", + "terminationGracePeriodSeconds": 60, "containers": [ { "name": "${APPLICATION_NAME}", @@ -483,10 +519,13 @@ ], "from": { "kind": "ImageStreamTag", - "namespace": "openshift", + "namespace": "${IMAGE_STREAM_NAMESPACE}", "name": "postgresql:latest" } } + }, + { + "type": "ConfigChange" } ], "replicas": 1, @@ -502,6 +541,7 @@ } }, "spec": { + "terminationGracePeriodSeconds": 60, "containers": [ { "name": "${APPLICATION_NAME}-postgresql", @@ -513,11 +553,11 @@ } ], "volumeMounts": [ - { - "mountPath": "/var/lib/pgsql/data", - "name": "${APPLICATION_NAME}-postgresql-pvol" - } - ], + { + "mountPath": "/var/lib/pgsql/data", + "name": "${APPLICATION_NAME}-postgresql-pvol" + } + ], "env": [ { "name": "POSTGRESQL_USER", @@ -564,7 +604,9 @@ } }, "spec": { - "accessModes": [ "ReadWriteOnce" ], + "accessModes": [ + "ReadWriteOnce" + ], "resources": { "requests": { "storage": "${VOLUME_CAPACITY}" @@ -573,4 +615,4 @@ } } ] -} +}
\ No newline at end of file diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-postgresql-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat7-postgresql-s2i.json index 384ff1b8f..f5940a7a1 100644 --- a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-postgresql-sti.json +++ b/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat7-postgresql-s2i.json @@ -3,129 +3,156 @@ "apiVersion": "v1", "metadata": { "annotations": { - "iconClass" : "icon-tomcat", - "description": "Application template for JWS PostgreSQL applications built using STI." + "iconClass": "icon-tomcat", + "description": "Application template for JWS PostgreSQL applications built using S2I.", + "tags": "tomcat,tomcat7,postgresql,java,database,jboss,xpaas", + "version": "1.1.0" }, - "name": "jws-tomcat7-postgresql-sti" + "name": "jws30-tomcat7-postgresql-s2i" }, "labels": { - "template": "jws-tomcat7-postgresql-sti" + "template": "jws30-tomcat7-postgresql-s2i", + "xpaas": "1.1.0" }, "parameters": [ { - "description": "JWS Release version, e.g. 3.0, 2.1, etc.", - "name": "JWS_RELEASE", - "value": "3.0" - }, - { "description": "The name for the application.", "name": "APPLICATION_NAME", - "value": "jws-app" + "value": "jws-app", + "required": true }, { "description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>", - "name": "APPLICATION_HOSTNAME", - "value": "" + "name": "APPLICATION_DOMAIN", + "value": "", + "required": false }, { "description": "Git source URI for application", - "name": "GIT_URI" + "name": "SOURCE_REPOSITORY_URL", + "value": "https://github.com/jboss-openshift/openshift-quickstarts", + "required": true }, { "description": "Git branch/tag reference", - "name": "GIT_REF", - "value": "master" + "name": "SOURCE_REPOSITORY_REF", + "value": "1.1", + "required": false }, { "description": "Path within Git project to build; empty for root project directory.", - "name": "GIT_CONTEXT_DIR", - "value": "" + "name": "CONTEXT_DIR", + "value": "todolist/todolist-jdbc", + "required": false }, { "description": "Database JNDI name used by application to resolve the datasource, e.g. 
java:/jboss/datasources/mongodb", "name": "DB_JNDI", - "value": "" + "value": "java:jboss/datasources/TodoListDS", + "required": false }, { "description": "Database name", "name": "DB_DATABASE", - "value": "root" + "value": "root", + "required": true }, { "description": "The name of the secret containing the certificate files", "name": "JWS_HTTPS_SECRET", - "value": "jws-app-secret" + "value": "jws-app-secret", + "required": true }, { "description": "The name of the certificate file within the secret", "name": "JWS_HTTPS_CERTIFICATE", - "value": "server.crt" + "value": "server.crt", + "required": false }, { "description": "The name of the certificate key file within the secret", "name": "JWS_HTTPS_CERTIFICATE_KEY", - "value": "server.key" + "value": "server.key", + "required": false }, { "description": "The certificate password", "name": "JWS_HTTPS_CERTIFICATE_PASSWORD", - "value": "" + "value": "", + "required": false }, { "description": "Sets xa-pool/min-pool-size for the configured datasource.", - "name": "DB_MIN_POOL_SIZE" + "name": "DB_MIN_POOL_SIZE", + "required": false }, { "description": "Sets xa-pool/max-pool-size for the configured datasource.", - "name": "DB_MAX_POOL_SIZE" + "name": "DB_MAX_POOL_SIZE", + "required": false }, { "description": "Sets transaction-isolation for the configured datasource.", - "name": "DB_TX_ISOLATION" + "name": "DB_TX_ISOLATION", + "required": false }, { "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.", - "name": "POSTGRESQL_MAX_CONNECTIONS" + "name": "POSTGRESQL_MAX_CONNECTIONS", + "required": false }, { "description": "Configures how much memory is dedicated to PostgreSQL for caching data.", - "name": "POSTGRESQL_SHARED_BUFFERS" + "name": "POSTGRESQL_SHARED_BUFFERS", + "required": false }, { "description": "Database user name", "name": "DB_USERNAME", "from": "user[a-zA-Z0-9]{3}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "Database user password", "name": "DB_PASSWORD", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "JWS Admin User", "name": "JWS_ADMIN_USERNAME", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "JWS Admin Password", "name": "JWS_ADMIN_PASSWORD", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { - "description": "Github trigger secret", - "name": "GITHUB_TRIGGER_SECRET", + "description": "GitHub trigger secret", + "name": "GITHUB_WEBHOOK_SECRET", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "Generic build trigger secret", - "name": "GENERIC_TRIGGER_SECRET", + "name": "GENERIC_WEBHOOK_SECRET", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true + }, + { + "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. 
You should only need to modify this if you've installed the ImageStreams in a different namespace/project.", + "name": "IMAGE_STREAM_NAMESPACE", + "value": "openshift", + "required": true } ], "objects": [ @@ -204,9 +231,9 @@ { "kind": "Route", "apiVersion": "v1", - "id": "${APPLICATION_NAME}-http-route", + "id": "${APPLICATION_NAME}-http", "metadata": { - "name": "${APPLICATION_NAME}-http-route", + "name": "${APPLICATION_NAME}", "labels": { "application": "${APPLICATION_NAME}" }, @@ -215,7 +242,7 @@ } }, "spec": { - "host": "${APPLICATION_HOSTNAME}", + "host": "${APPLICATION_DOMAIN}", "to": { "name": "${APPLICATION_NAME}" } @@ -224,9 +251,9 @@ { "kind": "Route", "apiVersion": "v1", - "id": "${APPLICATION_NAME}-https-route", + "id": "${APPLICATION_NAME}-https", "metadata": { - "name": "${APPLICATION_NAME}-https-route", + "name": "secure-${APPLICATION_NAME}", "labels": { "application": "${APPLICATION_NAME}" }, @@ -235,12 +262,12 @@ } }, "spec": { - "host": "${APPLICATION_HOSTNAME}", + "host": "${APPLICATION_DOMAIN}", "to": { "name": "secure-${APPLICATION_NAME}" }, "tls": { - "termination" : "passthrough" + "termination": "passthrough" } } }, @@ -267,18 +294,19 @@ "source": { "type": "Git", "git": { - "uri": "${GIT_URI}", - "ref": "${GIT_REF}" + "uri": "${SOURCE_REPOSITORY_URL}", + "ref": "${SOURCE_REPOSITORY_REF}" }, - "contextDir":"${GIT_CONTEXT_DIR}" + "contextDir": "${CONTEXT_DIR}" }, "strategy": { "type": "Source", "sourceStrategy": { + "forcePull": true, "from": { "kind": "ImageStreamTag", - "namespace": "openshift", - "name": "jboss-webserver3-tomcat7-openshift:${JWS_RELEASE}" + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "jboss-webserver30-tomcat7-openshift:1.1" } } }, @@ -292,18 +320,21 @@ { "type": "GitHub", "github": { - "secret": "${GITHUB_TRIGGER_SECRET}" + "secret": "${GITHUB_WEBHOOK_SECRET}" } }, { "type": "Generic", "generic": { - "secret": "${GENERIC_TRIGGER_SECRET}" + "secret": "${GENERIC_WEBHOOK_SECRET}" } }, { "type": "ImageChange", "imageChange": {} + }, + { + "type": "ConfigChange" } ] } @@ -334,6 +365,9 @@ "name": "${APPLICATION_NAME}" } } + }, + { + "type": "ConfigChange" } ], "replicas": 1, @@ -350,6 +384,7 @@ }, "spec": { "serviceAccount": "jws-service-account", + "terminationGracePeriodSeconds": 60, "containers": [ { "name": "${APPLICATION_NAME}", @@ -478,10 +513,13 @@ ], "from": { "kind": "ImageStreamTag", - "namespace": "openshift", + "namespace": "${IMAGE_STREAM_NAMESPACE}", "name": "postgresql:latest" } } + }, + { + "type": "ConfigChange" } ], "replicas": 1, @@ -497,6 +535,7 @@ } }, "spec": { + "terminationGracePeriodSeconds": 60, "containers": [ { "name": "${APPLICATION_NAME}-postgresql", @@ -536,4 +575,4 @@ } } ] -} +}
\ No newline at end of file diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-basic-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat8-basic-s2i.json index d74c2dfe3..b24ce40ae 100644 --- a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-basic-sti.json +++ b/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat8-basic-s2i.json @@ -3,67 +3,81 @@ "apiVersion": "v1", "metadata": { "annotations": { - "iconClass" : "icon-tomcat", - "description": "Application template for JWS applications built using STI." + "iconClass": "icon-tomcat", + "description": "Application template for JWS applications built using S2I.", + "tags": "tomcat,tomcat8,java,jboss,xpaas", + "version": "1.1.0" }, - "name": "jws-tomcat7-basic-sti" + "name": "jws30-tomcat8-basic-s2i" }, "labels": { - "template": "jws-tomcat7-basic-sti" + "template": "jws30-tomcat8-basic-s2i", + "xpaas": "1.1.0" }, "parameters": [ { - "description": "JWS Release version, e.g. 3.0, 2.1, etc.", - "name": "JWS_RELEASE", - "value": "3.0" - }, - { "description": "The name for the application.", "name": "APPLICATION_NAME", - "value": "jws-app" + "value": "jws-app", + "required": true }, { "description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>", - "name": "APPLICATION_HOSTNAME", - "value": "" + "name": "APPLICATION_DOMAIN", + "value": "", + "required": false }, { "description": "Git source URI for application", - "name": "GIT_URI" + "name": "SOURCE_REPOSITORY_URL", + "value": "https://github.com/jboss-openshift/openshift-quickstarts.git", + "required": true }, { "description": "Git branch/tag reference", - "name": "GIT_REF", - "value": "master" + "name": "SOURCE_REPOSITORY_REF", + "value": "1.1", + "required": false }, { "description": "Path within Git project to build; empty for root project directory.", - "name": "GIT_CONTEXT_DIR", - "value": "" + "name": "CONTEXT_DIR", + "value": "tomcat-websocket-chat", + "required": false }, { "description": "JWS Admin User", "name": "JWS_ADMIN_USERNAME", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "JWS Admin Password", "name": "JWS_ADMIN_PASSWORD", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { - "description": "Github trigger secret", - "name": "GITHUB_TRIGGER_SECRET", + "description": "GitHub trigger secret", + "name": "GITHUB_WEBHOOK_SECRET", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "Generic build trigger secret", - "name": "GENERIC_TRIGGER_SECRET", + "name": "GENERIC_WEBHOOK_SECRET", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true + }, + { + "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. 
You should only need to modify this if you've installed the ImageStreams in a different namespace/project.", + "name": "IMAGE_STREAM_NAMESPACE", + "value": "openshift", + "required": true } ], "objects": [ @@ -94,9 +108,9 @@ { "kind": "Route", "apiVersion": "v1", - "id": "${APPLICATION_NAME}-http-route", + "id": "${APPLICATION_NAME}-http", "metadata": { - "name": "${APPLICATION_NAME}-http-route", + "name": "${APPLICATION_NAME}", "labels": { "application": "${APPLICATION_NAME}" }, @@ -105,7 +119,7 @@ } }, "spec": { - "host": "${APPLICATION_HOSTNAME}", + "host": "${APPLICATION_DOMAIN}", "to": { "name": "${APPLICATION_NAME}" } @@ -134,18 +148,19 @@ "source": { "type": "Git", "git": { - "uri": "${GIT_URI}", - "ref": "${GIT_REF}" + "uri": "${SOURCE_REPOSITORY_URL}", + "ref": "${SOURCE_REPOSITORY_REF}" }, - "contextDir":"${GIT_CONTEXT_DIR}" + "contextDir": "${CONTEXT_DIR}" }, "strategy": { "type": "Source", "sourceStrategy": { + "forcePull": true, "from": { "kind": "ImageStreamTag", - "namespace": "openshift", - "name": "jboss-webserver3-tomcat7-openshift:${JWS_RELEASE}" + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "jboss-webserver30-tomcat8-openshift:1.1" } } }, @@ -159,18 +174,21 @@ { "type": "GitHub", "github": { - "secret": "${GITHUB_TRIGGER_SECRET}" + "secret": "${GITHUB_WEBHOOK_SECRET}" } }, { "type": "Generic", "generic": { - "secret": "${GENERIC_TRIGGER_SECRET}" + "secret": "${GENERIC_WEBHOOK_SECRET}" } }, { "type": "ImageChange", "imageChange": {} + }, + { + "type": "ConfigChange" } ] } @@ -201,6 +219,9 @@ "name": "${APPLICATION_NAME}" } } + }, + { + "type": "ConfigChange" } ], "replicas": 1, @@ -216,6 +237,7 @@ } }, "spec": { + "terminationGracePeriodSeconds": 60, "containers": [ { "name": "${APPLICATION_NAME}", diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-https-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat8-https-s2i.json index b94142135..7e788d0db 100644 --- a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-https-sti.json +++ b/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat8-https-s2i.json @@ -3,87 +3,105 @@ "apiVersion": "v1", "metadata": { "annotations": { - "iconClass" : "icon-tomcat", - "description": "Application template for JWS applications built using STI." + "iconClass": "icon-tomcat", + "description": "Application template for JWS applications built using S2I.", + "tags": "tomcat,tomcat8,java,jboss,xpaas", + "version": "1.1.0" }, - "name": "jws-tomcat7-basic-sti" + "name": "jws30-tomcat8-https-s2i" }, "labels": { - "template": "jws-tomcat7-basic-sti" + "template": "jws30-tomcat8-https-s2i", + "xpaas": "1.1.0" }, "parameters": [ { - "description": "JWS Release version, e.g. 3.0, 2.1, etc.", - "name": "JWS_RELEASE", - "value": "3.0" - }, - { "description": "The name for the application.", "name": "APPLICATION_NAME", - "value": "jws-app" + "value": "jws-app", + "required": true }, { "description": "Custom hostname for service routes. 
Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>", - "name": "APPLICATION_HOSTNAME", - "value": "" + "name": "APPLICATION_DOMAIN", + "value": "", + "required": false }, { "description": "Git source URI for application", - "name": "GIT_URI" + "name": "SOURCE_REPOSITORY_URL", + "value": "https://github.com/jboss-openshift/openshift-quickstarts.git", + "required": true }, { "description": "Git branch/tag reference", - "name": "GIT_REF", - "value": "master" + "name": "SOURCE_REPOSITORY_REF", + "value": "1.1", + "required": false }, { "description": "Path within Git project to build; empty for root project directory.", - "name": "GIT_CONTEXT_DIR", - "value": "" + "name": "CONTEXT_DIR", + "value": "tomcat-websocket-chat", + "required": false }, { "description": "The name of the secret containing the certificate files", "name": "JWS_HTTPS_SECRET", - "value": "jws-app-secret" + "value": "jws-app-secret", + "required": true }, { "description": "The name of the certificate file within the secret", "name": "JWS_HTTPS_CERTIFICATE", - "value": "server.crt" + "value": "server.crt", + "required": false }, { "description": "The name of the certificate key file within the secret", "name": "JWS_HTTPS_CERTIFICATE_KEY", - "value": "server.key" + "value": "server.key", + "required": false }, { "description": "The certificate password", "name": "JWS_HTTPS_CERTIFICATE_PASSWORD", - "value": "" + "value": "", + "required": false }, { "description": "JWS Admin User", "name": "JWS_ADMIN_USERNAME", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "JWS Admin Password", "name": "JWS_ADMIN_PASSWORD", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { - "description": "Github trigger secret", - "name": "GITHUB_TRIGGER_SECRET", + "description": "GitHub trigger secret", + "name": "GITHUB_WEBHOOK_SECRET", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "Generic build trigger secret", - "name": "GENERIC_TRIGGER_SECRET", + "name": "GENERIC_WEBHOOK_SECRET", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true + }, + { + "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. 
You should only need to modify this if you've installed the ImageStreams in a different namespace/project.", + "name": "IMAGE_STREAM_NAMESPACE", + "value": "openshift", + "required": true } ], "objects": [ @@ -138,9 +156,9 @@ { "kind": "Route", "apiVersion": "v1", - "id": "${APPLICATION_NAME}-http-route", + "id": "${APPLICATION_NAME}-http", "metadata": { - "name": "${APPLICATION_NAME}-http-route", + "name": "${APPLICATION_NAME}", "labels": { "application": "${APPLICATION_NAME}" }, @@ -149,7 +167,7 @@ } }, "spec": { - "host": "${APPLICATION_HOSTNAME}", + "host": "${APPLICATION_DOMAIN}", "to": { "name": "${APPLICATION_NAME}" } @@ -158,9 +176,9 @@ { "kind": "Route", "apiVersion": "v1", - "id": "${APPLICATION_NAME}-https-route", + "id": "${APPLICATION_NAME}-https", "metadata": { - "name": "${APPLICATION_NAME}-https-route", + "name": "secure-${APPLICATION_NAME}", "labels": { "application": "${APPLICATION_NAME}" }, @@ -169,12 +187,12 @@ } }, "spec": { - "host": "${APPLICATION_HOSTNAME}", + "host": "${APPLICATION_DOMAIN}", "to": { "name": "secure-${APPLICATION_NAME}" }, "tls": { - "termination" : "passthrough" + "termination": "passthrough" } } }, @@ -201,18 +219,19 @@ "source": { "type": "Git", "git": { - "uri": "${GIT_URI}", - "ref": "${GIT_REF}" + "uri": "${SOURCE_REPOSITORY_URL}", + "ref": "${SOURCE_REPOSITORY_REF}" }, - "contextDir":"${GIT_CONTEXT_DIR}" + "contextDir": "${CONTEXT_DIR}" }, "strategy": { "type": "Source", "sourceStrategy": { + "forcePull": true, "from": { "kind": "ImageStreamTag", - "namespace": "openshift", - "name": "jboss-webserver3-tomcat7-openshift:${JWS_RELEASE}" + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "jboss-webserver30-tomcat8-openshift:1.1" } } }, @@ -226,18 +245,21 @@ { "type": "GitHub", "github": { - "secret": "${GITHUB_TRIGGER_SECRET}" + "secret": "${GITHUB_WEBHOOK_SECRET}" } }, { "type": "Generic", "generic": { - "secret": "${GENERIC_TRIGGER_SECRET}" + "secret": "${GENERIC_WEBHOOK_SECRET}" } }, { "type": "ImageChange", "imageChange": {} + }, + { + "type": "ConfigChange" } ] } @@ -268,6 +290,9 @@ "name": "${APPLICATION_NAME}" } } + }, + { + "type": "ConfigChange" } ], "replicas": 1, @@ -284,6 +309,7 @@ }, "spec": { "serviceAccount": "jws-service-account", + "terminationGracePeriodSeconds": 60, "containers": [ { "name": "${APPLICATION_NAME}", diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-mongodb-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat8-mongodb-persistent-s2i.json index 892f27fe3..2f1d69c75 100644 --- a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-mongodb-sti.json +++ b/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat8-mongodb-persistent-s2i.json @@ -3,139 +3,174 @@ "apiVersion": "v1", "metadata": { "annotations": { - "iconClass" : "icon-tomcat", - "description": "Application template for JWS MongoDB applications built using STI." + "iconClass": "icon-tomcat", + "description": "Application template for JWS MongoDB applications with persistent storage built using S2I.", + "tags": "tomcat,tomcat8,mongodb,java,database,jboss,xpaas", + "version": "1.1.0" }, - "name": "jws-tomcat7-mongodb-sti" + "name": "jws30-tomcat8-mongodb-persistent-s2i" }, "labels": { - "template": "jws-tomcat7-mongodb-sti" + "template": "jws30-tomcat8-mongodb-persistent-s2i", + "xpaas": "1.1.0" }, "parameters": [ { - "description": "JWS Release version, e.g. 
3.0, 2.1, etc.", - "name": "JWS_RELEASE", - "value": "3.0" - }, - { "description": "The name for the application.", "name": "APPLICATION_NAME", - "value": "jws-app" + "value": "jws-app", + "required": true }, { "description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>", - "name": "APPLICATION_HOSTNAME", - "value": "" + "name": "APPLICATION_DOMAIN", + "value": "", + "required": false }, { "description": "Git source URI for application", - "name": "GIT_URI" + "name": "SOURCE_REPOSITORY_URL", + "value": "https://github.com/jboss-openshift/openshift-quickstarts", + "required": true }, { "description": "Git branch/tag reference", - "name": "GIT_REF", - "value": "master" + "name": "SOURCE_REPOSITORY_REF", + "value": "1.1", + "required": false }, { "description": "Path within Git project to build; empty for root project directory.", - "name": "GIT_CONTEXT_DIR", - "value": "" + "name": "CONTEXT_DIR", + "value": "todolist/todolist-mongodb", + "required": false }, { "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb", "name": "DB_JNDI", - "value": "" + "value": "", + "required": false }, { "description": "Database name", "name": "DB_DATABASE", - "value": "root" + "value": "root", + "required": true + }, + { + "description": "Size of persistent storage for database volume.", + "name": "VOLUME_CAPACITY", + "value": "512Mi", + "required": true }, { "description": "The name of the secret containing the certificate files", "name": "JWS_HTTPS_SECRET", - "value": "jws-app-secret" + "value": "jws-app-secret", + "required": true }, { "description": "The name of the certificate file within the secret", "name": "JWS_HTTPS_CERTIFICATE", - "value": "server.crt" + "value": "server.crt", + "required": false }, { "description": "The name of the certificate key file within the secret", "name": "JWS_HTTPS_CERTIFICATE_KEY", - "value": "server.key" + "value": "server.key", + "required": false }, { "description": "The certificate password", "name": "JWS_HTTPS_CERTIFICATE_PASSWORD", - "value": "" + "value": "", + "required": false }, { "description": "Sets xa-pool/min-pool-size for the configured datasource.", - "name": "DB_MIN_POOL_SIZE" + "name": "DB_MIN_POOL_SIZE", + "required": false }, { "description": "Sets xa-pool/max-pool-size for the configured datasource.", - "name": "DB_MAX_POOL_SIZE" + "name": "DB_MAX_POOL_SIZE", + "required": false }, { "description": "Sets transaction-isolation for the configured datasource.", - "name": "DB_TX_ISOLATION" + "name": "DB_TX_ISOLATION", + "required": false }, { "description": "Disable data file preallocation.", - "name": "MONGODB_NOPREALLOC" + "name": "MONGODB_NOPREALLOC", + "required": false }, { "description": "Set MongoDB to use a smaller default data file size.", - "name": "MONGODB_SMALLFILES" + "name": "MONGODB_SMALLFILES", + "required": false }, { "description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.", - "name": "MONGODB_QUIET" + "name": "MONGODB_QUIET", + "required": false }, { "description": "Database user name", "name": "DB_USERNAME", "from": "user[a-zA-Z0-9]{3}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "Database user password", "name": "DB_PASSWORD", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "Database admin password", "name": "DB_ADMIN_PASSWORD", 
"from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "JWS Admin User", "name": "JWS_ADMIN_USERNAME", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "JWS Admin Password", "name": "JWS_ADMIN_PASSWORD", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { - "description": "Github trigger secret", - "name": "GITHUB_TRIGGER_SECRET", + "description": "GitHub trigger secret", + "name": "GITHUB_WEBHOOK_SECRET", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "Generic build trigger secret", - "name": "GENERIC_TRIGGER_SECRET", + "name": "GENERIC_WEBHOOK_SECRET", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true + }, + { + "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.", + "name": "IMAGE_STREAM_NAMESPACE", + "value": "openshift", + "required": true } ], "objects": [ @@ -214,9 +249,9 @@ { "kind": "Route", "apiVersion": "v1", - "id": "${APPLICATION_NAME}-http-route", + "id": "${APPLICATION_NAME}-http", "metadata": { - "name": "${APPLICATION_NAME}-http-route", + "name": "${APPLICATION_NAME}", "labels": { "application": "${APPLICATION_NAME}" }, @@ -225,7 +260,7 @@ } }, "spec": { - "host": "${APPLICATION_HOSTNAME}", + "host": "${APPLICATION_DOMAIN}", "to": { "name": "${APPLICATION_NAME}" } @@ -234,9 +269,9 @@ { "kind": "Route", "apiVersion": "v1", - "id": "${APPLICATION_NAME}-https-route", + "id": "${APPLICATION_NAME}-https", "metadata": { - "name": "${APPLICATION_NAME}-https-route", + "name": "secure-${APPLICATION_NAME}", "labels": { "application": "${APPLICATION_NAME}" }, @@ -245,12 +280,12 @@ } }, "spec": { - "host": "${APPLICATION_HOSTNAME}", + "host": "${APPLICATION_DOMAIN}", "to": { "name": "secure-${APPLICATION_NAME}" }, "tls": { - "termination" : "passthrough" + "termination": "passthrough" } } }, @@ -277,18 +312,19 @@ "source": { "type": "Git", "git": { - "uri": "${GIT_URI}", - "ref": "${GIT_REF}" + "uri": "${SOURCE_REPOSITORY_URL}", + "ref": "${SOURCE_REPOSITORY_REF}" }, - "contextDir":"${GIT_CONTEXT_DIR}" + "contextDir": "${CONTEXT_DIR}" }, "strategy": { "type": "Source", "sourceStrategy": { + "forcePull": true, "from": { "kind": "ImageStreamTag", - "namespace": "openshift", - "name": "jboss-webserver3-tomcat7-openshift:${JWS_RELEASE}" + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "jboss-webserver30-tomcat8-openshift:1.1" } } }, @@ -302,18 +338,21 @@ { "type": "GitHub", "github": { - "secret": "${GITHUB_TRIGGER_SECRET}" + "secret": "${GITHUB_WEBHOOK_SECRET}" } }, { "type": "Generic", "generic": { - "secret": "${GENERIC_TRIGGER_SECRET}" + "secret": "${GENERIC_WEBHOOK_SECRET}" } }, { "type": "ImageChange", "imageChange": {} + }, + { + "type": "ConfigChange" } ] } @@ -344,6 +383,9 @@ "name": "${APPLICATION_NAME}" } } + }, + { + "type": "ConfigChange" } ], "replicas": 1, @@ -360,6 +402,7 @@ }, "spec": { "serviceAccount": "jws-service-account", + "terminationGracePeriodSeconds": 60, "containers": [ { "name": "${APPLICATION_NAME}", @@ -492,10 +535,13 @@ ], "from": { "kind": "ImageStreamTag", - "namespace": "openshift", + "namespace": "${IMAGE_STREAM_NAMESPACE}", 
"name": "mongodb:latest" } } + }, + { + "type": "ConfigChange" } ], "replicas": 1, @@ -511,6 +557,7 @@ } }, "spec": { + "terminationGracePeriodSeconds": 60, "containers": [ { "name": "${APPLICATION_NAME}-mongodb", @@ -522,6 +569,12 @@ "protocol": "TCP" } ], + "volumeMounts": [ + { + "mountPath": "/var/lib/mongodb/data", + "name": "${APPLICATION_NAME}-mongodb-pvol" + } + ], "env": [ { "name": "MONGODB_USER", @@ -553,10 +606,38 @@ } ] } + ], + "volumes": [ + { + "name": "${APPLICATION_NAME}-mongodb-pvol", + "persistentVolumeClaim": { + "claimName": "${APPLICATION_NAME}-mongodb-claim" + } + } ] } } } + }, + { + "apiVersion": "v1", + "kind": "PersistentVolumeClaim", + "metadata": { + "name": "${APPLICATION_NAME}-mongodb-claim", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "accessModes": [ + "ReadWriteOnce" + ], + "resources": { + "requests": { + "storage": "${VOLUME_CAPACITY}" + } + } + } } ] -} +}
\ No newline at end of file diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-mongodb-persistent-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat8-mongodb-s2i.json index 0c7b7d8e3..bad676f2e 100644 --- a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-mongodb-persistent-sti.json +++ b/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat8-mongodb-s2i.json @@ -3,144 +3,168 @@ "apiVersion": "v1", "metadata": { "annotations": { - "iconClass" : "icon-tomcat", - "description": "Application template for JWS MongoDB applications with persistent storage built using STI." + "iconClass": "icon-tomcat", + "description": "Application template for JWS MongoDB applications built using S2I.", + "tags": "tomcat,tomcat8,mongodb,java,database,jboss,xpaas", + "version": "1.1.0" }, - "name": "jws-tomcat7-mongodb-persistent-sti" + "name": "jws30-tomcat8-mongodb-s2i" }, "labels": { - "template": "jws-tomcat7-mongodb-persistent-sti" + "template": "jws30-tomcat8-mongodb-s2i", + "xpaas": "1.1.0" }, "parameters": [ { - "description": "JWS Release version, e.g. 3.0, 2.1, etc.", - "name": "JWS_RELEASE", - "value": "3.0" - }, - { "description": "The name for the application.", "name": "APPLICATION_NAME", - "value": "jws-app" + "value": "jws-app", + "required": true }, { "description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>", - "name": "APPLICATION_HOSTNAME", - "value": "" + "name": "APPLICATION_DOMAIN", + "value": "", + "required": false }, { "description": "Git source URI for application", - "name": "GIT_URI" + "name": "SOURCE_REPOSITORY_URL", + "value": "https://github.com/jboss-openshift/openshift-quickstarts", + "required": true }, { "description": "Git branch/tag reference", - "name": "GIT_REF", - "value": "master" + "name": "SOURCE_REPOSITORY_REF", + "value": "1.1", + "required": false }, { "description": "Path within Git project to build; empty for root project directory.", - "name": "GIT_CONTEXT_DIR", - "value": "" + "name": "CONTEXT_DIR", + "value": "todolist/todolist-mongodb", + "required": false }, { "description": "Database JNDI name used by application to resolve the datasource, e.g. 
java:/jboss/datasources/mongodb", "name": "DB_JNDI", - "value": "" + "value": "", + "required": false }, { "description": "Database name", "name": "DB_DATABASE", - "value": "root" - }, - { - "description": "Size of persistent storage for database volume.", - "name": "VOLUME_CAPACITY", - "value": "512Mi" + "value": "root", + "required": true }, { "description": "The name of the secret containing the certificate files", "name": "JWS_HTTPS_SECRET", - "value": "jws-app-secret" + "value": "jws-app-secret", + "required": true }, { "description": "The name of the certificate file within the secret", "name": "JWS_HTTPS_CERTIFICATE", - "value": "server.crt" + "value": "server.crt", + "required": false }, { "description": "The name of the certificate key file within the secret", "name": "JWS_HTTPS_CERTIFICATE_KEY", - "value": "server.key" + "value": "server.key", + "required": false }, { "description": "The certificate password", "name": "JWS_HTTPS_CERTIFICATE_PASSWORD", - "value": "" + "value": "", + "required": false }, { "description": "Sets xa-pool/min-pool-size for the configured datasource.", - "name": "DB_MIN_POOL_SIZE" + "name": "DB_MIN_POOL_SIZE", + "required": false }, { "description": "Sets xa-pool/max-pool-size for the configured datasource.", - "name": "DB_MAX_POOL_SIZE" + "name": "DB_MAX_POOL_SIZE", + "required": false }, { "description": "Sets transaction-isolation for the configured datasource.", - "name": "DB_TX_ISOLATION" + "name": "DB_TX_ISOLATION", + "required": false }, { "description": "Disable data file preallocation.", - "name": "MONGODB_NOPREALLOC" + "name": "MONGODB_NOPREALLOC", + "required": false }, { "description": "Set MongoDB to use a smaller default data file size.", - "name": "MONGODB_SMALLFILES" + "name": "MONGODB_SMALLFILES", + "required": false }, { "description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.", - "name": "MONGODB_QUIET" + "name": "MONGODB_QUIET", + "required": false }, { "description": "Database user name", "name": "DB_USERNAME", "from": "user[a-zA-Z0-9]{3}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "Database user password", "name": "DB_PASSWORD", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "Database admin password", "name": "DB_ADMIN_PASSWORD", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "JWS Admin User", "name": "JWS_ADMIN_USERNAME", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "JWS Admin Password", "name": "JWS_ADMIN_PASSWORD", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { - "description": "Github trigger secret", - "name": "GITHUB_TRIGGER_SECRET", + "description": "GitHub trigger secret", + "name": "GITHUB_WEBHOOK_SECRET", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "Generic build trigger secret", - "name": "GENERIC_TRIGGER_SECRET", + "name": "GENERIC_WEBHOOK_SECRET", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true + }, + { + "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. 
You should only need to modify this if you've installed the ImageStreams in a different namespace/project.", + "name": "IMAGE_STREAM_NAMESPACE", + "value": "openshift", + "required": true } ], "objects": [ @@ -219,9 +243,9 @@ { "kind": "Route", "apiVersion": "v1", - "id": "${APPLICATION_NAME}-http-route", + "id": "${APPLICATION_NAME}-http", "metadata": { - "name": "${APPLICATION_NAME}-http-route", + "name": "${APPLICATION_NAME}", "labels": { "application": "${APPLICATION_NAME}" }, @@ -230,7 +254,7 @@ } }, "spec": { - "host": "${APPLICATION_HOSTNAME}", + "host": "${APPLICATION_DOMAIN}", "to": { "name": "${APPLICATION_NAME}" } @@ -239,9 +263,9 @@ { "kind": "Route", "apiVersion": "v1", - "id": "${APPLICATION_NAME}-https-route", + "id": "${APPLICATION_NAME}-https", "metadata": { - "name": "${APPLICATION_NAME}-https-route", + "name": "secure-${APPLICATION_NAME}", "labels": { "application": "${APPLICATION_NAME}" }, @@ -250,12 +274,12 @@ } }, "spec": { - "host": "${APPLICATION_HOSTNAME}", + "host": "${APPLICATION_DOMAIN}", "to": { "name": "secure-${APPLICATION_NAME}" }, "tls": { - "termination" : "passthrough" + "termination": "passthrough" } } }, @@ -282,18 +306,19 @@ "source": { "type": "Git", "git": { - "uri": "${GIT_URI}", - "ref": "${GIT_REF}" + "uri": "${SOURCE_REPOSITORY_URL}", + "ref": "${SOURCE_REPOSITORY_REF}" }, - "contextDir":"${GIT_CONTEXT_DIR}" + "contextDir": "${CONTEXT_DIR}" }, "strategy": { "type": "Source", "sourceStrategy": { + "forcePull": true, "from": { "kind": "ImageStreamTag", - "namespace": "openshift", - "name": "jboss-webserver3-tomcat7-openshift:${JWS_RELEASE}" + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "jboss-webserver30-tomcat8-openshift:1.1" } } }, @@ -307,18 +332,21 @@ { "type": "GitHub", "github": { - "secret": "${GITHUB_TRIGGER_SECRET}" + "secret": "${GITHUB_WEBHOOK_SECRET}" } }, { "type": "Generic", "generic": { - "secret": "${GENERIC_TRIGGER_SECRET}" + "secret": "${GENERIC_WEBHOOK_SECRET}" } }, { "type": "ImageChange", "imageChange": {} + }, + { + "type": "ConfigChange" } ] } @@ -349,6 +377,9 @@ "name": "${APPLICATION_NAME}" } } + }, + { + "type": "ConfigChange" } ], "replicas": 1, @@ -365,6 +396,7 @@ }, "spec": { "serviceAccount": "jws-service-account", + "terminationGracePeriodSeconds": 60, "containers": [ { "name": "${APPLICATION_NAME}", @@ -497,10 +529,13 @@ ], "from": { "kind": "ImageStreamTag", - "namespace": "openshift", + "namespace": "${IMAGE_STREAM_NAMESPACE}", "name": "mongodb:latest" } } + }, + { + "type": "ConfigChange" } ], "replicas": 1, @@ -516,6 +551,7 @@ } }, "spec": { + "terminationGracePeriodSeconds": 60, "containers": [ { "name": "${APPLICATION_NAME}-mongodb", @@ -527,12 +563,6 @@ "protocol": "TCP" } ], - "volumeMounts": [ - { - "mountPath": "/var/lib/mongodb/data", - "name": "${APPLICATION_NAME}-mongodb-pvol" - } - ], "env": [ { "name": "MONGODB_USER", @@ -564,36 +594,10 @@ } ] } - ], - "volumes": [ - { - "name": "${APPLICATION_NAME}-mongodb-pvol", - "persistentVolumeClaim": { - "claimName": "${APPLICATION_NAME}-mongodb-claim" - } - } ] } } } - }, - { - "apiVersion": "v1", - "kind": "PersistentVolumeClaim", - "metadata": { - "name": "${APPLICATION_NAME}-mongodb-claim", - "labels": { - "application": "${APPLICATION_NAME}" - } - }, - "spec": { - "accessModes": [ "ReadWriteOnce" ], - "resources": { - "requests": { - "storage": "${VOLUME_CAPACITY}" - } - } - } } ] -} +}
\ No newline at end of file diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-mysql-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat8-mysql-persistent-s2i.json index 226a983b7..e20a45982 100644 --- a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-mysql-sti.json +++ b/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat8-mysql-persistent-s2i.json @@ -3,141 +3,177 @@ "apiVersion": "v1", "metadata": { "annotations": { - "iconClass" : "icon-tomcat", - "description": "Application template for JWS MySQL applications built using STI." + "iconClass": "icon-tomcat", + "description": "Application template for JWS MySQL applications with persistent storage built using S2I.", + "tags": "tomcat,tomcat8,mysql,java,database,jboss,xpaas", + "version": "1.1.0" }, - "name": "jws-tomcat8-mysql-sti" + "name": "jws30-tomcat8-mysql-persistent-s2i" }, "labels": { - "template": "jws-tomcat8-mysql-sti" + "template": "jws30-tomcat8-mysql-persistent-s2i", + "xpaas": "1.1.0" }, "parameters": [ { - "description": "JWS Release version, e.g. 3.0, 2.1, etc.", - "name": "JWS_RELEASE", - "value": "3.0" - }, - { "description": "The name for the application.", "name": "APPLICATION_NAME", - "value": "jws-app" + "value": "jws-app", + "required": true }, { "description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>", - "name": "APPLICATION_HOSTNAME", - "value": "" + "name": "APPLICATION_DOMAIN", + "value": "", + "required": false }, { "description": "Git source URI for application", - "name": "GIT_URI" + "name": "SOURCE_REPOSITORY_URL", + "value": "https://github.com/jboss-openshift/openshift-quickstarts", + "required": true }, { "description": "Git branch/tag reference", - "name": "GIT_REF", - "value": "master" + "name": "SOURCE_REPOSITORY_REF", + "value": "1.1", + "required": false }, { "description": "Path within Git project to build; empty for root project directory.", - "name": "GIT_CONTEXT_DIR", - "value": "" + "name": "CONTEXT_DIR", + "value": "todolist/todolist-jdbc", + "required": false }, { "description": "Database JNDI name used by application to resolve the datasource, e.g. 
java:/jboss/datasources/mongodb", "name": "DB_JNDI", - "value": "" + "value": "java:jboss/datasources/TodoListDS", + "required": false }, { "description": "Database name", "name": "DB_DATABASE", - "value": "root" + "value": "root", + "required": true + }, + { + "description": "Size of persistent storage for database volume.", + "name": "VOLUME_CAPACITY", + "value": "512Mi", + "required": true }, { "description": "The name of the secret containing the certificate files", "name": "JWS_HTTPS_SECRET", - "value": "jws-app-secret" + "value": "jws-app-secret", + "required": true }, { "description": "The name of the certificate file within the secret", "name": "JWS_HTTPS_CERTIFICATE", - "value": "server.crt" + "value": "server.crt", + "required": false }, { "description": "The name of the certificate key file within the secret", "name": "JWS_HTTPS_CERTIFICATE_KEY", - "value": "server.key" + "value": "server.key", + "required": false }, { "description": "The certificate password", "name": "JWS_HTTPS_CERTIFICATE_PASSWORD", - "value": "" + "value": "", + "required": false }, { "description": "Sets xa-pool/min-pool-size for the configured datasource.", - "name": "DB_MIN_POOL_SIZE" + "name": "DB_MIN_POOL_SIZE", + "required": false }, { "description": "Sets xa-pool/max-pool-size for the configured datasource.", - "name": "DB_MAX_POOL_SIZE" + "name": "DB_MAX_POOL_SIZE", + "required": false }, { "description": "Sets transaction-isolation for the configured datasource.", - "name": "DB_TX_ISOLATION" + "name": "DB_TX_ISOLATION", + "required": false }, { "description": "Sets how the table names are stored and compared.", - "name": "MYSQL_LOWER_CASE_TABLE_NAMES" + "name": "MYSQL_LOWER_CASE_TABLE_NAMES", + "required": false }, { "description": "The maximum permitted number of simultaneous client connections.", - "name": "MYSQL_MAX_CONNECTIONS" + "name": "MYSQL_MAX_CONNECTIONS", + "required": false }, { "description": "The minimum length of the word to be included in a FULLTEXT index.", - "name": "MYSQL_FT_MIN_WORD_LEN" + "name": "MYSQL_FT_MIN_WORD_LEN", + "required": false }, { "description": "The maximum length of the word to be included in a FULLTEXT index.", - "name": "MYSQL_FT_MAX_WORD_LEN" + "name": "MYSQL_FT_MAX_WORD_LEN", + "required": false }, { "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.", - "name": "MYSQL_AIO" + "name": "MYSQL_AIO", + "required": false }, { "description": "Database user name", "name": "DB_USERNAME", "from": "user[a-zA-Z0-9]{3}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "Database user password", "name": "DB_PASSWORD", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "JWS Admin User", "name": "JWS_ADMIN_USERNAME", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "JWS Admin Password", "name": "JWS_ADMIN_PASSWORD", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { - "description": "Github trigger secret", - "name": "GITHUB_TRIGGER_SECRET", + "description": "GitHub trigger secret", + "name": "GITHUB_WEBHOOK_SECRET", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "Generic build trigger secret", - "name": "GENERIC_TRIGGER_SECRET", + "name": "GENERIC_WEBHOOK_SECRET", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + 
"generate": "expression", + "required": true + }, + { + "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.", + "name": "IMAGE_STREAM_NAMESPACE", + "value": "openshift", + "required": true } ], "objects": [ @@ -216,9 +252,9 @@ { "kind": "Route", "apiVersion": "v1", - "id": "${APPLICATION_NAME}-http-route", + "id": "${APPLICATION_NAME}-http", "metadata": { - "name": "${APPLICATION_NAME}-http-route", + "name": "${APPLICATION_NAME}", "labels": { "application": "${APPLICATION_NAME}" }, @@ -227,7 +263,7 @@ } }, "spec": { - "host": "${APPLICATION_HOSTNAME}", + "host": "${APPLICATION_DOMAIN}", "to": { "name": "${APPLICATION_NAME}" } @@ -236,9 +272,9 @@ { "kind": "Route", "apiVersion": "v1", - "id": "${APPLICATION_NAME}-https-route", + "id": "${APPLICATION_NAME}-https", "metadata": { - "name": "${APPLICATION_NAME}-https-route", + "name": "secure-${APPLICATION_NAME}", "labels": { "application": "${APPLICATION_NAME}" }, @@ -247,12 +283,12 @@ } }, "spec": { - "host": "${APPLICATION_HOSTNAME}", + "host": "${APPLICATION_DOMAIN}", "to": { "name": "secure-${APPLICATION_NAME}" }, "tls": { - "termination" : "passthrough" + "termination": "passthrough" } } }, @@ -279,18 +315,19 @@ "source": { "type": "Git", "git": { - "uri": "${GIT_URI}", - "ref": "${GIT_REF}" + "uri": "${SOURCE_REPOSITORY_URL}", + "ref": "${SOURCE_REPOSITORY_REF}" }, - "contextDir":"${GIT_CONTEXT_DIR}" + "contextDir": "${CONTEXT_DIR}" }, "strategy": { "type": "Source", "sourceStrategy": { + "forcePull": true, "from": { "kind": "ImageStreamTag", - "namespace": "openshift", - "name": "jboss-webserver3-tomcat8-openshift:${JWS_RELEASE}" + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "jboss-webserver30-tomcat8-openshift:1.1" } } }, @@ -304,18 +341,21 @@ { "type": "GitHub", "github": { - "secret": "${GITHUB_TRIGGER_SECRET}" + "secret": "${GITHUB_WEBHOOK_SECRET}" } }, { "type": "Generic", "generic": { - "secret": "${GENERIC_TRIGGER_SECRET}" + "secret": "${GENERIC_WEBHOOK_SECRET}" } }, { "type": "ImageChange", "imageChange": {} + }, + { + "type": "ConfigChange" } ] } @@ -346,6 +386,9 @@ "name": "${APPLICATION_NAME}" } } + }, + { + "type": "ConfigChange" } ], "replicas": 1, @@ -362,6 +405,7 @@ }, "spec": { "serviceAccount": "jws-service-account", + "terminationGracePeriodSeconds": 60, "containers": [ { "name": "${APPLICATION_NAME}", @@ -490,10 +534,13 @@ ], "from": { "kind": "ImageStreamTag", - "namespace": "openshift", + "namespace": "${IMAGE_STREAM_NAMESPACE}", "name": "mysql:latest" } } + }, + { + "type": "ConfigChange" } ], "replicas": 1, @@ -509,6 +556,7 @@ } }, "spec": { + "terminationGracePeriodSeconds": 60, "containers": [ { "name": "${APPLICATION_NAME}-mysql", @@ -519,6 +567,12 @@ "protocol": "TCP" } ], + "volumeMounts": [ + { + "mountPath": "/var/lib/mysql/data", + "name": "${APPLICATION_NAME}-mysql-pvol" + } + ], "env": [ { "name": "MYSQL_USER", @@ -554,10 +608,38 @@ } ] } + ], + "volumes": [ + { + "name": "${APPLICATION_NAME}-mysql-pvol", + "persistentVolumeClaim": { + "claimName": "${APPLICATION_NAME}-mysql-claim" + } + } ] } } } + }, + { + "apiVersion": "v1", + "kind": "PersistentVolumeClaim", + "metadata": { + "name": "${APPLICATION_NAME}-mysql-claim", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "accessModes": [ + "ReadWriteOnce" + ], + "resources": { + "requests": { + 
"storage": "${VOLUME_CAPACITY}" + } + } + } } ] -} +}
\ No newline at end of file diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-mysql-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat8-mysql-s2i.json index 2ae59ec71..1b9624756 100644 --- a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat7-mysql-sti.json +++ b/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat8-mysql-s2i.json @@ -3,141 +3,171 @@ "apiVersion": "v1", "metadata": { "annotations": { - "iconClass" : "icon-tomcat", - "description": "Application template for JWS MySQL applications built using STI." + "iconClass": "icon-tomcat", + "description": "Application template for JWS MySQL applications built using S2I.", + "tags": "tomcat,tomcat8,mysql,java,database,jboss,xpaas", + "version": "1.1.0" }, - "name": "jws-tomcat7-mysql-sti" + "name": "jws30-tomcat8-mysql-s2i" }, "labels": { - "template": "jws-tomcat7-mysql-sti" + "template": "jws30-tomcat8-mysql-s2i", + "xpaas": "1.1.0" }, "parameters": [ { - "description": "JWS Release version, e.g. 3.0, 2.1, etc.", - "name": "JWS_RELEASE", - "value": "3.0" - }, - { "description": "The name for the application.", "name": "APPLICATION_NAME", - "value": "jws-app" + "value": "jws-app", + "required": true }, { "description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>", - "name": "APPLICATION_HOSTNAME", - "value": "" + "name": "APPLICATION_DOMAIN", + "value": "", + "required": false }, { "description": "Git source URI for application", - "name": "GIT_URI" + "name": "SOURCE_REPOSITORY_URL", + "value": "https://github.com/jboss-openshift/openshift-quickstarts", + "required": true }, { "description": "Git branch/tag reference", - "name": "GIT_REF", - "value": "master" + "name": "SOURCE_REPOSITORY_REF", + "value": "1.1", + "required": false }, { "description": "Path within Git project to build; empty for root project directory.", - "name": "GIT_CONTEXT_DIR", - "value": "" + "name": "CONTEXT_DIR", + "value": "todolist/todolist-jdbc", + "required": false }, { "description": "Database JNDI name used by application to resolve the datasource, e.g. 
java:/jboss/datasources/mongodb", "name": "DB_JNDI", - "value": "" + "value": "java:jboss/datasources/TodoListDS", + "required": false }, { "description": "Database name", "name": "DB_DATABASE", - "value": "root" + "value": "root", + "required": true }, { "description": "The name of the secret containing the certificate files", "name": "JWS_HTTPS_SECRET", - "value": "jws-app-secret" + "value": "jws-app-secret", + "required": true }, { "description": "The name of the certificate file within the secret", "name": "JWS_HTTPS_CERTIFICATE", - "value": "server.crt" + "value": "server.crt", + "required": false }, { "description": "The name of the certificate key file within the secret", "name": "JWS_HTTPS_CERTIFICATE_KEY", - "value": "server.key" + "value": "server.key", + "required": false }, { "description": "The certificate password", "name": "JWS_HTTPS_CERTIFICATE_PASSWORD", - "value": "" + "value": "", + "required": false }, { "description": "Sets xa-pool/min-pool-size for the configured datasource.", - "name": "DB_MIN_POOL_SIZE" + "name": "DB_MIN_POOL_SIZE", + "required": false }, { "description": "Sets xa-pool/max-pool-size for the configured datasource.", - "name": "DB_MAX_POOL_SIZE" + "name": "DB_MAX_POOL_SIZE", + "required": false }, { "description": "Sets transaction-isolation for the configured datasource.", - "name": "DB_TX_ISOLATION" + "name": "DB_TX_ISOLATION", + "required": false }, { "description": "Sets how the table names are stored and compared.", - "name": "MYSQL_LOWER_CASE_TABLE_NAMES" + "name": "MYSQL_LOWER_CASE_TABLE_NAMES", + "required": false }, { "description": "The maximum permitted number of simultaneous client connections.", - "name": "MYSQL_MAX_CONNECTIONS" + "name": "MYSQL_MAX_CONNECTIONS", + "required": false }, { "description": "The minimum length of the word to be included in a FULLTEXT index.", - "name": "MYSQL_FT_MIN_WORD_LEN" + "name": "MYSQL_FT_MIN_WORD_LEN", + "required": false }, { "description": "The maximum length of the word to be included in a FULLTEXT index.", - "name": "MYSQL_FT_MAX_WORD_LEN" + "name": "MYSQL_FT_MAX_WORD_LEN", + "required": false }, { "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.", - "name": "MYSQL_AIO" + "name": "MYSQL_AIO", + "required": false }, { "description": "Database user name", "name": "DB_USERNAME", "from": "user[a-zA-Z0-9]{3}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "Database user password", "name": "DB_PASSWORD", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "JWS Admin User", "name": "JWS_ADMIN_USERNAME", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "JWS Admin Password", "name": "JWS_ADMIN_PASSWORD", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { - "description": "Github trigger secret", - "name": "GITHUB_TRIGGER_SECRET", + "description": "GitHub trigger secret", + "name": "GITHUB_WEBHOOK_SECRET", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "Generic build trigger secret", - "name": "GENERIC_TRIGGER_SECRET", + "name": "GENERIC_WEBHOOK_SECRET", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true + }, + { + "description": "Namespace in which the ImageStreams for Red Hat Middleware images are 
installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.", + "name": "IMAGE_STREAM_NAMESPACE", + "value": "openshift", + "required": true } ], "objects": [ @@ -216,9 +246,9 @@ { "kind": "Route", "apiVersion": "v1", - "id": "${APPLICATION_NAME}-http-route", + "id": "${APPLICATION_NAME}-http", "metadata": { - "name": "${APPLICATION_NAME}-http-route", + "name": "${APPLICATION_NAME}", "labels": { "application": "${APPLICATION_NAME}" }, @@ -227,7 +257,7 @@ } }, "spec": { - "host": "${APPLICATION_HOSTNAME}", + "host": "${APPLICATION_DOMAIN}", "to": { "name": "${APPLICATION_NAME}" } @@ -236,9 +266,9 @@ { "kind": "Route", "apiVersion": "v1", - "id": "${APPLICATION_NAME}-https-route", + "id": "${APPLICATION_NAME}-https", "metadata": { - "name": "${APPLICATION_NAME}-https-route", + "name": "secure-${APPLICATION_NAME}", "labels": { "application": "${APPLICATION_NAME}" }, @@ -247,12 +277,12 @@ } }, "spec": { - "host": "${APPLICATION_HOSTNAME}", + "host": "${APPLICATION_DOMAIN}", "to": { "name": "secure-${APPLICATION_NAME}" }, "tls": { - "termination" : "passthrough" + "termination": "passthrough" } } }, @@ -279,18 +309,19 @@ "source": { "type": "Git", "git": { - "uri": "${GIT_URI}", - "ref": "${GIT_REF}" + "uri": "${SOURCE_REPOSITORY_URL}", + "ref": "${SOURCE_REPOSITORY_REF}" }, - "contextDir":"${GIT_CONTEXT_DIR}" + "contextDir": "${CONTEXT_DIR}" }, "strategy": { "type": "Source", "sourceStrategy": { + "forcePull": true, "from": { "kind": "ImageStreamTag", - "namespace": "openshift", - "name": "jboss-webserver3-tomcat7-openshift:${JWS_RELEASE}" + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "jboss-webserver30-tomcat8-openshift:1.1" } } }, @@ -304,18 +335,21 @@ { "type": "GitHub", "github": { - "secret": "${GITHUB_TRIGGER_SECRET}" + "secret": "${GITHUB_WEBHOOK_SECRET}" } }, { "type": "Generic", "generic": { - "secret": "${GENERIC_TRIGGER_SECRET}" + "secret": "${GENERIC_WEBHOOK_SECRET}" } }, { "type": "ImageChange", "imageChange": {} + }, + { + "type": "ConfigChange" } ] } @@ -346,6 +380,9 @@ "name": "${APPLICATION_NAME}" } } + }, + { + "type": "ConfigChange" } ], "replicas": 1, @@ -362,6 +399,7 @@ }, "spec": { "serviceAccount": "jws-service-account", + "terminationGracePeriodSeconds": 60, "containers": [ { "name": "${APPLICATION_NAME}", @@ -490,10 +528,13 @@ ], "from": { "kind": "ImageStreamTag", - "namespace": "openshift", + "namespace": "${IMAGE_STREAM_NAMESPACE}", "name": "mysql:latest" } } + }, + { + "type": "ConfigChange" } ], "replicas": 1, @@ -509,6 +550,7 @@ } }, "spec": { + "terminationGracePeriodSeconds": 60, "containers": [ { "name": "${APPLICATION_NAME}-mysql", @@ -560,4 +602,4 @@ } } ] -} +}
\ No newline at end of file diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-postgresql-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat8-postgresql-persistent-s2i.json index b46f23225..dc492a38e 100644 --- a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-postgresql-sti.json +++ b/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat8-postgresql-persistent-s2i.json @@ -3,129 +3,162 @@ "apiVersion": "v1", "metadata": { "annotations": { - "iconClass" : "icon-tomcat", - "description": "Application template for JWS PostgreSQL applications built using STI." + "iconClass": "icon-tomcat", + "description": "Application template for JWS PostgreSQL applications with persistent storage built using S2I.", + "tags": "tomcat,tomcat8,postgresql,java,database,jboss,xpaas", + "version": "1.1.0" }, - "name": "jws-tomcat8-postgresql-sti" + "name": "jws30-tomcat8-postgresql-persistent-s2i" }, "labels": { - "template": "jws-tomcat8-postgresql-sti" + "template": "jws30-tomcat8-postgresql-persistent-s2i", + "xpaas": "1.1.0" }, "parameters": [ { - "description": "JWS Release version, e.g. 3.0, 2.1, etc.", - "name": "JWS_RELEASE", - "value": "3.0" - }, - { "description": "The name for the application.", "name": "APPLICATION_NAME", - "value": "jws-app" + "value": "jws-app", + "required": true }, { "description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>", - "name": "APPLICATION_HOSTNAME", - "value": "" + "name": "APPLICATION_DOMAIN", + "value": "", + "required": false }, { "description": "Git source URI for application", - "name": "GIT_URI" + "name": "SOURCE_REPOSITORY_URL", + "value": "https://github.com/jboss-openshift/openshift-quickstarts", + "required": true }, { "description": "Git branch/tag reference", - "name": "GIT_REF", - "value": "master" + "name": "SOURCE_REPOSITORY_REF", + "value": "1.1", + "required": false }, { "description": "Path within Git project to build; empty for root project directory.", - "name": "GIT_CONTEXT_DIR", - "value": "" + "name": "CONTEXT_DIR", + "value": "todolist/todolist-jdbc", + "required": false }, { "description": "Database JNDI name used by application to resolve the datasource, e.g. 
java:/jboss/datasources/mongodb", "name": "DB_JNDI", - "value": "" + "value": "java:jboss/datasources/TodoListDS", + "required": false }, { "description": "Database name", "name": "DB_DATABASE", - "value": "root" + "value": "root", + "required": true + }, + { + "description": "Size of persistent storage for database volume.", + "name": "VOLUME_CAPACITY", + "value": "512Mi", + "required": true }, { "description": "The name of the secret containing the certificate files", "name": "JWS_HTTPS_SECRET", - "value": "jws-app-secret" + "value": "jws-app-secret", + "required": true }, { "description": "The name of the certificate file within the secret", "name": "JWS_HTTPS_CERTIFICATE", - "value": "server.crt" + "value": "server.crt", + "required": false }, { "description": "The name of the certificate key file within the secret", "name": "JWS_HTTPS_CERTIFICATE_KEY", - "value": "server.key" + "value": "server.key", + "required": false }, { "description": "The certificate password", "name": "JWS_HTTPS_CERTIFICATE_PASSWORD", - "value": "" + "value": "", + "required": false }, { "description": "Sets xa-pool/min-pool-size for the configured datasource.", - "name": "DB_MIN_POOL_SIZE" + "name": "DB_MIN_POOL_SIZE", + "required": false }, { "description": "Sets xa-pool/max-pool-size for the configured datasource.", - "name": "DB_MAX_POOL_SIZE" + "name": "DB_MAX_POOL_SIZE", + "required": false }, { "description": "Sets transaction-isolation for the configured datasource.", - "name": "DB_TX_ISOLATION" + "name": "DB_TX_ISOLATION", + "required": false }, { "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.", - "name": "POSTGRESQL_MAX_CONNECTIONS" + "name": "POSTGRESQL_MAX_CONNECTIONS", + "required": false }, { "description": "Configures how much memory is dedicated to PostgreSQL for caching data.", - "name": "POSTGRESQL_SHARED_BUFFERS" + "name": "POSTGRESQL_SHARED_BUFFERS", + "required": false }, { "description": "Database user name", "name": "DB_USERNAME", "from": "user[a-zA-Z0-9]{3}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "Database user password", "name": "DB_PASSWORD", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "JWS Admin User", "name": "JWS_ADMIN_USERNAME", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "JWS Admin Password", "name": "JWS_ADMIN_PASSWORD", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { - "description": "Github trigger secret", - "name": "GITHUB_TRIGGER_SECRET", + "description": "GitHub trigger secret", + "name": "GITHUB_WEBHOOK_SECRET", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "Generic build trigger secret", - "name": "GENERIC_TRIGGER_SECRET", + "name": "GENERIC_WEBHOOK_SECRET", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true + }, + { + "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. 
You should only need to modify this if you've installed the ImageStreams in a different namespace/project.", + "name": "IMAGE_STREAM_NAMESPACE", + "value": "openshift", + "required": true } ], "objects": [ @@ -204,9 +237,9 @@ { "kind": "Route", "apiVersion": "v1", - "id": "${APPLICATION_NAME}-http-route", + "id": "${APPLICATION_NAME}-http", "metadata": { - "name": "${APPLICATION_NAME}-http-route", + "name": "${APPLICATION_NAME}", "labels": { "application": "${APPLICATION_NAME}" }, @@ -215,7 +248,7 @@ } }, "spec": { - "host": "${APPLICATION_HOSTNAME}", + "host": "${APPLICATION_DOMAIN}", "to": { "name": "${APPLICATION_NAME}" } @@ -224,9 +257,9 @@ { "kind": "Route", "apiVersion": "v1", - "id": "${APPLICATION_NAME}-https-route", + "id": "${APPLICATION_NAME}-https", "metadata": { - "name": "${APPLICATION_NAME}-https-route", + "name": "secure-${APPLICATION_NAME}", "labels": { "application": "${APPLICATION_NAME}" }, @@ -235,12 +268,12 @@ } }, "spec": { - "host": "${APPLICATION_HOSTNAME}", + "host": "${APPLICATION_DOMAIN}", "to": { "name": "secure-${APPLICATION_NAME}" }, "tls": { - "termination" : "passthrough" + "termination": "passthrough" } } }, @@ -267,18 +300,19 @@ "source": { "type": "Git", "git": { - "uri": "${GIT_URI}", - "ref": "${GIT_REF}" + "uri": "${SOURCE_REPOSITORY_URL}", + "ref": "${SOURCE_REPOSITORY_REF}" }, - "contextDir":"${GIT_CONTEXT_DIR}" + "contextDir": "${CONTEXT_DIR}" }, "strategy": { "type": "Source", "sourceStrategy": { + "forcePull": true, "from": { "kind": "ImageStreamTag", - "namespace": "openshift", - "name": "jboss-webserver3-tomcat8-openshift:${JWS_RELEASE}" + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "jboss-webserver30-tomcat8-openshift:1.1" } } }, @@ -292,18 +326,21 @@ { "type": "GitHub", "github": { - "secret": "${GITHUB_TRIGGER_SECRET}" + "secret": "${GITHUB_WEBHOOK_SECRET}" } }, { "type": "Generic", "generic": { - "secret": "${GENERIC_TRIGGER_SECRET}" + "secret": "${GENERIC_WEBHOOK_SECRET}" } }, { "type": "ImageChange", "imageChange": {} + }, + { + "type": "ConfigChange" } ] } @@ -334,6 +371,9 @@ "name": "${APPLICATION_NAME}" } } + }, + { + "type": "ConfigChange" } ], "replicas": 1, @@ -350,6 +390,7 @@ }, "spec": { "serviceAccount": "jws-service-account", + "terminationGracePeriodSeconds": 60, "containers": [ { "name": "${APPLICATION_NAME}", @@ -478,10 +519,13 @@ ], "from": { "kind": "ImageStreamTag", - "namespace": "openshift", + "namespace": "${IMAGE_STREAM_NAMESPACE}", "name": "postgresql:latest" } } + }, + { + "type": "ConfigChange" } ], "replicas": 1, @@ -497,6 +541,7 @@ } }, "spec": { + "terminationGracePeriodSeconds": 60, "containers": [ { "name": "${APPLICATION_NAME}-postgresql", @@ -507,6 +552,12 @@ "protocol": "TCP" } ], + "volumeMounts": [ + { + "mountPath": "/var/lib/pgsql/data", + "name": "${APPLICATION_NAME}-postgresql-pvol" + } + ], "env": [ { "name": "POSTGRESQL_USER", @@ -530,10 +581,38 @@ } ] } + ], + "volumes": [ + { + "name": "${APPLICATION_NAME}-postgresql-pvol", + "persistentVolumeClaim": { + "claimName": "${APPLICATION_NAME}-postgresql-claim" + } + } ] } } } + }, + { + "apiVersion": "v1", + "kind": "PersistentVolumeClaim", + "metadata": { + "name": "${APPLICATION_NAME}-postgresql-claim", + "labels": { + "application": "${APPLICATION_NAME}" + } + }, + "spec": { + "accessModes": [ + "ReadWriteOnce" + ], + "resources": { + "requests": { + "storage": "${VOLUME_CAPACITY}" + } + } + } } ] -} +}
\ No newline at end of file diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-postgresql-persistent-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat8-postgresql-s2i.json index b4644ac08..242b37a79 100644 --- a/roles/openshift_examples/files/examples/xpaas-templates/jws-tomcat8-postgresql-persistent-sti.json +++ b/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat8-postgresql-s2i.json @@ -3,134 +3,156 @@ "apiVersion": "v1", "metadata": { "annotations": { - "iconClass" : "icon-tomcat", - "description": "Application template for JWS PostgreSQL applications with persistent storage built using STI." + "iconClass": "icon-tomcat", + "description": "Application template for JWS PostgreSQL applications built using S2I.", + "tags": "tomcat,tomcat8,postgresql,java,database,jboss,xpaas", + "version": "1.1.0" }, - "name": "jws-tomcat8-postgresql-persistent-sti" + "name": "jws30-tomcat8-postgresql-s2i" }, "labels": { - "template": "jws-tomcat8-postgresql-persistent-sti" + "template": "jws30-tomcat8-postgresql-s2i", + "xpaas": "1.1.0" }, "parameters": [ { - "description": "JWS Release version, e.g. 3.0, 2.1, etc.", - "name": "JWS_RELEASE", - "value": "3.0" - }, - { "description": "The name for the application.", "name": "APPLICATION_NAME", - "value": "jws-app" + "value": "jws-app", + "required": true }, { "description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>", - "name": "APPLICATION_HOSTNAME", - "value": "" + "name": "APPLICATION_DOMAIN", + "value": "", + "required": false }, { "description": "Git source URI for application", - "name": "GIT_URI" + "name": "SOURCE_REPOSITORY_URL", + "value": "https://github.com/jboss-openshift/openshift-quickstarts", + "required": true }, { "description": "Git branch/tag reference", - "name": "GIT_REF", - "value": "master" + "name": "SOURCE_REPOSITORY_REF", + "value": "1.1", + "required": false }, { "description": "Path within Git project to build; empty for root project directory.", - "name": "GIT_CONTEXT_DIR", - "value": "" + "name": "CONTEXT_DIR", + "value": "todolist/todolist-jdbc", + "required": false }, { "description": "Database JNDI name used by application to resolve the datasource, e.g. 
java:/jboss/datasources/mongodb", "name": "DB_JNDI", - "value": "" + "value": "java:jboss/datasources/TodoListDS", + "required": false }, { "description": "Database name", "name": "DB_DATABASE", - "value": "root" - }, - { - "description": "Size of persistent storage for database volume.", - "name": "VOLUME_CAPACITY", - "value": "512Mi" + "value": "root", + "required": true }, { "description": "The name of the secret containing the certificate files", "name": "JWS_HTTPS_SECRET", - "value": "jws-app-secret" + "value": "jws-app-secret", + "required": true }, { "description": "The name of the certificate file within the secret", "name": "JWS_HTTPS_CERTIFICATE", - "value": "server.crt" + "value": "server.crt", + "required": false }, { "description": "The name of the certificate key file within the secret", "name": "JWS_HTTPS_CERTIFICATE_KEY", - "value": "server.key" + "value": "server.key", + "required": false }, { "description": "The certificate password", "name": "JWS_HTTPS_CERTIFICATE_PASSWORD", - "value": "" + "value": "", + "required": false }, { "description": "Sets xa-pool/min-pool-size for the configured datasource.", - "name": "DB_MIN_POOL_SIZE" + "name": "DB_MIN_POOL_SIZE", + "required": false }, { "description": "Sets xa-pool/max-pool-size for the configured datasource.", - "name": "DB_MAX_POOL_SIZE" + "name": "DB_MAX_POOL_SIZE", + "required": false }, { "description": "Sets transaction-isolation for the configured datasource.", - "name": "DB_TX_ISOLATION" + "name": "DB_TX_ISOLATION", + "required": false }, { "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.", - "name": "POSTGRESQL_MAX_CONNECTIONS" + "name": "POSTGRESQL_MAX_CONNECTIONS", + "required": false }, { "description": "Configures how much memory is dedicated to PostgreSQL for caching data.", - "name": "POSTGRESQL_SHARED_BUFFERS" + "name": "POSTGRESQL_SHARED_BUFFERS", + "required": false }, { "description": "Database user name", "name": "DB_USERNAME", "from": "user[a-zA-Z0-9]{3}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "Database user password", "name": "DB_PASSWORD", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "JWS Admin User", "name": "JWS_ADMIN_USERNAME", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "JWS Admin Password", "name": "JWS_ADMIN_PASSWORD", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { - "description": "Github trigger secret", - "name": "GITHUB_TRIGGER_SECRET", + "description": "GitHub trigger secret", + "name": "GITHUB_WEBHOOK_SECRET", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true }, { "description": "Generic build trigger secret", - "name": "GENERIC_TRIGGER_SECRET", + "name": "GENERIC_WEBHOOK_SECRET", "from": "[a-zA-Z0-9]{8}", - "generate": "expression" + "generate": "expression", + "required": true + }, + { + "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. 
You should only need to modify this if you've installed the ImageStreams in a different namespace/project.", + "name": "IMAGE_STREAM_NAMESPACE", + "value": "openshift", + "required": true } ], "objects": [ @@ -209,9 +231,9 @@ { "kind": "Route", "apiVersion": "v1", - "id": "${APPLICATION_NAME}-http-route", + "id": "${APPLICATION_NAME}-http", "metadata": { - "name": "${APPLICATION_NAME}-http-route", + "name": "${APPLICATION_NAME}", "labels": { "application": "${APPLICATION_NAME}" }, @@ -220,7 +242,7 @@ } }, "spec": { - "host": "${APPLICATION_HOSTNAME}", + "host": "${APPLICATION_DOMAIN}", "to": { "name": "${APPLICATION_NAME}" } @@ -229,9 +251,9 @@ { "kind": "Route", "apiVersion": "v1", - "id": "${APPLICATION_NAME}-https-route", + "id": "${APPLICATION_NAME}-https", "metadata": { - "name": "${APPLICATION_NAME}-https-route", + "name": "secure-${APPLICATION_NAME}", "labels": { "application": "${APPLICATION_NAME}" }, @@ -240,12 +262,12 @@ } }, "spec": { - "host": "${APPLICATION_HOSTNAME}", + "host": "${APPLICATION_DOMAIN}", "to": { "name": "secure-${APPLICATION_NAME}" }, "tls": { - "termination" : "passthrough" + "termination": "passthrough" } } }, @@ -272,18 +294,19 @@ "source": { "type": "Git", "git": { - "uri": "${GIT_URI}", - "ref": "${GIT_REF}" + "uri": "${SOURCE_REPOSITORY_URL}", + "ref": "${SOURCE_REPOSITORY_REF}" }, - "contextDir":"${GIT_CONTEXT_DIR}" + "contextDir": "${CONTEXT_DIR}" }, "strategy": { "type": "Source", "sourceStrategy": { + "forcePull": true, "from": { "kind": "ImageStreamTag", - "namespace": "openshift", - "name": "jboss-webserver3-tomcat8-openshift:${JWS_RELEASE}" + "namespace": "${IMAGE_STREAM_NAMESPACE}", + "name": "jboss-webserver30-tomcat8-openshift:1.1" } } }, @@ -297,18 +320,21 @@ { "type": "GitHub", "github": { - "secret": "${GITHUB_TRIGGER_SECRET}" + "secret": "${GITHUB_WEBHOOK_SECRET}" } }, { "type": "Generic", "generic": { - "secret": "${GENERIC_TRIGGER_SECRET}" + "secret": "${GENERIC_WEBHOOK_SECRET}" } }, { "type": "ImageChange", "imageChange": {} + }, + { + "type": "ConfigChange" } ] } @@ -339,6 +365,9 @@ "name": "${APPLICATION_NAME}" } } + }, + { + "type": "ConfigChange" } ], "replicas": 1, @@ -483,10 +512,13 @@ ], "from": { "kind": "ImageStreamTag", - "namespace": "openshift", + "namespace": "${IMAGE_STREAM_NAMESPACE}", "name": "postgresql:latest" } } + }, + { + "type": "ConfigChange" } ], "replicas": 1, @@ -512,12 +544,6 @@ "protocol": "TCP" } ], - "volumeMounts": [ - { - "mountPath": "/var/lib/pgsql/data", - "name": "${APPLICATION_NAME}-postgresql-pvol" - } - ], "env": [ { "name": "POSTGRESQL_USER", @@ -541,36 +567,10 @@ } ] } - ], - "volumes": [ - { - "name": "${APPLICATION_NAME}-postgresql-pvol", - "persistentVolumeClaim": { - "claimName": "${APPLICATION_NAME}-postgresql-claim" - } - } ] } } } - }, - { - "apiVersion": "v1", - "kind": "PersistentVolumeClaim", - "metadata": { - "name": "${APPLICATION_NAME}-postgresql-claim", - "labels": { - "application": "${APPLICATION_NAME}" - } - }, - "spec": { - "accessModes": [ "ReadWriteOnce" ], - "resources": { - "requests": { - "storage": "${VOLUME_CAPACITY}" - } - } - } } ] -} +}
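Editor's note: all of the renamed `jws30-tomcat8-*` templates above follow the same parameter pattern: the old `*_TRIGGER_SECRET` names become `GITHUB_WEBHOOK_SECRET`/`GENERIC_WEBHOOK_SECRET`, the Git parameters become `SOURCE_REPOSITORY_URL`/`SOURCE_REPOSITORY_REF`/`CONTEXT_DIR`, every generated value is marked `"required": true`, and a new `IMAGE_STREAM_NAMESPACE` parameter defaults to `openshift`. The following is an independent sketch (the directory is the one shown in the diff headers; adjust it to your checkout) that flags any template still carrying the old names or missing the new flags:

```
import glob
import json
import os

# Directory as it appears in the diff headers; adjust to your checkout.
TEMPLATE_DIR = "roles/openshift_examples/files/examples/xpaas-templates"
OLD_NAMES = {"GITHUB_TRIGGER_SECRET", "GENERIC_TRIGGER_SECRET", "APPLICATION_HOSTNAME",
             "GIT_URI", "GIT_REF", "GIT_CONTEXT_DIR", "JWS_RELEASE"}

for path in sorted(glob.glob(os.path.join(TEMPLATE_DIR, "jws30-tomcat8-*.json"))):
    with open(path) as f:
        params = json.load(f).get("parameters", [])
    names = {p["name"] for p in params}

    leftovers = names & OLD_NAMES
    if leftovers:
        print("%s: old parameter names remain: %s" % (path, ", ".join(sorted(leftovers))))

    # Generated secrets and credentials should now all be marked required.
    for p in params:
        if p.get("generate") == "expression" and not p.get("required"):
            print("%s: generated parameter %s is not marked required" % (path, p["name"]))

    if "IMAGE_STREAM_NAMESPACE" not in names:
        print("%s: missing IMAGE_STREAM_NAMESPACE" % path)
```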
\ No newline at end of file diff --git a/roles/openshift_examples/tasks/main.yml b/roles/openshift_examples/tasks/main.yml index 40b7a5d6e..0b4784bae 100644 --- a/roles/openshift_examples/tasks/main.yml +++ b/roles/openshift_examples/tasks/main.yml @@ -37,6 +37,72 @@ failed_when: "'already exists' not in oex_import_quickstarts.stderr and oex_import_quickstarts.rc != 0" changed_when: false +- name: Import origin infrastructure-templates + command: > + {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} -n openshift -f {{ infrastructure_origin_base }} + when: openshift_examples_load_centos | bool + register: oex_import_infrastructure + failed_when: "'already exists' not in oex_import_infrastructure.stderr and oex_import_infrastructure.rc != 0" + changed_when: false + +- name: Import enterprise infrastructure-templates + command: > + {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} -n openshift -f {{ infrastructure_enterprise_base }} + when: openshift_examples_load_rhel | bool + register: oex_import_infrastructure + failed_when: "'already exists' not in oex_import_infrastructure.stderr and oex_import_infrastructure.rc != 0" + changed_when: false + +# The 1.1 release of the xpaas content for OpenShift renamed all the templates +- name: Remove old xpaas templates from filesystem + file: + path: "{{ xpaas_templates_base }}/{{ item }}" + state: absent + with_items: + - amq6-persistent.json + - amq6.json + - eap6-amq-persistent-sti.json + - eap6-amq-sti.json + - eap6-basic-sti.json + - eap6-https-sti.json + - eap6-mongodb-persistent-sti.json + - eap6-mongodb-sti.json + - eap6-mysql-persistent-sti.json + - eap6-mysql-sti.json + - eap6-postgresql-persistent-sti.json + - eap6-postgresql-sti.json + - jws-tomcat7-basic-sti.json + - jws-tomcat7-https-sti.json + - jws-tomcat7-mongodb-sti.json + - jws-tomcat7-mongodb-persistent-sti.json + - jws-tomcat7-mysql-persistent-sti.json + - jws-tomcat7-mysql-sti.json + - jws-tomcat7-postgresql-persistent-sti.json + - jws-tomcat8-postgresql-persistent-sti.json + - jws-tomcat8-basic-sti.json + - jws-tomcat8-https-sti.json + - jws-tomcat8-mongodb-sti.json + - jws-tomcat8-mongodb-persistent-sti.json + - jws-tomcat8-mysql-sti.json + - jws-tomcat8-mysql-persistent-sti.json + - jws-tomcat8-postgresql-sti.json + - jws-tomcat7-postgresql-sti.json + +- name: Remove old xpaas templates from openshift namespace + command: > + {{ openshift.common.client_binary }} -n openshift delete + templates/amq6 templates/amq6-persistent templates/eap6-amq-persistent-sti templates/eap6-amq-sti \ + templates/eap6-basic-sti templates/eap6-basic-sti templates/eap6-mongodb-persistent-sti templates/eap6-mongodb-sti \ + templates/eap6-mysql-persistent-sti templates/eap6-mysql-sti templates/eap6-postgresql-persistent-sti \ + templates/eap6-postgresql-sti templates/jws-tomcat7-basic-sti templates/jws-tomcat7-basic-sti \ + templates/jws-tomcat7-mongodb-persistent-sti templates/jws-tomcat7-mongodb-sti \ + templates/jws-tomcat7-mysql-persistent-sti templates/jws-tomcat7-mysql-sti \ + templates/jws-tomcat7-postgresql-persistent-sti templates/jws-tomcat7-postgresql-sti \ + templates/jws-tomcat8-basic-sti templates/jws-tomcat8-basic-sti templates/jws-tomcat8-mongodb-persistent-sti + when: openshift_examples_load_xpaas | bool + register: oex_delete_old_xpaas_templates + failed_when: "'not found' not in oex_delete_old_xpaas_templates.stderr and oex_delete_old_xpaas_templates.rc != 0" + changed_when: false - name: Import xPaas image streams command: > 
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py index 163e67f62..091ba4e2b 100755 --- a/roles/openshift_facts/library/openshift_facts.py +++ b/roles/openshift_facts/library/openshift_facts.py @@ -20,10 +20,27 @@ EXAMPLES = ''' import ConfigParser import copy import os +import StringIO +import yaml from distutils.util import strtobool from distutils.version import LooseVersion -from netaddr import IPNetwork +import struct +import socket +def first_ip(network): + """ Return the first IPv4 address in network + + Args: + network (str): network in CIDR format + Returns: + str: first IPv4 address + """ + atoi = lambda addr: struct.unpack("!I", socket.inet_aton(addr))[0] + itoa = lambda addr: socket.inet_ntoa(struct.pack("!I", addr)) + + (address, netmask) = network.split('/') + netmask_i = (0xffffffff << (32 - atoi(netmask))) & 0xffffffff + return itoa((atoi(address) & netmask_i) + 1) def hostname_valid(hostname): """ Test if specified hostname should be considered valid @@ -307,6 +324,23 @@ def set_fluentd_facts_if_unset(facts): facts['common']['use_fluentd'] = use_fluentd return facts +def set_flannel_facts_if_unset(facts): + """ Set flannel facts if not already present in facts dict + dict: the facts dict updated with the flannel facts if + missing + Args: + facts (dict): existing facts + Returns: + dict: the facts dict updated with the flannel + facts if they were not already present + + """ + if 'common' in facts: + if 'use_flannel' not in facts['common']: + use_flannel = False + facts['common']['use_flannel'] = use_flannel + return facts + def set_node_schedulability(facts): """ Set schedulable facts if not already present in facts dict Args: @@ -407,7 +441,7 @@ def set_identity_providers_if_unset(facts): name='allow_all', challenge=True, login=True, kind='AllowAllPasswordIdentityProvider' ) - if deployment_type == 'enterprise': + if deployment_type in ['enterprise', 'atomic-enterprise', 'openshift-enterprise']: identity_provider = dict( name='deny_all', challenge=True, login=True, kind='DenyAllPasswordIdentityProvider' @@ -484,12 +518,16 @@ def set_aggregate_facts(facts): dict: the facts dict updated with aggregated facts """ all_hostnames = set() + internal_hostnames = set() if 'common' in facts: all_hostnames.add(facts['common']['hostname']) all_hostnames.add(facts['common']['public_hostname']) all_hostnames.add(facts['common']['ip']) all_hostnames.add(facts['common']['public_ip']) + internal_hostnames.add(facts['common']['hostname']) + internal_hostnames.add(facts['common']['ip']) + if 'master' in facts: # FIXME: not sure why but facts['dns']['domain'] fails cluster_domain = 'cluster.local' @@ -497,13 +535,71 @@ def set_aggregate_facts(facts): all_hostnames.add(facts['master']['cluster_hostname']) if 'cluster_public_hostname' in facts['master']: all_hostnames.add(facts['master']['cluster_public_hostname']) - all_hostnames.update(['openshift', 'openshift.default', 'openshift.default.svc', - 'openshift.default.svc.' + cluster_domain, 'kubernetes', 'kubernetes.default', - 'kubernetes.default.svc', 'kubernetes.default.svc.' + cluster_domain]) - first_svc_ip = str(IPNetwork(facts['master']['portal_net'])[1]) + svc_names = ['openshift', 'openshift.default', 'openshift.default.svc', + 'openshift.default.svc.' + cluster_domain, 'kubernetes', 'kubernetes.default', + 'kubernetes.default.svc', 'kubernetes.default.svc.' 
+ cluster_domain] + all_hostnames.update(svc_names) + internal_hostnames.update(svc_names) + first_svc_ip = first_ip(facts['master']['portal_net']) all_hostnames.add(first_svc_ip) + internal_hostnames.add(first_svc_ip) facts['common']['all_hostnames'] = list(all_hostnames) + facts['common']['internal_hostnames'] = list(internal_hostnames) + + return facts + + +def set_etcd_facts_if_unset(facts): + """ + If using embedded etcd, loads the data directory from master-config.yaml. + + If using standalone etcd, loads ETCD_DATA_DIR from etcd.conf. + + If anything goes wrong parsing these, the fact will not be set. + """ + if 'master' in facts and facts['master']['embedded_etcd']: + etcd_facts = facts['etcd'] if 'etcd' in facts else dict() + + if 'etcd_data_dir' not in etcd_facts: + try: + # Parse master config to find actual etcd data dir: + master_cfg_path = os.path.join(facts['common']['config_base'], + 'master/master-config.yaml') + master_cfg_f = open(master_cfg_path, 'r') + config = yaml.safe_load(master_cfg_f.read()) + master_cfg_f.close() + + etcd_facts['etcd_data_dir'] = \ + config['etcdConfig']['storageDirectory'] + + facts['etcd'] = etcd_facts + + # We don't want exceptions bubbling up here: + # pylint: disable=broad-except + except Exception: + pass + else: + etcd_facts = facts['etcd'] if 'etcd' in facts else dict() + + # Read ETCD_DATA_DIR from /etc/etcd/etcd.conf: + try: + # Add a fake section for parsing: + ini_str = '[root]\n' + open('/etc/etcd/etcd.conf', 'r').read() + ini_fp = StringIO.StringIO(ini_str) + config = ConfigParser.RawConfigParser() + config.readfp(ini_fp) + etcd_data_dir = config.get('root', 'ETCD_DATA_DIR') + if etcd_data_dir.startswith('"') and etcd_data_dir.endswith('"'): + etcd_data_dir = etcd_data_dir[1:-1] + + etcd_facts['etcd_data_dir'] = etcd_data_dir + facts['etcd'] = etcd_facts + + # We don't want exceptions bubbling up here: + # pylint: disable=broad-except + except Exception: + pass return facts @@ -534,21 +630,18 @@ def set_deployment_facts_if_unset(facts): config_base = '/etc/origin' if deployment_type in ['enterprise', 'online']: config_base = '/etc/openshift' + # Handle upgrade scenarios when symlinks don't yet exist: + if not os.path.exists(config_base) and os.path.exists('/etc/openshift'): + config_base = '/etc/openshift' facts['common']['config_base'] = config_base if 'data_dir' not in facts['common']: data_dir = '/var/lib/origin' if deployment_type in ['enterprise', 'online']: data_dir = '/var/lib/openshift' + # Handle upgrade scenarios when symlinks don't yet exist: + if not os.path.exists(data_dir) and os.path.exists('/var/lib/openshift'): + data_dir = '/var/lib/openshift' facts['common']['data_dir'] = data_dir - facts['common']['version'] = version = get_openshift_version() - if version is not None: - if deployment_type == 'origin': - version_gt_3_1_or_1_1 = LooseVersion(version) > LooseVersion('1.0.6') - else: - version_gt_3_1_or_1_1 = LooseVersion(version) > LooseVersion('3.0.2.900') - else: - version_gt_3_1_or_1_1 = True - facts['common']['version_greater_than_3_1_or_1_1'] = version_gt_3_1_or_1_1 for role in ('master', 'node'): if role in facts: @@ -582,12 +675,34 @@ def set_deployment_facts_if_unset(facts): return facts +def set_version_facts_if_unset(facts): + """ Set version facts. This currently includes common.version and + common.version_greater_than_3_1_or_1_1. + + Args: + facts (dict): existing facts + Returns: + dict: the facts dict updated with version facts. 
+ """ + if 'common' in facts: + deployment_type = facts['common']['deployment_type'] + facts['common']['version'] = version = get_openshift_version() + if version is not None: + if deployment_type == 'origin': + version_gt_3_1_or_1_1 = LooseVersion(version) > LooseVersion('1.0.6') + else: + version_gt_3_1_or_1_1 = LooseVersion(version) > LooseVersion('3.0.2.900') + else: + version_gt_3_1_or_1_1 = True + facts['common']['version_greater_than_3_1_or_1_1'] = version_gt_3_1_or_1_1 + return facts -def set_sdn_facts_if_unset(facts): +def set_sdn_facts_if_unset(facts, system_facts): """ Set sdn facts if not already present in facts dict Args: facts (dict): existing facts + system_facts (dict): ansible_facts Returns: dict: the facts dict updated with the generated sdn facts if they were not already present @@ -606,9 +721,18 @@ def set_sdn_facts_if_unset(facts): if 'sdn_host_subnet_length' not in facts['master']: facts['master']['sdn_host_subnet_length'] = '8' - if 'node' in facts: - if 'sdn_mtu' not in facts['node']: - facts['node']['sdn_mtu'] = '1450' + if 'node' in facts and 'sdn_mtu' not in facts['node']: + node_ip = facts['common']['ip'] + + # default MTU if interface MTU cannot be detected + facts['node']['sdn_mtu'] = '1450' + + for val in system_facts.itervalues(): + if isinstance(val, dict) and 'mtu' in val: + mtu = val['mtu'] + + if 'ipv4' in val and val['ipv4'].get('address') == node_ip: + facts['node']['sdn_mtu'] = str(mtu - 50) return facts @@ -841,7 +965,7 @@ class OpenShiftFacts(object): Raises: OpenShiftFactsUnsupportedRoleError: """ - known_roles = ['common', 'master', 'node', 'master_sdn', 'node_sdn', 'dns'] + known_roles = ['common', 'master', 'node', 'master_sdn', 'node_sdn', 'dns', 'etcd'] def __init__(self, role, filename, local_facts): self.changed = False @@ -875,13 +999,16 @@ class OpenShiftFacts(object): facts = set_url_facts_if_unset(facts) facts = set_project_cfg_facts_if_unset(facts) facts = set_fluentd_facts_if_unset(facts) + facts = set_flannel_facts_if_unset(facts) facts = set_node_schedulability(facts) facts = set_master_selectors(facts) facts = set_metrics_facts_if_unset(facts) facts = set_identity_providers_if_unset(facts) - facts = set_sdn_facts_if_unset(facts) + facts = set_sdn_facts_if_unset(facts, self.system_facts) facts = set_deployment_facts_if_unset(facts) + facts = set_version_facts_if_unset(facts) facts = set_aggregate_facts(facts) + facts = set_etcd_facts_if_unset(facts) return dict(openshift=facts) def get_defaults(self, roles): @@ -920,11 +1047,12 @@ class OpenShiftFacts(object): session_name='ssn', session_secrets_file='', access_token_max_seconds=86400, auth_token_max_seconds=500, - oauth_grant_method='auto', cluster_defer_ha=False) + oauth_grant_method='auto') defaults['master'] = master if 'node' in roles: - node = dict(labels={}, annotations={}, portal_net='172.30.0.0/16') + node = dict(labels={}, annotations={}, portal_net='172.30.0.0/16', + iptables_sync_period='5s') defaults['node'] = node return defaults diff --git a/roles/openshift_facts/tasks/main.yml b/roles/openshift_facts/tasks/main.yml index a46b45b8c..913f0dc78 100644 --- a/roles/openshift_facts/tasks/main.yml +++ b/roles/openshift_facts/tasks/main.yml @@ -6,8 +6,10 @@ - ansible_version | version_compare('1.9.0', 'ne') - ansible_version | version_compare('1.9.0.1', 'ne') -- name: Ensure python-netaddr is installed - yum: pkg=python-netaddr state=installed +- name: Ensure PyYaml is installed + yum: pkg={{ item }} state=installed + with_items: + - PyYAML - name: Gather Cluster facts 
openshift_facts: diff --git a/roles/openshift_master/handlers/main.yml b/roles/openshift_master/handlers/main.yml index 37028e0f6..4b9500cbd 100644 --- a/roles/openshift_master/handlers/main.yml +++ b/roles/openshift_master/handlers/main.yml @@ -2,3 +2,13 @@ - name: restart master service: name={{ openshift.common.service_type }}-master state=restarted when: (not openshift_master_ha | bool) and (not master_service_status_changed | default(false)) + +- name: restart master api + service: name={{ openshift.common.service_type }}-master-api state=restarted + when: (openshift_master_ha | bool) and (not master_api_service_status_changed | default(false)) and openshift.master.cluster_method == 'native' + +# TODO: need to fix up ignore_errors here +- name: restart master controllers + service: name={{ openshift.common.service_type }}-master-controllers state=restarted + when: (openshift_master_ha | bool) and (not master_controllers_service_status_changed | default(false)) and openshift.master.cluster_method == 'native' + ignore_errors: yes diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml index 3a886935f..185bfb8f3 100644 --- a/roles/openshift_master/tasks/main.yml +++ b/roles/openshift_master/tasks/main.yml @@ -9,16 +9,22 @@ when: openshift_master_oauth_grant_method is defined - fail: + msg: "openshift_master_cluster_method must be set to either 'native' or 'pacemaker' for multi-master installations" + when: openshift_master_ha | bool and ((openshift_master_cluster_method is not defined) or (openshift_master_cluster_method is defined and openshift_master_cluster_method not in ["native", "pacemaker"])) +- fail: + msg: "'native' high availability is not supported for the requested OpenShift version" + when: openshift_master_ha | bool and openshift_master_cluster_method == "native" and not openshift.common.version_greater_than_3_1_or_1_1 | bool +- fail: msg: "openshift_master_cluster_password must be set for multi-master installations" - when: openshift_master_ha | bool and not openshift.master.cluster_defer_ha | bool and openshift_master_cluster_password is not defined + when: openshift_master_ha | bool and openshift_master_cluster_method == "pacemaker" and (openshift_master_cluster_password is not defined or not openshift_master_cluster_password) - name: Set master facts openshift_facts: role: master local_facts: + cluster_method: "{{ openshift_master_cluster_method | default(None) }}" cluster_hostname: "{{ openshift_master_cluster_hostname | default(None) }}" cluster_public_hostname: "{{ openshift_master_cluster_public_hostname | default(None) }}" - cluster_defer_ha: "{{ openshift_master_cluster_defer_ha | default(None) }}" debug_level: "{{ openshift_master_debug_level | default(openshift.common.debug_level) }}" api_port: "{{ openshift_master_api_port | default(None) }}" api_url: "{{ openshift_master_api_url | default(None) }}" @@ -41,6 +47,8 @@ portal_net: "{{ openshift_master_portal_net | default(None) }}" session_max_seconds: "{{ openshift_master_session_max_seconds | default(None) }}" session_name: "{{ openshift_master_session_name | default(None) }}" + session_auth_secrets: "{{ openshift_master_session_auth_secrets | default(None) }}" + session_encryption_secrets: "{{ openshift_master_session_encryption_secrets | default(None) }}" session_secrets_file: "{{ openshift_master_session_secrets_file | default(None) }}" access_token_max_seconds: "{{ openshift_master_access_token_max_seconds | default(None) }}" auth_token_max_seconds: "{{ 
openshift_master_auth_token_max_seconds | default(None) }}" @@ -63,6 +71,8 @@ controller_args: "{{ osm_controller_args | default(None) }}" infra_nodes: "{{ num_infra | default(None) }}" disabled_features: "{{ osm_disabled_features | default(None) }}" + master_count: "{{ openshift_master_count | default(None) }}" + controller_lease_ttl: "{{ osm_controller_lease_ttl | default(None) }}" - name: Install Master package yum: pkg={{ openshift.common.service_type }}-master{{ openshift_version }} state=present @@ -77,7 +87,7 @@ domain: cluster.local when: openshift.master.embedded_dns -- name: Create config parent directory if it doesn't exist +- name: Create config parent directory if it does not exist file: path: "{{ openshift_master_config_dir }}" state: directory @@ -90,6 +100,8 @@ creates: "{{ openshift_master_policy }}" notify: - restart master + - restart master api + - restart master controllers - name: Create the scheduler config template: @@ -98,6 +110,8 @@ backup: true notify: - restart master + - restart master api + - restart master controllers - name: Install httpd-tools if needed yum: pkg=httpd-tools state=present @@ -120,6 +134,44 @@ when: item.kind == 'HTPasswdPasswordIdentityProvider' with_items: openshift.master.identity_providers +# workaround for missing systemd unit files for controllers/api +- name: Create the api service file + template: + src: atomic-openshift-master-api.service.j2 + dest: /usr/lib/systemd/system/{{ openshift.common.service_type }}-master-api.service + force: no + when: openshift_master_ha | bool and openshift_master_cluster_method == "native" +- name: Create the controllers service file + template: + src: atomic-openshift-master-controllers.service.j2 + dest: /usr/lib/systemd/system/{{ openshift.common.service_type }}-master-controllers.service + force: no + when: openshift_master_ha | bool and openshift_master_cluster_method == "native" +- name: Create the api env file + template: + src: atomic-openshift-master-api.j2 + dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-api + force: no + when: openshift_master_ha | bool and openshift_master_cluster_method == "native" +- name: Create the controllers env file + template: + src: atomic-openshift-master-controllers.j2 + dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers + force: no + when: openshift_master_ha | bool and openshift_master_cluster_method == "native" +- command: systemctl daemon-reload + when: openshift_master_ha | bool and openshift_master_cluster_method == "native" +# end workaround for missing systemd unit files + +- name: Create session secrets file + template: + dest: "{{ openshift.master.session_secrets_file }}" + src: sessionSecretsFile.yaml.v1.j2 + force: no + notify: + - restart master + - restart master api + # TODO: add the validate parameter when there is a validation command to run - name: Create master config template: @@ -128,12 +180,15 @@ backup: true notify: - restart master + - restart master api + - restart master controllers - name: Configure master settings lineinfile: dest: /etc/sysconfig/{{ openshift.common.service_type }}-master regexp: "{{ item.regex }}" line: "{{ item.line }}" + create: yes with_items: - regex: '^OPTIONS=' line: "OPTIONS=--loglevel={{ openshift.master.debug_level }}" @@ -142,6 +197,34 @@ notify: - restart master +- name: Configure master api settings + lineinfile: + dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-api + regexp: "{{ item.regex }}" + line: "{{ item.line }}" + with_items: + - regex: 
'^OPTIONS=' + line: "OPTIONS=--loglevel={{ openshift.master.debug_level }} --listen=https://0.0.0.0:8443 --master=https://{{ openshift.common.ip }}:8443" + - regex: '^CONFIG_FILE=' + line: "CONFIG_FILE={{ openshift_master_config_file }}" + when: openshift_master_ha | bool and openshift_master_cluster_method == "native" + notify: + - restart master api + +- name: Configure master controller settings + lineinfile: + dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers + regexp: "{{ item.regex }}" + line: "{{ item.line }}" + with_items: + - regex: '^OPTIONS=' + line: "OPTIONS=--loglevel={{ openshift.master.debug_level }} --listen=https://0.0.0.0:8444" + - regex: '^CONFIG_FILE=' + line: "CONFIG_FILE={{ openshift_master_config_file }}" + when: openshift_master_ha | bool and openshift_master_cluster_method == "native" + notify: + - restart master controllers + - name: Start and enable master service: name={{ openshift.common.service_type }}-master enabled=yes state=started when: not openshift_master_ha | bool @@ -149,15 +232,37 @@ - set_fact: master_service_status_changed = start_result | changed + when: not openshift_master_ha | bool + +- name: Start and enable master api + service: name={{ openshift.common.service_type }}-master-api enabled=yes state=started + when: openshift_master_ha | bool and openshift.master.cluster_method == 'native' + register: start_result + +- set_fact: + master_api_service_status_changed = start_result | changed + when: openshift_master_ha | bool and openshift.master.cluster_method == 'native' + +# TODO: fix the ugly workaround of setting ignore_errors +# the controllers service tries to start even if it is already started +- name: Start and enable master controller + service: name={{ openshift.common.service_type }}-master-controllers enabled=yes state=started + when: openshift_master_ha | bool and openshift.master.cluster_method == 'native' + register: start_result + ignore_errors: yes + +- set_fact: + master_controllers_service_status_changed = start_result | changed + when: openshift_master_ha | bool and openshift.master.cluster_method == 'native' - name: Install cluster packages yum: pkg=pcs state=present - when: openshift_master_ha | bool and not openshift.master.cluster_defer_ha | bool + when: openshift_master_ha | bool and openshift.master.cluster_method == 'pacemaker' register: install_result - name: Start and enable cluster service service: name=pcsd enabled=yes state=started - when: openshift_master_ha | bool and not openshift.master.cluster_defer_ha | bool + when: openshift_master_ha | bool and openshift.master.cluster_method == 'pacemaker' - name: Set the cluster user password shell: echo {{ openshift_master_cluster_password | quote }} | passwd --stdin hacluster diff --git a/roles/openshift_master/templates/atomic-openshift-master-api.j2 b/roles/openshift_master/templates/atomic-openshift-master-api.j2 new file mode 100644 index 000000000..205934248 --- /dev/null +++ b/roles/openshift_master/templates/atomic-openshift-master-api.j2 @@ -0,0 +1,9 @@ +OPTIONS= +CONFIG_FILE={{ openshift_master_config_dir }}/master-config.yaml + +# Proxy configuration +# Origin uses standard HTTP_PROXY environment variables. 
Be sure to set +# NO_PROXY for your master +#NO_PROXY=master.example.com +#HTTP_PROXY=http://USER:PASSWORD@IPADDR:PORT +#HTTPS_PROXY=https://USER:PASSWORD@IPADDR:PORT diff --git a/roles/openshift_master/templates/atomic-openshift-master-api.service.j2 b/roles/openshift_master/templates/atomic-openshift-master-api.service.j2 new file mode 100644 index 000000000..ba19fb348 --- /dev/null +++ b/roles/openshift_master/templates/atomic-openshift-master-api.service.j2 @@ -0,0 +1,21 @@ +[Unit] +Description=Atomic OpenShift Master API +Documentation=https://github.com/openshift/origin +After=network.target +After=etcd.service +Before={{ openshift.common.service_type }}-node.service +Requires=network.target + +[Service] +Type=notify +EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-api +Environment=GOTRACEBACK=crash +ExecStart=/usr/bin/openshift start master api --config=${CONFIG_FILE} $OPTIONS +LimitNOFILE=131072 +LimitCORE=infinity +WorkingDirectory={{ openshift.common.data_dir }} +SyslogIdentifier=atomic-openshift-master-api + +[Install] +WantedBy=multi-user.target +WantedBy={{ openshift.common.service_type }}-node.service diff --git a/roles/openshift_master/templates/atomic-openshift-master-controllers.j2 b/roles/openshift_master/templates/atomic-openshift-master-controllers.j2 new file mode 100644 index 000000000..205934248 --- /dev/null +++ b/roles/openshift_master/templates/atomic-openshift-master-controllers.j2 @@ -0,0 +1,9 @@ +OPTIONS= +CONFIG_FILE={{ openshift_master_config_dir }}/master-config.yaml + +# Proxy configuration +# Origin uses standard HTTP_PROXY environment variables. Be sure to set +# NO_PROXY for your master +#NO_PROXY=master.example.com +#HTTP_PROXY=http://USER:PASSWORD@IPADDR:PORT +#HTTPS_PROXY=https://USER:PASSWORD@IPADDR:PORT diff --git a/roles/openshift_master/templates/atomic-openshift-master-controllers.service.j2 b/roles/openshift_master/templates/atomic-openshift-master-controllers.service.j2 new file mode 100644 index 000000000..8952c86ef --- /dev/null +++ b/roles/openshift_master/templates/atomic-openshift-master-controllers.service.j2 @@ -0,0 +1,22 @@ +[Unit] +Description=Atomic OpenShift Master Controllers +Documentation=https://github.com/openshift/origin +After=network.target +After={{ openshift.common.service_type }}-master-api.service +Before={{ openshift.common.service_type }}-node.service +Requires=network.target + +[Service] +Type=notify +EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers +Environment=GOTRACEBACK=crash +ExecStart=/usr/bin/openshift start master controllers --config=${CONFIG_FILE} $OPTIONS +LimitNOFILE=131072 +LimitCORE=infinity +WorkingDirectory={{ openshift.common.data_dir }} +SyslogIdentifier={{ openshift.common.service_type }}-master-controllers +Restart=on-failure + +[Install] +WantedBy=multi-user.target +WantedBy={{ openshift.common.service_type }}-node.service diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2 index 73a0bc6cc..bb12a0a0f 100644 --- a/roles/openshift_master/templates/master.yaml.v1.j2 +++ b/roles/openshift_master/templates/master.yaml.v1.j2 @@ -1,5 +1,5 @@ apiLevels: -{% if openshift.common.deployment_type == "enterprise" %} +{% if not openshift.common.version_greater_than_3_1_or_1_1 | bool %} - v1beta3 {% endif %} - v1 @@ -10,24 +10,33 @@ assetConfig: publicURL: {{ openshift.master.public_console_url }}/ servingInfo: bindAddress: {{ openshift.master.bind_addr }}:{{ 
openshift.master.console_port }} + bindNetwork: tcp4 certFile: master.server.crt clientCA: "" keyFile: master.server.key maxRequestsInFlight: 0 requestTimeoutSeconds: 0 +{% if openshift_master_ha | bool %} +controllerLeaseTTL: {{ openshift.master.controller_lease_ttl | default('30') }} +{% endif %} +controllers: '*' corsAllowedOrigins: -{% for origin in ['127.0.0.1', 'localhost', openshift.common.hostname, openshift.common.ip, openshift.common.public_hostname, openshift.common.public_ip] %} +{% for origin in ['127.0.0.1', 'localhost', openshift.common.ip, openshift.common.public_ip] | union(openshift.common.all_hostnames) | unique %} - {{ origin }} {% endfor %} {% for custom_origin in openshift.master.custom_cors_origins | default("") %} - {{ custom_origin }} {% endfor %} +{% for name in (named_certificates | map(attribute='names')) | list | oo_flatten %} + - {{ name }} +{% endfor %} {% if 'disabled_features' in openshift.master %} disabledFeatures: {{ openshift.master.disabled_features | to_json }} {% endif %} {% if openshift.master.embedded_dns | bool %} dnsConfig: bindAddress: {{ openshift.master.bind_addr }}:{{ openshift.master.dns_port }} + bindNetwork: tcp4 {% endif %} etcdClientInfo: ca: {{ "ca.crt" if (openshift.master.embedded_etcd | bool) else "master.etcd-ca.crt" }} @@ -70,16 +79,15 @@ kubeletClientInfo: port: 10250 {% if openshift.master.embedded_kube | bool %} kubernetesMasterConfig: +{% if not openshift.common.version_greater_than_3_1_or_1_1 | bool %} apiLevels: -{% if openshift.common.deployment_type == "enterprise" %} - v1beta3 -{% endif %} - v1 +{% endif %} apiServerArguments: {{ api_server_args if api_server_args is defined else 'null' }} controllerArguments: {{ controller_args if controller_args is defined else 'null' }} -{# TODO: support overriding masterCount #} - masterCount: 1 - masterIP: "" + masterCount: {{ openshift.master.master_count }} + masterIP: {{ openshift.common.ip }} podEvictionTimeout: "" proxyClientInfo: certFile: master.proxy-client.crt @@ -103,6 +111,7 @@ networkConfig: # serviceNetworkCIDR must match kubernetesMasterConfig.servicesSubnet serviceNetworkCIDR: {{ openshift.master.portal_net }} {% include 'v1_partials/oauthConfig.j2' %} +pauseControllers: false policyConfig: bootstrapPolicyFile: {{ openshift_master_policy }} openshiftInfrastructureNamespace: openshift-infra @@ -118,6 +127,7 @@ projectConfig: routingConfig: subdomain: "{{ openshift.master.default_subdomain | default("") }}" serviceAccountConfig: + limitSecretReferences: false managedNames: - default - builder @@ -128,8 +138,20 @@ serviceAccountConfig: - serviceaccounts.public.key servingInfo: bindAddress: {{ openshift.master.bind_addr }}:{{ openshift.master.api_port }} + bindNetwork: tcp4 certFile: master.server.crt clientCA: ca.crt keyFile: master.server.key maxRequestsInFlight: 500 requestTimeoutSeconds: 3600 +{% if named_certificates %} + namedCertificates: +{% for named_certificate in named_certificates %} + - certFile: {{ named_certificate['certfile'] }} + keyFile: {{ named_certificate['keyfile'] }} + names: +{% for name in named_certificate['names'] %} + - "{{ name }}" +{% endfor %} +{% endfor %} +{% endif %} diff --git a/roles/openshift_master/templates/sessionSecretsFile.yaml.v1.j2 b/roles/openshift_master/templates/sessionSecretsFile.yaml.v1.j2 new file mode 100644 index 000000000..d12d9db90 --- /dev/null +++ b/roles/openshift_master/templates/sessionSecretsFile.yaml.v1.j2 @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: SessionSecrets +secrets: +{% for secret in 
openshift_master_session_auth_secrets %} +- authentication: "{{ openshift_master_session_auth_secrets[loop.index0] }}" + encryption: "{{ openshift_master_session_encryption_secrets[loop.index0] }}" +{% endfor %} diff --git a/roles/openshift_master/vars/main.yml b/roles/openshift_master/vars/main.yml index ecdb4f883..534465451 100644 --- a/roles/openshift_master/vars/main.yml +++ b/roles/openshift_master/vars/main.yml @@ -2,6 +2,7 @@ openshift_master_config_dir: "{{ openshift.common.config_base }}/master" openshift_master_config_file: "{{ openshift_master_config_dir }}/master-config.yaml" openshift_master_scheduler_conf: "{{ openshift_master_config_dir }}/scheduler.json" +openshift_master_session_secrets_file: "{{ openshift_master_config_dir }}/session-secrets.yaml" openshift_master_policy: "{{ openshift_master_config_dir }}/policy.json" openshift_version: "{{ openshift_pkg_version | default('') }}" diff --git a/roles/openshift_master_ca/tasks/main.yml b/roles/openshift_master_ca/tasks/main.yml index cfd1ceabf..314f068e7 100644 --- a/roles/openshift_master_ca/tasks/main.yml +++ b/roles/openshift_master_ca/tasks/main.yml @@ -14,7 +14,7 @@ - name: Create the master certificates if they do not already exist command: > {{ openshift.common.admin_binary }} create-master-certs - --hostnames={{ openshift.common.all_hostnames | join(',') }} + --hostnames={{ master_hostnames | join(',') }} --master={{ openshift.master.api_url }} --public-master={{ openshift.master.public_api_url }} --cert-dir={{ openshift_master_config_dir }} --overwrite=false diff --git a/roles/openshift_master_certificates/tasks/main.yml b/roles/openshift_master_certificates/tasks/main.yml index 4b39b043a..13e5d7a4b 100644 --- a/roles/openshift_master_certificates/tasks/main.yml +++ b/roles/openshift_master_certificates/tasks/main.yml @@ -6,13 +6,9 @@ mode: 0700 with_items: masters_needing_certs -- file: - src: "{{ openshift_master_config_dir }}/{{ item.1 }}" - dest: "{{ openshift_generated_configs_dir }}/{{ item.0.master_cert_subdir }}/{{ item.1 }}" - state: hard - with_nested: - - masters_needing_certs - - - ca.crt +- set_fact: + master_certificates: + - ca.crt - ca.key - ca.serial.txt - admin.crt @@ -20,8 +16,6 @@ - admin.kubeconfig - master.kubelet-client.crt - master.kubelet-client.key - - "{{ 'master.proxy-client.crt' if openshift.common.version_greater_than_3_1_or_1_1 else omit }}" - - "{{ 'master.proxy-client.key' if openshift.common.version_greater_than_3_1_or_1_1 else omit }}" - openshift-master.crt - openshift-master.key - openshift-master.kubeconfig @@ -33,9 +27,17 @@ - openshift-router.kubeconfig - serviceaccounts.private.key - serviceaccounts.public.key + master_31_certificates: + - master.proxy-client.crt + - master.proxy-client.key -- debug: msg="{{ item.openshift.master.all_hostnames | join (',') }}" - with_items: masters_needing_certs +- file: + src: "{{ openshift_master_config_dir }}/{{ item.1 }}" + dest: "{{ openshift_generated_configs_dir }}/{{ item.0.master_cert_subdir }}/{{ item.1 }}" + state: hard + with_nested: + - masters_needing_certs + - "{{ master_certificates | union(master_31_certificates) if openshift.common.version_greater_than_3_1_or_1_1 | bool else master_certificates }}" - name: Create the master certificates if they do not already exist command: > diff --git a/roles/openshift_master_cluster/tasks/configure_deferred.yml b/roles/openshift_master_cluster/tasks/configure_deferred.yml deleted file mode 100644 index 3b416005b..000000000 --- a/roles/openshift_master_cluster/tasks/configure_deferred.yml 
+++ /dev/null @@ -1,8 +0,0 @@ ---- -- debug: msg="Deferring config" - -- name: Start and enable the master - service: - name: "{{ openshift.common.service_type }}-master" - state: started - enabled: yes diff --git a/roles/openshift_master_cluster/tasks/main.yml b/roles/openshift_master_cluster/tasks/main.yml index 315947183..6303a6e46 100644 --- a/roles/openshift_master_cluster/tasks/main.yml +++ b/roles/openshift_master_cluster/tasks/main.yml @@ -4,10 +4,7 @@ register: pcs_status changed_when: false failed_when: false - when: not openshift.master.cluster_defer_ha | bool + when: openshift.master.cluster_method == "pacemaker" - include: configure.yml when: "pcs_status | failed and 'Error: cluster is not currently running on this node' in pcs_status.stderr" - -- include: configure_deferred.yml - when: openshift.master.cluster_defer_ha | bool diff --git a/roles/openshift_node/meta/main.yml b/roles/openshift_node/meta/main.yml index c92008a77..9d40ae3b3 100644 --- a/roles/openshift_node/meta/main.yml +++ b/roles/openshift_node/meta/main.yml @@ -13,3 +13,4 @@ galaxy_info: - cloud dependencies: - { role: openshift_common } +- { role: docker } diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml index aea60b75c..d11bc5123 100644 --- a/roles/openshift_node/tasks/main.yml +++ b/roles/openshift_node/tasks/main.yml @@ -8,7 +8,7 @@ when: osn_cluster_dns_ip is not defined or not osn_cluster_dns_ip - fail: msg: "SELinux is disabled, This deployment type requires that SELinux is enabled." - when: (not ansible_selinux or ansible_selinux.status != 'enabled') and deployment_type in ['enterprise', 'online'] + when: (not ansible_selinux or ansible_selinux.status != 'enabled') and deployment_type in ['enterprise', 'online', 'atomic-enterprise', 'openshift-enterprise'] - name: Set node facts openshift_facts: @@ -22,16 +22,17 @@ deployment_type: "{{ openshift_deployment_type }}" - role: node local_facts: - labels: "{{ lookup('oo_option', 'openshift_node_labels') | default( openshift_node_labels | default(none), true) }}" annotations: "{{ openshift_node_annotations | default(none) }}" - registry_url: "{{ oreg_url | default(none) }}" debug_level: "{{ openshift_node_debug_level | default(openshift.common.debug_level) }}" - portal_net: "{{ openshift_master_portal_net | default(None) }}" - kubelet_args: "{{ openshift_node_kubelet_args | default(None) }}" - sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}" - schedulable: "{{ openshift_schedulable | default(openshift_scheduleable) | default(None) }}" docker_log_driver: "{{ lookup( 'oo_option' , 'docker_log_driver' ) | default('',True) }}" docker_log_options: "{{ lookup( 'oo_option' , 'docker_log_options' ) | default('',True) }}" + iptables_sync_period: "{{ openshift_node_iptables_sync_period | default(None) }}" + kubelet_args: "{{ openshift_node_kubelet_args | default(None) }}" + labels: "{{ lookup('oo_option', 'openshift_node_labels') | default( openshift_node_labels | default(none), true) }}" + portal_net: "{{ openshift_master_portal_net | default(None) }}" + registry_url: "{{ oreg_url | default(none) }}" + schedulable: "{{ openshift_schedulable | default(openshift_scheduleable) | default(None) }}" + sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}" storage_plugin_deps: "{{ osn_storage_plugin_deps | default(None) }}" # We have to add tuned-profiles in the same transaction otherwise we run into depsolving diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2 index 
4931d127e..7d2f506e3 100644 --- a/roles/openshift_node/templates/node.yaml.v1.j2 +++ b/roles/openshift_node/templates/node.yaml.v1.j2 @@ -4,6 +4,7 @@ dnsDomain: {{ osn_cluster_dns_domain }} dnsIP: {{ osn_cluster_dns_ip }} dockerConfig: execHandlerName: "" +iptablesSyncPeriod: "{{ openshift.node.iptables_sync_period }}" imageConfig: format: {{ openshift.node.registry_url }} latest: false @@ -22,6 +23,7 @@ networkConfig: {% if openshift.common.use_openshift_sdn %} networkPluginName: {{ openshift.common.sdn_network_plugin_name }} {% endif %} +nodeIP: {{ openshift.common.ip }} nodeName: {{ openshift.common.hostname | lower }} podManifestConfig: servingInfo: diff --git a/roles/openshift_repos/tasks/main.yaml b/roles/openshift_repos/tasks/main.yaml index 12e98b7a1..aa696ae12 100644 --- a/roles/openshift_repos/tasks/main.yaml +++ b/roles/openshift_repos/tasks/main.yaml @@ -8,7 +8,7 @@ # proper repos correctly. - assert: - that: openshift_deployment_type in known_openshift_deployment_types + that: openshift.common.deployment_type in known_openshift_deployment_types - name: Ensure libselinux-python is installed yum: diff --git a/roles/os_zabbix/vars/template_os_linux.yml b/roles/os_zabbix/vars/template_os_linux.yml index aeeec4b8d..04665be62 100644 --- a/roles/os_zabbix/vars/template_os_linux.yml +++ b/roles/os_zabbix/vars/template_os_linux.yml @@ -194,6 +194,16 @@ g_template_os_linux: lifetime: 1 description: "Dynamically register the filesystems" + - name: disc.disk + key: disc.disk + lifetime: 1 + description: "Dynamically register disks on a node" + + - name: disc.network + key: disc.network + lifetime: 1 + description: "Dynamically register network interfaces on a node" + zitemprototypes: - discoveryrule_key: disc.filesys name: "disc.filesys.full.{#OSO_FILESYS}" @@ -211,6 +221,42 @@ g_template_os_linux: applications: - Disk + - discoveryrule_key: disc.disk + name: "TPS (IOPS) for disk {#OSO_DISK}" + key: "disc.disk.tps[{#OSO_DISK}]" + value_type: int + description: "PCP disk.dev.totals metric measured over a period of time. This shows how many disk transactions per second the disk is using" + applications: + - Disk + + - discoveryrule_key: disc.disk + name: "Percent Utilized for disk {#OSO_DISK}" + key: "disc.disk.putil[{#OSO_DISK}]" + value_type: float + description: "PCP disk.dev.avactive metric measured over a period of time. This is the '%util' in the iostat command" + applications: + - Disk + + - discoveryrule_key: disc.network + name: "Bytes per second IN on network interface {#OSO_NET_INTERFACE}" + key: "disc.network.in.bytes[{#OSO_NET_INTERFACE}]" + value_type: int + units: B + delta: 1 + description: "PCP network.interface.in.bytes metric. This is setup as a delta in Zabbix to measure the speed per second" + applications: + - Network + + - discoveryrule_key: disc.network + name: "Bytes per second OUT on network interface {#OSO_NET_INTERFACE}" + key: "disc.network.out.bytes[{#OSO_NET_INTERFACE}]" + value_type: int + units: B + delta: 1 + description: "PCP network.interface.out.bytes metric. 
This is setup as a delta in Zabbix to measure the speed per second" + applications: + - Network + ztriggerprototypes: - name: 'Filesystem: {#OSO_FILESYS} has less than 15% free disk space on {HOST.NAME}' expression: '{Template OS Linux:disc.filesys.full[{#OSO_FILESYS}].last()}>85' diff --git a/roles/rhel_subscribe/tasks/main.yml b/roles/rhel_subscribe/tasks/main.yml index 8fb2fc042..30c0920a1 100644 --- a/roles/rhel_subscribe/tasks/main.yml +++ b/roles/rhel_subscribe/tasks/main.yml @@ -6,19 +6,26 @@ - set_fact: rhel_subscription_user: "{{ lookup('oo_option', 'rhel_subscription_user') | default(rhsub_user, True) | default(omit, True) }}" rhel_subscription_pass: "{{ lookup('oo_option', 'rhel_subscription_pass') | default(rhsub_pass, True) | default(omit, True) }}" + rhel_subscription_server: "{{ lookup('oo_option', 'rhel_subscription_server') | default(rhsub_server) }}" - fail: msg: "This role is only supported for Red Hat hosts" when: ansible_distribution != 'RedHat' - fail: - msg: Either rsub_user or the rhel_subscription_user env variable are required for this role. + msg: Either rhsub_user or the rhel_subscription_user env variable are required for this role. when: rhel_subscription_user is not defined - fail: - msg: Either rsub_pass or the rhel_subscription_pass env variable are required for this role. + msg: Either rhsub_pass or the rhel_subscription_pass env variable are required for this role. when: rhel_subscription_pass is not defined +- name: Satellite preparation + command: "rpm -Uvh http://{{ rhel_subscription_server }}/pub/katello-ca-consumer-latest.noarch.rpm" + args: + creates: /etc/rhsm/ca/katello-server-ca.pem + when: rhel_subscription_server is defined and rhel_subscription_server + - name: RedHat subscriptions redhat_subscription: username: "{{ rhel_subscription_user }}" diff --git a/roles/tito/README.md b/roles/tito/README.md new file mode 100644 index 000000000..c4e2856dc --- /dev/null +++ b/roles/tito/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +This role manages Tito. 
+ +https://github.com/dgoodwin/tito + +Requirements +------------ + +None + +Role Variables +-------------- + +None + +Dependencies +------------ + +None + +Example Playbook +---------------- + + - hosts: servers + roles: + - role: tito + +License +------- + +Apache License, Version 2.0 + +Author Information +------------------ + +Thomas Wiest diff --git a/roles/tito/defaults/main.yml b/roles/tito/defaults/main.yml new file mode 100644 index 000000000..dd7cd269e --- /dev/null +++ b/roles/tito/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for tito diff --git a/roles/tito/handlers/main.yml b/roles/tito/handlers/main.yml new file mode 100644 index 000000000..e9ce609d5 --- /dev/null +++ b/roles/tito/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for tito diff --git a/roles/tito/meta/main.yml b/roles/tito/meta/main.yml new file mode 100644 index 000000000..fb121c08e --- /dev/null +++ b/roles/tito/meta/main.yml @@ -0,0 +1,14 @@ +--- +galaxy_info: + author: Thomas Wiest + description: Manages Tito + company: Red Hat + license: Apache License, Version 2.0 + min_ansible_version: 1.2 + platforms: + - name: EL + versions: + - 7 + categories: + - packaging +dependencies: [] diff --git a/roles/tito/tasks/main.yml b/roles/tito/tasks/main.yml new file mode 100644 index 000000000..f7b4ef363 --- /dev/null +++ b/roles/tito/tasks/main.yml @@ -0,0 +1,4 @@ +--- +- yum: + name: tito + state: present diff --git a/roles/tito/vars/main.yml b/roles/tito/vars/main.yml new file mode 100644 index 000000000..8a1aafc41 --- /dev/null +++ b/roles/tito/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for tito diff --git a/test/units/README.md b/test/units/README.md index 3bed227eb..78a02c3ea 100644 --- a/test/units/README.md +++ b/test/units/README.md @@ -4,4 +4,4 @@ These should be run by sourcing the env-setup: $ source test/env-setup Then navigate to the test/units/ directory. 
-$ python -m unittest multi_ec2_test +$ python -m unittest multi_inventory_test diff --git a/test/units/multi_inventory_test.py b/test/units/multi_inventory_test.py new file mode 100755 index 000000000..168cd82b7 --- /dev/null +++ b/test/units/multi_inventory_test.py @@ -0,0 +1,114 @@ +#!/usr/bin/env python2 +''' + Unit tests for MultiInventory +''' + +import unittest +import multi_inventory + +# Removing invalid variable names for tests so that I can +# keep them brief +# pylint: disable=invalid-name +class MultiInventoryTest(unittest.TestCase): + ''' + Test class for multiInventory + ''' + +# def setUp(self): +# '''setup method''' +# pass + + def test_merge_simple_1(self): + '''Testing a simple merge of 2 dictionaries''' + a = {"key1" : 1} + b = {"key1" : 2} + result = {} + _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]] + self.assertEqual(result, {"key1": [1, 2]}) + + def test_merge_b_empty(self): + '''Testing a merge of an emtpy dictionary''' + a = {"key1" : 1} + b = {} + result = {} + _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]] + self.assertEqual(result, {"key1": 1}) + + def test_merge_a_empty(self): + '''Testing a merge of an emtpy dictionary''' + b = {"key1" : 1} + a = {} + result = {} + _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]] + self.assertEqual(result, {"key1": 1}) + + def test_merge_hash_array(self): + '''Testing a merge of a dictionary and a dictionary with an array''' + a = {"key1" : {"hasha": 1}} + b = {"key1" : [1, 2]} + result = {} + _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]] + self.assertEqual(result, {"key1": [{"hasha": 1}, 1, 2]}) + + def test_merge_array_hash(self): + '''Testing a merge of a dictionary with an array and a dictionary with a hash''' + a = {"key1" : [1, 2]} + b = {"key1" : {"hasha": 1}} + result = {} + _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]] + self.assertEqual(result, {"key1": [1, 2, {"hasha": 1}]}) + + def test_merge_keys_1(self): + '''Testing a merge on a dictionary for keys''' + a = {"key1" : [1, 2], "key2" : {"hasha": 2}} + b = {"key2" : {"hashb": 1}} + result = {} + _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]] + self.assertEqual(result, {"key1": [1, 2], "key2": {"hasha": 2, "hashb": 1}}) + + def test_merge_recursive_1(self): + '''Testing a recursive merge''' + a = {"a" : {"b": {"c": 1}}} + b = {"a" : {"b": {"c": 2}}} + result = {} + _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]] + self.assertEqual(result, {"a": {"b": {"c": [1, 2]}}}) + + def test_merge_recursive_array_item(self): + '''Testing a recursive merge for an array''' + a = {"a" : {"b": {"c": [1]}}} + b = {"a" : {"b": {"c": 2}}} + result = {} + _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]] + self.assertEqual(result, {"a": {"b": {"c": [1, 2]}}}) + + def test_merge_recursive_hash_item(self): + '''Testing a recursive merge for a hash''' + a = {"a" : {"b": {"c": {"d": 1}}}} + b = {"a" : {"b": {"c": 2}}} + result = {} + _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]] + self.assertEqual(result, {"a": {"b": {"c": [{"d": 1}, 2]}}}) + + def test_merge_recursive_array_hash(self): + '''Testing a recursive merge for an array and a hash''' + a = {"a" : [{"b": {"c": 1}}]} + b = {"a" : {"b": {"c": 1}}} + result = {} + _ = 
[multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]] + self.assertEqual(result, {"a": [{"b": {"c": 1}}]}) + + def test_merge_recursive_hash_array(self): + '''Testing a recursive merge for an array and a hash''' + a = {"a" : {"b": {"c": 1}}} + b = {"a" : [{"b": {"c": 1}}]} + result = {} + _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]] + self.assertEqual(result, {"a": [{"b": {"c": 1}}]}) + +# def tearDown(self): +# '''TearDown method''' +# pass + +if __name__ == "__main__": + unittest.main() diff --git a/test/units/mutli_ec2_test.py b/test/units/mutli_ec2_test.py deleted file mode 100755 index 95df93cd2..000000000 --- a/test/units/mutli_ec2_test.py +++ /dev/null @@ -1,95 +0,0 @@ -#!/usr/bin/env python2 - -import unittest -import sys -import os -import sys -import multi_ec2 - -class MultiEc2Test(unittest.TestCase): - - def setUp(self): - pass - - def test_merge_simple_1(self): - a = {"key1" : 1} - b = {"key1" : 2} - result = {} - [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]] - self.assertEqual(result, {"key1": [1,2]}) - - def test_merge_b_empty(self): - a = {"key1" : 1} - b = {} - result = {} - [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]] - self.assertEqual(result, {"key1": 1}) - - def test_merge_a_empty(self): - b = {"key1" : 1} - a = {} - result = {} - [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]] - self.assertEqual(result, {"key1": 1}) - - def test_merge_hash_array(self): - a = {"key1" : {"hasha": 1}} - b = {"key1" : [1,2]} - result = {} - [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]] - self.assertEqual(result, {"key1": [{"hasha": 1}, 1,2]}) - - def test_merge_array_hash(self): - a = {"key1" : [1,2]} - b = {"key1" : {"hasha": 1}} - result = {} - [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]] - self.assertEqual(result, {"key1": [1,2, {"hasha": 1}]}) - - def test_merge_keys_1(self): - a = {"key1" : [1,2], "key2" : {"hasha": 2}} - b = {"key2" : {"hashb": 1}} - result = {} - [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]] - self.assertEqual(result, {"key1": [1,2], "key2": {"hasha": 2, "hashb": 1}}) - - def test_merge_recursive_1(self): - a = {"a" : {"b": {"c": 1}}} - b = {"a" : {"b": {"c": 2}}} - result = {} - [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]] - self.assertEqual(result, {"a": {"b": {"c": [1,2]}}}) - - def test_merge_recursive_array_item(self): - a = {"a" : {"b": {"c": [1]}}} - b = {"a" : {"b": {"c": 2}}} - result = {} - [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]] - self.assertEqual(result, {"a": {"b": {"c": [1,2]}}}) - - def test_merge_recursive_hash_item(self): - a = {"a" : {"b": {"c": {"d": 1}}}} - b = {"a" : {"b": {"c": 2}}} - result = {} - [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]] - self.assertEqual(result, {"a": {"b": {"c": [{"d": 1}, 2]}}}) - - def test_merge_recursive_array_hash(self): - a = {"a" : [{"b": {"c": 1}}]} - b = {"a" : {"b": {"c": 1}}} - result = {} - [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]] - self.assertEqual(result, {"a": [{"b": {"c": 1}}]}) - - def test_merge_recursive_hash_array(self): - a = {"a" : {"b": {"c": 1}}} - b = {"a" : [{"b": {"c": 1}}]} - result = {} - [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]] - self.assertEqual(result, {"a": [{"b": {"c": 1}}]}) - - def tearDown(self): - pass - -if __name__ == "__main__": - unittest.main() diff --git 
a/utils/docs/config.md b/utils/docs/config.md index 9399409dd..2729f8d37 100644 --- a/utils/docs/config.md +++ b/utils/docs/config.md @@ -7,6 +7,7 @@ The default location this config file will be written to ~/.config/openshift/ins ## Example ``` +version: v1 variant: openshift-enterprise variant_version: 3.0 ansible_ssh_user: root @@ -18,20 +19,27 @@ hosts: master: true node: true containerized: true + connect_to: 24.222.0.1 - ip: 10.0.0.2 hostname: node1-private.example.com public_ip: 24.222.0.2 public_hostname: node1.example.com node: true + connect_to: 10.0.0.2 - ip: 10.0.0.3 hostname: node2-private.example.com public_ip: 24.222.0.3 public_hostname: node2.example.com node: true + connect_to: 10.0.0.3 ``` ## Primary Settings +### version + +Indicates the version of configuration this file was written with. Current implementation is v1. + ### variant The OpenShift variant to install. Currently valid options are: diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py index ffcfc5db2..3c3f45c3b 100644 --- a/utils/src/ooinstall/cli_installer.py +++ b/utils/src/ooinstall/cli_installer.py @@ -11,7 +11,7 @@ from ooinstall import OOConfig from ooinstall.oo_config import Host from ooinstall.variants import find_variant, get_variant_version_combos -DEFAULT_ANSIBLE_CONFIG = '/usr/share/atomic-openshift-util/ansible.cfg' +DEFAULT_ANSIBLE_CONFIG = '/usr/share/atomic-openshift-utils/ansible.cfg' DEFAULT_PLAYBOOK_DIR = '/usr/share/ansible/openshift-ansible/' def validate_ansible_dir(path): @@ -101,29 +101,26 @@ http://docs.openshift.com/enterprise/latest/architecture/infrastructure_componen hosts = [] more_hosts = True - ip_regex = re.compile(r'^\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}$') - while more_hosts: host_props = {} hostname_or_ip = click.prompt('Enter hostname or IP address:', default='', value_proc=validate_prompt_hostname) - if ip_regex.match(hostname_or_ip): - host_props['ip'] = hostname_or_ip - else: - host_props['hostname'] = hostname_or_ip + host_props['connect_to'] = hostname_or_ip host_props['master'] = click.confirm('Will this host be an OpenShift Master?') host_props['node'] = True - rpm_or_container = click.prompt('Will this host be RPM or Container based (rpm/container)?', - type=click.Choice(['rpm', 'container']), - default='rpm') - if rpm_or_container == 'container': - host_props['containerized'] = True - else: - host_props['containerized'] = False + #TODO: Reenable this option once container installs are out of tech preview + #rpm_or_container = click.prompt('Will this host be RPM or Container based (rpm/container)?', + # type=click.Choice(['rpm', 'container']), + # default='rpm') + #if rpm_or_container == 'container': + # host_props['containerized'] = True + #else: + # host_props['containerized'] = False + host_props['containerized'] = False host = Host(**host_props) @@ -150,7 +147,7 @@ Plese confirm that they are correct before moving forward. notes = """ Format: -IP,public IP,hostname,public hostname +connect_to,IP,public IP,hostname,public hostname Notes: * The installation host is the hostname from the installer's perspective. 
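For illustration, the host-collection change above can be summarized as: whatever the user types at the prompt is stored verbatim as `connect_to`, and the installer no longer tries to classify it as an IP or a hostname. A minimal sketch of that idea follows (the helper name `collect_host_props` is hypothetical and not part of ooinstall; the real prompts come from click):

```python
def collect_host_props(hostname_or_ip, is_master):
    """Build the host entry the installer writes to its config file.

    The value entered by the user becomes 'connect_to', the authoritative
    handle used for the Ansible inventory; openshift_facts later fills in
    ip/hostname/public_ip/public_hostname.
    """
    return {
        'connect_to': hostname_or_ip,  # stored verbatim, no IP-vs-hostname regex
        'master': is_master,
        'node': True,                  # every host currently also runs a node
        'containerized': False,        # container installs are disabled for now
    }


# collect_host_props('master.example.com', True)
# -> {'connect_to': 'master.example.com', 'master': True,
#     'node': True, 'containerized': False}
```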
@@ -168,20 +165,20 @@ Notes: default_facts_lines = [] default_facts = {} - validated_facts = {} for h in hosts: - default_facts[h] = {} - h.ip = callback_facts[str(h)]["common"]["ip"] - h.public_ip = callback_facts[str(h)]["common"]["public_ip"] - h.hostname = callback_facts[str(h)]["common"]["hostname"] - h.public_hostname = callback_facts[str(h)]["common"]["public_hostname"] - - validated_facts[h] = {} - default_facts_lines.append(",".join([h.ip, + default_facts[h.connect_to] = {} + h.ip = callback_facts[h.connect_to]["common"]["ip"] + h.public_ip = callback_facts[h.connect_to]["common"]["public_ip"] + h.hostname = callback_facts[h.connect_to]["common"]["hostname"] + h.public_hostname = callback_facts[h.connect_to]["common"]["public_hostname"] + + default_facts_lines.append(",".join([h.connect_to, + h.ip, h.public_ip, h.hostname, h.public_hostname])) - output = "%s\n%s" % (output, ",".join([h.ip, + output = "%s\n%s" % (output, ",".join([h.connect_to, + h.ip, h.public_ip, h.hostname, h.public_hostname])) @@ -191,7 +188,7 @@ Notes: facts_confirmed = click.confirm("Do the above facts look correct?") if not facts_confirmed: message = """ -Edit %s with the desired values and rerun atomic-openshift-installer with --unattended . +Edit %s with the desired values and run `atomic-openshift-installer --unattended install` to restart the install. """ % oo_cfg.config_path click.echo(message) # Make sure we actually write out the config file. @@ -316,14 +313,16 @@ Add new nodes here def get_installed_hosts(hosts, callback_facts): installed_hosts = [] for host in hosts: - if(host.name in callback_facts.keys() - and 'common' in callback_facts[host.name].keys() - and callback_facts[host.name]['common'].get('version', '') - and callback_facts[host.name]['common'].get('version', '') != 'None'): + if(host.connect_to in callback_facts.keys() + and 'common' in callback_facts[host.connect_to].keys() + and callback_facts[host.connect_to]['common'].get('version', '') + and callback_facts[host.connect_to]['common'].get('version', '') != 'None'): installed_hosts.append(host) return installed_hosts -def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force): +# pylint: disable=too-many-branches +# This pylint error will be corrected shortly in separate PR. +def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force, verbose): # Copy the list of existing hosts so we can remove any already installed nodes. hosts_to_run_on = list(oo_cfg.hosts) @@ -331,7 +330,24 @@ def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force): # Check if master or nodes already have something installed installed_hosts = get_installed_hosts(oo_cfg.hosts, callback_facts) if len(installed_hosts) > 0: - # present a message listing already installed hosts + click.echo('Installed environment detected.') + # This check has to happen before we start removing hosts later in this method + if not force: + if not unattended: + click.echo('By default the installer only adds new nodes to an installed environment.') + response = click.prompt('Do you want to (1) only add additional nodes or ' \ + '(2) reinstall the existing hosts ' \ + 'potentially erasing any custom changes?', + type=int) + # TODO: this should be reworked with error handling. + # Click can certainly do this for us. + # This should be refactored as soon as we add a 3rd option. 
+ if response == 1: + force = False + if response == 2: + force = True + + # present a message listing already installed hosts and remove hosts if needed for host in installed_hosts: if host.master: click.echo("{} is already an OpenShift Master".format(host)) @@ -339,32 +355,42 @@ def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force): # new nodes. elif host.node: click.echo("{} is already an OpenShift Node".format(host)) - hosts_to_run_on.remove(host) - # for unattended either continue if they force install or exit if they didn't - if unattended: - if not force: - click.echo('Installed environment detected and no additional nodes specified: ' \ - 'aborting. If you want a fresh install, use --force') - sys.exit(1) - # for attended ask the user what to do + # force is only used for reinstalls so we don't want to remove + # anything. + if not force: + hosts_to_run_on.remove(host) + + # Handle the cases where we know about uninstalled systems + new_hosts = set(hosts_to_run_on) - set(installed_hosts) + if len(new_hosts) > 0: + for new_host in new_hosts: + click.echo("{} is currently uninstalled".format(new_host)) + + # Fall through + click.echo('Adding additional nodes...') else: - click.echo('Installed environment detected and no additional nodes specified. ') - response = click.prompt('Do you want to (1) add more nodes or ' \ - '(2) perform a clean install?', type=int) - if response == 1: # add more nodes - new_nodes = collect_new_nodes() - - hosts_to_run_on.extend(new_nodes) - oo_cfg.hosts.extend(new_nodes) - - openshift_ansible.set_config(oo_cfg) - callback_facts, error = openshift_ansible.default_facts(oo_cfg.hosts) - if error: - click.echo("There was a problem fetching the required information. " \ - "See {} for details.".format(oo_cfg.settings['ansible_log_path'])) + if unattended: + if not force: + click.echo('Installed environment detected and no additional nodes specified: ' \ + 'aborting. If you want a fresh install, use ' \ + '`atomic-openshift-installer install --force`') sys.exit(1) else: - pass # proceeding as normal should do a clean install + if not force: + new_nodes = collect_new_nodes() + + hosts_to_run_on.extend(new_nodes) + oo_cfg.hosts.extend(new_nodes) + + openshift_ansible.set_config(oo_cfg) + click.echo('Gathering information from hosts...') + callback_facts, error = openshift_ansible.default_facts(oo_cfg.hosts, verbose) + if error: + click.echo("There was a problem fetching the required information. " \ + "See {} for details.".format(oo_cfg.settings['ansible_log_path'])) + sys.exit(1) + else: + pass # proceeding as normal should do a clean install return hosts_to_run_on, callback_facts @@ -385,7 +411,7 @@ def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force): dir_okay=True, readable=True), # callback=validate_ansible_dir, - default='/usr/share/openshift-ansible/', + default=DEFAULT_PLAYBOOK_DIR, envvar='OO_ANSIBLE_PLAYBOOK_DIRECTORY') @click.option('--ansible-config', type=click.Path(file_okay=True, @@ -399,18 +425,23 @@ def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force): writable=True, readable=True), default="/tmp/ansible.log") +@click.option('-v', '--verbose', + is_flag=True, default=False) #pylint: disable=too-many-arguments # Main CLI entrypoint, not much we can do about too many arguments. 
-def cli(ctx, unattended, configuration, ansible_playbook_directory, ansible_config, ansible_log_path): +def cli(ctx, unattended, configuration, ansible_playbook_directory, ansible_config, ansible_log_path, verbose): """ - The main click CLI module. Responsible for handling most common CLI options, - assigning any defaults and adding to the context for the sub-commands. + atomic-openshift-installer makes the process for installing OSE or AEP easier by interactively gathering the data needed to run on each host. + It can also be run in unattended mode if provided with a configuration file. + + Further reading: https://docs.openshift.com/enterprise/latest/install_config/install/quick_install.html """ ctx.obj = {} ctx.obj['unattended'] = unattended ctx.obj['configuration'] = configuration ctx.obj['ansible_config'] = ansible_config ctx.obj['ansible_log_path'] = ansible_log_path + ctx.obj['verbose'] = verbose oo_cfg = OOConfig(ctx.obj['configuration']) @@ -441,6 +472,7 @@ def cli(ctx, unattended, configuration, ansible_playbook_directory, ansible_conf @click.pass_context def uninstall(ctx): oo_cfg = ctx.obj['oo_cfg'] + verbose = ctx.obj['verbose'] if len(oo_cfg.hosts) == 0: click.echo("No hosts defined in: %s" % oo_cfg['configuration']) @@ -450,14 +482,53 @@ def uninstall(ctx): if not ctx.obj['unattended']: # Prompt interactively to confirm: for host in oo_cfg.hosts: - click.echo(" * %s" % host.name) + click.echo(" * %s" % host.connect_to) proceed = click.confirm("\nDo you wish to proceed?") if not proceed: click.echo("Uninstall cancelled.") sys.exit(0) - openshift_ansible.run_uninstall_playbook() + openshift_ansible.run_uninstall_playbook(verbose) + + +@click.command() +@click.pass_context +def upgrade(ctx): + oo_cfg = ctx.obj['oo_cfg'] + verbose = ctx.obj['verbose'] + + if len(oo_cfg.hosts) == 0: + click.echo("No hosts defined in: %s" % oo_cfg.config_path) + sys.exit(1) + + # Update config to reflect the version we're targetting, we'll write + # to disk once ansible completes successfully, not before. + old_variant = oo_cfg.settings['variant'] + old_version = oo_cfg.settings['variant_version'] + if oo_cfg.settings['variant'] == 'enterprise': + oo_cfg.settings['variant'] = 'openshift-enterprise' + version = find_variant(oo_cfg.settings['variant'])[1] + oo_cfg.settings['variant_version'] = version.name + click.echo("Openshift will be upgraded from %s %s to %s %s on the following hosts:\n" % ( + old_variant, old_version, oo_cfg.settings['variant'], + oo_cfg.settings['variant_version'])) + for host in oo_cfg.hosts: + click.echo(" * %s" % host.connect_to) + if not ctx.obj['unattended']: + # Prompt interactively to confirm: + proceed = click.confirm("\nDo you wish to proceed?") + if not proceed: + click.echo("Upgrade cancelled.") + sys.exit(0) + + retcode = openshift_ansible.run_upgrade_playbook(verbose) + if retcode > 0: + click.echo("Errors encountered during upgrade, please check %s." % + oo_cfg.settings['ansible_log_path']) + else: + oo_cfg.save_to_disk() + click.echo("Upgrade completed! 
Rebooting all hosts is recommended.") @click.command() @@ -465,6 +536,7 @@ def uninstall(ctx): @click.pass_context def install(ctx, force): oo_cfg = ctx.obj['oo_cfg'] + verbose = ctx.obj['verbose'] if ctx.obj['unattended']: error_if_missing_info(oo_cfg) @@ -472,13 +544,15 @@ def install(ctx, force): oo_cfg = get_missing_info_from_user(oo_cfg) click.echo('Gathering information from hosts...') - callback_facts, error = openshift_ansible.default_facts(oo_cfg.hosts) + callback_facts, error = openshift_ansible.default_facts(oo_cfg.hosts, + verbose) if error: click.echo("There was a problem fetching the required information. " \ "Please see {} for details.".format(oo_cfg.settings['ansible_log_path'])) sys.exit(1) - hosts_to_run_on, callback_facts = get_hosts_to_run_on(oo_cfg, callback_facts, ctx.obj['unattended'], force) + hosts_to_run_on, callback_facts = get_hosts_to_run_on( + oo_cfg, callback_facts, ctx.obj['unattended'], force, verbose) click.echo('Writing config to: %s' % oo_cfg.config_path) @@ -500,7 +574,7 @@ If changes are needed to the values recorded by the installer please update {}. confirm_continue(message) error = openshift_ansible.run_main_playbook(oo_cfg.hosts, - hosts_to_run_on) + hosts_to_run_on, verbose) if error: # The bootstrap script will print out the log location. message = """ @@ -523,6 +597,7 @@ http://docs.openshift.com/enterprise/latest/admin_guide/overview.html click.pause() cli.add_command(install) +cli.add_command(upgrade) cli.add_command(uninstall) if __name__ == '__main__': diff --git a/utils/src/ooinstall/oo_config.py b/utils/src/ooinstall/oo_config.py index a2f53cf78..9c97e6e93 100644 --- a/utils/src/ooinstall/oo_config.py +++ b/utils/src/ooinstall/oo_config.py @@ -12,6 +12,7 @@ PERSIST_SETTINGS = [ 'ansible_log_path', 'variant', 'variant_version', + 'version', ] REQUIRED_FACTS = ['ip', 'public_ip', 'hostname', 'public_hostname'] @@ -34,6 +35,7 @@ class Host(object): self.hostname = kwargs.get('hostname', None) self.public_ip = kwargs.get('public_ip', None) self.public_hostname = kwargs.get('public_hostname', None) + self.connect_to = kwargs.get('connect_to', None) # Should this host run as an OpenShift master: self.master = kwargs.get('master', False) @@ -42,30 +44,25 @@ class Host(object): self.node = kwargs.get('node', False) self.containerized = kwargs.get('containerized', False) - if self.ip is None and self.hostname is None: - raise OOConfigInvalidHostError("You must specify either 'ip' or 'hostname'") + if self.connect_to is None: + raise OOConfigInvalidHostError("You must specify either and 'ip' " \ + "or 'hostname' to connect to.") if self.master is False and self.node is False: raise OOConfigInvalidHostError( "You must specify each host as either a master or a node.") - # Hosts can be specified with an ip, hostname, or both. However we need - # something authoritative we can connect to and refer to the host by. - # Preference given to the IP if specified as this is more specific. - # We know one must be set by this point. - self.name = self.ip if self.ip is not None else self.hostname - def __str__(self): - return self.name + return self.connect_to def __repr__(self): - return self.name + return self.connect_to def to_dict(self): """ Used when exporting to yaml. 
""" d = {} for prop in ['ip', 'hostname', 'public_ip', 'public_hostname', - 'master', 'node', 'containerized']: + 'master', 'node', 'containerized', 'connect_to']: # If the property is defined (not None or False), export it: if getattr(self, prop): d[prop] = getattr(self, prop) @@ -73,7 +70,6 @@ class Host(object): class OOConfig(object): - new_config = True default_dir = os.path.normpath( os.environ.get('XDG_CONFIG_HOME', os.environ['HOME'] + '/.config/') + '/openshift/') @@ -86,19 +82,22 @@ class OOConfig(object): self.config_path = os.path.normpath(self.default_dir + self.default_file) self.settings = {} - self.read_config() - self.set_defaults() + self._read_config() + self._set_defaults() - def read_config(self, is_new=False): + def _read_config(self): self.hosts = [] try: - new_settings = None if os.path.exists(self.config_path): cfgfile = open(self.config_path, 'r') - new_settings = yaml.safe_load(cfgfile.read()) + self.settings = yaml.safe_load(cfgfile.read()) cfgfile.close() - if new_settings: - self.settings = new_settings + + # Use the presence of a Description as an indicator this is + # a legacy config file: + if 'Description' in self.settings: + self._upgrade_legacy_config() + # Parse the hosts into DTO objects: if 'hosts' in self.settings: for host in self.settings['hosts']: @@ -114,9 +113,31 @@ class OOConfig(object): ferr.strerror)) except yaml.scanner.ScannerError: raise OOConfigFileError('Config file "{}" is not a valid YAML document'.format(self.config_path)) - self.new_config = is_new - def set_defaults(self): + def _upgrade_legacy_config(self): + new_hosts = [] + remove_settings = ['validated_facts', 'Description', 'Name', + 'Subscription', 'Vendor', 'Version', 'masters', 'nodes'] + + if 'validated_facts' in self.settings: + for key, value in self.settings['validated_facts'].iteritems(): + value['connect_to'] = key + if 'masters' in self.settings and key in self.settings['masters']: + value['master'] = True + if 'nodes' in self.settings and key in self.settings['nodes']: + value['node'] = True + new_hosts.append(value) + self.settings['hosts'] = new_hosts + + for s in remove_settings: + if s in self.settings: + del self.settings[s] + + # A legacy config implies openshift-enterprise 3.0: + self.settings['variant'] = 'openshift-enterprise' + self.settings['variant_version'] = '3.0' + + def _set_defaults(self): if 'ansible_inventory_directory' not in self.settings: self.settings['ansible_inventory_directory'] = \ @@ -125,6 +146,8 @@ class OOConfig(object): os.makedirs(self.settings['ansible_inventory_directory']) if 'ansible_plugins_directory' not in self.settings: self.settings['ansible_plugins_directory'] = resource_filename(__name__, 'ansible_plugins') + if 'version' not in self.settings: + self.settings['version'] = 'v1' if 'ansible_callback_facts_yaml' not in self.settings: self.settings['ansible_callback_facts_yaml'] = '%s/callback_facts.yaml' % \ @@ -158,7 +181,7 @@ class OOConfig(object): if not getattr(host, required_fact): missing_facts.append(required_fact) if len(missing_facts) > 0: - result[host.name] = missing_facts + result[host.connect_to] = missing_facts return result def save_to_disk(self): @@ -190,6 +213,6 @@ class OOConfig(object): def get_host(self, name): for host in self.hosts: - if host.name == name: + if host.connect_to == name: return host return None diff --git a/utils/src/ooinstall/openshift_ansible.py b/utils/src/ooinstall/openshift_ansible.py index 0def72cfd..372f27bda 100644 --- a/utils/src/ooinstall/openshift_ansible.py +++ 
b/utils/src/ooinstall/openshift_ansible.py @@ -16,10 +16,8 @@ def set_config(cfg): CFG = cfg def generate_inventory(hosts): - print hosts global CFG - installer_host = socket.gethostname() base_inventory_path = CFG.settings['ansible_inventory_path'] base_inventory = open(base_inventory_path, 'w') base_inventory.write('\n[OSEv3:children]\nmasters\nnodes\n') @@ -33,25 +31,18 @@ def generate_inventory(hosts): version=CFG.settings.get('variant_version', None))[1] base_inventory.write('deployment_type={}\n'.format(ver.ansible_key)) - if 'OO_INSTALL_DEVEL_REGISTRY' in os.environ: - base_inventory.write('oreg_url=rcm-img-docker01.build.eng.bos.redhat.com:' - '5001/openshift3/ose-${component}:${version}\n') - if 'OO_INSTALL_PUDDLE_REPO_ENABLE' in os.environ: - base_inventory.write("openshift_additional_repos=[{'id': 'ose-devel', " + if 'OO_INSTALL_ADDITIONAL_REGISTRIES' in os.environ: + base_inventory.write('cli_docker_additional_registries={}\n' + .format(os.environ['OO_INSTALL_ADDITIONAL_REGISTRIES'])) + if 'OO_INSTALL_INSECURE_REGISTRIES' in os.environ: + base_inventory.write('cli_docker_insecure_registries={}\n' + .format(os.environ['OO_INSTALL_INSECURE_REGISTRIES'])) + if 'OO_INSTALL_PUDDLE_REPO' in os.environ: + # We have to double the '{' here for literals + base_inventory.write("openshift_additional_repos=[{{'id': 'ose-devel', " "'name': 'ose-devel', " - "'baseurl': 'http://buildvm-devops.usersys.redhat.com" - "/puddle/build/AtomicOpenShift/3.1/latest/RH7-RHAOS-3.1/$basearch/os', " - "'enabled': 1, 'gpgcheck': 0}]\n") - if 'OO_INSTALL_STAGE_REGISTRY' in os.environ: - base_inventory.write('oreg_url=registry.access.stage.redhat.com/openshift3/ose-${component}:${version}\n') - - if any(host.hostname == installer_host or host.public_hostname == installer_host - for host in hosts): - no_pwd_sudo = subprocess.call(['sudo', '-v', '--non-interactive']) - if no_pwd_sudo == 1: - print 'The atomic-openshift-installer requires sudo access without a password.' - sys.exit(1) - base_inventory.write("ansible_connection=local\n") + "'baseurl': '{}', " + "'enabled': 1, 'gpgcheck': 0}}]\n".format(os.environ['OO_INSTALL_PUDDLE_REPO'])) base_inventory.write('\n[masters]\n') masters = (host for host in hosts if host.master) @@ -73,6 +64,7 @@ def generate_inventory(hosts): def write_host(host, inventory, scheduleable=True): global CFG + facts = '' if host.ip: facts += ' openshift_ip={}'.format(host.ip) @@ -86,19 +78,30 @@ def write_host(host, inventory, scheduleable=True): # Technically only nodes will ever need this. if not scheduleable: facts += ' openshift_scheduleable=False' - inventory.write('{} {}\n'.format(host, facts)) + installer_host = socket.gethostname() + if installer_host in [host.connect_to, host.hostname, host.public_hostname]: + facts += ' ansible_connection=local' + if os.geteuid() != 0: + no_pwd_sudo = subprocess.call(['sudo', '-n', 'echo', 'openshift']) + if no_pwd_sudo == 1: + print 'The atomic-openshift-installer requires sudo access without a password.' + sys.exit(1) + facts += ' ansible_become=true' + + inventory.write('{} {}\n'.format(host.connect_to, facts)) -def load_system_facts(inventory_file, os_facts_path, env_vars): +def load_system_facts(inventory_file, os_facts_path, env_vars, verbose=False): """ Retrieves system facts from the remote systems. 
""" FNULL = open(os.devnull, 'w') - status = subprocess.call(['ansible-playbook', - '--inventory-file={}'.format(inventory_file), - os_facts_path], - env=env_vars, - stdout=FNULL) + args = ['ansible-playbook', '-v'] if verbose \ + else ['ansible-playbook'] + args.extend([ + '--inventory-file={}'.format(inventory_file), + os_facts_path]) + status = subprocess.call(args, env=env_vars, stdout=FNULL) if not status == 0: return [], 1 callback_facts_file = open(CFG.settings['ansible_callback_facts_yaml'], 'r') @@ -107,7 +110,7 @@ def load_system_facts(inventory_file, os_facts_path, env_vars): return callback_facts, 0 -def default_facts(hosts): +def default_facts(hosts, verbose=False): global CFG inventory_file = generate_inventory(hosts) os_facts_path = '{}/playbooks/byo/openshift_facts.yml'.format(CFG.ansible_playbook_directory) @@ -119,12 +122,12 @@ def default_facts(hosts): facts_env["ANSIBLE_LOG_PATH"] = CFG.settings['ansible_log_path'] if 'ansible_config' in CFG.settings: facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_config'] - return load_system_facts(inventory_file, os_facts_path, facts_env) + return load_system_facts(inventory_file, os_facts_path, facts_env, verbose) -def run_main_playbook(hosts, hosts_to_run_on): +def run_main_playbook(hosts, hosts_to_run_on, verbose=False): global CFG - inventory_file = generate_inventory(hosts) + inventory_file = generate_inventory(hosts_to_run_on) if len(hosts_to_run_on) != len(hosts): main_playbook_path = os.path.join(CFG.ansible_playbook_directory, 'playbooks/common/openshift-cluster/scaleup.yml') @@ -136,16 +139,19 @@ def run_main_playbook(hosts, hosts_to_run_on): facts_env['ANSIBLE_LOG_PATH'] = CFG.settings['ansible_log_path'] if 'ansible_config' in CFG.settings: facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_config'] - return run_ansible(main_playbook_path, inventory_file, facts_env) + return run_ansible(main_playbook_path, inventory_file, facts_env, verbose) + +def run_ansible(playbook, inventory, env_vars, verbose=False): + args = ['ansible-playbook', '-v'] if verbose \ + else ['ansible-playbook'] + args.extend([ + '--inventory-file={}'.format(inventory), + playbook]) + return subprocess.call(args, env=env_vars) -def run_ansible(playbook, inventory, env_vars): - return subprocess.call(['ansible-playbook', - '--inventory-file={}'.format(inventory), - playbook], - env=env_vars) -def run_uninstall_playbook(): +def run_uninstall_playbook(verbose=False): playbook = os.path.join(CFG.settings['ansible_playbook_directory'], 'playbooks/adhoc/uninstall.yml') inventory_file = generate_inventory(CFG.hosts) @@ -154,4 +160,20 @@ def run_uninstall_playbook(): facts_env['ANSIBLE_LOG_PATH'] = CFG.settings['ansible_log_path'] if 'ansible_config' in CFG.settings: facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_config'] - return run_ansible(playbook, inventory_file, facts_env) + return run_ansible(playbook, inventory_file, facts_env, verbose) + + +def run_upgrade_playbook(verbose=False): + # TODO: do not hardcode the upgrade playbook, add ability to select the + # right playbook depending on the type of upgrade. + playbook = os.path.join(CFG.settings['ansible_playbook_directory'], + 'playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml') + # TODO: Upgrade inventory for upgrade? 
+ inventory_file = generate_inventory(CFG.hosts) + facts_env = os.environ.copy() + if 'ansible_log_path' in CFG.settings: + facts_env['ANSIBLE_LOG_PATH'] = CFG.settings['ansible_log_path'] + if 'ansible_config' in CFG.settings: + facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_config'] + return run_ansible(playbook, inventory_file, facts_env, verbose) + diff --git a/utils/src/ooinstall/variants.py b/utils/src/ooinstall/variants.py index 05281d654..3bb61dddb 100644 --- a/utils/src/ooinstall/variants.py +++ b/utils/src/ooinstall/variants.py @@ -29,6 +29,9 @@ class Variant(object): self.versions = versions + def latest_version(self): + return self.versions[-1] + # WARNING: Keep the versions ordered, most recent last: OSE = Variant('openshift-enterprise', 'OpenShift Enterprise', @@ -58,7 +61,7 @@ def find_variant(name, version=None): for prod in SUPPORTED_VARIANTS: if prod.name == name: if version is None: - return (prod, prod.versions[-1]) + return (prod, prod.latest_version()) for v in prod.versions: if v.name == version: return (prod, v) diff --git a/utils/test/cli_installer_tests.py b/utils/test/cli_installer_tests.py index b183f0acb..fc16d9ceb 100644 --- a/utils/test/cli_installer_tests.py +++ b/utils/test/cli_installer_tests.py @@ -46,18 +46,21 @@ SAMPLE_CONFIG = """ variant: %s ansible_ssh_user: root hosts: - - ip: 10.0.0.1 + - connect_to: 10.0.0.1 + ip: 10.0.0.1 hostname: master-private.example.com public_ip: 24.222.0.1 public_hostname: master.example.com master: true node: true - - ip: 10.0.0.2 + - connect_to: 10.0.0.2 + ip: 10.0.0.2 hostname: node1-private.example.com public_ip: 24.222.0.2 public_hostname: node1.example.com node: true - - ip: 10.0.0.3 + - connect_to: 10.0.0.3 + ip: 10.0.0.3 hostname: node2-private.example.com public_ip: 24.222.0.3 public_hostname: node2.example.com @@ -95,6 +98,76 @@ class OOCliFixture(OOInstallFixture): f.close() return config + def _verify_load_facts(self, load_facts_mock): + """ Check that we ran load facts with expected inputs. """ + load_facts_args = load_facts_mock.call_args[0] + self.assertEquals(os.path.join(self.work_dir, ".ansible/hosts"), + load_facts_args[0]) + self.assertEquals(os.path.join(self.work_dir, + "playbooks/byo/openshift_facts.yml"), load_facts_args[1]) + env_vars = load_facts_args[2] + self.assertEquals(os.path.join(self.work_dir, + '.ansible/callback_facts.yaml'), + env_vars['OO_INSTALL_CALLBACK_FACTS_YAML']) + self.assertEqual('/tmp/ansible.log', env_vars['ANSIBLE_LOG_PATH']) + + def _verify_run_playbook(self, run_playbook_mock, exp_hosts_len, exp_hosts_to_run_on_len): + """ Check that we ran playbook with expected inputs. """ + hosts = run_playbook_mock.call_args[0][0] + hosts_to_run_on = run_playbook_mock.call_args[0][1] + self.assertEquals(exp_hosts_len, len(hosts)) + self.assertEquals(exp_hosts_to_run_on_len, len(hosts_to_run_on)) + + def _verify_config_hosts(self, written_config, host_count): + print written_config['hosts'] + self.assertEquals(host_count, len(written_config['hosts'])) + for h in written_config['hosts']: + self.assertTrue(h['node']) + self.assertTrue('ip' in h) + self.assertTrue('hostname' in h) + self.assertTrue('public_ip' in h) + self.assertTrue('public_hostname' in h) + + #pylint: disable=too-many-arguments + def _verify_get_hosts_to_run_on(self, mock_facts, load_facts_mock, + run_playbook_mock, cli_input, + exp_hosts_len=None, exp_hosts_to_run_on_len=None, + force=None): + """ + Tests cli_installer.py:get_hosts_to_run_on. That method has quite a + few subtle branches in the logic. 
The goal with this method is simply + to handle all the messy stuff here and allow the main test cases to be + easily read. The basic idea is to modify mock_facts to return a + version indicating OpenShift is already installed on particular hosts. + """ + load_facts_mock.return_value = (mock_facts, 0) + run_playbook_mock.return_value = 0 + + if cli_input: + self.cli_args.append("install") + result = self.runner.invoke(cli.cli, + self.cli_args, + input=cli_input) + else: + config_file = self.write_config(os.path.join(self.work_dir, + 'ooinstall.conf'), SAMPLE_CONFIG % 'openshift-enterprise') + + self.cli_args.extend(["-c", config_file, "install"]) + if force: + self.cli_args.append("--force") + result = self.runner.invoke(cli.cli, self.cli_args) + written_config = self._read_yaml(config_file) + self._verify_config_hosts(written_config, exp_hosts_len) + + self.assert_result(result, 0) + self._verify_load_facts(load_facts_mock) + self._verify_run_playbook(run_playbook_mock, exp_hosts_len, exp_hosts_to_run_on_len) + + # Make sure we ran on the expected masters and nodes: + hosts = run_playbook_mock.call_args[0][0] + hosts_to_run_on = run_playbook_mock.call_args[0][1] + self.assertEquals(exp_hosts_len, len(hosts)) + self.assertEquals(exp_hosts_to_run_on_len, len(hosts_to_run_on)) class UnattendedCliTests(OOCliFixture): @@ -102,6 +175,92 @@ class UnattendedCliTests(OOCliFixture): OOCliFixture.setUp(self) self.cli_args.append("-u") + # unattended with config file and all installed hosts (without --force) + @patch('ooinstall.openshift_ansible.run_main_playbook') + @patch('ooinstall.openshift_ansible.load_system_facts') + def test_get_hosts_to_run_on1(self, load_facts_mock, run_playbook_mock): + mock_facts = copy.deepcopy(MOCK_FACTS) + mock_facts['10.0.0.1']['common']['version'] = "3.0.0" + mock_facts['10.0.0.2']['common']['version'] = "3.0.0" + mock_facts['10.0.0.3']['common']['version'] = "3.0.0" + + load_facts_mock.return_value = (mock_facts, 0) + run_playbook_mock.return_value = 0 + + config_file = self.write_config(os.path.join(self.work_dir, + 'ooinstall.conf'), SAMPLE_CONFIG % 'openshift-enterprise') + + self.cli_args.extend(["-c", config_file, "install"]) + result = self.runner.invoke(cli.cli, self.cli_args) + + if result.exception is None or result.exit_code != 1: + print "Exit code: %s" % result.exit_code + self.fail("Unexpected CLI return") + + # unattended with config file and all installed hosts (with --force) + @patch('ooinstall.openshift_ansible.run_main_playbook') + @patch('ooinstall.openshift_ansible.load_system_facts') + def test_get_hosts_to_run_on2(self, load_facts_mock, run_playbook_mock): + mock_facts = copy.deepcopy(MOCK_FACTS) + mock_facts['10.0.0.1']['common']['version'] = "3.0.0" + mock_facts['10.0.0.2']['common']['version'] = "3.0.0" + mock_facts['10.0.0.3']['common']['version'] = "3.0.0" + self._verify_get_hosts_to_run_on(mock_facts, load_facts_mock, run_playbook_mock, + cli_input=None, + exp_hosts_len=3, + exp_hosts_to_run_on_len=3, + force=True) + + # unattended with config file and no installed hosts (without --force) + @patch('ooinstall.openshift_ansible.run_main_playbook') + @patch('ooinstall.openshift_ansible.load_system_facts') + def test_get_hosts_to_run_on3(self, load_facts_mock, run_playbook_mock): + load_facts_mock.return_value = (MOCK_FACTS, 0) + run_playbook_mock.return_value = 0 + self._verify_get_hosts_to_run_on(MOCK_FACTS, load_facts_mock, run_playbook_mock, + cli_input=None, + exp_hosts_len=3, + exp_hosts_to_run_on_len=3, + force=False) + + # unattended 
with config file and no installed hosts (with --force) + @patch('ooinstall.openshift_ansible.run_main_playbook') + @patch('ooinstall.openshift_ansible.load_system_facts') + def test_get_hosts_to_run_on4(self, load_facts_mock, run_playbook_mock): + load_facts_mock.return_value = (MOCK_FACTS, 0) + run_playbook_mock.return_value = 0 + self._verify_get_hosts_to_run_on(MOCK_FACTS, load_facts_mock, run_playbook_mock, + cli_input=None, + exp_hosts_len=3, + exp_hosts_to_run_on_len=3, + force=True) + + # unattended with config file and some installed some uninstalled hosts (without --force) + @patch('ooinstall.openshift_ansible.run_main_playbook') + @patch('ooinstall.openshift_ansible.load_system_facts') + def test_get_hosts_to_run_on5(self, load_facts_mock, run_playbook_mock): + mock_facts = copy.deepcopy(MOCK_FACTS) + mock_facts['10.0.0.1']['common']['version'] = "3.0.0" + mock_facts['10.0.0.2']['common']['version'] = "3.0.0" + self._verify_get_hosts_to_run_on(mock_facts, load_facts_mock, run_playbook_mock, + cli_input=None, + exp_hosts_len=3, + exp_hosts_to_run_on_len=2, + force=False) + + # unattended with config file and some installed some uninstalled hosts (with --force) + @patch('ooinstall.openshift_ansible.run_main_playbook') + @patch('ooinstall.openshift_ansible.load_system_facts') + def test_get_hosts_to_run_on6(self, load_facts_mock, run_playbook_mock): + mock_facts = copy.deepcopy(MOCK_FACTS) + mock_facts['10.0.0.1']['common']['version'] = "3.0.0" + mock_facts['10.0.0.2']['common']['version'] = "3.0.0" + self._verify_get_hosts_to_run_on(mock_facts, load_facts_mock, run_playbook_mock, + cli_input=None, + exp_hosts_len=3, + exp_hosts_to_run_on_len=3, + force=True) + @patch('ooinstall.openshift_ansible.run_main_playbook') @patch('ooinstall.openshift_ansible.load_system_facts') def test_cfg_full_run(self, load_facts_mock, run_playbook_mock): @@ -329,7 +488,7 @@ class AttendedCliTests(OOCliFixture): for (host, is_master) in hosts: inputs.append(host) inputs.append('y' if is_master else 'n') - inputs.append('rpm') + #inputs.append('rpm') if i < len(hosts) - 1: inputs.append('y') # Add more hosts else: @@ -346,7 +505,7 @@ class AttendedCliTests(OOCliFixture): for (host, is_master) in add_nodes: inputs.append(host) inputs.append('y' if is_master else 'n') - inputs.append('rpm') + #inputs.append('rpm') if i < len(add_nodes) - 1: inputs.append('y') # Add more hosts else: @@ -360,35 +519,6 @@ class AttendedCliTests(OOCliFixture): return '\n'.join(inputs) - def _verify_load_facts(self, load_facts_mock): - """ Check that we ran load facts with expected inputs. """ - load_facts_args = load_facts_mock.call_args[0] - self.assertEquals(os.path.join(self.work_dir, ".ansible/hosts"), - load_facts_args[0]) - self.assertEquals(os.path.join(self.work_dir, - "playbooks/byo/openshift_facts.yml"), load_facts_args[1]) - env_vars = load_facts_args[2] - self.assertEquals(os.path.join(self.work_dir, - '.ansible/callback_facts.yaml'), - env_vars['OO_INSTALL_CALLBACK_FACTS_YAML']) - self.assertEqual('/tmp/ansible.log', env_vars['ANSIBLE_LOG_PATH']) - - def _verify_run_playbook(self, run_playbook_mock, exp_hosts_len, exp_hosts_to_run_on_len): - """ Check that we ran playbook with expected inputs. 
""" - hosts = run_playbook_mock.call_args[0][0] - hosts_to_run_on = run_playbook_mock.call_args[0][1] - self.assertEquals(exp_hosts_len, len(hosts)) - self.assertEquals(exp_hosts_to_run_on_len, len(hosts_to_run_on)) - - def _verify_config_hosts(self, written_config, host_count): - self.assertEquals(host_count, len(written_config['hosts'])) - for h in written_config['hosts']: - self.assertTrue(h['node']) - self.assertTrue('ip' in h) - self.assertTrue('hostname' in h) - self.assertTrue('public_ip' in h) - self.assertTrue('public_hostname' in h) - @patch('ooinstall.openshift_ansible.run_main_playbook') @patch('ooinstall.openshift_ansible.load_system_facts') def test_full_run(self, load_facts_mock, run_playbook_mock): @@ -413,6 +543,7 @@ class AttendedCliTests(OOCliFixture): written_config = self._read_yaml(self.config_file) self._verify_config_hosts(written_config, 3) + # interactive with config file and some installed some uninstalled hosts @patch('ooinstall.openshift_ansible.run_main_playbook') @patch('ooinstall.openshift_ansible.load_system_facts') def test_add_nodes(self, load_facts_mock, run_playbook_mock): @@ -469,6 +600,29 @@ class AttendedCliTests(OOCliFixture): written_config = self._read_yaml(config_file) self._verify_config_hosts(written_config, 3) + #interactive with config file and all installed hosts + @patch('ooinstall.openshift_ansible.run_main_playbook') + @patch('ooinstall.openshift_ansible.load_system_facts') + def test_get_hosts_to_run_on(self, load_facts_mock, run_playbook_mock): + mock_facts = copy.deepcopy(MOCK_FACTS) + mock_facts['10.0.0.1']['common']['version'] = "3.0.0" + mock_facts['10.0.0.2']['common']['version'] = "3.0.0" + + cli_input = self._build_input(hosts=[ + ('10.0.0.1', True), + ], + add_nodes=[('10.0.0.2', False)], + ssh_user='root', + variant_num=1, + confirm_facts='y') + + self._verify_get_hosts_to_run_on(mock_facts, load_facts_mock, + run_playbook_mock, + cli_input, + exp_hosts_len=2, + exp_hosts_to_run_on_len=2, + force=False) + # TODO: test with config file, attended add node # TODO: test with config file, attended new node already in config file # TODO: test with config file, attended new node already in config file, plus manually added nodes diff --git a/utils/test/oo_config_tests.py b/utils/test/oo_config_tests.py index 01af33fd9..0dd4a30e9 100644 --- a/utils/test/oo_config_tests.py +++ b/utils/test/oo_config_tests.py @@ -14,36 +14,62 @@ SAMPLE_CONFIG = """ variant: openshift-enterprise ansible_ssh_user: root hosts: - - ip: 10.0.0.1 + - connect_to: master-private.example.com + ip: 10.0.0.1 hostname: master-private.example.com public_ip: 24.222.0.1 public_hostname: master.example.com master: true node: true - - ip: 10.0.0.2 + - connect_to: node1-private.example.com + ip: 10.0.0.2 hostname: node1-private.example.com public_ip: 24.222.0.2 public_hostname: node1.example.com node: true - - ip: 10.0.0.3 + - connect_to: node2-private.example.com + ip: 10.0.0.3 hostname: node2-private.example.com public_ip: 24.222.0.3 public_hostname: node2.example.com node: true """ +# Used to test automatic upgrading of config: +LEGACY_CONFIG = """ +Description: This is the configuration file for the OpenShift Ansible-Based Installer. 
+Name: OpenShift Ansible-Based Installer Configuration +Subscription: {type: none} +Vendor: OpenShift Community +Version: 0.0.1 +ansible_config: /tmp/notreal/ansible.cfg +ansible_inventory_directory: /tmp/notreal/.config/openshift/.ansible +ansible_log_path: /tmp/ansible.log +ansible_plugins_directory: /tmp/notreal/.python-eggs/ooinstall-3.0.0-py2.7.egg-tmp/ooinstall/ansible_plugins +masters: [10.0.0.1] +nodes: [10.0.0.2, 10.0.0.3] +validated_facts: + 10.0.0.1: {hostname: master-private.example.com, ip: 10.0.0.1, public_hostname: master.example.com, public_ip: 24.222.0.1} + 10.0.0.2: {hostname: node1-private.example.com, ip: 10.0.0.2, public_hostname: node1.example.com, public_ip: 24.222.0.2} + 10.0.0.3: {hostname: node2-private.example.com, ip: 10.0.0.3, public_hostname: node2.example.com, public_ip: 24.222.0.3} +""" + + CONFIG_INCOMPLETE_FACTS = """ hosts: - - ip: 10.0.0.1 + - connect_to: 10.0.0.1 + ip: 10.0.0.1 hostname: master-private.example.com public_ip: 24.222.0.1 public_hostname: master.example.com master: true - - ip: 10.0.0.2 - hostname: node1-private.example.com + - connect_to: 10.0.0.2 + ip: 10.0.0.2 + hostname: 24.222.0.2 public_ip: 24.222.0.2 node: true - - ip: 10.0.0.3 + - connect_to: 10.0.0.3 + ip: 10.0.0.3 node: true """ @@ -74,6 +100,48 @@ class OOInstallFixture(unittest.TestCase): return path +class LegacyOOConfigTests(OOInstallFixture): + + def setUp(self): + OOInstallFixture.setUp(self) + self.cfg_path = self.write_config(os.path.join(self.work_dir, + 'ooinstall.conf'), LEGACY_CONFIG) + self.cfg = OOConfig(self.cfg_path) + + def test_load_config_memory(self): + self.assertEquals('openshift-enterprise', self.cfg.settings['variant']) + self.assertEquals('3.0', self.cfg.settings['variant_version']) + self.assertEquals('v1', self.cfg.settings['version']) + + self.assertEquals(3, len(self.cfg.hosts)) + h1 = self.cfg.get_host('10.0.0.1') + self.assertEquals('10.0.0.1', h1.ip) + self.assertEquals('24.222.0.1', h1.public_ip) + self.assertEquals('master-private.example.com', h1.hostname) + self.assertEquals('master.example.com', h1.public_hostname) + + h2 = self.cfg.get_host('10.0.0.2') + self.assertEquals('10.0.0.2', h2.ip) + self.assertEquals('24.222.0.2', h2.public_ip) + self.assertEquals('node1-private.example.com', h2.hostname) + self.assertEquals('node1.example.com', h2.public_hostname) + + h3 = self.cfg.get_host('10.0.0.3') + self.assertEquals('10.0.0.3', h3.ip) + self.assertEquals('24.222.0.3', h3.public_ip) + self.assertEquals('node2-private.example.com', h3.hostname) + self.assertEquals('node2.example.com', h3.public_hostname) + + self.assertFalse('masters' in self.cfg.settings) + self.assertFalse('nodes' in self.cfg.settings) + self.assertFalse('Description' in self.cfg.settings) + self.assertFalse('Name' in self.cfg.settings) + self.assertFalse('Subscription' in self.cfg.settings) + self.assertFalse('Vendor' in self.cfg.settings) + self.assertFalse('Version' in self.cfg.settings) + self.assertFalse('validates_facts' in self.cfg.settings) + + class OOConfigTests(OOInstallFixture): def test_load_config(self): @@ -83,7 +151,7 @@ class OOConfigTests(OOInstallFixture): ooconfig = OOConfig(cfg_path) self.assertEquals(3, len(ooconfig.hosts)) - self.assertEquals("10.0.0.1", ooconfig.hosts[0].name) + self.assertEquals("master-private.example.com", ooconfig.hosts[0].connect_to) self.assertEquals("10.0.0.1", ooconfig.hosts[0].ip) self.assertEquals("master-private.example.com", ooconfig.hosts[0].hostname) @@ -91,6 +159,7 @@ class OOConfigTests(OOInstallFixture): [host['ip'] 
for host in ooconfig.settings['hosts']])
         self.assertEquals('openshift-enterprise', ooconfig.settings['variant'])
+        self.assertEquals('v1', ooconfig.settings['version'])
     def test_load_complete_facts(self):
         cfg_path = self.write_config(os.path.join(self.work_dir,
@@ -128,6 +197,7 @@ class OOConfigTests(OOInstallFixture):
         self.assertTrue('ansible_ssh_user' in written_config)
         self.assertTrue('variant' in written_config)
+        self.assertEquals('v1', written_config['version'])
         # Some advanced settings should not get written out if they
         # were not specified by the user:
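For reference, the legacy-config upgrade these tests exercise amounts to folding validated_facts into a hosts list keyed by connect_to, tagging master/node roles from the old top-level lists, and pinning the variant to openshift-enterprise 3.0. A standalone sketch of that transformation, simplified from the OOConfig._upgrade_legacy_config hunk earlier in this patch (not the module's actual function):

```
import yaml

def upgrade_legacy(settings):
    """Rough, standalone version of the legacy-config upgrade logic."""
    hosts = []
    for key, value in settings.get('validated_facts', {}).items():
        value['connect_to'] = key
        if key in settings.get('masters', []):
            value['master'] = True
        if key in settings.get('nodes', []):
            value['node'] = True
        hosts.append(value)
    # Drop the legacy top-level keys once their data has been migrated.
    for legacy_key in ('validated_facts', 'Description', 'Name', 'Subscription',
                       'Vendor', 'Version', 'masters', 'nodes'):
        settings.pop(legacy_key, None)
    settings['hosts'] = hosts
    settings['variant'] = 'openshift-enterprise'
    settings['variant_version'] = '3.0'
    return settings

legacy = yaml.safe_load("""
masters: [10.0.0.1]
nodes: [10.0.0.2]
validated_facts:
  10.0.0.1: {hostname: master-private.example.com, ip: 10.0.0.1}
  10.0.0.2: {hostname: node1-private.example.com, ip: 10.0.0.2}
""")
upgraded = upgrade_legacy(legacy)
print(sorted(h['connect_to'] for h in upgraded['hosts']))
# ['10.0.0.1', '10.0.0.2']
```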