-rw-r--r--  .tito/packages/openshift-ansible | 2
-rw-r--r--  README_AWS.md | 12
-rw-r--r--  bin/README_SHELL_COMPLETION | 37
-rwxr-xr-x  bin/ohi | 147
-rw-r--r--  bin/openshift_ansible.conf.example | 6
-rw-r--r--  bin/openshift_ansible/__init__.py | 0
l---------  bin/openshift_ansible/aws | 1
-rw-r--r--  bin/openshift_ansible/awsutil.py | 268
l---------  bin/openshift_ansible/multi_inventory.py | 1
-rw-r--r--  bin/openshift_ansible/utils.py | 30
-rwxr-xr-x  bin/opscp | 151
-rwxr-xr-x  bin/opssh | 154
-rwxr-xr-x  bin/oscp | 184
-rwxr-xr-x  bin/ossh | 172
-rwxr-xr-x  bin/ossh_bash_completion | 52
-rw-r--r--  bin/ossh_zsh_completion | 31
-rw-r--r--  bin/zsh_functions/_ossh | 49
-rw-r--r--  filter_plugins/oo_filters.py | 5
-rw-r--r--  filter_plugins/openshift_master.py | 28
-rwxr-xr-x  git/pylint.sh | 1
-rw-r--r--  inventory/byo/hosts.aep.example | 21
-rw-r--r--  inventory/byo/hosts.origin.example | 22
-rw-r--r--  inventory/byo/hosts.ose.example | 23
-rw-r--r--  openshift-ansible.spec | 132
-rw-r--r--  playbooks/adhoc/uninstall.yml | 157
-rw-r--r--  playbooks/aws/openshift-cluster/config.yml | 20
-rw-r--r--  playbooks/aws/openshift-cluster/tasks/launch_instances.yml | 1
-rw-r--r--  playbooks/aws/openshift-cluster/templates/user_data.j2 | 5
-rw-r--r--  playbooks/aws/openshift-cluster/update.yml | 21
-rw-r--r--  playbooks/aws/openshift-cluster/vars.yml | 2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml | 105
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/docker/files/nuke_images.sh | 23
l---------  playbooks/byo/openshift-cluster/upgrades/docker/roles | 1
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml | 29
-rw-r--r--  playbooks/byo/rhel_subscribe.yml | 20
-rw-r--r--  playbooks/common/openshift-cluster/additional_config.yml | 23
-rw-r--r--  playbooks/common/openshift-cluster/config.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/openshift_hosted.yml | 27
-rw-r--r--  playbooks/common/openshift-cluster/update_repos_and_packages.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/files/openshift_container_versions.sh | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker_upgrade.yml | 14
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/post.yml | 4
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml | 6
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml | 6
-rw-r--r--  playbooks/common/openshift-loadbalancer/config.yml | 5
l---------  playbooks/common/openshift-loadbalancer/filter_plugins | 1
l---------  playbooks/common/openshift-loadbalancer/lookup_plugins | 1
l---------  playbooks/common/openshift-loadbalancer/roles | 1
-rw-r--r--  playbooks/common/openshift-loadbalancer/service.yml | 20
-rw-r--r--  playbooks/common/openshift-master/config.yml | 34
-rw-r--r--  playbooks/common/openshift-node/config.yml | 14
-rw-r--r--  playbooks/gce/openshift-cluster/config.yml | 21
-rw-r--r--  playbooks/gce/openshift-cluster/library/gce.py | 543
-rw-r--r--  playbooks/gce/openshift-cluster/tasks/launch_instances.yml | 5
-rw-r--r--  playbooks/gce/openshift-cluster/update.yml | 21
-rw-r--r--  playbooks/libvirt/openshift-cluster/config.yml | 20
-rw-r--r--  playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml | 4
-rw-r--r--  playbooks/libvirt/openshift-cluster/update.yml | 18
-rw-r--r--  playbooks/openstack/openshift-cluster/config.yml | 19
-rw-r--r--  playbooks/openstack/openshift-cluster/files/heat_stack.yaml | 16
-rw-r--r--  playbooks/openstack/openshift-cluster/launch.yml | 2
-rw-r--r--  playbooks/openstack/openshift-cluster/update.yml | 21
-rw-r--r--  playbooks/openstack/openshift-cluster/vars.yml | 1
-rw-r--r--  roles/cockpit/defaults/main.yml | 4
-rw-r--r--  roles/cockpit/meta/main.yml | 5
-rw-r--r--  roles/docker/tasks/main.yml | 60
-rw-r--r--  roles/etcd/defaults/main.yaml | 6
-rw-r--r--  roles/etcd/meta/main.yml | 9
-rw-r--r--  roles/etcd/tasks/main.yml | 12
-rw-r--r--  roles/nuage_master/tasks/certificates.yml | 2
-rw-r--r--  roles/nuage_master/vars/main.yaml | 2
-rw-r--r--  roles/openshift_ansible_inventory/README.md | 41
-rw-r--r--  roles/openshift_ansible_inventory/defaults/main.yml | 4
-rw-r--r--  roles/openshift_ansible_inventory/handlers/main.yml | 2
-rw-r--r--  roles/openshift_ansible_inventory/meta/main.yml | 8
-rw-r--r--  roles/openshift_ansible_inventory/tasks/main.yml | 47
-rw-r--r--  roles/openshift_ansible_inventory/vars/main.yml | 2
-rw-r--r--  roles/openshift_cli/tasks/main.yml | 2
-rw-r--r--  roles/openshift_cli/templates/openshift.j2 | 10
-rw-r--r--  roles/openshift_clock/meta/main.yml | 15
-rw-r--r--  roles/openshift_clock/tasks/main.yaml | 14
-rw-r--r--  roles/openshift_docker_facts/tasks/main.yml | 15
-rw-r--r--  roles/openshift_etcd/meta/main.yml | 1
-rwxr-xr-x  roles/openshift_examples/examples-sync.sh | 2
-rw-r--r--  roles/openshift_examples/files/examples/v1.1/xpaas-streams/jboss-image-streams.json | 21
-rw-r--r--  roles/openshift_examples/files/examples/v1.1/xpaas-templates/amq62-basic.json | 11
-rw-r--r--  roles/openshift_examples/files/examples/v1.1/xpaas-templates/amq62-persistent-ssl.json | 49
-rw-r--r--  roles/openshift_examples/files/examples/v1.1/xpaas-templates/amq62-persistent.json | 47
-rw-r--r--  roles/openshift_examples/files/examples/v1.1/xpaas-templates/amq62-ssl.json | 11
-rw-r--r--  roles/openshift_examples/files/examples/v1.1/xpaas-templates/decisionserver62-amq-s2i.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-amq-persistent-s2i.json | 46
-rw-r--r--  roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-amq-s2i.json | 11
-rw-r--r--  roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap70-amq-persistent-s2i.json | 783
-rw-r--r--  roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap70-amq-s2i.json | 710
-rw-r--r--  roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap70-basic-s2i.json | 10
-rw-r--r--  roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap70-https-s2i.json | 10
-rw-r--r--  roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap70-mongodb-persistent-s2i.json | 8
-rw-r--r--  roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap70-mongodb-s2i.json | 8
-rw-r--r--  roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap70-mysql-persistent-s2i.json | 16
-rw-r--r--  roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap70-mysql-s2i.json | 16
-rw-r--r--  roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap70-postgresql-persistent-s2i.json | 16
-rw-r--r--  roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap70-postgresql-s2i.json | 16
-rw-r--r--  roles/openshift_examples/files/examples/v1.2/xpaas-streams/jboss-image-streams.json | 21
-rw-r--r--  roles/openshift_examples/files/examples/v1.2/xpaas-templates/amq62-basic.json | 11
-rw-r--r--  roles/openshift_examples/files/examples/v1.2/xpaas-templates/amq62-persistent-ssl.json | 49
-rw-r--r--  roles/openshift_examples/files/examples/v1.2/xpaas-templates/amq62-persistent.json | 47
-rw-r--r--  roles/openshift_examples/files/examples/v1.2/xpaas-templates/amq62-ssl.json | 11
-rw-r--r--  roles/openshift_examples/files/examples/v1.2/xpaas-templates/decisionserver62-amq-s2i.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/v1.2/xpaas-templates/eap64-amq-persistent-s2i.json | 46
-rw-r--r--  roles/openshift_examples/files/examples/v1.2/xpaas-templates/eap64-amq-s2i.json | 11
-rw-r--r--  roles/openshift_examples/files/examples/v1.2/xpaas-templates/eap70-amq-persistent-s2i.json | 783
-rw-r--r--  roles/openshift_examples/files/examples/v1.2/xpaas-templates/eap70-amq-s2i.json | 710
-rw-r--r--  roles/openshift_examples/files/examples/v1.2/xpaas-templates/eap70-basic-s2i.json | 10
-rw-r--r--  roles/openshift_examples/files/examples/v1.2/xpaas-templates/eap70-https-s2i.json | 10
-rw-r--r--  roles/openshift_examples/files/examples/v1.2/xpaas-templates/eap70-mongodb-persistent-s2i.json | 8
-rw-r--r--  roles/openshift_examples/files/examples/v1.2/xpaas-templates/eap70-mongodb-s2i.json | 8
-rw-r--r--  roles/openshift_examples/files/examples/v1.2/xpaas-templates/eap70-mysql-persistent-s2i.json | 16
-rw-r--r--  roles/openshift_examples/files/examples/v1.2/xpaas-templates/eap70-mysql-s2i.json | 16
-rw-r--r--  roles/openshift_examples/files/examples/v1.2/xpaas-templates/eap70-postgresql-persistent-s2i.json | 16
-rw-r--r--  roles/openshift_examples/files/examples/v1.2/xpaas-templates/eap70-postgresql-s2i.json | 16
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py | 33
-rw-r--r--  roles/openshift_facts/tasks/main.yml | 16
-rw-r--r--  roles/openshift_hosted_logging/vars/main.yaml | 2
-rw-r--r--  roles/openshift_loadbalancer/README.md (renamed from roles/haproxy/README.md) | 4
-rw-r--r--  roles/openshift_loadbalancer/defaults/main.yml (renamed from roles/haproxy/defaults/main.yml) | 10
-rw-r--r--  roles/openshift_loadbalancer/handlers/main.yml (renamed from roles/haproxy/handlers/main.yml) | 0
-rw-r--r--  roles/openshift_loadbalancer/meta/main.yml | 20
-rw-r--r--  roles/openshift_loadbalancer/tasks/main.yml (renamed from roles/haproxy/tasks/main.yml) | 6
-rw-r--r--  roles/openshift_loadbalancer/templates/haproxy.cfg.j2 (renamed from roles/haproxy/templates/haproxy.cfg.j2) | 8
-rw-r--r--  roles/openshift_loadbalancer_facts/README.md | 34
-rw-r--r--  roles/openshift_loadbalancer_facts/meta/main.yml (renamed from roles/haproxy/meta/main.yml) | 7
-rw-r--r--  roles/openshift_loadbalancer_facts/tasks/main.yml | 30
-rw-r--r--  roles/openshift_master/meta/main.yml | 1
-rw-r--r--  roles/openshift_master/tasks/main.yml | 23
-rw-r--r--  roles/openshift_master/tasks/systemd_units.yml | 46
-rw-r--r--  roles/openshift_master_facts/tasks/main.yml | 1
-rw-r--r--  roles/openshift_metrics/tasks/main.yaml | 13
-rw-r--r--  roles/openshift_node/defaults/main.yml | 14
-rw-r--r--  roles/openshift_node/meta/main.yml | 16
-rw-r--r--  roles/openshift_node/tasks/main.yml | 14
-rw-r--r--  roles/openshift_node/tasks/systemd_units.yml | 7
-rw-r--r--  roles/openshift_node/templates/openshift.docker.node.dep.service | 11
-rw-r--r--  roles/openshift_node/templates/openshift.docker.node.service | 5
-rwxr-xr-x  roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh | 4
-rw-r--r--  roles/openshift_repos/files/removed/repos/maxamillion-origin-next-epel-7.repo (renamed from roles/openshift_repos/files/origin/repos/maxamillion-origin-next-epel-7.repo) | 0
-rw-r--r--  roles/openshift_repos/tasks/centos_sig.yaml | 6
-rw-r--r--  roles/openshift_repos/tasks/main.yaml | 18
-rw-r--r--  roles/openshift_storage_nfs/defaults/main.yml | 3
-rw-r--r--  roles/openshift_storage_nfs/meta/main.yml | 3
-rw-r--r--  roles/rhel_subscribe/tasks/enterprise.yml | 2
-rw-r--r--  test/env-setup | 8
-rw-r--r--  test/units/README.md | 7
-rwxr-xr-x  test/units/multi_inventory_test.py | 114
-rwxr-xr-x  test/units/yedit_test.py | 143
156 files changed, 5068 insertions, 2176 deletions
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index 08d0c6d0a..f7cce4809 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.0.90-1 ./
+3.3.0-1 ./
diff --git a/README_AWS.md b/README_AWS.md
index 467fccbd4..f3f98fed5 100644
--- a/README_AWS.md
+++ b/README_AWS.md
@@ -35,12 +35,12 @@ By default, a cluster is launched into the `public` security group. Make sure yo
You may also want to allow access from the outside world on the following ports:
```
-• 22 - ssh
-• 80 - Web Apps
-• 443 - Web Apps (https)
-• 4789 - SDN / VXLAN
-• 8443 - OpenShift Console
-• 10250 - kubelet
+• 22/TCP - ssh
+• 80/TCP - Web Apps
+• 443/TCP - Web Apps (https)
+• 4789/UDP - SDN / VXLAN
+• 8443/TCP - OpenShift Console
+• 10250/TCP - kubelet
```
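For reference only (not part of this diff): a minimal boto3 sketch of opening two of the ports listed above on an existing AWS security group. The security group ID and CIDR ranges are placeholders.

```
# Illustrative sketch: open 4789/UDP (SDN/VXLAN) and 8443/TCP (OpenShift console)
# on an existing security group. GroupId and CidrIp values are placeholders.
import boto3

ec2 = boto3.client("ec2")
ec2.authorize_security_group_ingress(
    GroupId="sg-0123456789abcdef0",  # placeholder security group
    IpPermissions=[
        {"IpProtocol": "udp", "FromPort": 4789, "ToPort": 4789,
         "IpRanges": [{"CidrIp": "10.0.0.0/16"}]},   # SDN / VXLAN, cluster-internal
        {"IpProtocol": "tcp", "FromPort": 8443, "ToPort": 8443,
         "IpRanges": [{"CidrIp": "0.0.0.0/0"}]},     # OpenShift console
    ],
)
```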
diff --git a/bin/README_SHELL_COMPLETION b/bin/README_SHELL_COMPLETION
deleted file mode 100644
index 49bba3acc..000000000
--- a/bin/README_SHELL_COMPLETION
+++ /dev/null
@@ -1,37 +0,0 @@
-# completion is available for ossh/oscp
-
-ossh/oscp uses a dynamic inventory cache in order to lookup
-hostnames and translate them to something meaningful
-such as an IP address or dns name.
-
-This allows us to treat our servers as cattle and not as pets.
-
-If you have not run the ossh command and it has not laid down
-a cache file the completions will not be available.
-
-You can populate the cache by running `ossh --list`. This
-will populate the cache file and the completions should
-become available.
-
-This script will look at the cached version of your
-multi_inventory results in ~/.ansible/tmp/multi_inventory.cache.
-It will then parse a few {host}.{env} out of the json
-and return them to be completable.
-
-# BASH
-In order to setup bash completion, source the following script:
-/path/to/repository/openshift-ansible/bin/ossh_bash_completion
-
-# ZSH
-In order to setup zsh completion, you will need to verify
-that the _ossh_zsh_completion script is somewhere in the path
-of $fpath.
-
-Once $fpath includes the _ossh_zsh_completion script then you should
-run `exec zsh`. This will then allow you to call `ossh host[TAB]`
-for a list of completions.
-
-Before completing the final step, zsh keeps its own cache in
-~/.zcompdump of the known functions and variables. In order to
-refresh with new variables and completion arrays you might need
-to `rm ~/.zcompdump` before running `exec zsh`.
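For context (not part of the diff): a minimal Python sketch, assuming the cache layout described in the removed README above, of how completable host names can be read from the multi_inventory cache.

```
# Sketch only: list completable host names from the cached multi_inventory JSON,
# assuming the ~/.ansible/tmp/multi_inventory.cache layout described above.
import json
import os

cache = os.path.expanduser("~/.ansible/tmp/multi_inventory.cache")
with open(cache) as f:
    inventory = json.load(f)

for name in sorted(inventory["_meta"]["hostvars"]):
    print(name)
```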
diff --git a/bin/ohi b/bin/ohi
deleted file mode 100755
index 9c2ce8432..000000000
--- a/bin/ohi
+++ /dev/null
@@ -1,147 +0,0 @@
-#!/usr/bin/env python
-'''
-Ohi = Openshift Host Inventory
-
-This script provides an easy way to look at your host inventory.
-
-This depends on multi_inventory being setup correctly.
-'''
-# vim: expandtab:tabstop=4:shiftwidth=4
-
-import argparse
-import sys
-import os
-import ConfigParser
-
-from openshift_ansible import awsutil
-from openshift_ansible import utils
-from openshift_ansible.awsutil import ArgumentError
-
-CONFIG_MAIN_SECTION = 'main'
-CONFIG_HOST_TYPE_ALIAS_SECTION = 'host_type_aliases'
-
-
-class Ohi(object):
- '''
- Class for managing openshift host inventory
- '''
- def __init__(self):
- self.host_type_aliases = {}
- self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
-
- # Default the config path to /etc
- self.config_path = os.path.join(os.path.sep, 'etc', \
- 'openshift_ansible', \
- 'openshift_ansible.conf')
-
- self.args = None
- self.parse_cli_args()
- self.parse_config_file()
-
- self.aws = awsutil.AwsUtil(self.host_type_aliases)
-
- def run(self):
- '''
- Call into awsutil and retrieve the desired hosts and environments
- '''
-
- if self.args.list_host_types:
- self.aws.print_host_types()
- return 0
-
- if self.args.v3:
- version = '3'
- elif self.args.all_versions:
- version = 'all'
- else:
- version = '2'
-
- hosts = self.aws.get_host_list(clusters=self.args.cluster,
- host_type=self.args.host_type,
- sub_host_type=self.args.sub_host_type,
- envs=self.args.env,
- version=version,
- cached=self.args.cache_only)
-
- if hosts is None:
- # We weren't able to determine what they wanted to do
- raise ArgumentError("Invalid combination of arguments")
-
- if self.args.ip:
- hosts = self.aws.convert_to_ip(hosts)
-
- for host in sorted(hosts, key=utils.normalize_dnsname):
- if self.args.user:
- print "%s@%s" % (self.args.user, host)
- else:
- print host
-
- return 0
-
- def parse_config_file(self):
- '''
- Parse the config file for ohi
- '''
- if os.path.isfile(self.config_path):
- config = ConfigParser.ConfigParser()
- config.read(self.config_path)
-
- self.host_type_aliases = {}
- if config.has_section(CONFIG_HOST_TYPE_ALIAS_SECTION):
- for alias in config.options(CONFIG_HOST_TYPE_ALIAS_SECTION):
- value = config.get(CONFIG_HOST_TYPE_ALIAS_SECTION, alias).split(',')
- self.host_type_aliases[alias] = value
-
- def parse_cli_args(self):
- """Setup the command line parser with the options we want
- """
-
- parser = argparse.ArgumentParser(description='OpenShift Host Inventory')
-
- parser.add_argument('--list-host-types', default=False, action='store_true', help='List all of the host types')
- parser.add_argument('--list', default=False, action='store_true', help='List all hosts')
-
- parser.add_argument('-c', '--cluster', action="append", help="Which clusterid to use")
- parser.add_argument('-e', '--env', action="append", help="Which environment to use")
-
- parser.add_argument('-t', '--host-type', action="store", help="Which host type to use")
-
- parser.add_argument('-s', '--sub-host-type', action="store", help="Which sub host type to use")
-
- parser.add_argument('-l', '--user', action='store', default=None, help='username')
-
- parser.add_argument('--cache-only', action='store_true', default=False,
- help='Retrieve the host inventory by cache only. Default is false.')
-
- parser.add_argument('--v2', action='store_true', default=True,
- help='Specify the openshift version. Default is 2')
-
- parser.add_argument('--v3', action='store_true', default=False,
- help='Specify the openshift version.')
-
- parser.add_argument('--ip', action='store_true', default=False,
- help='Return ip address only.')
-
- parser.add_argument('--all-versions', action='store_true', default=False,
- help='Specify the openshift version. Return all versions')
-
- self.args = parser.parse_args()
-
-def main():
- '''
- Ohi will do its work here
- '''
- if len(sys.argv) == 1:
- print "\nError: No options given. Use --help to see the available options\n"
- sys.exit(0)
-
- try:
- ohi = Ohi()
- exitcode = ohi.run()
- sys.exit(exitcode)
- except ArgumentError as err:
- print "\nError: %s\n" % err.message
-
-if __name__ == '__main__':
- main()
-
diff --git a/bin/openshift_ansible.conf.example b/bin/openshift_ansible.conf.example
deleted file mode 100644
index 8786dfc13..000000000
--- a/bin/openshift_ansible.conf.example
+++ /dev/null
@@ -1,6 +0,0 @@
-#[main]
-#inventory = /usr/share/ansible/inventory/multi_inventory.py
-
-#[host_type_aliases]
-#host-type-one = aliasa,aliasb
-#host-type-two = aliasfortwo
diff --git a/bin/openshift_ansible/__init__.py b/bin/openshift_ansible/__init__.py
deleted file mode 100644
index e69de29bb..000000000
--- a/bin/openshift_ansible/__init__.py
+++ /dev/null
diff --git a/bin/openshift_ansible/aws b/bin/openshift_ansible/aws
deleted file mode 120000
index eb0575b4d..000000000
--- a/bin/openshift_ansible/aws
+++ /dev/null
@@ -1 +0,0 @@
-../../inventory/aws/ \ No newline at end of file
diff --git a/bin/openshift_ansible/awsutil.py b/bin/openshift_ansible/awsutil.py
deleted file mode 100644
index 11651f087..000000000
--- a/bin/openshift_ansible/awsutil.py
+++ /dev/null
@@ -1,268 +0,0 @@
-# vim: expandtab:tabstop=4:shiftwidth=4
-
-"""This module comprises Aws specific utility functions."""
-
-import os
-import re
-
-# Buildbot does not have multi_inventory installed
-#pylint: disable=no-name-in-module
-from openshift_ansible import multi_inventory
-
-class ArgumentError(Exception):
- """This class is raised when improper arguments are passed."""
-
- def __init__(self, message):
- """Initialize an ArgumentError.
-
- Keyword arguments:
- message -- the exact error message being raised
- """
- super(ArgumentError, self).__init__()
- self.message = message
-
-class AwsUtil(object):
- """This class contains the AWS utility functions."""
-
- def __init__(self, host_type_aliases=None):
- """Initialize the AWS utility class.
-
- Keyword arguments:
- host_type_aliases -- a list of aliases to common host-types (e.g. ex-node)
- """
-
- self.alias_lookup = {}
- host_type_aliases = host_type_aliases or {}
-
- self.host_type_aliases = host_type_aliases
- self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
-
- self.setup_host_type_alias_lookup()
-
- def setup_host_type_alias_lookup(self):
- """Sets up the alias to host-type lookup table."""
- for key, values in self.host_type_aliases.iteritems():
- for value in values:
- self.alias_lookup[value] = key
-
- @staticmethod
- def get_inventory(args=None, cached=False):
- """Calls the inventory script and returns a dictionary containing the inventory."
-
- Keyword arguments:
- args -- optional arguments to pass to the inventory script
- """
- minv = multi_inventory.MultiInventory(args)
- if cached:
- minv.get_inventory_from_cache()
- else:
- minv.run()
- return minv.result
-
- def get_clusters(self):
- """Searches for cluster tags in the inventory and returns all of the clusters found."""
- pattern = re.compile(r'^oo_clusterid_(.*)')
-
- clusters = []
- inv = self.get_inventory()
- for key in inv.keys():
- matched = pattern.match(key)
- if matched:
- clusters.append(matched.group(1))
-
- clusters.sort()
- return clusters
-
- def get_environments(self):
- """Searches for env tags in the inventory and returns all of the envs found."""
- pattern = re.compile(r'^oo_environment_(.*)')
-
- envs = []
- inv = self.get_inventory()
- for key in inv.keys():
- matched = pattern.match(key)
- if matched:
- envs.append(matched.group(1))
-
- envs.sort()
- return envs
-
- def get_host_types(self):
- """Searches for host-type tags in the inventory and returns all host-types found."""
- pattern = re.compile(r'^oo_hosttype_(.*)')
-
- host_types = []
- inv = self.get_inventory()
- for key in inv.keys():
- matched = pattern.match(key)
- if matched:
- host_types.append(matched.group(1))
-
- host_types.sort()
- return host_types
-
- def get_sub_host_types(self):
- """Searches for sub-host-type tags in the inventory and returns all sub-host-types found."""
- pattern = re.compile(r'^oo_subhosttype_(.*)')
-
- sub_host_types = []
- inv = self.get_inventory()
- for key in inv.keys():
- matched = pattern.match(key)
- if matched:
- sub_host_types.append(matched.group(1))
-
- sub_host_types.sort()
- return sub_host_types
-
- def get_security_groups(self):
- """Searches for security_groups in the inventory and returns all SGs found."""
- pattern = re.compile(r'^security_group_(.*)')
-
- groups = []
- inv = self.get_inventory()
- for key in inv.keys():
- matched = pattern.match(key)
- if matched:
- groups.append(matched.group(1))
-
- groups.sort()
- return groups
-
- def build_host_dict_by_env(self, args=None):
- """Searches the inventory for hosts in an env and returns their hostvars."""
- args = args or []
- inv = self.get_inventory(args)
-
- inst_by_env = {}
- for _, host in inv['_meta']['hostvars'].items():
- # If you don't have an environment tag, we're going to ignore you
- if 'oo_environment' not in host:
- continue
-
- if host['oo_environment'] not in inst_by_env:
- inst_by_env[host['oo_environment']] = {}
- host_id = "%s:%s" % (host['oo_name'], host['oo_id'])
- inst_by_env[host['oo_environment']][host_id] = host
-
- return inst_by_env
-
- def print_host_types(self):
- """Gets the list of host types and aliases and outputs them in columns."""
- host_types = self.get_host_types()
- ht_format_str = "%35s"
- alias_format_str = "%-20s"
- combined_format_str = ht_format_str + " " + alias_format_str
-
- print
- print combined_format_str % ('Host Types', 'Aliases')
- print combined_format_str % ('----------', '-------')
-
- for host_type in host_types:
- aliases = []
- if host_type in self.host_type_aliases:
- aliases = self.host_type_aliases[host_type]
- print combined_format_str % (host_type, ", ".join(aliases))
- else:
- print ht_format_str % host_type
- print
-
- def resolve_host_type(self, host_type):
- """Converts a host-type alias into a host-type.
-
- Keyword arguments:
- host_type -- The alias or host_type to look up.
-
- Example (depends on aliases defined in config file):
- host_type = ex-node
- returns: openshift-node
- """
- if self.alias_lookup.has_key(host_type):
- return self.alias_lookup[host_type]
- return host_type
-
- @staticmethod
- def gen_version_tag(ver):
- """Generate the version tag
- """
- return "oo_version_%s" % ver
-
- @staticmethod
- def gen_clusterid_tag(clu):
- """Generate the clusterid tag
- """
- return "oo_clusterid_%s" % clu
-
- @staticmethod
- def gen_env_tag(env):
- """Generate the environment tag
- """
- return "oo_environment_%s" % env
-
- def gen_host_type_tag(self, host_type, version):
- """Generate the host type tag
- """
- if version == '2':
- host_type = self.resolve_host_type(host_type)
- return "oo_hosttype_%s" % host_type
-
- @staticmethod
- def gen_sub_host_type_tag(sub_host_type):
- """Generate the host type tag
- """
- return "oo_subhosttype_%s" % sub_host_type
-
- # This function uses all of these params to perform a filters on our host inventory.
- # pylint: disable=too-many-arguments
- def get_host_list(self, clusters=None, host_type=None, sub_host_type=None, envs=None, version=None, cached=False):
- """Get the list of hosts from the inventory using host-type and environment
- """
- retval = set([])
- envs = envs or []
-
- inv = self.get_inventory(cached=cached)
-
- retval.update(inv.get('all_hosts', []))
-
- if clusters:
- cluster_hosts = set([])
- if len(clusters) > 1:
- for cluster in clusters:
- clu_tag = AwsUtil.gen_clusterid_tag(cluster)
- cluster_hosts.update(inv.get(clu_tag, []))
- else:
- cluster_hosts.update(inv.get(AwsUtil.gen_clusterid_tag(clusters[0]), []))
-
- retval.intersection_update(cluster_hosts)
-
- if envs:
- env_hosts = set([])
- if len(envs) > 1:
- for env in envs:
- env_tag = AwsUtil.gen_env_tag(env)
- env_hosts.update(inv.get(env_tag, []))
- else:
- env_hosts.update(inv.get(AwsUtil.gen_env_tag(envs[0]), []))
-
- retval.intersection_update(env_hosts)
-
- if host_type:
- retval.intersection_update(inv.get(self.gen_host_type_tag(host_type, version), []))
-
- if sub_host_type:
- retval.intersection_update(inv.get(self.gen_sub_host_type_tag(sub_host_type), []))
-
- if version != 'all':
- retval.intersection_update(inv.get(AwsUtil.gen_version_tag(version), []))
-
- return list(retval)
-
- def convert_to_ip(self, hosts, cached=False):
- """convert a list of host names to ip addresses"""
-
- inv = self.get_inventory(cached=cached)
- ips = []
- for host in hosts:
- ips.append(inv['_meta']['hostvars'][host]['oo_public_ip'])
-
- return ips
diff --git a/bin/openshift_ansible/multi_inventory.py b/bin/openshift_ansible/multi_inventory.py
deleted file mode 120000
index b40feec07..000000000
--- a/bin/openshift_ansible/multi_inventory.py
+++ /dev/null
@@ -1 +0,0 @@
-../../inventory/multi_inventory.py \ No newline at end of file
diff --git a/bin/openshift_ansible/utils.py b/bin/openshift_ansible/utils.py
deleted file mode 100644
index e6243aa5a..000000000
--- a/bin/openshift_ansible/utils.py
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/usr/bin/env python
-# vim: expandtab:tabstop=4:shiftwidth=4
-
-''' The purpose of this module is to contain small utility functions.
-'''
-
-import re
-
-def normalize_dnsname(name, padding=10):
- ''' The purpose of this function is to return a dns name with zero padding,
- so that it sorts properly (as a human would expect).
-
- Example: name=ex-lrg-node10.prod.rhcloud.com
- Returns: ex-lrg-node0000000010.prod.rhcloud.com
-
- Example Usage:
- sorted(['a3.example.com', 'a10.example.com', 'a1.example.com'],
- key=normalize_dnsname)
-
- Returns: ['a1.example.com', 'a3.example.com', 'a10.example.com']
- '''
- parts = re.split(r'(\d+)', name)
- retval = []
- for part in parts:
- if re.match(r'^\d+$', part):
- retval.append(part.zfill(padding))
- else:
- retval.append(part)
-
- return ''.join(retval)
diff --git a/bin/opscp b/bin/opscp
deleted file mode 100755
index 4bfe166f6..000000000
--- a/bin/opscp
+++ /dev/null
@@ -1,151 +0,0 @@
-#!/bin/bash
-# vim: expandtab:tabstop=4:shiftwidth=4
-
-
-function usage() {
- cat << EOF
-Usage: opscp [OPTIONS] local remote
-
-Options:
- --version show program's version number and exit
- --help show this help message and exit
- -l USER, --user=USER username (OPTIONAL)
- -p PAR, --par=PAR max number of parallel threads (OPTIONAL)
- --outdir=OUTDIR output directory for stdout files (OPTIONAL)
- --errdir=ERRDIR output directory for stderr files (OPTIONAL)
- -c CLUSTER, --cluster CLUSTER
- which cluster to use
- -e ENV, --env ENV which environment to use
- --v3 When working with v3 environments. v2 by default
- -t HOST_TYPE, --host-type HOST_TYPE
- which host type to use
- --list-host-types list all of the host types
- --timeout=TIMEOUT timeout (secs) (0 = no timeout) per host (OPTIONAL)
- -O OPTION, --option=OPTION
- SSH option (OPTIONAL)
- -v, --verbose turn on warning and diagnostic messages (OPTIONAL)
- -A, --askpass Ask for a password (OPTIONAL)
- -x ARGS, --extra-args=ARGS
- Extra command-line arguments, with processing for
- spaces, quotes, and backslashes
- -X ARG, --extra-arg=ARG
- Extra command-line argument
- -r, --recursive recusively copy directories (OPTIONAL)
-
-Example: opscp -t ex-srv -e stg -l irb2 foo.txt /home/irb2/foo.txt
-
-EOF
-}
-
-if [ $# -eq 0 ] || [ "$1" == "--help" ]
-then
- usage
- exit 1
-fi
-
-# See if ohi is installed
-if ! which ohi &>/dev/null ; then
- echo "ERROR: can't find ohi (OpenShift Host Inventory) on your system, please either install the openshift-ansible-bin package, or add openshift-ansible/bin to your path."
-
- exit 10
-fi
-
-PAR=200
-USER=root
-TIMEOUT=0
-ENV=""
-HOST_TYPE=""
-
-while [ $# -gt 0 ] ; do
- case $1 in
- -t|--host-type)
- shift # get past the option
- HOST_TYPE=$1
- shift # get past the value of the option
- ;;
-
- -c)
- shift # get past the option
- CLUSTER=$1
- shift # get past the value of the option
- ;;
-
- -e)
- shift # get past the option
- ENV=$1
- shift # get past the value of the option
- ;;
-
- --v3)
- OPENSHIFT_VERSION="--v3 --ip"
- shift # get past the value of the option
- ;;
-
- --timeout)
- shift # get past the option
- TIMEOUT=$1
- shift # get past the value of the option
- ;;
-
- -p|--par)
- shift # get past the option
- PAR=$1
- shift # get past the value of the option
- ;;
-
- -l|--user)
- shift # get past the option
- USER=$1
- shift # get past the value of the option
- ;;
-
- --list-host-types)
- ohi --list-host-types
- exit 0
- ;;
-
- -h|--hosts|-H|--host|-o)
- echo "ERROR: unknown option $1"
- exit 20
- ;;
-
- *)
- args+=("$1")
- shift
- ;;
- esac
-done
-
-# Get host list from ohi
-CMD=""
-if [ -n "$CLUSTER" ] ; then
- CMD="$CMD -c $CLUSTER"
-fi
-
-if [ -n "$ENV" ] ; then
- CMD="$CMD -e $ENV"
-fi
-
-if [ -n "$HOST_TYPE" ] ; then
- CMD="$CMD -t $HOST_TYPE"
-fi
-
-if [ -n "$OPENSHIFT_VERSION" ] ; then
- CMD="$CMD $OPENSHIFT_VERSION"
-fi
-
-if [ -n "$CMD" ] ; then
- HOSTS="$(ohi $CMD 2>/dev/null)"
- OHI_ECODE=$?
-fi
-
-if [ $OHI_ECODE -ne 0 ] ; then
- echo
- echo "ERROR: ohi failed with exit code $OHI_ECODE"
- echo
- echo "This is usually caused by a bad value passed for host-type or environment."
- echo
- exit 25
-fi
-
-exec pscp.pssh -t $TIMEOUT -p $PAR -l $USER -h <(echo "$HOSTS") "${args[@]}"
diff --git a/bin/opssh b/bin/opssh
deleted file mode 100755
index 0113e7216..000000000
--- a/bin/opssh
+++ /dev/null
@@ -1,154 +0,0 @@
-#!/bin/bash
-# vim: expandtab:tabstop=4:shiftwidth=4
-
-
-function usage() {
- cat << EOF
-Usage: opssh [OPTIONS] command [...]
-
-Options:
- --version show program's version number and exit
- --help show this help message and exit
- -l USER, --user=USER username (OPTIONAL)
- -p PAR, --par=PAR max number of parallel threads (OPTIONAL)
- --outdir=OUTDIR output directory for stdout files (OPTIONAL)
- --errdir=ERRDIR output directory for stderr files (OPTIONAL)
- -c CLUSTER, --cluster CLUSTER
- which cluster to use
- -e ENV, --env ENV which environment to use
- --v3 When working with v3 environments. v2 by default
- -t HOST_TYPE, --host-type HOST_TYPE
- which host type to use
- --list-host-types list all of the host types
- --timeout=TIMEOUT timeout (secs) (0 = no timeout) per host (OPTIONAL)
- -O OPTION, --option=OPTION
- SSH option (OPTIONAL)
- -v, --verbose turn on warning and diagnostic messages (OPTIONAL)
- -A, --askpass Ask for a password (OPTIONAL)
- -x ARGS, --extra-args=ARGS
- Extra command-line arguments, with processing for
- spaces, quotes, and backslashes
- -X ARG, --extra-arg=ARG
- Extra command-line argument
- -i, --inline inline aggregated output and error for each server
- --inline-stdout inline standard output for each server
- -I, --send-input read from standard input and send as input to ssh
- -P, --print print output as we get it
-
-Example: opssh -t ex-srv -e stg -l irb2 --outdir /tmp/foo uptime
-
-EOF
-}
-
-if [ $# -eq 0 ] || [ "$1" == "--help" ]
-then
- usage
- exit 1
-fi
-
-# See if ohi is installed
-if ! which ohi &>/dev/null ; then
- echo "ERROR: can't find ohi (OpenShift Host Inventory) on your system, please either install the openshift-ansible-bin package, or add openshift-ansible/bin to your path."
-
- exit 10
-fi
-
-PAR=200
-USER=root
-TIMEOUT=0
-ENV=""
-HOST_TYPE=""
-
-while [ $# -gt 0 ] ; do
- case $1 in
- -t|--host-type)
- shift # get past the option
- HOST_TYPE=$1
- shift # get past the value of the option
- ;;
-
- -c)
- shift # get past the option
- CLUSTER=$1
- shift # get past the value of the option
- ;;
-
- -e)
- shift # get past the option
- ENV=$1
- shift # get past the value of the option
- ;;
-
- --v3)
- OPENSHIFT_VERSION="--v3 --ip"
- shift # get past the value of the option
- ;;
-
- --timeout)
- shift # get past the option
- TIMEOUT=$1
- shift # get past the value of the option
- ;;
-
- -p|--par)
- shift # get past the option
- PAR=$1
- shift # get past the value of the option
- ;;
-
- -l|--user)
- shift # get past the option
- USER=$1
- shift # get past the value of the option
- ;;
-
- --list-host-types)
- ohi --list-host-types
- exit 0
- ;;
-
- -h|--hosts|-H|--host|-o)
- echo "ERROR: unknown option $1"
- exit 20
- ;;
-
- *)
- args+=("$1")
- shift
- ;;
- esac
-done
-
-# Get host list from ohi
-CMD=""
-if [ -n "$CLUSTER" ] ; then
- CMD="$CMD -c $CLUSTER"
-fi
-
-if [ -n "$ENV" ] ; then
- CMD="$CMD -e $ENV"
-fi
-
-if [ -n "$HOST_TYPE" ] ; then
- CMD="$CMD -t $HOST_TYPE"
-fi
-
-if [ -n "$OPENSHIFT_VERSION" ] ; then
- CMD="$CMD $OPENSHIFT_VERSION"
-fi
-
-if [ -n "$CMD" ] ; then
- HOSTS="$(ohi $CMD 2>/dev/null)"
- OHI_ECODE=$?
-fi
-
-if [ $OHI_ECODE -ne 0 ] ; then
- echo
- echo "ERROR: ohi failed with exit code $OHI_ECODE"
- echo
- echo "This is usually caused by a bad value passed for host-type or environment."
- echo
- exit 25
-fi
-
-exec pssh -t $TIMEOUT -p $PAR -l $USER -h <(echo "$HOSTS") "${args[@]}"
diff --git a/bin/oscp b/bin/oscp
deleted file mode 100755
index 4d3286ed8..000000000
--- a/bin/oscp
+++ /dev/null
@@ -1,184 +0,0 @@
-#!/usr/bin/env python2
-# vim: expandtab:tabstop=4:shiftwidth=4
-
-import argparse
-import traceback
-import sys
-import os
-import re
-import ConfigParser
-
-from openshift_ansible import awsutil
-
-CONFIG_MAIN_SECTION = 'main'
-
-class Oscp(object):
- def __init__(self):
- self.host = None
- self.user = ''
- self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
-
- # Default the config path to /etc
- self.config_path = os.path.join(os.path.sep, 'etc', \
- 'openshift_ansible', \
- 'openshift_ansible.conf')
-
- self.parse_cli_args()
- self.parse_config_file()
-
- # parse host and user
- self.process_host()
-
- self.aws = awsutil.AwsUtil()
-
- # get a dict of host inventory
- if self.args.refresh_cache:
- self.get_hosts(True)
- else:
- self.get_hosts()
-
- if (self.args.src == '' or self.args.dest == '') and not self.args.list:
- self.parser.print_help()
- return
-
- if self.args.debug:
- print self.host
- print self.args
-
- # perform the scp
- if self.args.list:
- self.list_hosts()
- else:
- self.scp()
-
- def parse_config_file(self):
- if os.path.isfile(self.config_path):
- config = ConfigParser.ConfigParser()
- config.read(self.config_path)
-
- def parse_cli_args(self):
- parser = argparse.ArgumentParser(description='OpenShift Online SSH Tool.')
- parser.add_argument('-d', '--debug', default=False,
- action="store_true", help="debug mode")
- parser.add_argument('-v', '--verbose', default=False,
- action="store_true", help="Verbose?")
- parser.add_argument('--refresh-cache', default=False,
- action="store_true", help="Force a refresh on the host cache.")
- parser.add_argument('--list', default=False,
- action="store_true", help="list out hosts")
- parser.add_argument('-r', '--recurse', action='store_true', default=False,
- help='Recursively copy files to or from destination.')
- parser.add_argument('-o', '--ssh_opts', action='store',
- help='options to pass to SSH.\n \
- "-oPort=22,TCPKeepAlive=yes"')
-
- parser.add_argument('src', nargs='?', default='')
- parser.add_argument('dest',nargs='?', default='')
-
- self.args = parser.parse_args()
- self.parser = parser
-
-
- def process_host(self):
- '''Determine host name and user name for SSH.
- '''
- # is the first param passed a valid file?
- if os.path.isfile(self.args.src) or os.path.isdir(self.args.src):
- self.local_src = True
- self.host = self.args.dest
- else:
- self.local_src = False
- self.host = self.args.src
-
- if '@' in self.host:
- re_host = re.compile("(.*@)(.*)(:.*$)")
- else:
- re_host = re.compile("(.*)(:.*$)")
-
- search = re_host.search(self.host)
-
- if search:
- if len(search.groups()) > 2:
- self.user = search.groups()[0]
- self.host = search.groups()[1]
- self.path = search.groups()[2]
- else:
- self.host = search.groups()[0]
- self.path = search.groups()[1]
-
- def get_hosts(self, refresh_cache=False):
- '''Query our host inventory and return a dict where the format '''
- if refresh_cache:
- self.host_inventory = self.aws.get_inventory(['--refresh-cache'])['_meta']['hostvars']
- else:
- self.host_inventory = self.aws.get_inventory()['_meta']['hostvars']
-
- def select_host(self):
- '''select host attempts to match the host specified
- on the command line with a list of hosts.
- '''
- results = None
- if self.host_inventory.has_key(self.host):
- results = (self.host, self.host_inventory[self.host])
- else:
- print "Could not find specified host: %s." % self.host
-
- # default - no results found.
- return results
-
- def list_hosts(self, limit=None):
- '''Function to print out the host inventory.
-
- Takes a single parameter to limit the number of hosts printed.
- '''
- for host_id, server_info in self.host_inventory.items():
- print '{oo_name:<35} {oo_clusterid:<10} {oo_environment:<8} ' \
- '{oo_id:<15} {oo_public_ip:<18} {oo_private_ip:<18}'.format(**server_info)
-
- def scp(self):
- '''scp files to or from a specified host
- '''
- try:
- # shell args start with the program name in position 1
- scp_args = ['/usr/bin/scp']
-
- if self.args.verbose:
- scp_args.append('-v')
-
- if self.args.recurse:
- scp_args.append('-r')
-
- if self.args.ssh_opts:
- for arg in self.args.ssh_opts.split(","):
- scp_args.append("-o%s" % arg)
-
- results = self.select_host()
-
- if self.args.debug: print results
-
- if not results:
- return # early exit, no results
-
- # Assume we have one and only one.
- server_info = results[1]
-
- host_str = "%s%s%s" % (self.user, server_info['oo_public_ip'], self.path)
-
- if self.local_src:
- scp_args.append(self.args.src)
- scp_args.append(host_str)
- else:
- scp_args.append(host_str)
- scp_args.append(self.args.dest)
-
- print "Running: %s\n" % ' '.join(scp_args)
-
- os.execve('/usr/bin/scp', scp_args, os.environ)
- except:
- print traceback.print_exc()
- print sys.exc_info()
-
-
-if __name__ == '__main__':
- oscp = Oscp()
-
diff --git a/bin/ossh b/bin/ossh
deleted file mode 100755
index 0dd2fb741..000000000
--- a/bin/ossh
+++ /dev/null
@@ -1,172 +0,0 @@
-#!/usr/bin/env python2
-# vim: expandtab:tabstop=4:shiftwidth=4
-
-import argparse
-import traceback
-import sys
-import os
-import re
-import ConfigParser
-
-from openshift_ansible import awsutil
-
-CONFIG_MAIN_SECTION = 'main'
-
-class Ossh(object):
- def __init__(self):
- self.user = None
- self.host = None
- self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
-
- # Default the config path to /etc
- self.config_path = os.path.join(os.path.sep, 'etc', \
- 'openshift_ansible', \
- 'openshift_ansible.conf')
-
- self.parse_cli_args()
- self.parse_config_file()
-
- self.aws = awsutil.AwsUtil()
-
- if self.args.refresh_cache:
- self.get_hosts(True)
- else:
- self.get_hosts()
-
- # parse host and user
- self.process_host()
-
- if self.args.host == '' and not self.args.list:
- self.parser.print_help()
- return
-
- if self.args.debug:
- print self.args
-
- # perform the SSH
- if self.args.list:
- self.list_hosts()
- else:
- self.ssh()
-
- def parse_config_file(self):
- if os.path.isfile(self.config_path):
- config = ConfigParser.ConfigParser()
- config.read(self.config_path)
-
- def parse_cli_args(self):
- parser = argparse.ArgumentParser(description='OpenShift Online SSH Tool.')
- parser.add_argument('-d', '--debug', default=False,
- action="store_true", help="debug mode")
- parser.add_argument('-v', '--verbose', default=False,
- action="store_true", help="Verbose?")
- parser.add_argument('--refresh-cache', default=False,
- action="store_true", help="Force a refresh on the host cache.")
- parser.add_argument('--list', default=False,
- action="store_true", help="list out hosts")
- parser.add_argument('-c', '--command', action='store',
- help='Command to run on remote host')
- parser.add_argument('-l', '--login_name', action='store',
- help='User in which to ssh as')
-
- parser.add_argument('-o', '--ssh_opts', action='store',
- help='options to pass to SSH.\n \
- "-oForwardX11=yes,TCPKeepAlive=yes"')
- parser.add_argument('-A', default=False, action="store_true",
- help='Forward authentication agent')
- parser.add_argument('host', nargs='?', default='')
-
- self.args = parser.parse_args()
- self.parser = parser
-
-
- def process_host(self):
- '''Determine host name and user name for SSH.
- '''
-
- parts = self.args.host.split('@')
-
- # parse username if passed
- if len(parts) > 1:
- self.user = parts[0]
- self.host = parts[1]
- else:
- self.host = parts[0]
-
- if self.args.login_name:
- self.user = self.args.login_name
-
-
- def get_hosts(self, refresh_cache=False):
- '''Query our host inventory and return a dict where the format '''
- if refresh_cache:
- self.host_inventory = self.aws.get_inventory(['--refresh-cache'])['_meta']['hostvars']
- else:
- self.host_inventory = self.aws.get_inventory()['_meta']['hostvars']
-
- def select_host(self):
- '''select host attempts to match the host specified
- on the command line with a list of hosts.
- '''
- results = None
- if self.host_inventory.has_key(self.host):
- results = (self.host, self.host_inventory[self.host])
- else:
- print "Could not find specified host: %s." % self.host
-
- # default - no results found.
- return results
-
- def list_hosts(self, limit=None):
- '''Function to print out the host inventory.
-
- Takes a single parameter to limit the number of hosts printed.
- '''
- for host_id, server_info in self.host_inventory.items():
- print '{oo_name:<35} {oo_clusterid:<10} {oo_environment:<8} ' \
- '{oo_id:<15} {oo_public_ip:<18} {oo_private_ip:<18}'.format(**server_info)
-
- def ssh(self):
- '''SSH to a specified host
- '''
- try:
- # shell args start with the program name in position 1
- ssh_args = ['/usr/bin/ssh']
-
- if self.user:
- ssh_args.append('-l%s' % self.user)
-
- if self.args.A:
- ssh_args.append('-A')
-
- if self.args.verbose:
- ssh_args.append('-vvv')
-
- if self.args.ssh_opts:
- for arg in self.args.ssh_opts.split(","):
- ssh_args.append("-o%s" % arg)
-
- results = self.select_host()
- if not results:
- return # early exit, no results
-
- # Assume we have one and only one.
- server_info = results[1]
-
- ssh_args.append(server_info['oo_public_ip'])
-
- #last argument
- if self.args.command:
- ssh_args.append("%s" % self.args.command)
-
- print "Running: %s\n" % ' '.join(ssh_args)
-
- os.execve('/usr/bin/ssh', ssh_args, os.environ)
- except:
- print traceback.print_exc()
- print sys.exc_info()
-
-
-if __name__ == '__main__':
- ossh = Ossh()
-
diff --git a/bin/ossh_bash_completion b/bin/ossh_bash_completion
deleted file mode 100755
index dcbde3e51..000000000
--- a/bin/ossh_bash_completion
+++ /dev/null
@@ -1,52 +0,0 @@
-__ossh_known_hosts(){
- if python -c 'import openshift_ansible' &>/dev/null; then
- /usr/bin/python -c 'from openshift_ansible import multi_inventory; m=multi_inventory.MultiInventory(); m.run(); z=m.result; print "\n".join([name for name in z["_meta"]["hostvars"].keys()])'
-
- elif [[ -f /dev/shm/.ansible/tmp/multi_inventory.cache ]]; then
- /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join([name for name in z["_meta"]["hostvars"].keys()])'
-
- elif [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then
- /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join([name for name in z["_meta"]["hostvars"].keys()])'
-
- fi
-}
-
-_ossh()
-{
- local cur prev known_hosts
- COMPREPLY=()
- cur="${COMP_WORDS[COMP_CWORD]}"
- prev="${COMP_WORDS[COMP_CWORD-1]}"
- known_hosts="$(__ossh_known_hosts)"
- COMPREPLY=( $(compgen -W "${known_hosts}" -- ${cur}))
-
- return 0
-}
-complete -F _ossh ossh oscp
-
-__opssh_known_hosts(){
- if python -c 'import openshift_ansible' &>/dev/null; then
- /usr/bin/python -c 'from openshift_ansible.multi_inventory import MultiInventory; m=MultiInventory(); m.run(); print "\n".join(set(["%s" % (host["oo_hosttype"]) for dns, host in m.result["_meta"]["hostvars"].items() if "oo_hosttype" in host]))'
-
- elif [[ -f /dev/shm/.ansible/tmp/multi_inventory.cache ]]; then
- /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(set(["%s" % (host["oo_hosttype"]) for dns, host in z["_meta"]["hostvars"].items() if "oo_hosttype" in host]))'
-
- elif [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then
- /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("/dev/shm/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(set(["%s" % (host["oo_hosttype"]) for dns, host in z["_meta"]["hostvars"].items() if "oo_hosttype" in host]))'
-
- fi
-}
-
-_opssh()
-{
- local cur prev known_hosts
- COMPREPLY=()
- cur="${COMP_WORDS[COMP_CWORD]}"
- prev="${COMP_WORDS[COMP_CWORD-1]}"
- known_hosts="$(__opssh_known_hosts)"
- COMPREPLY=( $(compgen -W "${known_hosts}" -- ${cur}))
-
- return 0
-}
-complete -F _opssh opssh
-
diff --git a/bin/ossh_zsh_completion b/bin/ossh_zsh_completion
deleted file mode 100644
index 94ea61dab..000000000
--- a/bin/ossh_zsh_completion
+++ /dev/null
@@ -1,31 +0,0 @@
-#compdef ossh oscp
-
-_ossh_known_hosts(){
- if python -c 'import openshift_ansible' &>/dev/null; then
- print $(/usr/bin/python -c 'from openshift_ansible import multi_inventory; m=multi_inventory.MultiInventory(); m.run(); z=m.result; print "\n".join([name for name in z["_meta"]["hostvars"].keys()])')
-
- elif [[ -f /dev/shm/.ansible/tmp/multi_inventory.cache ]]; then
- print $(/usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join([name for name in z["_meta"]["hostvars"].keys() ])')
-
- elif [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then
- print $(/usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join([name for name in z["_meta"]["hostvars"].keys() ])')
-
- fi
-
-}
-_ossh(){
- local curcontext="$curcontext" state line
- typeset -A opt_args
-
-#_arguments "*:Hosts:_ossh_known_hosts"
- _arguments -s : \
- "*:hosts:->hosts"
-
- case "$state" in
- hosts)
- _values 'hosts' $(_ossh_known_hosts)
- ;;
- esac
-
-}
-_ossh "$@"
diff --git a/bin/zsh_functions/_ossh b/bin/zsh_functions/_ossh
deleted file mode 100644
index 65979c58a..000000000
--- a/bin/zsh_functions/_ossh
+++ /dev/null
@@ -1,49 +0,0 @@
-#compdef ossh oscp
-
-_ossh_known_hosts(){
- if [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then
- print $(/usr/bin/python -c 'import json,os; z = json.loads(open("%s"%os.path.expanduser("~/.ansible/tmp/multi_inventory.cache")).read()); print "\n".join(["%s.%s" % (host["oo_name"],host["oo_environment"]) for dns, host in z["_meta"]["hostvars"].items()])')
- fi
-}
-
-_ossh(){
- local curcontext="$curcontext" state line
- typeset -A opt_args
-
- common_arguments=(
- '(- *)'{-h,--help}'[show help]' \
- {-v,--verbose}'[enable verbose]' \
- {-d,--debug}'[debug mode]' \
- {-l,--login_name}+'[login name]:login_name' \
- {-c,--command}+'[command to run on remote host]:command' \
- {-o,--ssh_opts}+'[SSH Options to pass to SSH]:ssh options' \
- {-e,--env}+'[environtment to use]:environment:->env' \
- '--list[list out hosts]' \
- ':OP Hosts:->oo_hosts'
- )
-
- case "$service" in
- ossh)
- _arguments -C -s \
- "$common_arguments[@]" \
- ;;
-
- oscp)
- _arguments -C -s \
- "$common_arguments[@]" \
- {-r,--recurse}'[Recursive copy]' \
- ':file:_files'
- ;;
- esac
-
- case "$state" in
- oo_hosts)
- _values 'oo_hosts' $(_ossh_known_hosts)
- ;;
- env)
- _values 'environment' ops int stg prod
- ;;
- esac
-}
-
-_ossh "$@"
diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py
index e7409bf22..b81c3bf7f 100644
--- a/filter_plugins/oo_filters.py
+++ b/filter_plugins/oo_filters.py
@@ -306,7 +306,7 @@ class FilterModule(object):
return string.split(separator)
@staticmethod
- def oo_haproxy_backend_masters(hosts):
+ def oo_haproxy_backend_masters(hosts, port):
""" This takes an array of dicts and returns an array of dicts
to be used as a backend for the haproxy role
"""
@@ -314,8 +314,7 @@ class FilterModule(object):
for idx, host_info in enumerate(hosts):
server = dict(name="master%s" % idx)
server_ip = host_info['openshift']['common']['ip']
- server_port = host_info['openshift']['master']['api_port']
- server['address'] = "%s:%s" % (server_ip, server_port)
+ server['address'] = "%s:%s" % (server_ip, port)
server['opts'] = 'check'
servers.append(server)
return servers
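To make the signature change above concrete, here is a standalone sketch of the filter with its new explicit `port` argument; the sample host facts below are invented.

```
# Standalone sketch of oo_haproxy_backend_masters; sample host facts are invented.
def oo_haproxy_backend_masters(hosts, port):
    servers = []
    for idx, host_info in enumerate(hosts):
        server = dict(name="master%s" % idx)
        server_ip = host_info['openshift']['common']['ip']
        server['address'] = "%s:%s" % (server_ip, port)   # port now passed in explicitly
        server['opts'] = 'check'
        servers.append(server)
    return servers

sample = [{'openshift': {'common': {'ip': '192.0.2.10'}}},
          {'openshift': {'common': {'ip': '192.0.2.11'}}}]
print(oo_haproxy_backend_masters(sample, 8443))
# [{'name': 'master0', 'address': '192.0.2.10:8443', 'opts': 'check'}, ...]
```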
diff --git a/filter_plugins/openshift_master.py b/filter_plugins/openshift_master.py
index bb2f5ba7a..b3f284a8e 100644
--- a/filter_plugins/openshift_master.py
+++ b/filter_plugins/openshift_master.py
@@ -9,14 +9,19 @@ import sys
import yaml
from ansible import errors
+from distutils.version import LooseVersion
# pylint: disable=no-name-in-module,import-error
try:
- # ansible-2.0
- from ansible.runner.filter_plugins.core import bool as ansible_bool
+ # ansible-2.1
+ from ansible.plugins.filter.core import to_bool as ansible_bool
except ImportError:
- # ansible-1.9.x
- from ansible.plugins.filter.core import bool as ansible_bool
+ try:
+ #ansible-2.0.x
+ from ansible.runner.filter_plugins.core import bool as ansible_bool
+ except ImportError:
+ # ansible-1.9.x
+ from ansible.plugins.filter.core import bool as ansible_bool
class IdentityProviderBase(object):
""" IdentityProviderBase
@@ -77,10 +82,19 @@ class IdentityProviderBase(object):
self._allow_additional = True
@staticmethod
- def validate_idp_list(idp_list):
+ def validate_idp_list(idp_list, openshift_version, deployment_type):
''' validates a list of idps '''
login_providers = [x.name for x in idp_list if x.login]
+
+ multiple_logins_unsupported = False
if len(login_providers) > 1:
+ if deployment_type in ['enterprise', 'online', 'atomic-enterprise', 'openshift-enterprise']:
+ if LooseVersion(openshift_version) < LooseVersion('3.2'):
+ multiple_logins_unsupported = True
+ if deployment_type in ['origin']:
+ if LooseVersion(openshift_version) < LooseVersion('1.2'):
+ multiple_logins_unsupported = True
+ if multiple_logins_unsupported:
raise errors.AnsibleFilterError("|failed multiple providers are "
"not allowed for login. login "
"providers: {0}".format(', '.join(login_providers)))
@@ -461,7 +475,7 @@ class FilterModule(object):
''' Custom ansible filters for use by the openshift_master role'''
@staticmethod
- def translate_idps(idps, api_version):
+ def translate_idps(idps, api_version, openshift_version, deployment_type):
''' Translates a list of dictionaries into a valid identityProviders config '''
idp_list = []
@@ -478,7 +492,7 @@ class FilterModule(object):
idp_list.append(idp_inst)
- IdentityProviderBase.validate_idp_list(idp_list)
+ IdentityProviderBase.validate_idp_list(idp_list, openshift_version, deployment_type)
return yaml.safe_dump([idp.to_dict() for idp in idp_list], default_flow_style=False)
@staticmethod
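A short sketch of the version gate added above; the deployment types and version cut-offs mirror the diff, while the sample calls are illustrative only.

```
# Sketch of the gating logic added to validate_idp_list above.
# distutils.LooseVersion is used here to mirror the diff (removed in Python 3.12).
from distutils.version import LooseVersion

def multiple_logins_unsupported(openshift_version, deployment_type):
    if deployment_type in ['enterprise', 'online', 'atomic-enterprise', 'openshift-enterprise']:
        return LooseVersion(openshift_version) < LooseVersion('3.2')
    if deployment_type == 'origin':
        return LooseVersion(openshift_version) < LooseVersion('1.2')
    return False

print(multiple_logins_unsupported('3.1', 'openshift-enterprise'))  # True
print(multiple_logins_unsupported('1.2', 'origin'))                # False
```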
diff --git a/git/pylint.sh b/git/pylint.sh
index f29c055dc..3acf9cc8c 100755
--- a/git/pylint.sh
+++ b/git/pylint.sh
@@ -7,6 +7,7 @@ ANSIBLE_UPSTREAM_FILES=(
'inventory/libvirt/hosts/libvirt_generic.py'
'inventory/openstack/hosts/nova.py'
'lookup_plugins/sequence.py'
+ 'playbooks/gce/openshift-cluster/library/gce.py'
)
OLDREV=$1
diff --git a/inventory/byo/hosts.aep.example b/inventory/byo/hosts.aep.example
index c31d39d59..1e050f0f6 100644
--- a/inventory/byo/hosts.aep.example
+++ b/inventory/byo/hosts.aep.example
@@ -72,10 +72,8 @@ deployment_type=atomic-enterprise
# Disable pushing to dockerhub
#openshift_docker_disable_push_dockerhub=True
# Items added, as is, to end of /etc/sysconfig/docker OPTIONS
+# Default value: "--log-driver=json-file --log-opt max-size=50m"
#openshift_docker_options="-l warn --ipv6=false"
-# Deprecated methods to set --log-driver and --log-opts flags, use openshift_docker_options instead
-#openshift_docker_log_driver=json
-#openshift_docker_log_options=["tag=mailer"]
# Alternate image format string. If you're not modifying the format string and
# only need to inject your own registry you may want to consider
@@ -394,20 +392,23 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_generate_no_proxy_hosts=True
#
# These options configure the BuildDefaults admission controller which injects
-# environment variables into Builds. These values will default to their
-# corresponding values above but you may set them independently. See BuildDefaults
+# environment variables into Builds. These values will default to the global proxy
+# config values. You only need to set these if they differ from the global settings
+# above. See BuildDefaults
# documentation at https://docs.openshift.org/latest/admin_guide/build_defaults_overrides.html
-#openshift_builddefaults_http_proxy=openshift_http_proxy
-#openshift_builddefaults_https_proxy=openshift_https_proxy
-#openshift_builddefaults_no_proxy=openshift_noproxy
-#openshift_builddefaults_git_http_proxy=openshift_builddefaults_http_proxy
-#openshift_builddefaults_git_https_proxy=openshift_builddefaults_https_proxy
+#openshift_builddefaults_http_proxy=http://USER:PASSWORD@HOST:PORT
+#openshift_builddefaults_https_proxy=https://USER:PASSWORD@HOST:PORT
+#openshift_builddefaults_no_proxy=build_defaults
+#openshift_builddefaults_git_http_proxy=http://USER:PASSWORD@HOST:PORT
+#openshift_builddefaults_git_https_proxy=https://USER:PASSWORD@HOST:PORT
# Or you may optionally define your own serialized as json
#openshift_builddefaults_json='{"BuildDefaults":{"configuration":{"apiVersion":"v1","env":[{"name":"HTTP_PROXY","value":"http://proxy.example.com.redhat.com:3128"},{"name":"NO_PROXY","value":"ose3-master.example.com"}],"gitHTTPProxy":"http://proxy.example.com:3128","kind":"BuildDefaultsConfig"}}}'
# masterConfig.volumeConfig.dynamicProvisioningEnabled, configurable as of 1.2/3.2, enabled by default
#openshift_master_dynamic_provisioning_enabled=False
+# Configure usage of openshift_clock role.
+#openshift_clock_enabled=true
# host group for masters
[masters]
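As an aside, the serialized `openshift_builddefaults_json` value referenced in the inventory comments above can be generated with a small Python sketch; the proxy URL and hostname below are the placeholders from the example line.

```
# Illustrative only: build the openshift_builddefaults_json value shown in the
# inventory example above. Proxy URL and hostnames are placeholders.
import json

build_defaults = {
    "BuildDefaults": {
        "configuration": {
            "apiVersion": "v1",
            "kind": "BuildDefaultsConfig",
            "gitHTTPProxy": "http://proxy.example.com:3128",
            "env": [
                {"name": "HTTP_PROXY", "value": "http://proxy.example.com:3128"},
                {"name": "NO_PROXY", "value": "ose3-master.example.com"},
            ],
        }
    }
}
print("openshift_builddefaults_json='%s'" % json.dumps(build_defaults))
```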
diff --git a/inventory/byo/hosts.origin.example b/inventory/byo/hosts.origin.example
index 3a7842a33..104c64c23 100644
--- a/inventory/byo/hosts.origin.example
+++ b/inventory/byo/hosts.origin.example
@@ -73,10 +73,8 @@ deployment_type=origin
# Disable pushing to dockerhub
#openshift_docker_disable_push_dockerhub=True
# Items added, as is, to end of /etc/sysconfig/docker OPTIONS
+# Default value: "--log-driver=json-file --log-opt max-size=50m"
#openshift_docker_options="-l warn --ipv6=false"
-# Deprecated methods to set --log-driver and --log-opts flags, use openshift_docker_options instead
-#openshift_docker_log_driver=json
-#openshift_docker_log_options=["tag=mailer"]
# Alternate image format string. If you're not modifying the format string and
# only need to inject your own registry you may want to consider
@@ -399,20 +397,24 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_generate_no_proxy_hosts=True
#
# These options configure the BuildDefaults admission controller which injects
-# environment variables into Builds. These values will default to their
-# corresponding values above but you may set them independently. See BuildDefaults
+# environment variables into Builds. These values will default to the global proxy
+# config values. You only need to set these if they differ from the global settings
+# above. See BuildDefaults
# documentation at https://docs.openshift.org/latest/admin_guide/build_defaults_overrides.html
-#openshift_builddefaults_http_proxy=openshift_http_proxy
-#openshift_builddefaults_https_proxy=openshift_https_proxy
-#openshift_builddefaults_no_proxy=openshift_noproxy
-#openshift_builddefaults_git_http_proxy=openshift_builddefaults_http_proxy
-#openshift_builddefaults_git_https_proxy=openshift_builddefaults_https_proxy
+#openshift_builddefaults_http_proxy=http://USER:PASSWORD@HOST:PORT
+#openshift_builddefaults_https_proxy=https://USER:PASSWORD@HOST:PORT
+#openshift_builddefaults_no_proxy=build_defaults
+#openshift_builddefaults_git_http_proxy=http://USER:PASSWORD@HOST:PORT
+#openshift_builddefaults_git_https_proxy=https://USER:PASSWORD@HOST:PORT
# Or you may optionally define your own serialized as json
#openshift_builddefaults_json='{"BuildDefaults":{"configuration":{"apiVersion":"v1","env":[{"name":"HTTP_PROXY","value":"http://proxy.example.com.redhat.com:3128"},{"name":"NO_PROXY","value":"ose3-master.example.com"}],"gitHTTPProxy":"http://proxy.example.com:3128","kind":"BuildDefaultsConfig"}}}'
# masterConfig.volumeConfig.dynamicProvisioningEnabled, configurable as of 1.2/3.2, enabled by default
#openshift_master_dynamic_provisioning_enabled=False
+# Configure usage of openshift_clock role.
+#openshift_clock_enabled=true
+
# host group for masters
[masters]
ose3-master[1:3]-ansible.test.example.com
diff --git a/inventory/byo/hosts.ose.example b/inventory/byo/hosts.ose.example
index cb46c352e..ebac28fc6 100644
--- a/inventory/byo/hosts.ose.example
+++ b/inventory/byo/hosts.ose.example
@@ -72,11 +72,8 @@ deployment_type=openshift-enterprise
# Disable pushing to dockerhub
#openshift_docker_disable_push_dockerhub=True
# Items added, as is, to end of /etc/sysconfig/docker OPTIONS
+# Default value: "--log-driver=json-file --log-opt max-size=50m"
#openshift_docker_options="-l warn --ipv6=false"
-# Deprecated methods to set --log-driver and --log-opts flags, use openshift_docker_options instead
-#openshift_docker_log_driver=json
-#openshift_docker_log_options=["tag=mailer"]
-
# Alternate image format string. If you're not modifying the format string and
# only need to inject your own registry you may want to consider
@@ -395,20 +392,24 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_generate_no_proxy_hosts=True
#
# These options configure the BuildDefaults admission controller which injects
-# environment variables into Builds. These values will default to their
-# corresponding values above but you may set them independently. See BuildDefaults
+# environment variables into Builds. These values will default to the global proxy
+# config values. You only need to set these if they differ from the global settings
+# above. See BuildDefaults
# documentation at https://docs.openshift.org/latest/admin_guide/build_defaults_overrides.html
-#openshift_builddefaults_http_proxy=openshift_http_proxy
-#openshift_builddefaults_https_proxy=openshift_https_proxy
-#openshift_builddefaults_no_proxy=openshift_noproxy
-#openshift_builddefaults_git_http_proxy=openshift_builddefaults_http_proxy
-#openshift_builddefaults_git_https_proxy=openshift_builddefaults_https_proxy
+#openshift_builddefaults_http_proxy=http://USER:PASSWORD@HOST:PORT
+#openshift_builddefaults_https_proxy=https://USER:PASSWORD@HOST:PORT
+#openshift_builddefaults_no_proxy=build_defaults
+#openshift_builddefaults_git_http_proxy=http://USER:PASSWORD@HOST:PORT
+#openshift_builddefaults_git_https_proxy=https://USER:PASSWORD@HOST:PORT
# Or you may optionally define your own serialized as json
#openshift_builddefaults_json='{"BuildDefaults":{"configuration":{"apiVersion":"v1","env":[{"name":"HTTP_PROXY","value":"http://proxy.example.com.redhat.com:3128"},{"name":"NO_PROXY","value":"ose3-master.example.com"}],"gitHTTPProxy":"http://proxy.example.com:3128","kind":"BuildDefaultsConfig"}}}'
# masterConfig.volumeConfig.dynamicProvisioningEnabled, configurable as of 1.2/3.2, enabled by default
#openshift_master_dynamic_provisioning_enabled=False
+# Configure usage of openshift_clock role.
+#openshift_clock_enabled=true
+
# host group for masters
[masters]
ose3-master[1:3]-ansible.test.example.com
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index 68c57ab9c..b44f4e28a 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -5,7 +5,7 @@
}
Name: openshift-ansible
-Version: 3.0.90
+Version: 3.3.0
Release: 1%{?dist}
Summary: Openshift and Atomic Enterprise Ansible
License: ASL 2.0
@@ -38,6 +38,7 @@ popd
mkdir -p %{buildroot}%{_datadir}/%{name}
mkdir -p %{buildroot}%{_datadir}/ansible/%{name}
mkdir -p %{buildroot}%{_datadir}/ansible_plugins
+cp -rp library %{buildroot}%{_datadir}/ansible/%{name}/
# openshift-ansible-bin install
mkdir -p %{buildroot}%{_bindir}
@@ -78,6 +79,8 @@ popd
%files
%doc LICENSE.md README*
%dir %{_datadir}/ansible/%{name}
+%{_datadir}/ansible/%{name}/library
+%ghost %{_datadir}/ansible/%{name}/playbooks/common/openshift-master/library.rpmmoved
# ----------------------------------------------------------------------------------
# openshift-ansible-docs subpackage
@@ -110,11 +113,30 @@ BuildArch: noarch
%files playbooks
%{_datadir}/ansible/%{name}/playbooks
+# We moved playbooks/common/openshift-master/library up to the top and replaced
+# it with a symlink. RPM doesn't handle this so we have to do some pre-transaction
+# magic. See https://fedoraproject.org/wiki/Packaging:Directory_Replacement
+%pretrans playbooks -p <lua>
+-- Define the path to directory being replaced below.
+-- DO NOT add a trailing slash at the end.
+path = "/usr/share/ansible/openshift-ansible/playbooks/common/openshift-master/library"
+st = posix.stat(path)
+if st and st.type == "directory" then
+ status = os.rename(path, path .. ".rpmmoved")
+ if not status then
+ suffix = 0
+ while not status do
+ suffix = suffix + 1
+ status = os.rename(path .. ".rpmmoved", path .. ".rpmmoved." .. suffix)
+ end
+ os.rename(path, path .. ".rpmmoved")
+ end
+end
+%package roles
# ----------------------------------------------------------------------------------
# openshift-ansible-roles subpackage
# ----------------------------------------------------------------------------------
-%package roles
Summary: Openshift and Atomic Enterprise Ansible roles
Requires: %{name} = %{version}
Requires: %{name}-lookup-plugins = %{version}
@@ -183,6 +205,112 @@ Atomic OpenShift Utilities includes
%changelog
+* Thu Jun 09 2016 Scott Dodson <sdodson@redhat.com> 3.3.0-1
+- Restore mistakenly reverted code. (dgoodwin@redhat.com)
+- Add openshift_loadbalancer_facts role to set lb facts prior to running
+ dependencies. (abutcher@redhat.com)
+- Bug 1338726 - never abort install if the latest version of docker is already
+ installed (bleanhar@redhat.com)
+- Preserve proxy config if it's undefined (sdodson@redhat.com)
+- At least backup things (sdodson@redhat.com)
+- Use unique play names to make things easier to debug (sdodson@redhat.com)
+- Ansible 2.1 support. (abutcher@redhat.com)
+- add skydns port 8053 to openstack master sec group (jawed.khelil@amadeus.com)
+- fix dns openstack flavor instead of openshift flavor
+ (jawed.khelil@amadeus.com)
+- Fix Docker 1.10 problems with empty tags and trailing : (dgoodwin@redhat.com)
+- ensure htpasswd file exists (tob@butter.sh)
+- Docker 1.10 Upgrade (dgoodwin@redhat.com)
+- Add flag to manage htpasswd, or not. (tob@butter.sh)
+
+* Mon Jun 06 2016 Scott Dodson <sdodson@redhat.com> 3.0.97-1
+- Only run node specific bits on nodes (sdodson@redhat.com)
+- Update main.yaml (detiber@gmail.com)
+- Hardcoded values in "launch_instances" - issue #1970 (daniel@dumdan.com)
+- XPAAS v1.3.1 content for Origin 1.1 / OSE 3.1 (sdodson@redhat.com)
+- XPAAS v1.3.1 release for Origin 1.2 / OSE 3.2 (sdodson@redhat.com)
+- Configure default docker logging options. (abutcher@redhat.com)
+- Run rhel_subscribe on l_oo_all_hosts rather than all (sdodson@redhat.com)
+- Fix error with stopping services that may not exist. (dgoodwin@redhat.com)
+- Add haproxy_frontend_port to vars for openshift-loadbalancer.
+ (abutcher@redhat.com)
+- Move os_firewall_allow from defaults to role dependencies.
+ (abutcher@redhat.com)
+- Ensure registry url evaluated when creating router. (abutcher@redhat.com)
+- Document protocol in readme aws. (abutcher@redhat.com)
+- Revert openshift-certificates changes. (abutcher@redhat.com)
+- wait for metrics-deployer to complete (need to configure nodes before hosted
+  services) (you@example.com)
+- switch to using sig release packages (jdetiber@redhat.com)
+- temporarily disable gpg checking until we have a way to cleanly enable it
+ (jdetiber@redhat.com)
+- Switch to using CentOS SIG repos for Origin installs (jdetiber@redhat.com)
+- Separate master and haproxy config playbooks. (abutcher@redhat.com)
+- Cleanup bin, test and roles/openshift_ansible_inventory following move to
+ openshift-tools (abutcher@redhat.com)
+- Catch more uninstall targets (sdodson@redhat.com)
+- Adding openshift_clock parameters to example inventory files
+ (jstuever@redhat.com)
+- Enable openshift_clock role for openshift_master, openshift_node, and
+ openshift_etcd (jstuever@redhat.com)
+- Add openshift_clock role to manage system clocks (jstuever@redhat.com)
+- Allow clock role in openshift_facts (jstuever@redhat.com)
+- Consolidate ca/master/node certificates roles into openshift_certificates.
+ (abutcher@redhat.com)
+- allow for overriding dns_flavor for openstack provider (jdetiber@redhat.com)
+- add user-data file back to openstack provisioner (jdetiber@redhat.com)
+- g_all_hosts with templated with_items causes errors with ansible 1.9.4 under
+ some conditions (jdetiber@redhat.com)
+- openstack_fixes (jdetiber@redhat.com)
+- libvirt_fixes (jdetiber@redhat.com)
+- gce fixes (jdetiber@redhat.com)
+- aws provider fixes (jdetiber@redhat.com)
+- Call evaluate_groups from update_repos_and_packages (jdetiber@redhat.com)
+
+* Thu May 26 2016 Scott Dodson <sdodson@redhat.com> 3.0.94-1
+- Use grep to decide when to add our comment (sdodson@redhat.com)
+
+* Tue May 24 2016 Troy Dawson <tdawson@redhat.com> 3.0.93-1
+- Fixup spec file (tdawson@redhat.com)
+
+* Tue May 24 2016 Troy Dawson <tdawson@redhat.com> 3.0.92-1
+- Conditionally bind mount /usr/bin/docker-current when it is present (#1941)
+ (sdodson@redhat.com)
+
+* Tue May 24 2016 Troy Dawson <tdawson@redhat.com> 3.0.91-1
+- Removed the echo line and replaced it with an inline comment to keep 99-origin-
+  dns.sh from adding a new line to /etc/resolv.conf every time the
+  NetworkManager dispatcher script is executed. (jnordell@redhat.com)
+- Extend multiple login provider check to include origin. (abutcher@redhat.com)
+- Allow multiple login providers post 3.2. (abutcher@redhat.com)
+- Make rhel_subscribe role able to subscribe for OSE 3.2 (lhuard@amadeus.com)
+- Ensure yum-utils installed. (abutcher@redhat.com)
+- Remove newline from docker_options template string. (abutcher@redhat.com)
+- Use systemctl restart docker instead of ansible service.
+ (dgoodwin@redhat.com)
+- Use cluster hostname while generating certificate on the master nodes
+ (vishal.patil@nuagenetworks.net)
+- Fix playbooks/openshift-master/library move to symlink (sdodson@redhat.com)
+- Task "Update router image to current version" failed, if router not in
+ default namespace (jkroepke@users.noreply.github.com)
+- docker-current was missing from the containerized atomic-openshift-
+ node.service file (maci.stgn@gmail.com)
+- fixed issue with blank spaces instead of commas as variable template separators
+  (j.david.nieto@gmail.com)
+- Refactor where we compute no_proxy hostnames (sdodson@redhat.com)
+- Fix for ansible v2 (sdodson@redhat.com)
+- Fix rhel_subscribe (sdodson@redhat.com)
+- remove interpolated g_all_hosts with_items arg from upgrade playbooks
+ (cboggs@rallydev.com)
+- Set openshift.common.hostname early in playbook execution.
+ (abutcher@redhat.com)
+- Fix 'recursive loop detected in template string' for upgrading variable.
+ (abutcher@redhat.com)
+- a-o-i: No proxy questions for 3.0/3.1 (smunilla@redhat.com)
+- Fix minor upgrades in 3.1 (sdodson@redhat.com)
+- Don't pull cli image when we're not containerized (sdodson@redhat.com)
+- Check consumed pools prior to attaching. (abutcher@redhat.com)
+
* Mon May 16 2016 Troy Dawson <tdawson@redhat.com> 3.0.90-1
- Fixes for openshift_docker_hosted_registry_insecure var.
(dgoodwin@redhat.com)
diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml
index a407e326b..dbf924683 100644
--- a/playbooks/adhoc/uninstall.yml
+++ b/playbooks/adhoc/uninstall.yml
@@ -25,11 +25,6 @@
- set_fact:
is_containerized: "{{ is_atomic or containerized | default(false) | bool }}"
- - name: Remove br0 interface
- shell: ovs-vsctl del-br br0
- changed_when: False
- failed_when: False
-
- name: Stop services
service: name={{ item }} state=stopped
with_items:
@@ -108,82 +103,12 @@
- tuned-profiles-openshift-node
- tuned-profiles-origin-node
- - name: Remove linux interfaces
- shell: ip link del "{{ item }}"
- changed_when: False
- failed_when: False
- with_items:
- - lbr0
- - vlinuxbr
- - vovsbr
-
- shell: systemctl reset-failed
changed_when: False
- shell: systemctl daemon-reload
changed_when: False
- - shell: find /var/lib/origin/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
- changed_when: False
-
- - shell: find /var/lib/atomic-enterprise/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
- changed_when: False
-
- - shell: find /var/lib/openshift/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
- changed_when: False
-
- - shell: docker rm -f "{{ item }}"-master "{{ item }}"-node
- changed_when: False
- failed_when: False
- with_items:
- - openshift-enterprise
- - atomic-enterprise
- - origin
-
- - shell: docker ps -a | grep Exited | egrep "{{ item }}" | awk '{print $1}'
- changed_when: False
- failed_when: False
- register: exited_containers_to_delete
- with_items:
- - aep3.*/aep
- - aep3.*/node
- - aep3.*/openvswitch
- - openshift3/ose
- - openshift3/node
- - openshift3/openvswitch
- - openshift/origin
-
- - shell: "docker rm {{ item.stdout_lines | join(' ') }}"
- changed_when: False
- failed_when: False
- with_items: "{{ exited_containers_to_delete.results }}"
-
- - shell: docker images | egrep {{ item }} | awk '{ print $3 }'
- changed_when: False
- failed_when: False
- register: images_to_delete
- with_items:
- - registry\.access\..*redhat\.com/openshift3
- - registry\.access\..*redhat\.com/aep3
- - registry\.qe\.openshift\.com/.*
- - registry\.access\..*redhat\.com/rhel7/etcd
- - docker.io/openshift
-
- - shell: "docker rmi -f {{ item.stdout_lines | join(' ') }}"
- changed_when: False
- failed_when: False
- with_items: "{{ images_to_delete.results }}"
-
- - name: Remove sdn drop files
- file:
- path: /run/openshift-sdn
- state: absent
-
- - name: restart docker
- service:
- name: docker
- state: restarted
-
- name: Remove remaining files
file: path={{ item }} state=absent
with_items:
@@ -199,6 +124,12 @@
- /etc/systemd/system/atomic-openshift-master-api.service
- /etc/systemd/system/atomic-openshift-master-controllers.service
- /etc/systemd/system/atomic-openshift-node.service
+ - /etc/systemd/system/atomic-openshift-node-dep.service
+ - /etc/systemd/system/origin-master.service
+ - /etc/systemd/system/origin-master-api.service
+ - /etc/systemd/system/origin-master-controllers.service
+ - /etc/systemd/system/origin-node.service
+ - /etc/systemd/system/origin-node-dep.service
- /etc/systemd/system/etcd_container.service
- /etc/systemd/system/openvswitch.service
- /etc/sysconfig/atomic-enterprise-master
@@ -209,8 +140,15 @@
- /etc/sysconfig/atomic-openshift-master-api
- /etc/sysconfig/atomic-openshift-master-controllers
- /etc/sysconfig/atomic-openshift-node
+ - /etc/sysconfig/atomic-openshift-node-dep
+ - /etc/sysconfig/origin-master
+ - /etc/sysconfig/origin-master-api
+ - /etc/sysconfig/origin-master-controllers
+ - /etc/sysconfig/origin-node
+ - /etc/sysconfig/origin-node-dep
- /etc/sysconfig/openshift-master
- /etc/sysconfig/openshift-node
+ - /etc/sysconfig/openshift-node-dep
- /etc/sysconfig/openvswitch
- /etc/sysconfig/origin-master
- /etc/sysconfig/origin-master-api
@@ -246,7 +184,76 @@
- hosts: nodes
become: yes
tasks:
+ - name: Remove br0 interface
+ shell: ovs-vsctl del-br br0
+ changed_when: False
+ failed_when: False
+ - name: Remove linux interfaces
+ shell: ip link del "{{ item }}"
+ changed_when: False
+ failed_when: False
+ with_items:
+ - lbr0
+ - vlinuxbr
+ - vovsbr
- name: restart docker
service: name=docker state=restarted
+
- name: restart NetworkManager
service: name=NetworkManager state=restarted
+
+ - shell: find /var/lib/origin/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
+ changed_when: False
+
+ - shell: find /var/lib/atomic-enterprise/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
+ changed_when: False
+
+ - shell: find /var/lib/openshift/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
+ changed_when: False
+
+ - shell: docker rm -f "{{ item }}"-master "{{ item }}"-node
+ changed_when: False
+ failed_when: False
+ with_items:
+ - openshift-enterprise
+ - atomic-enterprise
+ - origin
+
+ - shell: docker ps -a | grep Exited | egrep "{{ item }}" | awk '{print $1}'
+ changed_when: False
+ failed_when: False
+ register: exited_containers_to_delete
+ with_items:
+ - aep3.*/aep
+ - aep3.*/node
+ - aep3.*/openvswitch
+ - openshift3/ose
+ - openshift3/node
+ - openshift3/openvswitch
+ - openshift/origin
+
+ - shell: "docker rm {{ item.stdout_lines | join(' ') }}"
+ changed_when: False
+ failed_when: False
+ with_items: "{{ exited_containers_to_delete.results }}"
+
+ - shell: docker images | egrep {{ item }} | awk '{ print $3 }'
+ changed_when: False
+ failed_when: False
+ register: images_to_delete
+ with_items:
+ - registry\.access\..*redhat\.com/openshift3
+ - registry\.access\..*redhat\.com/aep3
+ - registry\.qe\.openshift\.com/.*
+ - registry\.access\..*redhat\.com/rhel7/etcd
+ - docker.io/openshift
+
+ - shell: "docker rmi -f {{ item.stdout_lines | join(' ') }}"
+ changed_when: False
+ failed_when: False
+ with_items: "{{ images_to_delete.results }}"
+
+ - name: Remove sdn drop files
+ file:
+ path: /run/openshift-sdn
+ state: absent
diff --git a/playbooks/aws/openshift-cluster/config.yml b/playbooks/aws/openshift-cluster/config.yml
index 8402b3579..4839c100b 100644
--- a/playbooks/aws/openshift-cluster/config.yml
+++ b/playbooks/aws/openshift-cluster/config.yml
@@ -1,7 +1,20 @@
+- hosts: localhost
+ gather_facts: no
+ tasks:
+ - include_vars: vars.yml
+ - include_vars: cluster_hosts.yml
+ - add_host:
+ name: "{{ item }}"
+ groups: l_oo_all_hosts
+ with_items: g_all_hosts
+
+- hosts: l_oo_all_hosts
+ gather_facts: no
+ tasks:
+ - include_vars: vars.yml
+ - include_vars: cluster_hosts.yml
+
- include: ../../common/openshift-cluster/config.yml
- vars_files:
- - ../../aws/openshift-cluster/vars.yml
- - ../../aws/openshift-cluster/cluster_hosts.yml
vars:
g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
g_sudo: "{{ deployment_vars[deployment_type].become }}"
@@ -21,3 +34,4 @@
os_sdn_network_plugin_name: "{{ lookup('oo_option', 'sdn_network_plugin_name') }}"
openshift_use_flannel: "{{ lookup('oo_option', 'use_flannel') }}"
openshift_use_fluentd: "{{ lookup('oo_option', 'use_fluentd') }}"
+ openshift_use_dnsmasq: false
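The bootstrap plays added above (and the matching changes to the GCE, libvirt, OpenStack, update and rhel_subscribe playbooks elsewhere in this patch) replace vars_files with include_vars on localhost plus add_host, so that every host listed in g_all_hosts is placed into an ad-hoc l_oo_all_hosts group before the common config playbook runs. A minimal standalone sketch of that pattern follows, using a hypothetical static host list in place of the g_all_hosts value normally read from cluster_hosts.yml.

---
# Sketch only: the real playbooks load g_all_hosts via include_vars from
# cluster_hosts.yml; the two hostnames here are hypothetical placeholders.
- hosts: localhost
  connection: local
  gather_facts: no
  vars:
    g_all_hosts:
    - master1.example.com
    - node1.example.com
  tasks:
  - add_host:
      name: "{{ item }}"
      groups: l_oo_all_hosts
    with_items: "{{ g_all_hosts }}"

- hosts: l_oo_all_hosts
  gather_facts: no
  tasks:
  - debug:
      msg: "{{ inventory_hostname }} is a member of l_oo_all_hosts"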
diff --git a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
index 7d5776ae6..d22c86cda 100644
--- a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
+++ b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
@@ -150,6 +150,7 @@
groups: "{{ instance_groups }}"
ec2_private_ip_address: "{{ item.1.private_ip }}"
ec2_ip_address: "{{ item.1.public_ip }}"
+ ec2_tag_sub-host-type: "{{ sub_host_type }}"
openshift_node_labels: "{{ node_label }}"
logrotate_scripts: "{{ logrotate }}"
with_together:
diff --git a/playbooks/aws/openshift-cluster/templates/user_data.j2 b/playbooks/aws/openshift-cluster/templates/user_data.j2
index 2a3974a8c..b1087f9c4 100644
--- a/playbooks/aws/openshift-cluster/templates/user_data.j2
+++ b/playbooks/aws/openshift-cluster/templates/user_data.j2
@@ -3,8 +3,10 @@
mounts:
- [ xvdb ]
- [ ephemeral0 ]
+{% endif %}
write_files:
+{% if type in ['node', 'master'] and 'docker' in volume_defs[type] %}
- content: |
DEVS=/dev/xvdb
VG=docker_vg
@@ -12,8 +14,7 @@ write_files:
owner: root:root
permissions: '0644'
{% endif %}
-
-{% if deployment_vars[deployment_type].become %}
+{% if deployment_vars[deployment_type].become | bool %}
- path: /etc/sudoers.d/99-{{ deployment_vars[deployment_type].ssh_user }}-cloud-init-requiretty
permissions: 440
content: |
diff --git a/playbooks/aws/openshift-cluster/update.yml b/playbooks/aws/openshift-cluster/update.yml
index bd31c42dd..d762203b2 100644
--- a/playbooks/aws/openshift-cluster/update.yml
+++ b/playbooks/aws/openshift-cluster/update.yml
@@ -1,12 +1,25 @@
---
+- hosts: localhost
+ gather_facts: no
+ tasks:
+ - include_vars: vars.yml
+ - include_vars: cluster_hosts.yml
+ - add_host:
+ name: "{{ item }}"
+ groups: l_oo_all_hosts
+ with_items: g_all_hosts
+
+- hosts: l_oo_all_hosts
+ gather_facts: no
+ tasks:
+ - include_vars: vars.yml
+ - include_vars: cluster_hosts.yml
+
- name: Update - Populate oo_hosts_to_update group
hosts: localhost
connection: local
become: no
gather_facts: no
- vars_files:
- - vars.yml
- - cluster_hosts.yml
tasks:
- name: Update - Evaluate oo_hosts_to_update
add_host:
@@ -14,7 +27,7 @@
groups: oo_hosts_to_update
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_become: "{{ deployment_vars[deployment_type].become }}"
- with_items: "{{ g_all_hosts | default([]) }}"
+ with_items: g_all_hosts | default([])
- include: ../../common/openshift-cluster/update_repos_and_packages.yml
diff --git a/playbooks/aws/openshift-cluster/vars.yml b/playbooks/aws/openshift-cluster/vars.yml
index 8bda72ac2..d774187f0 100644
--- a/playbooks/aws/openshift-cluster/vars.yml
+++ b/playbooks/aws/openshift-cluster/vars.yml
@@ -17,7 +17,7 @@ deployment_rhel7_ent_base:
deployment_vars:
origin:
# centos-7, requires marketplace
- image: "{{ lookup('oo_option', 'ec2_image') | default('ami-61bbf104', True) }}"
+ image: "{{ lookup('oo_option', 'ec2_image') | default('ami-6d1c2007', True) }}"
image_name: "{{ lookup('oo_option', 'ec2_image_name') | default(None, True) }}"
region: "{{ lookup('oo_option', 'ec2_region') | default('us-east-1', True) }}"
ssh_user: centos
diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
new file mode 100644
index 000000000..6c12e8245
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
@@ -0,0 +1,105 @@
+
+- name: Check for appropriate Docker versions for 1.9.x to 1.10.x upgrade
+ hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config
+ roles:
+ - openshift_facts
+ tasks:
+ - name: Determine available Docker version
+ script: ../../../../common/openshift-cluster/upgrades/files/rpm_versions.sh docker
+ register: g_docker_version_result
+ when: not openshift.common.is_atomic | bool
+
+ - name: Check if Docker is installed
+ command: rpm -q docker
+ register: pkg_check
+ failed_when: pkg_check.rc > 1
+ changed_when: no
+ when: not openshift.common.is_atomic | bool
+
+ - set_fact:
+ g_docker_version: "{{ g_docker_version_result.stdout | from_yaml }}"
+ when: not openshift.common.is_atomic | bool
+
+ - name: Set fact if docker requires an upgrade
+ set_fact:
+ docker_upgrade: true
+ when: not openshift.common.is_atomic | bool and pkg_check.rc == 0 and g_docker_version.curr_version | version_compare('1.10','<')
+
+ - fail:
+ msg: This playbook requires access to Docker 1.10 or later
+ when: g_docker_version.avail_version | default(g_docker_version.curr_version, true) | version_compare('1.10','<')
+
+# If a node fails, halt everything: the admin will need to clean up, and we
+# don't want to carry on and potentially take out every node. The playbook can
+# safely be re-run and will not take any action on a node already running 1.10+.
+- name: Evacuate and upgrade nodes
+ hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config
+ serial: 1
+ any_errors_fatal: true
+ tasks:
+ - debug: var=docker_upgrade
+
+ - name: Prepare for Node evacuation
+ command: >
+ {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --schedulable=false
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+ when: docker_upgrade is defined and docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_config
+
+# TODO: skip all node evac stuff for non-nodes (i.e. separate containerized etcd hosts)
+ - name: Evacuate Node for Kubelet upgrade
+ command: >
+ {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --evacuate --force
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+ when: docker_upgrade is defined and docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_config
+
+ - name: Stop containerized services
+ service: name={{ item }} state=stopped
+ with_items:
+ - "{{ openshift.common.service_type }}-master"
+ - "{{ openshift.common.service_type }}-master-api"
+ - "{{ openshift.common.service_type }}-master-controllers"
+ - "{{ openshift.common.service_type }}-node"
+ - etcd
+ - openvswitch
+ failed_when: false
+ when: docker_upgrade is defined and docker_upgrade | bool and openshift.common.is_containerized | bool
+
+ - name: Remove all containers and images
+ script: files/nuke_images.sh docker
+ register: nuke_images_result
+ when: docker_upgrade is defined and docker_upgrade | bool
+
+ - name: Upgrade Docker
+ command: "{{ ansible_pkg_mgr}} update -y docker"
+ register: docker_upgrade_result
+ when: docker_upgrade is defined and docker_upgrade | bool
+
+ - name: Restart containerized services
+ service: name={{ item }} state=started
+ with_items:
+ - etcd
+ - openvswitch
+ - "{{ openshift.common.service_type }}-master"
+ - "{{ openshift.common.service_type }}-master-api"
+ - "{{ openshift.common.service_type }}-master-controllers"
+ - "{{ openshift.common.service_type }}-node"
+ failed_when: false
+ when: docker_upgrade is defined and docker_upgrade | bool and openshift.common.is_containerized | bool
+
+ - name: Wait for master API to come back online
+ become: no
+ local_action:
+ module: wait_for
+ host="{{ inventory_hostname }}"
+ state=started
+ delay=10
+ port="{{ openshift.master.api_port }}"
+ when: docker_upgrade is defined and docker_upgrade | bool and inventory_hostname in groups.oo_masters_to_config
+
+ - name: Set node schedulability
+ command: >
+ {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --schedulable=true
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+    when: docker_upgrade is defined and docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_config and openshift.node.schedulable | bool
+
diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/files/nuke_images.sh b/playbooks/byo/openshift-cluster/upgrades/docker/files/nuke_images.sh
new file mode 100644
index 000000000..9a5ee2276
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/docker/files/nuke_images.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+
+# Stop any running containers
+running_container_count=`docker ps -q | wc -l`
+if test $running_container_count -gt 0
+then
+ docker stop $(docker ps -q)
+fi
+
+# Delete all containers
+container_count=`docker ps -a -q | wc -l`
+if test $container_count -gt 0
+then
+ docker rm -f -v $(docker ps -a -q)
+fi
+
+# Delete all images (forcefully)
+image_count=`docker images -q | wc -l`
+if test $image_count -gt 0
+then
+ # Taken from: https://gist.github.com/brianclements/f72b2de8e307c7b56689#gistcomment-1443144
+ docker rmi $(docker images | grep "$2/\|/$2 \| $2 \|$2 \|$2-\|$2_" | awk '{print $1 ":" $2}') 2>/dev/null || echo "No images matching \"$2\" left to purge."
+fi
diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/roles b/playbooks/byo/openshift-cluster/upgrades/docker/roles
new file mode 120000
index 000000000..6bc1a7aef
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/docker/roles
@@ -0,0 +1 @@
+../../../../../roles \ No newline at end of file
diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml
new file mode 100644
index 000000000..0f86abd89
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml
@@ -0,0 +1,29 @@
+# Playbook to upgrade Docker to the max allowable version for an OpenShift cluster.
+#
+# Currently only supports upgrading 1.9.x to >= 1.10.x.
+- hosts: localhost
+ connection: local
+ become: no
+ gather_facts: no
+ tasks:
+ - include_vars: ../../cluster_hosts.yml
+ - add_host:
+ name: "{{ item }}"
+ groups: l_oo_all_hosts
+ with_items: g_all_hosts | default([])
+ changed_when: false
+
+- hosts: l_oo_all_hosts
+ gather_facts: no
+ tasks:
+ - include_vars: ../../cluster_hosts.yml
+
+- include: ../../../../common/openshift-cluster/evaluate_groups.yml
+ vars:
+ # Do not allow adding hosts during upgrade.
+ g_new_master_hosts: []
+ g_new_node_hosts: []
+ openshift_cluster_id: "{{ cluster_id | default('default') }}"
+ openshift_deployment_type: "{{ deployment_type }}"
+
+- include: docker_upgrade.yml
diff --git a/playbooks/byo/rhel_subscribe.yml b/playbooks/byo/rhel_subscribe.yml
index 990ddd2f2..f093411ef 100644
--- a/playbooks/byo/rhel_subscribe.yml
+++ b/playbooks/byo/rhel_subscribe.yml
@@ -1,5 +1,23 @@
---
-- hosts: all
+- hosts: localhost
+ connection: local
+ become: no
+ gather_facts: no
+ tasks:
+ - include_vars: openshift-cluster/cluster_hosts.yml
+ - add_host:
+ name: "{{ item }}"
+ groups: l_oo_all_hosts
+ with_items: g_all_hosts
+
+- hosts: l_oo_all_hosts
+ gather_facts: no
+ tasks:
+ - include_vars: openshift-cluster/cluster_hosts.yml
+
+- include: ../common/openshift-cluster/evaluate_groups.yml
+
+- hosts: l_oo_all_hosts
vars:
openshift_deployment_type: "{{ deployment_type }}"
roles:
diff --git a/playbooks/common/openshift-cluster/additional_config.yml b/playbooks/common/openshift-cluster/additional_config.yml
index 5ed1d3b3c..ebddc7841 100644
--- a/playbooks/common/openshift-cluster/additional_config.yml
+++ b/playbooks/common/openshift-cluster/additional_config.yml
@@ -28,25 +28,4 @@
- role: flannel_register
when: openshift.common.use_flannel | bool
-- name: Create persistent volumes and create hosted services
- hosts: oo_first_master
- vars:
- attach_registry_volume: "{{ openshift.hosted.registry.storage.kind != None }}"
- deploy_infra: "{{ openshift.master.infra_nodes | default([]) | length > 0 }}"
- persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups) }}"
- persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims }}"
- roles:
- - role: openshift_persistent_volumes
- when: persistent_volumes | length > 0 or persistent_volume_claims | length > 0
- - role: openshift_serviceaccounts
- openshift_serviceaccounts_names:
- - router
- - registry
- openshift_serviceaccounts_namespace: default
- openshift_serviceaccounts_sccs:
- - privileged
- - role: openshift_registry
- registry_volume_claim: "{{ openshift.hosted.registry.storage.volume.name }}-claim"
- when: deploy_infra | bool and attach_registry_volume | bool
- - role: openshift_metrics
- when: openshift.hosted.metrics.deploy | bool
+
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
index 903babc45..5fec11541 100644
--- a/playbooks/common/openshift-cluster/config.yml
+++ b/playbooks/common/openshift-cluster/config.yml
@@ -31,6 +31,8 @@
- include: ../openshift-nfs/config.yml
+- include: ../openshift-loadbalancer/config.yml
+
- include: ../openshift-master/config.yml
- include: additional_config.yml
diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml
index 1cbc0f544..811b3d685 100644
--- a/playbooks/common/openshift-cluster/openshift_hosted.yml
+++ b/playbooks/common/openshift-cluster/openshift_hosted.yml
@@ -1,5 +1,30 @@
+- name: Create persistent volumes and create hosted services
+ hosts: oo_first_master
+ vars:
+ attach_registry_volume: "{{ openshift.hosted.registry.storage.kind != None }}"
+ deploy_infra: "{{ openshift.master.infra_nodes | default([]) | length > 0 }}"
+ persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups) }}"
+ persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims }}"
+ roles:
+ - role: openshift_persistent_volumes
+ when: persistent_volumes | length > 0 or persistent_volume_claims | length > 0
+ - role: openshift_serviceaccounts
+ openshift_serviceaccounts_names:
+ - router
+ - registry
+ openshift_serviceaccounts_namespace: default
+ openshift_serviceaccounts_sccs:
+ - privileged
+ - role: openshift_registry
+ registry_volume_claim: "{{ openshift.hosted.registry.storage.volume.name }}-claim"
+ when: deploy_infra | bool and attach_registry_volume | bool
+ - role: openshift_metrics
+ when: openshift.hosted.metrics.deploy | bool
+
- name: Create Hosted Resources
hosts: oo_first_master
+ pre_tasks:
+ - set_fact:
+ openshift_hosted_router_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}"
roles:
- role: openshift_hosted
- openshift_hosted_router_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}"
diff --git a/playbooks/common/openshift-cluster/update_repos_and_packages.yml b/playbooks/common/openshift-cluster/update_repos_and_packages.yml
index 1474bb3ca..0a37d4597 100644
--- a/playbooks/common/openshift-cluster/update_repos_and_packages.yml
+++ b/playbooks/common/openshift-cluster/update_repos_and_packages.yml
@@ -1,4 +1,6 @@
---
+- include: evaluate_groups.yml
+
- hosts: oo_hosts_to_update
vars:
openshift_deployment_type: "{{ deployment_type }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/files/openshift_container_versions.sh b/playbooks/common/openshift-cluster/upgrades/files/openshift_container_versions.sh
index 96944a78b..9bbeff660 100644
--- a/playbooks/common/openshift-cluster/upgrades/files/openshift_container_versions.sh
+++ b/playbooks/common/openshift-cluster/upgrades/files/openshift_container_versions.sh
@@ -2,7 +2,7 @@
# Here we don't really care if this is a master, api, controller or node image.
# We just need to know the version of one of them.
-unit_file=$(ls /etc/systemd/system/${1}*.service | head -n1)
+unit_file=$(ls /etc/systemd/system/${1}*.service | grep -v node-dep | head -n1)
if [ ${1} == "origin" ]; then
image_name="openshift/origin"
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
index a72749a2b..3a4c58e43 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
@@ -54,7 +54,7 @@
- script: ../files/pre-upgrade-check
-- name: Verify upgrade can proceed
+- name: Verify upgrade targets
hosts: oo_masters_to_config:oo_nodes_to_config
vars:
target_version: "{{ '1.1' if deployment_type == 'origin' else '3.1' }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml
index 66935e061..85d7073f2 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml
@@ -29,7 +29,7 @@
valid version for a {{ target_version }} upgrade
when: openshift_pkg_version is defined and openshift_pkg_version.split('-',1).1 | version_compare(target_version ,'<')
-- name: Verify upgrade can proceed
+- name: Verify upgrade targets
hosts: oo_masters_to_config:oo_nodes_to_config
vars:
target_version: "{{ '1.1.1' if deployment_type == 'origin' else '3.1.1' }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker_upgrade.yml
deleted file mode 100644
index d9177e8a0..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker_upgrade.yml
+++ /dev/null
@@ -1,14 +0,0 @@
-- name: Check if Docker is installed
- command: rpm -q docker
- register: pkg_check
- failed_when: pkg_check.rc > 1
- changed_when: no
-
-- name: Upgrade Docker
- command: "{{ ansible_pkg_mgr}} update -y docker"
- when: pkg_check.rc == 0 and g_docker_version.curr_version | version_compare('1.9','<')
- register: docker_upgrade
-
-- name: Restart Docker
- service: name=docker state=restarted
- when: docker_upgrade | changed
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/post.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/post.yml
index 12e2edfb9..31e76805c 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/post.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/post.yml
@@ -37,7 +37,7 @@
- name: Update router image to current version
when: all_routers.rc == 0
command: >
- {{ oc_cmd }} patch dc/{{ item['labels']['deploymentconfig'] }} -p
+ {{ oc_cmd }} patch dc/{{ item['labels']['deploymentconfig'] }} -n {{ item['namespace'] }} -p
'{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}","livenessProbe":{"tcpSocket":null,"httpGet":{"path": "/healthz", "port": 1936, "host": "localhost", "scheme": "HTTP"},"initialDelaySeconds":10,"timeoutSeconds":1}}]}}}}'
--api-version=v1
with_items: haproxy_routers
@@ -52,7 +52,7 @@
- name: Update registry image to current version
when: _default_registry.rc == 0
command: >
- {{ oc_cmd }} patch dc/docker-registry -p
+ {{ oc_cmd }} patch dc/docker-registry -n default -p
'{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}'
--api-version=v1
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml
index dd9843290..6bff16674 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml
@@ -53,7 +53,7 @@
valid version for a {{ target_version }} upgrade
when: openshift_image_tag is defined and openshift_image_tag.split('v',1).1 | version_compare(target_version ,'<')
-- name: Verify upgrade can proceed
+- name: Verify master processes
hosts: oo_masters_to_config
roles:
- openshift_facts
@@ -84,7 +84,7 @@
enabled: yes
when: openshift.master.ha is defined and openshift.master.ha | bool and openshift.common.is_containerized | bool
-- name: Verify upgrade can proceed
+- name: Verify node processes
hosts: oo_nodes_to_config
roles:
- openshift_facts
@@ -96,7 +96,7 @@
enabled: yes
when: openshift.common.is_containerized | bool
-- name: Verify upgrade can proceed
+- name: Verify upgrade targets
hosts: oo_masters_to_config:oo_nodes_to_config
vars:
target_version: "{{ '1.2' if deployment_type == 'origin' else '3.1.1.900' }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml
index c93bf2a17..156e80c0f 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml
@@ -3,13 +3,13 @@
# The restart playbook should be run after this playbook completes.
###############################################################################
-- name: Upgrade docker
+- include: ../../../../byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
+
+- name: Update Docker facts
hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config
roles:
- openshift_facts
tasks:
- - include: docker_upgrade.yml
- when: not openshift.common.is_atomic | bool
- name: Set post docker install facts
openshift_facts:
role: "{{ item.role }}"
diff --git a/playbooks/common/openshift-loadbalancer/config.yml b/playbooks/common/openshift-loadbalancer/config.yml
new file mode 100644
index 000000000..f4392173a
--- /dev/null
+++ b/playbooks/common/openshift-loadbalancer/config.yml
@@ -0,0 +1,5 @@
+---
+- name: Configure load balancers
+ hosts: oo_lb_to_config
+ roles:
+ - role: openshift_loadbalancer
diff --git a/playbooks/common/openshift-loadbalancer/filter_plugins b/playbooks/common/openshift-loadbalancer/filter_plugins
new file mode 120000
index 000000000..99a95e4ca
--- /dev/null
+++ b/playbooks/common/openshift-loadbalancer/filter_plugins
@@ -0,0 +1 @@
+../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-loadbalancer/lookup_plugins b/playbooks/common/openshift-loadbalancer/lookup_plugins
new file mode 120000
index 000000000..ac79701db
--- /dev/null
+++ b/playbooks/common/openshift-loadbalancer/lookup_plugins
@@ -0,0 +1 @@
+../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-loadbalancer/roles b/playbooks/common/openshift-loadbalancer/roles
new file mode 120000
index 000000000..e2b799b9d
--- /dev/null
+++ b/playbooks/common/openshift-loadbalancer/roles
@@ -0,0 +1 @@
+../../../roles/ \ No newline at end of file
diff --git a/playbooks/common/openshift-loadbalancer/service.yml b/playbooks/common/openshift-loadbalancer/service.yml
new file mode 100644
index 000000000..19fffd5e9
--- /dev/null
+++ b/playbooks/common/openshift-loadbalancer/service.yml
@@ -0,0 +1,20 @@
+---
+- name: Populate g_service_lb host group if needed
+ hosts: localhost
+ connection: local
+ become: no
+ gather_facts: no
+ tasks:
+ - fail: msg="new_cluster_state is required to be injected in this playbook"
+ when: new_cluster_state is not defined
+
+ - name: Evaluate g_service_lb
+ add_host: name={{ item }} groups=g_service_lb
+ with_items: oo_host_group_exp | default([])
+
+- name: Change state on lb instance(s)
+ hosts: g_service_lb
+ connection: ssh
+ gather_facts: no
+ tasks:
+    - service: name=haproxy state="{{ new_cluster_state }}"
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index 91d66a9cb..0ca148169 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -235,33 +235,6 @@
validate_checksum: yes
with_items: "{{ masters_needing_certs | default([]) }}"
-- name: Configure load balancers
- hosts: oo_lb_to_config
- vars:
- sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
- haproxy_limit_nofile: 100000
- haproxy_global_maxconn: 20000
- haproxy_default_maxconn: 20000
- haproxy_frontend_port: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_port }}"
- haproxy_frontends:
- - name: atomic-openshift-api
- mode: tcp
- options:
- - tcplog
- binds:
- - "*:{{ hostvars[groups.oo_first_master.0].openshift.master.api_port }}"
- default_backend: atomic-openshift-api
- haproxy_backends:
- - name: atomic-openshift-api
- mode: tcp
- option: tcplog
- balance: source
- servers: "{{ hostvars | oo_select_keys(groups['oo_masters']) | oo_haproxy_backend_masters }}"
- roles:
- - role: openshift_facts
- - role: haproxy
- when: hostvars[groups.oo_first_master.0].openshift.master.ha | bool
-
- name: Check for cached session secrets
hosts: oo_first_master
roles:
@@ -348,6 +321,13 @@
openshift_master_session_auth_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_auth_secrets }}"
openshift_master_session_encryption_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_encryption_secrets }}"
openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+ openshift_generate_no_proxy_hosts | default(True) | bool }}"
pre_tasks:
- name: Ensure certificate directory exists
file:
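The openshift_no_proxy_internal_hostnames value introduced above (and repeated for the node plays in the next file) joins the openshift.common.hostname fact of every configured node, master and etcd host into a comma-separated string intended for the roles' NO_PROXY handling, so internal cluster traffic is not sent through the configured proxy. A rough sketch of the same idea is shown below using only built-in filters and hypothetical hostnames, since the real expression relies on this repository's oo_select_keys and oo_collect filter plugins.

---
# Sketch only: hostnames and group contents are hypothetical; the real
# playbooks derive them from hostvars with oo_select_keys/oo_collect.
- hosts: localhost
  connection: local
  gather_facts: no
  vars:
    node_hostnames: [node1.example.com, node2.example.com]
    master_hostnames: [master1.example.com]
    etcd_hostnames: []
  tasks:
  - debug:
      msg: "{{ (node_hostnames | union(master_hostnames) | union(etcd_hostnames)) | join(',') }}"
    # Prints: node1.example.com,node2.example.com,master1.example.com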
diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml
index a41fca45a..b3491ef8d 100644
--- a/playbooks/common/openshift-node/config.yml
+++ b/playbooks/common/openshift-node/config.yml
@@ -116,6 +116,13 @@
openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"
openshift_node_first_master_ip: "{{ hostvars[groups.oo_first_master.0].openshift.common.ip }}"
openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+ openshift_generate_no_proxy_hosts | default(True) | bool }}"
roles:
- openshift_node
@@ -125,6 +132,13 @@
openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"
openshift_node_first_master_ip: "{{ hostvars[groups.oo_first_master.0].openshift.common.ip }}"
openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+ openshift_generate_no_proxy_hosts | default(True) | bool }}"
roles:
- openshift_node
diff --git a/playbooks/gce/openshift-cluster/config.yml b/playbooks/gce/openshift-cluster/config.yml
index 475d29293..b973c513f 100644
--- a/playbooks/gce/openshift-cluster/config.yml
+++ b/playbooks/gce/openshift-cluster/config.yml
@@ -1,8 +1,23 @@
---
+- hosts: localhost
+ gather_facts: no
+ tasks:
+ - include_vars: vars.yml
+ - include_vars: cluster_hosts.yml
+ - add_host:
+ name: "{{ item }}"
+ groups: l_oo_all_hosts
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_become: "{{ deployment_vars[deployment_type].become }}"
+ with_items: g_all_hosts
+
+- hosts: l_oo_all_hosts
+ gather_facts: no
+ tasks:
+ - include_vars: vars.yml
+ - include_vars: cluster_hosts.yml
+
- include: ../../common/openshift-cluster/config.yml
- vars_files:
- - ../../gce/openshift-cluster/vars.yml
- - ../../gce/openshift-cluster/cluster_hosts.yml
vars:
g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
g_sudo: "{{ deployment_vars[deployment_type].become }}"
diff --git a/playbooks/gce/openshift-cluster/library/gce.py b/playbooks/gce/openshift-cluster/library/gce.py
new file mode 100644
index 000000000..fcaa3b850
--- /dev/null
+++ b/playbooks/gce/openshift-cluster/library/gce.py
@@ -0,0 +1,543 @@
+#!/usr/bin/python
+# Copyright 2013 Google Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: gce
+version_added: "1.4"
+short_description: create or terminate GCE instances
+description:
+ - Creates or terminates Google Compute Engine (GCE) instances. See
+ U(https://cloud.google.com/products/compute-engine) for an overview.
+ Full install/configuration instructions for the gce* modules can
+ be found in the comments of ansible/test/gce_tests.py.
+options:
+ image:
+ description:
+ - image string to use for the instance
+ required: false
+ default: "debian-7"
+ instance_names:
+ description:
+ - a comma-separated list of instance names to create or destroy
+ required: false
+ default: null
+ machine_type:
+ description:
+ - machine type to use for the instance, use 'n1-standard-1' by default
+ required: false
+ default: "n1-standard-1"
+ metadata:
+ description:
+ - a hash/dictionary of custom data for the instance;
+ '{"key":"value", ...}'
+ required: false
+ default: null
+ service_account_email:
+ version_added: "1.5.1"
+ description:
+ - service account email
+ required: false
+ default: null
+ service_account_permissions:
+ version_added: "2.0"
+ description:
+ - service account permissions (see
+ U(https://cloud.google.com/sdk/gcloud/reference/compute/instances/create),
+ --scopes section for detailed information)
+ required: false
+ default: null
+ choices: [
+ "bigquery", "cloud-platform", "compute-ro", "compute-rw",
+ "computeaccounts-ro", "computeaccounts-rw", "datastore", "logging-write",
+ "monitoring", "sql", "sql-admin", "storage-full", "storage-ro",
+ "storage-rw", "taskqueue", "userinfo-email"
+ ]
+ pem_file:
+ version_added: "1.5.1"
+ description:
+ - path to the pem file associated with the service account email
+ required: false
+ default: null
+ project_id:
+ version_added: "1.5.1"
+ description:
+ - your GCE project ID
+ required: false
+ default: null
+ name:
+ description:
+ - identifier when working with a single instance
+ required: false
+ network:
+ description:
+ - name of the network, 'default' will be used if not specified
+ required: false
+ default: "default"
+ persistent_boot_disk:
+ description:
+ - if set, create the instance with a persistent boot disk
+ required: false
+ default: "false"
+ disks:
+ description:
+ - a list of persistent disks to attach to the instance; a string value
+ gives the name of the disk; alternatively, a dictionary value can
+ define 'name' and 'mode' ('READ_ONLY' or 'READ_WRITE'). The first entry
+ will be the boot disk (which must be READ_WRITE).
+ required: false
+ default: null
+ version_added: "1.7"
+ state:
+ description:
+ - desired state of the resource
+ required: false
+ default: "present"
+ choices: ["active", "present", "absent", "deleted"]
+ tags:
+ description:
+ - a comma-separated list of tags to associate with the instance
+ required: false
+ default: null
+ zone:
+ description:
+ - the GCE zone to use
+ required: true
+ default: "us-central1-a"
+ ip_forward:
+ version_added: "1.9"
+ description:
+ - set to true if the instance can forward ip packets (useful for
+ gateways)
+ required: false
+ default: "false"
+ external_ip:
+ version_added: "1.9"
+ description:
+ - type of external ip, ephemeral by default
+ required: false
+ default: "ephemeral"
+ disk_auto_delete:
+ version_added: "1.9"
+ description:
+ - if set boot disk will be removed after instance destruction
+ required: false
+ default: "true"
+
+requirements:
+ - "python >= 2.6"
+ - "apache-libcloud >= 0.13.3"
+notes:
+ - Either I(name) or I(instance_names) is required.
+author: "Eric Johnson (@erjohnso) <erjohnso@google.com>"
+'''
+
+EXAMPLES = '''
+# Basic provisioning example. Create a single Debian 7 instance in the
+# us-central1-a Zone of n1-standard-1 machine type.
+- local_action:
+ module: gce
+ name: test-instance
+ zone: us-central1-a
+ machine_type: n1-standard-1
+ image: debian-7
+
+# Example using defaults and with metadata to create a single 'foo' instance
+- local_action:
+ module: gce
+ name: foo
+ metadata: '{"db":"postgres", "group":"qa", "id":500}'
+
+
+# Launch instances from a control node, runs some tasks on the new instances,
+# and then terminate them
+- name: Create a sandbox instance
+ hosts: localhost
+ vars:
+ names: foo,bar
+ machine_type: n1-standard-1
+ image: debian-6
+ zone: us-central1-a
+ service_account_email: unique-email@developer.gserviceaccount.com
+ pem_file: /path/to/pem_file
+ project_id: project-id
+ tasks:
+ - name: Launch instances
+ local_action: gce instance_names={{names}} machine_type={{machine_type}}
+ image={{image}} zone={{zone}}
+ service_account_email={{ service_account_email }}
+ pem_file={{ pem_file }} project_id={{ project_id }}
+ register: gce
+ - name: Wait for SSH to come up
+ local_action: wait_for host={{item.public_ip}} port=22 delay=10
+ timeout=60 state=started
+ with_items: {{gce.instance_data}}
+
+- name: Configure instance(s)
+ hosts: launched
+ sudo: True
+ roles:
+ - my_awesome_role
+ - my_awesome_tasks
+
+- name: Terminate instances
+ hosts: localhost
+ connection: local
+ tasks:
+ - name: Terminate instances that were previously launched
+ local_action:
+ module: gce
+ state: 'absent'
+ instance_names: {{gce.instance_names}}
+
+'''
+
+try:
+ import libcloud
+ from libcloud.compute.types import Provider
+ from libcloud.compute.providers import get_driver
+ from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
+ ResourceExistsError, ResourceInUseError, ResourceNotFoundError
+ _ = Provider.GCE
+ HAS_LIBCLOUD = True
+except ImportError:
+ HAS_LIBCLOUD = False
+
+try:
+ from ast import literal_eval
+ HAS_PYTHON26 = True
+except ImportError:
+ HAS_PYTHON26 = False
+
+
+def get_instance_info(inst):
+ """Retrieves instance information from an instance object and returns it
+ as a dictionary.
+
+ """
+ metadata = {}
+ if 'metadata' in inst.extra and 'items' in inst.extra['metadata']:
+ for md in inst.extra['metadata']['items']:
+ metadata[md['key']] = md['value']
+
+ try:
+ netname = inst.extra['networkInterfaces'][0]['network'].split('/')[-1]
+ except:
+ netname = None
+ if 'disks' in inst.extra:
+ disk_names = [disk_info['source'].split('/')[-1]
+ for disk_info
+ in sorted(inst.extra['disks'],
+ key=lambda disk_info: disk_info['index'])]
+ else:
+ disk_names = []
+
+ if len(inst.public_ips) == 0:
+ public_ip = None
+ else:
+ public_ip = inst.public_ips[0]
+
+ return({
+ 'image': inst.image is not None and inst.image.split('/')[-1] or None,
+ 'disks': disk_names,
+ 'machine_type': inst.size,
+ 'metadata': metadata,
+ 'name': inst.name,
+ 'network': netname,
+ 'private_ip': inst.private_ips[0],
+ 'public_ip': public_ip,
+ 'status': ('status' in inst.extra) and inst.extra['status'] or None,
+ 'tags': ('tags' in inst.extra) and inst.extra['tags'] or [],
+ 'zone': ('zone' in inst.extra) and inst.extra['zone'].name or None,
+ })
+
+
+def create_instances(module, gce, instance_names):
+ """Creates new instances. Attributes other than instance_names are picked
+ up from 'module'
+
+ module : AnsibleModule object
+ gce: authenticated GCE libcloud driver
+ instance_names: python list of instance names to create
+
+ Returns:
+ A list of dictionaries with instance information
+ about the instances that were launched.
+
+ """
+ image = module.params.get('image')
+ machine_type = module.params.get('machine_type')
+ metadata = module.params.get('metadata')
+ network = module.params.get('network')
+ persistent_boot_disk = module.params.get('persistent_boot_disk')
+ disks = module.params.get('disks')
+ state = module.params.get('state')
+ tags = module.params.get('tags')
+ zone = module.params.get('zone')
+ ip_forward = module.params.get('ip_forward')
+ external_ip = module.params.get('external_ip')
+ disk_auto_delete = module.params.get('disk_auto_delete')
+ service_account_permissions = module.params.get('service_account_permissions')
+ service_account_email = module.params.get('service_account_email')
+
+ if external_ip == "none":
+ external_ip = None
+
+ new_instances = []
+ changed = False
+
+ lc_image = gce.ex_get_image(image)
+ lc_disks = []
+ disk_modes = []
+ for i, disk in enumerate(disks or []):
+ if isinstance(disk, dict):
+ lc_disks.append(gce.ex_get_volume(disk['name']))
+ disk_modes.append(disk['mode'])
+ else:
+ lc_disks.append(gce.ex_get_volume(disk))
+ # boot disk is implicitly READ_WRITE
+ disk_modes.append('READ_ONLY' if i > 0 else 'READ_WRITE')
+ lc_network = gce.ex_get_network(network)
+ lc_machine_type = gce.ex_get_size(machine_type)
+ lc_zone = gce.ex_get_zone(zone)
+
+ # Try to convert the user's metadata value into the format expected
+ # by GCE. First try to ensure user has proper quoting of a
+ # dictionary-like syntax using 'literal_eval', then convert the python
+ # dict into a python list of 'key' / 'value' dicts. Should end up
+ # with:
+ # [ {'key': key1, 'value': value1}, {'key': key2, 'value': value2}, ...]
+ if metadata:
+ if isinstance(metadata, dict):
+ md = metadata
+ else:
+ try:
+ md = literal_eval(str(metadata))
+ if not isinstance(md, dict):
+ raise ValueError('metadata must be a dict')
+ except ValueError as e:
+ module.fail_json(msg='bad metadata: %s' % str(e))
+ except SyntaxError as e:
+ module.fail_json(msg='bad metadata syntax')
+
+ if hasattr(libcloud, '__version__') and libcloud.__version__ < '0.15':
+ items = []
+ for k, v in md.items():
+ items.append({"key": k, "value": v})
+ metadata = {'items': items}
+ else:
+ metadata = md
+
+ ex_sa_perms = []
+ bad_perms = []
+ if service_account_permissions:
+ for perm in service_account_permissions:
+ if perm not in gce.SA_SCOPES_MAP.keys():
+ bad_perms.append(perm)
+ if len(bad_perms) > 0:
+ module.fail_json(msg='bad permissions: %s' % str(bad_perms))
+ if service_account_email:
+ ex_sa_perms.append({'email': service_account_email})
+ else:
+ ex_sa_perms.append({'email': "default"})
+ ex_sa_perms[0]['scopes'] = service_account_permissions
+
+ # These variables all have default values but check just in case
+ if not lc_image or not lc_network or not lc_machine_type or not lc_zone:
+ module.fail_json(msg='Missing required create instance variable',
+ changed=False)
+
+ for name in instance_names:
+ pd = None
+ if lc_disks:
+ pd = lc_disks[0]
+ elif persistent_boot_disk:
+ try:
+ pd = gce.create_volume(None, "%s" % name, image=lc_image)
+ except ResourceExistsError:
+ pd = gce.ex_get_volume("%s" % name, lc_zone)
+ inst = None
+ try:
+ inst = gce.create_node(
+ name, lc_machine_type, lc_image, location=lc_zone,
+ ex_network=network, ex_tags=tags, ex_metadata=metadata,
+ ex_boot_disk=pd, ex_can_ip_forward=ip_forward,
+ external_ip=external_ip, ex_disk_auto_delete=disk_auto_delete,
+ ex_service_accounts=ex_sa_perms
+ )
+ changed = True
+ except ResourceExistsError:
+ inst = gce.ex_get_node(name, lc_zone)
+ except GoogleBaseError as e:
+ module.fail_json(msg='Unexpected error attempting to create ' +
+ 'instance %s, error: %s' % (name, e.value))
+
+ for i, lc_disk in enumerate(lc_disks):
+ # Check whether the disk is already attached
+ if (len(inst.extra['disks']) > i):
+ attached_disk = inst.extra['disks'][i]
+ if attached_disk['source'] != lc_disk.extra['selfLink']:
+ module.fail_json(
+ msg=("Disk at index %d does not match: requested=%s found=%s" % (
+ i, lc_disk.extra['selfLink'], attached_disk['source'])))
+ elif attached_disk['mode'] != disk_modes[i]:
+ module.fail_json(
+ msg=("Disk at index %d is in the wrong mode: requested=%s found=%s" % (
+ i, disk_modes[i], attached_disk['mode'])))
+ else:
+ continue
+ gce.attach_volume(inst, lc_disk, ex_mode=disk_modes[i])
+ # Work around libcloud bug: attached volumes don't get added
+ # to the instance metadata. get_instance_info() only cares about
+ # source and index.
+ if len(inst.extra['disks']) != i+1:
+ inst.extra['disks'].append(
+ {'source': lc_disk.extra['selfLink'], 'index': i})
+
+ if inst:
+ new_instances.append(inst)
+
+ instance_names = []
+ instance_json_data = []
+ for inst in new_instances:
+ d = get_instance_info(inst)
+ instance_names.append(d['name'])
+ instance_json_data.append(d)
+
+ return (changed, instance_json_data, instance_names)
+
+
+def terminate_instances(module, gce, instance_names, zone_name):
+ """Terminates a list of instances.
+
+ module: Ansible module object
+ gce: authenticated GCE connection object
+ instance_names: a list of instance names to terminate
+ zone_name: the zone where the instances reside prior to termination
+
+ Returns a dictionary of instance names that were terminated.
+
+ """
+ changed = False
+ terminated_instance_names = []
+ for name in instance_names:
+ inst = None
+ try:
+ inst = gce.ex_get_node(name, zone_name)
+ except ResourceNotFoundError:
+ pass
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+ if inst:
+ gce.destroy_node(inst)
+ terminated_instance_names.append(inst.name)
+ changed = True
+
+ return (changed, terminated_instance_names)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ image=dict(default='debian-7'),
+ instance_names=dict(),
+ machine_type=dict(default='n1-standard-1'),
+ metadata=dict(),
+ name=dict(),
+ network=dict(default='default'),
+ persistent_boot_disk=dict(type='bool', default=False),
+ disks=dict(type='list'),
+ state=dict(choices=['active', 'present', 'absent', 'deleted'],
+ default='present'),
+ tags=dict(type='list'),
+ zone=dict(default='us-central1-a'),
+ service_account_email=dict(),
+ service_account_permissions=dict(type='list'),
+ pem_file=dict(),
+ project_id=dict(),
+ ip_forward=dict(type='bool', default=False),
+ external_ip=dict(choices=['ephemeral', 'none'],
+ default='ephemeral'),
+ disk_auto_delete=dict(type='bool', default=True),
+ )
+ )
+
+ if not HAS_PYTHON26:
+ module.fail_json(msg="GCE module requires python's 'ast' module, python v2.6+")
+ if not HAS_LIBCLOUD:
+ module.fail_json(msg='libcloud with GCE support (0.13.3+) required for this module')
+
+ gce = gce_connect(module)
+
+ image = module.params.get('image')
+ instance_names = module.params.get('instance_names')
+ machine_type = module.params.get('machine_type')
+ metadata = module.params.get('metadata')
+ name = module.params.get('name')
+ network = module.params.get('network')
+ persistent_boot_disk = module.params.get('persistent_boot_disk')
+ state = module.params.get('state')
+ tags = module.params.get('tags')
+ zone = module.params.get('zone')
+ ip_forward = module.params.get('ip_forward')
+ changed = False
+
+ inames = []
+ if isinstance(instance_names, list):
+ inames = instance_names
+ elif isinstance(instance_names, str):
+ inames = instance_names.split(',')
+ if name:
+ inames.append(name)
+ if not inames:
+ module.fail_json(msg='Must specify a "name" or "instance_names"',
+ changed=False)
+ if not zone:
+ module.fail_json(msg='Must specify a "zone"', changed=False)
+
+ json_output = {'zone': zone}
+ if state in ['absent', 'deleted']:
+ json_output['state'] = 'absent'
+ (changed, terminated_instance_names) = terminate_instances(
+ module, gce, inames, zone)
+
+ # based on what the user specified, return the same variable, although
+ # value could be different if an instance could not be destroyed
+ if instance_names:
+ json_output['instance_names'] = terminated_instance_names
+ elif name:
+ json_output['name'] = name
+
+ elif state in ['active', 'present']:
+ json_output['state'] = 'present'
+ (changed, instance_data, instance_name_list) = create_instances(
+ module, gce, inames)
+ json_output['instance_data'] = instance_data
+ if instance_names:
+ json_output['instance_names'] = instance_name_list
+ elif name:
+ json_output['name'] = name
+
+ json_output['changed'] = changed
+ module.exit_json(**json_output)
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.gce import *
+if __name__ == '__main__':
+ main()
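
The comment in create_instances() above describes converting a user-supplied metadata value into the list-of-{key, value} structure that older libcloud releases expect, via literal_eval. A minimal sketch of that conversion, outside the module, might look like the following; normalize_gce_metadata is a hypothetical helper used here only for illustration, not part of gce.py:

    from ast import literal_eval

    def normalize_gce_metadata(metadata):
        # Accept either a real dict or a string that looks like one
        # (proper quoting checked by literal_eval) and return the
        # {'items': [{'key': ..., 'value': ...}, ...]} structure that
        # pre-0.15 libcloud expects for instance metadata.
        if not isinstance(metadata, dict):
            metadata = literal_eval(str(metadata))
            if not isinstance(metadata, dict):
                raise ValueError('metadata must be a dict')
        return {'items': [{'key': k, 'value': v} for k, v in metadata.items()]}

    # For example, the startup-script metadata added by the GCE launch
    # playbook below would arrive here roughly as:
    print(normalize_gce_metadata({'startup-script': '#!/bin/bash\necho hi'}))
    # {'items': [{'key': 'startup-script', 'value': '#!/bin/bash\necho hi'}]}
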
diff --git a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
index e3efd8566..c5c479052 100644
--- a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
+++ b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
@@ -17,6 +17,11 @@
- clusterid-{{ cluster_id }}
- host-type-{{ type }}
- sub-host-type-{{ g_sub_host_type }}
+ metadata:
+ startup-script: |
+ #!/bin/bash
+ echo "Defaults:{{ deployment_vars[deployment_type].ssh_user }} !requiretty" > /etc/sudoers.d/99-{{ deployment_vars[deployment_type].ssh_user }}
+
when: instances |length > 0
register: gce
diff --git a/playbooks/gce/openshift-cluster/update.yml b/playbooks/gce/openshift-cluster/update.yml
index 9b7a2777a..332f27da7 100644
--- a/playbooks/gce/openshift-cluster/update.yml
+++ b/playbooks/gce/openshift-cluster/update.yml
@@ -1,12 +1,25 @@
---
+- hosts: localhost
+ gather_facts: no
+ tasks:
+ - include_vars: vars.yml
+ - include_vars: cluster_hosts.yml
+ - add_host:
+ name: "{{ item }}"
+ groups: l_oo_all_hosts
+ with_items: g_all_hosts
+
+- hosts: l_oo_all_hosts
+ gather_facts: no
+ tasks:
+ - include_vars: vars.yml
+ - include_vars: cluster_hosts.yml
+
- name: Populate oo_hosts_to_update group
hosts: localhost
connection: local
become: no
gather_facts: no
- vars_files:
- - vars.yml
- - cluster_hosts.yml
tasks:
- name: Evaluate oo_hosts_to_update
add_host:
@@ -14,7 +27,7 @@
groups: oo_hosts_to_update
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_become: "{{ deployment_vars[deployment_type].become }}"
- with_items: "{{ g_all_hosts | default([]) }}"
+ with_items: g_all_hosts | default([])
- include: ../../common/openshift-cluster/update_repos_and_packages.yml
diff --git a/playbooks/libvirt/openshift-cluster/config.yml b/playbooks/libvirt/openshift-cluster/config.yml
index 81a6fff0d..032d4cf68 100644
--- a/playbooks/libvirt/openshift-cluster/config.yml
+++ b/playbooks/libvirt/openshift-cluster/config.yml
@@ -2,10 +2,23 @@
# TODO: need to figure out a plan for setting hostname, currently the default
# is localhost, so no hostname value (or public_hostname) value is getting
# assigned
+- hosts: localhost
+ gather_facts: no
+ tasks:
+ - include_vars: vars.yml
+ - include_vars: cluster_hosts.yml
+ - add_host:
+ name: "{{ item }}"
+ groups: l_oo_all_hosts
+ with_items: g_all_hosts
+
+- hosts: l_oo_all_hosts
+ gather_facts: no
+ tasks:
+ - include_vars: vars.yml
+ - include_vars: cluster_hosts.yml
+
- include: ../../common/openshift-cluster/config.yml
- vars_files:
- - ../../libvirt/openshift-cluster/vars.yml
- - ../../libvirt/openshift-cluster/cluster_hosts.yml
vars:
g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
g_sudo: "{{ deployment_vars[deployment_type].become }}"
@@ -21,3 +34,4 @@
os_sdn_network_plugin_name: "{{ lookup('oo_option', 'sdn_network_plugin_name') }}"
openshift_use_flannel: "{{ lookup('oo_option', 'use_flannel') }}"
openshift_use_fluentd: "{{ lookup('oo_option', 'use_fluentd') }}"
+ openshift_use_dnsmasq: false
diff --git a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
index 7231f255a..833586ffa 100644
--- a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
+++ b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
@@ -83,7 +83,7 @@
with_items: instances
- name: Wait for the VMs to get an IP
- shell: 'virsh -c {{ libvirt_uri }} net-dhcp-leases openshift-ansible | egrep -c ''{{ instances | join("|") }}'''
+ shell: 'virsh -c {{ libvirt_uri }} net-dhcp-leases {{ libvirt_network }} | egrep -c ''{{ instances | join("|") }}'''
register: nb_allocated_ips
until: nb_allocated_ips.stdout == '{{ instances | length }}'
retries: 60
@@ -91,7 +91,7 @@
when: instances | length != 0
- name: Collect IP addresses of the VMs
- shell: 'virsh -c {{ libvirt_uri }} net-dhcp-leases openshift-ansible | awk ''$6 == "{{ item }}" {gsub(/\/.*/, "", $5); print $5}'''
+ shell: 'virsh -c {{ libvirt_uri }} net-dhcp-leases {{ libvirt_network }} | awk ''$6 == "{{ item }}" {gsub(/\/.*/, "", $5); print $5}'''
register: scratch_ip
with_items: instances
diff --git a/playbooks/libvirt/openshift-cluster/update.yml b/playbooks/libvirt/openshift-cluster/update.yml
index 9b7a2777a..28362c984 100644
--- a/playbooks/libvirt/openshift-cluster/update.yml
+++ b/playbooks/libvirt/openshift-cluster/update.yml
@@ -1,4 +1,20 @@
---
+- hosts: localhost
+ gather_facts: no
+ tasks:
+ - include_vars: vars.yml
+ - include_vars: cluster_hosts.yml
+ - add_host:
+ name: "{{ item }}"
+ groups: l_oo_all_hosts
+ with_items: g_all_hosts
+
+- hosts: l_oo_all_hosts
+ gather_facts: no
+ tasks:
+ - include_vars: vars.yml
+ - include_vars: cluster_hosts.yml
+
- name: Populate oo_hosts_to_update group
hosts: localhost
connection: local
@@ -14,7 +30,7 @@
groups: oo_hosts_to_update
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_become: "{{ deployment_vars[deployment_type].become }}"
- with_items: "{{ g_all_hosts | default([]) }}"
+ with_items: g_all_hosts | default([])
- include: ../../common/openshift-cluster/update_repos_and_packages.yml
diff --git a/playbooks/openstack/openshift-cluster/config.yml b/playbooks/openstack/openshift-cluster/config.yml
index 9c0ca9af9..6e4f414d6 100644
--- a/playbooks/openstack/openshift-cluster/config.yml
+++ b/playbooks/openstack/openshift-cluster/config.yml
@@ -1,8 +1,21 @@
---
+- hosts: localhost
+ gather_facts: no
+ tasks:
+ - include_vars: vars.yml
+ - include_vars: cluster_hosts.yml
+ - add_host:
+ name: "{{ item }}"
+ groups: l_oo_all_hosts
+ with_items: g_all_hosts
+
+- hosts: l_oo_all_hosts
+ gather_facts: no
+ tasks:
+ - include_vars: vars.yml
+ - include_vars: cluster_hosts.yml
+
- include: ../../common/openshift-cluster/config.yml
- vars_files:
- - ../../openstack/openshift-cluster/vars.yml
- - ../../openstack/openshift-cluster/cluster_hosts.yml
vars:
g_nodeonmaster: true
g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
diff --git a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
index 2f05c3adc..422e6dafe 100644
--- a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
+++ b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
@@ -288,6 +288,14 @@ resources:
port_range_max: 53
- direction: ingress
protocol: tcp
+ port_range_min: 8053
+ port_range_max: 8053
+ - direction: ingress
+ protocol: udp
+ port_range_min: 8053
+ port_range_max: 8053
+ - direction: ingress
+ protocol: tcp
port_range_min: 24224
port_range_max: 24224
- direction: ingress
@@ -591,11 +599,17 @@ resources:
type: OS::Heat::MultipartMime
properties:
parts:
- - config: { get_file: user-data }
- config:
str_replace:
template: |
#cloud-config
+ disable_root: true
+
+ system_info:
+ default_user:
+ name: openshift
+ sudo: ["ALL=(ALL) NOPASSWD: ALL"]
+
write_files:
- path: /etc/sudoers.d/00-openshift-no-requiretty
permissions: 440
diff --git a/playbooks/openstack/openshift-cluster/launch.yml b/playbooks/openstack/openshift-cluster/launch.yml
index 3d4fe42d0..b9aae2f4c 100644
--- a/playbooks/openstack/openshift-cluster/launch.yml
+++ b/playbooks/openstack/openshift-cluster/launch.yml
@@ -46,7 +46,7 @@
-P master_flavor={{ openstack_flavor["master"] }}
-P node_flavor={{ openstack_flavor["node"] }}
-P infra_flavor={{ openstack_flavor["infra"] }}
- -P dns_flavor=m1.small
+ -P dns_flavor={{ openstack_flavor["dns"] }}
openshift-ansible-{{ cluster_id }}-stack'
- name: Wait for OpenStack Stack readiness
diff --git a/playbooks/openstack/openshift-cluster/update.yml b/playbooks/openstack/openshift-cluster/update.yml
index 539af6524..6d4d23963 100644
--- a/playbooks/openstack/openshift-cluster/update.yml
+++ b/playbooks/openstack/openshift-cluster/update.yml
@@ -1,4 +1,20 @@
---
+- hosts: localhost
+ gather_facts: no
+ tasks:
+ - include_vars: vars.yml
+ - include_vars: cluster_hosts.yml
+ - add_host:
+ name: "{{ item }}"
+ groups: l_oo_all_hosts
+ with_items: g_all_hosts
+
+- hosts: l_oo_all_hosts
+ gather_facts: no
+ tasks:
+ - include_vars: vars.yml
+ - include_vars: cluster_hosts.yml
+
- include: dns.yml
- name: Populate oo_hosts_to_update group
@@ -6,9 +22,6 @@
connection: local
become: no
gather_facts: no
- vars_files:
- - vars.yml
- - cluster_hosts.yml
tasks:
- name: Evaluate oo_hosts_to_update
add_host:
@@ -16,7 +29,7 @@
groups: oo_hosts_to_update
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_become: "{{ deployment_vars[deployment_type].become }}"
- with_items: "{{ g_all_hosts | default([]) }}"
+ with_items: g_all_hosts | default([])
- include: ../../common/openshift-cluster/update_repos_and_packages.yml
diff --git a/playbooks/openstack/openshift-cluster/vars.yml b/playbooks/openstack/openshift-cluster/vars.yml
index 84cba0506..bc53a51b0 100644
--- a/playbooks/openstack/openshift-cluster/vars.yml
+++ b/playbooks/openstack/openshift-cluster/vars.yml
@@ -13,6 +13,7 @@ openstack_ssh_public_key: "{{ lookup('file', lookup('oo_option', 'public_k
openstack_ssh_access_from: "{{ lookup('oo_option', 'ssh_from') |
default('0.0.0.0/0', True) }}"
openstack_flavor:
+ dns: "{{ lookup('oo_option', 'dns_flavor' ) | default('m1.small', True) }}"
etcd: "{{ lookup('oo_option', 'etcd_flavor' ) | default('m1.small', True) }}"
master: "{{ lookup('oo_option', 'master_flavor' ) | default('m1.small', True) }}"
infra: "{{ lookup('oo_option', 'infra_flavor' ) | default('m1.small', True) }}"
diff --git a/roles/cockpit/defaults/main.yml b/roles/cockpit/defaults/main.yml
deleted file mode 100644
index 9cf665841..000000000
--- a/roles/cockpit/defaults/main.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-os_firewall_allow:
-- service: cockpit-ws
- port: 9090/tcp
diff --git a/roles/cockpit/meta/main.yml b/roles/cockpit/meta/main.yml
index 1e3948b19..43047902d 100644
--- a/roles/cockpit/meta/main.yml
+++ b/roles/cockpit/meta/main.yml
@@ -12,4 +12,7 @@ galaxy_info:
categories:
- cloud
dependencies:
- - { role: os_firewall }
+- role: os_firewall
+ os_firewall_allow:
+ - service: cockpit-ws
+ port: 9090/tcp
diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml
index b9b2666fb..3368e7cd9 100644
--- a/roles/docker/tasks/main.yml
+++ b/roles/docker/tasks/main.yml
@@ -1,41 +1,42 @@
---
-# tasks file for docker
-
-- name: Get current installed version if docker_version is specified
+# Going forward we require Docker 1.10 or greater. If the user has a lesser version installed they must run a separate upgrade process.
+- name: Get current installed Docker version
command: "{{ repoquery_cmd }} --installed --qf '%{version}' docker"
- when: not openshift.common.is_atomic | bool and docker_version != ''
- register: docker_version_result
+ when: not openshift.common.is_atomic | bool
+ register: curr_docker_version
changed_when: false
-- stat: path=/etc/sysconfig/docker-storage
- register: docker_storage_check
+# TODO: The use of upgrading var will be removed in the coming upgrade refactor. This is a temporary
+# fix to work around the fact that right now, this role is called during upgrade, before we're
+# ready to upgrade Docker.
+- name: Fail if Docker upgrade is required
+ fail:
+ msg: "Docker {{ curr_docker_version.stdout }} must be upgraded to Docker 1.10 or greater"
+ when: not upgrading | bool and not curr_docker_version | skipped and curr_docker_version.stdout | default('0.0', True) | version_compare('1.10', '<')
-- name: Remove deferred deletion for downgrades from 1.9
+- name: Get latest available version of Docker
command: >
- sed -i 's/--storage-opt dm.use_deferred_deletion=true//' /etc/sysconfig/docker-storage
- when: docker_storage_check.stat.exists | bool and not docker_version_result | skipped and docker_version_result.stdout | default('0.0', True) | version_compare('1.9', '>=') and docker_version | version_compare('1.9', '<')
-
-- name: Downgrade docker if necessary
- command: "{{ ansible_pkg_mgr }} swap -y docker-* docker-*{{ docker_version }}"
- register: docker_downgrade_result
- when: not docker_version_result | skipped and docker_version_result.stdout | default('0.0', True) | version_compare(docker_version, 'gt')
+ {{ repoquery_cmd }} --qf '%{version}' "docker"
+ register: avail_docker_version
+ failed_when: false
+ changed_when: false
+ when: not curr_docker_version.stdout | default('0.0', True) | version_compare('1.10', '>=') and not openshift.common.is_atomic | bool
-- name: Install docker
- action: "{{ ansible_pkg_mgr }} name=docker{{ '-' + docker_version if docker_version is defined and docker_version != '' else '' }} state=present"
- when: not openshift.common.is_atomic | bool and docker_downgrade_result | skipped
+- name: Verify Docker >= 1.10 is available
+ fail:
+ msg: "Docker {{ avail_docker_version.stdout }} is available, but 1.10 or greater is required"
+ when: not avail_docker_version | skipped and avail_docker_version.stdout | default('0.0', True) | version_compare('1.10', '<')
-# If docker were enabled and started before we downgraded it may have entered a
-# failed state. Check for that and clear it if necessary.
-- name: Check that docker hasn't entered failed state
- command: systemctl show docker
- register: docker_state
- changed_when: False
+- stat: path=/etc/sysconfig/docker-storage
+ register: docker_storage_check
-- name: Reset docker service state
- command: systemctl reset-failed docker.service
- when: " 'ActiveState=failed' in docker_state.stdout "
+# Make sure Docker is installed, but do not update a running version.
+# Docker upgrades are handled by a separate playbook.
+- name: Install Docker
+ action: "{{ ansible_pkg_mgr }} name=docker state=present"
+ when: not openshift.common.is_atomic | bool
-- name: Start the docker service
+- name: Start the Docker service
service:
name: docker
enabled: yes
@@ -85,8 +86,9 @@
reg_fact_val: "{{ docker_no_proxy | default('') | join(',') }}"
notify:
- restart docker
+ when: "{{ 'http_proxy' in openshift.common or 'https_proxy' in openshift.common and docker_check.stat.isreg }}"
-- name: Set various docker options
+- name: Set various Docker options
lineinfile:
dest: /etc/sysconfig/docker
regexp: '^OPTIONS=.*$'
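
The reworked docker role above no longer pins or downgrades versions; it only verifies that the installed and available Docker packages meet the 1.10 minimum, with the actual checks running through repoquery and Ansible's version_compare filter. A rough, purely illustrative Python sketch of the same gating rule, assuming LooseVersion as a stand-in for version_compare:

    from distutils.version import LooseVersion

    def docker_version_ok(installed, available, minimum='1.10'):
        # Mirror the role's two checks: fail if an installed Docker is older
        # than the minimum, and fail if no new-enough package is available.
        if installed and LooseVersion(installed) < LooseVersion(minimum):
            return False, 'Docker %s must be upgraded to %s or greater' % (installed, minimum)
        if not installed and available and LooseVersion(available) < LooseVersion(minimum):
            return False, 'Docker %s is available, but %s or greater is required' % (available, minimum)
        return True, ''

    print(docker_version_ok('1.9.1', '1.10.3'))  # (False, 'Docker 1.9.1 must be upgraded ...')
    print(docker_version_ok('', '1.10.3'))       # (True, '')
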
diff --git a/roles/etcd/defaults/main.yaml b/roles/etcd/defaults/main.yaml
index 1cb055816..2ec62c37c 100644
--- a/roles/etcd/defaults/main.yaml
+++ b/roles/etcd/defaults/main.yaml
@@ -14,9 +14,3 @@ etcd_advertise_client_urls: "{{ etcd_url_scheme }}://{{ etcd_ip }}:{{ etcd_clien
etcd_listen_client_urls: "{{ etcd_url_scheme }}://{{ etcd_ip }}:{{ etcd_client_port }}"
etcd_data_dir: /var/lib/etcd/
-
-os_firewall_allow:
-- service: etcd
- port: "{{etcd_client_port}}/tcp"
-- service: etcd peering
- port: "{{ etcd_peer_port }}/tcp"
diff --git a/roles/etcd/meta/main.yml b/roles/etcd/meta/main.yml
index a71b36237..7156a9fff 100644
--- a/roles/etcd/meta/main.yml
+++ b/roles/etcd/meta/main.yml
@@ -16,5 +16,10 @@ galaxy_info:
- cloud
- system
dependencies:
-- { role: os_firewall }
-- { role: etcd_common }
+- role: os_firewall
+ os_firewall_allow:
+ - service: etcd
+ port: "{{etcd_client_port}}/tcp"
+ - service: etcd peering
+ port: "{{ etcd_peer_port }}/tcp"
+- role: etcd_common
diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml
index a798dc973..71735dc25 100644
--- a/roles/etcd/tasks/main.yml
+++ b/roles/etcd/tasks/main.yml
@@ -28,18 +28,18 @@
state: directory
mode: 0700
+- name: Check for etcd service presence
+ command: systemctl show etcd.service
+ register: etcd_show
+ changed_when: false
+
- name: Disable system etcd when containerized
- when: etcd_is_containerized | bool
+ when: etcd_is_containerized | bool and 'LoadState=not-found' not in etcd_show.stdout
service:
name: etcd
state: stopped
enabled: no
-- name: Check for etcd service presence
- command: systemctl show etcd.service
- register: etcd_show
- changed_when: false
-
- name: Mask system etcd when containerized
when: etcd_is_containerized | bool and 'LoadState=not-found' not in etcd_show.stdout
command: systemctl mask etcd
diff --git a/roles/nuage_master/tasks/certificates.yml b/roles/nuage_master/tasks/certificates.yml
index 0d3c69467..32b024487 100644
--- a/roles/nuage_master/tasks/certificates.yml
+++ b/roles/nuage_master/tasks/certificates.yml
@@ -10,7 +10,7 @@
- name: Create the req file
command: >
- openssl req -key "{{ nuage_ca_master_rest_server_key }}" -new -out "{{ nuage_mon_rest_server_crt_dir }}/restServer.req" -subj "/CN={{ ansible_nodename }}"
+ openssl req -key "{{ nuage_ca_master_rest_server_key }}" -new -out "{{ nuage_mon_rest_server_crt_dir }}/restServer.req" -subj "/CN={{ nuage_mon_rest_server_host }}"
delegate_to: "{{ nuage_ca_master }}"
- name: Generate the crt file
diff --git a/roles/nuage_master/vars/main.yaml b/roles/nuage_master/vars/main.yaml
index 5bc735bb6..92e716a45 100644
--- a/roles/nuage_master/vars/main.yaml
+++ b/roles/nuage_master/vars/main.yaml
@@ -17,6 +17,8 @@ nuage_mon_rest_server_crt_dir: "{{ nuage_ca_master_crt_dir }}/{{ ansible_nodenam
nuage_ca_master_rest_server_key: "{{ nuage_mon_rest_server_crt_dir }}/nuageMonServer.key"
nuage_ca_master_rest_server_crt: "{{ nuage_mon_rest_server_crt_dir }}/nuageMonServer.crt"
+nuage_mon_rest_server_host: "{{ openshift.master.cluster_hostname | default(openshift.common.hostname) }}"
+
nuage_master_crt_dir : /usr/share/nuage-openshift-monitor
nuage_service_account: system:serviceaccount:default:nuage
diff --git a/roles/openshift_ansible_inventory/README.md b/roles/openshift_ansible_inventory/README.md
deleted file mode 100644
index b62287c12..000000000
--- a/roles/openshift_ansible_inventory/README.md
+++ /dev/null
@@ -1,41 +0,0 @@
-OpenShift Ansible Inventory
-=========
-
-Install and configure openshift-ansible-inventory.
-
-Requirements
-------------
-
-None
-
-Role Variables
---------------
-
-oo_inventory_group
-oo_inventory_user
-oo_inventory_accounts
-oo_inventory_cache_max_age
-
-Dependencies
-------------
-
-None
-
-Example Playbook
-----------------
-
-Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
-
- - hosts: servers
- roles:
- - { role: username.rolename, x: 42 }
-
-License
--------
-
-ASL 2.0
-
-Author Information
-------------------
-
-OpenShift operations, Red Hat, Inc
diff --git a/roles/openshift_ansible_inventory/defaults/main.yml b/roles/openshift_ansible_inventory/defaults/main.yml
deleted file mode 100644
index f53c00c80..000000000
--- a/roles/openshift_ansible_inventory/defaults/main.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-oo_inventory_group: root
-oo_inventory_owner: root
-oo_inventory_cache_max_age: 1800
diff --git a/roles/openshift_ansible_inventory/handlers/main.yml b/roles/openshift_ansible_inventory/handlers/main.yml
deleted file mode 100644
index e2db43477..000000000
--- a/roles/openshift_ansible_inventory/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for openshift_ansible_inventory
diff --git a/roles/openshift_ansible_inventory/meta/main.yml b/roles/openshift_ansible_inventory/meta/main.yml
deleted file mode 100644
index 7f7387e80..000000000
--- a/roles/openshift_ansible_inventory/meta/main.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-galaxy_info:
- author: OpenShift
- description: Install and configure openshift-ansible-inventory
- company: Red Hat, Inc
- license: ASL 2.0
- min_ansible_version: 1.2
-dependencies: []
diff --git a/roles/openshift_ansible_inventory/tasks/main.yml b/roles/openshift_ansible_inventory/tasks/main.yml
deleted file mode 100644
index 05c7a5f93..000000000
--- a/roles/openshift_ansible_inventory/tasks/main.yml
+++ /dev/null
@@ -1,47 +0,0 @@
----
-- action: "{{ ansible_pkg_mgr }} name={{ item}} state=present"
- with_items:
- - openshift-ansible-inventory
- - openshift-ansible-inventory-aws
- - openshift-ansible-inventory-gce
- when: not openshift.common.is_containerized | bool
-
-- name:
- copy:
- content: "{{ oo_inventory_accounts | to_nice_yaml }}"
- dest: /etc/ansible/multi_inventory.yaml
- group: "{{ oo_inventory_group }}"
- owner: "{{ oo_inventory_owner }}"
- mode: "0640"
-
-- file:
- state: directory
- dest: /etc/ansible/inventory
- owner: root
- group: libra_ops
- mode: 0750
-
-- file:
- state: link
- src: /usr/share/ansible/inventory/multi_inventory.py
- dest: /etc/ansible/inventory/multi_inventory.py
- owner: root
- group: libra_ops
-
-# This cron uses the above location to call its job
-- name: Cron to keep cache fresh
- cron:
- name: 'multi_inventory'
- minute: '*/10'
- job: '/usr/share/ansible/inventory/multi_inventory.py --refresh-cache &> /dev/null'
- when: oo_cron_refresh_cache is defined and oo_cron_refresh_cache
-
-- name: Set cache location
- file:
- state: directory
- dest: "{{ oo_inventory_cache_location | dirname }}"
- owner: root
- group: libra_ops
- recurse: yes
- mode: '2770'
- when: oo_inventory_cache_location is defined
diff --git a/roles/openshift_ansible_inventory/vars/main.yml b/roles/openshift_ansible_inventory/vars/main.yml
deleted file mode 100644
index 25c049282..000000000
--- a/roles/openshift_ansible_inventory/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for openshift_ansible_inventory
diff --git a/roles/openshift_cli/tasks/main.yml b/roles/openshift_cli/tasks/main.yml
index bfa60e5b0..c0a712513 100644
--- a/roles/openshift_cli/tasks/main.yml
+++ b/roles/openshift_cli/tasks/main.yml
@@ -5,7 +5,7 @@
- name: Pull CLI Image
command: >
- docker pull {{ openshift.common.cli_image }}:{{ openshift_version }}
+ docker pull {{ openshift.common.cli_image }}{{ ':' + openshift_version if openshift_version is defined and openshift_version != '' else '' }}
when: openshift.common.is_containerized | bool
- name: Create /usr/local/bin/openshift cli wrapper
diff --git a/roles/openshift_cli/templates/openshift.j2 b/roles/openshift_cli/templates/openshift.j2
index 437e08aab..8a3f3a257 100644
--- a/roles/openshift_cli/templates/openshift.j2
+++ b/roles/openshift_cli/templates/openshift.j2
@@ -5,14 +5,14 @@ fi
cmd=`basename $0`
user=`id -u`
group=`id -g`
-image_tag={{ openshift_version }}
+image_tag="{{ openshift_version }}"
>&2 echo """
================================================================================
ATTENTION: You are running ${cmd} via a wrapper around 'docker run {{ openshift.common.cli_image }}:${image_tag}'.
This wrapper is intended only to be used to bootstrap an environment. Please
install client tools on another host once you have granted cluster-admin
-privileges to a user.
+privileges to a user.
{% if openshift.common.deployment_type in ['openshift-enterprise','atomic-enterprise'] %}
See https://docs.openshift.com/enterprise/latest/cli_reference/get_started_cli.html
{% else %}
@@ -21,4 +21,8 @@ See https://docs.openshift.org/latest/cli_reference/get_started_cli.html
=================================================================================
"""
-docker run -i --privileged --net=host --user=${user}:${group} -v ~/.kube:/root/.kube -v /tmp:/tmp -v {{ openshift.common.config_base}}:{{ openshift.common.config_base }} -e KUBECONFIG=/root/.kube/config --entrypoint ${cmd} --rm {{ openshift.common.cli_image }}:${image_tag} "${@}"
+if [ -n "$image_tag" ]; then
+ image_tag=":$image_tag"
+fi
+
+docker run -i --privileged --net=host --user=${user}:${group} -v ~/.kube:/root/.kube -v /tmp:/tmp -v {{ openshift.common.config_base}}:{{ openshift.common.config_base }} -e KUBECONFIG=/root/.kube/config --entrypoint ${cmd} --rm {{ openshift.common.cli_image }}${image_tag} "${@}"
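
Both the CLI image pull task and the wrapper template above now append the ':<tag>' suffix only when openshift_version is defined and non-empty. A tiny sketch of that formatting rule; image_ref is a hypothetical name used only for illustration, not anything in the role:

    def image_ref(image, version=''):
        # Append ':<version>' only when a non-empty version is given,
        # mirroring the Jinja2 expression in the pull task and the
        # shell conditional in the wrapper script.
        return image + (':' + version if version else '')

    print(image_ref('openshift3/ose'))           # openshift3/ose
    print(image_ref('openshift3/ose', 'v3.2'))   # openshift3/ose:v3.2
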
diff --git a/roles/openshift_clock/meta/main.yml b/roles/openshift_clock/meta/main.yml
new file mode 100644
index 000000000..3e175beb0
--- /dev/null
+++ b/roles/openshift_clock/meta/main.yml
@@ -0,0 +1,15 @@
+---
+galaxy_info:
+ author: Jeremiah Stuever
+ description: OpenShift Clock
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 1.9
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+dependencies:
+- { role: openshift_facts }
diff --git a/roles/openshift_clock/tasks/main.yaml b/roles/openshift_clock/tasks/main.yaml
new file mode 100644
index 000000000..5a8403f68
--- /dev/null
+++ b/roles/openshift_clock/tasks/main.yaml
@@ -0,0 +1,14 @@
+---
+- name: Set clock facts
+ openshift_facts:
+ role: clock
+ local_facts:
+ enabled: "{{ openshift_clock_enabled | default(None) }}"
+
+- name: Install ntp package
+ action: "{{ ansible_pkg_mgr }} name=ntp state=present"
+ when: openshift.clock.enabled | bool and not openshift.clock.chrony_installed | bool
+
+- name: Start and enable ntpd/chronyd
+ shell: timedatectl set-ntp true
+ when: openshift.clock.enabled | bool
diff --git a/roles/openshift_docker_facts/tasks/main.yml b/roles/openshift_docker_facts/tasks/main.yml
index 0e51fd16f..cdea90413 100644
--- a/roles/openshift_docker_facts/tasks/main.yml
+++ b/roles/openshift_docker_facts/tasks/main.yml
@@ -32,9 +32,7 @@
docker_no_proxy: "{{ openshift.common.no_proxy | default(omit) }}"
- set_fact:
- docker_options: >
- --insecure-registry={{ openshift.docker.hosted_registry_network }}
- {{ openshift.docker.options | default ('') }}
+ docker_options: "--insecure-registry={{ openshift.docker.hosted_registry_network }} {{ openshift.docker.options | default ('') }}"
when: openshift.docker.hosted_registry_insecure | default(False) | bool and openshift.docker.hosted_registry_network is defined
- set_fact:
@@ -59,14 +57,3 @@
l_common_version: "{{ common_version.stdout | default('0.0', True) }}"
when: not openshift.common.is_containerized | bool
-- name: Set docker version to be installed
- set_fact:
- docker_version: "{{ '1.8.2' }}"
- when: " ( l_common_version | version_compare('3.2','<') and openshift.common.service_type in ['openshift', 'atomic-openshift'] ) or
- ( l_common_version | version_compare('1.1.4','<') and openshift.common.service_type == 'origin' )"
-
-- name: Set docker version to be installed
- set_fact:
- docker_version: "{{ '1.9.1' }}"
- when: " ( l_common_version | version_compare('3.2','>') and openshift.common.service_type == 'atomic-openshift' ) or
- ( l_common_version | version_compare('1.2','>') and openshift.common.service_type == 'origin' )"
diff --git a/roles/openshift_etcd/meta/main.yml b/roles/openshift_etcd/meta/main.yml
index 7cc548f69..de36b201b 100644
--- a/roles/openshift_etcd/meta/main.yml
+++ b/roles/openshift_etcd/meta/main.yml
@@ -13,6 +13,7 @@ galaxy_info:
- cloud
dependencies:
- role: openshift_etcd_facts
+- role: openshift_clock
- role: openshift_docker
when: openshift.common.is_containerized | bool
- role: etcd
diff --git a/roles/openshift_examples/examples-sync.sh b/roles/openshift_examples/examples-sync.sh
index 7d81ac927..f9d194909 100755
--- a/roles/openshift_examples/examples-sync.sh
+++ b/roles/openshift_examples/examples-sync.sh
@@ -5,7 +5,7 @@
#
# This script should be run from openshift-ansible/roles/openshift_examples
-XPAAS_VERSION=ose-v1.3.0-1
+XPAAS_VERSION=ose-v1.3.1
ORIGIN_VERSION=${1:-v1.2}
EXAMPLES_BASE=$(pwd)/files/examples/${ORIGIN_VERSION}
find ${EXAMPLES_BASE} -name '*.json' -delete
diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-streams/jboss-image-streams.json b/roles/openshift_examples/files/examples/v1.1/xpaas-streams/jboss-image-streams.json
index 5e03d9d48..46f93823c 100644
--- a/roles/openshift_examples/files/examples/v1.1/xpaas-streams/jboss-image-streams.json
+++ b/roles/openshift_examples/files/examples/v1.1/xpaas-streams/jboss-image-streams.json
@@ -138,22 +138,19 @@
"name": "jboss-eap70-openshift"
},
"spec": {
+ "dockerImageRepository": "registry.access.redhat.com/jboss-eap-7/eap70-openshift",
"tags": [
{
- "name": "1.3-Beta",
+ "name": "1.3",
"annotations": {
- "description": "JBoss EAP 7.0 Beta S2I images.",
+ "description": "JBoss EAP 7.0 S2I images.",
"iconClass": "icon-jboss",
"tags": "builder,eap,javaee,java,jboss,xpaas",
"supports":"eap:7.0,javaee:7,java:8,xpaas:1.3",
"sampleRepo": "https://github.com/jboss-developer/jboss-eap-quickstarts.git",
"sampleContextDir": "kitchensink",
- "sampleRef": "7.0.x",
+ "sampleRef": "7.0.0.GA",
"version": "1.3"
- },
- "from": {
- "kind": "DockerImage",
- "name": "registry.access.redhat.com/jboss-eap-7-beta/eap70-openshift:1.3"
}
}
]
@@ -234,6 +231,16 @@
"supports":"amq:6.2,messaging,xpaas:1.2",
"version": "1.2"
}
+ },
+ {
+ "name": "1.3",
+ "annotations": {
+ "description": "JBoss A-MQ 6.2 broker image.",
+ "iconClass": "icon-jboss",
+ "tags": "messaging,amq,jboss,xpaas",
+ "supports":"amq:6.2,messaging,xpaas:1.3",
+ "version": "1.3"
+ }
}
]
}
diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/amq62-basic.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/amq62-basic.json
index 2b1680755..ce953c05f 100644
--- a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/amq62-basic.json
+++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/amq62-basic.json
@@ -6,13 +6,13 @@
"description": "Application template for JBoss A-MQ brokers. These can be deployed as standalone or in a mesh. This template doesn't feature SSL support.",
"iconClass": "icon-jboss",
"tags": "messaging,amq,jboss,xpaas",
- "version": "1.2.0"
+ "version": "1.3.1"
},
"name": "amq62-basic"
},
"labels": {
"template": "amq62-basic",
- "xpaas": "1.2.0"
+ "xpaas": "1.3.1"
},
"parameters": [
{
@@ -180,7 +180,10 @@
},
"spec": {
"strategy": {
- "type": "Recreate"
+ "type": "Rolling",
+ "rollingParams": {
+ "maxSurge": 0
+ }
},
"triggers": [
{
@@ -193,7 +196,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-amq-62:1.2"
+ "name": "jboss-amq-62:1.3"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/amq62-persistent-ssl.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/amq62-persistent-ssl.json
index 9759ed7c7..7d41a29ad 100644
--- a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/amq62-persistent-ssl.json
+++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/amq62-persistent-ssl.json
@@ -6,13 +6,13 @@
"description": "Application template for JBoss A-MQ brokers. These are deployed as standalone and use persistent storage for saving messages. This template supports SSL and requires usage of OpenShift secrets.",
"iconClass": "icon-jboss",
"tags": "messaging,amq,jboss,xpaas",
- "version": "1.2.0"
+ "version": "1.3.1"
},
"name": "amq62-persistent-ssl"
},
"labels": {
"template": "amq62-persistent-ssl",
- "xpaas": "1.2.0"
+ "xpaas": "1.3.1"
},
"parameters": [
{
@@ -22,7 +22,13 @@
"required": true
},
{
- "description": "Protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. SSL variants of these protocols will be configured automaticaly.",
+ "description": "Split the data directory for each node in a mesh.",
+ "name": "AMQ_SPLIT",
+ "value": "false",
+ "required": false
+ },
+ {
+ "description": "Protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`.",
"name": "MQ_PROTOCOL",
"value": "openwire",
"required": false
@@ -60,6 +66,12 @@
"required": false
},
{
+ "description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "kube",
+ "required": false
+ },
+ {
"description": "Name of a secret containing SSL related files",
"name": "AMQ_SECRET",
"value": "amq-app-secret",
@@ -306,7 +318,10 @@
},
"spec": {
"strategy": {
- "type": "Recreate"
+ "type": "Rolling",
+ "rollingParams": {
+ "maxSurge": 0
+ }
},
"triggers": [
{
@@ -319,7 +334,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-amq-62:1.2"
+ "name": "jboss-amq-62:1.3"
}
}
},
@@ -354,7 +369,7 @@
"readOnly": true
},
{
- "mountPath": "/opt/amq/data/kahadb",
+ "mountPath": "/opt/amq/data",
"name": "${APPLICATION_NAME}-amq-pvol"
}
],
@@ -436,6 +451,26 @@
"value": "${MQ_TOPICS}"
},
{
+ "name": "AMQ_SPLIT",
+ "value": "${AMQ_SPLIT}"
+ },
+ {
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "${AMQ_MESH_DISCOVERY_TYPE}"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-amq-tcp"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
"name": "AMQ_KEYSTORE_TRUSTSTORE_DIR",
"value": "/etc/amq-secret-volume"
},
@@ -491,7 +526,7 @@
},
"spec": {
"accessModes": [
- "ReadWriteOnce"
+ "ReadWriteMany"
],
"resources": {
"requests": {
diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/amq62-persistent.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/amq62-persistent.json
index a8b3d5714..5d5dd9840 100644
--- a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/amq62-persistent.json
+++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/amq62-persistent.json
@@ -6,13 +6,13 @@
"description": "Application template for JBoss A-MQ brokers. These can be deployed as standalone and use persistent storage for saving messages. This template doesn't feature SSL support.",
"iconClass": "icon-jboss",
"tags": "messaging,amq,jboss,xpaas",
- "version": "1.2.0"
+ "version": "1.3.1"
},
"name": "amq62-persistent"
},
"labels": {
"template": "amq62-persistent",
- "xpaas": "1.2.0"
+ "xpaas": "1.3.1"
},
"parameters": [
{
@@ -22,6 +22,12 @@
"required": true
},
{
+ "description": "Split the data directory for each node in a mesh.",
+ "name": "AMQ_SPLIT",
+ "value": "false",
+ "required": false
+ },
+ {
"description": "Protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`.",
"name": "MQ_PROTOCOL",
"value": "openwire",
@@ -60,6 +66,12 @@
"required": false
},
{
+ "description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "kube",
+ "required": false
+ },
+ {
"description": "The A-MQ storage usage limit",
"name": "AMQ_STORAGE_USAGE_LIMIT",
"value": "100 gb",
@@ -180,7 +192,10 @@
},
"spec": {
"strategy": {
- "type": "Recreate"
+ "type": "Rolling",
+ "rollingParams": {
+ "maxSurge": 0
+ }
},
"triggers": [
{
@@ -193,7 +208,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-amq-62:1.2"
+ "name": "jboss-amq-62:1.3"
}
}
},
@@ -222,7 +237,7 @@
"imagePullPolicy": "Always",
"volumeMounts": [
{
- "mountPath": "/opt/amq/data/kahadb",
+ "mountPath": "/opt/amq/data",
"name": "${APPLICATION_NAME}-amq-pvol"
}
],
@@ -284,6 +299,26 @@
"value": "${MQ_TOPICS}"
},
{
+ "name": "AMQ_SPLIT",
+ "value": "${AMQ_SPLIT}"
+ },
+ {
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "${AMQ_MESH_DISCOVERY_TYPE}"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-amq-tcp"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
"name": "AMQ_STORAGE_USAGE_LIMIT",
"value": "${AMQ_STORAGE_USAGE_LIMIT}"
}
@@ -313,7 +348,7 @@
},
"spec": {
"accessModes": [
- "ReadWriteOnce"
+ "ReadWriteMany"
],
"resources": {
"requests": {
diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/amq62-ssl.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/amq62-ssl.json
index fdf0da9c3..4122a02a1 100644
--- a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/amq62-ssl.json
+++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/amq62-ssl.json
@@ -6,13 +6,13 @@
"description": "Application template for JBoss A-MQ brokers. These can be deployed as standalone or in a mesh. This template supports SSL and requires usage of OpenShift secrets.",
"iconClass": "icon-jboss",
"tags": "messaging,amq,jboss,xpaas",
- "version": "1.2.0"
+ "version": "1.3.1"
},
"name": "amq62-ssl"
},
"labels": {
"template": "amq62-ssl",
- "xpaas": "1.2.0"
+ "xpaas": "1.3.1"
},
"parameters": [
{
@@ -306,7 +306,10 @@
},
"spec": {
"strategy": {
- "type": "Recreate"
+ "type": "Rolling",
+ "rollingParams": {
+ "maxSurge": 0
+ }
},
"triggers": [
{
@@ -319,7 +322,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-amq-62:1.2"
+ "name": "jboss-amq-62:1.3"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/decisionserver62-amq-s2i.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/decisionserver62-amq-s2i.json
index 2e8276adb..219b8ece7 100644
--- a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/decisionserver62-amq-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/decisionserver62-amq-s2i.json
@@ -571,7 +571,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-amq-62:1.2"
+ "name": "jboss-amq-62:1.3"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-amq-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-amq-persistent-s2i.json
index 4485fd264..c9ecee9cb 100644
--- a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-amq-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-amq-persistent-s2i.json
@@ -6,13 +6,13 @@
"description": "Application template for EAP 6 A-MQ applications with persistent storage built using S2I.",
"iconClass": "icon-jboss",
"tags": "eap,amq,javaee,java,messaging,jboss,xpaas",
- "version": "1.2.0"
+ "version": "1.3.1"
},
"name": "eap64-amq-persistent-s2i"
},
"labels": {
"template": "eap64-amq-persistent-s2i",
- "xpaas": "1.2.0"
+ "xpaas": "1.3.1"
},
"parameters": [
{
@@ -64,6 +64,12 @@
"required": false
},
{
+ "description": "Split the data directory for each node in a mesh.",
+ "name": "AMQ_SPLIT",
+ "value": "false",
+ "required": false
+ },
+ {
"description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
"name": "MQ_PROTOCOL",
"value": "openwire",
@@ -585,7 +591,10 @@
},
"spec": {
"strategy": {
- "type": "Recreate"
+ "type": "Rolling",
+ "rollingParams": {
+ "maxSurge": 0
+ }
},
"triggers": [
{
@@ -598,7 +607,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-amq-62:1.2"
+ "name": "jboss-amq-62:1.3"
}
}
},
@@ -636,6 +645,11 @@
},
"ports": [
{
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
"name": "amqp",
"containerPort": 5672,
"protocol": "TCP"
@@ -699,6 +713,30 @@
"value": "${MQ_TOPICS}"
},
{
+ "name": "AMQ_SPLIT",
+ "value": "${AMQ_SPLIT}"
+ },
+ {
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "${AMQ_MESH_DISCOVERY_TYPE}"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-amq-tcp"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "${AMQ_STORAGE_USAGE_LIMIT}"
+ },
+ {
"name": "AMQ_ADMIN_USERNAME",
"value": "${AMQ_ADMIN_USERNAME}"
},
diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-amq-s2i.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-amq-s2i.json
index 72d8c061b..99724db94 100644
--- a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-amq-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-amq-s2i.json
@@ -6,13 +6,13 @@
"description": "Application template for EAP 6 A-MQ applications built using S2I.",
"iconClass": "icon-jboss",
"tags": "eap,amq,javaee,java,messaging,jboss,xpaas",
- "version": "1.2.0"
+ "version": "1.3.1"
},
"name": "eap64-amq-s2i"
},
"labels": {
"template": "eap64-amq-s2i",
- "xpaas": "1.2.0"
+ "xpaas": "1.3.1"
},
"parameters": [
{
@@ -592,7 +592,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-amq-62:1.2"
+ "name": "jboss-amq-62:1.3"
}
}
},
@@ -630,6 +630,11 @@
},
"ports": [
{
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
"name": "amqp",
"containerPort": 5672,
"protocol": "TCP"
diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap70-amq-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap70-amq-persistent-s2i.json
new file mode 100644
index 000000000..d9607ddd7
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap70-amq-persistent-s2i.json
@@ -0,0 +1,783 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for EAP 7 A-MQ applications with persistent storage built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "eap,amq,javaee,java,messaging,jboss,xpaas",
+ "version": "1.3.1"
+ },
+ "name": "eap70-amq-persistent-s2i"
+ },
+ "labels": {
+ "template": "eap70-amq-persistent-s2i",
+ "xpaas": "1.3.1"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "eap-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "helloworld-mdb",
+ "required": false
+ },
+ {
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/ConnectionFactory",
+ "name": "MQ_JNDI",
+ "value": "java:/ConnectionFactory",
+ "required": false
+ },
+ {
+ "description": "Split the data directory for each node in a mesh.",
+ "name": "AMQ_SPLIT",
+ "value": "false",
+ "required": false
+ },
+ {
+ "description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
+ "name": "MQ_PROTOCOL",
+ "value": "openwire",
+ "required": false
+ },
+ {
+ "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_QUEUES",
+ "value": "HELLOWORLDMDBQueue",
+ "required": false
+ },
+ {
+ "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_TOPICS",
+ "value": "HELLOWORLDMDBTopic",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "eap-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "User name for broker admin. If left empty, it will be generated.",
+ "name": "AMQ_ADMIN_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Password for broker admin. If left empty, it will be generated.",
+ "name": "AMQ_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "eap-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTPS port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61616,
+ "targetPort": 61616
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTP service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTPS service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap70-openshift:1.3"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "eap-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "MQ_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-amq=MQ"
+ },
+ {
+ "name": "MQ_JNDI",
+ "value": "${MQ_JNDI}"
+ },
+ {
+ "name": "MQ_USERNAME",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "MQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "MQ_PROTOCOL",
+ "value": "tcp"
+ },
+ {
+ "name": "MQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "MQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Rolling",
+ "rollingParams": {
+ "maxSurge": 0
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-amq"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-62:1.3"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-amq",
+ "image": "jboss-amq-62",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/amq/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp",
+ "containerPort": 5672,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp-ssl",
+ "containerPort": 5671,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt",
+ "containerPort": 1883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp",
+ "containerPort": 61613,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp-ssl",
+ "containerPort": 61612,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp",
+ "containerPort": 61616,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp-ssl",
+ "containerPort": 61617,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/opt/amq/data/kahadb",
+ "name": "${APPLICATION_NAME}-amq-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "AMQ_USER",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "AMQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "AMQ_TRANSPORTS",
+ "value": "${MQ_PROTOCOL}"
+ },
+ {
+ "name": "AMQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "AMQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "AMQ_SPLIT",
+ "value": "${AMQ_SPLIT}"
+ },
+ {
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "${AMQ_MESH_DISCOVERY_TYPE}"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-amq-tcp"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "${AMQ_STORAGE_USAGE_LIMIT}"
+ },
+ {
+ "name": "AMQ_ADMIN_USERNAME",
+ "value": "${AMQ_ADMIN_USERNAME}"
+ },
+ {
+ "name": "AMQ_ADMIN_PASSWORD",
+ "value": "${AMQ_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-amq-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-amq-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
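
Once the openshift_examples role has loaded the new eap70-amq-persistent-s2i.json template above, an application can be instantiated from it with the oc client. The play below is only an illustrative sketch: the template name and the APPLICATION_NAME / VOLUME_CAPACITY parameters come straight from the JSON in this diff, while the target project name and the host pattern are assumptions, not part of this change.

# Illustrative sketch only: instantiate the new EAP 7 + A-MQ persistent template.
- name: Create an EAP 7 / A-MQ application from the new template
  hosts: masters[0]
  tasks:
    - name: Process the template into a new application (project "eap-demo" is an assumed example)
      command: >
        oc new-app --template=eap70-amq-persistent-s2i
        -p APPLICATION_NAME=eap-app
        -p VOLUME_CAPACITY=512Mi
        -n eap-demo
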
diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap70-amq-s2i.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap70-amq-s2i.json
new file mode 100644
index 000000000..552b637b8
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap70-amq-s2i.json
@@ -0,0 +1,710 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for EAP 7 A-MQ applications built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "eap,amq,javaee,java,messaging,jboss,xpaas",
+ "version": "1.3.1"
+ },
+ "name": "eap70-amq-s2i"
+ },
+ "labels": {
+ "template": "eap70-amq-s2i",
+ "xpaas": "1.3.1"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "eap-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "helloworld-mdb",
+ "required": false
+ },
+ {
+ "description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/ConnectionFactory",
+ "name": "MQ_JNDI",
+ "value": "java:/ConnectionFactory",
+ "required": false
+ },
+ {
+ "description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
+ "name": "MQ_PROTOCOL",
+ "value": "openwire",
+ "required": false
+ },
+ {
+ "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_QUEUES",
+ "value": "HELLOWORLDMDBQueue",
+ "required": false
+ },
+ {
+ "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_TOPICS",
+ "value": "HELLOWORLDMDBTopic",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "eap7-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "User name for broker admin. If left empty, it will be generated.",
+ "name": "AMQ_ADMIN_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Password for broker admin. If left empty, it will be generated.",
+ "name": "AMQ_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "eap7-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTPS port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61616,
+ "targetPort": 61616
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTP service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTPS service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap70-openshift:1.3"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "eap7-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "MQ_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-amq=MQ"
+ },
+ {
+ "name": "MQ_JNDI",
+ "value": "${MQ_JNDI}"
+ },
+ {
+ "name": "MQ_USERNAME",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "MQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "MQ_PROTOCOL",
+ "value": "tcp"
+ },
+ {
+ "name": "MQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "MQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-amq"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-62:1.3"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-amq",
+ "image": "jboss-amq-62",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/amq/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp",
+ "containerPort": 5672,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp-ssl",
+ "containerPort": 5671,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt",
+ "containerPort": 1883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp",
+ "containerPort": 61613,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp-ssl",
+ "containerPort": 61612,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp",
+ "containerPort": 61616,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp-ssl",
+ "containerPort": 61617,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "AMQ_USER",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "AMQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "AMQ_TRANSPORTS",
+ "value": "${MQ_PROTOCOL}"
+ },
+ {
+ "name": "AMQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "AMQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "AMQ_ADMIN_USERNAME",
+ "value": "${AMQ_ADMIN_USERNAME}"
+ },
+ {
+ "name": "AMQ_ADMIN_PASSWORD",
+ "value": "${AMQ_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
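
Both new EAP 7 / A-MQ templates (the persistent variant above and this non-persistent one) register in the openshift namespace when the examples are imported. A minimal verification sketch follows; the host pattern and the oc get / assert pattern are illustrative assumptions, not something this commit adds.

# Illustrative sketch only: confirm the two new templates were imported.
- name: Verify the new EAP 7 A-MQ templates are registered
  hosts: masters[0]
  tasks:
    - name: List templates in the openshift namespace
      command: oc get templates -n openshift -o name
      register: xpaas_templates
      changed_when: false
    - name: Assert both variants are present
      assert:
        that:
          - "'eap70-amq-persistent-s2i' in xpaas_templates.stdout"
          - "'eap70-amq-s2i' in xpaas_templates.stdout"
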
diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap70-basic-s2i.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap70-basic-s2i.json
index 7dbf0eefa..f03fc69fa 100644
--- a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap70-basic-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap70-basic-s2i.json
@@ -6,13 +6,13 @@
"iconClass": "icon-jboss",
"description": "Application template for EAP 7 applications built using S2I.",
"tags": "eap,javaee,java,jboss,xpaas",
- "version": "1.3.0"
+ "version": "1.3.1"
},
"name": "eap70-basic-s2i"
},
"labels": {
"template": "eap70-basic-s2i",
- "xpaas": "1.3.0"
+ "xpaas": "1.3.1"
},
"parameters": [
{
@@ -36,7 +36,7 @@
{
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
- "value": "7.0.x-develop",
+ "value": "7.0.0.GA",
"required": false
},
{
@@ -58,7 +58,7 @@
"required": false
},
{
- "description": "ActiveMQ cluster admin password",
+ "description": "A-MQ cluster admin password",
"name": "MQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
"generate": "expression",
@@ -172,7 +172,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap70-openshift:1.3-Beta"
+ "name": "jboss-eap70-openshift:1.3"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap70-https-s2i.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap70-https-s2i.json
index 19ef56ca6..27d9b656d 100644
--- a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap70-https-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap70-https-s2i.json
@@ -6,13 +6,13 @@
"iconClass": "icon-jboss",
"description": "Application template for EAP 7 applications built using S2I.",
"tags": "eap,javaee,java,jboss,xpaas",
- "version": "1.3.0"
+ "version": "1.3.1"
},
"name": "eap70-https-s2i"
},
"labels": {
"template": "eap70-https-s2i",
- "xpaas": "1.3.0"
+ "xpaas": "1.3.1"
},
"parameters": [
{
@@ -42,7 +42,7 @@
{
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
- "value": "7.0.x-develop",
+ "value": "7.0.0.GA",
"required": false
},
{
@@ -88,7 +88,7 @@
"required": false
},
{
- "description": "ActiveMQ cluster admin password",
+ "description": "A-MQ cluster admin password",
"name": "MQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
"generate": "expression",
@@ -273,7 +273,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap70-openshift:1.3-Beta"
+ "name": "jboss-eap70-openshift:1.3"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap70-mongodb-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap70-mongodb-persistent-s2i.json
index c48dcbd91..9cc786416 100644
--- a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap70-mongodb-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap70-mongodb-persistent-s2i.json
@@ -6,13 +6,13 @@
"description": "Application template for EAP 7 MongoDB applications with persistent storage built using S2I.",
"iconClass": "icon-jboss",
"tags": "eap,mongodb,javaee,java,database,jboss,xpaas",
- "version": "1.3.0"
+ "version": "1.3.1"
},
"name": "eap70-mongodb-persistent-s2i"
},
"labels": {
"template": "eap70-mongodb-persistent-s2i",
- "xpaas": "1.3.0"
+ "xpaas": "1.3.1"
},
"parameters": [
{
@@ -136,7 +136,7 @@
"required": false
},
{
- "description": "ActiveMQ cluster admin password",
+ "description": "A-MQ cluster admin password",
"name": "MQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
"generate": "expression",
@@ -366,7 +366,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap70-openshift:1.3-Beta"
+ "name": "jboss-eap70-openshift:1.3"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap70-mongodb-s2i.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap70-mongodb-s2i.json
index b499f3132..4db6adcf8 100644
--- a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap70-mongodb-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap70-mongodb-s2i.json
@@ -6,13 +6,13 @@
"description": "Application template for EAP 7 MongoDB applications built using S2I.",
"iconClass": "icon-jboss",
"tags": "eap,mongodb,javaee,java,database,jboss,xpaas",
- "version": "1.3.0"
+ "version": "1.3.1"
},
"name": "eap70-mongodb-s2i"
},
"labels": {
"template": "eap70-mongodb-s2i",
- "xpaas": "1.3.0"
+ "xpaas": "1.3.1"
},
"parameters": [
{
@@ -130,7 +130,7 @@
"required": false
},
{
- "description": "ActiveMQ cluster admin password",
+ "description": "A-MQ cluster admin password",
"name": "MQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
"generate": "expression",
@@ -360,7 +360,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap70-openshift:1.3-Beta"
+ "name": "jboss-eap70-openshift:1.3"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap70-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap70-mysql-persistent-s2i.json
index 8eefa7855..91a79d797 100644
--- a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap70-mysql-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap70-mysql-persistent-s2i.json
@@ -6,13 +6,13 @@
"description": "Application template for EAP 7 MySQL applications with persistent storage built using S2I.",
"iconClass": "icon-jboss",
"tags": "eap,mysql,javaee,java,database,jboss,xpaas",
- "version": "1.3.0"
+ "version": "1.3.1"
},
"name": "eap70-mysql-persistent-s2i"
},
"labels": {
"template": "eap70-mysql-persistent-s2i",
- "xpaas": "1.3.0"
+ "xpaas": "1.3.1"
},
"parameters": [
{
@@ -146,7 +146,7 @@
"required": false
},
{
- "description": "ActiveMQ cluster admin password",
+ "description": "A-MQ cluster admin password",
"name": "MQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
"generate": "expression",
@@ -369,7 +369,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap70-openshift:1.3-Beta"
+ "name": "jboss-eap70-openshift:1.3"
}
}
},
@@ -605,6 +605,14 @@
{
"name": "JGROUPS_CLUSTER_PASSWORD",
"value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "DEFAULT_JOB_REPOSITORY",
+ "value": "${APPLICATION_NAME}-mysql"
+ },
+ {
+ "name": "TIMER_SERVICE_DATA_STORE",
+ "value": "${APPLICATION_NAME}-mysql"
}
]
}
diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap70-mysql-s2i.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap70-mysql-s2i.json
index 47aed69c9..63e4ecd2b 100644
--- a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap70-mysql-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap70-mysql-s2i.json
@@ -6,13 +6,13 @@
"description": "Application template for EAP 7 MySQL applications built using S2I.",
"iconClass": "icon-jboss",
"tags": "eap,mysql,javaee,java,database,jboss,xpaas",
- "version": "1.3.0"
+ "version": "1.3.1"
},
"name": "eap70-mysql-s2i"
},
"labels": {
"template": "eap70-mysql-s2i",
- "xpaas": "1.3.0"
+ "xpaas": "1.3.1"
},
"parameters": [
{
@@ -140,7 +140,7 @@
"required": false
},
{
- "description": "ActiveMQ cluster admin password",
+ "description": "A-MQ cluster admin password",
"name": "MQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
"generate": "expression",
@@ -363,7 +363,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap70-openshift:1.3-Beta"
+ "name": "jboss-eap70-openshift:1.3"
}
}
},
@@ -599,6 +599,14 @@
{
"name": "JGROUPS_CLUSTER_PASSWORD",
"value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "DEFAULT_JOB_REPOSITORY",
+ "value": "${APPLICATION_NAME}-mysql"
+ },
+ {
+ "name": "TIMER_SERVICE_DATA_STORE",
+ "value": "${APPLICATION_NAME}-mysql"
}
]
}
diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap70-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap70-postgresql-persistent-s2i.json
index 8c74255bf..ea681d847 100644
--- a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap70-postgresql-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap70-postgresql-persistent-s2i.json
@@ -6,13 +6,13 @@
"description": "Application template for EAP 7 PostgreSQL applications with persistent storage built using S2I.",
"iconClass": "icon-jboss",
"tags": "eap,postgresql,javaee,java,database,jboss,xpaas",
- "version": "1.3.0"
+ "version": "1.3.1"
},
"name": "eap70-postgresql-persistent-s2i"
},
"labels": {
"template": "eap70-postgresql-persistent-s2i",
- "xpaas": "1.3.0"
+ "xpaas": "1.3.1"
},
"parameters": [
{
@@ -131,7 +131,7 @@
"required": false
},
{
- "description": "ActiveMQ cluster admin password",
+ "description": "A-MQ cluster admin password",
"name": "MQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
"generate": "expression",
@@ -354,7 +354,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap70-openshift:1.3-Beta"
+ "name": "jboss-eap70-openshift:1.3"
}
}
},
@@ -590,6 +590,14 @@
{
"name": "JGROUPS_CLUSTER_PASSWORD",
"value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "DEFAULT_JOB_REPOSITORY",
+ "value": "${APPLICATION_NAME}-postgresql"
+ },
+ {
+ "name": "TIMER_SERVICE_DATA_STORE",
+ "value": "${APPLICATION_NAME}-postgresql"
}
]
}
diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap70-postgresql-s2i.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap70-postgresql-s2i.json
index 2ba4aef14..df95d823e 100644
--- a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap70-postgresql-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap70-postgresql-s2i.json
@@ -6,13 +6,13 @@
"description": "Application template for EAP 7 PostgreSQL applications built using S2I.",
"iconClass": "icon-jboss",
"tags": "eap,postgresql,javaee,java,database,jboss,xpaas",
- "version": "1.3.0"
+ "version": "1.3.1"
},
"name": "eap70-postgresql-s2i"
},
"labels": {
"template": "eap70-postgresql-s2i",
- "xpaas": "1.3.0"
+ "xpaas": "1.3.1"
},
"parameters": [
{
@@ -125,7 +125,7 @@
"required": false
},
{
- "description": "ActiveMQ cluster admin password",
+ "description": "A-MQ cluster admin password",
"name": "MQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
"generate": "expression",
@@ -348,7 +348,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap70-openshift:1.3-Beta"
+ "name": "jboss-eap70-openshift:1.3"
}
}
},
@@ -584,6 +584,14 @@
{
"name": "JGROUPS_CLUSTER_PASSWORD",
"value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "DEFAULT_JOB_REPOSITORY",
+ "value": "${APPLICATION_NAME}-postgresql"
+ },
+ {
+ "name": "TIMER_SERVICE_DATA_STORE",
+ "value": "${APPLICATION_NAME}-postgresql"
}
]
}
diff --git a/roles/openshift_examples/files/examples/v1.2/xpaas-streams/jboss-image-streams.json b/roles/openshift_examples/files/examples/v1.2/xpaas-streams/jboss-image-streams.json
index 5e03d9d48..46f93823c 100644
--- a/roles/openshift_examples/files/examples/v1.2/xpaas-streams/jboss-image-streams.json
+++ b/roles/openshift_examples/files/examples/v1.2/xpaas-streams/jboss-image-streams.json
@@ -138,22 +138,19 @@
"name": "jboss-eap70-openshift"
},
"spec": {
+ "dockerImageRepository": "registry.access.redhat.com/jboss-eap-7/eap70-openshift",
"tags": [
{
- "name": "1.3-Beta",
+ "name": "1.3",
"annotations": {
- "description": "JBoss EAP 7.0 Beta S2I images.",
+ "description": "JBoss EAP 7.0 S2I images.",
"iconClass": "icon-jboss",
"tags": "builder,eap,javaee,java,jboss,xpaas",
"supports":"eap:7.0,javaee:7,java:8,xpaas:1.3",
"sampleRepo": "https://github.com/jboss-developer/jboss-eap-quickstarts.git",
"sampleContextDir": "kitchensink",
- "sampleRef": "7.0.x",
+ "sampleRef": "7.0.0.GA",
"version": "1.3"
- },
- "from": {
- "kind": "DockerImage",
- "name": "registry.access.redhat.com/jboss-eap-7-beta/eap70-openshift:1.3"
}
}
]
@@ -234,6 +231,16 @@
"supports":"amq:6.2,messaging,xpaas:1.2",
"version": "1.2"
}
+ },
+ {
+ "name": "1.3",
+ "annotations": {
+ "description": "JBoss A-MQ 6.2 broker image.",
+ "iconClass": "icon-jboss",
+ "tags": "messaging,amq,jboss,xpaas",
+ "supports":"amq:6.2,messaging,xpaas:1.3",
+ "version": "1.3"
+ }
}
]
}
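
The image-stream changes above (jboss-eap70-openshift now tagged 1.3 instead of 1.3-Beta, plus the new jboss-amq-62:1.3 tag) only become visible on an existing cluster after the updated jboss-image-streams.json is re-applied to the openshift namespace. The task below is a hedged sketch of that step: the file path mirrors this diff, but the replace-or-create pattern is an assumption rather than the role's actual logic.

# Sketch only: re-import the updated JBoss image streams so the
# jboss-eap70-openshift:1.3 and jboss-amq-62:1.3 tags become available.
- name: Refresh the xPaaS image streams
  hosts: masters[0]
  tasks:
    - name: Replace (or create) the JBoss image stream definitions
      shell: >
        oc replace -n openshift -f {{ streams_file }}
        || oc create -n openshift -f {{ streams_file }}
      vars:
        streams_file: roles/openshift_examples/files/examples/v1.2/xpaas-streams/jboss-image-streams.json
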
diff --git a/roles/openshift_examples/files/examples/v1.2/xpaas-templates/amq62-basic.json b/roles/openshift_examples/files/examples/v1.2/xpaas-templates/amq62-basic.json
index 2b1680755..ce953c05f 100644
--- a/roles/openshift_examples/files/examples/v1.2/xpaas-templates/amq62-basic.json
+++ b/roles/openshift_examples/files/examples/v1.2/xpaas-templates/amq62-basic.json
@@ -6,13 +6,13 @@
"description": "Application template for JBoss A-MQ brokers. These can be deployed as standalone or in a mesh. This template doesn't feature SSL support.",
"iconClass": "icon-jboss",
"tags": "messaging,amq,jboss,xpaas",
- "version": "1.2.0"
+ "version": "1.3.1"
},
"name": "amq62-basic"
},
"labels": {
"template": "amq62-basic",
- "xpaas": "1.2.0"
+ "xpaas": "1.3.1"
},
"parameters": [
{
@@ -180,7 +180,10 @@
},
"spec": {
"strategy": {
- "type": "Recreate"
+ "type": "Rolling",
+ "rollingParams": {
+ "maxSurge": 0
+ }
},
"triggers": [
{
@@ -193,7 +196,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-amq-62:1.2"
+ "name": "jboss-amq-62:1.3"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.2/xpaas-templates/amq62-persistent-ssl.json b/roles/openshift_examples/files/examples/v1.2/xpaas-templates/amq62-persistent-ssl.json
index 9759ed7c7..7d41a29ad 100644
--- a/roles/openshift_examples/files/examples/v1.2/xpaas-templates/amq62-persistent-ssl.json
+++ b/roles/openshift_examples/files/examples/v1.2/xpaas-templates/amq62-persistent-ssl.json
@@ -6,13 +6,13 @@
"description": "Application template for JBoss A-MQ brokers. These are deployed as standalone and use persistent storage for saving messages. This template supports SSL and requires usage of OpenShift secrets.",
"iconClass": "icon-jboss",
"tags": "messaging,amq,jboss,xpaas",
- "version": "1.2.0"
+ "version": "1.3.1"
},
"name": "amq62-persistent-ssl"
},
"labels": {
"template": "amq62-persistent-ssl",
- "xpaas": "1.2.0"
+ "xpaas": "1.3.1"
},
"parameters": [
{
@@ -22,7 +22,13 @@
"required": true
},
{
- "description": "Protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. SSL variants of these protocols will be configured automaticaly.",
+ "description": "Split the data directory for each node in a mesh.",
+ "name": "AMQ_SPLIT",
+ "value": "false",
+ "required": false
+ },
+ {
+ "description": "Protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`.",
"name": "MQ_PROTOCOL",
"value": "openwire",
"required": false
@@ -60,6 +66,12 @@
"required": false
},
{
+ "description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "kube",
+ "required": false
+ },
+ {
"description": "Name of a secret containing SSL related files",
"name": "AMQ_SECRET",
"value": "amq-app-secret",
@@ -306,7 +318,10 @@
},
"spec": {
"strategy": {
- "type": "Recreate"
+ "type": "Rolling",
+ "rollingParams": {
+ "maxSurge": 0
+ }
},
"triggers": [
{
@@ -319,7 +334,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-amq-62:1.2"
+ "name": "jboss-amq-62:1.3"
}
}
},
@@ -354,7 +369,7 @@
"readOnly": true
},
{
- "mountPath": "/opt/amq/data/kahadb",
+ "mountPath": "/opt/amq/data",
"name": "${APPLICATION_NAME}-amq-pvol"
}
],
@@ -436,6 +451,26 @@
"value": "${MQ_TOPICS}"
},
{
+ "name": "AMQ_SPLIT",
+ "value": "${AMQ_SPLIT}"
+ },
+ {
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "${AMQ_MESH_DISCOVERY_TYPE}"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-amq-tcp"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
"name": "AMQ_KEYSTORE_TRUSTSTORE_DIR",
"value": "/etc/amq-secret-volume"
},
@@ -491,7 +526,7 @@
},
"spec": {
"accessModes": [
- "ReadWriteOnce"
+ "ReadWriteMany"
],
"resources": {
"requests": {
diff --git a/roles/openshift_examples/files/examples/v1.2/xpaas-templates/amq62-persistent.json b/roles/openshift_examples/files/examples/v1.2/xpaas-templates/amq62-persistent.json
index a8b3d5714..5d5dd9840 100644
--- a/roles/openshift_examples/files/examples/v1.2/xpaas-templates/amq62-persistent.json
+++ b/roles/openshift_examples/files/examples/v1.2/xpaas-templates/amq62-persistent.json
@@ -6,13 +6,13 @@
"description": "Application template for JBoss A-MQ brokers. These can be deployed as standalone and use persistent storage for saving messages. This template doesn't feature SSL support.",
"iconClass": "icon-jboss",
"tags": "messaging,amq,jboss,xpaas",
- "version": "1.2.0"
+ "version": "1.3.1"
},
"name": "amq62-persistent"
},
"labels": {
"template": "amq62-persistent",
- "xpaas": "1.2.0"
+ "xpaas": "1.3.1"
},
"parameters": [
{
@@ -22,6 +22,12 @@
"required": true
},
{
+ "description": "Split the data directory for each node in a mesh.",
+ "name": "AMQ_SPLIT",
+ "value": "false",
+ "required": false
+ },
+ {
"description": "Protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`.",
"name": "MQ_PROTOCOL",
"value": "openwire",
@@ -60,6 +66,12 @@
"required": false
},
{
+ "description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "kube",
+ "required": false
+ },
+ {
"description": "The A-MQ storage usage limit",
"name": "AMQ_STORAGE_USAGE_LIMIT",
"value": "100 gb",
@@ -180,7 +192,10 @@
},
"spec": {
"strategy": {
- "type": "Recreate"
+ "type": "Rolling",
+ "rollingParams": {
+ "maxSurge": 0
+ }
},
"triggers": [
{
@@ -193,7 +208,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-amq-62:1.2"
+ "name": "jboss-amq-62:1.3"
}
}
},
@@ -222,7 +237,7 @@
"imagePullPolicy": "Always",
"volumeMounts": [
{
- "mountPath": "/opt/amq/data/kahadb",
+ "mountPath": "/opt/amq/data",
"name": "${APPLICATION_NAME}-amq-pvol"
}
],
@@ -284,6 +299,26 @@
"value": "${MQ_TOPICS}"
},
{
+ "name": "AMQ_SPLIT",
+ "value": "${AMQ_SPLIT}"
+ },
+ {
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "${AMQ_MESH_DISCOVERY_TYPE}"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-amq-tcp"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
"name": "AMQ_STORAGE_USAGE_LIMIT",
"value": "${AMQ_STORAGE_USAGE_LIMIT}"
}
@@ -313,7 +348,7 @@
},
"spec": {
"accessModes": [
- "ReadWriteOnce"
+ "ReadWriteMany"
],
"resources": {
"requests": {
diff --git a/roles/openshift_examples/files/examples/v1.2/xpaas-templates/amq62-ssl.json b/roles/openshift_examples/files/examples/v1.2/xpaas-templates/amq62-ssl.json
index fdf0da9c3..4122a02a1 100644
--- a/roles/openshift_examples/files/examples/v1.2/xpaas-templates/amq62-ssl.json
+++ b/roles/openshift_examples/files/examples/v1.2/xpaas-templates/amq62-ssl.json
@@ -6,13 +6,13 @@
"description": "Application template for JBoss A-MQ brokers. These can be deployed as standalone or in a mesh. This template supports SSL and requires usage of OpenShift secrets.",
"iconClass": "icon-jboss",
"tags": "messaging,amq,jboss,xpaas",
- "version": "1.2.0"
+ "version": "1.3.1"
},
"name": "amq62-ssl"
},
"labels": {
"template": "amq62-ssl",
- "xpaas": "1.2.0"
+ "xpaas": "1.3.1"
},
"parameters": [
{
@@ -306,7 +306,10 @@
},
"spec": {
"strategy": {
- "type": "Recreate"
+ "type": "Rolling",
+ "rollingParams": {
+ "maxSurge": 0
+ }
},
"triggers": [
{
@@ -319,7 +322,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-amq-62:1.2"
+ "name": "jboss-amq-62:1.3"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.2/xpaas-templates/decisionserver62-amq-s2i.json b/roles/openshift_examples/files/examples/v1.2/xpaas-templates/decisionserver62-amq-s2i.json
index 2e8276adb..219b8ece7 100644
--- a/roles/openshift_examples/files/examples/v1.2/xpaas-templates/decisionserver62-amq-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.2/xpaas-templates/decisionserver62-amq-s2i.json
@@ -571,7 +571,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-amq-62:1.2"
+ "name": "jboss-amq-62:1.3"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.2/xpaas-templates/eap64-amq-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.2/xpaas-templates/eap64-amq-persistent-s2i.json
index 4485fd264..c9ecee9cb 100644
--- a/roles/openshift_examples/files/examples/v1.2/xpaas-templates/eap64-amq-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.2/xpaas-templates/eap64-amq-persistent-s2i.json
@@ -6,13 +6,13 @@
"description": "Application template for EAP 6 A-MQ applications with persistent storage built using S2I.",
"iconClass": "icon-jboss",
"tags": "eap,amq,javaee,java,messaging,jboss,xpaas",
- "version": "1.2.0"
+ "version": "1.3.1"
},
"name": "eap64-amq-persistent-s2i"
},
"labels": {
"template": "eap64-amq-persistent-s2i",
- "xpaas": "1.2.0"
+ "xpaas": "1.3.1"
},
"parameters": [
{
@@ -64,6 +64,12 @@
"required": false
},
{
+ "description": "Split the data directory for each node in a mesh.",
+ "name": "AMQ_SPLIT",
+ "value": "false",
+ "required": false
+ },
+ {
"description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
"name": "MQ_PROTOCOL",
"value": "openwire",
@@ -585,7 +591,10 @@
},
"spec": {
"strategy": {
- "type": "Recreate"
+ "type": "Rolling",
+ "rollingParams": {
+ "maxSurge": 0
+ }
},
"triggers": [
{
@@ -598,7 +607,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-amq-62:1.2"
+ "name": "jboss-amq-62:1.3"
}
}
},
@@ -636,6 +645,11 @@
},
"ports": [
{
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
"name": "amqp",
"containerPort": 5672,
"protocol": "TCP"
@@ -699,6 +713,30 @@
"value": "${MQ_TOPICS}"
},
{
+ "name": "AMQ_SPLIT",
+ "value": "${AMQ_SPLIT}"
+ },
+ {
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "${AMQ_MESH_DISCOVERY_TYPE}"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-amq-tcp"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "${AMQ_STORAGE_USAGE_LIMIT}"
+ },
+ {
"name": "AMQ_ADMIN_USERNAME",
"value": "${AMQ_ADMIN_USERNAME}"
},
diff --git a/roles/openshift_examples/files/examples/v1.2/xpaas-templates/eap64-amq-s2i.json b/roles/openshift_examples/files/examples/v1.2/xpaas-templates/eap64-amq-s2i.json
index 72d8c061b..99724db94 100644
--- a/roles/openshift_examples/files/examples/v1.2/xpaas-templates/eap64-amq-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.2/xpaas-templates/eap64-amq-s2i.json
@@ -6,13 +6,13 @@
"description": "Application template for EAP 6 A-MQ applications built using S2I.",
"iconClass": "icon-jboss",
"tags": "eap,amq,javaee,java,messaging,jboss,xpaas",
- "version": "1.2.0"
+ "version": "1.3.1"
},
"name": "eap64-amq-s2i"
},
"labels": {
"template": "eap64-amq-s2i",
- "xpaas": "1.2.0"
+ "xpaas": "1.3.1"
},
"parameters": [
{
@@ -592,7 +592,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-amq-62:1.2"
+ "name": "jboss-amq-62:1.3"
}
}
},
@@ -630,6 +630,11 @@
},
"ports": [
{
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
"name": "amqp",
"containerPort": 5672,
"protocol": "TCP"
diff --git a/roles/openshift_examples/files/examples/v1.2/xpaas-templates/eap70-amq-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.2/xpaas-templates/eap70-amq-persistent-s2i.json
new file mode 100644
index 000000000..d9607ddd7
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.2/xpaas-templates/eap70-amq-persistent-s2i.json
@@ -0,0 +1,783 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for EAP 7 A-MQ applications with persistent storage built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "eap,amq,javaee,java,messaging,jboss,xpaas",
+ "version": "1.3.1"
+ },
+ "name": "eap70-amq-persistent-s2i"
+ },
+ "labels": {
+ "template": "eap70-amq-persistent-s2i",
+ "xpaas": "1.3.1"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "eap-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "helloworld-mdb",
+ "required": false
+ },
+ {
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/ConnectionFactory",
+ "name": "MQ_JNDI",
+ "value": "java:/ConnectionFactory",
+ "required": false
+ },
+ {
+ "description": "Split the data directory for each node in a mesh.",
+ "name": "AMQ_SPLIT",
+ "value": "false",
+ "required": false
+ },
+ {
+ "description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
+ "name": "MQ_PROTOCOL",
+ "value": "openwire",
+ "required": false
+ },
+ {
+ "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_QUEUES",
+ "value": "HELLOWORLDMDBQueue",
+ "required": false
+ },
+ {
+ "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_TOPICS",
+ "value": "HELLOWORLDMDBTopic",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "eap-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "User name for broker admin. If left empty, it will be generated.",
+ "name": "AMQ_ADMIN_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Password for broker admin. If left empty, it will be generated.",
+ "name": "AMQ_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "eap-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTPS port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61616,
+ "targetPort": 61616
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTP service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTPS service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap70-openshift:1.3"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "eap-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "MQ_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-amq=MQ"
+ },
+ {
+ "name": "MQ_JNDI",
+ "value": "${MQ_JNDI}"
+ },
+ {
+ "name": "MQ_USERNAME",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "MQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "MQ_PROTOCOL",
+ "value": "tcp"
+ },
+ {
+ "name": "MQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "MQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Rolling",
+ "rollingParams": {
+ "maxSurge": 0
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-amq"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-62:1.3"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-amq",
+ "image": "jboss-amq-62",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/amq/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp",
+ "containerPort": 5672,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp-ssl",
+ "containerPort": 5671,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt",
+ "containerPort": 1883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp",
+ "containerPort": 61613,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp-ssl",
+ "containerPort": 61612,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp",
+ "containerPort": 61616,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp-ssl",
+ "containerPort": 61617,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/opt/amq/data/kahadb",
+ "name": "${APPLICATION_NAME}-amq-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "AMQ_USER",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "AMQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "AMQ_TRANSPORTS",
+ "value": "${MQ_PROTOCOL}"
+ },
+ {
+ "name": "AMQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "AMQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "AMQ_SPLIT",
+ "value": "${AMQ_SPLIT}"
+ },
+ {
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "${AMQ_MESH_DISCOVERY_TYPE}"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-amq-tcp"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "${AMQ_STORAGE_USAGE_LIMIT}"
+ },
+ {
+ "name": "AMQ_ADMIN_USERNAME",
+ "value": "${AMQ_ADMIN_USERNAME}"
+ },
+ {
+ "name": "AMQ_ADMIN_PASSWORD",
+ "value": "${AMQ_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-amq-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-amq-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.2/xpaas-templates/eap70-amq-s2i.json b/roles/openshift_examples/files/examples/v1.2/xpaas-templates/eap70-amq-s2i.json
new file mode 100644
index 000000000..552b637b8
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.2/xpaas-templates/eap70-amq-s2i.json
@@ -0,0 +1,710 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for EAP 7 A-MQ applications built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "eap,amq,javaee,java,messaging,jboss,xpaas",
+ "version": "1.3.1"
+ },
+ "name": "eap70-amq-s2i"
+ },
+ "labels": {
+ "template": "eap70-amq-s2i",
+ "xpaas": "1.3.1"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "eap-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "helloworld-mdb",
+ "required": false
+ },
+ {
+ "description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/ConnectionFactory",
+ "name": "MQ_JNDI",
+ "value": "java:/ConnectionFactory",
+ "required": false
+ },
+ {
+ "description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
+ "name": "MQ_PROTOCOL",
+ "value": "openwire",
+ "required": false
+ },
+ {
+ "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_QUEUES",
+ "value": "HELLOWORLDMDBQueue",
+ "required": false
+ },
+ {
+ "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_TOPICS",
+ "value": "HELLOWORLDMDBTopic",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "eap7-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "User name for broker admin. If left empty, it will be generated.",
+ "name": "AMQ_ADMIN_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Password for broker admin. If left empty, it will be generated.",
+ "name": "AMQ_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "eap7-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTPS port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61616,
+ "targetPort": 61616
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTP service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTPS service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap70-openshift:1.3"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "eap7-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "MQ_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-amq=MQ"
+ },
+ {
+ "name": "MQ_JNDI",
+ "value": "${MQ_JNDI}"
+ },
+ {
+ "name": "MQ_USERNAME",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "MQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "MQ_PROTOCOL",
+ "value": "tcp"
+ },
+ {
+ "name": "MQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "MQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-amq"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-62:1.3"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-amq",
+ "image": "jboss-amq-62",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/amq/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp",
+ "containerPort": 5672,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp-ssl",
+ "containerPort": 5671,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt",
+ "containerPort": 1883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp",
+ "containerPort": 61613,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp-ssl",
+ "containerPort": 61612,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp",
+ "containerPort": 61616,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp-ssl",
+ "containerPort": 61617,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "AMQ_USER",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "AMQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "AMQ_TRANSPORTS",
+ "value": "${MQ_PROTOCOL}"
+ },
+ {
+ "name": "AMQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "AMQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "AMQ_ADMIN_USERNAME",
+ "value": "${AMQ_ADMIN_USERNAME}"
+ },
+ {
+ "name": "AMQ_ADMIN_PASSWORD",
+ "value": "${AMQ_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.2/xpaas-templates/eap70-basic-s2i.json b/roles/openshift_examples/files/examples/v1.2/xpaas-templates/eap70-basic-s2i.json
index 7dbf0eefa..f03fc69fa 100644
--- a/roles/openshift_examples/files/examples/v1.2/xpaas-templates/eap70-basic-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.2/xpaas-templates/eap70-basic-s2i.json
@@ -6,13 +6,13 @@
"iconClass": "icon-jboss",
"description": "Application template for EAP 7 applications built using S2I.",
"tags": "eap,javaee,java,jboss,xpaas",
- "version": "1.3.0"
+ "version": "1.3.1"
},
"name": "eap70-basic-s2i"
},
"labels": {
"template": "eap70-basic-s2i",
- "xpaas": "1.3.0"
+ "xpaas": "1.3.1"
},
"parameters": [
{
@@ -36,7 +36,7 @@
{
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
- "value": "7.0.x-develop",
+ "value": "7.0.0.GA",
"required": false
},
{
@@ -58,7 +58,7 @@
"required": false
},
{
- "description": "ActiveMQ cluster admin password",
+ "description": "A-MQ cluster admin password",
"name": "MQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
"generate": "expression",
@@ -172,7 +172,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap70-openshift:1.3-Beta"
+ "name": "jboss-eap70-openshift:1.3"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.2/xpaas-templates/eap70-https-s2i.json b/roles/openshift_examples/files/examples/v1.2/xpaas-templates/eap70-https-s2i.json
index 19ef56ca6..27d9b656d 100644
--- a/roles/openshift_examples/files/examples/v1.2/xpaas-templates/eap70-https-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.2/xpaas-templates/eap70-https-s2i.json
@@ -6,13 +6,13 @@
"iconClass": "icon-jboss",
"description": "Application template for EAP 7 applications built using S2I.",
"tags": "eap,javaee,java,jboss,xpaas",
- "version": "1.3.0"
+ "version": "1.3.1"
},
"name": "eap70-https-s2i"
},
"labels": {
"template": "eap70-https-s2i",
- "xpaas": "1.3.0"
+ "xpaas": "1.3.1"
},
"parameters": [
{
@@ -42,7 +42,7 @@
{
"description": "Git branch/tag reference",
"name": "SOURCE_REPOSITORY_REF",
- "value": "7.0.x-develop",
+ "value": "7.0.0.GA",
"required": false
},
{
@@ -88,7 +88,7 @@
"required": false
},
{
- "description": "ActiveMQ cluster admin password",
+ "description": "A-MQ cluster admin password",
"name": "MQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
"generate": "expression",
@@ -273,7 +273,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap70-openshift:1.3-Beta"
+ "name": "jboss-eap70-openshift:1.3"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.2/xpaas-templates/eap70-mongodb-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.2/xpaas-templates/eap70-mongodb-persistent-s2i.json
index c48dcbd91..9cc786416 100644
--- a/roles/openshift_examples/files/examples/v1.2/xpaas-templates/eap70-mongodb-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.2/xpaas-templates/eap70-mongodb-persistent-s2i.json
@@ -6,13 +6,13 @@
"description": "Application template for EAP 7 MongoDB applications with persistent storage built using S2I.",
"iconClass": "icon-jboss",
"tags": "eap,mongodb,javaee,java,database,jboss,xpaas",
- "version": "1.3.0"
+ "version": "1.3.1"
},
"name": "eap70-mongodb-persistent-s2i"
},
"labels": {
"template": "eap70-mongodb-persistent-s2i",
- "xpaas": "1.3.0"
+ "xpaas": "1.3.1"
},
"parameters": [
{
@@ -136,7 +136,7 @@
"required": false
},
{
- "description": "ActiveMQ cluster admin password",
+ "description": "A-MQ cluster admin password",
"name": "MQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
"generate": "expression",
@@ -366,7 +366,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap70-openshift:1.3-Beta"
+ "name": "jboss-eap70-openshift:1.3"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.2/xpaas-templates/eap70-mongodb-s2i.json b/roles/openshift_examples/files/examples/v1.2/xpaas-templates/eap70-mongodb-s2i.json
index b499f3132..4db6adcf8 100644
--- a/roles/openshift_examples/files/examples/v1.2/xpaas-templates/eap70-mongodb-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.2/xpaas-templates/eap70-mongodb-s2i.json
@@ -6,13 +6,13 @@
"description": "Application template for EAP 7 MongoDB applications built using S2I.",
"iconClass": "icon-jboss",
"tags": "eap,mongodb,javaee,java,database,jboss,xpaas",
- "version": "1.3.0"
+ "version": "1.3.1"
},
"name": "eap70-mongodb-s2i"
},
"labels": {
"template": "eap70-mongodb-s2i",
- "xpaas": "1.3.0"
+ "xpaas": "1.3.1"
},
"parameters": [
{
@@ -130,7 +130,7 @@
"required": false
},
{
- "description": "ActiveMQ cluster admin password",
+ "description": "A-MQ cluster admin password",
"name": "MQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
"generate": "expression",
@@ -360,7 +360,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap70-openshift:1.3-Beta"
+ "name": "jboss-eap70-openshift:1.3"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.2/xpaas-templates/eap70-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.2/xpaas-templates/eap70-mysql-persistent-s2i.json
index 8eefa7855..91a79d797 100644
--- a/roles/openshift_examples/files/examples/v1.2/xpaas-templates/eap70-mysql-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.2/xpaas-templates/eap70-mysql-persistent-s2i.json
@@ -6,13 +6,13 @@
"description": "Application template for EAP 7 MySQL applications with persistent storage built using S2I.",
"iconClass": "icon-jboss",
"tags": "eap,mysql,javaee,java,database,jboss,xpaas",
- "version": "1.3.0"
+ "version": "1.3.1"
},
"name": "eap70-mysql-persistent-s2i"
},
"labels": {
"template": "eap70-mysql-persistent-s2i",
- "xpaas": "1.3.0"
+ "xpaas": "1.3.1"
},
"parameters": [
{
@@ -146,7 +146,7 @@
"required": false
},
{
- "description": "ActiveMQ cluster admin password",
+ "description": "A-MQ cluster admin password",
"name": "MQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
"generate": "expression",
@@ -369,7 +369,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap70-openshift:1.3-Beta"
+ "name": "jboss-eap70-openshift:1.3"
}
}
},
@@ -605,6 +605,14 @@
{
"name": "JGROUPS_CLUSTER_PASSWORD",
"value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "DEFAULT_JOB_REPOSITORY",
+ "value": "${APPLICATION_NAME}-mysql"
+ },
+ {
+ "name": "TIMER_SERVICE_DATA_STORE",
+ "value": "${APPLICATION_NAME}-mysql"
}
]
}
diff --git a/roles/openshift_examples/files/examples/v1.2/xpaas-templates/eap70-mysql-s2i.json b/roles/openshift_examples/files/examples/v1.2/xpaas-templates/eap70-mysql-s2i.json
index 47aed69c9..63e4ecd2b 100644
--- a/roles/openshift_examples/files/examples/v1.2/xpaas-templates/eap70-mysql-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.2/xpaas-templates/eap70-mysql-s2i.json
@@ -6,13 +6,13 @@
"description": "Application template for EAP 7 MySQL applications built using S2I.",
"iconClass": "icon-jboss",
"tags": "eap,mysql,javaee,java,database,jboss,xpaas",
- "version": "1.3.0"
+ "version": "1.3.1"
},
"name": "eap70-mysql-s2i"
},
"labels": {
"template": "eap70-mysql-s2i",
- "xpaas": "1.3.0"
+ "xpaas": "1.3.1"
},
"parameters": [
{
@@ -140,7 +140,7 @@
"required": false
},
{
- "description": "ActiveMQ cluster admin password",
+ "description": "A-MQ cluster admin password",
"name": "MQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
"generate": "expression",
@@ -363,7 +363,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap70-openshift:1.3-Beta"
+ "name": "jboss-eap70-openshift:1.3"
}
}
},
@@ -599,6 +599,14 @@
{
"name": "JGROUPS_CLUSTER_PASSWORD",
"value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "DEFAULT_JOB_REPOSITORY",
+ "value": "${APPLICATION_NAME}-mysql"
+ },
+ {
+ "name": "TIMER_SERVICE_DATA_STORE",
+ "value": "${APPLICATION_NAME}-mysql"
}
]
}
diff --git a/roles/openshift_examples/files/examples/v1.2/xpaas-templates/eap70-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.2/xpaas-templates/eap70-postgresql-persistent-s2i.json
index 8c74255bf..ea681d847 100644
--- a/roles/openshift_examples/files/examples/v1.2/xpaas-templates/eap70-postgresql-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.2/xpaas-templates/eap70-postgresql-persistent-s2i.json
@@ -6,13 +6,13 @@
"description": "Application template for EAP 7 PostgreSQL applications with persistent storage built using S2I.",
"iconClass": "icon-jboss",
"tags": "eap,postgresql,javaee,java,database,jboss,xpaas",
- "version": "1.3.0"
+ "version": "1.3.1"
},
"name": "eap70-postgresql-persistent-s2i"
},
"labels": {
"template": "eap70-postgresql-persistent-s2i",
- "xpaas": "1.3.0"
+ "xpaas": "1.3.1"
},
"parameters": [
{
@@ -131,7 +131,7 @@
"required": false
},
{
- "description": "ActiveMQ cluster admin password",
+ "description": "A-MQ cluster admin password",
"name": "MQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
"generate": "expression",
@@ -354,7 +354,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap70-openshift:1.3-Beta"
+ "name": "jboss-eap70-openshift:1.3"
}
}
},
@@ -590,6 +590,14 @@
{
"name": "JGROUPS_CLUSTER_PASSWORD",
"value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "DEFAULT_JOB_REPOSITORY",
+ "value": "${APPLICATION_NAME}-postgresql"
+ },
+ {
+ "name": "TIMER_SERVICE_DATA_STORE",
+ "value": "${APPLICATION_NAME}-postgresql"
}
]
}
diff --git a/roles/openshift_examples/files/examples/v1.2/xpaas-templates/eap70-postgresql-s2i.json b/roles/openshift_examples/files/examples/v1.2/xpaas-templates/eap70-postgresql-s2i.json
index 2ba4aef14..df95d823e 100644
--- a/roles/openshift_examples/files/examples/v1.2/xpaas-templates/eap70-postgresql-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.2/xpaas-templates/eap70-postgresql-s2i.json
@@ -6,13 +6,13 @@
"description": "Application template for EAP 7 PostgreSQL applications built using S2I.",
"iconClass": "icon-jboss",
"tags": "eap,postgresql,javaee,java,database,jboss,xpaas",
- "version": "1.3.0"
+ "version": "1.3.1"
},
"name": "eap70-postgresql-s2i"
},
"labels": {
"template": "eap70-postgresql-s2i",
- "xpaas": "1.3.0"
+ "xpaas": "1.3.1"
},
"parameters": [
{
@@ -125,7 +125,7 @@
"required": false
},
{
- "description": "ActiveMQ cluster admin password",
+ "description": "A-MQ cluster admin password",
"name": "MQ_CLUSTER_PASSWORD",
"from": "[a-zA-Z0-9]{8}",
"generate": "expression",
@@ -348,7 +348,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-eap70-openshift:1.3-Beta"
+ "name": "jboss-eap70-openshift:1.3"
}
}
},
@@ -584,6 +584,14 @@
{
"name": "JGROUPS_CLUSTER_PASSWORD",
"value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "DEFAULT_JOB_REPOSITORY",
+ "value": "${APPLICATION_NAME}-postgresql"
+ },
+ {
+ "name": "TIMER_SERVICE_DATA_STORE",
+ "value": "${APPLICATION_NAME}-postgresql"
}
]
}
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index b13343776..2f8af2454 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -1549,11 +1549,13 @@ class OpenShiftFacts(object):
OpenShiftFactsUnsupportedRoleError:
"""
known_roles = ['builddefaults',
+ 'clock',
'cloudprovider',
'common',
'docker',
'etcd',
'hosted',
+ 'loadbalancer',
'master',
'node']
@@ -1571,7 +1573,15 @@ class OpenShiftFacts(object):
"Role %s is not supported by this module" % role
)
self.role = role
- self.system_facts = ansible_facts(module)
+
+ try:
+ # ansible-2.1
+ # pylint: disable=too-many-function-args
+ self.system_facts = ansible_facts(module, ['hardware', 'network', 'virtual', 'facter'])
+ except TypeError:
+ # ansible-1.9.x,ansible-2.0.x
+ self.system_facts = ansible_facts(module)
+
self.facts = self.generate_facts(local_facts,
additive_facts_to_overwrite,
openshift_env,
@@ -1712,13 +1722,25 @@ class OpenShiftFacts(object):
set_node_ip=False)
if 'docker' in roles:
- docker = dict(disable_push_dockerhub=False, hosted_registry_insecure=True)
+ docker = dict(disable_push_dockerhub=False,
+ hosted_registry_insecure=True,
+ options='--log-driver=json-file --log-opt max-size=50m')
version_info = get_docker_version_info()
if version_info is not None:
docker['api_version'] = version_info['api_version']
docker['version'] = version_info['version']
defaults['docker'] = docker
+ if 'clock' in roles:
+ exit_code, _, _ = module.run_command(['rpm', '-q', 'chrony'])
+ if exit_code == 0:
+ chrony_installed = True
+ else:
+ chrony_installed = False
+ defaults['clock'] = dict(
+ enabled=True,
+ chrony_installed=chrony_installed)
+
if 'cloudprovider' in roles:
defaults['cloudprovider'] = dict(kind=None)
@@ -1763,6 +1785,13 @@ class OpenShiftFacts(object):
router=dict()
)
+ if 'loadbalancer' in roles:
+ loadbalancer = dict(frontend_port='8443',
+ default_maxconn='20000',
+ global_maxconn='20000',
+ limit_nofile='100000')
+ defaults['loadbalancer'] = loadbalancer
+
return defaults
def guess_host_provider(self):
diff --git a/roles/openshift_facts/tasks/main.yml b/roles/openshift_facts/tasks/main.yml
index ce410d1d5..ca1a9b1e4 100644
--- a/roles/openshift_facts/tasks/main.yml
+++ b/roles/openshift_facts/tasks/main.yml
@@ -19,6 +19,10 @@
action: "{{ ansible_pkg_mgr }} name=PyYAML state=present"
when: not l_is_atomic | bool
+- name: Ensure yum-utils is installed
+ action: "{{ ansible_pkg_mgr }} name=yum-utils state=present"
+ when: not l_is_atomic | bool
+
- name: Gather Cluster facts and set is_containerized if needed
openshift_facts:
role: common
@@ -32,18 +36,8 @@
public_hostname: "{{ openshift_public_hostname | default(None) }}"
public_ip: "{{ openshift_public_ip | default(None) }}"
portal_net: "{{ openshift_portal_net | default(openshift_master_portal_net) | default(None) }}"
-
-# had to be done outside of the above because hostname isn't yet set
-- name: Gather hostnames for proxy configuration
- openshift_facts:
- role: common
- local_facts:
http_proxy: "{{ openshift_http_proxy | default(None) }}"
https_proxy: "{{ openshift_https_proxy | default(None) }}"
no_proxy: "{{ openshift_no_proxy | default(None) }}"
generate_no_proxy_hosts: "{{ openshift_generate_no_proxy_hosts | default(True) }}"
- no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
+ no_proxy_internal_hostnames: "{{ openshift_no_proxy_internal_hostnames | default(None) }}"
diff --git a/roles/openshift_hosted_logging/vars/main.yaml b/roles/openshift_hosted_logging/vars/main.yaml
index 586c2ab91..88d112209 100644
--- a/roles/openshift_hosted_logging/vars/main.yaml
+++ b/roles/openshift_hosted_logging/vars/main.yaml
@@ -1,5 +1,5 @@
kh_kv: "KIBANA_HOSTNAME={{ openshift_hosted_logging_hostname | quote }}"
-es_cs_kv: "ES_CLUSTER_SIZE={{ openshift_hosted_logging_elasticsearch_cluster_size | quote }}"
+es_cs_kv: "ES_CLUSTER_SIZE={{ openshift_hosted_logging_elasticsearch_cluster_size | string | quote }}"
pmu_kv: "PUBLIC_MASTER_URL={{ openshift_hosted_logging_master_public_url | quote }}"
ip_kv: "{{ 'IMAGE_PREFIX=' ~ target_registry | quote if target_registry is defined else '' }}"
oc_process_values: "{{ kh_kv }},{{ es_cs_kv }},{{ pmu_kv }},{{ ip_kv }}"
diff --git a/roles/haproxy/README.md b/roles/openshift_loadbalancer/README.md
index 5bc415066..81fc282be 100644
--- a/roles/haproxy/README.md
+++ b/roles/openshift_loadbalancer/README.md
@@ -1,5 +1,5 @@
-HAProxy
-=======
+OpenShift HAProxy Loadbalancer
+==============================
TODO
diff --git a/roles/haproxy/defaults/main.yml b/roles/openshift_loadbalancer/defaults/main.yml
index a1524cfe1..d096019af 100644
--- a/roles/haproxy/defaults/main.yml
+++ b/roles/openshift_loadbalancer/defaults/main.yml
@@ -1,10 +1,8 @@
---
-haproxy_frontend_port: 80
-
haproxy_frontends:
- name: main
binds:
- - "*:80"
+ - "*:8443"
default_backend: default
haproxy_backends:
@@ -14,9 +12,3 @@ haproxy_backends:
- name: web01
address: 127.0.0.1:9000
opts: check
-
-os_firewall_allow:
-- service: haproxy stats
- port: "9000/tcp"
-- service: haproxy balance
- port: "{{ haproxy_frontend_port }}/tcp"
diff --git a/roles/haproxy/handlers/main.yml b/roles/openshift_loadbalancer/handlers/main.yml
index 5b8691b26..5b8691b26 100644
--- a/roles/haproxy/handlers/main.yml
+++ b/roles/openshift_loadbalancer/handlers/main.yml
diff --git a/roles/openshift_loadbalancer/meta/main.yml b/roles/openshift_loadbalancer/meta/main.yml
new file mode 100644
index 000000000..ed846a1ba
--- /dev/null
+++ b/roles/openshift_loadbalancer/meta/main.yml
@@ -0,0 +1,20 @@
+---
+galaxy_info:
+ author: Jason DeTiberus
+ description: OpenShift haproxy loadbalancer
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 1.9
+ platforms:
+ - name: EL
+ versions:
+ - 7
+dependencies:
+- role: openshift_loadbalancer_facts
+- role: os_firewall
+ os_firewall_allow:
+ - service: haproxy stats
+ port: "9000/tcp"
+ - service: haproxy balance
+ port: "{{ openshift.loadbalancer.frontend_port }}/tcp"
+- role: openshift_repos
diff --git a/roles/haproxy/tasks/main.yml b/roles/openshift_loadbalancer/tasks/main.yml
index 837fa67db..03a7c0e4a 100644
--- a/roles/haproxy/tasks/main.yml
+++ b/roles/openshift_loadbalancer/tasks/main.yml
@@ -7,15 +7,15 @@
file:
path: /etc/systemd/system/haproxy.service.d
state: directory
- when: haproxy_limit_nofile is defined
+ when: "'limit_nofile' in openshift.loadbalancer"
- name: Configure the nofile limits for haproxy
ini_file:
dest: /etc/systemd/system/haproxy.service.d/limits.conf
section: Service
option: LimitNOFILE
- value: "{{ haproxy_limit_nofile }}"
- when: haproxy_limit_nofile is defined
+ value: "{{ openshift.loadbalancer.limit_nofile }}"
+ when: "'limit_nofile' in openshift.loadbalancer"
notify: restart haproxy
register: nofile_limit_result
diff --git a/roles/haproxy/templates/haproxy.cfg.j2 b/roles/openshift_loadbalancer/templates/haproxy.cfg.j2
index cb4380971..05e360d3b 100644
--- a/roles/haproxy/templates/haproxy.cfg.j2
+++ b/roles/openshift_loadbalancer/templates/haproxy.cfg.j2
@@ -3,7 +3,7 @@
global
chroot /var/lib/haproxy
pidfile /var/run/haproxy.pid
- maxconn {{ haproxy_global_maxconn | default('4000') }}
+ maxconn {{ openshift.loadbalancer.global_maxconn }}
user haproxy
group haproxy
daemon
@@ -31,14 +31,14 @@ defaults
timeout server 300s
timeout http-keep-alive 10s
timeout check 10s
- maxconn {{ haproxy_default_maxconn | default('3000') }}
+ maxconn {{ openshift.loadbalancer.default_maxconn }}
listen stats :9000
mode http
stats enable
stats uri /
-{% for frontend in haproxy_frontends %}
+{% for frontend in openshift.loadbalancer.frontends %}
frontend {{ frontend.name }}
{% for bind in frontend.binds %}
bind {{ bind }}
@@ -59,7 +59,7 @@ frontend {{ frontend.name }}
{% endif %}
{% endfor %}
-{% for backend in haproxy_backends %}
+{% for backend in openshift.loadbalancer.backends %}
backend {{ backend.name }}
balance {{ backend.balance }}
{% if 'mode' in backend %}
diff --git a/roles/openshift_loadbalancer_facts/README.md b/roles/openshift_loadbalancer_facts/README.md
new file mode 100644
index 000000000..57537cc03
--- /dev/null
+++ b/roles/openshift_loadbalancer_facts/README.md
@@ -0,0 +1,34 @@
+OpenShift HAProxy Loadbalancer Facts
+====================================
+
+TODO
+
+Requirements
+------------
+
+TODO
+
+Role Variables
+--------------
+
+TODO
+
+Dependencies
+------------
+
+TODO
+
+Example Playbook
+----------------
+
+TODO
+
+License
+-------
+
+Apache License, Version 2.0
+
+Author Information
+------------------
+
+Andrew Butcher (abutcher@redhat.com)
diff --git a/roles/haproxy/meta/main.yml b/roles/openshift_loadbalancer_facts/meta/main.yml
index 0fad106a9..4c5b6552b 100644
--- a/roles/haproxy/meta/main.yml
+++ b/roles/openshift_loadbalancer_facts/meta/main.yml
@@ -1,7 +1,7 @@
---
galaxy_info:
- author: Jason DeTiberus
- description: HAProxy
+ author: Andrew Butcher
+ description: OpenShift loadbalancer facts
company: Red Hat, Inc.
license: Apache License, Version 2.0
min_ansible_version: 1.9
@@ -10,5 +10,4 @@ galaxy_info:
versions:
- 7
dependencies:
-- { role: os_firewall }
-- { role: openshift_repos }
+- role: openshift_facts
diff --git a/roles/openshift_loadbalancer_facts/tasks/main.yml b/roles/openshift_loadbalancer_facts/tasks/main.yml
new file mode 100644
index 000000000..dc244c0be
--- /dev/null
+++ b/roles/openshift_loadbalancer_facts/tasks/main.yml
@@ -0,0 +1,30 @@
+---
+- name: Set haproxy frontend port
+ openshift_facts:
+ role: loadbalancer
+ local_facts:
+ frontend_port: "{{ openshift_master_api_port | default(None) }}"
+
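+# frontend_port is set first because the frontends/backends facts below reference openshift.loadbalancer.frontend_port.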
+- name: Set loadbalancer facts
+ openshift_facts:
+ role: loadbalancer
+ local_facts:
+ limit_nofile: "{{ openshift_loadbalancer_limit_nofile | default(None) }}"
+ default_maxconn: "{{ openshift_loadbalancer_default_maxconn | default(None) }}"
+ global_maxconn: "{{ openshift_loadbalancer_global_maxconn | default(None) }}"
+ frontends:
+ - name: atomic-openshift-api
+ mode: tcp
+ options:
+ - tcplog
+ binds:
+ - "*:{{ openshift.loadbalancer.frontend_port }}"
+ default_backend: atomic-openshift-api
+ backends:
+ - name: atomic-openshift-api
+ mode: tcp
+ option: tcplog
+ balance: source
+ servers: "{{ hostvars
+ | oo_select_keys(groups['oo_masters'])
+ | oo_haproxy_backend_masters(openshift.loadbalancer.frontend_port) }}"
diff --git a/roles/openshift_master/meta/main.yml b/roles/openshift_master/meta/main.yml
index d8834d27f..0a69b3eef 100644
--- a/roles/openshift_master/meta/main.yml
+++ b/roles/openshift_master/meta/main.yml
@@ -12,6 +12,7 @@ galaxy_info:
categories:
- cloud
dependencies:
+- role: openshift_clock
- role: openshift_docker
- role: openshift_cli
- role: openshift_cloud_provider
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index fe0784ea2..28faee155 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -29,7 +29,7 @@
- name: Pull master image
command: >
- docker pull {{ openshift.master.master_image }}:{{ openshift_version }}
+ docker pull {{ openshift.master.master_image }}{{ ':' + openshift_version if openshift_version is defined and openshift_version != '' else '' }}
when: openshift.common.is_containerized | bool
- name: Create openshift.common.data_dir
@@ -91,8 +91,16 @@
template:
dest: "{{ item.filename }}"
src: htpasswd.j2
- mode: 0600
backup: yes
+ when: item.kind == 'HTPasswdPasswordIdentityProvider' and openshift.master.manage_htpasswd | bool
+ with_items: "{{ openshift.master.identity_providers }}"
+
+- name: Ensure htpasswd file exists
+ copy:
+ dest: "{{ item.filename }}"
+ force: no
+ content: ""
+ mode: 0600
when: item.kind == 'HTPasswdPasswordIdentityProvider'
with_items: "{{ openshift.master.identity_providers }}"
@@ -139,7 +147,7 @@
- restart master api
- set_fact:
- translated_identity_providers: "{{ openshift.master.identity_providers | translate_idps('v1') }}"
+ translated_identity_providers: "{{ openshift.master.identity_providers | translate_idps('v1', openshift.common.version, openshift.common.deployment_type) }}"
# TODO: add the validate parameter when there is a validation command to run
- name: Create master config
@@ -164,9 +172,14 @@
register: start_result
notify: Verify API Server
-- name: Stop and disable non HA master when running HA
+- name: Check for non-HA master service presence
+ command: systemctl show {{ openshift.common.service_type }}-master.service
+ register: master_svc_show
+ changed_when: false
+
+- name: Stop and disable non-HA master when running HA
service: name={{ openshift.common.service_type }}-master enabled=no state=stopped
- when: openshift_master_ha | bool
+ when: openshift_master_ha | bool and 'LoadState=not-found' not in master_svc_show.stdout
- set_fact:
master_service_status_changed: "{{ start_result | changed }}"
diff --git a/roles/openshift_master/tasks/systemd_units.yml b/roles/openshift_master/tasks/systemd_units.yml
index a81270bab..458b56fd1 100644
--- a/roles/openshift_master/tasks/systemd_units.yml
+++ b/roles/openshift_master/tasks/systemd_units.yml
@@ -38,22 +38,54 @@
when: create_ha_unit_files | changed
# end workaround for missing systemd unit files
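+# The sysconfig env files below are regenerated from templates on every run; preserve any existing PROXY lines
+# first and restore them afterwards unless proxy settings are supplied via openshift.common facts.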
+- name: Preserve Master API Proxy Config options
+ command: grep PROXY /etc/sysconfig/{{ openshift.common.service_type }}-master-api
+ register: master_api_proxy
+ when: openshift.master.ha is defined and openshift.master.ha | bool and openshift_master_cluster_method == "native"
+ failed_when: false
+ changed_when: false
+
- name: Create the master api service env file
template:
src: "{{ ha_svc_template_path }}/atomic-openshift-master-api.j2"
dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-api
+ backup: true
when: openshift.master.ha is defined and openshift.master.ha | bool and openshift_master_cluster_method == "native"
notify:
- restart master api
+- name: Restore Master API Proxy Config Options
+ when: openshift.master.ha is defined and openshift.master.ha | bool and openshift_master_cluster_method == "native"
+ and master_api_proxy.rc == 0 and 'http_proxy' not in openshift.common and 'https_proxy' not in openshift.common
+ lineinfile:
+ dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-api
+ line: "{{ item }}"
+ with_items: "{{ master_api_proxy.stdout_lines | default([]) }}"
+
+- name: Preserve Master Controllers Proxy Config options
+ command: grep PROXY /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
+ register: master_controllers_proxy
+ when: openshift.master.ha is defined and openshift.master.ha | bool and openshift_master_cluster_method == "native"
+ failed_when: false
+ changed_when: false
+
- name: Create the master controllers service env file
template:
src: "{{ ha_svc_template_path }}/atomic-openshift-master-controllers.j2"
dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
+ backup: true
when: openshift.master.ha is defined and openshift.master.ha | bool and openshift_master_cluster_method == "native"
notify:
- restart master controllers
+- name: Restore Master Controllers Proxy Config Options
+ lineinfile:
+ dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
+ line: "{{ item }}"
+ with_items: "{{ master_controllers_proxy.stdout_lines | default([]) }}"
+ when: openshift.master.ha is defined and openshift.master.ha | bool and openshift_master_cluster_method == "native"
+ and master_controllers_proxy.rc == 0 and 'http_proxy' not in openshift.common and 'https_proxy' not in openshift.common
+
- name: Install Master docker service file
template:
dest: "/etc/systemd/system/{{ openshift.common.service_type }}-master.service"
@@ -61,9 +93,23 @@
register: install_result
when: openshift.common.is_containerized | bool and openshift.master.ha is defined and not openshift.master.ha | bool
+- name: Preserve Master Proxy Config options
+ command: grep PROXY /etc/sysconfig/{{ openshift.common.service_type }}-master
+ register: master_proxy
+ failed_when: false
+ changed_when: false
+
- name: Create the master service env file
template:
src: "atomic-openshift-master.j2"
dest: /etc/sysconfig/{{ openshift.common.service_type }}-master
+ backup: true
notify:
- restart master
+
+- name: Restore Master Proxy Config Options
+ lineinfile:
+ dest: /etc/sysconfig/{{ openshift.common.service_type }}-master
+ line: "{{ item }}"
+ with_items: "{{ master_proxy.stdout_lines | default([]) }}"
+ when: master_proxy.rc == 0 and 'http_proxy' not in openshift.common and 'https_proxy' not in openshift.common
\ No newline at end of file
diff --git a/roles/openshift_master_facts/tasks/main.yml b/roles/openshift_master_facts/tasks/main.yml
index 896dd5e35..d51c5a0a3 100644
--- a/roles/openshift_master_facts/tasks/main.yml
+++ b/roles/openshift_master_facts/tasks/main.yml
@@ -42,6 +42,7 @@
auth_token_max_seconds: "{{ openshift_master_auth_token_max_seconds | default(None) }}"
identity_providers: "{{ openshift_master_identity_providers | default(None) }}"
htpasswd_users: "{{ openshift_master_htpasswd_users | default(lookup('file', openshift_master_htpasswd_file) | oo_htpasswd_users_from_file if openshift_master_htpasswd_file is defined else None) }}"
+ manage_htpasswd: "{{ openshift_master_manage_htpasswd | default(true) }}"
ldap_ca: "{{ openshift_master_ldap_ca | default(lookup('file', openshift_master_ldap_ca_file) if openshift_master_ldap_ca_file is defined else None) }}"
openid_ca: "{{ openshift_master_openid_ca | default(lookup('file', openshift_master_openid_ca_file) if openshift_master_openid_ca_file is defined else None) }}"
request_header_ca: "{{ openshift_master_request_header_ca | default(lookup('file', openshift_master_request_header_ca_file) if openshift_master_request_header_ca_file is defined else None) }}"
diff --git a/roles/openshift_metrics/tasks/main.yaml b/roles/openshift_metrics/tasks/main.yaml
index ca29ad6e1..43b85204a 100644
--- a/roles/openshift_metrics/tasks/main.yaml
+++ b/roles/openshift_metrics/tasks/main.yaml
@@ -44,9 +44,8 @@
shell: >
{{ openshift.common.client_binary }} process -f \
/usr/share/openshift/examples/infrastructure-templates/{{ hawkular_type }}/metrics-deployer.yaml -v \
- HAWKULAR_METRICS_HOSTNAME=hawkular-metrics.{{ openshift.master.default_subdomain }} USE_PERSISTENT_STORAGE={{ hawkular_persistence }} \
- METRIC_DURATION={{ openshift.hosted.metrics.duration }} METRIC_RESOLUTION={{ openshift.hosted.metrics.resolution }} |
- {{ openshift.common.client_binary }} create -n openshift-infra --config={{hawkular_tmp_conf}} -f -
+ HAWKULAR_METRICS_HOSTNAME=hawkular-metrics.{{ openshift.master.default_subdomain }},USE_PERSISTENT_STORAGE={{ hawkular_persistence }},METRIC_DURATION={{ openshift.hosted.metrics.duration }},METRIC_RESOLUTION={{ openshift.hosted.metrics.resolution }} \
+ | {{ openshift.common.client_binary }} create -n openshift-infra --config={{hawkular_tmp_conf}} -f -
register: oex_heapster_services
failed_when: "'already exists' not in oex_heapster_services.stderr and oex_heapster_services.rc != 0"
changed_when: false
@@ -55,3 +54,11 @@
command: >
rm -rf {{hawkular_tmp_conf}}
changed_when: false
+
+- name: "Wait for image pull and deployer pod"
+ shell: "{{ openshift.common.client_binary }} get pods -n openshift-infra | grep metrics-deployer.*Completed"
+ register: result
+ until: result.rc == 0
+ retries: 60
+ delay: 10
+
diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml
index 91aed7aa3..efff5d6cd 100644
--- a/roles/openshift_node/defaults/main.yml
+++ b/roles/openshift_node/defaults/main.yml
@@ -1,16 +1,2 @@
---
-os_firewall_allow:
-- service: Kubernetes kubelet
- port: 10250/tcp
-- service: http
- port: 80/tcp
-- service: https
- port: 443/tcp
-- service: Openshift kubelet ReadOnlyPort
- port: 10255/tcp
-- service: Openshift kubelet ReadOnlyPort udp
- port: 10255/udp
-- service: OpenShift OVS sdn
- port: 4789/udp
- when: openshift.node.use_openshift_sdn | bool
openshift_version: "{{ openshift_pkg_version | default(openshift_image_tag | default(openshift.docker.openshift_image_tag | default(''))) }}"
diff --git a/roles/openshift_node/meta/main.yml b/roles/openshift_node/meta/main.yml
index db1776632..97ab8241b 100644
--- a/roles/openshift_node/meta/main.yml
+++ b/roles/openshift_node/meta/main.yml
@@ -12,10 +12,24 @@ galaxy_info:
categories:
- cloud
dependencies:
+- role: openshift_clock
- role: openshift_docker
- role: openshift_cloud_provider
- role: openshift_common
- role: openshift_node_dnsmasq
when: openshift.common.use_dnsmasq
- role: os_firewall
-
+ os_firewall_allow:
+ - service: Kubernetes kubelet
+ port: 10250/tcp
+ - service: http
+ port: 80/tcp
+ - service: https
+ port: 443/tcp
+ - service: Openshift kubelet ReadOnlyPort
+ port: 10255/tcp
+ - service: Openshift kubelet ReadOnlyPort udp
+ port: 10255/udp
+ - service: OpenShift OVS sdn
+ port: 4789/udp
+ when: openshift.node.use_openshift_sdn | bool
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index be70a170d..657e99e87 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -39,12 +39,12 @@
- name: Pull node image
command: >
- docker pull {{ openshift.node.node_image }}:{{ openshift_version }}
+ docker pull {{ openshift.node.node_image }}{{ ':' + openshift_version if openshift_version is defined and openshift_version != '' else '' }}
when: openshift.common.is_containerized | bool
- name: Pull OpenVSwitch image
command: >
- docker pull {{ openshift.node.ovs_image }}:{{ openshift_version }}
+ docker pull {{ openshift.node.ovs_image }}{{ ':' + openshift_version if openshift_version is defined and openshift_version != '' else '' }}
when: openshift.common.is_containerized | bool and openshift.common.use_openshift_sdn | bool
- name: Install the systemd units
@@ -52,8 +52,9 @@
- name: Reload systemd units
command: systemctl daemon-reload
- when: openshift.common.is_containerized | bool and ( ( install_node_result | changed )
- or ( install_ovs_sysconfig | changed ) )
+ when: openshift.common.is_containerized | bool and (install_node_result | changed or install_ovs_sysconfig | changed or install_node_dep_result | changed)
+ notify:
+ - restart node
- name: Start and enable openvswitch docker service
service: name=openvswitch.service enabled=yes state=started
@@ -113,16 +114,15 @@
service: name={{ openshift.common.service_type }}-node enabled=yes state=started
register: node_start_result
ignore_errors: yes
-
+
- name: Check logs on failure
command: journalctl -xe
register: node_failure
when: node_start_result | failed
-
+
- name: Dump failure information
debug: var=node_failure
when: node_start_result | failed
-
- set_fact:
node_service_status_changed: "{{ node_start_result | changed }}"
diff --git a/roles/openshift_node/tasks/systemd_units.yml b/roles/openshift_node/tasks/systemd_units.yml
index f3262803a..e2a268260 100644
--- a/roles/openshift_node/tasks/systemd_units.yml
+++ b/roles/openshift_node/tasks/systemd_units.yml
@@ -1,6 +1,13 @@
# This file is included both in the openshift_master role and in the upgrade
# playbooks.
+- name: Install Node dependencies docker service file
+ template:
+ dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node-dep.service"
+ src: openshift.docker.node.dep.service
+ register: install_node_dep_result
+ when: openshift.common.is_containerized | bool
+
- name: Install Node docker service file
template:
dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service"
diff --git a/roles/openshift_node/templates/openshift.docker.node.dep.service b/roles/openshift_node/templates/openshift.docker.node.dep.service
new file mode 100644
index 000000000..f66a78479
--- /dev/null
+++ b/roles/openshift_node/templates/openshift.docker.node.dep.service
@@ -0,0 +1,11 @@
+[Unit]
+Requires=docker.service
+After=docker.service
+PartOf={{ openshift.common.service_type }}-node.service
+Before={{ openshift.common.service_type }}-node.service
+
+
+[Service]
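+# Populate DOCKER_ADDTL_BIND_MOUNTS so the node container bind-mounts /usr/bin/docker-current when the host ships it;
+# otherwise write the variable commented out so the node service's EnvironmentFile still resolves.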
+ExecStart=/bin/bash -c "if [[ -f /usr/bin/docker-current ]]; then echo \"DOCKER_ADDTL_BIND_MOUNTS=--volume=/usr/bin/docker-current:/usr/bin/docker-current:ro\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; else echo \"#DOCKER_ADDTL_BIND_MOUNTS=\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; fi"
+ExecStop=
+SyslogIdentifier={{ openshift.common.service_type }}-node-dep
diff --git a/roles/openshift_node/templates/openshift.docker.node.service b/roles/openshift_node/templates/openshift.docker.node.service
index 30f09b250..443e18498 100644
--- a/roles/openshift_node/templates/openshift.docker.node.service
+++ b/roles/openshift_node/templates/openshift.docker.node.service
@@ -8,11 +8,14 @@ Requires=docker.service
Requires=openvswitch.service
{% endif %}
Wants={{ openshift.common.service_type }}-master.service
+Requires={{ openshift.common.service_type }}-node-dep.service
+After={{ openshift.common.service_type }}-node-dep.service
[Service]
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node
+EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node-dep
ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type }}-node
-ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node {% if 'cloudprovider' in openshift and 'kind' in openshift.cloudprovider and openshift.cloudprovider.kind != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:ro -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log -v /dev:/dev {{ openshift.node.node_image }}:${IMAGE_VERSION}
+ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node {% if 'cloudprovider' in openshift and 'kind' in openshift.cloudprovider and openshift.cloudprovider.kind != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:ro -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS {{ openshift.node.node_image }}:${IMAGE_VERSION}
ExecStartPost=/usr/bin/sleep 10
ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-node
SyslogIdentifier={{ openshift.common.service_type }}-node
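With the added Requires/After and EnvironmentFile lines, the containerized node unit now waits for the -node-dep helper and sources the sysconfig file it generates, and the unquoted $DOCKER_ADDTL_BIND_MOUNTS in ExecStart expands either to the extra --volume flag for /usr/bin/docker-current or to nothing at all. A throwaway check like the following (hypothetical tasks, for illustration only) shows what the helper produced on a given host:

    # Hypothetical verification tasks; not part of the role.
    - name: Read the generated node-dep sysconfig file
      command: cat /etc/sysconfig/{{ openshift.common.service_type }}-node-dep
      register: node_dep_env
      changed_when: false

    - name: Show whether /usr/bin/docker-current will be bind-mounted
      debug:
        msg: "{{ node_dep_env.stdout }}"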
diff --git a/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh b/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh
index 09bae1777..51a43d113 100755
--- a/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh
+++ b/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh
@@ -52,6 +52,8 @@ EOF
systemctl restart dnsmasq
sed -i 's/^nameserver.*$/nameserver '"${def_route_ip}"'/g' /etc/resolv.conf
- echo "# nameserver updated by /etc/NetworkManager/dispatcher.d/99-origin-dns.sh" >> /etc/resolv.conf
+ if ! grep -q '99-origin-dns.sh' /etc/resolv.conf; then
+ echo "# nameserver updated by /etc/NetworkManager/dispatcher.d/99-origin-dns.sh" >> /etc/resolv.conf
+ fi
fi
fi
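The added grep guard makes the dispatcher script idempotent: the marker comment is appended to /etc/resolv.conf only the first time, not on every NetworkManager event. The same append-once pattern expressed as an Ansible task (illustrative only; the role keeps this logic in the shell script above):

    # Illustrative equivalent of the grep/echo guard; not used by the role.
    - name: Mark resolv.conf as managed by the origin dispatcher script
      lineinfile:
        dest: /etc/resolv.conf
        line: "# nameserver updated by /etc/NetworkManager/dispatcher.d/99-origin-dns.sh"
        state: present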
diff --git a/roles/openshift_repos/files/origin/repos/maxamillion-origin-next-epel-7.repo b/roles/openshift_repos/files/removed/repos/maxamillion-origin-next-epel-7.repo
index 0b21e0a65..0b21e0a65 100644
--- a/roles/openshift_repos/files/origin/repos/maxamillion-origin-next-epel-7.repo
+++ b/roles/openshift_repos/files/removed/repos/maxamillion-origin-next-epel-7.repo
diff --git a/roles/openshift_repos/tasks/centos_sig.yaml b/roles/openshift_repos/tasks/centos_sig.yaml
new file mode 100644
index 000000000..62cbef5db
--- /dev/null
+++ b/roles/openshift_repos/tasks/centos_sig.yaml
@@ -0,0 +1,6 @@
+---
+- name: Install the CentOS PaaS SIG release packages
+ action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
+ with_items:
+ - centos-release-paas-common
+ - centos-release-openshift-origin
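The new task file installs the two CentOS PaaS SIG release packages through whichever package manager the facts report, which is what the action/ansible_pkg_mgr indirection buys. Written directly against the yum module it would look roughly like this (illustrative sketch; the role keeps the indirection so the same task also works on dnf-based hosts):

    # Rough yum-only equivalent of the task above.
    - name: Install the CentOS PaaS SIG release packages
      yum:
        name: "{{ item }}"
        state: present
      with_items:
      - centos-release-paas-common
      - centos-release-openshift-origin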
diff --git a/roles/openshift_repos/tasks/main.yaml b/roles/openshift_repos/tasks/main.yaml
index af9fefec6..f0b6ed7cb 100644
--- a/roles/openshift_repos/tasks/main.yaml
+++ b/roles/openshift_repos/tasks/main.yaml
@@ -35,7 +35,7 @@
state: absent
with_fileglob:
- '*/repos/*'
- when: not openshift.common.is_containerized | bool
+ when: not openshift.common.is_containerized | bool
and not (item | search("/files/" ~ openshift_deployment_type ~ "/repos"))
and (ansible_os_family == "RedHat" and ansible_distribution != "Fedora")
notify: refresh cache
@@ -52,14 +52,18 @@
notify: refresh cache
- name: Configure gpg keys if needed
- copy: src="{{ item }}" dest=/etc/pki/rpm-gpg/
+ copy:
+ src: "{{ item }}"
+ dest: /etc/pki/rpm-gpg/
with_fileglob:
- "{{ openshift_deployment_type }}/gpg_keys/*"
notify: refresh cache
when: not openshift.common.is_containerized | bool
- name: Configure yum repositories RHEL/CentOS
- copy: src="{{ item }}" dest=/etc/yum.repos.d/
+ copy:
+ src: "{{ item }}"
+ dest: /etc/yum.repos.d/
with_fileglob:
- "{{ openshift_deployment_type }}/repos/*"
notify: refresh cache
@@ -67,8 +71,14 @@
and not openshift.common.is_containerized | bool
- name: Configure yum repositories Fedora
- copy: src="{{ item }}" dest=/etc/yum.repos.d/
+ copy:
+ src: "{{ item }}"
+ dest: /etc/yum.repos.d/
with_fileglob:
- "fedora-{{ openshift_deployment_type }}/repos/*"
notify: refresh cache
when: (ansible_distribution == "Fedora") and not openshift.common.is_containerized | bool
+
+- name: Configure the CentOS PaaS SIG repos if needed
+ include: centos_sig.yaml
+ when: not openshift.common.is_containerized | bool and deployment_type == 'origin' and ansible_distribution == 'CentOS'
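The include added at the end pulls centos_sig.yaml into the role, gated on three conditions: a non-containerized (RPM) install, the origin deployment type, and a CentOS host. The same gate reads a little more clearly in the list form of when (an alternative formatting, not what the patch uses):

    # Alternative formatting of the same conditional include.
    - name: Configure the CentOS PaaS SIG repos if needed
      include: centos_sig.yaml
      when:
      - not openshift.common.is_containerized | bool
      - deployment_type == 'origin'
      - ansible_distribution == 'CentOS'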
diff --git a/roles/openshift_storage_nfs/defaults/main.yml b/roles/openshift_storage_nfs/defaults/main.yml
index df0bb9fd4..7f3c054e7 100644
--- a/roles/openshift_storage_nfs/defaults/main.yml
+++ b/roles/openshift_storage_nfs/defaults/main.yml
@@ -16,6 +16,3 @@ openshift:
options: "*(rw,root_squash)"
volume:
name: "metrics"
-os_firewall_allow:
-- service: nfs
- port: "2049/tcp"
diff --git a/roles/openshift_storage_nfs/meta/main.yml b/roles/openshift_storage_nfs/meta/main.yml
index d675e0750..865865d9c 100644
--- a/roles/openshift_storage_nfs/meta/main.yml
+++ b/roles/openshift_storage_nfs/meta/main.yml
@@ -11,5 +11,8 @@ galaxy_info:
- 7
dependencies:
- role: os_firewall
+ os_firewall_allow:
+ - service: nfs
+ port: "2049/tcp"
- role: openshift_hosted_facts
- role: openshift_repos
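Dropping os_firewall_allow from defaults/main.yml and attaching it to the os_firewall entry in meta/main.yml passes the nfs port rule as a dependency parameter, so it is applied where it is consumed instead of being merged through the role's own defaults. The general shape of a parameterized role dependency (hypothetical service and port, for illustration):

    # Hypothetical example of passing parameters to a dependency in meta/main.yml.
    dependencies:
    - role: os_firewall
      os_firewall_allow:
      - service: example-service
        port: "8080/tcp"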
diff --git a/roles/rhel_subscribe/tasks/enterprise.yml b/roles/rhel_subscribe/tasks/enterprise.yml
index 08540f440..c4aa7db6a 100644
--- a/roles/rhel_subscribe/tasks/enterprise.yml
+++ b/roles/rhel_subscribe/tasks/enterprise.yml
@@ -16,7 +16,7 @@
- fail:
msg: "{{ ose_version }} is not a valid version for {{ deployment_type }} deployment type"
when: ( deployment_type == 'enterprise' and ose_version not in ['3.0'] ) or
- ( deployment_type in ['atomic-enterprise', 'openshift-enterprise'] and ose_version not in ['3.1'] )
+ ( deployment_type in ['atomic-enterprise', 'openshift-enterprise'] and ose_version not in ['3.1', '3.2'] )
- name: Enable RHEL repositories
command: subscription-manager repos \
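The widened check accepts 3.2 alongside 3.1 for the atomic-enterprise and openshift-enterprise deployment types while still allowing only 3.0 for the plain enterprise type. One way to keep that list data-driven as new releases appear (a sketch assuming a hypothetical allowed_ose_versions dict variable, not part of this patch):

    # allowed_ose_versions is an assumed variable, for example:
    #   allowed_ose_versions:
    #     enterprise: ['3.0']
    #     atomic-enterprise: ['3.1', '3.2']
    #     openshift-enterprise: ['3.1', '3.2']
    - fail:
        msg: "{{ ose_version }} is not a valid version for {{ deployment_type }} deployment type"
      when: ose_version not in allowed_ose_versions.get(deployment_type, [])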
diff --git a/test/env-setup b/test/env-setup
deleted file mode 100644
index 7456a641b..000000000
--- a/test/env-setup
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/bin/bash
-
-CUR_PATH=$(pwd)
-
-PREFIX_PYTHONPATH=$CUR_PATH/inventory/:$CUR_PATH/roles/lib_yaml_editor/library
-
-
-export PYTHONPATH=$PREFIX_PYTHONPATH:$PYTHONPATH
diff --git a/test/units/README.md b/test/units/README.md
deleted file mode 100644
index 78a02c3ea..000000000
--- a/test/units/README.md
+++ /dev/null
@@ -1,7 +0,0 @@
-Location for python unittests.
-
-These should be run by sourcing the env-setup:
-$ source test/env-setup
-
-Then navigate to the test/units/ directory.
-$ python -m unittest multi_inventory_test
diff --git a/test/units/multi_inventory_test.py b/test/units/multi_inventory_test.py
deleted file mode 100755
index 168cd82b7..000000000
--- a/test/units/multi_inventory_test.py
+++ /dev/null
@@ -1,114 +0,0 @@
-#!/usr/bin/env python2
-'''
- Unit tests for MultiInventory
-'''
-
-import unittest
-import multi_inventory
-
-# Removing invalid variable names for tests so that I can
-# keep them brief
-# pylint: disable=invalid-name
-class MultiInventoryTest(unittest.TestCase):
- '''
- Test class for multiInventory
- '''
-
-# def setUp(self):
-# '''setup method'''
-# pass
-
- def test_merge_simple_1(self):
- '''Testing a simple merge of 2 dictionaries'''
- a = {"key1" : 1}
- b = {"key1" : 2}
- result = {}
- _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
- self.assertEqual(result, {"key1": [1, 2]})
-
- def test_merge_b_empty(self):
-    '''Testing a merge of an empty dictionary'''
- a = {"key1" : 1}
- b = {}
- result = {}
- _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
- self.assertEqual(result, {"key1": 1})
-
- def test_merge_a_empty(self):
-    '''Testing a merge of an empty dictionary'''
- b = {"key1" : 1}
- a = {}
- result = {}
- _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
- self.assertEqual(result, {"key1": 1})
-
- def test_merge_hash_array(self):
- '''Testing a merge of a dictionary and a dictionary with an array'''
- a = {"key1" : {"hasha": 1}}
- b = {"key1" : [1, 2]}
- result = {}
- _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
- self.assertEqual(result, {"key1": [{"hasha": 1}, 1, 2]})
-
- def test_merge_array_hash(self):
- '''Testing a merge of a dictionary with an array and a dictionary with a hash'''
- a = {"key1" : [1, 2]}
- b = {"key1" : {"hasha": 1}}
- result = {}
- _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
- self.assertEqual(result, {"key1": [1, 2, {"hasha": 1}]})
-
- def test_merge_keys_1(self):
- '''Testing a merge on a dictionary for keys'''
- a = {"key1" : [1, 2], "key2" : {"hasha": 2}}
- b = {"key2" : {"hashb": 1}}
- result = {}
- _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
- self.assertEqual(result, {"key1": [1, 2], "key2": {"hasha": 2, "hashb": 1}})
-
- def test_merge_recursive_1(self):
- '''Testing a recursive merge'''
- a = {"a" : {"b": {"c": 1}}}
- b = {"a" : {"b": {"c": 2}}}
- result = {}
- _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
- self.assertEqual(result, {"a": {"b": {"c": [1, 2]}}})
-
- def test_merge_recursive_array_item(self):
- '''Testing a recursive merge for an array'''
- a = {"a" : {"b": {"c": [1]}}}
- b = {"a" : {"b": {"c": 2}}}
- result = {}
- _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
- self.assertEqual(result, {"a": {"b": {"c": [1, 2]}}})
-
- def test_merge_recursive_hash_item(self):
- '''Testing a recursive merge for a hash'''
- a = {"a" : {"b": {"c": {"d": 1}}}}
- b = {"a" : {"b": {"c": 2}}}
- result = {}
- _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
- self.assertEqual(result, {"a": {"b": {"c": [{"d": 1}, 2]}}})
-
- def test_merge_recursive_array_hash(self):
- '''Testing a recursive merge for an array and a hash'''
- a = {"a" : [{"b": {"c": 1}}]}
- b = {"a" : {"b": {"c": 1}}}
- result = {}
- _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
- self.assertEqual(result, {"a": [{"b": {"c": 1}}]})
-
- def test_merge_recursive_hash_array(self):
- '''Testing a recursive merge for an array and a hash'''
- a = {"a" : {"b": {"c": 1}}}
- b = {"a" : [{"b": {"c": 1}}]}
- result = {}
- _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
- self.assertEqual(result, {"a": [{"b": {"c": 1}}]})
-
-# def tearDown(self):
-# '''TearDown method'''
-# pass
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/test/units/yedit_test.py b/test/units/yedit_test.py
deleted file mode 100755
index 09a65e888..000000000
--- a/test/units/yedit_test.py
+++ /dev/null
@@ -1,143 +0,0 @@
-#!/usr/bin/env python2
-'''
- Unit tests for yedit
-'''
-
-import unittest
-import os
-
-# Removing invalid variable names for tests so that I can
-# keep them brief
-# pylint: disable=invalid-name,no-name-in-module
-from yedit import Yedit
-
-class YeditTest(unittest.TestCase):
- '''
- Test class for yedit
- '''
- data = {'a': 'a',
- 'b': {'c': {'d': [{'e': 'x'}, 'f', 'g']}},
- }
-
- filename = 'yedit_test.yml'
-
- def setUp(self):
- ''' setup method will create a file and set to known configuration '''
- yed = Yedit(YeditTest.filename)
- yed.yaml_dict = YeditTest.data
- yed.write()
-
- def test_load(self):
- ''' Testing a get '''
- yed = Yedit('yedit_test.yml')
- self.assertEqual(yed.yaml_dict, self.data)
-
- def test_write(self):
- ''' Testing a simple write '''
- yed = Yedit('yedit_test.yml')
- yed.put('key1', 1)
- yed.write()
- self.assertTrue(yed.yaml_dict.has_key('key1'))
- self.assertEqual(yed.yaml_dict['key1'], 1)
-
- def test_write_x_y_z(self):
- '''Testing a write of multilayer key'''
- yed = Yedit('yedit_test.yml')
- yed.put('x.y.z', 'modified')
- yed.write()
- yed.load()
- self.assertEqual(yed.get('x.y.z'), 'modified')
-
- def test_delete_a(self):
- '''Testing a simple delete '''
- yed = Yedit('yedit_test.yml')
- yed.delete('a')
- yed.write()
- yed.load()
- self.assertTrue(not yed.yaml_dict.has_key('a'))
-
- def test_delete_b_c(self):
- '''Testing delete of layered key '''
- yed = Yedit('yedit_test.yml')
- yed.delete('b.c')
- yed.write()
- yed.load()
- self.assertTrue(yed.yaml_dict.has_key('b'))
- self.assertFalse(yed.yaml_dict['b'].has_key('c'))
-
- def test_create(self):
- '''Testing a create '''
- os.unlink(YeditTest.filename)
- yed = Yedit('yedit_test.yml')
- yed.create('foo', 'bar')
- yed.write()
- yed.load()
- self.assertTrue(yed.yaml_dict.has_key('foo'))
- self.assertTrue(yed.yaml_dict['foo'] == 'bar')
-
- def test_create_content(self):
- '''Testing a create with content '''
- content = {"foo": "bar"}
- yed = Yedit("yedit_test.yml", content)
- yed.write()
- yed.load()
- self.assertTrue(yed.yaml_dict.has_key('foo'))
- self.assertTrue(yed.yaml_dict['foo'], 'bar')
-
- def test_array_insert(self):
- '''Testing a create with content '''
- yed = Yedit("yedit_test.yml")
- yed.put('b.c.d[0]', 'inject')
- self.assertTrue(yed.get('b.c.d[0]') == 'inject')
-
- def test_array_insert_first_index(self):
- '''Testing a create with content '''
- yed = Yedit("yedit_test.yml")
- yed.put('b.c.d[0]', 'inject')
- self.assertTrue(yed.get('b.c.d[1]') == 'f')
-
- def test_array_insert_second_index(self):
- '''Testing a create with content '''
- yed = Yedit("yedit_test.yml")
- yed.put('b.c.d[0]', 'inject')
- self.assertTrue(yed.get('b.c.d[2]') == 'g')
-
- def test_dict_array_dict_access(self):
- '''Testing a create with content'''
- yed = Yedit("yedit_test.yml")
- yed.put('b.c.d[0]', [{'x': {'y': 'inject'}}])
- self.assertTrue(yed.get('b.c.d[0].[0].x.y') == 'inject')
-
- def test_dict_array_dict_replace(self):
- '''Testing multilevel delete'''
- yed = Yedit("yedit_test.yml")
- yed.put('b.c.d[0]', [{'x': {'y': 'inject'}}])
- yed.put('b.c.d[0].[0].x.y', 'testing')
- self.assertTrue(yed.yaml_dict.has_key('b'))
- self.assertTrue(yed.yaml_dict['b'].has_key('c'))
- self.assertTrue(yed.yaml_dict['b']['c'].has_key('d'))
- self.assertTrue(isinstance(yed.yaml_dict['b']['c']['d'], list))
- self.assertTrue(isinstance(yed.yaml_dict['b']['c']['d'][0], list))
- self.assertTrue(isinstance(yed.yaml_dict['b']['c']['d'][0][0], dict))
- self.assertTrue(yed.yaml_dict['b']['c']['d'][0][0]['x'].has_key('y'))
- self.assertTrue(yed.yaml_dict['b']['c']['d'][0][0]['x']['y'], 'testing')
-
- def test_dict_array_dict_remove(self):
- '''Testing multilevel delete'''
- yed = Yedit("yedit_test.yml")
- yed.put('b.c.d[0]', [{'x': {'y': 'inject'}}])
- yed.delete('b.c.d[0].[0].x.y')
- self.assertTrue(yed.yaml_dict.has_key('b'))
- self.assertTrue(yed.yaml_dict['b'].has_key('c'))
- self.assertTrue(yed.yaml_dict['b']['c'].has_key('d'))
- self.assertTrue(isinstance(yed.yaml_dict['b']['c']['d'], list))
- self.assertTrue(isinstance(yed.yaml_dict['b']['c']['d'][0], list))
- self.assertTrue(isinstance(yed.yaml_dict['b']['c']['d'][0][0], dict))
- self.assertFalse(yed.yaml_dict['b']['c']['d'][0][0]['x'].has_key('y'))
-
- def tearDown(self):
- '''TearDown method'''
- os.unlink(YeditTest.filename)
-
-if __name__ == "__main__":
- unittest.main()