-rw-r--r--  .gitignore | 1
-rw-r--r--  .tito/releasers.conf | 5
-rw-r--r--  README_AWS.md | 4
-rw-r--r--  README_CONTAINERIZED_INSTALLATION.md | 4
-rwxr-xr-x  bin/cluster | 18
-rw-r--r--  callback_plugins/openshift_quick_installer.py | 291
-rw-r--r--  filter_plugins/oo_filters.py | 12
-rw-r--r--  inventory/byo/hosts.origin.example | 66
-rw-r--r--  inventory/byo/hosts.ose.example | 64
-rw-r--r--  openshift-ansible.spec | 28
-rw-r--r--  playbooks/byo/openshift-cluster/config.yml | 6
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml | 16
l---------  playbooks/byo/openshift-cluster/upgrades/v3_3/roles | 1
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml | 138
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml | 100
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml | 102
-rw-r--r--  playbooks/common/openshift-cluster/config.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/initialize_facts.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/openshift_hosted.yml | 106
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates.yml | 8
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/cleanup_unused_images.yml | 22
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml | 7
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/init.yml | 50
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml | 40
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/post_control_plane.yml (renamed from playbooks/common/openshift-cluster/upgrades/post.yml) | 0
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre.yml | 311
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/gate_checks.yml | 6
l---------  playbooks/common/openshift-cluster/upgrades/pre/roles | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml | 31
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml | 23
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml | 37
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_nodes_running.yml | 13
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml | 45
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml (renamed from playbooks/common/openshift-cluster/upgrades/upgrade.yml) | 190
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml | 75
-rw-r--r--  playbooks/common/openshift-cluster/verify_ansible_version.yml | 3
-rw-r--r--  playbooks/common/openshift-master/config.yml | 4
-rw-r--r--  playbooks/common/openshift-master/scaleup.yml | 4
-rw-r--r--  playbooks/common/openshift-nfs/config.yml | 2
-rw-r--r--  playbooks/common/openshift-node/config.yml | 14
-rw-r--r--  playbooks/gce/openshift-cluster/tasks/launch_instances.yml | 2
-rw-r--r--  playbooks/openstack/openshift-cluster/dns.yml | 52
-rw-r--r--  playbooks/openstack/openshift-cluster/files/heat_stack.yaml | 161
-rw-r--r--  playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml | 24
-rw-r--r--  playbooks/openstack/openshift-cluster/files/user-data | 13
-rw-r--r--  playbooks/openstack/openshift-cluster/launch.yml | 12
-rw-r--r--  playbooks/openstack/openshift-cluster/update.yml | 2
-rw-r--r--  playbooks/openstack/openshift-cluster/vars.yml | 1
-rw-r--r--  roles/cockpit-ui/tasks/main.yml | 9
-rw-r--r--  roles/docker/defaults/main.yml | 1
-rw-r--r--  roles/docker/tasks/main.yml | 8
-rw-r--r--  roles/etcd/tasks/main.yml | 5
-rw-r--r--  roles/etcd_client_certificates/tasks/main.yml | 3
-rw-r--r--  roles/etcd_server_certificates/tasks/main.yml | 3
-rw-r--r--  roles/openshift_cli/meta/main.yml | 1
-rw-r--r--  roles/openshift_cloud_provider/templates/openstack.conf.j2 | 5
-rw-r--r--  roles/openshift_docker_facts/tasks/main.yml | 2
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/infrastructure-templates/enterprise/logging-deployer.yaml | 5
-rw-r--r--  roles/openshift_examples/tasks/main.yml | 4
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py | 35
-rw-r--r--  roles/openshift_hosted/tasks/registry/registry.yml | 1
-rw-r--r--  roles/openshift_hosted/tasks/registry/secure.yml | 57
-rw-r--r--  roles/openshift_hosted/templates/registry_config.j2 | 2
-rw-r--r--  roles/openshift_hosted_logging/README.md | 3
-rw-r--r--  roles/openshift_hosted_logging/defaults/main.yml | 2
-rw-r--r--  roles/openshift_hosted_logging/tasks/deploy_logging.yaml | 18
-rw-r--r--  roles/openshift_hosted_logging/vars/main.yaml | 58
-rw-r--r--  roles/openshift_manage_node/tasks/main.yml | 10
-rw-r--r--  roles/openshift_master/handlers/main.yml | 4
-rw-r--r--  roles/openshift_master/tasks/main.yml | 3
-rw-r--r--  roles/openshift_master/templates/master.yaml.v1.j2 | 3
-rw-r--r--  roles/openshift_metrics/handlers/main.yml | 4
-rw-r--r--  roles/openshift_named_certificates/tasks/main.yml | 6
-rw-r--r--  roles/openshift_node/tasks/main.yml | 8
-rw-r--r--  roles/openshift_node/tasks/systemd_units.yml | 8
-rw-r--r--  roles/openshift_node/templates/node.yaml.v1.j2 | 2
-rw-r--r--  roles/openshift_node_certificates/handlers/main.yml | 10
-rw-r--r--  roles/openshift_node_certificates/tasks/main.yml | 14
-rw-r--r--  roles/openshift_repos/handlers/main.yml | 4
-rw-r--r--  roles/openshift_storage_nfs/tasks/main.yml | 2
-rw-r--r--  roles/openshift_storage_nfs/templates/exports.j2 | 1
-rw-r--r--  roles/openshift_version/meta/main.yml | 2
-rw-r--r--  roles/os_firewall/tasks/firewall/iptables.yml | 4
-rw-r--r--  roles/rhel_subscribe/tasks/enterprise.yml | 4
-rw-r--r--  roles/rhel_subscribe/tasks/main.yml | 2
-rw-r--r--  utils/Makefile | 25
-rw-r--r--  utils/docs/man/man1/atomic-openshift-installer.1 | 186
-rw-r--r--  utils/docs/man/man1/atomic-openshift-installer.1.asciidoc.in | 167
-rw-r--r--  utils/etc/ansible-quiet.cfg | 33
-rw-r--r--  utils/etc/ansible.cfg | 4
-rw-r--r--  utils/setup.py | 2
-rw-r--r--  utils/src/MANIFEST.in | 1
-rw-r--r--  utils/src/ooinstall/cli_installer.py | 23
-rw-r--r--  utils/src/ooinstall/oo_config.py | 33
-rw-r--r--  utils/src/ooinstall/openshift_ansible.py | 46
-rw-r--r--  utils/src/ooinstall/utils.py | 10
-rw-r--r--  utils/test/cli_installer_tests.py | 166
-rw-r--r--  utils/test/oo_config_tests.py | 80
99 files changed, 2176 insertions(+), 1034 deletions(-)
diff --git a/.gitignore b/.gitignore
index dcea26d60..48507c5d1 100644
--- a/.gitignore
+++ b/.gitignore
@@ -20,3 +20,4 @@ multi_inventory.yaml
.tags*
ansible.cfg
*.retry
+.vscode/*
diff --git a/.tito/releasers.conf b/.tito/releasers.conf
index d98832ff2..daa350cf6 100644
--- a/.tito/releasers.conf
+++ b/.tito/releasers.conf
@@ -22,6 +22,11 @@ releaser = tito.release.DistGitReleaser
branches = rhaos-3.3-rhel-7
srpm_disttag = .el7aos
+[aos-3.4]
+releaser = tito.release.DistGitReleaser
+branches = rhaos-3.4-rhel-7
+srpm_disttag = .el7aos
+
[copr-openshift-ansible]
releaser = tito.release.CoprReleaser
project_name = @OpenShiftOnlineOps/openshift-ansible
diff --git a/README_AWS.md b/README_AWS.md
index cccb122f6..650a921a4 100644
--- a/README_AWS.md
+++ b/README_AWS.md
@@ -1,4 +1,4 @@
-:warning: **WARNING** :warning: This feature is community supported and has not been tested by Red Hat. Visit [docs.openshift.com](https://docs.openshift.com) for [OpenShift Enterprise](https://docs.openshift.com/enterprise/latest/install_config/install/index.html) or [OpenShift Origin](https://docs.openshift.org/latest/install_config/install/index.html) supported installation docs.
+:warning: **WARNING** :warning: This feature is community supported and has not been tested by Red Hat. Visit [docs.openshift.com](https://docs.openshift.com) for [OpenShift Enterprise](https://docs.openshift.com/enterprise/latest/install_config/install/planning.html) or [OpenShift Origin](https://docs.openshift.org/latest/install_config/install/planning.html) supported installation docs.
AWS Setup Instructions
======================
@@ -196,5 +196,5 @@ You should now be ready to follow the **What's Next?** section of the advanced i
Refer to the advanced installation guide for your deployment type:
-* [OpenShift Enterprise](https://docs.openshift.com/enterprise/3.0/install_config/install/advanced_install.html#what-s-next)
+* [OpenShift Enterprise](https://docs.openshift.com/enterprise/latest/install_config/install/advanced_install.html#what-s-next)
* [OpenShift Origin](https://docs.openshift.org/latest/install_config/install/advanced_install.html#what-s-next)
diff --git a/README_CONTAINERIZED_INSTALLATION.md b/README_CONTAINERIZED_INSTALLATION.md
index 56f25546c..c615154ef 100644
--- a/README_CONTAINERIZED_INSTALLATION.md
+++ b/README_CONTAINERIZED_INSTALLATION.md
@@ -1,8 +1,8 @@
# Overview
Users may now deploy containerized versions of OpenShift Origin, OpenShift
-Enterprise, or Atomic Enterprise Platform on Atomic
-Host[https://projectatomic.io] or RHEL, Centos, and Fedora. This includes
+Enterprise, or Atomic Enterprise Platform on [Atomic
+Host](https://projectatomic.io) or RHEL, Centos, and Fedora. This includes
OpenvSwitch based SDN.
diff --git a/bin/cluster b/bin/cluster
index 080bf244a..68d2a7cd4 100755
--- a/bin/cluster
+++ b/bin/cluster
@@ -68,6 +68,22 @@ class Cluster(object):
cluster['num_etcd'] = args.etcd
cluster['cluster_env'] = args.env
+ if args.cloudprovider and args.provider == 'openstack':
+ cluster['openshift_cloudprovider_kind'] = 'openstack'
+ cluster['openshift_cloudprovider_openstack_auth_url'] = os.getenv('OS_AUTH_URL')
+ cluster['openshift_cloudprovider_openstack_username'] = os.getenv('OS_USERNAME')
+ cluster['openshift_cloudprovider_openstack_password'] = os.getenv('OS_PASSWORD')
+ if 'OS_USER_DOMAIN_ID' in os.environ:
+ cluster['openshift_cloudprovider_openstack_domain_id'] = os.getenv('OS_USER_DOMAIN_ID')
+ if 'OS_USER_DOMAIN_NAME' in os.environ:
+ cluster['openshift_cloudprovider_openstack_domain_name'] = os.getenv('OS_USER_DOMAIN_NAME')
+ if 'OS_PROJECT_ID' in os.environ or 'OS_TENANT_ID' in os.environ:
+ cluster['openshift_cloudprovider_openstack_tenant_id'] = os.getenv('OS_PROJECT_ID',os.getenv('OS_TENANT_ID'))
+ if 'OS_PROJECT_NAME' in os.environ or 'OS_TENANT_NAME' in os.environ:
+ cluster['openshift_cloudprovider_openstack_tenant_name'] = os.getenv('OS_PROJECT_NAME',os.getenv('OS_TENANT_NAME'))
+ if 'OS_REGION_NAME' in os.environ:
+ cluster['openshift_cloudprovider_openstack_region'] = os.getenv('OS_REGION_NAME')
+
self.action(args, inventory, cluster, playbook)
def add_nodes(self, args):
@@ -332,6 +348,8 @@ This wrapper is overriding the following ansible variables:
create_parser = action_parser.add_parser('create', help='Create a cluster',
parents=[meta_parser])
+ create_parser.add_argument('-c', '--cloudprovider', action='store_true',
+ help='Enable the cloudprovider')
create_parser.add_argument('-m', '--masters', default=1, type=int,
help='number of masters to create in cluster')
create_parser.add_argument('-n', '--nodes', default=2, type=int,
diff --git a/callback_plugins/openshift_quick_installer.py b/callback_plugins/openshift_quick_installer.py
new file mode 100644
index 000000000..e2f125df9
--- /dev/null
+++ b/callback_plugins/openshift_quick_installer.py
@@ -0,0 +1,291 @@
+# pylint: disable=invalid-name,protected-access,import-error,line-too-long
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+"""This file is a stdout callback plugin for the OpenShift Quick
+Installer. The purpose of this callback plugin is to reduce the amount
+of produced output for customers and enable simpler progress checking.
+
+What's different:
+
+* Playbook progress is expressed as: Play <current_play>/<total_plays> (Play Name)
+ Ex: Play 3/30 (Initialize Megafrobber)
+
+* The Tasks and Handlers in each play (and included roles) are printed
+ as a series of .'s following the play progress line.
+
+* Many of these methods include copy and paste code from the upstream
+ default.py callback. We do that to give us control over the stdout
+ output while allowing Ansible to handle the file logging
+ normally. The biggest changes here are that we are manually setting
+ `log_only` to True in the Display.display method and we redefine the
+ Display.banner method locally so we can set log_only on that call as
+ well.
+
+"""
+
+from __future__ import (absolute_import, print_function)
+import imp
+import os
+import sys
+from ansible import constants as C
+from ansible.utils.color import colorize, hostcolor
+ANSIBLE_PATH = imp.find_module('ansible')[1]
+DEFAULT_PATH = os.path.join(ANSIBLE_PATH, 'plugins/callback/default.py')
+DEFAULT_MODULE = imp.load_source(
+ 'ansible.plugins.callback.default',
+ DEFAULT_PATH
+)
+
+try:
+ from ansible.plugins.callback import CallbackBase
+ BASECLASS = CallbackBase
+except ImportError: # < ansible 2.1
+ BASECLASS = DEFAULT_MODULE.CallbackModule
+
+
+reload(sys)
+sys.setdefaultencoding('utf-8')
+
+
+class CallbackModule(DEFAULT_MODULE.CallbackModule):
+
+ """
+ Ansible callback plugin
+ """
+ CALLBACK_VERSION = 2.2
+ CALLBACK_TYPE = 'stdout'
+ CALLBACK_NAME = 'openshift_quick_installer'
+ CALLBACK_NEEDS_WHITELIST = False
+ plays_count = 0
+ plays_total_ran = 0
+
+ def banner(self, msg, color=None):
+ '''Prints a header-looking line with stars taking up to 80 columns
+ of width (3 columns, minimum)
+
+ Overrides the upstream banner method so that display is called
+ with log_only=True
+ '''
+ msg = msg.strip()
+ star_len = (79 - len(msg))
+ if star_len < 0:
+ star_len = 3
+ stars = "*" * star_len
+ self._display.display("\n%s %s" % (msg, stars), color=color, log_only=True)
+
+ def v2_playbook_on_start(self, playbook):
+ """This is basically the start of it all"""
+ self.plays_count = len(playbook.get_plays())
+ self.plays_total_ran = 0
+
+ if self._display.verbosity > 1:
+ from os.path import basename
+ self.banner("PLAYBOOK: %s" % basename(playbook._file_name))
+
+ def v2_playbook_on_play_start(self, play):
+ """Each play calls this once before running any tasks
+
+We could print the number of tasks here as well by using
+`play.get_tasks()` but that is not accurate when a play includes a
+role. Only the tasks directly assigned to a play are exposed in the
+`play` object.
+ """
+ self.plays_total_ran += 1
+ print("")
+ print("Play %s/%s (%s)" % (self.plays_total_ran, self.plays_count, play.get_name()))
+
+ name = play.get_name().strip()
+ if not name:
+ msg = "PLAY"
+ else:
+ msg = "PLAY [%s]" % name
+
+ self.banner(msg)
+
+ # pylint: disable=unused-argument,no-self-use
+ def v2_playbook_on_task_start(self, task, is_conditional):
+ """This prints out the task header. For example:
+
+TASK [openshift_facts : Ensure PyYaml is installed] ***...
+
+Rather than print out all that for every task, we print a dot
+character to indicate a task has been started.
+ """
+ sys.stdout.write('.')
+
+ args = ''
+ # args can be specified as no_log in several places: in the task or in
+ # the argument spec. We can check whether the task is no_log but the
+ # argument spec can't be because that is only run on the target
+ # machine and we haven't run it there yet.
+ #
+ # So we give people a config option to affect display of the args so
+ # that they can secure this if they feel that their stdout is insecure
+ # (shoulder surfing, logging stdout straight to a file, etc).
+ if not task.no_log and C.DISPLAY_ARGS_TO_STDOUT:
+ args = ', '.join(('%s=%s' % a for a in task.args.items()))
+ args = ' %s' % args
+ self.banner("TASK [%s%s]" % (task.get_name().strip(), args))
+ if self._display.verbosity >= 2:
+ path = task.get_path()
+ if path:
+ self._display.display("task path: %s" % path, color=C.COLOR_DEBUG, log_only=True)
+
+ # pylint: disable=unused-argument,no-self-use
+ def v2_playbook_on_handler_task_start(self, task):
+ """Print out task header for handlers
+
+Rather than print out a header for every handler, we print a dot
+character to indicate a handler task has been started.
+"""
+ sys.stdout.write('.')
+
+ self.banner("RUNNING HANDLER [%s]" % task.get_name().strip())
+
+ # pylint: disable=unused-argument,no-self-use
+ def v2_playbook_on_cleanup_task_start(self, task):
+ """Print out a task header for cleanup tasks
+
+Rather than print out a header for every cleanup task, we print a dot
+character to indicate a cleanup task has been started.
+"""
+ sys.stdout.write('.')
+
+ self.banner("CLEANUP TASK [%s]" % task.get_name().strip())
+
+ def v2_playbook_on_include(self, included_file):
+ """Print out paths to statically included files"""
+ msg = 'included: %s for %s' % (included_file._filename, ", ".join([h.name for h in included_file._hosts]))
+ self._display.display(msg, color=C.COLOR_SKIP, log_only=True)
+
+ def v2_runner_on_ok(self, result):
+ """This prints out task results in a fancy format
+
+The only thing we change here is adding `log_only=True` to the
+.display() call
+ """
+ delegated_vars = result._result.get('_ansible_delegated_vars', None)
+ self._clean_results(result._result, result._task.action)
+ if result._task.action in ('include', 'include_role'):
+ return
+ elif result._result.get('changed', False):
+ if delegated_vars:
+ msg = "changed: [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host'])
+ else:
+ msg = "changed: [%s]" % result._host.get_name()
+ color = C.COLOR_CHANGED
+ else:
+ if delegated_vars:
+ msg = "ok: [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host'])
+ else:
+ msg = "ok: [%s]" % result._host.get_name()
+ color = C.COLOR_OK
+
+ if result._task.loop and 'results' in result._result:
+ self._process_items(result)
+ else:
+
+ if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and '_ansible_verbose_override' not in result._result:
+ msg += " => %s" % (self._dump_results(result._result),)
+ self._display.display(msg, color=color, log_only=True)
+
+ self._handle_warnings(result._result)
+
+ def v2_runner_item_on_ok(self, result):
+ """Print out task results for items you're iterating over"""
+ delegated_vars = result._result.get('_ansible_delegated_vars', None)
+ if result._task.action in ('include', 'include_role'):
+ return
+ elif result._result.get('changed', False):
+ msg = 'changed'
+ color = C.COLOR_CHANGED
+ else:
+ msg = 'ok'
+ color = C.COLOR_OK
+
+ if delegated_vars:
+ msg += ": [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host'])
+ else:
+ msg += ": [%s]" % result._host.get_name()
+
+ msg += " => (item=%s)" % (self._get_item(result._result),)
+
+ if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and '_ansible_verbose_override' not in result._result:
+ msg += " => %s" % self._dump_results(result._result)
+ self._display.display(msg, color=color, log_only=True)
+
+ def v2_runner_item_on_skipped(self, result):
+ """Print out task results when an item is skipped"""
+ if C.DISPLAY_SKIPPED_HOSTS:
+ msg = "skipping: [%s] => (item=%s) " % (result._host.get_name(), self._get_item(result._result))
+ if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and '_ansible_verbose_override' not in result._result:
+ msg += " => %s" % self._dump_results(result._result)
+ self._display.display(msg, color=C.COLOR_SKIP, log_only=True)
+
+ def v2_runner_on_skipped(self, result):
+ """Print out task results when a task (or something else?) is skipped"""
+ if C.DISPLAY_SKIPPED_HOSTS:
+ if result._task.loop and 'results' in result._result:
+ self._process_items(result)
+ else:
+ msg = "skipping: [%s]" % result._host.get_name()
+ if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and '_ansible_verbose_override' not in result._result:
+ msg += " => %s" % self._dump_results(result._result)
+ self._display.display(msg, color=C.COLOR_SKIP, log_only=True)
+
+ def v2_playbook_on_notify(self, res, handler):
+ """What happens when a task result is 'changed' and the task has a
+'notify' list attached.
+ """
+ self._display.display("skipping: no hosts matched", color=C.COLOR_SKIP, log_only=True)
+
+ def v2_playbook_on_stats(self, stats):
+ """Print the final playbook run stats"""
+ self._display.display("", screen_only=True)
+ self.banner("PLAY RECAP")
+
+ hosts = sorted(stats.processed.keys())
+ for h in hosts:
+ t = stats.summarize(h)
+
+ self._display.display(
+ u"%s : %s %s %s %s" % (
+ hostcolor(h, t),
+ colorize(u'ok', t['ok'], C.COLOR_OK),
+ colorize(u'changed', t['changed'], C.COLOR_CHANGED),
+ colorize(u'unreachable', t['unreachable'], C.COLOR_UNREACHABLE),
+ colorize(u'failed', t['failures'], C.COLOR_ERROR)),
+ screen_only=True
+ )
+
+ self._display.display(
+ u"%s : %s %s %s %s" % (
+ hostcolor(h, t, False),
+ colorize(u'ok', t['ok'], None),
+ colorize(u'changed', t['changed'], None),
+ colorize(u'unreachable', t['unreachable'], None),
+ colorize(u'failed', t['failures'], None)),
+ log_only=True
+ )
+
+ self._display.display("", screen_only=True)
+ self._display.display("", screen_only=True)
+
+ # Some plays are conditional and won't run (such as load
+ # balancers) if they aren't required. Let the user know about
+ # this to avoid potential confusion.
+ if self.plays_total_ran != self.plays_count:
+ print("Installation Complete: Note: Play count is an estimate and some were skipped because your install does not require them")
+ self._display.display("", screen_only=True)
diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py
index 8bb1c8de2..5358a244e 100644
--- a/filter_plugins/oo_filters.py
+++ b/filter_plugins/oo_filters.py
@@ -18,9 +18,17 @@ import re
import json
import yaml
from ansible.parsing.yaml.dumper import AnsibleDumper
-from ansible.utils.unicode import to_unicode
from urlparse import urlparse
+try:
+ # ansible-2.2
+ # ansible.utils.unicode.to_unicode is deprecated in ansible-2.2,
+ # ansible.module_utils._text.to_text should be used instead.
+ from ansible.module_utils._text import to_text
+except ImportError:
+ # ansible-2.1
+ from ansible.utils.unicode import to_unicode as to_text
+
# Disabling too-many-public-methods, since filter methods are necessarily
# public
# pylint: disable=too-many-public-methods
@@ -627,7 +635,7 @@ class FilterModule(object):
default_flow_style=False,
Dumper=AnsibleDumper, **kw)
padded = "\n".join([" " * level * indent + line for line in transformed.splitlines()])
- return to_unicode("\n{0}".format(padded))
+ return to_text("\n{0}".format(padded))
except Exception as my_e:
raise errors.AnsibleFilterError('Failed to convert: %s' % my_e)
diff --git a/inventory/byo/hosts.origin.example b/inventory/byo/hosts.origin.example
index 8b3a6e403..7febefe95 100644
--- a/inventory/byo/hosts.origin.example
+++ b/inventory/byo/hosts.origin.example
@@ -65,10 +65,6 @@ openshift_release=v1.2
# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#serving-static-files
#openshift_master_oauth_template=/path/to/login-template.html
-# Configure loggingPublicURL in the master config for aggregate logging
-# See: https://docs.openshift.org/latest/install_config/aggregate_logging.html
-#openshift_master_logging_public_url=https://kibana.example.com
-
# Configure imagePolicyConfig in the master config
# See: https://godoc.org/github.com/openshift/origin/pkg/cmd/server/api#ImagePolicyConfig
#openshift_master_image_policy_config={"maxImagesBulkImportedPerRepository": 3, "disableScheduledImport": true}
@@ -99,7 +95,6 @@ openshift_release=v1.2
# modify image streams to point at that registry by setting the following to true
#openshift_examples_modify_imagestreams=true
-
# Origin copr repo
#openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name': 'OpenShift Origin COPR', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/', 'enabled': 1, 'gpgcheck': 1, 'gpgkey': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}]
@@ -145,6 +140,8 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_cloudprovider_openstack_auth_url=http://openstack.example.com:35357/v2.0/
#openshift_cloudprovider_openstack_username=username
#openshift_cloudprovider_openstack_password=password
+#openshift_cloudprovider_openstack_domain_id=domain_id
+#openshift_cloudprovider_openstack_domain_name=domain_name
#openshift_cloudprovider_openstack_tenant_id=tenant_id
#openshift_cloudprovider_openstack_tenant_name=tenant_name
#openshift_cloudprovider_openstack_region=region
@@ -371,6 +368,55 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# `/hawkular/metrics` path will break installation of metrics.
#openshift_hosted_metrics_public_url=https://hawkular-metrics.example.com/hawkular/metrics
+# Logging deployment
+#
+# Currently logging deployment is disabled by default, enable it by setting this
+#openshift_hosted_logging_deploy=true
+#
+# Logging storage config
+# Option A - NFS Host Group
+# An NFS volume will be created with path "nfs_directory/volume_name"
+# on the host within the [nfs] host group. For example, the volume
+# path using these options would be "/exports/logging"
+#openshift_hosted_logging_storage_kind=nfs
+#openshift_hosted_logging_storage_access_modes=['ReadWriteOnce']
+#openshift_hosted_logging_storage_nfs_directory=/exports
+#openshift_hosted_logging_storage_nfs_options='*(rw,root_squash)'
+#openshift_hosted_logging_storage_volume_name=logging
+#openshift_hosted_logging_storage_volume_size=10Gi
+#
+# Option B - External NFS Host
+# NFS volume must already exist with path "nfs_directory/_volume_name" on
+# the storage_host. For example, the remote volume path using these
+# options would be "nfs.example.com:/exports/logging"
+#openshift_hosted_logging_storage_kind=nfs
+#openshift_hosted_logging_storage_access_modes=['ReadWriteOnce']
+#openshift_hosted_logging_storage_host=nfs.example.com
+#openshift_hosted_logging_storage_nfs_directory=/exports
+#openshift_hosted_logging_storage_volume_name=logging
+#openshift_hosted_logging_storage_volume_size=10Gi
+#
+# Option C - Dynamic -- If openshift supports dynamic volume provisioning for
+# your cloud platform use this.
+#openshift_hosted_logging_storage_kind=dynamic
+#
+# Option D - none -- Logging will use emptydir volumes which are destroyed when
+# pods are deleted
+#
+# Other Logging Options -- Common items you may wish to reconfigure, for the complete
+# list of options please see roles/openshift_hosted_logging/README.md
+#
+# Configure loggingPublicURL in the master config for aggregate logging, defaults
+# to https://kibana.{{ openshift_master_default_subdomain }}
+#openshift_master_logging_public_url=https://kibana.example.com
+# Configure the number of elastic search nodes, unless you're using dynamic provisioning
+# this value must be 1
+#openshift_hosted_logging_elasticsearch_cluster_size=1
+#openshift_hosted_logging_hostname=logging.apps.example.com
+# Configure the prefix and version for the deployer image
+#openshift_hosted_logging_deployer_prefix=registry.example.com:8888/openshift3/
+#openshift_hosted_logging_deployer_version=3.3.0
+
# Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet')
# os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'
@@ -394,6 +440,13 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# your nodes, pods, or service CIDRs for security reasons.
#openshift_master_external_ip_network_cidrs=['0.0.0.0/0']
+# IngressIPNetworkCIDR controls the range to assign ingress IPs from for
+# services of type LoadBalancer on bare metal. If empty, ingress IPs will not
+# be assigned. It may contain a single CIDR that will be allocated from. For
+# security reasons, you should ensure that this range does not overlap with
+# the CIDRs reserved for external IPs, nodes, pods, or services.
+#openshift_master_ingress_ip_network_cidr=172.46.0.0/16
+
# Configure number of bits to allocate to each host’s subnet e.g. 8
# would mean a /24 network on the host.
#osm_host_subnet_length=8
@@ -517,8 +570,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_builddefaults_git_http_proxy=http://USER:PASSWORD@HOST:PORT
#openshift_builddefaults_git_https_proxy=https://USER:PASSWORD@HOST:PORT
# Or you may optionally define your own serialized as json
-#openshift_builddefaults_json='{"BuildDefaults":{"configuration":{"apiVersion":"v1","env":[{"name":"HTTP_PROXY","value":"http://proxy.example.com.redhat.com:3128"},{"name":"NO_PROXY","value":"ose3-master.example.com"}],"gitHTTPProxy":"http://proxy.example.com:3128","kind":"BuildDefaultsConfig"}}}'
-
+#openshift_builddefaults_json='{"BuildDefaults":{"configuration":{"kind":"BuildDefaultsConfig","apiVersion":"v1","gitHTTPSProxy":"http://proxy.example.com.redhat.com:3128","gitHTTPProxy":"http://proxy.example.com.redhat.com:3128","env":[{"name":"HTTP_PROXY","value":"http://proxy.example.com.redhat.com:3128"},{"name":"NO_PROXY","value":"ose3-master.example.com"}]}}}'
# masterConfig.volumeConfig.dynamicProvisioningEnabled, configurable as of 1.2/3.2, enabled by default
#openshift_master_dynamic_provisioning_enabled=False
diff --git a/inventory/byo/hosts.ose.example b/inventory/byo/hosts.ose.example
index af653f850..2645d4510 100644
--- a/inventory/byo/hosts.ose.example
+++ b/inventory/byo/hosts.ose.example
@@ -65,10 +65,6 @@ openshift_release=v3.2
# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#serving-static-files
#openshift_master_oauth_template=/path/to/login-template.html
-# Configure loggingPublicURL in the master config for aggregate logging
-# See: https://docs.openshift.com/enterprise/latest/install_config/aggregate_logging.html
-#openshift_master_logging_public_url=https://kibana.example.com
-#
# Configure imagePolicyConfig in the master config
# See: https://godoc.org/github.com/openshift/origin/pkg/cmd/server/api#ImagePolicyConfig
#openshift_master_image_policy_config={"maxImagesBulkImportedPerRepository": 3, "disableScheduledImport": true}
@@ -144,6 +140,8 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_cloudprovider_openstack_auth_url=http://openstack.example.com:35357/v2.0/
#openshift_cloudprovider_openstack_username=username
#openshift_cloudprovider_openstack_password=password
+#openshift_cloudprovider_openstack_domain_id=domain_id
+#openshift_cloudprovider_openstack_domain_name=domain_name
#openshift_cloudprovider_openstack_tenant_id=tenant_id
#openshift_cloudprovider_openstack_tenant_name=tenant_name
#openshift_cloudprovider_openstack_region=region
@@ -370,6 +368,54 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# `/hawkular/metrics` path will break installation of metrics.
#openshift_hosted_metrics_public_url=https://hawkular-metrics.example.com/hawkular/metrics
+# Logging deployment
+#
+# Currently logging deployment is disabled by default, enable it by setting this
+#openshift_hosted_logging_deploy=true
+#
+# Logging storage config
+# Option A - NFS Host Group
+# An NFS volume will be created with path "nfs_directory/volume_name"
+# on the host within the [nfs] host group. For example, the volume
+# path using these options would be "/exports/logging"
+#openshift_hosted_logging_storage_kind=nfs
+#openshift_hosted_logging_storage_access_modes=['ReadWriteOnce']
+#openshift_hosted_logging_storage_nfs_directory=/exports
+#openshift_hosted_logging_storage_nfs_options='*(rw,root_squash)'
+#openshift_hosted_logging_storage_volume_name=logging
+#openshift_hosted_logging_storage_volume_size=10Gi
+#
+# Option B - External NFS Host
+# NFS volume must already exist with path "nfs_directory/_volume_name" on
+# the storage_host. For example, the remote volume path using these
+# options would be "nfs.example.com:/exports/logging"
+#openshift_hosted_logging_storage_kind=nfs
+#openshift_hosted_logging_storage_access_modes=['ReadWriteOnce']
+#openshift_hosted_logging_storage_host=nfs.example.com
+#openshift_hosted_logging_storage_nfs_directory=/exports
+#openshift_hosted_logging_storage_volume_name=logging
+#openshift_hosted_logging_storage_volume_size=10Gi
+#
+# Option C - Dynamic -- If openshift supports dynamic volume provisioning for
+# your cloud platform use this.
+#openshift_hosted_logging_storage_kind=dynamic
+#
+# Option D - none -- Logging will use emptydir volumes which are destroyed when
+# pods are deleted
+#
+# Other Logging Options -- Common items you may wish to reconfigure, for the complete
+# list of options please see roles/openshift_hosted_logging/README.md
+#
+# Configure loggingPublicURL in the master config for aggregate logging, defaults
+# to https://kibana.{{ openshift_master_default_subdomain }}
+#openshift_master_logging_public_url=https://kibana.example.com
+# Configure the number of elastic search nodes, unless you're using dynamic provisioning
+# this value must be 1
+#openshift_hosted_logging_elasticsearch_cluster_size=1
+#openshift_hosted_logging_hostname=logging.apps.example.com
+# Configure the prefix and version for the deployer image
+#openshift_hosted_logging_deployer_prefix=registry.example.com:8888/openshift3/
+#openshift_hosted_logging_deployer_version=3.3.0
# Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet')
# os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'
@@ -394,6 +440,13 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# your nodes, pods, or service CIDRs for security reasons.
#openshift_master_external_ip_network_cidrs=['0.0.0.0/0']
+# IngressIPNetworkCIDR controls the range to assign ingress IPs from for
+# services of type LoadBalancer on bare metal. If empty, ingress IPs will not
+# be assigned. It may contain a single CIDR that will be allocated from. For
+# security reasons, you should ensure that this range does not overlap with
+# the CIDRs reserved for external IPs, nodes, pods, or services.
+#openshift_master_ingress_ip_network_cidr=172.46.0.0/16
+
# Configure number of bits to allocate to each host’s subnet e.g. 8
# would mean a /24 network on the host.
#osm_host_subnet_length=8
@@ -517,8 +570,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_builddefaults_git_http_proxy=http://USER:PASSWORD@HOST:PORT
#openshift_builddefaults_git_https_proxy=https://USER:PASSWORD@HOST:PORT
# Or you may optionally define your own serialized as json
-#openshift_builddefaults_json='{"BuildDefaults":{"configuration":{"apiVersion":"v1","env":[{"name":"HTTP_PROXY","value":"http://proxy.example.com.redhat.com:3128"},{"name":"NO_PROXY","value":"ose3-master.example.com"}],"gitHTTPProxy":"http://proxy.example.com:3128","kind":"BuildDefaultsConfig"}}}'
-
+#openshift_builddefaults_json='{"BuildDefaults":{"configuration":{"kind":"BuildDefaultsConfig","apiVersion":"v1","gitHTTPSProxy":"http://proxy.example.com.redhat.com:3128","gitHTTPProxy":"http://proxy.example.com.redhat.com:3128","env":[{"name":"HTTP_PROXY","value":"http://proxy.example.com.redhat.com:3128"},{"name":"NO_PROXY","value":"ose3-master.example.com"}]}}}'
# masterConfig.volumeConfig.dynamicProvisioningEnabled, configurable as of 1.2/3.2, enabled by default
#openshift_master_dynamic_provisioning_enabled=False
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index e57dbfb1b..d31447d7a 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -70,6 +70,9 @@ cp -rp filter_plugins %{buildroot}%{_datadir}/ansible_plugins/
# openshift-ansible-lookup-plugins install
cp -rp lookup_plugins %{buildroot}%{_datadir}/ansible_plugins/
+# openshift-ansible-callback-plugins install
+cp -rp callback_plugins %{buildroot}%{_datadir}/ansible_plugins/
+
# create symlinks from /usr/share/ansible/plugins/lookup ->
# /usr/share/ansible_plugins/lookup_plugins
pushd %{buildroot}%{_datadir}
@@ -77,6 +80,7 @@ mkdir -p ansible/plugins
pushd ansible/plugins
ln -s ../../ansible_plugins/lookup_plugins lookup
ln -s ../../ansible_plugins/filter_plugins filter
+ln -s ../../ansible_plugins/callback_plugins callback
popd
popd
@@ -87,6 +91,9 @@ pushd utils
mv -f %{buildroot}%{_bindir}/oo-install %{buildroot}%{_bindir}/atomic-openshift-installer
mkdir -p %{buildroot}%{_datadir}/atomic-openshift-utils/
cp etc/ansible.cfg %{buildroot}%{_datadir}/atomic-openshift-utils/ansible.cfg
+mkdir -p %{buildroot}%{_mandir}/man1/
+cp -v docs/man/man1/atomic-openshift-installer.1 %{buildroot}%{_mandir}/man1/
+cp etc/ansible-quiet.cfg %{buildroot}%{_datadir}/atomic-openshift-utils/ansible-quiet.cfg
popd
# Base openshift-ansible files
@@ -120,6 +127,7 @@ Requires: %{name} = %{version}
Requires: %{name}-roles = %{version}
Requires: %{name}-lookup-plugins = %{version}
Requires: %{name}-filter-plugins = %{version}
+Requires: %{name}-callback-plugins = %{version}
BuildArch: noarch
%description playbooks
@@ -156,6 +164,7 @@ Summary: Openshift and Atomic Enterprise Ansible roles
Requires: %{name} = %{version}
Requires: %{name}-lookup-plugins = %{version}
Requires: %{name}-filter-plugins = %{version}
+Requires: %{name}-callback-plugins = %{version}
BuildArch: noarch
%description roles
@@ -197,6 +206,22 @@ BuildArch: noarch
%{_datadir}/ansible_plugins/lookup_plugins
%{_datadir}/ansible/plugins/lookup
+
+# ----------------------------------------------------------------------------------
+# openshift-ansible-callback-plugins subpackage
+# ----------------------------------------------------------------------------------
+%package callback-plugins
+Summary: Openshift and Atomic Enterprise Ansible callback plugins
+Requires: %{name} = %{version}
+BuildArch: noarch
+
+%description callback-plugins
+%{summary}.
+
+%files callback-plugins
+%{_datadir}/ansible_plugins/callback_plugins
+%{_datadir}/ansible/plugins/callback
+
# ----------------------------------------------------------------------------------
# atomic-openshift-utils subpackage
# ----------------------------------------------------------------------------------
@@ -219,6 +244,8 @@ Atomic OpenShift Utilities includes
%{python_sitelib}/ooinstall*
%{_bindir}/atomic-openshift-installer
%{_datadir}/atomic-openshift-utils/ansible.cfg
+%{_mandir}/man1/*
+%{_datadir}/atomic-openshift-utils/ansible-quiet.cfg
%changelog
@@ -2384,4 +2411,3 @@ Atomic OpenShift Utilities includes
* Mon Oct 19 2015 Troy Dawson <tdawson@redhat.com> 3.0.2-1
- Initial Package
-
diff --git a/playbooks/byo/openshift-cluster/config.yml b/playbooks/byo/openshift-cluster/config.yml
index 0b85b2485..fccb03982 100644
--- a/playbooks/byo/openshift-cluster/config.yml
+++ b/playbooks/byo/openshift-cluster/config.yml
@@ -1,7 +1,8 @@
---
- include: ../../common/openshift-cluster/verify_ansible_version.yml
-- hosts: localhost
+- name: Create initial host groups for localhost
+ hosts: localhost
connection: local
become: no
gather_facts: no
@@ -14,7 +15,8 @@
groups: l_oo_all_hosts
with_items: "{{ g_all_hosts | default([]) }}"
-- hosts: l_oo_all_hosts
+- name: Create initial host groups for all hosts
+ hosts: l_oo_all_hosts
gather_facts: no
tags:
- always
diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
index 3a285ab9f..9be6becc1 100644
--- a/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
@@ -1,6 +1,6 @@
- name: Check for appropriate Docker versions
- hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
roles:
- openshift_facts
tasks:
@@ -19,29 +19,29 @@
# don't want to carry on, potentially taking out every node. The playbook can safely be re-run
# and will not take any action on a node already running the requested docker version.
- name: Evacuate and upgrade nodes
- hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
serial: 1
any_errors_fatal: true
tasks:
- name: Prepare for Node evacuation
command: >
- {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --schedulable=false
+ {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename }} --schedulable=false
delegate_to: "{{ groups.oo_first_master.0 }}"
- when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_config
+ when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade
- name: Evacuate Node for Kubelet upgrade
command: >
- {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --evacuate --force
+ {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename }} --evacuate --force
delegate_to: "{{ groups.oo_first_master.0 }}"
- when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_config
+ when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade
- include: ../../../../common/openshift-cluster/upgrades/docker/upgrade.yml
when: l_docker_upgrade is defined and l_docker_upgrade | bool
- name: Set node schedulability
command: >
- {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --schedulable=true
+ {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename }} --schedulable=true
delegate_to: "{{ groups.oo_first_master.0 }}"
when: openshift.node.schedulable | bool
- when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_config and openshift.node.schedulable | bool
+ when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade and openshift.node.schedulable | bool
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/roles b/playbooks/byo/openshift-cluster/upgrades/v3_3/roles
new file mode 120000
index 000000000..6bc1a7aef
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/roles
@@ -0,0 +1 @@
+../../../../../roles
\ No newline at end of file
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
index e740b12c0..7a3829283 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
@@ -1,67 +1,101 @@
---
-- include: ../../../../common/openshift-cluster/verify_ansible_version.yml
-
-- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tasks:
- - include_vars: ../../../../byo/openshift-cluster/cluster_hosts.yml
- - add_host:
- name: "{{ item }}"
- groups: l_oo_all_hosts
- with_items: g_all_hosts | default([])
+#
+# Full Control Plane + Nodes Upgrade
+#
+- include: ../../../../common/openshift-cluster/upgrades/init.yml
+ tags:
+ - pre_upgrade
+# Configure the upgrade target for the common upgrade tasks:
- hosts: l_oo_all_hosts
- gather_facts: no
+ tags:
+ - pre_upgrade
tasks:
- - include_vars: ../../../../byo/openshift-cluster/cluster_hosts.yml
-
-- include: ../../../../common/openshift-cluster/evaluate_groups.yml
- vars:
- # Do not allow adding hosts during upgrade.
- g_new_master_hosts: []
- g_new_node_hosts: []
- openshift_cluster_id: "{{ cluster_id | default('default') }}"
- openshift_deployment_type: "{{ deployment_type }}"
-
-- name: Set oo_options
- hosts: oo_all_hosts
- tasks:
- - set_fact:
- openshift_docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') }}"
- when: openshift_docker_additional_registries is not defined
- - set_fact:
- openshift_docker_insecure_registries: "{{ lookup('oo_option', 'docker_insecure_registries') }}"
- when: openshift_docker_insecure_registries is not defined
- - set_fact:
- openshift_docker_blocked_registries: "{{ lookup('oo_option', 'docker_blocked_registries') }}"
- when: openshift_docker_blocked_registries is not defined
- set_fact:
- openshift_docker_options: "{{ lookup('oo_option', 'docker_options') }}"
- when: openshift_docker_options is not defined
- - set_fact:
- openshift_docker_log_driver: "{{ lookup('oo_option', 'docker_log_driver') }}"
- when: openshift_docker_log_driver is not defined
- - set_fact:
- openshift_docker_log_options: "{{ lookup('oo_option', 'docker_log_options') }}"
- when: openshift_docker_log_options is not defined
+ openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
+ openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
+# Pre-upgrade
-# Configure the upgrade target for the common upgrade tasks:
-- hosts: l_oo_all_hosts
+- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos and initialize facts on all hosts
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+ tags:
+ - pre_upgrade
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
tasks:
- set_fact:
- openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
- openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+ openshift_generate_no_proxy_hosts | default(True) | bool }}"
-- include: ../../../../common/openshift-cluster/upgrades/pre.yml
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
vars:
- openshift_deployment_type: "{{ deployment_type }}"
-- include: ../../../../common/openshift-cluster/upgrades/upgrade.yml
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
+
+- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml
vars:
- openshift_deployment_type: "{{ deployment_type }}"
master_config_hook: "v3_3/master_config_upgrade.yml"
+
+- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml
+ vars:
node_config_hook: "v3_3/node_config_upgrade.yml"
+
- include: ../../../openshift-master/restart.yml
-- include: ../../../../common/openshift-cluster/upgrades/post.yml
+
+- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
new file mode 100644
index 000000000..d6af71827
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
@@ -0,0 +1,100 @@
+---
+#
+# Control Plane Upgrade Playbook
+#
+# Upgrades masters and Docker (only on standalone etcd hosts)
+#
+# This upgrade does not include:
+# - node service running on masters
+# - docker running on masters
+# - node service running on dedicated nodes
+#
+# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
+#
+- include: ../../../../common/openshift-cluster/upgrades/init.yml
+ tags:
+ - pre_upgrade
+
+# Configure the upgrade target for the common upgrade tasks:
+- hosts: l_oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
+ openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
+
+# Pre-upgrade
+
+- name: Update repos on control plane hosts
+ hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
+ tags:
+ - pre_upgrade
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+ openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
+
+- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_3/master_config_upgrade.yml"
+
+- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
new file mode 100644
index 000000000..e2a33cc00
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
@@ -0,0 +1,102 @@
+---
+#
+# Node Upgrade Playbook
+#
+# Upgrades nodes only, but requires the control plane to have already been upgraded.
+#
+- include: ../../../../common/openshift-cluster/upgrades/init.yml
+ tags:
+ - pre_upgrade
+
+# Configure the upgrade target for the common upgrade tasks:
+- hosts: l_oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
+ openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
+
+# Pre-upgrade
+- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos on nodes
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+ roles:
+ - openshift_repos
+ tags:
+ - pre_upgrade
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+ openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- name: Verify masters are already upgraded
+ hosts: oo_masters_to_config
+ tags:
+ - pre_upgrade
+ tasks:
+ - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
+ when: openshift.common.version != openshift_version
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
+
+- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml
+ vars:
+ node_config_hook: "v3_3/node_config_upgrade.yml"
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
index d6a99fcda..801c8065d 100644
--- a/playbooks/common/openshift-cluster/config.yml
+++ b/playbooks/common/openshift-cluster/config.yml
@@ -13,7 +13,7 @@
- include: initialize_openshift_version.yml
-- name: Set oo_options
+- name: Set oo_option facts
hosts: oo_all_hosts
tags:
- always
diff --git a/playbooks/common/openshift-cluster/initialize_facts.yml b/playbooks/common/openshift-cluster/initialize_facts.yml
index 04dde632b..6d83d2527 100644
--- a/playbooks/common/openshift-cluster/initialize_facts.yml
+++ b/playbooks/common/openshift-cluster/initialize_facts.yml
@@ -11,3 +11,5 @@
hostname: "{{ openshift_hostname | default(None) }}"
- set_fact:
openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
+ - set_fact:
+ openshift_deployment_type: "{{ deployment_type }}"
diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml
index 4aca4daf4..2ba7fded5 100644
--- a/playbooks/common/openshift-cluster/openshift_hosted.yml
+++ b/playbooks/common/openshift-cluster/openshift_hosted.yml
@@ -19,6 +19,12 @@
openshift_hosted_router_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}"
openshift_hosted_registry_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}"
when: "'master' in hostvars[groups.oo_first_master.0].openshift and 'registry_url' in hostvars[groups.oo_first_master.0].openshift.master"
+ - set_fact:
+ logging_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ (openshift.master.default_subdomain | default('router.default.svc.cluster.local', true))) }}"
+ logging_ops_hostname: "{{ openshift_hosted_logging_ops_hostname | default('kibana-ops.' ~ (openshift.master.default_subdomain | default('router.default.svc.cluster.local', true))) }}"
+ logging_master_public_url: "{{ openshift_hosted_logging_master_public_url | default(openshift.master.public_api_url) }}"
+ logging_elasticsearch_cluster_size: "{{ openshift_hosted_logging_elasticsearch_cluster_size | default(1) }}"
+ logging_elasticsearch_ops_cluster_size: "{{ openshift_hosted_logging_elasticsearch_ops_cluster_size | default(1) }}"
roles:
- role: openshift_cli
- role: openshift_hosted_facts
@@ -43,88 +49,20 @@
when: not openshift.common.version_gte_3_2_or_1_2
- role: openshift_hosted
- role: openshift_metrics
- when: openshift.hosted.metrics.deploy | bool
- - role: cockpit-ui
- when: openshift.common.deployment_subtype == 'registry'
+ when: openshift_hosted_metrics_deploy | default(false) | bool
+ - role: openshift_hosted_logging
+ when: openshift_hosted_logging_deploy | default(false) | bool
+ openshift_hosted_logging_hostname: "{{ logging_hostname }}"
+ openshift_hosted_logging_ops_hostname: "{{ logging_ops_hostname }}"
+ openshift_hosted_logging_master_public_url: "{{ logging_master_public_url }}"
+ openshift_hosted_logging_elasticsearch_cluster_size: "{{ logging_elasticsearch_cluster_size }}"
+ openshift_hosted_logging_elasticsearch_pvc_dynamic: "{{ 'true' if openshift.hosted.logging.storage_kind | default(none) == 'dynamic' else 'false' }}"
+ openshift_hosted_logging_elasticsearch_pvc_size: "{{ openshift.hosted.logging.storage.volume.size if openshift.hosted.logging.storage_kind | default(none) == 'dynamic' else '' }}"
+ openshift_hosted_logging_elasticsearch_pvc_prefix: "{{ 'logging-es' if openshift.hosted.logging.storage_kind | default(none) is not none else '' }}"
+ openshift_hosted_logging_elasticsearch_ops_cluster_size: "{{ logging_elasticsearch_ops_cluster_size }}"
+ openshift_hosted_logging_elasticsearch_ops_pvc_dynamic: "{{ 'true' if openshift.hosted.logging.storage_kind | default(none) == 'dynamic' else 'false' }}"
+ openshift_hosted_logging_elasticsearch_ops_pvc_size: "{{ openshift.hosted.logging.storage.volume.size if openshift.hosted.logging.storage_kind | default(none) == 'dynamic' else '' }}"
+ openshift_hosted_logging_elasticsearch_ops_pvc_prefix: "{{ 'logging-es' if openshift.hosted.logging.storage_kind | default(none) is not none else '' }}"
-- name: Configure CA certificate for secure registry
- hosts: oo_nodes_to_config
- tags:
- - hosted
- tasks:
- - name: Create temp directory for kubeconfig
- command: mktemp -d /tmp/openshift-ansible-XXXXXX
- register: mktemp
- when: openshift.common.deployment_subtype == 'registry'
- changed_when: false
- delegate_to: "{{ groups.oo_first_master.0 }}"
- run_once: true
- - set_fact:
- openshift_hosted_kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
- when: openshift.common.deployment_subtype == 'registry'
- delegate_to: "{{ groups.oo_first_master.0 }}"
- run_once: true
- - name: Copy the admin client config(s)
- command: >
- cp {{ openshift.common.config_base }}/master/admin.kubeconfig {{ openshift_hosted_kubeconfig }}
- when: openshift.common.deployment_subtype == 'registry'
- changed_when: false
- delegate_to: "{{ groups.oo_first_master.0 }}"
- run_once: true
- - name: Retrieve docker-registry route
- command: >
- {{ openshift.common.client_binary }} get route docker-registry
- --template='{{ '{{' }} .spec.host {{ '}}' }}'
- --config={{ openshift_hosted_kubeconfig }}
- -n default
- register: docker_registry_route
- when: openshift.common.deployment_subtype == 'registry'
- changed_when: false
- delegate_to: "{{ groups.oo_first_master.0 }}"
- run_once: true
- - name: Retrieve registry service IP
- command: >
- {{ openshift.common.client_binary }} get service docker-registry
- --template='{{ '{{' }} .spec.clusterIP {{ '}}' }}'
- --config={{ openshift_hosted_kubeconfig }}
- -n default
- register: docker_registry_service_ip
- when: openshift.common.deployment_subtype == 'registry'
- changed_when: false
- delegate_to: "{{ groups.oo_first_master.0 }}"
- run_once: true
- - name: Create registry CA directories
- file:
- path: "/etc/docker/certs.d/{{ item }}"
- state: directory
- with_items:
- - "{{ docker_registry_service_ip.stdout }}:5000"
- - "{{ docker_registry_route.stdout }}"
- - "docker-registry.default.svc.cluster.local:5000"
- when: openshift.common.deployment_subtype == 'registry'
- - name: Copy CA to registry CA directories
- copy:
- src: "{{ openshift.common.config_base }}/node/ca.crt"
- dest: "/etc/docker/certs.d/{{ item }}"
- remote_src: yes
- force: yes
- with_items:
- - "{{ docker_registry_service_ip.stdout }}:5000"
- - "{{ docker_registry_route.stdout }}"
- - "docker-registry.default.svc.cluster.local:5000"
- when: openshift.common.deployment_subtype == 'registry'
- notify:
- - Restart docker
- - name: Delete temp directory
- file:
- name: "{{ mktemp.stdout }}"
- state: absent
- when: openshift.common.deployment_subtype == 'registry'
- changed_when: False
- delegate_to: "{{ groups.oo_first_master.0 }}"
- run_once: true
- handlers:
- - name: Restart docker
- service:
- name: docker
- state: restarted
+ - role: cockpit-ui
+ when: ( openshift.common.version_gte_3_3_or_1_3 | bool ) and ( openshift_hosted_manage_registry | default(true) | bool )
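Editor's note: the hosted play now derives the Kibana hostnames, master public URL, and Elasticsearch cluster sizes from openshift_hosted_logging_* inventory variables, falling back to the sub-domain and single-node defaults computed above. A hedged example of group_vars that would drive these settings (variable names come from this diff; the values are illustrative only):

# group_vars/OSEv3.yml (illustrative values)
openshift_hosted_logging_deploy: true
openshift_hosted_logging_hostname: kibana.apps.example.com
openshift_hosted_logging_ops_hostname: kibana-ops.apps.example.com
openshift_hosted_logging_elasticsearch_cluster_size: 3
openshift_hosted_logging_elasticsearch_ops_cluster_size: 1
openshift_hosted_metrics_deploy: false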
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates.yml b/playbooks/common/openshift-cluster/redeploy-certificates.yml
index 5b72c3450..4996c56a7 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates.yml
@@ -212,7 +212,7 @@
- name: Determine if node is currently scheduleable
command: >
{{ openshift.common.client_binary }} --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig
- get node {{ openshift.common.hostname | lower }} -o json
+ get node {{ openshift.node.nodename }} -o json
register: node_output
when: openshift_certificates_redeploy_ca | default(false) | bool
delegate_to: "{{ groups.oo_first_master.0 }}"
@@ -225,7 +225,7 @@
- name: Prepare for node evacuation
command: >
{{ openshift.common.admin_binary }} --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig
- manage-node {{ openshift.common.hostname | lower }}
+ manage-node {{ openshift.node.nodename }}
--schedulable=false
delegate_to: "{{ groups.oo_first_master.0 }}"
when: openshift_certificates_redeploy_ca | default(false) | bool and was_schedulable | bool
@@ -233,7 +233,7 @@
- name: Evacuate node
command: >
{{ openshift.common.admin_binary }} --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig
- manage-node {{ openshift.common.hostname | lower }}
+ manage-node {{ openshift.node.nodename }}
--evacuate --force
delegate_to: "{{ groups.oo_first_master.0 }}"
when: openshift_certificates_redeploy_ca | default(false) | bool and was_schedulable | bool
@@ -241,7 +241,7 @@
- name: Set node schedulability
command: >
{{ openshift.common.admin_binary }} --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig
- manage-node {{ openshift.common.hostname | lower }} --schedulable=true
+ manage-node {{ openshift.node.nodename }} --schedulable=true
delegate_to: "{{ groups.oo_first_master.0 }}"
when: openshift_certificates_redeploy_ca | default(false) | bool and was_schedulable | bool
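Editor's note: these plays switch from `openshift.common.hostname | lower` to `openshift.node.nodename` when addressing nodes with `oc get node` / `oadm manage-node`, since the name a node registered with can differ from its lowercased hostname (cloud-provider naming, openshift_hostname overrides). A throwaway sketch for comparing the two facts on an existing cluster, not part of the patch:

---
- name: Show hostname vs. registered node name
  hosts: oo_nodes_to_config
  gather_facts: no
  tasks:
    - debug:
        msg: "hostname={{ openshift.common.hostname | lower }} nodename={{ openshift.node.nodename }}"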
diff --git a/playbooks/common/openshift-cluster/upgrades/cleanup_unused_images.yml b/playbooks/common/openshift-cluster/upgrades/cleanup_unused_images.yml
new file mode 100644
index 000000000..6e953be69
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/cleanup_unused_images.yml
@@ -0,0 +1,22 @@
+---
+- name: Check Docker image count
+ shell: "docker images -aq | wc -l"
+ register: docker_image_count
+ when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
+
+- debug: var=docker_image_count.stdout
+ when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
+
+- name: Remove unused Docker images for Docker 1.10+ migration
+ shell: "docker rmi `docker images -aq`"
+ # Will fail on images still in use:
+ failed_when: false
+ when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
+
+- name: Check Docker image count
+ shell: "docker images -aq | wc -l"
+ register: docker_image_count
+ when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
+
+- debug: var=docker_image_count.stdout
+ when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
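Editor's note: cleanup_unused_images.yml only acts when docker_upgrade_nuke_images is explicitly enabled; the `docker rmi` call is allowed to fail for images still in use, and the image count is logged before and after. A hedged sketch of wiring it in with the flag turned on (include path as used in the byo playbook earlier in this diff; enabling the flag is an operator decision):

---
- name: Cleanup unused Docker images
  hosts: oo_nodes_to_upgrade
  vars:
    docker_upgrade_nuke_images: true   # opt-in; images still in use survive the rmi
  tasks:
    - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml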
diff --git a/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml b/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml
index e8a20aa2b..78f6c46f3 100644
--- a/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml
+++ b/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml
@@ -9,6 +9,7 @@
local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX
register: local_cert_sync_tmpdir
changed_when: false
+ when: not (hostvars[groups.oo_first_master.0].service_signer_cert_stat.stat.exists | bool)
- name: Create service signer certificate
hosts: oo_first_master
@@ -17,6 +18,7 @@
command: mktemp -d /tmp/openshift-ansible-XXXXXXX
register: remote_cert_create_tmpdir
changed_when: false
+ when: not (hostvars[groups.oo_first_master.0].service_signer_cert_stat.stat.exists | bool)
- name: Create service signer certificate
command: >
@@ -27,6 +29,7 @@
--serial=service-signer.serial.txt
args:
chdir: "{{ remote_cert_create_tmpdir.stdout }}/"
+ when: not (hostvars[groups.oo_first_master.0].service_signer_cert_stat.stat.exists | bool)
- name: Retrieve service signer certificate
fetch:
@@ -38,12 +41,14 @@
with_items:
- "service-signer.crt"
- "service-signer.key"
+ when: not (hostvars[groups.oo_first_master.0].service_signer_cert_stat.stat.exists | bool)
- name: Delete remote temp directory
file:
name: "{{ remote_cert_create_tmpdir.stdout }}"
state: absent
changed_when: false
+ when: not (hostvars[groups.oo_first_master.0].service_signer_cert_stat.stat.exists | bool)
- name: Deploy service signer certificate
hosts: oo_masters_to_config
@@ -55,6 +60,7 @@
with_items:
- "service-signer.crt"
- "service-signer.key"
+ when: not (hostvars[groups.oo_first_master.0].service_signer_cert_stat.stat.exists | bool)
- name: Delete local temp directory
hosts: localhost
@@ -67,3 +73,4 @@
name: "{{ local_cert_sync_tmpdir.stdout }}"
state: absent
changed_when: false
+ when: not (hostvars[groups.oo_first_master.0].service_signer_cert_stat.stat.exists | bool)
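Editor's note: every task in create_service_signer_cert.yml is now guarded by service_signer_cert_stat registered on the first master, so the whole flow becomes a no-op when a signer certificate already exists. The registering task itself is not part of this hunk; a sketch of what it presumably looks like (path and variable name inferred from the condition, so treat both as assumptions):

---
- name: Check for existing service signer certificate
  hosts: oo_first_master
  tasks:
    - stat:
        path: "{{ openshift.common.config_base }}/master/service-signer.crt"
      register: service_signer_cert_stat
      changed_when: false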
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml b/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
index 8002af4fc..fc26d029e 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
@@ -1,7 +1,7 @@
---
# This snippet determines if a Docker upgrade is required by checking the inventory
-# variables, the available packages, and sets l_docker_version to True if so.
+# variables, the available packages, and sets l_docker_upgrade to True if so.
- set_fact:
docker_upgrade: True
diff --git a/playbooks/common/openshift-cluster/upgrades/init.yml b/playbooks/common/openshift-cluster/upgrades/init.yml
new file mode 100644
index 000000000..f3b3abe0d
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/init.yml
@@ -0,0 +1,50 @@
+---
+- include: ../verify_ansible_version.yml
+
+- hosts: localhost
+ connection: local
+ become: no
+ gather_facts: no
+ tasks:
+ - include_vars: ../../../byo/openshift-cluster/cluster_hosts.yml
+ - add_host:
+ name: "{{ item }}"
+ groups: l_oo_all_hosts
+ with_items: g_all_hosts | default([])
+
+- hosts: l_oo_all_hosts
+ gather_facts: no
+ tasks:
+ - include_vars: ../../../byo/openshift-cluster/cluster_hosts.yml
+
+- include: ../evaluate_groups.yml
+ vars:
+ # Do not allow adding hosts during upgrade.
+ g_new_master_hosts: []
+ g_new_node_hosts: []
+ openshift_cluster_id: "{{ cluster_id | default('default') }}"
+ openshift_deployment_type: "{{ deployment_type }}"
+
+- name: Set oo_options
+ hosts: oo_all_hosts
+ tasks:
+ - set_fact:
+ openshift_docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') }}"
+ when: openshift_docker_additional_registries is not defined
+ - set_fact:
+ openshift_docker_insecure_registries: "{{ lookup('oo_option', 'docker_insecure_registries') }}"
+ when: openshift_docker_insecure_registries is not defined
+ - set_fact:
+ openshift_docker_blocked_registries: "{{ lookup('oo_option', 'docker_blocked_registries') }}"
+ when: openshift_docker_blocked_registries is not defined
+ - set_fact:
+ openshift_docker_options: "{{ lookup('oo_option', 'docker_options') }}"
+ when: openshift_docker_options is not defined
+ - set_fact:
+ openshift_docker_log_driver: "{{ lookup('oo_option', 'docker_log_driver') }}"
+ when: openshift_docker_log_driver is not defined
+ - set_fact:
+ openshift_docker_log_options: "{{ lookup('oo_option', 'docker_log_options') }}"
+ when: openshift_docker_log_options is not defined
+
+- include: ../initialize_facts.yml
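Editor's note: each openshift_docker_* fact above is only filled from the oo_option lookup when the variable is not already defined, so inventory-supplied values always win over the legacy oo_option mechanism. A hedged example of the inventory-side equivalents (names taken from this diff; values and formats illustrative):

# group_vars/OSEv3.yml (illustrative)
openshift_docker_additional_registries: registry.example.com:5000
openshift_docker_insecure_registries: registry.example.com:5000
openshift_docker_log_driver: json-file
openshift_docker_log_options: ["max-size=50m", "max-file=5"]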
diff --git a/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
new file mode 100644
index 000000000..4e375ac26
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
@@ -0,0 +1,40 @@
+---
+- name: Filter list of nodes to be upgraded if necessary
+ hosts: oo_first_master
+ tasks:
+ - name: Retrieve list of openshift nodes matching upgrade label
+ command: >
+ {{ openshift.common.client_binary }}
+ get nodes
+ --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ --selector={{ openshift_upgrade_nodes_label }}
+ -o jsonpath='{.items[*].metadata.name}'
+ register: matching_nodes
+ changed_when: false
+ when: openshift_upgrade_nodes_label is defined
+
+ - set_fact:
+ nodes_to_upgrade: "{{ matching_nodes.stdout.split(' ') }}"
+ when: openshift_upgrade_nodes_label is defined
+
+ # We got a list of nodes with the label, now we need to match these with inventory hosts
+ # using their openshift.common.hostname fact.
+ - name: Map labelled nodes to inventory hosts
+ add_host:
+ name: "{{ item }}"
+ groups: temp_nodes_to_upgrade
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
+ with_items: " {{ groups['oo_nodes_to_config'] }}"
+ when: openshift_upgrade_nodes_label is defined and hostvars[item].openshift.common.hostname in nodes_to_upgrade
+ changed_when: false
+
+ # Build up the oo_nodes_to_upgrade group, use the list filtered by label if
+ # present, otherwise hit all nodes:
+ - name: Evaluate oo_nodes_to_upgrade
+ add_host:
+ name: "{{ item }}"
+ groups: oo_nodes_to_upgrade
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
+ with_items: "{{ groups['temp_nodes_to_upgrade'] | default(groups['oo_nodes_to_config']) }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/post.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
index e43954453..e43954453 100644
--- a/playbooks/common/openshift-cluster/upgrades/post.yml
+++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/pre.yml b/playbooks/common/openshift-cluster/upgrades/pre.yml
deleted file mode 100644
index 42a24eaf8..000000000
--- a/playbooks/common/openshift-cluster/upgrades/pre.yml
+++ /dev/null
@@ -1,311 +0,0 @@
----
-###############################################################################
-# Evaluate host groups and gather facts
-###############################################################################
-
-- include: ../initialize_facts.yml
-
-- name: Update repos and initialize facts on all hosts
- hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config:oo_lb_to_config
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_config
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
-
-- name: Evaluate additional groups for upgrade
- hosts: localhost
- connection: local
- become: no
- tasks:
- - name: Evaluate etcd_hosts_to_backup
- add_host:
- name: "{{ item }}"
- groups: etcd_hosts_to_backup
- with_items: groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master
-
-###############################################################################
-# Pre-upgrade checks
-###############################################################################
-- name: Verify upgrade can proceed on first master
- hosts: oo_first_master
- vars:
- g_pacemaker_upgrade_url_segment: "{{ 'org/latest' if deployment_type =='origin' else '.com/enterprise/3.1' }}"
- gather_facts: no
- tasks:
- - fail:
- msg: >
- This upgrade is only supported for atomic-enterprise, origin, openshift-enterprise, and online
- deployment types
- when: deployment_type not in ['atomic-enterprise', 'origin','openshift-enterprise', 'online']
-
- - fail:
- msg: >
- This upgrade does not support Pacemaker:
- https://docs.openshift.{{ g_pacemaker_upgrade_url_segment }}/install_config/upgrading/pacemaker_to_native_ha.html
- when: openshift.master.cluster_method is defined and openshift.master.cluster_method == 'pacemaker'
-
- # Error out in situations where the user has older versions specified in their
- # inventory in any of the openshift_release, openshift_image_tag, and
- # openshift_pkg_version variables. These must be removed or updated to proceed
- # with upgrade.
- # TODO: Should we block if you're *over* the next major release version as well?
- - fail:
- msg: >
- openshift_pkg_version is {{ openshift_pkg_version }} which is not a
- valid version for a {{ openshift_upgrade_target }} upgrade
- when: openshift_pkg_version is defined and openshift_pkg_version.split('-',1).1 | version_compare(openshift_upgrade_target ,'<')
-
- - fail:
- msg: >
- openshift_image_tag is {{ openshift_image_tag }} which is not a
- valid version for a {{ openshift_upgrade_target }} upgrade
- when: openshift_image_tag is defined and openshift_image_tag.split('v',1).1 | version_compare(openshift_upgrade_target ,'<')
-
- - set_fact:
- openshift_release: "{{ openshift_release[1:] }}"
- when: openshift_release is defined and openshift_release[0] == 'v'
-
- - fail:
- msg: >
- openshift_release is {{ openshift_release }} which is not a
- valid release for a {{ openshift_upgrade_target }} upgrade
- when: openshift_release is defined and not openshift_release | version_compare(openshift_upgrade_target ,'=')
-
-- include: ../../../common/openshift-cluster/initialize_openshift_version.yml
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
- # Docker role (a dependency) should be told not to do anything to installed version
- # of docker, we handle this separately during upgrade. (the inventory may have a
- # docker_version defined, we don't want to actually do it until later)
- docker_protect_installed_version: True
-
-- name: Verify master processes
- hosts: oo_masters_to_config
- roles:
- - openshift_facts
- tasks:
- - openshift_facts:
- role: master
- local_facts:
- ha: "{{ groups.oo_masters_to_config | length > 1 }}"
-
- - name: Ensure Master is running
- service:
- name: "{{ openshift.common.service_type }}-master"
- state: started
- enabled: yes
- when: openshift.master.ha is defined and not openshift.master.ha | bool and openshift.common.is_containerized | bool
-
- - name: Ensure HA Master is running
- service:
- name: "{{ openshift.common.service_type }}-master-api"
- state: started
- enabled: yes
- when: openshift.master.ha is defined and openshift.master.ha | bool and openshift.common.is_containerized | bool
-
- - name: Ensure HA Master is running
- service:
- name: "{{ openshift.common.service_type }}-master-controllers"
- state: started
- enabled: yes
- when: openshift.master.ha is defined and openshift.master.ha | bool and openshift.common.is_containerized | bool
-
-- name: Verify node processes
- hosts: oo_nodes_to_config
- roles:
- - openshift_facts
- - openshift_docker_facts
- tasks:
- - name: Ensure Node is running
- service:
- name: "{{ openshift.common.service_type }}-node"
- state: started
- enabled: yes
- when: openshift.common.is_containerized | bool
-
-- name: Verify upgrade targets
- hosts: oo_masters_to_config:oo_nodes_to_config
- vars:
- openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
- pre_tasks:
- - fail:
- msg: Verify OpenShift is already installed
- when: openshift.common.version is not defined
-
- - fail:
- msg: Verify the correct version was found
- when: verify_upgrade_version is defined and openshift_version != verify_upgrade_version
-
- - name: Clean package cache
- command: "{{ ansible_pkg_mgr }} clean all"
- when: not openshift.common.is_atomic | bool
-
- - set_fact:
- g_new_service_name: "{{ 'origin' if deployment_type =='origin' else 'atomic-openshift' }}"
- when: not openshift.common.is_containerized | bool
-
- - name: Verify containers are available for upgrade
- command: >
- docker pull {{ openshift.common.cli_image }}:{{ openshift_image_tag }}
- register: pull_result
- changed_when: "'Downloaded newer image' in pull_result.stdout"
- when: openshift.common.is_containerized | bool
-
- - name: Check latest available OpenShift RPM version
- command: >
- {{ repoquery_cmd }} --qf '%{version}' "{{ openshift.common.service_type }}"
- failed_when: false
- changed_when: false
- register: avail_openshift_version
- when: not openshift.common.is_containerized | bool
-
- - name: Verify OpenShift RPMs are available for upgrade
- fail:
- msg: "OpenShift {{ avail_openshift_version.stdout }} is available, but {{ openshift_upgrade_target }} or greater is required"
- when: not openshift.common.is_containerized | bool and not avail_openshift_version | skipped and avail_openshift_version.stdout | default('0.0', True) | version_compare(openshift_release, '<')
-
- - fail:
- msg: "This upgrade playbook must be run against OpenShift {{ openshift_upgrade_min }} or later"
- when: deployment_type == 'origin' and openshift.common.version | version_compare(openshift_upgrade_min,'<')
-
-- name: Verify docker upgrade targets
- hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config
- tasks:
- # Only check if docker upgrade is required if docker_upgrade is not
- # already set to False.
- - include: docker/upgrade_check.yml
- when: docker_upgrade is not defined or docker_upgrade | bool and not openshift.common.is_atomic | bool
-
- # Additional checks for Atomic hosts:
-
- - name: Determine available Docker
- shell: "rpm -q --queryformat '---\ncurr_version: %{VERSION}\navail_version: \n' docker"
- register: g_atomic_docker_version_result
- when: openshift.common.is_atomic | bool
-
- - set_fact:
- l_docker_version: "{{ g_atomic_docker_version_result.stdout | from_yaml }}"
- when: openshift.common.is_atomic | bool
-
- - fail:
- msg: This playbook requires access to Docker 1.10 or later
- when: openshift.common.is_atomic | bool and l_docker_version.avail_version | default(l_docker_version.curr_version, true) | version_compare('1.10','<')
-
- - set_fact:
- pre_upgrade_complete: True
-
-
-##############################################################################
-# Gate on pre-upgrade checks
-##############################################################################
-- name: Gate on pre-upgrade checks
- hosts: localhost
- connection: local
- become: no
- vars:
- pre_upgrade_hosts: "{{ groups.oo_masters_to_config | union(groups.oo_nodes_to_config) }}"
- tasks:
- - set_fact:
- pre_upgrade_completed: "{{ hostvars
- | oo_select_keys(pre_upgrade_hosts)
- | oo_collect('inventory_hostname', {'pre_upgrade_complete': true}) }}"
- - set_fact:
- pre_upgrade_failed: "{{ pre_upgrade_hosts | difference(pre_upgrade_completed) }}"
- - fail:
- msg: "Upgrade cannot continue. The following hosts did not complete pre-upgrade checks: {{ pre_upgrade_failed | join(',') }}"
- when: pre_upgrade_failed | length > 0
-
-###############################################################################
-# Backup etcd
-###############################################################################
-- name: Backup etcd
- hosts: etcd_hosts_to_backup
- vars:
- embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}"
- timestamp: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
- roles:
- - openshift_facts
- tasks:
- # Ensure we persist the etcd role for this host in openshift_facts
- - openshift_facts:
- role: etcd
- local_facts: {}
- when: "'etcd' not in openshift"
-
- - stat: path=/var/lib/openshift
- register: var_lib_openshift
-
- - stat: path=/var/lib/origin
- register: var_lib_origin
-
- - name: Create origin symlink if necessary
- file: src=/var/lib/openshift/ dest=/var/lib/origin state=link
- when: var_lib_openshift.stat.exists == True and var_lib_origin.stat.exists == False
-
- # TODO: replace shell module with command and update later checks
- # We assume to be using the data dir for all backups.
- - name: Check available disk space for etcd backup
- shell: df --output=avail -k {{ openshift.common.data_dir }} | tail -n 1
- register: avail_disk
-
- # TODO: replace shell module with command and update later checks
- - name: Check current embedded etcd disk usage
- shell: du -k {{ openshift.etcd.etcd_data_dir }} | tail -n 1 | cut -f1
- register: etcd_disk_usage
- when: embedded_etcd | bool
-
- - name: Abort if insufficient disk space for etcd backup
- fail:
- msg: >
- {{ etcd_disk_usage.stdout }} Kb disk space required for etcd backup,
- {{ avail_disk.stdout }} Kb available.
- when: (embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int)
-
- - name: Install etcd (for etcdctl)
- action: "{{ ansible_pkg_mgr }} name=etcd state=latest"
- when: not openshift.common.is_atomic | bool
-
- - name: Generate etcd backup
- command: >
- etcdctl backup --data-dir={{ openshift.etcd.etcd_data_dir }}
- --backup-dir={{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}
-
- - set_fact:
- etcd_backup_complete: True
-
- - name: Display location of etcd backup
- debug:
- msg: "Etcd backup created in {{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}"
-
-
-##############################################################################
-# Gate on etcd backup
-##############################################################################
-- name: Gate on etcd backup
- hosts: localhost
- connection: local
- become: no
- tasks:
- - set_fact:
- etcd_backup_completed: "{{ hostvars
- | oo_select_keys(groups.etcd_hosts_to_backup)
- | oo_collect('inventory_hostname', {'etcd_backup_complete': true}) }}"
- - set_fact:
- etcd_backup_failed: "{{ groups.etcd_hosts_to_backup | difference(etcd_backup_completed) }}"
- - fail:
- msg: "Upgrade cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}"
- when: etcd_backup_failed | length > 0
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/gate_checks.yml b/playbooks/common/openshift-cluster/upgrades/pre/gate_checks.yml
new file mode 100644
index 000000000..8ecae4539
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/pre/gate_checks.yml
@@ -0,0 +1,6 @@
+---
+- name: Flag pre-upgrade checks complete for hosts without errors
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ - set_fact:
+ pre_upgrade_complete: True
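Editor's note: gate_checks.yml only flags hosts that reached the end of pre-upgrade without an error; the gate that consumes the flag lives in the calling upgrade playbook. A sketch of that gate, following the same oo_select_keys/oo_collect pattern the removed pre.yml used and the etcd-backup gate later in this diff still uses:

---
- name: Gate on pre-upgrade checks
  hosts: localhost
  connection: local
  become: no
  vars:
    pre_upgrade_hosts: "{{ groups.oo_masters_to_config | union(groups.oo_nodes_to_upgrade) | union(groups.oo_etcd_to_config | default([])) }}"
  tasks:
    - set_fact:
        pre_upgrade_completed: "{{ hostvars
                                   | oo_select_keys(pre_upgrade_hosts)
                                   | oo_collect('inventory_hostname', {'pre_upgrade_complete': true}) }}"
    - set_fact:
        pre_upgrade_failed: "{{ pre_upgrade_hosts | difference(pre_upgrade_completed) }}"
    - fail:
        msg: "Upgrade cannot continue. The following hosts did not complete pre-upgrade checks: {{ pre_upgrade_failed | join(',') }}"
      when: pre_upgrade_failed | length > 0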
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/roles b/playbooks/common/openshift-cluster/upgrades/pre/roles
new file mode 120000
index 000000000..415645be6
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/pre/roles
@@ -0,0 +1 @@
+../../../../../roles/ \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
new file mode 100644
index 000000000..06eb5f936
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
@@ -0,0 +1,31 @@
+---
+- name: Verify master processes
+ hosts: oo_masters_to_config
+ roles:
+ - openshift_facts
+ tasks:
+ - openshift_facts:
+ role: master
+ local_facts:
+ ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+
+ - name: Ensure Master is running
+ service:
+ name: "{{ openshift.common.service_type }}-master"
+ state: started
+ enabled: yes
+ when: openshift.master.ha is defined and not openshift.master.ha | bool and openshift.common.is_containerized | bool
+
+ - name: Ensure HA Master is running
+ service:
+ name: "{{ openshift.common.service_type }}-master-api"
+ state: started
+ enabled: yes
+ when: openshift.master.ha is defined and openshift.master.ha | bool and openshift.common.is_containerized | bool
+
+ - name: Ensure HA Master is running
+ service:
+ name: "{{ openshift.common.service_type }}-master-controllers"
+ state: started
+ enabled: yes
+ when: openshift.master.ha is defined and openshift.master.ha | bool and openshift.common.is_containerized | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
new file mode 100644
index 000000000..ba4d77617
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
@@ -0,0 +1,23 @@
+---
+- name: Verify docker upgrade targets
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ # Only check if docker upgrade is required if docker_upgrade is not
+ # already set to False.
+ - include: ../docker/upgrade_check.yml
+ when: docker_upgrade is not defined or docker_upgrade | bool and not openshift.common.is_atomic | bool
+
+ # Additional checks for Atomic hosts:
+
+ - name: Determine available Docker
+ shell: "rpm -q --queryformat '---\ncurr_version: %{VERSION}\navail_version: \n' docker"
+ register: g_atomic_docker_version_result
+ when: openshift.common.is_atomic | bool
+
+ - set_fact:
+ l_docker_version: "{{ g_atomic_docker_version_result.stdout | from_yaml }}"
+ when: openshift.common.is_atomic | bool
+
+ - fail:
+ msg: This playbook requires access to Docker 1.10 or later
+ when: openshift.common.is_atomic | bool and l_docker_version.avail_version | default(l_docker_version.curr_version, true) | version_compare('1.10','<')
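Editor's note: the Atomic-host check uses rpm's --queryformat to emit a tiny YAML document so that from_yaml can parse it without any text munging. For an installed docker 1.10.3 the registered stdout parses into roughly the structure below (version number illustrative); because avail_version is left empty by the queryformat, the default() filter falls back to curr_version in the final comparison:

# Illustrative result of g_atomic_docker_version_result.stdout | from_yaml
l_docker_version:
  curr_version: "1.10.3"   # installed docker version (example value)
  avail_version: null      # empty in the queryformat, so default(curr_version, true) applies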
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
new file mode 100644
index 000000000..9a959a959
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
@@ -0,0 +1,37 @@
+---
+- name: Verify upgrade can proceed on first master
+ hosts: oo_first_master
+ gather_facts: no
+ tasks:
+ - fail:
+ msg: >
+ This upgrade is only supported for origin, openshift-enterprise, and online
+ deployment types
+ when: deployment_type not in ['origin','openshift-enterprise', 'online']
+
+ # Error out in situations where the user has older versions specified in their
+ # inventory in any of the openshift_release, openshift_image_tag, and
+ # openshift_pkg_version variables. These must be removed or updated to proceed
+ # with upgrade.
+ # TODO: Should we block if you're *over* the next major release version as well?
+ - fail:
+ msg: >
+ openshift_pkg_version is {{ openshift_pkg_version }} which is not a
+ valid version for a {{ openshift_upgrade_target }} upgrade
+ when: openshift_pkg_version is defined and openshift_pkg_version.split('-',1).1 | version_compare(openshift_upgrade_target ,'<')
+
+ - fail:
+ msg: >
+ openshift_image_tag is {{ openshift_image_tag }} which is not a
+ valid version for a {{ openshift_upgrade_target }} upgrade
+ when: openshift_image_tag is defined and openshift_image_tag.split('v',1).1 | version_compare(openshift_upgrade_target ,'<')
+
+ - set_fact:
+ openshift_release: "{{ openshift_release[1:] }}"
+ when: openshift_release is defined and openshift_release[0] == 'v'
+
+ - fail:
+ msg: >
+ openshift_release is {{ openshift_release }} which is not a
+ valid release for a {{ openshift_upgrade_target }} upgrade
+ when: openshift_release is defined and not openshift_release | version_compare(openshift_upgrade_target ,'=')
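Editor's note: these checks assume openshift_pkg_version begins with a dash and openshift_image_tag with a "v", which is why split('-',1).1 and split('v',1).1 are compared against openshift_upgrade_target, and why a leading "v" on openshift_release is stripped before the equality check. A hedged example of inventory values that would pass a 3.3 upgrade (version numbers illustrative):

# Illustrative inventory values consistent with openshift_upgrade_target == "3.3"
openshift_release: "3.3"            # "v3.3" also passes once the leading v is stripped
openshift_image_tag: "v3.3.0.32"    # split('v',1).1 -> "3.3.0.32", not older than 3.3
openshift_pkg_version: "-3.3.0.32"  # split('-',1).1 -> "3.3.0.32", not older than 3.3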
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_nodes_running.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_nodes_running.yml
new file mode 100644
index 000000000..354af3cde
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_nodes_running.yml
@@ -0,0 +1,13 @@
+---
+- name: Verify node processes
+ hosts: oo_nodes_to_config
+ roles:
+ - openshift_facts
+ - openshift_docker_facts
+ tasks:
+ - name: Ensure Node is running
+ service:
+ name: "{{ openshift.common.service_type }}-node"
+ state: started
+ enabled: yes
+ when: openshift.common.is_containerized | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
new file mode 100644
index 000000000..9632626a4
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
@@ -0,0 +1,45 @@
+---
+- name: Verify upgrade targets
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ vars:
+ openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
+ pre_tasks:
+ - fail:
+ msg: Verify OpenShift is already installed
+ when: openshift.common.version is not defined
+
+ - fail:
+ msg: Verify the correct version was found
+ when: verify_upgrade_version is defined and openshift_version != verify_upgrade_version
+
+ - name: Clean package cache
+ command: "{{ ansible_pkg_mgr }} clean all"
+ when: not openshift.common.is_atomic | bool
+
+ - set_fact:
+ g_new_service_name: "{{ 'origin' if deployment_type =='origin' else 'atomic-openshift' }}"
+ when: not openshift.common.is_containerized | bool
+
+ - name: Verify containers are available for upgrade
+ command: >
+ docker pull {{ openshift.common.cli_image }}:{{ openshift_image_tag }}
+ register: pull_result
+ changed_when: "'Downloaded newer image' in pull_result.stdout"
+ when: openshift.common.is_containerized | bool
+
+ - name: Check latest available OpenShift RPM version
+ command: >
+ {{ repoquery_cmd }} --qf '%{version}' "{{ openshift.common.service_type }}"
+ failed_when: false
+ changed_when: false
+ register: avail_openshift_version
+ when: not openshift.common.is_containerized | bool
+
+ - name: Verify OpenShift RPMs are available for upgrade
+ fail:
+ msg: "OpenShift {{ avail_openshift_version.stdout }} is available, but {{ openshift_upgrade_target }} or greater is required"
+ when: not openshift.common.is_containerized | bool and not avail_openshift_version | skipped and avail_openshift_version.stdout | default('0.0', True) | version_compare(openshift_release, '<')
+
+ - fail:
+ msg: "This upgrade playbook must be run against OpenShift {{ openshift_upgrade_min }} or later"
+ when: deployment_type == 'origin' and openshift.common.version | version_compare(openshift_upgrade_min,'<')
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index ba4fc63be..2c641e21e 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -1,39 +1,93 @@
---
###############################################################################
-# The restart playbook should be run after this playbook completes.
+# Upgrade Masters
###############################################################################
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config
+- name: Evaluate additional groups for upgrade
+ hosts: localhost
+ connection: local
+ become: no
tasks:
- - name: Check Docker image count
- shell: "docker images -aq | wc -l"
- register: docker_image_count
- when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
+ - name: Evaluate etcd_hosts_to_backup
+ add_host:
+ name: "{{ item }}"
+ groups: etcd_hosts_to_backup
+ with_items: groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master
+
+- name: Backup etcd
+ hosts: etcd_hosts_to_backup
+ vars:
+ embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}"
+ timestamp: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
+ roles:
+ - openshift_facts
+ tasks:
+ # Ensure we persist the etcd role for this host in openshift_facts
+ - openshift_facts:
+ role: etcd
+ local_facts: {}
+ when: "'etcd' not in openshift"
+
+ - stat: path=/var/lib/openshift
+ register: var_lib_openshift
+
+ - stat: path=/var/lib/origin
+ register: var_lib_origin
+
+ - name: Create origin symlink if necessary
+ file: src=/var/lib/openshift/ dest=/var/lib/origin state=link
+ when: var_lib_openshift.stat.exists == True and var_lib_origin.stat.exists == False
+
+ # TODO: replace shell module with command and update later checks
+ # We assume to be using the data dir for all backups.
+ - name: Check available disk space for etcd backup
+ shell: df --output=avail -k {{ openshift.common.data_dir }} | tail -n 1
+ register: avail_disk
+
+ # TODO: replace shell module with command and update later checks
+ - name: Check current embedded etcd disk usage
+ shell: du -k {{ openshift.etcd.etcd_data_dir }} | tail -n 1 | cut -f1
+ register: etcd_disk_usage
+ when: embedded_etcd | bool
+
+ - name: Abort if insufficient disk space for etcd backup
+ fail:
+ msg: >
+ {{ etcd_disk_usage.stdout }} KB disk space required for etcd backup,
+ {{ avail_disk.stdout }} KB available.
+ when: (embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int)
+
+ - name: Install etcd (for etcdctl)
+ action: "{{ ansible_pkg_mgr }} name=etcd state=latest"
+ when: not openshift.common.is_atomic | bool
+
+ - name: Generate etcd backup
+ command: >
+ etcdctl backup --data-dir={{ openshift.etcd.etcd_data_dir }}
+ --backup-dir={{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}
- - debug: var=docker_image_count.stdout
- when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
+ - set_fact:
+ etcd_backup_complete: True
- - name: Remove unused Docker images for Docker 1.10+ migration
- shell: "docker rmi `docker images -aq`"
- # Will fail on images still in use:
- failed_when: false
- when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
+ - name: Display location of etcd backup
+ debug:
+ msg: "Etcd backup created in {{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}"
- - name: Check Docker image count
- shell: "docker images -aq | wc -l"
- register: docker_image_count
- when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
- - debug: var=docker_image_count.stdout
- when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
+- name: Gate on etcd backup
+ hosts: localhost
+ connection: local
+ become: no
+ tasks:
+ - set_fact:
+ etcd_backup_completed: "{{ hostvars
+ | oo_select_keys(groups.etcd_hosts_to_backup)
+ | oo_collect('inventory_hostname', {'etcd_backup_complete': true}) }}"
+ - set_fact:
+ etcd_backup_failed: "{{ groups.etcd_hosts_to_backup | difference(etcd_backup_completed) }}"
+ - fail:
+ msg: "Upgrade cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}"
+ when: etcd_backup_failed | length > 0
-###############################################################################
-# Upgrade Masters
-###############################################################################
- name: Upgrade master packages
hosts: oo_masters_to_config
handlers:
@@ -57,7 +111,6 @@
# Create service signer cert when missing. Service signer certificate
# is added to master config in the master config hook for v3_3.
- include: create_service_signer_cert.yml
- when: not (hostvars[groups.oo_first_master.0].service_signer_cert_stat.stat.exists | bool)
- name: Upgrade master config and systemd units
hosts: oo_masters_to_config
@@ -143,9 +196,9 @@
origin_reconcile_bindings: "{{ deployment_type == 'origin' and openshift_version | version_compare('1.0.6', '>') }}"
ent_reconcile_bindings: true
openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
- # Similar to pre.yml, we don't want to upgrade docker during the openshift_cli role,
- # it will be updated when we perform node upgrade.
- docker_protect_installed_version: True
+ # Another spot where we assume docker is running and do not want to accidentally trigger an unsafe
+ # restart.
+ skip_docker_role: True
tasks:
- name: Verifying the correct commandline tools are available
shell: grep {{ verify_upgrade_version }} {{ openshift.common.admin_binary}}
@@ -177,71 +230,6 @@
- set_fact:
reconcile_complete: True
-###############################################################################
-# Upgrade Nodes
-###############################################################################
-
-# Here we handle all tasks that might require a node evac. (upgrading docker, and the node service)
-- name: Perform upgrades that may require node evacuation
- hosts: oo_masters_to_config:oo_etcd_to_config:oo_nodes_to_config
- serial: 1
- any_errors_fatal: true
- roles:
- - openshift_facts
- handlers:
- - include: ../../../../roles/openshift_node/handlers/main.yml
- static: yes
- tasks:
- # TODO: To better handle re-trying failed upgrades, it would be nice to check if the node
- # or docker actually needs an upgrade before proceeding. Perhaps best to save this until
- # we merge upgrade functionality into the base roles and a normal config.yml playbook run.
- - name: Determine if node is currently scheduleable
- command: >
- {{ openshift.common.client_binary }} get node {{ openshift.common.hostname | lower }} -o json
- register: node_output
- delegate_to: "{{ groups.oo_first_master.0 }}"
- changed_when: false
- when: inventory_hostname in groups.oo_nodes_to_config
-
- - set_fact:
- was_schedulable: "{{ 'unschedulable' not in (node_output.stdout | from_json).spec }}"
- when: inventory_hostname in groups.oo_nodes_to_config
-
- - name: Mark unschedulable if host is a node
- command: >
- {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --schedulable=false
- delegate_to: "{{ groups.oo_first_master.0 }}"
- when: inventory_hostname in groups.oo_nodes_to_config
-
- - name: Evacuate Node for Kubelet upgrade
- command: >
- {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --evacuate --force
- delegate_to: "{{ groups.oo_first_master.0 }}"
- when: inventory_hostname in groups.oo_nodes_to_config
-
- - include: docker/upgrade.yml
- when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift.common.is_atomic | bool
- - include: "{{ node_config_hook }}"
- when: node_config_hook is defined and inventory_hostname in groups.oo_nodes_to_config
-
- - include: rpm_upgrade.yml
- vars:
- component: "node"
- openshift_version: "{{ openshift_pkg_version | default('') }}"
- when: inventory_hostname in groups.oo_nodes_to_config and not openshift.common.is_containerized | bool
-
- - include: containerized_node_upgrade.yml
- when: inventory_hostname in groups.oo_nodes_to_config and openshift.common.is_containerized | bool
-
- - meta: flush_handlers
-
- - name: Set node schedulability
- command: >
- {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --schedulable=true
- delegate_to: "{{ groups.oo_first_master.0 }}"
- when: inventory_hostname in groups.oo_nodes_to_config and was_schedulable | bool
-
-
##############################################################################
# Gate on reconcile
##############################################################################
@@ -259,3 +247,13 @@
- fail:
msg: "Upgrade cannot continue. The following masters did not finish reconciling: {{ reconcile_failed | join(',') }}"
when: reconcile_failed | length > 0
+
+- name: Upgrade Docker on dedicated containerized etcd hosts
+ hosts: oo_etcd_to_config:!oo_nodes_to_upgrade
+ serial: 1
+ any_errors_fatal: true
+ roles:
+ - openshift_facts
+ tasks:
+ - include: docker/upgrade.yml
+ when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift.common.is_atomic | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
new file mode 100644
index 000000000..9b572dcdf
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -0,0 +1,75 @@
+---
+- name: Evacuate and upgrade nodes
+ hosts: oo_nodes_to_upgrade
+ # This var must be set with -e on invocation, as it is not a per-host inventory var
+ # and is evaluated early. Values such as "20%" can also be used.
+ serial: "{{ openshift_upgrade_nodes_serial | default(1) }}"
+ any_errors_fatal: true
+ roles:
+ - openshift_facts
+ - docker
+ handlers:
+ - include: ../../../../roles/openshift_node/handlers/main.yml
+ static: yes
+ pre_tasks:
+ # TODO: To better handle re-trying failed upgrades, it would be nice to check if the node
+ # or docker actually needs an upgrade before proceeding. Perhaps best to save this until
+ # we merge upgrade functionality into the base roles and a normal config.yml playbook run.
+ - name: Determine if node is currently schedulable
+ command: >
+ {{ openshift.common.client_binary }} get node {{ openshift.node.nodename | lower }} -o json
+ register: node_output
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+ changed_when: false
+ when: inventory_hostname in groups.oo_nodes_to_upgrade
+
+ - set_fact:
+ was_schedulable: "{{ 'unschedulable' not in (node_output.stdout | from_json).spec }}"
+ when: inventory_hostname in groups.oo_nodes_to_upgrade
+
+ - name: Mark unschedulable if host is a node
+ command: >
+ {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename | lower }} --schedulable=false
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+ when: inventory_hostname in groups.oo_nodes_to_upgrade
+ # NOTE: There is a transient "object has been modified" error here, allow a couple
+ # retries for a more reliable upgrade.
+ register: node_unsched
+ until: node_unsched.rc == 0
+ retries: 3
+ delay: 1
+
+ - name: Evacuate Node for Kubelet upgrade
+ command: >
+ {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename | lower }} --evacuate --force
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+ when: inventory_hostname in groups.oo_nodes_to_upgrade
+ tasks:
+ - include: docker/upgrade.yml
+ when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift.common.is_atomic | bool
+
+ - include: "{{ node_config_hook }}"
+ when: node_config_hook is defined and inventory_hostname in groups.oo_nodes_to_upgrade
+
+ - include: rpm_upgrade.yml
+ vars:
+ component: "node"
+ openshift_version: "{{ openshift_pkg_version | default('') }}"
+ when: inventory_hostname in groups.oo_nodes_to_upgrade and not openshift.common.is_containerized | bool
+
+ - include: containerized_node_upgrade.yml
+ when: inventory_hostname in groups.oo_nodes_to_upgrade and openshift.common.is_containerized | bool
+
+ - meta: flush_handlers
+
+ - name: Set node schedulability
+ command: >
+ {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename | lower }} --schedulable=true
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+ when: inventory_hostname in groups.oo_nodes_to_upgrade and was_schedulable | bool
+ register: node_sched
+ until: node_sched.rc == 0
+ retries: 3
+ delay: 1
+
+
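Editor's note: the serial value is evaluated before any per-host facts, so it has to arrive as an extra var; plain counts and percentages both work, per the comment at the top of the play. A hedged extra-vars sketch combining the batch size with the optional label filter introduced earlier in this diff (values illustrative), passed with `-e @<file>`:

# upgrade_nodes_vars.yml (illustrative values)
openshift_upgrade_nodes_serial: "20%"            # evacuate and upgrade 20% of nodes per batch
openshift_upgrade_nodes_label: "region=primary"  # optional: restrict the run to labelled nodes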
diff --git a/playbooks/common/openshift-cluster/verify_ansible_version.yml b/playbooks/common/openshift-cluster/verify_ansible_version.yml
index 2a143b065..d75b23bf7 100644
--- a/playbooks/common/openshift-cluster/verify_ansible_version.yml
+++ b/playbooks/common/openshift-cluster/verify_ansible_version.yml
@@ -1,5 +1,6 @@
---
-- hosts: localhost
+- name: Verify Ansible version is greater than or equal to 2.1.0.0
+ hosts: localhost
connection: local
become: no
gather_facts: no
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index 7f60cd9e4..a53c55c14 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -1,5 +1,5 @@
---
-- name: Set master facts
+- name: Gather and set facts for master hosts
hosts: oo_masters_to_config
vars:
t_oo_option_master_debug_level: "{{ lookup('oo_option', 'openshift_master_debug_level') }}"
@@ -91,7 +91,7 @@
register: g_master_mktemp
changed_when: False
-- name: Check for cached session secrets
+- name: Determine if session secrets must be generated
hosts: oo_first_master
roles:
- role: openshift_facts
diff --git a/playbooks/common/openshift-master/scaleup.yml b/playbooks/common/openshift-master/scaleup.yml
index 7304fca56..56ed09e1b 100644
--- a/playbooks/common/openshift-master/scaleup.yml
+++ b/playbooks/common/openshift-master/scaleup.yml
@@ -40,6 +40,10 @@
--cacert {{ openshift.common.config_base }}/master/ca.crt
{% endif %}
{{ openshift.master.api_url }}/healthz/ready
+ args:
+ # Disables the following warning:
+ # Consider using get_url or uri module rather than running curl
+ warn: no
register: api_available_output
until: api_available_output.stdout == 'ok'
retries: 120
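Editor's note: `warn: no` merely silences Ansible's hint that curl-style health checks could use the uri module. For reference, the uri-based equivalent of this readiness wait would look roughly like the sketch below; it is an assumption, not part of the patch, and the playbook keeps curl so the conditional --cacert handling stays in a single command:

- name: Wait for master API to become available (uri-based sketch)
  uri:
    url: "{{ openshift.master.api_url }}/healthz/ready"
    validate_certs: no      # the curl version instead passes --cacert on the first master
    return_content: yes
  register: api_available_output
  until: api_available_output.status == 200 and api_available_output.content == 'ok'
  retries: 120
  delay: 1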
diff --git a/playbooks/common/openshift-nfs/config.yml b/playbooks/common/openshift-nfs/config.yml
index ba7530ed7..000e46e80 100644
--- a/playbooks/common/openshift-nfs/config.yml
+++ b/playbooks/common/openshift-nfs/config.yml
@@ -1,5 +1,5 @@
---
-- name: Configure nfs hosts
+- name: Configure nfs
hosts: oo_nfs_to_config
roles:
- role: openshift_facts
diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml
index 66eb293e5..364a62dd0 100644
--- a/playbooks/common/openshift-node/config.yml
+++ b/playbooks/common/openshift-node/config.yml
@@ -45,7 +45,7 @@
with_items: "{{ groups.oo_nodes_to_config | default([]) }}"
when: hostvars[item].openshift.common is defined and hostvars[item].openshift.common.is_containerized | bool and (item in groups.oo_nodes_to_config and item in groups.oo_masters_to_config)
-- name: Configure node instances
+- name: Configure containerized nodes
hosts: oo_containerized_master_nodes
serial: 1
vars:
@@ -60,12 +60,12 @@
when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
openshift_generate_no_proxy_hosts | default(True) | bool }}"
roles:
+ - role: openshift_common
- role: openshift_clock
- role: openshift_docker
- role: openshift_node_certificates
openshift_ca_host: "{{ groups.oo_first_master.0 }}"
- role: openshift_cloud_provider
- - role: openshift_common
- role: openshift_node_dnsmasq
when: openshift.common.use_dnsmasq
- role: os_firewall
@@ -85,7 +85,7 @@
when: openshift.node.use_openshift_sdn | bool
- role: openshift_node
-- name: Configure node instances
+- name: Configure nodes
hosts: oo_nodes_to_config:!oo_containerized_master_nodes
vars:
openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"
@@ -99,12 +99,12 @@
when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
openshift_generate_no_proxy_hosts | default(True) | bool }}"
roles:
+ - role: openshift_common
- role: openshift_clock
- role: openshift_docker
- role: openshift_node_certificates
openshift_ca_host: "{{ groups.oo_first_master.0 }}"
- role: openshift_cloud_provider
- - role: openshift_common
- role: openshift_node_dnsmasq
when: openshift.common.use_dnsmasq
- role: os_firewall
@@ -153,7 +153,7 @@
- file: name={{ mktemp.stdout }} state=absent
changed_when: False
-- name: Set schedulability
+- name: Set node schedulability
hosts: oo_first_master
vars:
openshift_nodes: "{{ groups.oo_nodes_to_config | default([]) }}"
@@ -172,6 +172,10 @@
--cacert {{ openshift.common.config_base }}/master/ca.crt
{% endif %}
{{ openshift.master.api_url }}/healthz/ready
+ args:
+ # Disables the following warning:
+ # Consider using get_url or uri module rather than running curl
+ warn: no
register: api_available_output
until: api_available_output.stdout == 'ok'
retries: 120
diff --git a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
index c5c479052..60cf21a5b 100644
--- a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
+++ b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
@@ -1,7 +1,7 @@
---
- name: Launch instance(s)
gce:
- instance_names: "{{ instances }}"
+ instance_names: "{{ instances|join(',') }}"
machine_type: "{{ gce_machine_type | default(deployment_vars[deployment_type].machine_type, true) }}"
image: "{{ gce_machine_image | default(deployment_vars[deployment_type].image, true) }}"
service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
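Editor's note: the gce module appears to expect instance_names as a single comma-separated string rather than a list, hence the join(',') on the list built by the calling play. A minimal sketch of the transformation in isolation (host names illustrative):

- set_fact:
    instances: ['node1.example.com', 'node2.example.com']   # illustrative
- debug:
    msg: "{{ instances | join(',') }}"   # -> "node1.example.com,node2.example.com"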
diff --git a/playbooks/openstack/openshift-cluster/dns.yml b/playbooks/openstack/openshift-cluster/dns.yml
deleted file mode 100644
index 285f8fa78..000000000
--- a/playbooks/openstack/openshift-cluster/dns.yml
+++ /dev/null
@@ -1,52 +0,0 @@
-- name: Populate oo_dns_hosts_to_update group
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- vars_files:
- - vars.yml
- - cluster_hosts.yml
- tasks:
- - name: Evaluate oo_dns_hosts_to_update
- add_host:
- name: "{{ item }}"
- groups: oo_dns_hosts_to_update
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- with_items: "{{ groups[cluster_id ~ '-dns'] }}"
-
- - name: Evaluate oo_hosts_to_add_in_dns
- add_host:
- name: "{{ item }}"
- groups: oo_hosts_to_add_in_dns
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- with_items: "{{ groups['meta-clusterid_' ~ cluster_id] }}"
-
-- name: Gather facts
- hosts: oo_hosts_to_add_in_dns
- vars_files:
- - vars.yml
- - cluster_hosts.yml
-
-- name: Configure the DNS
- hosts: oo_dns_hosts_to_update
- vars_files:
- - vars.yml
- - cluster_hosts.yml
- roles:
- # Explicitly calling openshift_facts because it appears that when
- # rhel_subscribe is skipped that the openshift_facts dependency for
- # openshift_repos is also skipped (this is the case at least for Ansible
- # 2.0.2)
- - openshift_facts
- - role: rhel_subscribe
- when: deployment_type in ["enterprise", "atomic-enterprise", "openshift-enterprise"] and
- ansible_distribution == "RedHat" and
- lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) |
- default('no', True) | lower in ['no', 'false']
-
- - { role: dns,
- dns_forwarders: "{{ openstack_network_dns }}",
- dns_zones: [ novalocal, openstacklocal ],
- dns_all_hosts: "{{ g_all_hosts }}" }
diff --git a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
index 458cf5ac7..755090f94 100644
--- a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
+++ b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
@@ -88,11 +88,6 @@ parameters:
label: Infra image
description: Name of the image for the infra node servers
- dns_image:
- type: string
- label: DNS image
- description: Name of the image for the DNS server
-
etcd_flavor:
type: string
label: Etcd flavor
@@ -113,11 +108,6 @@ parameters:
label: Infra flavor
description: Flavor of the infra node servers
- dns_flavor:
- type: string
- label: DNS flavor
- description: Flavor of the DNS server
-
outputs:
etcd_names:
@@ -168,26 +158,6 @@ outputs:
description: Floating IPs of the nodes
value: { get_attr: [ infra_nodes, floating_ip ] }
- dns_name:
- description: Name of the DNS
- value:
- get_attr:
- - dns
- - name
-
- dns_floating_ip:
- description: Floating IP of the DNS
- value:
- get_attr:
- - dns
- - addresses
- - str_replace:
- template: openshift-ansible-cluster_id-net
- params:
- cluster_id: { get_param: cluster_id }
- - 1
- - addr
-
resources:
net:
@@ -213,22 +183,7 @@ resources:
template: subnet_24_prefix.0/24
params:
subnet_24_prefix: { get_param: subnet_24_prefix }
- allocation_pools:
- - start:
- str_replace:
- template: subnet_24_prefix.3
- params:
- subnet_24_prefix: { get_param: subnet_24_prefix }
- end:
- str_replace:
- template: subnet_24_prefix.254
- params:
- subnet_24_prefix: { get_param: subnet_24_prefix }
- dns_nameservers:
- - str_replace:
- template: subnet_24_prefix.2
- params:
- subnet_24_prefix: { get_param: subnet_24_prefix }
+ dns_nameservers: { get_param: dns_nameservers }
router:
type: OS::Neutron::Router
@@ -428,44 +383,6 @@ resources:
port_range_min: 443
port_range_max: 443
- dns-secgrp:
- type: OS::Neutron::SecurityGroup
- properties:
- name:
- str_replace:
- template: openshift-ansible-cluster_id-dns-secgrp
- params:
- cluster_id: { get_param: cluster_id }
- description:
- str_replace:
- template: Security group for cluster_id cluster DNS
- params:
- cluster_id: { get_param: cluster_id }
- rules:
- - direction: ingress
- protocol: tcp
- port_range_min: 22
- port_range_max: 22
- remote_ip_prefix: { get_param: ssh_incoming }
- - direction: ingress
- protocol: udp
- port_range_min: 53
- port_range_max: 53
- remote_mode: remote_group_id
- remote_group_id: { get_resource: etcd-secgrp }
- - direction: ingress
- protocol: udp
- port_range_min: 53
- port_range_max: 53
- remote_mode: remote_group_id
- remote_group_id: { get_resource: master-secgrp }
- - direction: ingress
- protocol: udp
- port_range_min: 53
- port_range_max: 53
- remote_mode: remote_group_id
- remote_group_id: { get_resource: node-secgrp }
-
etcd:
type: OS::Heat::ResourceGroup
properties:
@@ -599,79 +516,3 @@ resources:
cluster_id: { get_param: cluster_id }
depends_on:
- interface
-
- dns:
- type: OS::Nova::Server
- properties:
- name:
- str_replace:
- template: cluster_id-dns
- params:
- cluster_id: { get_param: cluster_id }
- key_name: { get_resource: keypair }
- image: { get_param: dns_image }
- flavor: { get_param: dns_flavor }
- networks:
- - port: { get_resource: dns-port }
- user_data: { get_resource: dns-config }
- user_data_format: RAW
-
- dns-port:
- type: OS::Neutron::Port
- properties:
- network: { get_resource: net }
- fixed_ips:
- - subnet: { get_resource: subnet }
- ip_address:
- str_replace:
- template: subnet_24_prefix.2
- params:
- subnet_24_prefix: { get_param: subnet_24_prefix }
- security_groups:
- - { get_resource: dns-secgrp }
-
- dns-floating-ip:
- type: OS::Neutron::FloatingIP
- properties:
- floating_network: { get_param: external_net }
- port_id: { get_resource: dns-port }
-
- dns-config:
- type: OS::Heat::MultipartMime
- properties:
- parts:
- - config:
- str_replace:
- template: |
- #cloud-config
- disable_root: true
-
- system_info:
- default_user:
- name: openshift
- sudo: ["ALL=(ALL) NOPASSWD: ALL"]
-
- write_files:
- - path: /etc/sudoers.d/00-openshift-no-requiretty
- permissions: 440
- content: |
- Defaults:openshift !requiretty
- - path: /etc/sysconfig/network-scripts/ifcfg-eth0
- content: |
- DEVICE="eth0"
- BOOTPROTO="dhcp"
- DNS1="$dns1"
- DNS2="$dns2"
- PEERDNS="no"
- ONBOOT="yes"
- runcmd:
- - [ "/usr/bin/systemctl", "restart", "network" ]
- params:
- $dns1:
- get_param:
- - dns_nameservers
- - 0
- $dns2:
- get_param:
- - dns_nameservers
- - 1
diff --git a/playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml b/playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml
index f83f2c984..435139849 100644
--- a/playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml
+++ b/playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml
@@ -107,7 +107,7 @@ resources:
flavor: { get_param: flavor }
networks:
- port: { get_resource: port }
- user_data: { get_file: user-data }
+ user_data: { get_resource: config }
user_data_format: RAW
metadata:
environment: { get_param: cluster_env }
@@ -128,3 +128,25 @@ resources:
properties:
floating_network: { get_param: floating_network }
port_id: { get_resource: port }
+
+ config:
+ type: OS::Heat::CloudConfig
+ properties:
+ cloud_config:
+ disable_root: true
+
+ hostname: { get_param: name }
+
+ system_info:
+ default_user:
+ name: openshift
+ sudo: ["ALL=(ALL) NOPASSWD: ALL"]
+
+ write_files:
+ - path: /etc/sudoers.d/00-openshift-no-requiretty
+ permissions: 440
+ # content: Defaults:openshift !requiretty
+      # Encoded in base64 to make sure the trailing newline is preserved;
+      # without it sudo cannot parse the file
+ encoding: b64
+ content: RGVmYXVsdHM6b3BlbnNoaWZ0ICFyZXF1aXJldHR5Cg==
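The base64 value above decodes to "Defaults:openshift !requiretty" followed by a newline. If the content ever needs to be regenerated, a sketch like the following reproduces it (task names are illustrative):

- name: Reproduce the encoded sudoers content (sketch)
  # echo appends the trailing newline that sudo requires before the text is base64-encoded
  shell: echo 'Defaults:openshift !requiretty' | base64
  register: sudoers_b64

- debug:
    var: sudoers_b64.stdout   # expected: RGVmYXVsdHM6b3BlbnNoaWZ0ICFyZXF1aXJldHR5Cg==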
diff --git a/playbooks/openstack/openshift-cluster/files/user-data b/playbooks/openstack/openshift-cluster/files/user-data
deleted file mode 100644
index eb65f7cec..000000000
--- a/playbooks/openstack/openshift-cluster/files/user-data
+++ /dev/null
@@ -1,13 +0,0 @@
-#cloud-config
-disable_root: true
-
-system_info:
- default_user:
- name: openshift
- sudo: ["ALL=(ALL) NOPASSWD: ALL"]
-
-write_files:
- - path: /etc/sudoers.d/00-openshift-no-requiretty
- permissions: 440
- content: |
- Defaults:openshift !requiretty
diff --git a/playbooks/openstack/openshift-cluster/launch.yml b/playbooks/openstack/openshift-cluster/launch.yml
index 127e3e2e6..eb2c4269a 100644
--- a/playbooks/openstack/openshift-cluster/launch.yml
+++ b/playbooks/openstack/openshift-cluster/launch.yml
@@ -42,12 +42,10 @@
-P master_image={{ deployment_vars[deployment_type].image }}
-P node_image={{ deployment_vars[deployment_type].image }}
-P infra_image={{ deployment_vars[deployment_type].image }}
- -P dns_image={{ deployment_vars[deployment_type].image }}
-P etcd_flavor={{ openstack_flavor["etcd"] }}
-P master_flavor={{ openstack_flavor["master"] }}
-P node_flavor={{ openstack_flavor["node"] }}
-P infra_flavor={{ openstack_flavor["infra"] }}
- -P dns_flavor={{ openstack_flavor["dns"] }}
openshift-ansible-{{ cluster_id }}-stack'
args:
chdir: '{{ playbook_dir }}'
@@ -156,14 +154,6 @@
- '{{ parsed_outputs.infra_ips }}'
- '{{ parsed_outputs.infra_floating_ips }}'
- - name: Add DNS groups and variables
- add_host:
- hostname: '{{ parsed_outputs.dns_name }}'
- ansible_ssh_host: '{{ parsed_outputs.dns_floating_ip }}'
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- groups: '{{ cluster_id }}-dns'
-
- name: Wait for ssh
wait_for:
host: '{{ item }}'
@@ -172,7 +162,6 @@
- '{{ parsed_outputs.master_floating_ips }}'
- '{{ parsed_outputs.node_floating_ips }}'
- '{{ parsed_outputs.infra_floating_ips }}'
- - '{{ parsed_outputs.dns_floating_ip }}'
- name: Wait for user setup
command: 'ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ deployment_vars[deployment_type].ssh_user }}@{{ item }} echo {{ deployment_vars[deployment_type].ssh_user }} user is setup'
@@ -184,7 +173,6 @@
- '{{ parsed_outputs.master_floating_ips }}'
- '{{ parsed_outputs.node_floating_ips }}'
- '{{ parsed_outputs.infra_floating_ips }}'
- - '{{ parsed_outputs.dns_floating_ip }}'
- include: update.yml
diff --git a/playbooks/openstack/openshift-cluster/update.yml b/playbooks/openstack/openshift-cluster/update.yml
index 6d4d23963..332f27da7 100644
--- a/playbooks/openstack/openshift-cluster/update.yml
+++ b/playbooks/openstack/openshift-cluster/update.yml
@@ -15,8 +15,6 @@
- include_vars: vars.yml
- include_vars: cluster_hosts.yml
-- include: dns.yml
-
- name: Populate oo_hosts_to_update group
hosts: localhost
connection: local
diff --git a/playbooks/openstack/openshift-cluster/vars.yml b/playbooks/openstack/openshift-cluster/vars.yml
index 17063ef34..62111dacf 100644
--- a/playbooks/openstack/openshift-cluster/vars.yml
+++ b/playbooks/openstack/openshift-cluster/vars.yml
@@ -15,7 +15,6 @@ openstack_ssh_access_from: "{{ lookup('oo_option', 'ssh_from') |
openstack_node_port_access_from: "{{ lookup('oo_option', 'node_port_from') |
default('0.0.0.0/0', True) }}"
openstack_flavor:
- dns: "{{ lookup('oo_option', 'dns_flavor' ) | default('m1.small', True) }}"
etcd: "{{ lookup('oo_option', 'etcd_flavor' ) | default('m1.small', True) }}"
master: "{{ lookup('oo_option', 'master_flavor' ) | default('m1.small', True) }}"
infra: "{{ lookup('oo_option', 'infra_flavor' ) | default('m1.small', True) }}"
diff --git a/roles/cockpit-ui/tasks/main.yml b/roles/cockpit-ui/tasks/main.yml
index 9fc15ee8b..c573da6d6 100644
--- a/roles/cockpit-ui/tasks/main.yml
+++ b/roles/cockpit-ui/tasks/main.yml
@@ -36,7 +36,7 @@
- name: Retrieve docker-registry route
command: >
{{ openshift.common.client_binary }} get route docker-registry
- --template='{{ '{{' }} .spec.host {{ '}}' }}'
+ -o jsonpath='{.spec.host}'
--config={{ openshift_hosted_kubeconfig }}
-n default
register: docker_registry_route
@@ -45,18 +45,15 @@
- name: Retrieve cockpit kube url
command: >
{{ openshift.common.client_binary }} get route registry-console
- --template='https://{{ '{{' }} .spec.host {{ '}}' }}'
+ -o jsonpath='https://{.spec.host}'
-n default
register: registry_console_cockpit_kube_url
changed_when: false
-- set_fact:
- cockpit_image_prefix: "{{ '-p IMAGE_PREFIX=' ~ openshift_cockpit_deployer_prefix | default('') }}"
-
- name: Deploy registry-console
command: >
{{ openshift.common.client_binary }} new-app --template=registry-console
- {{ cockpit_image_prefix }}
+ {% if openshift_cockpit_deployer_prefix is defined %}-p IMAGE_PREFIX="{{ openshift_cockpit_deployer_prefix }}"{% endif %}
-p OPENSHIFT_OAUTH_PROVIDER_URL="{{ openshift.master.public_api_url }}"
-p REGISTRY_HOST="{{ docker_registry_route.stdout }}"
-p COCKPIT_KUBE_URL="{{ registry_console_cockpit_kube_url.stdout }}"
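The inline Jinja conditional replaces the earlier set_fact so that -p IMAGE_PREFIX is only passed when a prefix is actually supplied. A sketch of how the fragment renders with a hypothetical prefix (omit the variable and it renders empty):

- debug:
    msg: >-
      {% if openshift_cockpit_deployer_prefix is defined %}-p IMAGE_PREFIX="{{ openshift_cockpit_deployer_prefix }}"{% endif %}
  vars:
    openshift_cockpit_deployer_prefix: "registry.example.com/openshift3/"   # hypothetical value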
diff --git a/roles/docker/defaults/main.yml b/roles/docker/defaults/main.yml
index da11ed0af..ed97d539c 100644
--- a/roles/docker/defaults/main.yml
+++ b/roles/docker/defaults/main.yml
@@ -1,2 +1 @@
---
-docker_protect_installed_version: False
diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml
index a89f5b91a..7147aa2d4 100644
--- a/roles/docker/tasks/main.yml
+++ b/roles/docker/tasks/main.yml
@@ -11,7 +11,7 @@
- name: Error out if Docker pre-installed but too old
fail:
msg: "Docker {{ curr_docker_version.stdout }} is installed, but >= 1.9.1 is required."
- when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and curr_docker_version.stdout | version_compare('1.9.1', '<') and not docker_version is defined and not docker_protect_installed_version | bool
+ when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and curr_docker_version.stdout | version_compare('1.9.1', '<') and not docker_version is defined
- name: Error out if requested Docker is too old
fail:
@@ -31,19 +31,19 @@
- name: Fail if Docker version requested but downgrade is required
fail:
msg: "Docker {{ curr_docker_version.stdout }} is installed, but version {{ docker_version }} was requested."
- when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and docker_version is defined and curr_docker_version.stdout | version_compare(docker_version, '>') and not docker_protect_installed_version | bool
+ when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and docker_version is defined and curr_docker_version.stdout | version_compare(docker_version, '>')
# This involves an extremely slow migration process, users should instead run the
# Docker 1.10 upgrade playbook to accomplish this.
- name: Error out if attempting to upgrade Docker across the 1.10 boundary
fail:
msg: "Cannot upgrade Docker to >= 1.10, please upgrade or remove Docker manually, or use the Docker upgrade playbook if OpenShift is already installed."
- when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and curr_docker_version.stdout | version_compare('1.10', '<') and docker_version is defined and docker_version | version_compare('1.10', '>=') and not docker_protect_installed_version | bool
+ when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and curr_docker_version.stdout | version_compare('1.10', '<') and docker_version is defined and docker_version | version_compare('1.10', '>=')
# Make sure Docker is installed, but does not update a running version.
# Docker upgrades are handled by a separate playbook.
- name: Install Docker
- action: "{{ ansible_pkg_mgr }} name=docker{{ '-' + docker_version if docker_version is defined and not docker_protect_installed_version | bool else '' }} state=present"
+ action: "{{ ansible_pkg_mgr }} name=docker{{ '-' + docker_version if docker_version is defined else '' }} state=present"
when: not openshift.common.is_atomic | bool
- name: Start the Docker service
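With docker_protect_installed_version gone, the version guards above always apply; they all rely on the version_compare filter. A tiny sketch of the filter itself (versions are made up):

- debug:
    msg: "{{ '1.8.2' | version_compare('1.9.1', '<') }}"   # -> True, i.e. 1.8.2 is older than 1.9.1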
diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml
index ba4136327..2bc6a8678 100644
--- a/roles/etcd/tasks/main.yml
+++ b/roles/etcd/tasks/main.yml
@@ -34,16 +34,17 @@
command: systemctl show etcd.service
register: etcd_show
changed_when: false
+ failed_when: false
- name: Disable system etcd when containerized
- when: etcd_is_containerized | bool and 'LoadState=not-found' not in etcd_show.stdout
+ when: etcd_is_containerized | bool and etcd_show.rc == 0 and 'LoadState=not-found' not in etcd_show.stdout
service:
name: etcd
state: stopped
enabled: no
- name: Mask system etcd when containerized
- when: etcd_is_containerized | bool and 'LoadState=not-found' not in etcd_show.stdout
+ when: etcd_is_containerized | bool and etcd_show.rc == 0 and 'LoadState=not-found' not in etcd_show.stdout
command: systemctl mask etcd
- name: Reload systemd units
diff --git a/roles/etcd_client_certificates/tasks/main.yml b/roles/etcd_client_certificates/tasks/main.yml
index 275aa0a63..93f4fd53c 100644
--- a/roles/etcd_client_certificates/tasks/main.yml
+++ b/roles/etcd_client_certificates/tasks/main.yml
@@ -93,6 +93,9 @@
-C {{ etcd_generated_certs_dir }}/{{ etcd_cert_subdir }} .
args:
creates: "{{ etcd_generated_certs_dir }}/{{ etcd_cert_subdir }}.tgz"
+ # Disables the following warning:
+ # Consider using unarchive module rather than running tar
+ warn: no
when: etcd_client_certs_missing | bool
delegate_to: "{{ etcd_ca_host }}"
diff --git a/roles/etcd_server_certificates/tasks/main.yml b/roles/etcd_server_certificates/tasks/main.yml
index 718515023..d66a0a7bf 100644
--- a/roles/etcd_server_certificates/tasks/main.yml
+++ b/roles/etcd_server_certificates/tasks/main.yml
@@ -114,6 +114,9 @@
-C {{ etcd_generated_certs_dir }}/{{ etcd_cert_subdir }} .
args:
creates: "{{ etcd_generated_certs_dir }}/{{ etcd_cert_subdir }}.tgz"
+ # Disables the following warning:
+ # Consider using unarchive module rather than running tar
+ warn: no
when: etcd_server_certs_missing | bool
delegate_to: "{{ etcd_ca_host }}"
diff --git a/roles/openshift_cli/meta/main.yml b/roles/openshift_cli/meta/main.yml
index 223cb768d..c1de367d9 100644
--- a/roles/openshift_cli/meta/main.yml
+++ b/roles/openshift_cli/meta/main.yml
@@ -13,5 +13,6 @@ galaxy_info:
- cloud
dependencies:
- role: openshift_docker
+ when: not skip_docker_role | default(False) | bool
- role: openshift_common
- role: openshift_cli_facts
diff --git a/roles/openshift_cloud_provider/templates/openstack.conf.j2 b/roles/openshift_cloud_provider/templates/openstack.conf.j2
index ce452db24..313ee02b4 100644
--- a/roles/openshift_cloud_provider/templates/openstack.conf.j2
+++ b/roles/openshift_cloud_provider/templates/openstack.conf.j2
@@ -2,6 +2,11 @@
auth-url = {{ openshift_cloudprovider_openstack_auth_url }}
username = {{ openshift_cloudprovider_openstack_username }}
password = {{ openshift_cloudprovider_openstack_password }}
+{% if openshift_cloudprovider_openstack_domain_id is defined %}
+domain-id = {{ openshift_cloudprovider_openstack_domain_id }}
+{% elif openshift_cloudprovider_openstack_domain_name is defined %}
+domain-name = {{ openshift_cloudprovider_openstack_domain_name }}
+{% endif %}
{% if openshift_cloudprovider_openstack_tenant_id is defined %}
tenant-id = {{ openshift_cloudprovider_openstack_tenant_id }}
{% else %}
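The new branches let Keystone v3 domain scoping be supplied through either variable, with the ID taking precedence over the name. A sketch of the inventory variables involved (all values are hypothetical):

openshift_cloudprovider_kind: openstack
openshift_cloudprovider_openstack_auth_url: "https://keystone.example.com:5000/v3"
openshift_cloudprovider_openstack_username: openshift
openshift_cloudprovider_openstack_password: changeme
openshift_cloudprovider_openstack_domain_name: Default   # or set openshift_cloudprovider_openstack_domain_id instead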
diff --git a/roles/openshift_docker_facts/tasks/main.yml b/roles/openshift_docker_facts/tasks/main.yml
index 0ce142983..0c8a36d65 100644
--- a/roles/openshift_docker_facts/tasks/main.yml
+++ b/roles/openshift_docker_facts/tasks/main.yml
@@ -13,7 +13,7 @@
log_options: "{{ openshift_docker_log_options | default(None) }}"
options: "{{ openshift_docker_options | default(None) }}"
disable_push_dockerhub: "{{ openshift_disable_push_dockerhub | default(None) }}"
- hosted_registry_insecure: "{{ openshift_docker_hosted_registry_insecure | default(openshift.common.deployment_subtype != 'registry') }}"
+ hosted_registry_insecure: "{{ openshift_docker_hosted_registry_insecure | default(False) }}"
hosted_registry_network: "{{ openshift_docker_hosted_registry_network | default(None) }}"
- set_fact:
diff --git a/roles/openshift_examples/files/examples/v1.3/infrastructure-templates/enterprise/logging-deployer.yaml b/roles/openshift_examples/files/examples/v1.3/infrastructure-templates/enterprise/logging-deployer.yaml
index b9835e7e9..a8d4b1cbb 100644
--- a/roles/openshift_examples/files/examples/v1.3/infrastructure-templates/enterprise/logging-deployer.yaml
+++ b/roles/openshift_examples/files/examples/v1.3/infrastructure-templates/enterprise/logging-deployer.yaml
@@ -202,11 +202,11 @@ items:
-
description: 'Specify prefix for logging components; e.g. for "registry.access.redhat.com/openshift3/logging-deployer:3.3.0", set prefix "registry.access.redhat.com/openshift3/"'
name: IMAGE_PREFIX
- value: "docker.io/openshift/origin-"
+ value: "registry.access.redhat.com/openshift3/"
-
description: 'Specify version for logging components; e.g. for "registry.access.redhat.com/openshift3/logging-deployer:3.3.0", set version "3.3.0"'
name: IMAGE_VERSION
- value: "latest"
+ value: "3.3.0"
-
description: "(Deprecated) Specify the name of an existing pull secret to be used for pulling component images from an authenticated registry."
name: IMAGE_PULL_SECRET
@@ -323,4 +323,3 @@ items:
description: "(Deprecated) Node selector operations Curator (label=value)."
name: CURATOR_OPS_NODESELECTOR
value: ""
-
diff --git a/roles/openshift_examples/tasks/main.yml b/roles/openshift_examples/tasks/main.yml
index 058ad8888..82536e8af 100644
--- a/roles/openshift_examples/tasks/main.yml
+++ b/roles/openshift_examples/tasks/main.yml
@@ -19,6 +19,10 @@
- name: Create tar of OpenShift examples
local_action: command tar -C "{{ role_path }}/files/examples/{{ content_version }}/" -cvf "{{ copy_examples_mktemp.stdout }}/openshift-examples.tar" .
+ args:
+ # Disables the following warning:
+ # Consider using unarchive module rather than running tar
+ warn: no
become: False
register: copy_examples_tar
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 6c10e856a..9ffd399bc 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -149,6 +149,7 @@ def hostname_valid(hostname):
if (not hostname or
hostname.startswith('localhost') or
hostname.endswith('localdomain') or
+ hostname.endswith('novalocal') or
len(hostname.split('.')) < 2):
return False
@@ -918,6 +919,14 @@ def set_sdn_facts_if_unset(facts, system_facts):
return facts
+def set_nodename(facts):
+ if 'node' in facts and 'common' in facts:
+        if 'cloudprovider' in facts and facts['cloudprovider'].get('kind') == 'openstack':
+ facts['node']['nodename'] = facts['provider']['metadata']['hostname'].replace('.novalocal', '')
+ else:
+ facts['node']['nodename'] = facts['common']['hostname'].lower()
+ return facts
+
def migrate_oauth_template_facts(facts):
"""
Migrate an old oauth template fact to a newer format if it's present.
@@ -1220,7 +1229,7 @@ def apply_provider_facts(facts, provider_facts):
facts['common'][h_var] = choose_hostname(
[provider_facts['network'].get(h_var)],
- facts['common'][ip_var]
+ facts['common'][h_var]
)
facts['provider'] = provider_facts
@@ -1700,6 +1709,7 @@ class OpenShiftFacts(object):
facts = set_proxy_facts(facts)
if not safe_get_bool(facts['common']['is_containerized']):
facts = set_installed_variant_rpm_facts(facts)
+ facts = set_nodename(facts)
return dict(openshift=facts)
def get_defaults(self, roles, deployment_type, deployment_subtype):
@@ -1810,10 +1820,25 @@ class OpenShiftFacts(object):
),
nfs=dict(
directory='/exports',
- options='*(rw,root_squash)'),
- openstack=dict(
- filesystem='ext4',
- volumeID='123'),
+ options='*(rw,root_squash)'
+ ),
+ host=None,
+ access_modes=['ReadWriteOnce'],
+ create_pv=True,
+ create_pvc=False
+ )
+ ),
+ logging=dict(
+ storage=dict(
+ kind=None,
+ volume=dict(
+ name='logging-es',
+ size='10Gi'
+ ),
+ nfs=dict(
+ directory='/exports',
+ options='*(rw,root_squash)'
+ ),
host=None,
access_modes=['ReadWriteOnce'],
create_pv=True,
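With set_nodename() above, the registered node name comes from a dedicated fact: on OpenStack it is the metadata hostname with the .novalocal suffix stripped, otherwise the lowercased hostname. A quick way to inspect the result (the shown values are examples, not guaranteed):

- debug:
    var: openshift.node.nodename   # e.g. "master-0" on OpenStack, "node1.example.com" elsewhere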
diff --git a/roles/openshift_hosted/tasks/registry/registry.yml b/roles/openshift_hosted/tasks/registry/registry.yml
index d5077932b..ed0a2b38d 100644
--- a/roles/openshift_hosted/tasks/registry/registry.yml
+++ b/roles/openshift_hosted/tasks/registry/registry.yml
@@ -53,7 +53,6 @@
- include: secure.yml
static: no
- when: openshift.common.deployment_subtype == 'registry'
- include: storage/object_storage.yml
static: no
diff --git a/roles/openshift_hosted/tasks/registry/secure.yml b/roles/openshift_hosted/tasks/registry/secure.yml
index 4cb85df04..664edef41 100644
--- a/roles/openshift_hosted/tasks/registry/secure.yml
+++ b/roles/openshift_hosted/tasks/registry/secure.yml
@@ -1,5 +1,15 @@
---
-- name: Determine if registry certificates must be created
+- name: Create passthrough route for docker-registry
+ command: >
+ {{ openshift.common.client_binary }} create route passthrough
+ --service docker-registry
+ --config={{ openshift_hosted_kubeconfig }}
+ -n default
+ register: create_docker_registry_route
+ changed_when: "'already exists' not in create_docker_registry_route.stderr"
+ failed_when: "'already exists' not in create_docker_registry_route.stderr and create_docker_registry_route.rc != 0"
+
+- name: Determine if registry certificate must be created
stat:
path: "{{ openshift_master_config_dir }}/{{ item }}"
with_items:
@@ -12,7 +22,7 @@
- name: Retrieve registry service IP
command: >
{{ openshift.common.client_binary }} get service docker-registry
- --template='{{ '{{' }} .spec.clusterIP {{ '}}' }}'
+ -o jsonpath='{.spec.clusterIP}'
--config={{ openshift_hosted_kubeconfig }}
-n default
register: docker_registry_service_ip
@@ -45,8 +55,8 @@
- name: "Add the secret to the registry's pod service accounts"
command: >
- {{ openshift.common.client_binary }} secrets link {{ item }} registry-certificates
- --config={{ openshift_hosted_kubeconfig }}
+ {{ openshift.common.client_binary }} secrets add {{ item }} registry-certificates
+ --config={{ openshift_hosted_kubeconfig }}
-n default
with_items:
- registry
@@ -55,12 +65,12 @@
- name: Determine if registry-certificates secret volume attached
command: >
{{ openshift.common.client_binary }} get dc/docker-registry
- --template='{{ '{{' }} range .spec.template.spec.volumes {{ '}}' }}{{ '{{' }} .secret.secretName {{ '}}' }}{{ '{{' }} end {{ '}}' }}'
+ -o jsonpath='{.spec.template.spec.volumes[*].secret.secretName}'
--config={{ openshift_hosted_kubeconfig }}
-n default
register: docker_registry_volumes
changed_when: false
- failed_when: false
+ failed_when: "'secretName is not found' not in docker_registry_volumes.stdout and docker_registry_volumes.rc != 0"
- name: Attach registry-certificates secret volume
command: >
@@ -71,17 +81,48 @@
-n default
when: "'registry-certificates' not in docker_registry_volumes.stdout"
-- name: Set registry environment variables for TLS certificate
+- name: Determine if registry environment variables must be set
+ command: >
+ {{ openshift.common.client_binary }} env dc/docker-registry
+ --list
+ --config={{ openshift_hosted_kubeconfig }}
+ -n default
+ register: docker_registry_env
+ changed_when: false
+
+- name: Configure certificates in registry deploymentConfig
command: >
{{ openshift.common.client_binary }} env dc/docker-registry
REGISTRY_HTTP_TLS_CERTIFICATE=/etc/secrets/registry.crt
REGISTRY_HTTP_TLS_KEY=/etc/secrets/registry.key
--config={{ openshift_hosted_kubeconfig }}
-n default
+ when: "'REGISTRY_HTTP_TLS_CERTIFICATE=/etc/secrets/registry.crt' not in docker_registry_env.stdout or 'REGISTRY_HTTP_TLS_KEY=/etc/secrets/registry.key' not in docker_registry_env.stdout"
-# These commands are on a single line to preserve patch json.
+- name: Determine if registry liveness probe scheme is HTTPS
+ command: >
+ {{ openshift.common.client_binary }} get dc/docker-registry
+ -o jsonpath='{.spec.template.spec.containers[*].livenessProbe.httpGet.scheme}'
+ --config={{ openshift_hosted_kubeconfig }}
+ -n default
+ register: docker_registry_liveness_probe
+ changed_when: false
+
+# This command is on a single line to preserve patch json.
- name: Update registry liveness probe from HTTP to HTTPS
command: "{{ openshift.common.client_binary }} patch dc/docker-registry --api-version=v1 -p '{\"spec\":{\"template\":{\"spec\":{\"containers\":[{\"name\":\"registry\",\"livenessProbe\":{\"httpGet\":{\"scheme\":\"HTTPS\"}}}]}}}}' --config={{ openshift_hosted_kubeconfig }} -n default"
+ when: "'HTTPS' not in docker_registry_liveness_probe.stdout"
+
+- name: Determine if registry readiness probe scheme is HTTPS
+ command: >
+ {{ openshift.common.client_binary }} get dc/docker-registry
+ -o jsonpath='{.spec.template.spec.containers[*].readinessProbe.httpGet.scheme}'
+ --config={{ openshift_hosted_kubeconfig }}
+ -n default
+ register: docker_registry_readiness_probe
+ changed_when: false
+# This command is on a single line to preserve patch json.
- name: Update registry readiness probe from HTTP to HTTPS
command: "{{ openshift.common.client_binary }} patch dc/docker-registry --api-version=v1 -p '{\"spec\":{\"template\":{\"spec\":{\"containers\":[{\"name\":\"registry\",\"readinessProbe\":{\"httpGet\":{\"scheme\":\"HTTPS\"}}}]}}}}' --config={{ openshift_hosted_kubeconfig }} -n default"
+ when: "'HTTPS' not in docker_registry_readiness_probe.stdout"
diff --git a/roles/openshift_hosted/templates/registry_config.j2 b/roles/openshift_hosted/templates/registry_config.j2
index 092b0fb35..cfe7ac81c 100644
--- a/roles/openshift_hosted/templates/registry_config.j2
+++ b/roles/openshift_hosted/templates/registry_config.j2
@@ -4,6 +4,8 @@ log:
http:
addr: :5000
storage:
+ delete:
+ enabled: true
cache:
blobdescriptor: inmemory
{% if openshift.hosted.registry.storage.provider == 's3' %}
diff --git a/roles/openshift_hosted_logging/README.md b/roles/openshift_hosted_logging/README.md
index 03db1c4b1..12ffe777d 100644
--- a/roles/openshift_hosted_logging/README.md
+++ b/roles/openshift_hosted_logging/README.md
@@ -17,7 +17,8 @@
- openshift_hosted_logging_elasticsearch_pvc_dynamic: Set to `true` to have created PersistentVolumeClaims annotated such that their backing storage can be dynamically provisioned (if that is available for your cluster).
- openshift_hosted_logging_elasticsearch_storage_group: Number of a supplemental group ID for access to Elasticsearch storage volumes; backing volumes should allow access by this group ID (defaults to 65534).
- openshift_hosted_logging_elasticsearch_nodeselector: Specify the nodeSelector that Elasticsearch should use (label=value)
-- openshift_hosted_logging_fluentd_nodeselector: The nodeSelector to use for the Fluentd DaemonSet. Defaults to "logging-infra-fluentd=true".
+- openshift_hosted_logging_fluentd_nodeselector: The nodeSelector used to determine which nodes to apply the `openshift_hosted_logging_fluentd_nodeselector_label` label to.
+- openshift_hosted_logging_fluentd_nodeselector_label: The label applied to nodes included in the Fluentd DaemonSet. Defaults to "logging-infra-fluentd=true".
- openshift_hosted_logging_kibana_nodeselector: Specify the nodeSelector that Kibana should use (label=value)
- openshift_hosted_logging_curator_nodeselector: Specify the nodeSelector that Curator should use (label=value)
- openshift_hosted_logging_enable_ops_cluster: If "true", configure a second ES cluster and Kibana for ops logs.
diff --git a/roles/openshift_hosted_logging/defaults/main.yml b/roles/openshift_hosted_logging/defaults/main.yml
new file mode 100644
index 000000000..e357899e5
--- /dev/null
+++ b/roles/openshift_hosted_logging/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+examples_base: "{{ openshift.common.config_base if openshift.common.is_containerized | bool else '/usr/share/openshift' }}/examples"
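examples_base switches between the containerized configuration tree and /usr/share/openshift. Assuming the usual /etc/origin config base, the two outcomes look like this sketch:

- debug:
    msg: "{{ examples_base }}"
  # containerized hosts:     /etc/origin/examples   (assuming config_base is /etc/origin)
  # non-containerized hosts: /usr/share/openshift/examples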
diff --git a/roles/openshift_hosted_logging/tasks/deploy_logging.yaml b/roles/openshift_hosted_logging/tasks/deploy_logging.yaml
index 082bb6ea2..f64c56248 100644
--- a/roles/openshift_hosted_logging/tasks/deploy_logging.yaml
+++ b/roles/openshift_hosted_logging/tasks/deploy_logging.yaml
@@ -40,7 +40,7 @@
- name: "Create templates for logging accounts and the deployer"
command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f /usr/share/openshift/examples/infrastructure-templates/enterprise/logging-deployer.yaml
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f {{ examples_base }}/infrastructure-templates/{{ 'enterprise' if openshift_deployment_type == 'openshift-enterprise' else 'origin' }}/logging-deployer.yaml
register: template_output
failed_when: "template_output.rc == 1 and 'exists' not in template_output.stderr"
@@ -82,8 +82,8 @@
shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods | grep logging-deployer.*Completed"
register: result
until: result.rc == 0
- retries: 15
- delay: 10
+ retries: 20
+ delay: 15
- name: "Process imagestream template"
shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-app logging-imagestream-template {{ oc_new_app_values }}"
@@ -102,7 +102,7 @@
until: result.rc == 0
failed_when: result.rc == 1 and 'not found' not in result.stderr
retries: 20
- delay: 10
+ delay: 5
- name: "Wait for component pods to be running"
shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods -l component={{ item }} | grep Running"
@@ -114,7 +114,7 @@
until: result.rc == 0
failed_when: result.rc == 1 or 'Error' in result.stderr
retries: 20
- delay: 10
+ delay: 15
- name: "Wait for ops component pods to be running"
shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods -l component={{ item }} | grep Running"
@@ -127,7 +127,7 @@
until: result.rc == 0
failed_when: result.rc == 1 or 'Error' in result.stderr
retries: 20
- delay: 10
+ delay: 15
- name: "Wait for fluentd DaemonSet to exist"
shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get daemonset logging-fluentd"
@@ -135,10 +135,10 @@
until: result.rc == 0
failed_when: result.rc == 1 or 'Error' in result.stderr
retries: 20
- delay: 10
+ delay: 5
- name: "Deploy fluentd by labeling the node"
- shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig label node {{ openshift_hostname }} {{ openshift_hosted_logging_fluentd_nodeselector if openshift_hosted_logging_fluentd_nodeselector is defined else 'logging-infra-fluentd=true' }}"
+  shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig label node --overwrite=true {{ '-l ' ~ openshift_hosted_logging_fluentd_nodeselector if openshift_hosted_logging_fluentd_nodeselector is defined else '--all' }} {{ openshift_hosted_logging_fluentd_nodeselector_label if openshift_hosted_logging_fluentd_nodeselector_label is defined else 'logging-infra-fluentd=true' }}"
- name: "Wait for fluentd to be running"
shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods -l component=fluentd | grep Running"
@@ -146,7 +146,7 @@
until: result.rc == 0
failed_when: result.rc == 1 or 'Error' in result.stderr
retries: 20
- delay: 10
+ delay: 15
- debug:
msg: "Logging components deployed. Note that the persistent volume for Elasticsearch must be set up manually"
diff --git a/roles/openshift_hosted_logging/vars/main.yaml b/roles/openshift_hosted_logging/vars/main.yaml
index 2ef3164e1..11412733b 100644
--- a/roles/openshift_hosted_logging/vars/main.yaml
+++ b/roles/openshift_hosted_logging/vars/main.yaml
@@ -1,32 +1,32 @@
-tr_or_ohlip: "{{ openshift_hosted_logging_image_prefix or target_registry or none }}"
-ip_kv: "{{ '-p IMAGE_PREFIX=' ~ tr_or_ohlip | quote if tr_or_ohlip is defined else '' }}"
-iv_kv: "{{ '-p IMAGE_VERSION=' ~ openshift_hosted_logging_image_version | quote if openshift_hosted_logging_image_version is defined else '' }}"
+tr_or_ohlip: "{{ openshift_hosted_logging_deployer_prefix | default(target_registry) | default(None) }}"
+ip_kv: "{{ '-p IMAGE_PREFIX=' ~ tr_or_ohlip | quote if tr_or_ohlip != '' else '' }}"
+iv_kv: "{{ '-p IMAGE_VERSION=' ~ openshift_hosted_logging_deployer_version | quote if openshift_hosted_logging_deployer_version | default(none) is not none else '' }}"
oc_new_app_values: "{{ ip_kv }} {{ iv_kv }}"
openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
-kh_cmap_param: "{{ '--from-literal kibana-hostname=' ~ openshift_hosted_logging_hostname | quote if openshift_hosted_logging_hostname is defined else '' }}"
-kh_ops_cmap_param: "{{ '--from-literal kibana-ops-hostname=' ~ openshift_hosted_logging_ops_hostname | quote if openshift_hosted_logging_ops_hostname is defined else '' }}"
-pmu_cmap_param: "{{ '--from-literal public-master-url=' ~ openshift_hosted_logging_master_public_url | quote if openshift_hosted_logging_master_public_url is defined else '' }}"
-es_cs_cmap_param: "{{ '--from-literal es-cluster-size=' ~ openshift_hosted_logging_elasticsearch_cluster_size | string | quote if openshift_hosted_logging_elasticsearch_cluster_size is defined else '' }}"
-es_ops_cs_cmap_param: "{{ '--from-literal es-ops-cluster-size=' ~ openshift_hosted_logging_elasticsearch_ops_cluster_size | string | quote if openshift_hosted_logging_elasticsearch_ops_cluster_size is defined else '' }}"
-es_ir_cmap_param: "{{ '--from-literal es-instance-ram=' ~ openshift_hosted_logging_elasticsearch_instance_ram | quote if openshift_hosted_logging_elasticsearch_instance_ram is defined else '' }}"
-es_ops_ir_cmap_param: "{{ '--from-literal es-ops-instance-ram=' ~ openshift_hosted_logging_elasticsearch_ops_instance_ram | quote if openshift_hosted_logging_elasticsearch_ops_instance_ram is defined else '' }}"
-es_pvcs_cmap_param: "{{ '--from-literal es-pvc-size=' ~ openshift_hosted_logging_elasticsearch_pvc_size | quote if openshift_hosted_logging_elasticsearch_pvc_size is defined else '' }}"
-es_ops_pvcs_cmap_param: "{{ '--from-literal es-ops-pvc-size=' ~ openshift_hosted_logging_elasticsearch_ops_pvc_size | quote if openshift_hosted_logging_elasticsearch_ops_pvc_size is defined else '' }}"
-es_pvcp_cmap_param: "{{ '--from-literal es-pvc-prefix=' ~ openshift_hosted_logging_elasticsearch_pvc_prefix | quote if openshift_hosted_logging_elasticsearch_pvc_prefix is defined else '' }}"
-es_ops_pvcp_cmap_param: "{{ '--from-literal es-ops-pvc-prefix=' ~ openshift_hosted_logging_elasticsearch_ops_pvc_prefix | quote if openshift_hosted_logging_elasticsearch_ops_pvc_prefix is defined else '' }}"
-es_pvcd_cmap_param: "{{ '--from-literal es-pvc-dynamic=' ~ openshift_hosted_logging_elasticsearch_pvc_dynamic | quote if openshift_hosted_logging_elasticsearch_pvc_dynamic is defined else '' }}"
-es_ops_pvcd_cmap_param: "{{ '--from-literal es-ops-pvc-dynamic=' ~ openshift_hosted_logging_elasticsearch_ops_pvc_dynamic | quote if openshift_hosted_logging_elasticsearch_ops_pvc_dynamic is defined else '' }}"
-es_sg_cmap_param: "{{ '--from-literal storage-group=' ~ openshift_hosted_logging_elasticsearch_storage_group | string | quote if openshift_hosted_logging_elasticsearch_storage_group is defined else '' }}"
-es_ns_cmap_param: "{{ '--from-literal es-nodeselector=' ~ openshift_hosted_logging_elasticsearch_nodeselector | quote if openshift_hosted_logging_elasticsearch_nodeselector is defined else '' }}"
-es_ops_ns_cmap_param: "{{ '--from-literal es-ops-nodeselector=' ~ openshift_hosted_logging_elasticsearch_ops_nodeselector | quote if openshift_hosted_logging_elasticsearch_ops_nodeselector is defined else '' }}"
-fd_ns_cmap_param: "{{ '--from-literal fluentd-nodeselector=' ~ openshift_hosted_logging_fluentd_nodeselector | quote if openshift_hosted_logging_fluentd_nodeselector is defined else 'logging-infra-fluentd=true' }}"
-kb_ns_cmap_param: "{{ '--from-literal kibana-nodeselector=' ~ openshift_hosted_logging_kibana_nodeselector | quote if openshift_hosted_logging_kibana_nodeselector is defined else '' }}"
-kb_ops_ns_cmap_param: "{{ '--from-literal kibana-ops-nodeselector=' ~ openshift_hosted_logging_kibana_ops_nodeselector | quote if openshift_hosted_logging_kibana_ops_nodeselector is defined else '' }}"
-cr_ns_cmap_param: "{{ '--from-literal curator-nodeselector=' ~ openshift_hosted_logging_curator_nodeselector | quote if openshift_hosted_logging_curator_nodeselector is defined else '' }}"
-cr_ops_ns_cmap_param: "{{ '--from-literal curator-ops-nodeselector=' ~ openshift_hosted_logging_curator_ops_nodeselector | quote if openshift_hosted_logging_curator_ops_nodeselector is defined else '' }}"
-ops_cmap_param: "{{ '--from-literal enable-ops-cluster=' ~ openshift_hosted_logging_enable_ops_cluster | string | lower | quote if openshift_hosted_logging_enable_ops_cluster is defined else '' }}"
-use_journal_cmap_param: "{{ '--from-literal use-journal=' ~ openshift_hosted_logging_use_journal | string | lower | quote if openshift_hosted_logging_use_journal is defined else '' }}"
-journal_source_cmap_param: "{{ '--from-literal journal-source=' ~ openshift_hosted_logging_journal_source | quote if openshift_hosted_logging_journal_source is defined else '' }}"
-journal_read_from_head_cmap_param: "{{ '--from-literal journal-read-from-head=' ~ openshift_hosted_logging_journal_read_from_head | string | lower | quote if openshift_hosted_logging_journal_read_from_head is defined else '' }}"
-ips_cmap_param: "{{ '--from-literal image-pull-secret=' ~ openshift_hosted_logging_image_pull_secret | quote if openshift_hosted_logging_image_pull_secret is defined else '' }}"
+kh_cmap_param: "{{ '--from-literal kibana-hostname=' ~ openshift_hosted_logging_hostname | quote if openshift_hosted_logging_hostname | default(none) is not none else '' }}"
+kh_ops_cmap_param: "{{ '--from-literal kibana-ops-hostname=' ~ openshift_hosted_logging_ops_hostname | quote if openshift_hosted_logging_ops_hostname | default(none) is not none else '' }}"
+pmu_cmap_param: "{{ '--from-literal public-master-url=' ~ openshift_hosted_logging_master_public_url | quote if openshift_hosted_logging_master_public_url | default(none) is not none else '' }}"
+es_cs_cmap_param: "{{ '--from-literal es-cluster-size=' ~ openshift_hosted_logging_elasticsearch_cluster_size | string | quote if openshift_hosted_logging_elasticsearch_cluster_size | default(none) is not none else '' }}"
+es_ops_cs_cmap_param: "{{ '--from-literal es-ops-cluster-size=' ~ openshift_hosted_logging_elasticsearch_ops_cluster_size | string | quote if openshift_hosted_logging_elasticsearch_ops_cluster_size | default(none) is not none else '' }}"
+es_ir_cmap_param: "{{ '--from-literal es-instance-ram=' ~ openshift_hosted_logging_elasticsearch_instance_ram | quote if openshift_hosted_logging_elasticsearch_instance_ram | default(none) is not none else '' }}"
+es_ops_ir_cmap_param: "{{ '--from-literal es-ops-instance-ram=' ~ openshift_hosted_logging_elasticsearch_ops_instance_ram | quote if openshift_hosted_logging_elasticsearch_ops_instance_ram | default(none) is not none else '' }}"
+es_pvcs_cmap_param: "{{ '--from-literal es-pvc-size=' ~ openshift_hosted_logging_elasticsearch_pvc_size | quote if openshift_hosted_logging_elasticsearch_pvc_size | default(none) is not none else '' }}"
+es_ops_pvcs_cmap_param: "{{ '--from-literal es-ops-pvc-size=' ~ openshift_hosted_logging_elasticsearch_ops_pvc_size | quote if openshift_hosted_logging_elasticsearch_ops_pvc_size | default(none) is not none else '' }}"
+es_pvcp_cmap_param: "{{ '--from-literal es-pvc-prefix=' ~ openshift_hosted_logging_elasticsearch_pvc_prefix | quote if openshift_hosted_logging_elasticsearch_pvc_prefix | default(none) is not none else '' }}"
+es_ops_pvcp_cmap_param: "{{ '--from-literal es-ops-pvc-prefix=' ~ openshift_hosted_logging_elasticsearch_ops_pvc_prefix | quote if openshift_hosted_logging_elasticsearch_ops_pvc_prefix | default(none) is not none else '' }}"
+es_pvcd_cmap_param: "{{ '--from-literal es-pvc-dynamic=' ~ openshift_hosted_logging_elasticsearch_pvc_dynamic | quote if openshift_hosted_logging_elasticsearch_pvc_dynamic | default(none) is not none else '' }}"
+es_ops_pvcd_cmap_param: "{{ '--from-literal es-ops-pvc-dynamic=' ~ openshift_hosted_logging_elasticsearch_ops_pvc_dynamic | quote if openshift_hosted_logging_elasticsearch_ops_pvc_dynamic | default(none) is not none else '' }}"
+es_sg_cmap_param: "{{ '--from-literal storage-group=' ~ openshift_hosted_logging_elasticsearch_storage_group | string | quote if openshift_hosted_logging_elasticsearch_storage_group | default(none) is not none else '' }}"
+es_ns_cmap_param: "{{ '--from-literal es-nodeselector=' ~ openshift_hosted_logging_elasticsearch_nodeselector | quote if openshift_hosted_logging_elasticsearch_nodeselector | default(none) is not none else '' }}"
+es_ops_ns_cmap_param: "{{ '--from-literal es-ops-nodeselector=' ~ openshift_hosted_logging_elasticsearch_ops_nodeselector | quote if openshift_hosted_logging_elasticsearch_ops_nodeselector | default(none) is not none else '' }}"
+fd_ns_cmap_param: "{{ '--from-literal fluentd-nodeselector=' ~ openshift_hosted_logging_fluentd_nodeselector_label | quote if openshift_hosted_logging_fluentd_nodeselector_label | default(none) is not none else 'logging-infra-fluentd=true' }}"
+kb_ns_cmap_param: "{{ '--from-literal kibana-nodeselector=' ~ openshift_hosted_logging_kibana_nodeselector | quote if openshift_hosted_logging_kibana_nodeselector | default(none) is not none else '' }}"
+kb_ops_ns_cmap_param: "{{ '--from-literal kibana-ops-nodeselector=' ~ openshift_hosted_logging_kibana_ops_nodeselector | quote if openshift_hosted_logging_kibana_ops_nodeselector | default(none) is not none else '' }}"
+cr_ns_cmap_param: "{{ '--from-literal curator-nodeselector=' ~ openshift_hosted_logging_curator_nodeselector | quote if openshift_hosted_logging_curator_nodeselector | default(none) is not none else '' }}"
+cr_ops_ns_cmap_param: "{{ '--from-literal curator-ops-nodeselector=' ~ openshift_hosted_logging_curator_ops_nodeselector | quote if openshift_hosted_logging_curator_ops_nodeselector | default(none) is not none else '' }}"
+ops_cmap_param: "{{ '--from-literal enable-ops-cluster=' ~ openshift_hosted_logging_enable_ops_cluster | string | lower | quote if openshift_hosted_logging_enable_ops_cluster | default(none) is not none else '' }}"
+use_journal_cmap_param: "{{ '--from-literal use-journal=' ~ openshift_hosted_logging_use_journal | string | lower | quote if openshift_hosted_logging_use_journal | default(none) is not none else '' }}"
+journal_source_cmap_param: "{{ '--from-literal journal-source=' ~ openshift_hosted_logging_journal_source | quote if openshift_hosted_logging_journal_source | default(none) is not none else '' }}"
+journal_read_from_head_cmap_param: "{{ '--from-literal journal-read-from-head=' ~ openshift_hosted_logging_journal_read_from_head | string | lower | quote if openshift_hosted_logging_journal_read_from_head | default(none) is not none else '' }}"
+ips_cmap_param: "{{ '--from-literal image-pull-secret=' ~ openshift_hosted_logging_image_pull_secret | quote if openshift_hosted_logging_image_pull_secret | default(none) is not none else '' }}"
deployer_cmap_params: "{{ kh_cmap_param }} {{ kh_ops_cmap_param }} {{ pmu_cmap_param }} {{ es_cs_cmap_param }} {{ es_ir_cmap_param }} {{ es_pvcs_cmap_param }} {{ es_pvcp_cmap_param }} {{ es_pvcd_cmap_param }} {{ es_ops_cs_cmap_param }} {{ es_ops_ir_cmap_param }} {{ es_ops_pvcs_cmap_param }} {{ es_ops_pvcp_cmap_param }} {{ es_ops_pvcd_cmap_param }} {{ es_sg_cmap_param }} {{ es_ns_cmap_param }} {{ es_ops_ns_cmap_param }} {{ fd_ns_cmap_param }} {{ kb_ns_cmap_param }} {{ kb_ops_ns_cmap_param }} {{ cr_ns_cmap_param }} {{ cr_ops_ns_cmap_param }} {{ ops_cmap_param }} {{ use_journal_cmap_param }} {{ journal_source_cmap_param }} {{ journal_read_from_head_cmap_param }} {{ ips_cmap_param }}"
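Each *_cmap_param now renders to an empty string unless the corresponding variable resolves to a non-None value. A sketch of one parameter with a hypothetical hostname (leave the variable unset and the parameter renders as ''):

- debug:
    msg: "{{ kh_cmap_param }}"   # -> --from-literal kibana-hostname=kibana.example.com
  vars:
    openshift_hosted_logging_hostname: kibana.example.com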
diff --git a/roles/openshift_manage_node/tasks/main.yml b/roles/openshift_manage_node/tasks/main.yml
index f45ade751..d1cc5b274 100644
--- a/roles/openshift_manage_node/tasks/main.yml
+++ b/roles/openshift_manage_node/tasks/main.yml
@@ -14,7 +14,7 @@
- name: Wait for Node Registration
command: >
- {{ openshift.common.client_binary }} get node {{ hostvars[item].openshift.common.hostname | lower }}
+ {{ openshift.common.client_binary }} get node {{ hostvars[item].openshift.node.nodename }}
--config={{ openshift_manage_node_kubeconfig }}
-n default
register: omd_get_node
@@ -26,19 +26,19 @@
- name: Set node schedulability
command: >
- {{ openshift.common.admin_binary }} manage-node {{ hostvars[item].openshift.common.hostname | lower }} --schedulable={{ 'true' if hostvars[item].openshift.node.schedulable | bool else 'false' }}
+ {{ openshift.common.admin_binary }} manage-node {{ hostvars[item].openshift.node.nodename }} --schedulable={{ 'true' if hostvars[item].openshift.node.schedulable | bool else 'false' }}
--config={{ openshift_manage_node_kubeconfig }}
-n default
with_items: "{{ openshift_nodes }}"
- when: hostvars[item].openshift.common.hostname is defined
+ when: hostvars[item].openshift.node.nodename is defined
- name: Label nodes
command: >
- {{ openshift.common.client_binary }} label --overwrite node {{ hostvars[item].openshift.common.hostname | lower }} {{ hostvars[item].openshift.node.labels | oo_combine_dict }}
+ {{ openshift.common.client_binary }} label --overwrite node {{ hostvars[item].openshift.node.nodename }} {{ hostvars[item].openshift.node.labels | oo_combine_dict }}
--config={{ openshift_manage_node_kubeconfig }}
-n default
with_items: "{{ openshift_nodes }}"
- when: hostvars[item].openshift.common.hostname is defined and 'labels' in hostvars[item].openshift.node and hostvars[item].openshift.node.labels != {}
+ when: hostvars[item].openshift.node.nodename is defined and 'labels' in hostvars[item].openshift.node and hostvars[item].openshift.node.labels != {}
- name: Delete temp directory
file:
diff --git a/roles/openshift_master/handlers/main.yml b/roles/openshift_master/handlers/main.yml
index edb7369de..913f3b0ae 100644
--- a/roles/openshift_master/handlers/main.yml
+++ b/roles/openshift_master/handlers/main.yml
@@ -24,6 +24,10 @@
--cacert {{ openshift.common.config_base }}/master/ca.crt
{% endif %}
{{ openshift.master.api_url }}/healthz/ready
+ args:
+ # Disables the following warning:
+ # Consider using get_url or uri module rather than running curl
+ warn: no
register: api_available_output
until: api_available_output.stdout == 'ok'
retries: 120
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index d8a4aa9bb..ce2f96723 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -178,13 +178,14 @@
command: systemctl show {{ openshift.common.service_type }}-master.service
register: master_svc_show
changed_when: false
+ failed_when: false
- name: Stop and disable non-HA master when running HA
service:
name: "{{ openshift.common.service_type }}-master"
enabled: no
state: stopped
- when: openshift_master_ha | bool and 'LoadState=not-found' not in master_svc_show.stdout
+ when: openshift_master_ha | bool and master_svc_show.rc == 0 and 'LoadState=not-found' not in master_svc_show.stdout
- set_fact:
master_service_status_changed: "{{ start_result | changed }}"
diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2
index ced3eb76f..4d45e8591 100644
--- a/roles/openshift_master/templates/master.yaml.v1.j2
+++ b/roles/openshift_master/templates/master.yaml.v1.j2
@@ -164,6 +164,9 @@ networkConfig:
# serviceNetworkCIDR must match kubernetesMasterConfig.servicesSubnet
serviceNetworkCIDR: {{ openshift.common.portal_net }}
externalIPNetworkCIDRs: {{ openshift_master_external_ip_network_cidrs | default(["0.0.0.0/0"]) | to_padded_yaml(1,2) }}
+{% if openshift_master_ingress_ip_network_cidr is defined %}
+ ingressIPNetworkCIDR: {{ openshift_master_ingress_ip_network_cidr }}
+{% endif %}
oauthConfig:
{% if 'oauth_always_show_provider_selection' in openshift.master %}
alwaysShowProviderSelection: {{ openshift.master.oauth_always_show_provider_selection }}
diff --git a/roles/openshift_metrics/handlers/main.yml b/roles/openshift_metrics/handlers/main.yml
index edb7369de..913f3b0ae 100644
--- a/roles/openshift_metrics/handlers/main.yml
+++ b/roles/openshift_metrics/handlers/main.yml
@@ -24,6 +24,10 @@
--cacert {{ openshift.common.config_base }}/master/ca.crt
{% endif %}
{{ openshift.master.api_url }}/healthz/ready
+ args:
+ # Disables the following warning:
+ # Consider using get_url or uri module rather than running curl
+ warn: no
register: api_available_output
until: api_available_output.stdout == 'ok'
retries: 120
diff --git a/roles/openshift_named_certificates/tasks/main.yml b/roles/openshift_named_certificates/tasks/main.yml
index 7f20cf401..1bcf9ef67 100644
--- a/roles/openshift_named_certificates/tasks/main.yml
+++ b/roles/openshift_named_certificates/tasks/main.yml
@@ -28,19 +28,19 @@
- name: Land named certificates
copy:
src: "{{ item.certfile }}"
- dest: "{{ named_certs_dir }}"
+ dest: "{{ named_certs_dir }}/{{ item.certfile | basename }}"
with_items: "{{ named_certificates }}"
- name: Land named certificate keys
copy:
src: "{{ item.keyfile }}"
- dest: "{{ named_certs_dir }}"
+ dest: "{{ named_certs_dir }}/{{ item.keyfile | basename }}"
mode: 0600
with_items: "{{ named_certificates }}"
- name: Land named CA certificates
copy:
src: "{{ item }}"
- dest: "{{ named_certs_dir }}"
+ dest: "{{ named_certs_dir }}/{{ item | basename }}"
mode: 0600
with_items: "{{ named_certificates | oo_collect('cafile') }}"
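Appending | basename makes the destination file name explicit instead of relying on the copy module's handling of a bare directory path. The filter simply strips the directory part (the path is hypothetical):

- debug:
    msg: "{{ '/host/certs/wildcard.example.com.crt' | basename }}"   # -> wildcard.example.com.crt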
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index 995169dd6..be07bd2d3 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -93,9 +93,9 @@
create: true
with_items:
- regex: '^AWS_ACCESS_KEY_ID='
- line: "AWS_ACCESS_KEY_ID={{ openshift_cloudprovider_aws_access_key }}"
+ line: "AWS_ACCESS_KEY_ID={{ openshift_cloudprovider_aws_access_key | default('') }}"
- regex: '^AWS_SECRET_ACCESS_KEY='
- line: "AWS_SECRET_ACCESS_KEY={{ openshift_cloudprovider_aws_secret_key }}"
+ line: "AWS_SECRET_ACCESS_KEY={{ openshift_cloudprovider_aws_secret_key | default('') }}"
when: "openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined"
notify:
- restart node
@@ -134,6 +134,10 @@
command: >
curl --silent --cacert {{ openshift.common.config_base }}/node/ca.crt
{{ openshift_node_master_api_url }}/healthz/ready
+ args:
+ # Disables the following warning:
+ # Consider using get_url or uri module rather than running curl
+ warn: no
register: api_available_output
until: api_available_output.stdout == 'ok'
retries: 120
diff --git a/roles/openshift_node/tasks/systemd_units.yml b/roles/openshift_node/tasks/systemd_units.yml
index 98ef1ffd4..a2192a4d0 100644
--- a/roles/openshift_node/tasks/systemd_units.yml
+++ b/roles/openshift_node/tasks/systemd_units.yml
@@ -56,12 +56,12 @@
create: true
with_items:
- regex: '^HTTP_PROXY='
- line: "HTTP_PROXY={{ openshift.common.http_proxy }}"
+ line: "HTTP_PROXY={{ openshift.common.http_proxy | default('') }}"
- regex: '^HTTPS_PROXY='
- line: "HTTPS_PROXY={{ openshift.common.https_proxy }}"
+ line: "HTTPS_PROXY={{ openshift.common.https_proxy | default('') }}"
- regex: '^NO_PROXY='
- line: "NO_PROXY={{ openshift.common.no_proxy | join(',') }},{{ openshift.common.portal_net }},{{ hostvars[groups.oo_first_master.0].openshift.master.sdn_cluster_network_cidr }}"
- when: "{{ openshift.common.http_proxy is defined and openshift.common.http_proxy != '' }}"
+ line: "NO_PROXY={{ openshift.common.no_proxy | default([]) | join(',') }},{{ openshift.common.portal_net }},{{ hostvars[groups.oo_first_master.0].openshift.master.sdn_cluster_network_cidr }}"
+ when: ('http_proxy' in openshift.common and openshift.common.http_proxy != '')
notify:
- restart node
diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2
index 68d153052..9bcaf4d84 100644
--- a/roles/openshift_node/templates/node.yaml.v1.j2
+++ b/roles/openshift_node/templates/node.yaml.v1.j2
@@ -33,7 +33,7 @@ networkConfig:
{% if openshift.node.set_node_ip | bool %}
nodeIP: {{ openshift.common.ip }}
{% endif %}
-nodeName: {{ openshift.common.hostname | lower }}
+nodeName: {{ openshift.node.nodename }}
podManifestConfig:
servingInfo:
bindAddress: 0.0.0.0:10250
diff --git a/roles/openshift_node_certificates/handlers/main.yml b/roles/openshift_node_certificates/handlers/main.yml
new file mode 100644
index 000000000..f2299cecf
--- /dev/null
+++ b/roles/openshift_node_certificates/handlers/main.yml
@@ -0,0 +1,10 @@
+---
+- name: update ca trust
+ command: update-ca-trust
+ notify:
+ - restart docker after updating ca trust
+
+- name: restart docker after updating ca trust
+ service:
+ name: docker
+ state: restarted
diff --git a/roles/openshift_node_certificates/tasks/main.yml b/roles/openshift_node_certificates/tasks/main.yml
index fef7caab8..80ab4bb1d 100644
--- a/roles/openshift_node_certificates/tasks/main.yml
+++ b/roles/openshift_node_certificates/tasks/main.yml
@@ -91,6 +91,9 @@
-C {{ openshift_node_generated_config_dir }} .
args:
creates: "{{ openshift_node_generated_config_dir }}.tgz"
+ # Disables the following warning:
+ # Consider using unarchive module rather than running tar
+ warn: no
when: node_certs_missing | bool
delegate_to: "{{ openshift_ca_host }}"
@@ -121,3 +124,14 @@
when: node_certs_missing | bool
delegate_to: localhost
become: no
+
+- name: Copy OpenShift CA to system CA trust
+ copy:
+ src: "{{ item.cert }}"
+ dest: "/etc/pki/ca-trust/source/anchors/{{ item.id }}-{{ item.cert | basename }}"
+ remote_src: yes
+ with_items:
+ - id: openshift
+ cert: "{{ openshift_node_cert_dir }}/ca.crt"
+ notify:
+ - update ca trust
diff --git a/roles/openshift_repos/handlers/main.yml b/roles/openshift_repos/handlers/main.yml
index 198fc7d6e..cdb0d8a48 100644
--- a/roles/openshift_repos/handlers/main.yml
+++ b/roles/openshift_repos/handlers/main.yml
@@ -1,3 +1,7 @@
---
- name: refresh cache
command: "{{ ansible_pkg_mgr }} clean all"
+ args:
+ # Disables the following warning:
+ # Consider using yum module rather than running yum
+ warn: no
diff --git a/roles/openshift_storage_nfs/tasks/main.yml b/roles/openshift_storage_nfs/tasks/main.yml
index 08d0b8540..4716c77ae 100644
--- a/roles/openshift_storage_nfs/tasks/main.yml
+++ b/roles/openshift_storage_nfs/tasks/main.yml
@@ -28,6 +28,8 @@
with_items:
- "{{ openshift.hosted.registry }}"
- "{{ openshift.hosted.metrics }}"
+ - "{{ openshift.hosted.logging }}"
+
- name: Configure exports
template:
diff --git a/roles/openshift_storage_nfs/templates/exports.j2 b/roles/openshift_storage_nfs/templates/exports.j2
index d6d936b72..2d6dd85e3 100644
--- a/roles/openshift_storage_nfs/templates/exports.j2
+++ b/roles/openshift_storage_nfs/templates/exports.j2
@@ -1,2 +1,3 @@
{{ openshift.hosted.registry.storage.nfs.directory }}/{{ openshift.hosted.registry.storage.volume.name }} {{ openshift.hosted.registry.storage.nfs.options }}
{{ openshift.hosted.metrics.storage.nfs.directory }}/{{ openshift.hosted.metrics.storage.volume.name }} {{ openshift.hosted.metrics.storage.nfs.options }}
+{{ openshift.hosted.logging.storage.nfs.directory }}/{{ openshift.hosted.logging.storage.volume.name }} {{ openshift.hosted.logging.storage.nfs.options }}
diff --git a/roles/openshift_version/meta/main.yml b/roles/openshift_version/meta/main.yml
index 70974da17..37c80c29e 100644
--- a/roles/openshift_version/meta/main.yml
+++ b/roles/openshift_version/meta/main.yml
@@ -15,4 +15,4 @@ dependencies:
- role: openshift_repos
- role: openshift_docker_facts
- role: docker
- when: openshift.common.is_containerized | default(False) | bool
+ when: openshift.common.is_containerized | default(False) | bool and not skip_docker_role | default(False) | bool
diff --git a/roles/os_firewall/tasks/firewall/iptables.yml b/roles/os_firewall/tasks/firewall/iptables.yml
index 774916798..470d4f4f9 100644
--- a/roles/os_firewall/tasks/firewall/iptables.yml
+++ b/roles/os_firewall/tasks/firewall/iptables.yml
@@ -1,6 +1,10 @@
---
- name: Check if firewalld is installed
command: rpm -q firewalld
+ args:
+ # Disables the following warning:
+ # Consider using yum, dnf or zypper module rather than running rpm
+ warn: no
register: pkg_check
failed_when: pkg_check.rc > 1
changed_when: no
diff --git a/roles/rhel_subscribe/tasks/enterprise.yml b/roles/rhel_subscribe/tasks/enterprise.yml
index 8d11276d0..291df6822 100644
--- a/roles/rhel_subscribe/tasks/enterprise.yml
+++ b/roles/rhel_subscribe/tasks/enterprise.yml
@@ -7,7 +7,7 @@
when: deployment_type == 'enterprise'
- set_fact:
- default_ose_version: '3.2'
+ default_ose_version: '3.3'
when: deployment_type in ['atomic-enterprise', 'openshift-enterprise']
- set_fact:
@@ -16,7 +16,7 @@
- fail:
msg: "{{ ose_version }} is not a valid version for {{ deployment_type }} deployment type"
when: ( deployment_type == 'enterprise' and ose_version not in ['3.0'] ) or
- ( deployment_type in ['atomic-enterprise', 'openshift-enterprise'] and ose_version not in ['3.1', '3.2'] )
+ ( deployment_type in ['atomic-enterprise', 'openshift-enterprise'] and ose_version not in ['3.1', '3.2', '3.3'] )
- name: Enable RHEL repositories
command: subscription-manager repos \
diff --git a/roles/rhel_subscribe/tasks/main.yml b/roles/rhel_subscribe/tasks/main.yml
index 343020dce..ba3b9a923 100644
--- a/roles/rhel_subscribe/tasks/main.yml
+++ b/roles/rhel_subscribe/tasks/main.yml
@@ -4,7 +4,7 @@
# to make it able to enable repositories
- set_fact:
- rhel_subscription_pool: "{{ lookup('oo_option', 'rhel_subscription_pool') | default(rhsub_pool, True) | default('OpenShift Enterprise, Premium*', True) }}"
+ rhel_subscription_pool: "{{ lookup('oo_option', 'rhel_subscription_pool') | default(rhsub_pool, True) | default('Red Hat OpenShift Container Platform, Premium*', True) }}"
rhel_subscription_user: "{{ lookup('oo_option', 'rhel_subscription_user') | default(rhsub_user, True) | default(omit, True) }}"
rhel_subscription_pass: "{{ lookup('oo_option', 'rhel_subscription_pass') | default(rhsub_pass, True) | default(omit, True) }}"
rhel_subscription_server: "{{ lookup('oo_option', 'rhel_subscription_server') | default(rhsub_server) }}"
diff --git a/utils/Makefile b/utils/Makefile
index 79c27626a..59aff92fd 100644
--- a/utils/Makefile
+++ b/utils/Makefile
@@ -25,6 +25,12 @@ NAME := oo-install
TESTPACKAGE := oo-install
SHORTNAME := ooinstall
+# This doesn't evaluate until it's called. The -D argument is the
+# directory of the target file ($@), kinda like `dirname`.
+ASCII2MAN = a2x -D $(dir $@) -d manpage -f manpage $<
+MANPAGES := docs/man/man1/atomic-openshift-installer.1
+VERSION := 1.3
+
sdist: clean
python setup.py sdist
rm -fR $(SHORTNAME).egg-info
@@ -35,6 +41,21 @@ clean:
@rm -fR build dist rpm-build MANIFEST htmlcov .coverage cover ooinstall.egg-info oo-install
@rm -fR $(NAME)env
+
+# To force a rebuild of the docs run 'touch' on any *.in file under
+# docs/man/man1/
+docs: $(MANPAGES)
+
+# Regenerate %.1.asciidoc if %.1.asciidoc.in has been modified more
+# recently than %.1.asciidoc.
+%.1.asciidoc: %.1.asciidoc.in
+ sed "s/%VERSION%/$(VERSION)/" $< > $@
+
+# Regenerate %.1 if %.1.asciidoc or VERSION has been modified more
+# recently than %.1. (Implicitly runs the %.1.asciidoc recipe)
+%.1: %.1.asciidoc
+ $(ASCII2MAN)
+
viewcover:
xdg-open cover/index.html
@@ -59,7 +80,7 @@ ci-pylint:
@echo "#############################################"
@echo "# Running PyLint Tests in virtualenv"
@echo "#############################################"
- . $(NAME)env/bin/activate && python -m pylint --rcfile ../git/.pylintrc src/ooinstall/cli_installer.py src/ooinstall/oo_config.py src/ooinstall/openshift_ansible.py src/ooinstall/variants.py
+ . $(NAME)env/bin/activate && python -m pylint --rcfile ../git/.pylintrc src/ooinstall/cli_installer.py src/ooinstall/oo_config.py src/ooinstall/openshift_ansible.py src/ooinstall/variants.py ../callback_plugins/openshift_quick_installer.py
ci-list-deps:
@echo "#############################################"
@@ -72,12 +93,14 @@ ci-pyflakes:
@echo "# Running Pyflakes Compliance Tests in virtualenv"
@echo "#################################################"
. $(NAME)env/bin/activate && pyflakes src/ooinstall/*.py
+ . $(NAME)env/bin/activate && pyflakes ../callback_plugins/openshift_quick_installer.py
ci-pep8:
@echo "#############################################"
@echo "# Running PEP8 Compliance Tests in virtualenv"
@echo "#############################################"
. $(NAME)env/bin/activate && pep8 --ignore=E501,E121,E124 src/$(SHORTNAME)/
+ . $(NAME)env/bin/activate && pep8 --ignore=E501,E121,E124 ../callback_plugins/openshift_quick_installer.py
ci: clean virtualenv ci-list-deps ci-pep8 ci-pylint ci-pyflakes ci-unittests
:
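
The new docs targets work in two stages: the pattern rule substitutes %VERSION% into the .asciidoc source, and ASCII2MAN runs a2x with -D $(dir $@) so the roff output lands next to the target. A rough Python equivalent of those two recipes, assuming a2x from the asciidoc package is installed, is:

    # Rough Python equivalent of the two new recipes (assumes a2x is installed):
    import subprocess

    VERSION = "1.3"
    src_in = "docs/man/man1/atomic-openshift-installer.1.asciidoc.in"
    asciidoc = src_in[:-len(".in")]                    # the %.1.asciidoc target
    out_dir = "docs/man/man1"

    # %.1.asciidoc: the sed step, substituting %VERSION%
    with open(src_in) as fh:
        text = fh.read().replace("%VERSION%", VERSION)
    with open(asciidoc, "w") as fh:
        fh.write(text)

    # %.1: the ASCII2MAN step; -D points a2x at the target's directory
    subprocess.check_call(["a2x", "-D", out_dir, "-d", "manpage", "-f", "manpage", asciidoc])
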
diff --git a/utils/docs/man/man1/atomic-openshift-installer.1 b/utils/docs/man/man1/atomic-openshift-installer.1
new file mode 100644
index 000000000..4da82191b
--- /dev/null
+++ b/utils/docs/man/man1/atomic-openshift-installer.1
@@ -0,0 +1,186 @@
+'\" t
+.\" Title: atomic-openshift-installer
+.\" Author: [see the "AUTHOR" section]
+.\" Generator: DocBook XSL Stylesheets v1.78.1 <http://docbook.sf.net/>
+.\" Date: 09/28/2016
+.\" Manual: atomic-openshift-installer
+.\" Source: atomic-openshift-utils 1.3
+.\" Language: English
+.\"
+.TH "ATOMIC\-OPENSHIFT\-I" "1" "09/28/2016" "atomic\-openshift\-utils 1\&.3" "atomic\-openshift\-installer"
+.\" -----------------------------------------------------------------
+.\" * Define some portability stuff
+.\" -----------------------------------------------------------------
+.\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+.\" http://bugs.debian.org/507673
+.\" http://lists.gnu.org/archive/html/groff/2009-02/msg00013.html
+.\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+.ie \n(.g .ds Aq \(aq
+.el .ds Aq '
+.\" -----------------------------------------------------------------
+.\" * set default formatting
+.\" -----------------------------------------------------------------
+.\" disable hyphenation
+.nh
+.\" disable justification (adjust text to left margin only)
+.ad l
+.\" -----------------------------------------------------------------
+.\" * MAIN CONTENT STARTS HERE *
+.\" -----------------------------------------------------------------
+.SH "NAME"
+atomic-openshift-installer \- Interactive OpenShift Container Platform (OCP) installer
+.SH "SYNOPSIS"
+.sp
+atomic\-openshift\-installer [OPTIONS] COMMAND [OPTS]
+.SH "DESCRIPTION"
+.sp
+\fBatomic\-openshift\-installer\fR makes the process for installing OCP easier by interactively gathering the data needed to run on each host\&. It can also be run in unattended mode if provided with a configuration file\&.
+.SH "OPTIONS"
+.sp
+The following options are common to all commands\&.
+.PP
+\fB\-u\fR, \fB\-\-unattended\fR
+.RS 4
+Run installer in
+\fBunattended\fR
+mode\&. You will not be prompted to answer any questions\&.
+.RE
+.PP
+\fB\-c\fR, \fB\-\-configuration\fR \fIPATH\fR
+.RS 4
+Provide an alternate
+\fIPATH\fR
+to an
+\fIinstaller\&.cfg\&.yml\fR
+file\&.
+.RE
+.PP
+\fB\-a\fR \fIDIRECTORY\fR, \fB\-\-ansible\-playbook\-directory\fR \fIDIRECTORY\fR
+.RS 4
+Manually set the
+\fIDIRECTORY\fR
+in which to look for Ansible playbooks\&.
+.RE
+.PP
+\fB\-\-ansible\-log\-path\fR \fIPATH\fR
+.RS 4
+Specify the
+\fIPATH\fR
+of the directory in which to save Ansible logs\&.
+.RE
+.PP
+\fB\-v\fR, \fB\-\-verbose\fR
+.RS 4
+Run the installer with more verbosity\&.
+.RE
+.PP
+\fB\-d\fR, \fB\-\-debug\fR
+.RS 4
+Enable installer debugging\&. Logs are saved in
+\fI/tmp/installer\&.txt\fR\&.
+.RE
+.PP
+\fB\-h\fR, \fB\-\-help\fR
+.RS 4
+Show the usage help and exit\&.
+.RE
+.SH "COMMANDS"
+.sp
+\fBatomic\-openshift\-installer\fR has three modes of operation:
+.sp
+.RS 4
+.ie n \{\
+\h'-04'\(bu\h'+03'\c
+.\}
+.el \{\
+.sp -1
+.IP \(bu 2.3
+.\}
+\fBinstall\fR
+.RE
+.sp
+.RS 4
+.ie n \{\
+\h'-04'\(bu\h'+03'\c
+.\}
+.el \{\
+.sp -1
+.IP \(bu 2.3
+.\}
+\fBuninstall\fR
+.RE
+.sp
+.RS 4
+.ie n \{\
+\h'-04'\(bu\h'+03'\c
+.\}
+.el \{\
+.sp -1
+.IP \(bu 2.3
+.\}
+\fBupgrade\fR
+.RE
+.sp
+The options specific to each command are described in the following sections\&.
+.SH "INSTALL"
+.sp
+The \fBinstall\fR command will guide you through steps required to install an OCP cluster\&. After all of the required information has been collected (target hosts, storage options, high\-availability), the installation will begin\&.
+.PP
+\fB\-f\fR, \fB\-\-force\fR
+.RS 4
+Forces an installation\&. This means that hosts with existing installations will be reinstalled if required\&.
+.RE
+.PP
+\fB\-\-gen\-inventory\fR
+.RS 4
+Generate an Ansible inventory file and exit\&. The default location for the inventory file is
+\fI~/\&.config/openshift/hosts\fR\&.
+.RE
+.SH "UNINSTALL"
+.sp
+The \fBuninstall\fR command will uninstall OCP from your target hosts\&. This command has no additional options\&.
+.SH "UPGRADE"
+.sp
+The \fBupgrade\fR command will upgrade a cluster of hosts to a newer version of OCP\&.
+.PP
+\fB\-l\fR, \fB\-\-latest\-minor\fR
+.RS 4
+Upgrade to the latest minor version\&. For example, if you are running version
+\fB3\&.2\&.1\fR
+then this could upgrade you to
+\fB3\&.2\&.2\fR\&.
+.RE
+.PP
+\fB\-n\fR, \fB\-\-next\-major\fR
+.RS 4
+Upgrade to the latest major version\&. For example, if you are running version
+\fB3\&.2\fR
+then this could upgrade you to
+\fB3\&.3\fR\&.
+.RE
+.SH "FILES"
+.sp
+\fB~/\&.config/openshift/installer\&.cfg\&.yml\fR \(em Installer configuration file\&. Can be used to generate an inventory later or start an unattended installation\&.
+.sp
+\fB~/\&.config/openshift/hosts\fR \(em Generated Ansible inventory file\&. Used to run the Ansible playbooks for install, uninstall, and upgrades\&.
+.sp
+\fB/tmp/ansible\&.log\fR \(em The default location of the ansible log file\&.
+.sp
+\fB/tmp/installer\&.txt\fR \(em The location of the log file for debugging the installer\&.
+.SH "AUTHOR"
+.sp
+Red Hat OpenShift Productization team
+.sp
+For a complete list of contributors, please visit the GitHub charts page\&.
+.SH "COPYRIGHT"
+.sp
+Copyright \(co 2016 Red Hat, Inc\&.
+.sp
+\fBatomic\-openshift\-installer\fR is released under the terms of the ASL 2\&.0 license\&.
+.SH "SEE ALSO"
+.sp
+\fBansible\fR(1), \fBansible\-playbook\fR(1)
+.sp
+\fBThe openshift\-ansible GitHub Project\fR \(em https://github\&.com/openshift/openshift\-ansible/
+.sp
+\fBThe atomic\-openshift\-installer Documentation\fR \(em https://docs\&.openshift\&.com/container\-platform/3\&.3/install_config/install/quick_install\&.html
diff --git a/utils/docs/man/man1/atomic-openshift-installer.1.asciidoc.in b/utils/docs/man/man1/atomic-openshift-installer.1.asciidoc.in
new file mode 100644
index 000000000..64e5d14a3
--- /dev/null
+++ b/utils/docs/man/man1/atomic-openshift-installer.1.asciidoc.in
@@ -0,0 +1,167 @@
+atomic-openshift-installer(1)
+=============================
+:man source: atomic-openshift-utils
+:man version: %VERSION%
+:man manual: atomic-openshift-installer
+
+
+NAME
+----
+atomic-openshift-installer - Interactive OpenShift Container Platform (OCP) installer
+
+
+SYNOPSIS
+--------
+atomic-openshift-installer [OPTIONS] COMMAND [OPTS]
+
+
+DESCRIPTION
+-----------
+
+**atomic-openshift-installer** makes the process for installing OCP
+easier by interactively gathering the data needed to run on each
+host. It can also be run in unattended mode if provided with a
+configuration file.
+
+
+OPTIONS
+-------
+
+The following options are common to all commands.
+
+*-u*, *--unattended*::
+
+Run installer in **unattended** mode. You will not be prompted to
+answer any questions.
+
+
+*-c*, *--configuration* 'PATH'::
+
+Provide an alternate 'PATH' to an 'installer.cfg.yml' file.
+
+
+*-a* 'DIRECTORY', *--ansible-playbook-directory* 'DIRECTORY'::
+
+Manually set the 'DIRECTORY' in which to look for Ansible playbooks.
+
+
+*--ansible-log-path* 'PATH'::
+
+Specify the 'PATH' of the directory in which to save Ansible logs.
+
+
+*-v*, *--verbose*::
+
+Run the installer with more verbosity.
+
+
+*-d*, *--debug*::
+
+Enable installer debugging. Logs are saved in '/tmp/installer.txt'.
+
+
+*-h*, *--help*::
+
+Show the usage help and exit.
+
+
+COMMANDS
+--------
+
+**atomic-openshift-installer** has three modes of operation:
+
+* **install**
+* **uninstall**
+* **upgrade**
+
+The options specific to each command are described in the following
+sections.
+
+
+
+INSTALL
+-------
+
+The **install** command will guide you through steps required to
+install an OCP cluster. After all of the required information has been
+collected (target hosts, storage options, high-availability), the
+installation will begin.
+
+*-f*, *--force*::
+
+Forces an installation. This means that hosts with existing
+installations will be reinstalled if required.
+
+*--gen-inventory*::
+
+Generate an Ansible inventory file and exit. The default location for
+the inventory file is '~/.config/openshift/hosts'.
+
+
+UNINSTALL
+---------
+
+The **uninstall** command will uninstall OCP from your target
+hosts. This command has no additional options.
+
+
+UPGRADE
+-------
+
+The **upgrade** command will upgrade a cluster of hosts to a newer
+version of OCP.
+
+*-l*, *--latest-minor*::
+
+Upgrade to the latest minor version. For example, if you are running
+version **3.2.1** then this could upgrade you to **3.2.2**.
+
+*-n*, *--next-major*::
+
+Upgrade to the latest major version. For example, if you are running
+version **3.2** then this could upgrade you to **3.3**.
+
+
+
+FILES
+-----
+
+*~/.config/openshift/installer.cfg.yml* -- Installer configuration
+ file. Can be used to generate an inventory later or start an
+ unattended installation.
+
+*~/.config/openshift/hosts* -- Generated Ansible inventory file. Used
+ to run the Ansible playbooks for install, uninstall, and upgrades.
+
+*/tmp/ansible.log* -- The default location of the ansible log file.
+
+*/tmp/installer.txt* -- The location of the log file for debugging the
+ installer.
+
+
+AUTHOR
+------
+
+Red Hat OpenShift Productization team
+
+For a complete list of contributors, please visit the GitHub charts
+page.
+
+
+
+COPYRIGHT
+---------
+Copyright © 2016 Red Hat, Inc.
+
+**atomic-openshift-installer** is released under the terms of the ASL
+2.0 license.
+
+
+
+SEE ALSO
+--------
+*ansible*(1), *ansible-playbook*(1)
+
+*The openshift-ansible GitHub Project* -- <https://github.com/openshift/openshift-ansible/>
+
+*The atomic-openshift-installer Documentation* -- <https://docs.openshift.com/container-platform/3.3/install_config/install/quick_install.html>
diff --git a/utils/etc/ansible-quiet.cfg b/utils/etc/ansible-quiet.cfg
new file mode 100644
index 000000000..0eb0efa49
--- /dev/null
+++ b/utils/etc/ansible-quiet.cfg
@@ -0,0 +1,33 @@
+# config file for ansible -- http://ansible.com/
+# ==============================================
+
+# This config file provides examples for running
+# the OpenShift playbooks with the provided
+# inventory scripts. Only global defaults are
+# left uncommented
+
+[defaults]
+# Add the roles directory to the roles path
+roles_path = roles/
+
+# Set the log_path
+log_path = /tmp/ansible.log
+
+forks = 10
+host_key_checking = False
+nocows = 1
+
+retry_files_enabled = False
+
+deprecation_warnings=False
+
+# Need to handle:
+# inventory - derive from OO_ANSIBLE_DIRECTORY env var
+# callback_plugins - derive from pkg_resource.resource_filename
+# private_key_file - prompt if missing
+# remote_tmp - set if provided by user (cli)
+# ssh_args - set if provided by user (cli)
+# control_path
+
+stdout_callback = openshift_quick_installer
+callback_plugins = /usr/share/ansible_plugins/callback_plugins
diff --git a/utils/etc/ansible.cfg b/utils/etc/ansible.cfg
index a53ab6cb1..3425e7e62 100644
--- a/utils/etc/ansible.cfg
+++ b/utils/etc/ansible.cfg
@@ -19,10 +19,12 @@ nocows = 1
retry_files_enabled = False
+deprecation_warnings = False
+
# Need to handle:
# inventory - derive from OO_ANSIBLE_DIRECTORY env var
# callback_plugins - derive from pkg_resource.resource_filename
# private_key_file - prompt if missing
# remote_tmp - set if provided by user (cli)
# ssh_args - set if provided by user (cli)
-# control_path
\ No newline at end of file
+# control_path
diff --git a/utils/setup.py b/utils/setup.py
index eac1b4b2e..563897bb1 100644
--- a/utils/setup.py
+++ b/utils/setup.py
@@ -62,7 +62,7 @@ setup(
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
package_data={
- 'ooinstall': ['ansible.cfg', 'ansible_plugins/*'],
+ 'ooinstall': ['ansible.cfg', 'ansible-quiet.cfg', 'ansible_plugins/*'],
},
# Although 'package_data' is the preferred approach, in some case you may
diff --git a/utils/src/MANIFEST.in b/utils/src/MANIFEST.in
index d4153e738..216f57e9c 100644
--- a/utils/src/MANIFEST.in
+++ b/utils/src/MANIFEST.in
@@ -7,3 +7,4 @@ include DESCRIPTION.rst
# it's already declared in setup.py
include ooinstall/*
include ansible.cfg
+include ansible-quiet.cfg
diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py
index 85b4d29cb..347ae7ec9 100644
--- a/utils/src/ooinstall/cli_installer.py
+++ b/utils/src/ooinstall/cli_installer.py
@@ -25,6 +25,7 @@ installer_file_handler.setLevel(logging.DEBUG)
installer_log.addHandler(installer_file_handler)
DEFAULT_ANSIBLE_CONFIG = '/usr/share/atomic-openshift-utils/ansible.cfg'
+QUIET_ANSIBLE_CONFIG = '/usr/share/atomic-openshift-utils/ansible-quiet.cfg'
DEFAULT_PLAYBOOK_DIR = '/usr/share/ansible/openshift-ansible/'
UPGRADE_MAPPINGS = {
@@ -750,7 +751,9 @@ def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force, verbose):
hosts_to_run_on.remove(host)
# Handle the cases where we know about uninstalled systems
- if len(uninstalled_hosts) > 0:
+ # TODO: This logic is getting hard to understand.
+ # we should revise all this to be cleaner.
+ if not force and len(uninstalled_hosts) > 0:
for uninstalled_host in uninstalled_hosts:
click.echo("{} is currently uninstalled".format(uninstalled_host))
# Fall through
@@ -815,12 +818,6 @@ def set_infra_nodes(hosts):
# callback=validate_ansible_dir,
default=DEFAULT_PLAYBOOK_DIR,
envvar='OO_ANSIBLE_PLAYBOOK_DIRECTORY')
-@click.option('--ansible-config',
- type=click.Path(file_okay=True,
- dir_okay=False,
- writable=True,
- readable=True),
- default=None)
@click.option('--ansible-log-path',
type=click.Path(file_okay=True,
dir_okay=False,
@@ -836,7 +833,7 @@ def set_infra_nodes(hosts):
# pylint: disable=too-many-arguments
# pylint: disable=line-too-long
# Main CLI entrypoint, not much we can do about too many arguments.
-def cli(ctx, unattended, configuration, ansible_playbook_directory, ansible_config, ansible_log_path, verbose, debug):
+def cli(ctx, unattended, configuration, ansible_playbook_directory, ansible_log_path, verbose, debug):
"""
atomic-openshift-installer makes the process for installing OSE or AEP
easier by interactively gathering the data needed to run on each host.
@@ -855,7 +852,6 @@ def cli(ctx, unattended, configuration, ansible_playbook_directory, ansible_conf
ctx.obj = {}
ctx.obj['unattended'] = unattended
ctx.obj['configuration'] = configuration
- ctx.obj['ansible_config'] = ansible_config
ctx.obj['ansible_log_path'] = ansible_log_path
ctx.obj['verbose'] = verbose
@@ -876,13 +872,13 @@ def cli(ctx, unattended, configuration, ansible_playbook_directory, ansible_conf
oo_cfg.ansible_playbook_directory = ansible_playbook_directory
ctx.obj['ansible_playbook_directory'] = ansible_playbook_directory
- if ctx.obj['ansible_config']:
- oo_cfg.settings['ansible_config'] = ctx.obj['ansible_config']
- elif 'ansible_config' not in oo_cfg.settings and \
- os.path.exists(DEFAULT_ANSIBLE_CONFIG):
+ if os.path.exists(DEFAULT_ANSIBLE_CONFIG):
# If we're installed by RPM this file should exist and we can use it as our default:
oo_cfg.settings['ansible_config'] = DEFAULT_ANSIBLE_CONFIG
+ if os.path.exists(QUIET_ANSIBLE_CONFIG):
+ oo_cfg.settings['ansible_quiet_config'] = QUIET_ANSIBLE_CONFIG
+
oo_cfg.settings['ansible_log_path'] = ctx.obj['ansible_log_path']
ctx.obj['oo_cfg'] = oo_cfg
@@ -1084,7 +1080,6 @@ more:
http://docs.openshift.com/enterprise/latest/admin_guide/overview.html
"""
click.echo(message)
- click.pause()
cli.add_command(install)
cli.add_command(upgrade)
diff --git a/utils/src/ooinstall/oo_config.py b/utils/src/ooinstall/oo_config.py
index 393b36f6f..697ac9c08 100644
--- a/utils/src/ooinstall/oo_config.py
+++ b/utils/src/ooinstall/oo_config.py
@@ -12,7 +12,6 @@ installer_log = logging.getLogger('installer')
CONFIG_PERSIST_SETTINGS = [
'ansible_ssh_user',
'ansible_callback_facts_yaml',
- 'ansible_config',
'ansible_inventory_path',
'ansible_log_path',
'deployment',
@@ -27,6 +26,19 @@ DEPLOYMENT_VARIABLES_BLACKLIST = [
'roles',
]
+HOST_VARIABLES_BLACKLIST = [
+ 'ip',
+ 'public_ip',
+ 'hostname',
+ 'public_hostname',
+ 'node_labels',
+ 'containerized',
+ 'preconfigured',
+ 'schedulable',
+ 'other_variables',
+ 'roles',
+]
+
DEFAULT_REQUIRED_FACTS = ['ip', 'public_ip', 'hostname', 'public_hostname']
PRECONFIGURED_REQUIRED_FACTS = ['hostname', 'public_hostname']
@@ -67,7 +79,7 @@ class Host(object):
self.containerized = kwargs.get('containerized', False)
self.node_labels = kwargs.get('node_labels', '')
- # allowable roles: master, node, etcd, storage, master_lb, new
+ # allowable roles: master, node, etcd, storage, master_lb
self.roles = kwargs.get('roles', [])
self.other_variables = kwargs.get('other_variables', {})
@@ -87,11 +99,13 @@ class Host(object):
d = {}
for prop in ['ip', 'hostname', 'public_ip', 'public_hostname', 'connect_to',
- 'preconfigured', 'containerized', 'schedulable', 'roles', 'node_labels',
- 'other_variables']:
+ 'preconfigured', 'containerized', 'schedulable', 'roles', 'node_labels', ]:
# If the property is defined (not None or False), export it:
if getattr(self, prop):
d[prop] = getattr(self, prop)
+ for variable, value in self.other_variables.iteritems():
+ d[variable] = value
+
return d
def is_master(self):
@@ -203,7 +217,6 @@ class OOConfig(object):
role_list = loaded_config['deployment']['roles']
except KeyError as e:
print_read_config_error("No such key: {}".format(e), self.config_path)
- print "Error loading config, required key missing: {}".format(e)
sys.exit(0)
for setting in CONFIG_PERSIST_SETTINGS:
@@ -238,6 +251,10 @@ class OOConfig(object):
# Parse the hosts into DTO objects:
for host in host_list:
+ host['other_variables'] = {}
+ for variable, value in host.iteritems():
+ if variable not in HOST_VARIABLES_BLACKLIST:
+ host['other_variables'][variable] = value
self.deployment.hosts.append(Host(**host))
# Parse the roles into Objects
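
In other words, any per-host key the loader does not recognize is now swept into other_variables instead of being dropped, so it can be written back out to the generated inventory. A self-contained sketch of that filtering, with made-up host data:

    # Self-contained sketch of the new filtering (names mirror the diff, data made up):
    HOST_VARIABLES_BLACKLIST = [
        'ip', 'public_ip', 'hostname', 'public_hostname', 'node_labels',
        'containerized', 'preconfigured', 'schedulable', 'other_variables', 'roles',
    ]

    host = {
        'ip': '192.168.0.1',
        'hostname': 'a.example.com',
        'roles': ['node'],
        'openshift_docker_log_driver': 'journald',     # not a recognized Host property
    }

    host['other_variables'] = {
        key: value for key, value in host.items()
        if key not in HOST_VARIABLES_BLACKLIST
    }
    print(host['other_variables'])    # {'openshift_docker_log_driver': 'journald'}
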
@@ -308,6 +325,12 @@ class OOConfig(object):
if 'ansible_plugins_directory' not in self.settings:
self.settings['ansible_plugins_directory'] = \
resource_filename(__name__, 'ansible_plugins')
+ installer_log.debug("We think the ansible plugins directory should be: %s (it is not already set)",
+ self.settings['ansible_plugins_directory'])
+ else:
+ installer_log.debug("The ansible plugins directory is already set: %s",
+ self.settings['ansible_plugins_directory'])
+
if 'version' not in self.settings:
self.settings['version'] = 'v2'
diff --git a/utils/src/ooinstall/openshift_ansible.py b/utils/src/ooinstall/openshift_ansible.py
index 75d26c10a..80a79a6d2 100644
--- a/utils/src/ooinstall/openshift_ansible.py
+++ b/utils/src/ooinstall/openshift_ansible.py
@@ -7,6 +7,7 @@ import os
import logging
import yaml
from ooinstall.variants import find_variant
+from ooinstall.utils import debug_env
installer_log = logging.getLogger('installer')
@@ -30,6 +31,14 @@ VARIABLES_MAP = {
'proxy_exclude_hosts': 'openshift_no_proxy',
}
+HOST_VARIABLES_MAP = {
+ 'ip': 'openshift_ip',
+ 'public_ip': 'openshift_public_ip',
+ 'hostname': 'openshift_hostname',
+ 'public_hostname': 'openshift_public_hostname',
+ 'containerized': 'containerized',
+}
+
def set_config(cfg):
global CFG
@@ -175,7 +184,6 @@ def write_proxy_settings(base_inventory):
pass
-# pylint: disable=too-many-branches
def write_host(host, role, inventory, schedulable=None):
global CFG
@@ -183,22 +191,16 @@ def write_host(host, role, inventory, schedulable=None):
return
facts = ''
- if host.ip:
- facts += ' openshift_ip={}'.format(host.ip)
- if host.public_ip:
- facts += ' openshift_public_ip={}'.format(host.public_ip)
- if host.hostname:
- facts += ' openshift_hostname={}'.format(host.hostname)
- if host.public_hostname:
- facts += ' openshift_public_hostname={}'.format(host.public_hostname)
- if host.containerized:
- facts += ' containerized={}'.format(host.containerized)
+ for prop in HOST_VARIABLES_MAP:
+ if getattr(host, prop):
+ facts += ' {}={}'.format(HOST_VARIABLES_MAP.get(prop), getattr(host, prop))
+
if host.other_variables:
for variable, value in host.other_variables.iteritems():
facts += " {}={}".format(variable, value)
- if host.node_labels:
- if role == 'node':
- facts += ' openshift_node_labels="{}"'.format(host.node_labels)
+
+ if host.node_labels and role == 'node':
+ facts += ' openshift_node_labels="{}"'.format(host.node_labels)
# Distinguish between three states, no schedulability specified (use default),
# explicitly set to True, or explicitly set to False:
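
The refactor replaces the per-property if chain with a lookup table, so adding a new passthrough fact only means adding a map entry. A simplified, standalone sketch of the same idea:

    # Simplified, standalone sketch of the table-driven facts string:
    from types import SimpleNamespace

    HOST_VARIABLES_MAP = {
        'ip': 'openshift_ip',
        'public_ip': 'openshift_public_ip',
        'hostname': 'openshift_hostname',
        'public_hostname': 'openshift_public_hostname',
        'containerized': 'containerized',
    }

    host = SimpleNamespace(ip='192.168.0.1', public_ip=None, hostname='a.example.com',
                           public_hostname=None, containerized=False)

    facts = ''
    for prop, inventory_var in HOST_VARIABLES_MAP.items():
        if getattr(host, prop):                        # skip unset / falsy values
            facts += ' {}={}'.format(inventory_var, getattr(host, prop))

    print(facts)    # ' openshift_ip=192.168.0.1 openshift_hostname=a.example.com'
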
@@ -225,6 +227,9 @@ def load_system_facts(inventory_file, os_facts_path, env_vars, verbose=False):
Retrieves system facts from the remote systems.
"""
installer_log.debug("Inside load_system_facts")
+ installer_log.debug("load_system_facts will run with Ansible/Openshift environment variables:")
+ debug_env(env_vars)
+
FNULL = open(os.devnull, 'w')
args = ['ansible-playbook', '-v'] if verbose \
else ['ansible-playbook']
@@ -232,6 +237,8 @@ def load_system_facts(inventory_file, os_facts_path, env_vars, verbose=False):
'--inventory-file={}'.format(inventory_file),
os_facts_path])
installer_log.debug("Going to subprocess out to ansible now with these args: %s", ' '.join(args))
+ installer_log.debug("Subprocess will run with Ansible/Openshift environment variables:")
+ debug_env(env_vars)
status = subprocess.call(args, env=env_vars, stdout=FNULL)
if status != 0:
installer_log.debug("Exit status from subprocess was not 0")
@@ -280,17 +287,24 @@ def run_main_playbook(inventory_file, hosts, hosts_to_run_on, verbose=False):
facts_env = os.environ.copy()
if 'ansible_log_path' in CFG.settings:
facts_env['ANSIBLE_LOG_PATH'] = CFG.settings['ansible_log_path']
- if 'ansible_config' in CFG.settings:
- facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_config']
+
+ # override the ansible config for our main playbook run
+ if 'ansible_quiet_config' in CFG.settings:
+ facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_quiet_config']
+
return run_ansible(main_playbook_path, inventory_file, facts_env, verbose)
def run_ansible(playbook, inventory, env_vars, verbose=False):
+ installer_log.debug("run_ansible will run with Ansible/Openshift environment variables:")
+ debug_env(env_vars)
+
args = ['ansible-playbook', '-v'] if verbose \
else ['ansible-playbook']
args.extend([
'--inventory-file={}'.format(inventory),
playbook])
+ installer_log.debug("Going to subprocess out to ansible now with these args: %s", ' '.join(args))
return subprocess.call(args, env=env_vars)
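
Taken together with the new ansible-quiet.cfg, the effect is that the main playbook run now always exports ANSIBLE_CONFIG pointing at the quiet config (when it is installed), while fact gathering keeps the default config. A minimal sketch of the environment handed to the ansible-playbook subprocess, using the installer's default paths and a hypothetical inventory and playbook as assumptions:

    # Minimal sketch of the main-run environment (default paths assumed;
    # inventory and playbook names below are hypothetical):
    import os
    import subprocess

    settings = {
        'ansible_log_path': '/tmp/ansible.log',
        'ansible_quiet_config': '/usr/share/atomic-openshift-utils/ansible-quiet.cfg',
    }

    env = os.environ.copy()
    if 'ansible_log_path' in settings:
        env['ANSIBLE_LOG_PATH'] = settings['ansible_log_path']
    if 'ansible_quiet_config' in settings:
        # overrides any system-wide ansible.cfg for this one subprocess
        env['ANSIBLE_CONFIG'] = settings['ansible_quiet_config']

    subprocess.call(['ansible-playbook', '--inventory-file=/tmp/hosts', 'site.yml'], env=env)
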
diff --git a/utils/src/ooinstall/utils.py b/utils/src/ooinstall/utils.py
new file mode 100644
index 000000000..eb27a57e4
--- /dev/null
+++ b/utils/src/ooinstall/utils.py
@@ -0,0 +1,10 @@
+import logging
+
+installer_log = logging.getLogger('installer')
+
+
+def debug_env(env):
+ for k in sorted(env.keys()):
+ if k.startswith("OPENSHIFT") or k.startswith("ANSIBLE") or k.startswith("OO"):
+ installer_log.debug("{key}: {value}".format(
+ key=k, value=env[k]))
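
A quick usage sketch for the new helper (hypothetical values; only variables whose names start with OPENSHIFT, ANSIBLE, or OO are logged):

    # Quick usage sketch (assumes the module is importable as below):
    import logging
    from ooinstall.utils import debug_env

    logging.basicConfig(level=logging.DEBUG)
    debug_env({
        'ANSIBLE_CONFIG': '/usr/share/atomic-openshift-utils/ansible-quiet.cfg',
        'OO_ANSIBLE_PLAYBOOK_DIRECTORY': '/usr/share/ansible/openshift-ansible/',
        'HOME': '/root',        # filtered out: no matching prefix
    })
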
diff --git a/utils/test/cli_installer_tests.py b/utils/test/cli_installer_tests.py
index 6d9d443ff..34392777b 100644
--- a/utils/test/cli_installer_tests.py
+++ b/utils/test/cli_installer_tests.py
@@ -599,82 +599,96 @@ class UnattendedCliTests(OOCliFixture):
self.assertEquals('openshift-enterprise',
inventory.get('OSEv3:vars', 'deployment_type'))
- @patch('ooinstall.openshift_ansible.run_ansible')
- @patch('ooinstall.openshift_ansible.load_system_facts')
- def test_no_ansible_config_specified(self, load_facts_mock, run_ansible_mock):
- load_facts_mock.return_value = (MOCK_FACTS, 0)
- run_ansible_mock.return_value = 0
-
- config = SAMPLE_CONFIG % 'openshift-enterprise'
-
- self._ansible_config_test(load_facts_mock, run_ansible_mock,
- config, None, None)
-
- @patch('ooinstall.openshift_ansible.run_ansible')
- @patch('ooinstall.openshift_ansible.load_system_facts')
- def test_ansible_config_specified_cli(self, load_facts_mock, run_ansible_mock):
- load_facts_mock.return_value = (MOCK_FACTS, 0)
- run_ansible_mock.return_value = 0
-
- config = SAMPLE_CONFIG % 'openshift-enterprise'
- ansible_config = os.path.join(self.work_dir, 'ansible.cfg')
-
- self._ansible_config_test(load_facts_mock, run_ansible_mock,
- config, ansible_config, ansible_config)
-
- @patch('ooinstall.openshift_ansible.run_ansible')
- @patch('ooinstall.openshift_ansible.load_system_facts')
- def test_ansible_config_specified_in_installer_config(self,
- load_facts_mock, run_ansible_mock):
-
- load_facts_mock.return_value = (MOCK_FACTS, 0)
- run_ansible_mock.return_value = 0
-
- ansible_config = os.path.join(self.work_dir, 'ansible.cfg')
- config = SAMPLE_CONFIG % 'openshift-enterprise'
- config = "%s\nansible_config: %s" % (config, ansible_config)
- self._ansible_config_test(load_facts_mock, run_ansible_mock,
- config, None, ansible_config)
-
- #pylint: disable=too-many-arguments
- # This method allows for drastically simpler tests to write, and the args
- # are all useful.
- def _ansible_config_test(self, load_facts_mock, run_ansible_mock,
- installer_config, ansible_config_cli=None, expected_result=None):
- """
- Utility method for testing the ways you can specify the ansible config.
- """
-
- load_facts_mock.return_value = (MOCK_FACTS, 0)
- run_ansible_mock.return_value = 0
-
- config_file = self.write_config(os.path.join(self.work_dir,
- 'ooinstall.conf'), installer_config)
-
- self.cli_args.extend(["-c", config_file])
- if ansible_config_cli:
- self.cli_args.extend(["--ansible-config", ansible_config_cli])
- self.cli_args.append("install")
- result = self.runner.invoke(cli.cli, self.cli_args)
- self.assert_result(result, 0)
-
- # Test the env vars for facts playbook:
- facts_env_vars = load_facts_mock.call_args[0][2]
- if expected_result:
- self.assertEquals(expected_result, facts_env_vars['ANSIBLE_CONFIG'])
- else:
- # If user running test has rpm installed, this might be set to default:
- self.assertTrue('ANSIBLE_CONFIG' not in facts_env_vars or
- facts_env_vars['ANSIBLE_CONFIG'] == cli.DEFAULT_ANSIBLE_CONFIG)
-
- # Test the env vars for main playbook:
- env_vars = run_ansible_mock.call_args[0][2]
- if expected_result:
- self.assertEquals(expected_result, env_vars['ANSIBLE_CONFIG'])
- else:
- # If user running test has rpm installed, this might be set to default:
- self.assertTrue('ANSIBLE_CONFIG' not in env_vars or
- env_vars['ANSIBLE_CONFIG'] == cli.DEFAULT_ANSIBLE_CONFIG)
+ # 2016-09-26 - tbielawa - COMMENTING OUT these tests FOR NOW while
+ # we wait to see if anyone notices that we took away their ability
+ # to set the ansible_config parameter in the command line options
+ # and in the installer config file.
+ #
+ # We have removed the ability to set the ansible config file
+ # manually so that our new quieter output mode is the default and
+ # only output mode.
+ #
+ # RE: https://trello.com/c/DSwwizwP - atomic-openshift-install
+ # should only output relevant information.
+
+ # @patch('ooinstall.openshift_ansible.run_ansible')
+ # @patch('ooinstall.openshift_ansible.load_system_facts')
+ # def test_no_ansible_config_specified(self, load_facts_mock, run_ansible_mock):
+ # load_facts_mock.return_value = (MOCK_FACTS, 0)
+ # run_ansible_mock.return_value = 0
+
+ # config = SAMPLE_CONFIG % 'openshift-enterprise'
+
+ # self._ansible_config_test(load_facts_mock, run_ansible_mock,
+ # config, None, None)
+
+ # @patch('ooinstall.openshift_ansible.run_ansible')
+ # @patch('ooinstall.openshift_ansible.load_system_facts')
+ # def test_ansible_config_specified_cli(self, load_facts_mock, run_ansible_mock):
+ # load_facts_mock.return_value = (MOCK_FACTS, 0)
+ # run_ansible_mock.return_value = 0
+
+ # config = SAMPLE_CONFIG % 'openshift-enterprise'
+ # ansible_config = os.path.join(self.work_dir, 'ansible.cfg')
+
+ # self._ansible_config_test(load_facts_mock, run_ansible_mock,
+ # config, ansible_config, ansible_config)
+
+ # @patch('ooinstall.openshift_ansible.run_ansible')
+ # @patch('ooinstall.openshift_ansible.load_system_facts')
+ # def test_ansible_config_specified_in_installer_config(self,
+ # load_facts_mock, run_ansible_mock):
+
+ # load_facts_mock.return_value = (MOCK_FACTS, 0)
+ # run_ansible_mock.return_value = 0
+
+ # ansible_config = os.path.join(self.work_dir, 'ansible.cfg')
+ # config = SAMPLE_CONFIG % 'openshift-enterprise'
+ # config = "%s\nansible_config: %s" % (config, ansible_config)
+ # self._ansible_config_test(load_facts_mock, run_ansible_mock,
+ # config, None, ansible_config)
+
+ # #pylint: disable=too-many-arguments
+ # # This method allows for drastically simpler tests to write, and the args
+ # # are all useful.
+ # def _ansible_config_test(self, load_facts_mock, run_ansible_mock,
+ # installer_config, ansible_config_cli=None, expected_result=None):
+ # """
+ # Utility method for testing the ways you can specify the ansible config.
+ # """
+
+ # load_facts_mock.return_value = (MOCK_FACTS, 0)
+ # run_ansible_mock.return_value = 0
+
+ # config_file = self.write_config(os.path.join(self.work_dir,
+ # 'ooinstall.conf'), installer_config)
+
+ # self.cli_args.extend(["-c", config_file])
+ # if ansible_config_cli:
+ # self.cli_args.extend(["--ansible-config", ansible_config_cli])
+ # self.cli_args.append("install")
+ # result = self.runner.invoke(cli.cli, self.cli_args)
+ # self.assert_result(result, 0)
+
+ # # Test the env vars for facts playbook:
+ # facts_env_vars = load_facts_mock.call_args[0][2]
+ # if expected_result:
+ # self.assertEquals(expected_result, facts_env_vars['ANSIBLE_CONFIG'])
+ # else:
+ # # If user running test has rpm installed, this might be set to default:
+ # self.assertTrue('ANSIBLE_CONFIG' not in facts_env_vars or
+ # facts_env_vars['ANSIBLE_CONFIG'] == cli.DEFAULT_ANSIBLE_CONFIG)
+
+ # # Test the env vars for main playbook:
+ # env_vars = run_ansible_mock.call_args[0][2]
+ # if expected_result:
+ # self.assertEquals(expected_result, env_vars['ANSIBLE_CONFIG'])
+ # else:
+ # # If user running test has rpm installed, this might be set to default:
+ # #
+ # # By default we will use the quiet config
+ # self.assertTrue('ANSIBLE_CONFIG' not in env_vars or
+ # env_vars['ANSIBLE_CONFIG'] == cli.QUIET_ANSIBLE_CONFIG)
# unattended with bad config file and no installed hosts (without --force)
@patch('ooinstall.openshift_ansible.run_main_playbook')
diff --git a/utils/test/oo_config_tests.py b/utils/test/oo_config_tests.py
index b5068cc14..56fd82408 100644
--- a/utils/test/oo_config_tests.py
+++ b/utils/test/oo_config_tests.py
@@ -2,6 +2,7 @@
# repo. We will work on these over time.
# pylint: disable=bad-continuation,missing-docstring,no-self-use,invalid-name
+import cStringIO
import os
import unittest
import tempfile
@@ -9,6 +10,7 @@ import shutil
import yaml
from ooinstall.oo_config import OOConfig, Host, OOConfigInvalidHostError
+import ooinstall.openshift_ansible
SAMPLE_CONFIG = """
variant: openshift-enterprise
@@ -224,3 +226,81 @@ class HostTests(OOInstallFixture):
'public_hostname': 'a.example.com',
}
self.assertRaises(OOConfigInvalidHostError, Host, **yaml_props)
+
+ def test_inventory_file_quotes_node_labels(self):
+ """Verify a host entry wraps openshift_node_labels value in double quotes"""
+ yaml_props = {
+ 'ip': '192.168.0.1',
+ 'hostname': 'a.example.com',
+ 'connect_to': 'a-private.example.com',
+ 'public_ip': '192.168.0.1',
+ 'public_hostname': 'a.example.com',
+ 'new_host': True,
+ 'roles': ['node'],
+ 'node_labels': {
+ 'region': 'infra'
+ },
+
+ }
+
+ new_node = Host(**yaml_props)
+ inventory = cStringIO.StringIO()
+ # This is what the 'write_host' function generates. write_host
+ # has no return value, it just writes directly to the file
+ # 'inventory' which in this test-case is a StringIO object
+ ooinstall.openshift_ansible.write_host(
+ new_node,
+ 'node',
+ inventory,
+ schedulable=True)
+ # read the value of what was written to the inventory "file"
+ legacy_inventory_line = inventory.getvalue()
+
+ # Given the `yaml_props` above we should see a line like this:
+ # openshift_node_labels="{'region': 'infra'}"
+ node_labels_expected = '''openshift_node_labels="{'region': 'infra'}"''' # Quotes around the hash
+ node_labels_bad = '''openshift_node_labels={'region': 'infra'}''' # No quotes around the hash
+
+ # The good line is present in the written inventory line
+ self.assertIn(node_labels_expected, legacy_inventory_line)
+ # An unquoted version is not present
+ self.assertNotIn(node_labels_bad, legacy_inventory_line)
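
Background for the assertion above (my reading, not stated in the patch): an INI inventory line is split into key=value pairs roughly shlex-style, so a dict repr containing a space must be wrapped in double quotes or the value is truncated. A tiny illustration:

    # Tiny illustration of why the quotes are required (assumption: shlex-style splitting):
    import shlex

    quoted = '''a.example.com openshift_node_labels="{'region': 'infra'}"'''
    unquoted = '''a.example.com openshift_node_labels={'region': 'infra'}'''

    print(shlex.split(quoted)[1])     # openshift_node_labels={'region': 'infra'}  -- intact
    print(shlex.split(unquoted)[1])   # openshift_node_labels={region:  -- truncated at the space
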
+
+
+ # def test_new_write_inventory_same_as_legacy(self):
+ # """Verify the original write_host function produces the same output as the new method"""
+ # yaml_props = {
+ # 'ip': '192.168.0.1',
+ # 'hostname': 'a.example.com',
+ # 'connect_to': 'a-private.example.com',
+ # 'public_ip': '192.168.0.1',
+ # 'public_hostname': 'a.example.com',
+ # 'new_host': True,
+ # 'roles': ['node'],
+ # 'other_variables': {
+ # 'zzz': 'last',
+ # 'foo': 'bar',
+ # 'aaa': 'first',
+ # },
+ # }
+
+ # new_node = Host(**yaml_props)
+ # inventory = cStringIO.StringIO()
+
+ # # This is what the original 'write_host' function will
+ # # generate. write_host has no return value, it just writes
+ # # directly to the file 'inventory' which in this test-case is
+ # # a StringIO object
+ # ooinstall.openshift_ansible.write_host(
+ # new_node,
+ # 'node',
+ # inventory,
+ # schedulable=True)
+ # legacy_inventory_line = inventory.getvalue()
+
+ # # This is what the new method in the Host class generates
+ # new_inventory_line = new_node.inventory_string('node', schedulable=True)
+
+ # self.assertEqual(
+ # legacy_inventory_line,
+ # new_inventory_line)