Diffstat (limited to 'utils')
 utils/.gitignore                                              |   48
 utils/Makefile                                                |  112
 utils/README.md                                               |   41
 utils/docs/config.md                                          |   85
 utils/docs/man/man1/atomic-openshift-installer.1              |  200
 utils/docs/man/man1/atomic-openshift-installer.1.asciidoc.in  |  173
 utils/etc/ansible-quiet.cfg                                   |   33
 utils/etc/ansible.cfg                                         |   30
 utils/setup.cfg                                               |    5
 utils/setup.py                                                |   80
 utils/site_assets/oo-install-bootstrap.sh                     |   93
 utils/site_assets/oo_install_launcher.README.txt              |   22
 utils/src/DESCRIPTION.rst                                     |   13
 utils/src/MANIFEST.in                                         |   10
 utils/src/ooinstall/__init__.py                               |    1
 utils/src/ooinstall/ansible_plugins/facts_callback.py         |   94
 utils/src/ooinstall/cli_installer.py                          | 1186
 utils/src/ooinstall/oo_config.py                              |  450
 utils/src/ooinstall/openshift_ansible.py                      |  339
 utils/src/ooinstall/utils.py                                  |   21
 utils/src/ooinstall/variants.py                               |   96
 utils/test-requirements.txt                                   |   12
 utils/test/__init__.py                                        |    0
 utils/test/cli_installer_tests.py                             | 1132
 utils/test/fixture.py                                         |  254
 utils/test/oo_config_tests.py                                 |  306
 utils/test/test_utils.py                                      |  100
 27 files changed, 4936 insertions(+), 0 deletions(-)
diff --git a/utils/.gitignore b/utils/.gitignore
new file mode 100644
index 000000000..facfeee54
--- /dev/null
+++ b/utils/.gitignore
@@ -0,0 +1,48 @@
+package/
+
+# Backup files
+*.~
+
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+
+# C extensions
+*.so
+
+# Distribution / packaging
+bin/
+build/
+develop-eggs/
+dist/
+eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+.tox/
+.coverage
+.cache
+.noseids
+nosetests.xml
+coverage.xml
+
+# Translations
+*.mo
+
+# Sphinx documentation
+docs/_build/
+oo-install
+oo-installenv
+cover
diff --git a/utils/Makefile b/utils/Makefile
new file mode 100644
index 000000000..62f08f74b
--- /dev/null
+++ b/utils/Makefile
@@ -0,0 +1,112 @@
+########################################################
+
+# Makefile for OpenShift: Atomic Quick Installer
+#
+# useful targets (not all implemented yet!):
+# make clean -- Clean up garbage
+# make ci    -- Execute CI steps (for Travis or Jenkins)
+
+########################################################
+
+# > VARIABLE = value
+#
+# Normal setting of a variable - values within it are recursively
+# expanded when the variable is USED, not when it's declared.
+#
+# > VARIABLE := value
+#
+# Setting of a variable with simple expansion of the values inside -
+# values within it are expanded at DECLARATION time.
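+#
+# A hypothetical illustration of the difference (these variables are not
+# used in this Makefile):
+#
+# > NOW  = $(shell date)   # re-evaluated every time $(NOW) is expanded
+# > SNAP := $(shell date)  # evaluated once, here at declaration time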
+
+########################################################
+
+
+NAME := oo-install
+TESTPACKAGE := oo-install
+SHORTNAME := ooinstall
+
+# This doesn't evaluate until it's called. The -D argument is the
+# directory of the target file ($@), kinda like `dirname`.
+ASCII2MAN = a2x -D $(dir $@) -d manpage -f manpage $<
+MANPAGES := docs/man/man1/atomic-openshift-installer.1
+VERSION := 1.3
+
+PEPEXCLUDES := E501,E121,E124
+
+sdist: clean
+ python setup.py sdist
+ rm -fR $(SHORTNAME).egg-info
+
+clean:
+ @find . -type f -regex ".*\.py[co]$$" -delete
+ @find . -type f \( -name "*~" -or -name "#*" \) -delete
+ @rm -fR build dist rpm-build MANIFEST htmlcov .coverage cover ooinstall.egg-info oo-install
+ @rm -fR $(NAME)env
+
+
+# To force a rebuild of the docs run 'touch' on any *.in file under
+# docs/man/man1/
+docs: $(MANPAGES)
+
+# Regenerate %.1.asciidoc if %.1.asciidoc.in has been modified more
+# recently than %.1.asciidoc.
+%.1.asciidoc: %.1.asciidoc.in
+ sed "s/%VERSION%/$(VERSION)/" $< > $@
+
+# Regenerate %.1 if %.1.asciidoc or VERSION has been modified more
+# recently than %.1. (Implicitly runs the %.1.asciidoc recipe)
+%.1: %.1.asciidoc
+ $(ASCII2MAN)
+
+viewcover:
+ xdg-open cover/index.html
+
+virtualenv:
+ @echo "#############################################"
+ @echo "# Creating a virtualenv"
+ @echo "#############################################"
+ virtualenv $(NAME)env
+ . $(NAME)env/bin/activate && pip install setuptools==17.1.1
+ . $(NAME)env/bin/activate && pip install -r test-requirements.txt
+# If there are any special things to install do it here
+# . $(NAME)env/bin/activate && INSTALL STUFF
+
+ci-unittests:
+ @echo "#############################################"
+ @echo "# Running Unit Tests in virtualenv"
+ @echo "#############################################"
+ . $(NAME)env/bin/activate && nosetests -v --with-coverage --cover-html --cover-min-percentage=70 --cover-package=$(SHORTNAME) test/
+ @echo "VIEW CODE COVERAGE REPORT WITH 'xdg-open cover/index.html' or run 'make viewcover'"
+
+ci-pylint:
+ @echo "#############################################"
+ @echo "# Running PyLint Tests in virtualenv"
+ @echo "#############################################"
+ . $(NAME)env/bin/activate && python -m pylint --rcfile ../git/.pylintrc src/ooinstall/cli_installer.py src/ooinstall/oo_config.py src/ooinstall/openshift_ansible.py src/ooinstall/variants.py ../callback_plugins/openshift_quick_installer.py ../roles/openshift_certificate_expiry/library/openshift_cert_expiry.py
+
+ci-list-deps:
+ @echo "#############################################"
+ @echo "# Listing all pip deps"
+ @echo "#############################################"
+ . $(NAME)env/bin/activate && pip freeze
+
+ci-pyflakes:
+ @echo "#################################################"
+ @echo "# Running Pyflakes Compliance Tests in virtualenv"
+ @echo "#################################################"
+ . $(NAME)env/bin/activate && pyflakes src/ooinstall/*.py
+ . $(NAME)env/bin/activate && pyflakes ../callback_plugins/openshift_quick_installer.py
+ . $(NAME)env/bin/activate && pyflakes ../roles/openshift_certificate_expiry/library/openshift_cert_expiry.py
+
+ci-pep8:
+ @echo "#############################################"
+ @echo "# Running PEP8 Compliance Tests in virtualenv"
+ @echo "#############################################"
+ . $(NAME)env/bin/activate && pep8 --ignore=$(PEPEXCLUDES) src/$(SHORTNAME)/
+ . $(NAME)env/bin/activate && pep8 --ignore=$(PEPEXCLUDES) ../callback_plugins/openshift_quick_installer.py
+# This one excludes E402 because it is an ansible module and the
+# boilerplate import statement is expected to be at the bottom
+ . $(NAME)env/bin/activate && pep8 --ignore=$(PEPEXCLUDES),E402 ../roles/openshift_certificate_expiry/library/openshift_cert_expiry.py
+
+ci: clean virtualenv ci-list-deps ci-pep8 ci-pylint ci-pyflakes ci-unittests
+ :
diff --git a/utils/README.md b/utils/README.md
new file mode 100644
index 000000000..2abf2705e
--- /dev/null
+++ b/utils/README.md
@@ -0,0 +1,41 @@
+# Running Tests (NEW)
+
+Run the command:
+
+ make ci
+
+to run an array of unittests locally.
+
+You will get errors if the log files (`/tmp/ansible.log` and
+`/tmp/installer.txt`) already exist and cannot be written to by the
+current user. *We're working on it.*
+
+# Running From Source
+
+You will need to set up a **virtualenv** to run from source:
+
+ $ virtualenv oo-install
+ $ source ./oo-install/bin/activate
+ $ virtualenv --relocatable ./oo-install/
+ $ python setup.py install
+
+The virtualenv `bin` directory should now be at the start of your
+`$PATH`, and `oo-install` is ready to use from your shell.
+
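+For example, to start an interactive installation from inside the
+virtualenv (assuming the steps above completed successfully):
+
+    $ oo-install install
+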
+You can exit the virtualenv with:
+
+ $ deactivate
+
+# Testing (OLD)
+
+*This section is deprecated, but still works*
+
+First, run the **virtualenv setup steps** described above.
+
+Install the testing libraries (we cannot do this via setuptools because of
+the setuptools version that virtualenv bundles):
+
+    $ pip install mock nose
+
+Then run the tests with:
+
+    $ oo-install/bin/nosetests
diff --git a/utils/docs/config.md b/utils/docs/config.md
new file mode 100644
index 000000000..3677ffe2e
--- /dev/null
+++ b/utils/docs/config.md
@@ -0,0 +1,85 @@
+# oo-install Supported Configuration File
+
+Upon completion, oo-install will write out a configuration file representing the settings that were gathered and used. This configuration file, or one crafted by hand, can be used to run or re-run the installer, add additional hosts, upgrade, or re-install.
+
+By default this configuration file is written to `~/.config/openshift/installer.cfg.yml`.
+
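+The saved file can later drive an unattended run, for example (illustrative
+command; the same options are documented in the atomic-openshift-installer
+man page in this repository):
+
+```
+oo-install -u -c ~/.config/openshift/installer.cfg.yml install
+```
+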
+## Example
+
+```yaml
+version: v2
+variant: openshift-enterprise
+variant_version: 3.3
+deployment:
+ ansible_ssh_user: root
+ hosts:
+ - connect_to: 24.222.0.1
+ ip: 10.0.0.1
+ hostname: master-private.example.com
+ public_ip: 24.222.0.1
+ public_hostname: master.example.com
+ roles:
+ - master
+ - node
+ containerized: true
+ - connect_to: 10.0.0.2
+ ip: 10.0.0.2
+ hostname: node1-private.example.com
+ public_ip: 24.222.0.2
+ public_hostname: node1.example.com
+ roles:
+ - node
+ - connect_to: 10.0.0.3
+ ip: 10.0.0.3
+ hostname: node2-private.example.com
+ public_ip: 24.222.0.3
+ public_hostname: node2.example.com
+ roles:
+ - node
+ roles:
+ master:
+ node:
+```
+
+## Primary Settings
+
+### version
+
+Indicates the version of the configuration schema this file was written with. The current implementation is v2, as shown in the example above.
+
+### variant
+
+The OpenShift variant to install. Currently valid options are:
+
+ * openshift-enterprise
+ * atomic-enterprise
+
+### variant_version (optional)
+
+Default: Latest version for your chosen variant.
+
+A version which must be valid for your selected variant. If not specified, the latest version will be assumed.
+
+Examples: 3.0, 3.1, etc.
+
+### hosts
+
+This section defines a list of the hosts you wish to install the OpenShift master/node service on.
+
+*ip* or *hostname* must be specified so the installer can connect to the system to gather facts before proceeding with the install.
+
+If *public_ip* or *public_hostname* are not specified, this information will be gathered from the facts and the user will be asked to confirm the values in an editor. For an unattended install, the installer will error out instead; you must provide complete host records.
+
+*master* and *node* determine the type of services that will be installed. One of these must be set to true for the configuration file to be considered valid.
+
+*containerized* indicates you want to run OpenShift services in a container on this host.
+
+### ansible_ssh_user
+
+Default: root
+
+Defines the user ansible will use to ssh to remote systems for gathering facts and the installation.
+
+### ansible_log_path
+
+Default: /tmp/ansible.log
+
+Defines the path of the log file that ansible output is written to.
diff --git a/utils/docs/man/man1/atomic-openshift-installer.1 b/utils/docs/man/man1/atomic-openshift-installer.1
new file mode 100644
index 000000000..072833ce8
--- /dev/null
+++ b/utils/docs/man/man1/atomic-openshift-installer.1
@@ -0,0 +1,200 @@
+'\" t
+.\" Title: atomic-openshift-installer
+.\" Author: [see the "AUTHOR" section]
+.\" Generator: DocBook XSL Stylesheets v1.78.1 <http://docbook.sf.net/>
+.\" Date: 10/20/2016
+.\" Manual: atomic-openshift-installer
+.\" Source: atomic-openshift-utils 1.3
+.\" Language: English
+.\"
+.TH "ATOMIC\-OPENSHIFT\-I" "1" "10/20/2016" "atomic\-openshift\-utils 1\&.3" "atomic\-openshift\-installer"
+.\" -----------------------------------------------------------------
+.\" * Define some portability stuff
+.\" -----------------------------------------------------------------
+.\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+.\" http://bugs.debian.org/507673
+.\" http://lists.gnu.org/archive/html/groff/2009-02/msg00013.html
+.\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+.ie \n(.g .ds Aq \(aq
+.el .ds Aq '
+.\" -----------------------------------------------------------------
+.\" * set default formatting
+.\" -----------------------------------------------------------------
+.\" disable hyphenation
+.nh
+.\" disable justification (adjust text to left margin only)
+.ad l
+.\" -----------------------------------------------------------------
+.\" * MAIN CONTENT STARTS HERE *
+.\" -----------------------------------------------------------------
+.SH "NAME"
+atomic-openshift-installer \- Interactive OpenShift Container Platform (OCP) installer
+.SH "SYNOPSIS"
+.sp
+atomic\-openshift\-installer [OPTIONS] COMMAND [OPTS]
+.SH "DESCRIPTION"
+.sp
+\fBatomic\-openshift\-installer\fR makes the process for installing OCP easier by interactively gathering the data needed to run on each host\&. It can also be run in unattended mode if provided with a configuration file\&.
+.SH "OPTIONS"
+.sp
+The following options are common to all commands\&.
+.PP
+\fB\-u\fR, \fB\-\-unattended\fR
+.RS 4
+Run installer in
+\fBunattended\fR
+mode\&. You will not be prompted to answer any questions\&.
+.RE
+.PP
+\fB\-c\fR, \fB\-\-configuration\fR \fIPATH\fR
+.RS 4
+Provide an alternate
+\fIPATH\fR
+to an
+\fIinstaller\&.cfg\&.yml\fR
+file\&.
+.RE
+.PP
+\fB\-a\fR \fIDIRECTORY\fR, \fB\-\-ansible\-playbook\-directory\fR \fIDIRECTORY\fR
+.RS 4
+Manually set the
+\fIDIRECTORY\fR
+in which to look for Ansible playbooks\&.
+.RE
+.PP
+\fB\-\-ansible\-log\-path\fR \fIPATH\fR
+.RS 4
+Specify the
+\fIPATH\fR
+of the directory in which to save Ansible logs\&.
+.RE
+.PP
+\fB\-v\fR, \fB\-\-verbose\fR
+.RS 4
+Run the installer with more verbosity\&.
+.RE
+.PP
+\fB\-d\fR, \fB\-\-debug\fR
+.RS 4
+Enable installer debugging\&. Logs are saved in
+\fI/tmp/installer\&.txt\fR\&.
+.RE
+.PP
+\fB\-h\fR, \fB\-\-help\fR
+.RS 4
+Show the usage help and exit\&.
+.RE
+.SH "COMMANDS"
+.sp
+\fBatomic\-openshift\-installer\fR has four modes of operation:
+.sp
+.RS 4
+.ie n \{\
+\h'-04'\(bu\h'+03'\c
+.\}
+.el \{\
+.sp -1
+.IP \(bu 2.3
+.\}
+\fBinstall\fR
+.RE
+.sp
+.RS 4
+.ie n \{\
+\h'-04'\(bu\h'+03'\c
+.\}
+.el \{\
+.sp -1
+.IP \(bu 2.3
+.\}
+\fBuninstall\fR
+.RE
+.sp
+.RS 4
+.ie n \{\
+\h'-04'\(bu\h'+03'\c
+.\}
+.el \{\
+.sp -1
+.IP \(bu 2.3
+.\}
+\fBupgrade\fR
+.RE
+.sp
+.RS 4
+.ie n \{\
+\h'-04'\(bu\h'+03'\c
+.\}
+.el \{\
+.sp -1
+.IP \(bu 2.3
+.\}
+\fBscaleup\fR
+.RE
+.sp
+The options specific to each command are described in the following sections\&.
+.SH "INSTALL"
+.sp
+The \fBinstall\fR command will guide you through the steps required to install an OCP cluster\&. After all of the required information has been collected (target hosts, storage options, high\-availability), the installation will begin\&.
+.PP
+\fB\-f\fR, \fB\-\-force\fR
+.RS 4
+Forces an installation\&. This means that hosts with existing installations will be reinstalled if required\&.
+.RE
+.PP
+\fB\-\-gen\-inventory\fR
+.RS 4
+Generate an Ansible inventory file and exit\&. The default location for the inventory file is
+\fI~/\&.config/openshift/hosts\fR\&.
+.RE
+.SH "UNINSTALL"
+.sp
+The \fBuninstall\fR command will uninstall OCP from your target hosts\&. This command has no additional options\&.
+.SH "UPGRADE"
+.sp
+The \fBupgrade\fR command will upgrade a cluster of hosts to a newer version of OCP\&.
+.PP
+\fB\-l\fR, \fB\-\-latest\-minor\fR
+.RS 4
+Upgrade to the latest minor version\&. For example, if you are running version
+\fB3\&.2\&.1\fR
+then this could upgrade you to
+\fB3\&.2\&.2\fR\&.
+.RE
+.PP
+\fB\-n\fR, \fB\-\-next\-major\fR
+.RS 4
+Upgrade to the latest major version\&. For example, if you are running version
+\fB3\&.2\fR
+then this could upgrade you to
+\fB3\&.3\fR\&.
+.RE
+.SH "SCALEUP"
+.sp
+The \fBscaleup\fR command is used to add new nodes to an existing cluster\&. This command has no additional options\&.
+.SH "FILES"
+.sp
+\fB~/\&.config/openshift/installer\&.cfg\&.yml\fR \(em Installer configuration file\&. Can be used to generate an inventory later or start an unattended installation\&.
+.sp
+\fB~/\&.config/openshift/hosts\fR \(em Generated Ansible inventory file\&. Used to run the Ansible playbooks for install, uninstall, and upgrades\&.
+.sp
+\fB/tmp/ansible\&.log\fR \(em The default location of the ansible log file\&.
+.sp
+\fB/tmp/installer\&.txt\fR \(em The location of the log file for debugging the installer\&.
+.SH "AUTHOR"
+.sp
+Red Hat OpenShift Productization team
+.sp
+For a complete list of contributors, please visit the contributors page of the GitHub project\&.
+.SH "COPYRIGHT"
+.sp
+Copyright \(co 2016 Red Hat, Inc\&.
+.sp
+\fBatomic\-openshift\-installer\fR is released under the terms of the ASL 2\&.0 license\&.
+.SH "SEE ALSO"
+.sp
+\fBansible\fR(1), \fBansible\-playbook\fR(1)
+.sp
+\fBThe openshift\-ansible GitHub Project\fR \(em https://github\&.com/openshift/openshift\-ansible/
+.sp
+\fBThe atomic\-openshift\-installer Documentation\fR \(em https://docs\&.openshift\&.com/container\-platform/3\&.3/install_config/install/quick_install\&.html
diff --git a/utils/docs/man/man1/atomic-openshift-installer.1.asciidoc.in b/utils/docs/man/man1/atomic-openshift-installer.1.asciidoc.in
new file mode 100644
index 000000000..9b02c4d14
--- /dev/null
+++ b/utils/docs/man/man1/atomic-openshift-installer.1.asciidoc.in
@@ -0,0 +1,173 @@
+atomic-openshift-installer(1)
+=============================
+:man source: atomic-openshift-utils
+:man version: %VERSION%
+:man manual: atomic-openshift-installer
+
+
+NAME
+----
+atomic-openshift-installer - Interactive OpenShift Container Platform (OCP) installer
+
+
+SYNOPSIS
+--------
+atomic-openshift-installer [OPTIONS] COMMAND [OPTS]
+
+
+DESCRIPTION
+-----------
+
+**atomic-openshift-installer** makes the process for installing OCP
+easier by interactively gathering the data needed to run on each
+host. It can also be run in unattended mode if provided with a
+configuration file.
+
+
+OPTIONS
+-------
+
+The following options are common to all commands.
+
+*-u*, *--unattended*::
+
+Run installer in **unattended** mode. You will not be prompted to
+answer any questions.
+
+
+*-c*, *--configuration* 'PATH'::
+
+Provide an alternate 'PATH' to an 'installer.cfg.yml' file.
+
+
+*-a* 'DIRECTORY', *--ansible-playbook-directory* 'DIRECTORY'::
+
+Manually set the 'DIRECTORY' in which to look for Ansible playbooks.
+
+
+*--ansible-log-path* 'PATH'::
+
+Specify the 'PATH' of the directory in which to save Ansible logs.
+
+
+*-v*, *--verbose*::
+
+Run the installer with more verbosity.
+
+
+*-d*, *--debug*::
+
+Enable installer debugging. Logs are saved in '/tmp/installer.txt'.
+
+
+*-h*, *--help*::
+
+Show the usage help and exit.
+
+
+COMMANDS
+--------
+
+**atomic-openshift-installer** has four modes of operation:
+
+* **install**
+* **uninstall**
+* **upgrade**
+* **scaleup**
+
+The options specific to each command are described in the following
+sections.
+
+
+
+INSTALL
+-------
+
+The **install** command will guide you through the steps required to
+install an OCP cluster. After all of the required information has been
+collected (target hosts, storage options, high-availability), the
+installation will begin.
+
+*-f*, *--force*::
+
+Forces an installation. This means that hosts with existing
+installations will be reinstalled if required.
+
+*--gen-inventory*::
+
+Generate an Ansible inventory file and exit. The default location for
+the inventory file is '~/.config/openshift/hosts'.
+
+
+UNINSTALL
+---------
+
+The **uninstall** command will uninstall OCP from your target
+hosts. This command has no additional options.
+
+
+UPGRADE
+-------
+
+The **upgrade** command will upgrade a cluster of hosts to a newer
+version of OCP.
+
+*-l*, *--latest-minor*::
+
+Upgrade to the latest minor version. For example, if you are running
+version **3.2.1** then this could upgrade you to **3.2.2**.
+
+*-n*, *--next-major*::
+
+Upgrade to the latest major version. For example, if you are running
+version **3.2** then this could upgrade you to **3.3**.
+
+
+SCALEUP
+-------
+
+The **scaleup** command is used to add new nodes to an existing cluster.
+This command has no additional options.
+
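+EXAMPLES
+--------
+
+The invocations below are illustrative; every option used is documented in
+the sections above.
+
+Start an interactive installation:
+
+    atomic-openshift-installer install
+
+Run an unattended installation from a previously saved configuration file,
+with debug logging enabled:
+
+    atomic-openshift-installer -u -d -c ~/.config/openshift/installer.cfg.yml install
+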
+FILES
+-----
+
+*~/.config/openshift/installer.cfg.yml* -- Installer configuration
+ file. Can be used to generate an inventory later or start an
+ unattended installation.
+
+*~/.config/openshift/hosts* -- Generated Ansible inventory file. Used
+ to run the Ansible playbooks for install, uninstall, and upgrades.
+
+*/tmp/ansible.log* -- The default location of the ansible log file.
+
+*/tmp/installer.txt* -- The location of the log file for debugging the
+ installer.
+
+
+AUTHOR
+------
+
+Red Hat OpenShift Productization team
+
+For a complete list of contributors, please visit the contributors page
+of the GitHub project.
+
+
+
+COPYRIGHT
+---------
+Copyright © 2016 Red Hat, Inc.
+
+**atomic-openshift-installer** is released under the terms of the ASL
+2.0 license.
+
+
+
+SEE ALSO
+--------
+*ansible*(1), *ansible-playbook*(1)
+
+*The openshift-ansible GitHub Project* -- <https://github.com/openshift/openshift-ansible/>
+
+*The atomic-openshift-installer Documentation* -- <https://docs.openshift.com/container-platform/3.3/install_config/install/quick_install.html>
diff --git a/utils/etc/ansible-quiet.cfg b/utils/etc/ansible-quiet.cfg
new file mode 100644
index 000000000..0eb0efa49
--- /dev/null
+++ b/utils/etc/ansible-quiet.cfg
@@ -0,0 +1,33 @@
+# config file for ansible -- http://ansible.com/
+# ==============================================
+
+# This config file provides examples for running
+# the OpenShift playbooks with the provided
+# inventory scripts. Only global defaults are
+# left uncommented
+
+[defaults]
+# Add the roles directory to the roles path
+roles_path = roles/
+
+# Set the log_path
+log_path = /tmp/ansible.log
+
+forks = 10
+host_key_checking = False
+nocows = 1
+
+retry_files_enabled = False
+
+deprecation_warnings=False
+
+# Need to handle:
+# inventory - derive from OO_ANSIBLE_DIRECTORY env var
+# callback_plugins - derive from pkg_resource.resource_filename
+# private_key_file - prompt if missing
+# remote_tmp - set if provided by user (cli)
+# ssh_args - set if provided by user (cli)
+# control_path
+
+stdout_callback = openshift_quick_installer
+callback_plugins = /usr/share/ansible_plugins/callback_plugins
diff --git a/utils/etc/ansible.cfg b/utils/etc/ansible.cfg
new file mode 100644
index 000000000..3425e7e62
--- /dev/null
+++ b/utils/etc/ansible.cfg
@@ -0,0 +1,30 @@
+# config file for ansible -- http://ansible.com/
+# ==============================================
+
+# This config file provides examples for running
+# the OpenShift playbooks with the provided
+# inventory scripts. Only global defaults are
+# left uncommented
+
+[defaults]
+# Add the roles directory to the roles path
+roles_path = roles/
+
+# Set the log_path
+log_path = /tmp/ansible.log
+
+forks = 10
+host_key_checking = False
+nocows = 1
+
+retry_files_enabled = False
+
+deprecation_warnings = False
+
+# Need to handle:
+# inventory - derive from OO_ANSIBLE_DIRECTORY env var
+# callback_plugins - derive from pkg_resource.resource_filename
+# private_key_file - prompt if missing
+# remote_tmp - set if provided by user (cli)
+# ssh_args - set if provided by user (cli)
+# control_path
diff --git a/utils/setup.cfg b/utils/setup.cfg
new file mode 100644
index 000000000..79bc67848
--- /dev/null
+++ b/utils/setup.cfg
@@ -0,0 +1,5 @@
+[bdist_wheel]
+# This flag says that the code is written to work on both Python 2 and Python
+# 3. If at all possible, it is good practice to do this. If you cannot, you
+# will need to generate wheels for each Python version that you support.
+universal=1
diff --git a/utils/setup.py b/utils/setup.py
new file mode 100644
index 000000000..7909321c9
--- /dev/null
+++ b/utils/setup.py
@@ -0,0 +1,80 @@
+"""A setuptools based setup module.
+
+"""
+
+# Always prefer setuptools over distutils
+from setuptools import setup
+
+setup(
+ name='ooinstall',
+
+ # Versions should comply with PEP440. For a discussion on single-sourcing
+ # the version across setup.py and the project code, see
+ # https://packaging.python.org/en/latest/single_source_version.html
+ version="3.0.0",
+
+ description="Ansible wrapper for OpenShift Enterprise 3 installation.",
+
+ # The project's main homepage.
+ url="http://github.com/openshift/openshift-extras/tree/enterprise-3.0/oo-install",
+
+ # Author details
+ author="openshift@redhat.com",
+ author_email="OpenShift",
+
+ # Choose your license
+ license="Apache 2.0",
+
+ # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
+ classifiers=[
+ 'Development Status :: 4 - Beta',
+ 'License :: OSI Approved :: Apache Software License',
+ 'Programming Language :: Python :: 2.7',
+ 'Topic :: Utilities',
+ ],
+
+ # What does your project relate to?
+ keywords='oo-install setuptools development',
+
+ # You can just specify the packages manually here if your project is
+ # simple. Or you can use find_packages().
+ #packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
+ packages=['ooinstall'],
+ package_dir={'ooinstall': 'src/ooinstall'},
+
+
+ # List run-time dependencies here. These will be installed by pip when
+ # your project is installed. For an analysis of "install_requires" vs pip's
+ # requirements files see:
+ # https://packaging.python.org/en/latest/requirements.html
+ install_requires=['click', 'PyYAML'],
+
+ # List additional groups of dependencies here (e.g. development
+ # dependencies). You can install these using the following syntax,
+ # for example:
+ # $ pip install -e .[dev,test]
+ #extras_require={
+ # 'dev': ['check-manifest'],
+ # 'test': ['coverage'],
+ #},
+
+ # If there are data files included in your packages that need to be
+ # installed, specify them here. If using Python 2.6 or less, then these
+ # have to be included in MANIFEST.in as well.
+ package_data={
+ 'ooinstall': ['ansible.cfg', 'ansible-quiet.cfg', 'ansible_plugins/*'],
+ },
+
+ tests_require=['nose'],
+
+ test_suite='nose.collector',
+
+ # To provide executable scripts, use entry points in preference to the
+ # "scripts" keyword. Entry points provide cross-platform support and allow
+ # pip to create the appropriate form of executable for the target platform.
+ entry_points={
+ 'console_scripts': [
+ 'oo-install=ooinstall.cli_installer:cli',
+ ],
+ },
+)
diff --git a/utils/site_assets/oo-install-bootstrap.sh b/utils/site_assets/oo-install-bootstrap.sh
new file mode 100755
index 000000000..3c5614d39
--- /dev/null
+++ b/utils/site_assets/oo-install-bootstrap.sh
@@ -0,0 +1,93 @@
+#!/bin/sh
+
+# Grab command-line arguments
+cmdlnargs="$@"
+
+: ${OO_INSTALL_KEEP_ASSETS:="false"}
+: ${OO_INSTALL_CONTEXT:="INSTALLCONTEXT"}
+: ${TMPDIR:=/tmp}
+: ${OO_INSTALL_LOG:=${TMPDIR}/INSTALLPKGNAME.log}
+[[ $TMPDIR != */ ]] && TMPDIR="${TMPDIR}/"
+
+if rpm -q dnf;
+then
+ PKG_MGR="dnf"
+else
+ PKG_MGR="yum"
+fi
+
+if [ $OO_INSTALL_CONTEXT != 'origin_vm' ]
+then
+ clear
+ echo "Checking for necessary tools..."
+fi
+if [ -e /etc/redhat-release ]
+then
+ for i in python python-virtualenv openssh-clients gcc
+ do
+ rpm -q $i >/dev/null 2>&1 || { echo >&2 "Missing installation dependency detected. Please run \"${PKG_MGR} install ${i}\"."; exit 1; }
+ done
+fi
+for i in python virtualenv ssh gcc
+do
+ command -v $i >/dev/null 2>&1 || { echo >&2 "OpenShift installation requires $i on the PATH but it does not appear to be available. Correct this and rerun the installer."; exit 1; }
+done
+
+# All instances of INSTALLPKGNAME are replaced during packaging with the actual package name.
+if [[ -e ./INSTALLPKGNAME.tgz ]]
+then
+ if [ $OO_INSTALL_CONTEXT != 'origin_vm' ]
+ then
+ echo "Using bundled assets."
+ fi
+ cp INSTALLPKGNAME.tgz ${TMPDIR}/INSTALLPKGNAME.tgz
+elif [[ $OO_INSTALL_KEEP_ASSETS == 'true' && -e ${TMPDIR}/INSTALLPKGNAME.tgz ]]
+then
+ if [ $OO_INSTALL_CONTEXT != 'origin_vm' ]
+ then
+ echo "Using existing installer assets."
+ fi
+else
+ echo "Downloading oo-install package to ${TMPDIR}INSTALLPKGNAME.tgz..."
+ curl -s -o ${TMPDIR}INSTALLPKGNAME.tgz https://install.openshift.com/INSTALLVERPATHINSTALLPKGNAME.tgz
+fi
+
+if [ $OO_INSTALL_CONTEXT != 'origin_vm' ]
+then
+ echo "Extracting oo-install to ${TMPDIR}INSTALLPKGNAME..."
+fi
+tar xzf ${TMPDIR}INSTALLPKGNAME.tgz -C ${TMPDIR} 2>&1 >> $OO_INSTALL_LOG
+
+echo "Preparing to install. This can take a minute or two..."
+virtualenv ${TMPDIR}/INSTALLPKGNAME 2>&1 >> $OO_INSTALL_LOG
+cd ${TMPDIR}/INSTALLPKGNAME 2>&1 >> $OO_INSTALL_LOG
+source ./bin/activate 2>&1 >> $OO_INSTALL_LOG
+pip install --no-index -f file:///$(readlink -f deps) ansible 2>&1 >> $OO_INSTALL_LOG
+
+# TODO: these deps should technically be handled as part of installing ooinstall
+pip install --no-index -f file:///$(readlink -f deps) click 2>&1 >> $OO_INSTALL_LOG
+pip install --no-index ./src/ 2>&1 >> $OO_INSTALL_LOG
+echo "Installation preparation done!" 2>&1 >> $OO_INSTALL_LOG
+
+echo "Using `ansible --version`" 2>&1 >> $OO_INSTALL_LOG
+
+if [ $OO_INSTALL_CONTEXT != 'origin_vm' ]
+then
+ echo "Starting oo-install..." 2>&1 >> $OO_INSTALL_LOG
+else
+ clear
+fi
+oo-install $cmdlnargs --ansible-playbook-directory ${TMPDIR}/INSTALLPKGNAME/openshift-ansible-*/ --ansible-log-path $OO_INSTALL_LOG
+
+if [ $OO_INSTALL_KEEP_ASSETS == 'true' ]
+then
+ echo "Keeping temporary assets in ${TMPDIR}"
+else
+ echo "Removing temporary assets."
+ rm -rf ${TMPDIR}INSTALLPKGNAME
+ rm -rf ${TMPDIR}INSTALLPKGNAME.tgz
+fi
+
+echo "Please see $OO_INSTALL_LOG for full output."
+
+exit
diff --git a/utils/site_assets/oo_install_launcher.README.txt b/utils/site_assets/oo_install_launcher.README.txt
new file mode 100644
index 000000000..46947b481
--- /dev/null
+++ b/utils/site_assets/oo_install_launcher.README.txt
@@ -0,0 +1,22 @@
+= oo-install Portable Installer Package
+
+This package is identical to the installer package that can be downloaded
+and executed directly from https://install.openshift.com/.
+
+NOTE: It will still be necessary for this installer to download RPMs from the
+internet, unless you have already set up the necessary local repositories.
+
+To run the installer from this package, run the following command:
+
+$ ./LAUNCHERNAME
+
+That command script and the packaged zip file can be burned to a CD or
+written to a USB drive and used to run the oo-install utility in places
+where the web-based installer is not reachable.
+
+All of the command-line arguments supported by oo-install can be passed
+to this launcher application.
+
+For more information for Enterprise installs, refer to the OpenShift
+Enterprise Administrator Guide:
+https://docs.openshift.com/enterprise/latest/welcome/index.html
diff --git a/utils/src/DESCRIPTION.rst b/utils/src/DESCRIPTION.rst
new file mode 100644
index 000000000..68b3a57f2
--- /dev/null
+++ b/utils/src/DESCRIPTION.rst
@@ -0,0 +1,13 @@
+A sample Python project
+=======================
+
+This is the description file for the project.
+
+The file should use UTF-8 encoding and be written using ReStructured Text. It
+will be used to generate the project webpage on PyPI, and should be written for
+that purpose.
+
+Typical contents for this file would include an overview of the project, basic
+usage examples, etc. Generally, including the project changelog in here is not
+a good idea, although a simple "What's New" section for the most recent version
+may be appropriate.
diff --git a/utils/src/MANIFEST.in b/utils/src/MANIFEST.in
new file mode 100644
index 000000000..216f57e9c
--- /dev/null
+++ b/utils/src/MANIFEST.in
@@ -0,0 +1,10 @@
+include DESCRIPTION.rst
+
+# Include the test suite (FIXME: does not work yet)
+# recursive-include tests *
+
+# If using Python 2.6 or less, then have to include package data, even though
+# it's already declared in setup.py
+include ooinstall/*
+include ansible.cfg
+include ansible-quiet.cfg
diff --git a/utils/src/ooinstall/__init__.py b/utils/src/ooinstall/__init__.py
new file mode 100644
index 000000000..96e495e19
--- /dev/null
+++ b/utils/src/ooinstall/__init__.py
@@ -0,0 +1 @@
+# pylint: disable=missing-docstring
diff --git a/utils/src/ooinstall/ansible_plugins/facts_callback.py b/utils/src/ooinstall/ansible_plugins/facts_callback.py
new file mode 100644
index 000000000..e51890a22
--- /dev/null
+++ b/utils/src/ooinstall/ansible_plugins/facts_callback.py
@@ -0,0 +1,94 @@
+# TODO: Temporarily disabled due to importing old code into openshift-ansible
+# repo. We will work on these over time.
+# pylint: disable=bad-continuation,missing-docstring,no-self-use,invalid-name,no-value-for-parameter
+
+import os
+import yaml
+from ansible.plugins.callback import CallbackBase
+
+
+# pylint: disable=super-init-not-called
+class CallbackModule(CallbackBase):
+
+ def __init__(self):
+ ######################
+ # This is ugly stoopid. This should be updated in the following ways:
+ # 1) it should probably only be used for the
+ # openshift_facts.yml playbook, so maybe there's some way to check
+ # a variable that's set when that playbook is run?
+ try:
+ self.hosts_yaml_name = os.environ['OO_INSTALL_CALLBACK_FACTS_YAML']
+ except KeyError:
+ raise ValueError('The OO_INSTALL_CALLBACK_FACTS_YAML environment '
+ 'variable must be set.')
+ self.hosts_yaml = os.open(self.hosts_yaml_name, os.O_CREAT |
+ os.O_WRONLY)
+
+ def v2_on_any(self, *args, **kwargs):
+ pass
+
+ def v2_runner_on_failed(self, res, ignore_errors=False):
+ pass
+
+ # pylint: disable=protected-access
+ def v2_runner_on_ok(self, res):
+ abridged_result = res._result.copy()
+ # Collect facts result from playbooks/byo/openshift_facts.yml
+ if 'result' in abridged_result:
+ facts = abridged_result['result']['ansible_facts']['openshift']
+ hosts_yaml = {}
+ hosts_yaml[res._host.get_name()] = facts
+ os.write(self.hosts_yaml, yaml.safe_dump(hosts_yaml))
+
+ def v2_runner_on_skipped(self, res):
+ pass
+
+ def v2_runner_on_unreachable(self, res):
+ pass
+
+ def v2_runner_on_no_hosts(self, task):
+ pass
+
+ def v2_runner_on_async_poll(self, res):
+ pass
+
+ def v2_runner_on_async_ok(self, res):
+ pass
+
+ def v2_runner_on_async_failed(self, res):
+ pass
+
+ def v2_playbook_on_start(self, playbook):
+ pass
+
+ def v2_playbook_on_notify(self, res, handler):
+ pass
+
+ def v2_playbook_on_no_hosts_matched(self):
+ pass
+
+ def v2_playbook_on_no_hosts_remaining(self):
+ pass
+
+ def v2_playbook_on_task_start(self, name, is_conditional):
+ pass
+
+ # pylint: disable=too-many-arguments
+ def v2_playbook_on_vars_prompt(self, varname, private=True, prompt=None,
+ encrypt=None, confirm=False, salt_size=None, salt=None, default=None):
+ pass
+
+ def v2_playbook_on_setup(self):
+ pass
+
+ def v2_playbook_on_import_for_host(self, res, imported_file):
+ pass
+
+ def v2_playbook_on_not_import_for_host(self, res, missing_file):
+ pass
+
+ def v2_playbook_on_play_start(self, play):
+ pass
+
+ def v2_playbook_on_stats(self, stats):
+ pass
diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py
new file mode 100644
index 000000000..7e5ad4144
--- /dev/null
+++ b/utils/src/ooinstall/cli_installer.py
@@ -0,0 +1,1186 @@
+# pylint: disable=missing-docstring,no-self-use,no-value-for-parameter,too-many-lines
+
+import logging
+import os
+import sys
+
+import click
+from pkg_resources import parse_version
+from ooinstall import openshift_ansible, utils
+from ooinstall.oo_config import Host, OOConfig, OOConfigInvalidHostError, Role
+from ooinstall.variants import find_variant, get_variant_version_combos
+
+INSTALLER_LOG = logging.getLogger('installer')
+INSTALLER_LOG.setLevel(logging.CRITICAL)
+INSTALLER_FILE_HANDLER = logging.FileHandler('/tmp/installer.txt')
+INSTALLER_FILE_HANDLER.setFormatter(
+ logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
+# Example output:
+# 2016-08-23 07:34:58,480 - installer - DEBUG - Going to 'load_system_facts'
+INSTALLER_FILE_HANDLER.setLevel(logging.DEBUG)
+INSTALLER_LOG.addHandler(INSTALLER_FILE_HANDLER)
+
+DEFAULT_ANSIBLE_CONFIG = '/usr/share/atomic-openshift-utils/ansible.cfg'
+QUIET_ANSIBLE_CONFIG = '/usr/share/atomic-openshift-utils/ansible-quiet.cfg'
+DEFAULT_PLAYBOOK_DIR = '/usr/share/ansible/openshift-ansible/'
+
+UPGRADE_MAPPINGS = {
+ '3.0': {
+ 'minor_version': '3.0',
+ 'minor_playbook': 'v3_0_minor/upgrade.yml',
+ 'major_version': '3.1',
+ 'major_playbook': 'v3_0_to_v3_1/upgrade.yml',
+ },
+ '3.1': {
+ 'minor_version': '3.1',
+ 'minor_playbook': 'v3_1_minor/upgrade.yml',
+ 'major_playbook': 'v3_1_to_v3_2/upgrade.yml',
+ 'major_version': '3.2',
+ },
+ '3.2': {
+ 'minor_version': '3.2',
+ 'minor_playbook': 'v3_2/upgrade.yml',
+ 'major_playbook': 'v3_3/upgrade.yml',
+ 'major_version': '3.3',
+ },
+ '3.3': {
+ 'minor_version': '3.3',
+ 'minor_playbook': 'v3_3/upgrade.yml',
+ 'major_playbook': 'v3_4/upgrade.yml',
+ 'major_version': '3.4',
+ },
+ '3.4': {
+ 'minor_version': '3.4',
+ 'minor_playbook': 'v3_4/upgrade.yml',
+ },
+}
+
+
+def validate_ansible_dir(path):
+ if not path:
+ raise click.BadParameter('An Ansible path must be provided')
+ return path
+ # if not os.path.exists(path)):
+ # raise click.BadParameter("Path \"{}\" doesn't exist".format(path))
+
+
+def validate_prompt_hostname(hostname):
+ if hostname == '' or utils.is_valid_hostname(hostname):
+ return hostname
+ raise click.BadParameter('Invalid hostname. Please double-check this value and re-enter it.')
+
+
+def get_ansible_ssh_user():
+ click.clear()
+ message = """
+This installation process involves connecting to remote hosts via ssh. Any
+account may be used. However, if a non-root account is used, then it must have
+passwordless sudo access.
+"""
+ click.echo(message)
+ return click.prompt('User for ssh access', default='root')
+
+
+def get_routingconfig_subdomain():
+ click.clear()
+ message = """
+You might want to override the default subdomain used for exposed routes. If you don't know what this is, use the default value.
+"""
+ click.echo(message)
+ return click.prompt('New default subdomain (ENTER for none)', default='')
+
+
+def list_hosts(hosts):
+ hosts_idx = range(len(hosts))
+ for idx in hosts_idx:
+ click.echo(' {}: {}'.format(idx, hosts[idx]))
+
+
+def collect_hosts(oo_cfg, existing_env=False, masters_set=False, print_summary=True):
+ """
+ Collect host information from the user. This will later be filled in using
+ Ansible.
+
+ Returns: a tuple of (list of Hosts, set of role names) collected from the user
+ """
+ click.clear()
+ click.echo('*** Host Configuration ***')
+ message = """
+You must now specify the hosts that will compose your OpenShift cluster.
+
+Please enter an IP address or hostname to connect to for each system in the
+cluster. You will then be prompted to identify what role you want this system to
+serve in the cluster.
+
+OpenShift masters serve the API and web console and coordinate the jobs to run
+across the environment. Optionally, you can specify multiple master systems for
+a high-availability (HA) deployment. If you choose an HA deployment, then you
+are prompted to identify a *separate* system to act as the load balancer for
+your cluster once you define all masters and nodes.
+
+Any masters configured as part of this installation process are also
+configured as nodes. This enables the master to proxy to pods
+from the API. By default, this node is unschedulable, but this can be changed
+after installation with the 'oadm manage-node' command.
+
+OpenShift nodes provide the runtime environments for containers. They host the
+required services to be managed by the master.
+
+http://docs.openshift.com/enterprise/latest/architecture/infrastructure_components/kubernetes_infrastructure.html#master
+http://docs.openshift.com/enterprise/latest/architecture/infrastructure_components/kubernetes_infrastructure.html#node
+ """
+ click.echo(message)
+
+ hosts = []
+ roles = set(['master', 'node', 'storage', 'etcd'])
+ more_hosts = True
+ num_masters = 0
+ while more_hosts:
+ host_props = {}
+ host_props['roles'] = []
+ host_props['connect_to'] = click.prompt('Enter hostname or IP address',
+ value_proc=validate_prompt_hostname)
+
+ if not masters_set:
+ if click.confirm('Will this host be an OpenShift master?'):
+ host_props['roles'].append('master')
+ host_props['roles'].append('etcd')
+ num_masters += 1
+
+ if oo_cfg.settings['variant_version'] == '3.0':
+ masters_set = True
+ host_props['roles'].append('node')
+
+ host_props['containerized'] = False
+ if oo_cfg.settings['variant_version'] != '3.0':
+ rpm_or_container = \
+ click.prompt('Will this host be RPM or Container based (rpm/container)?',
+ type=click.Choice(['rpm', 'container']),
+ default='rpm')
+ if rpm_or_container == 'container':
+ host_props['containerized'] = True
+
+ host_props['new_host'] = existing_env
+
+ host = Host(**host_props)
+
+ hosts.append(host)
+
+ if print_summary:
+ print_installation_summary(hosts, oo_cfg.settings['variant_version'])
+
+ # One master is enough for an all-in-one deployment, so we can ask
+ # whether to proceed. With exactly two masters we keep prompting for
+ # more hosts, since an HA deployment requires at least three.
+ if masters_set or num_masters != 2:
+ more_hosts = click.confirm('Do you want to add additional hosts?')
+
+ if num_masters > 2:
+ master_lb = collect_master_lb(hosts)
+ if master_lb:
+ hosts.append(master_lb)
+ roles.add('master_lb')
+ else:
+ set_cluster_hostname(oo_cfg)
+
+ if not existing_env:
+ collect_storage_host(hosts)
+
+ return hosts, roles
+
+
+# pylint: disable=too-many-branches
+def print_installation_summary(hosts, version=None, verbose=True):
+ """
+ Displays a summary of all hosts configured thus far, and what role each
+ will play.
+
+ Shows total nodes/masters, hints for performing/modifying the deployment
+ with additional setup, warnings for invalid or sub-optimal configurations.
+ """
+ click.clear()
+ click.echo('*** Installation Summary ***\n')
+ click.echo('Hosts:')
+ for host in hosts:
+ print_host_summary(hosts, host)
+
+ masters = [host for host in hosts if host.is_master()]
+ nodes = [host for host in hosts if host.is_node()]
+ dedicated_nodes = [host for host in hosts if host.is_node() and not host.is_master()]
+ click.echo('')
+ click.echo('Total OpenShift masters: %s' % len(masters))
+ click.echo('Total OpenShift nodes: %s' % len(nodes))
+
+ if verbose:
+ if len(masters) == 1 and version != '3.0':
+ ha_hint_message = """
+NOTE: Add a total of 3 or more masters to perform an HA installation."""
+ click.echo(ha_hint_message)
+ elif len(masters) == 2:
+ min_masters_message = """
+WARNING: A minimum of 3 masters are required to perform an HA installation.
+Please add one more to proceed."""
+ click.echo(min_masters_message)
+ elif len(masters) >= 3:
+ ha_message = """
+NOTE: Multiple masters specified, this will be an HA deployment with a separate
+etcd cluster. You will be prompted to provide the FQDN of a load balancer and
+a host for storage once finished entering hosts.
+ """
+ click.echo(ha_message)
+
+ dedicated_nodes_message = """
+WARNING: Dedicated nodes are recommended for an HA deployment. If no dedicated
+nodes are specified, each configured master will be marked as a schedulable
+node."""
+
+ min_ha_nodes_message = """
+WARNING: A minimum of 3 dedicated nodes are recommended for an HA
+deployment."""
+ if len(dedicated_nodes) == 0:
+ click.echo(dedicated_nodes_message)
+ elif len(dedicated_nodes) < 3:
+ click.echo(min_ha_nodes_message)
+
+ click.echo('')
+
+
+def print_host_summary(all_hosts, host):
+ click.echo("- %s" % host.connect_to)
+ if host.is_master():
+ click.echo(" - OpenShift master")
+ if host.is_node():
+ if host.is_dedicated_node():
+ click.echo(" - OpenShift node (Dedicated)")
+ elif host.is_schedulable_node(all_hosts):
+ click.echo(" - OpenShift node")
+ else:
+ click.echo(" - OpenShift node (Unscheduled)")
+ if host.is_master_lb():
+ if host.preconfigured:
+ click.echo(" - Load Balancer (Preconfigured)")
+ else:
+ click.echo(" - Load Balancer (HAProxy)")
+ if host.is_etcd():
+ click.echo(" - Etcd")
+ if host.is_storage():
+ click.echo(" - Storage")
+ if host.new_host:
+ click.echo(" - NEW")
+
+
+def collect_master_lb(hosts):
+ """
+ Get a valid load balancer from the user and append it to the list of
+ hosts.
+
+ Ensure user does not specify a system already used as a master/node as
+ this is an invalid configuration.
+ """
+ message = """
+Setting up high-availability masters requires a load balancing solution.
+Please provide the FQDN of a host that will be configured as a proxy. This
+can be either an existing load balancer configured to balance all masters on
+port 8443 or a new host that will have HAProxy installed on it.
+
+If the host provided is not yet configured, a reference HAProxy load
+balancer will be installed. It's important to note that while the rest of the
+environment will be fault-tolerant, this reference load balancer will not be.
+It can be replaced post-installation with a load balancer with the same
+hostname.
+"""
+ click.echo(message)
+ host_props = {}
+
+ # Using an embedded function here so we have access to the hosts list:
+ def validate_prompt_lb(hostname):
+ # Run the standard hostname check first:
+ hostname = validate_prompt_hostname(hostname)
+
+ # Make sure this host wasn't already specified:
+ for host in hosts:
+ if host.connect_to == hostname and (host.is_master() or host.is_node()):
+ raise click.BadParameter('Cannot re-use "%s" as a load balancer, '
+ 'please specify a separate host' % hostname)
+ return hostname
+
+ lb_hostname = click.prompt('Enter hostname or IP address',
+ value_proc=validate_prompt_lb)
+ if lb_hostname:
+ host_props['connect_to'] = lb_hostname
+ install_haproxy = \
+ click.confirm('Should the reference HAProxy load balancer be installed on this host?')
+ host_props['preconfigured'] = not install_haproxy
+ host_props['roles'] = ['master_lb']
+ return Host(**host_props)
+ else:
+ return None
+
+
+def set_cluster_hostname(oo_cfg):
+ first_master = next((host for host in oo_cfg.deployment.hosts if host.is_master()), None)
+ message = """
+You have chosen to install a single master cluster (non-HA).
+
+In a single master cluster, the cluster host name (Ansible variable
+openshift_master_cluster_public_hostname) is set by default to the host name of
+the single master. In a multiple master (HA) cluster, you must instead provide
+the FQDN of a host that will be configured as a proxy. This could be either an
+existing load balancer configured to balance all masters on port 8443 or a new
+host that would have HAProxy installed on it.
+
+(Optional)
+If you want to override the cluster host name now to something other than the
+default (the host name of the single master), or if you think you might add
+masters later to become an HA cluster and want to future-proof your cluster
+host name choice, please provide a FQDN. Otherwise, press ENTER to continue and
+accept the default.
+"""
+ click.echo(message)
+ cluster_hostname = click.prompt('Enter hostname or IP address',
+ default=str(first_master))
+ oo_cfg.deployment.variables['openshift_master_cluster_hostname'] = cluster_hostname
+ oo_cfg.deployment.variables['openshift_master_cluster_public_hostname'] = cluster_hostname
+
+
+def collect_storage_host(hosts):
+ """
+ Get a valid host for storage from the user and append it to the list of
+ hosts.
+ """
+ message = """
+Setting up high-availability masters requires a storage host. Please provide a
+host that will be configured as a Registry Storage.
+
+Note: Containerized storage hosts are not currently supported.
+"""
+ click.echo(message)
+ host_props = {}
+
+ first_master = next(host for host in hosts if host.is_master())
+
+ hostname_or_ip = click.prompt('Enter hostname or IP address',
+ value_proc=validate_prompt_hostname,
+ default=first_master.connect_to)
+ existing, existing_host = is_host_already_node_or_master(hostname_or_ip, hosts)
+ if existing and existing_host.is_node():
+ existing_host.roles.append('storage')
+ else:
+ host_props['connect_to'] = hostname_or_ip
+ host_props['preconfigured'] = False
+ host_props['roles'] = ['storage']
+ storage = Host(**host_props)
+ hosts.append(storage)
+
+
+def is_host_already_node_or_master(hostname, hosts):
+ is_existing = False
+ existing_host = None
+
+ for host in hosts:
+ if host.connect_to == hostname and (host.is_master() or host.is_node()):
+ is_existing = True
+ existing_host = host
+
+ return is_existing, existing_host
+
+
+def confirm_hosts_facts(oo_cfg, callback_facts):
+ hosts = oo_cfg.deployment.hosts
+ click.clear()
+ message = """
+The following is a list of the facts gathered from the provided hosts. The
+hostname for a system inside the cluster is often different from the hostname
+that is resolvable from command-line or web clients; therefore these settings
+cannot be validated automatically.
+
+For some cloud providers, the installer is able to gather metadata exposed in
+the instance, so reasonable defaults will be provided.
+
+Please confirm that they are correct before moving forward.
+
+"""
+ notes = """
+Format:
+
+connect_to,IP,public IP,hostname,public hostname
+
+Notes:
+ * The installation host is the hostname from the installer's perspective.
+ * The IP of the host should be the internal IP of the instance.
+ * The public IP should be the externally accessible IP associated with the instance.
+ * The hostname should resolve to the internal IP from the instances
+ themselves.
+ * The public hostname should resolve to the external IP from hosts outside of
+ the cloud.
+"""
+
+ # For testing purposes we need to click.echo only once, so build up
+ # the message:
+ output = message
+
+ default_facts_lines = []
+ default_facts = {}
+ for host in hosts:
+ if host.preconfigured:
+ continue
+ try:
+ default_facts[host.connect_to] = {}
+ host.ip = callback_facts[host.connect_to]["common"]["ip"]
+ host.public_ip = callback_facts[host.connect_to]["common"]["public_ip"]
+ host.hostname = callback_facts[host.connect_to]["common"]["hostname"]
+ host.public_hostname = callback_facts[host.connect_to]["common"]["public_hostname"]
+ except KeyError:
+ click.echo("Problem fetching facts from {}".format(host.connect_to))
+ continue
+
+ default_facts_lines.append(",".join([host.connect_to,
+ host.ip,
+ host.public_ip,
+ host.hostname,
+ host.public_hostname]))
+ output = "%s\n%s" % (output, ",".join([host.connect_to,
+ host.ip,
+ host.public_ip,
+ host.hostname,
+ host.public_hostname]))
+
+ output = "%s\n%s" % (output, notes)
+ click.echo(output)
+ facts_confirmed = click.confirm("Do the above facts look correct?")
+ if not facts_confirmed:
+ message = """
+Edit %s with the desired values and run `atomic-openshift-installer --unattended install` to restart the install.
+""" % oo_cfg.config_path
+ click.echo(message)
+ # Make sure we actually write out the config file.
+ oo_cfg.save_to_disk()
+ sys.exit(0)
+ return default_facts
+
+
+def check_hosts_config(oo_cfg, unattended):
+ click.clear()
+ masters = [host for host in oo_cfg.deployment.hosts if host.is_master()]
+
+ if len(masters) == 2:
+ click.echo("A minimum of 3 masters are required for HA deployments.")
+ sys.exit(1)
+
+ if len(masters) > 1:
+ master_lb = [host for host in oo_cfg.deployment.hosts if host.is_master_lb()]
+
+ if len(master_lb) > 1:
+ click.echo('ERROR: More than one master load balancer specified. Only one is allowed.')
+ sys.exit(1)
+ elif len(master_lb) == 1:
+ if master_lb[0].is_master() or master_lb[0].is_node():
+ click.echo('ERROR: The master load balancer is configured as a master or node. '
+ 'Please correct this.')
+ sys.exit(1)
+ else:
+ message = """
+ERROR: No master load balancer specified in config. You must provide the FQDN
+of a load balancer to balance the API (port 8443) on all master hosts.
+
+https://docs.openshift.org/latest/install_config/install/advanced_install.html#multiple-masters
+"""
+ click.echo(message)
+ sys.exit(1)
+
+ dedicated_nodes = [host for host in oo_cfg.deployment.hosts
+ if host.is_node() and not host.is_master()]
+ if len(dedicated_nodes) == 0:
+ message = """
+WARNING: No dedicated nodes specified. By default, colocated masters have
+their nodes set to unschedulable. If you proceed, all nodes will be labelled
+as schedulable.
+"""
+ if unattended:
+ click.echo(message)
+ else:
+ confirm_continue(message)
+
+ return
+
+
+def get_variant_and_version(multi_master=False):
+ message = "\nWhich variant would you like to install?\n\n"
+
+ i = 1
+ combos = get_variant_version_combos()
+ for (variant, version) in combos:
+ message = "%s\n(%s) %s" % (message, i, variant.description)
+ i = i + 1
+ message = "%s\n" % message
+
+ click.echo(message)
+ if multi_master:
+ click.echo('NOTE: 3.0 installations are not supported with multiple masters.')
+ response = click.prompt("Choose a variant from above: ", default=1)
+ product, version = combos[response - 1]
+
+ return product, version
+
+
+def confirm_continue(message):
+ if message:
+ click.echo(message)
+ click.confirm("Are you ready to continue?", default=False, abort=True)
+ return
+
+
+def error_if_missing_info(oo_cfg):
+ missing_info = False
+ if not oo_cfg.deployment.hosts:
+ missing_info = True
+ click.echo('For unattended installs, hosts must be specified on the '
+ 'command line or in the config file: %s' % oo_cfg.config_path)
+ sys.exit(1)
+
+ if 'ansible_ssh_user' not in oo_cfg.deployment.variables:
+ click.echo("Must specify ansible_ssh_user in configuration file.")
+ sys.exit(1)
+
+ # Lookup a variant based on the key we were given:
+ if not oo_cfg.settings['variant']:
+ click.echo("No variant specified in configuration file.")
+ sys.exit(1)
+
+ ver = None
+ if 'variant_version' in oo_cfg.settings:
+ ver = oo_cfg.settings['variant_version']
+ variant, version = find_variant(oo_cfg.settings['variant'], version=ver)
+ if variant is None or version is None:
+ err_variant_name = oo_cfg.settings['variant']
+ if ver:
+ err_variant_name = "%s %s" % (err_variant_name, ver)
+ click.echo("%s is not an installable variant." % err_variant_name)
+ sys.exit(1)
+ oo_cfg.settings['variant_version'] = version.name
+
+ # check that all listed host roles are included
+ listed_roles = oo_cfg.get_host_roles_set()
+ configured_roles = set([role for role in oo_cfg.deployment.roles])
+ if listed_roles != configured_roles:
+ missing_info = True
+ click.echo('Any roles assigned to hosts must be defined.')
+
+ if missing_info:
+ sys.exit(1)
+
+
+def get_proxy_hosts_excludes():
+ message = """
+If a proxy is needed to reach HTTP and HTTPS traffic, please enter its
+address below. This proxy will be configured by default for all processes
+that need to reach systems outside the cluster. An example proxy value
+would be:
+
+ http://proxy.example.com:8080/
+
+More advanced configuration is possible if using Ansible directly:
+
+https://docs.openshift.com/enterprise/latest/install_config/http_proxies.html
+"""
+ click.echo(message)
+
+ message = "Specify your http proxy ? (ENTER for none)"
+ http_proxy_hostname = click.prompt(message, default='')
+
+ # TODO: Fix this prompt message and behavior. 'ENTER' will default
+ # to the http_proxy_hostname if one was provided
+ message = "Specify your https proxy ? (ENTER for none)"
+ https_proxy_hostname = click.prompt(message, default=http_proxy_hostname)
+
+ if http_proxy_hostname or https_proxy_hostname:
+ message = """
+All hosts in your OpenShift inventory will automatically be added to the NO_PROXY value.
+Please provide any additional hosts to be added to NO_PROXY. (ENTER for none)
+"""
+ proxy_excludes = click.prompt(message, default='')
+ else:
+ proxy_excludes = ''
+
+ return http_proxy_hostname, https_proxy_hostname, proxy_excludes
+
+
+def get_missing_info_from_user(oo_cfg):
+ """ Prompts the user for any information missing from the given configuration. """
+ click.clear()
+
+ message = """
+Welcome to the OpenShift Enterprise 3 installation.
+
+Please confirm that following prerequisites have been met:
+
+* All systems where OpenShift will be installed are running Red Hat Enterprise
+ Linux 7.
+* All systems are properly subscribed to the required OpenShift Enterprise 3
+ repositories.
+* All systems have run docker-storage-setup (part of the Red Hat docker RPM).
+* All systems have working DNS that resolves not only from the perspective of
+ the installer, but also from within the cluster.
+
+When the process completes you will have a default configuration for masters
+and nodes. For ongoing environment maintenance it's recommended that the
+official Ansible playbooks be used.
+
+For more information on installation prerequisites please see:
+https://docs.openshift.com/enterprise/latest/admin_guide/install/prerequisites.html
+"""
+ confirm_continue(message)
+ click.clear()
+
+ if not oo_cfg.deployment.variables.get('ansible_ssh_user', False):
+ oo_cfg.deployment.variables['ansible_ssh_user'] = get_ansible_ssh_user()
+ click.clear()
+
+ if not oo_cfg.settings.get('variant', ''):
+ variant, version = get_variant_and_version()
+ oo_cfg.settings['variant'] = variant.name
+ oo_cfg.settings['variant_version'] = version.name
+ oo_cfg.settings['variant_subtype'] = version.subtype
+ click.clear()
+
+ if not oo_cfg.deployment.hosts:
+ oo_cfg.deployment.hosts, roles = collect_hosts(oo_cfg)
+ set_infra_nodes(oo_cfg.deployment.hosts)
+
+ for role in roles:
+ oo_cfg.deployment.roles[role] = Role(name=role, variables={})
+ click.clear()
+
+ if 'master_routingconfig_subdomain' not in oo_cfg.deployment.variables:
+ oo_cfg.deployment.variables['master_routingconfig_subdomain'] = \
+ get_routingconfig_subdomain()
+ click.clear()
+
+    # Are any proxy vars already persisted?
+    proxy_vars = ['proxy_exclude_hosts', 'proxy_https', 'proxy_http']
+    # Empty list if NO proxy vars were persisted
+    saved_proxy_vars = [pv for pv in proxy_vars
+                        if oo_cfg.deployment.variables.get(pv, 'UNSET') != 'UNSET']
+
+    INSTALLER_LOG.debug("Evaluated proxy settings, found %s persisted values",
+                        len(saved_proxy_vars))
+ current_version = parse_version(
+ oo_cfg.settings.get('variant_version', '0.0'))
+ min_version = parse_version('3.2')
+
+ # No proxy vars were saved and we are running a version which
+ # recognizes proxy parameters. We must prompt the user for values
+ # if this conditional is true.
+ if not saved_proxy_vars and current_version >= min_version:
+ INSTALLER_LOG.debug("Prompting user to enter proxy values")
+ http_proxy, https_proxy, proxy_excludes = get_proxy_hosts_excludes()
+ oo_cfg.deployment.variables['proxy_http'] = http_proxy
+ oo_cfg.deployment.variables['proxy_https'] = https_proxy
+ oo_cfg.deployment.variables['proxy_exclude_hosts'] = proxy_excludes
+ click.clear()
+
+ return oo_cfg
+
+
+def get_role_variable(oo_cfg, role_name, variable_name):
+ try:
+        target_role = next(role for role in oo_cfg.deployment.roles.values()
+                           if role.name == role_name)
+ target_variable = target_role.variables[variable_name]
+ return target_variable
+ except (StopIteration, KeyError):
+ return None
+
+
+def set_role_variable(oo_cfg, role_name, variable_name, variable_value):
+    target_role = next(role for role in oo_cfg.deployment.roles.values()
+                       if role.name == role_name)
+    target_role.variables[variable_name] = variable_value
+
+
+def collect_new_nodes(oo_cfg):
+ click.clear()
+ click.echo('*** New Node Configuration ***')
+ message = """
+Add new nodes here
+ """
+ click.echo(message)
+ new_nodes, _ = collect_hosts(oo_cfg, existing_env=True, masters_set=True, print_summary=False)
+ return new_nodes
+
+
+def get_installed_hosts(hosts, callback_facts):
+ installed_hosts = []
+ uninstalled_hosts = []
+ for host in [h for h in hosts if h.is_master() or h.is_node()]:
+ if host.connect_to in callback_facts.keys():
+ if is_installed_host(host, callback_facts):
+ INSTALLER_LOG.debug("%s is already installed", str(host))
+ installed_hosts.append(host)
+ else:
+ INSTALLER_LOG.debug("%s is not installed", str(host))
+ uninstalled_hosts.append(host)
+ return installed_hosts, uninstalled_hosts
+
+
+def is_installed_host(host, callback_facts):
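+    # A host counts as installed when its gathered facts report a concrete 'common.version' value.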
+ version_found = 'common' in callback_facts[host.connect_to].keys() and \
+ callback_facts[host.connect_to]['common'].get('version', '') and \
+ callback_facts[host.connect_to]['common'].get('version', '') != 'None'
+
+ return version_found
+
+
+def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force):
+ """
+ We get here once there are hosts in oo_cfg and we need to find out what
+ state they are in. There are several different cases that might occur:
+
+ 1. All hosts in oo_cfg are uninstalled. In this case, we should proceed
+ with a normal installation.
+ 2. All hosts in oo_cfg are installed. In this case, ask the user if they
+ want to force reinstall or exit. We can also hint in this case about
+ the scaleup workflow.
+ 3. Some hosts are installed and some are uninstalled. In this case, prompt
+ the user if they want to force (re)install all hosts specified or direct
+ them to the scaleup workflow and exit.
+ """
+
+ hosts_to_run_on = []
+ # Check if master or nodes already have something installed
+ installed_hosts, uninstalled_hosts = get_installed_hosts(oo_cfg.deployment.hosts,
+ callback_facts)
+ nodes = [host for host in oo_cfg.deployment.hosts if host.is_node()]
+ masters_and_nodes = [host for host in oo_cfg.deployment.hosts if host.is_master() or host.is_node()]
+
+ in_hosts = [str(h) for h in installed_hosts]
+ un_hosts = [str(h) for h in uninstalled_hosts]
+ all_hosts = [str(h) for h in oo_cfg.deployment.hosts]
+ m_and_n = [str(h) for h in masters_and_nodes]
+
+ INSTALLER_LOG.debug("installed hosts: %s", ", ".join(in_hosts))
+ INSTALLER_LOG.debug("uninstalled hosts: %s", ", ".join(un_hosts))
+ INSTALLER_LOG.debug("deployment hosts: %s", ", ".join(all_hosts))
+ INSTALLER_LOG.debug("masters and nodes: %s", ", ".join(m_and_n))
+
+ # Case (1): All uninstalled hosts
+ if len(uninstalled_hosts) == len(nodes):
+ click.echo('All hosts in config are uninstalled. Proceeding with installation...')
+ hosts_to_run_on = list(oo_cfg.deployment.hosts)
+ else:
+ # Case (2): All installed hosts
+ if len(installed_hosts) == len(masters_and_nodes):
+ message = """
+All specified hosts in the specified environment are already installed.
+"""
+ # Case (3): Some installed, some uninstalled
+ else:
+ message = """
+A mix of installed and uninstalled hosts has been detected in your environment.
+Please make sure your environment was installed successfully before adding new nodes.
+"""
+
+ # Still inside the case 2/3 else condition
+ mixed_msg = """
+\tInstalled hosts:
+\t\t{inst_hosts}
+
+\tUninstalled hosts:
+\t\t{uninst_hosts}""".format(inst_hosts=", ".join(in_hosts), uninst_hosts=", ".join(un_hosts))
+ click.echo(mixed_msg)
+
+ # Out of the case 2/3 if/else
+ click.echo(message)
+
+ if not unattended:
+ response = click.confirm('Do you want to (re)install the environment?\n\n'
+ 'Note: This will potentially erase any custom changes.')
+ if response:
+ hosts_to_run_on = list(oo_cfg.deployment.hosts)
+ force = True
+ elif unattended and force:
+ hosts_to_run_on = list(oo_cfg.deployment.hosts)
+ if not force:
+ message = """
+If you want to force reinstall of your environment, run:
+`atomic-openshift-installer install --force`
+
+If you want to add new nodes to this environment, run:
+`atomic-openshift-installer scaleup`
+"""
+ click.echo(message)
+ sys.exit(1)
+
+ return hosts_to_run_on, callback_facts
+
+
+def set_infra_nodes(hosts):
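+    # With an all-master cluster every master gets the infra label; otherwise the
+    # first two schedulable nodes are labeled with 'region': 'infra'.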
+ if all(host.is_master() for host in hosts):
+ infra_list = hosts
+ else:
+ nodes_list = [host for host in hosts if host.is_schedulable_node(hosts)]
+ infra_list = nodes_list[:2]
+
+ for host in infra_list:
+ host.node_labels = "{'region': 'infra'}"
+
+
+def run_config_playbook(oo_cfg, hosts_to_run_on, unattended, verbose, gen_inventory):
+ # Write Ansible inventory file to disk:
+ inventory_file = openshift_ansible.generate_inventory(hosts_to_run_on)
+
+ click.echo()
+ click.echo('Wrote atomic-openshift-installer config: %s' % oo_cfg.config_path)
+ click.echo("Wrote Ansible inventory: %s" % inventory_file)
+ click.echo()
+
+ if gen_inventory:
+ sys.exit(0)
+
+ click.echo('Ready to run installation process.')
+ message = """
+If changes are needed please edit the config file above and re-run.
+"""
+ if not unattended:
+ confirm_continue(message)
+
+ error = openshift_ansible.run_main_playbook(inventory_file, oo_cfg.deployment.hosts,
+ hosts_to_run_on, verbose)
+
+ if error:
+ # The bootstrap script will print out the log location.
+ message = """
+An error was detected. After resolving the problem please relaunch the
+installation process.
+"""
+ click.echo(message)
+ sys.exit(1)
+ else:
+ message = """
+The installation was successful!
+
+If this is your first time installing please take a look at the Administrator
+Guide for advanced options related to routing, storage, authentication, and
+more:
+
+http://docs.openshift.com/enterprise/latest/admin_guide/overview.html
+"""
+ click.echo(message)
+
+
+@click.group()
+@click.pass_context
+@click.option('--unattended', '-u', is_flag=True, default=False)
+@click.option('--configuration', '-c',
+ type=click.Path(file_okay=True,
+ dir_okay=False,
+ writable=True,
+ readable=True),
+ default=None)
+@click.option('--ansible-playbook-directory',
+ '-a',
+ type=click.Path(exists=True,
+ file_okay=False,
+ dir_okay=True,
+ readable=True),
+ # callback=validate_ansible_dir,
+ default=DEFAULT_PLAYBOOK_DIR,
+ envvar='OO_ANSIBLE_PLAYBOOK_DIRECTORY')
+@click.option('--ansible-log-path',
+ type=click.Path(file_okay=True,
+ dir_okay=False,
+ writable=True,
+ readable=True),
+ default="/tmp/ansible.log")
+@click.option('-v', '--verbose',
+ is_flag=True, default=False)
+@click.option('-d', '--debug',
+ help="Enable installer debugging (/tmp/installer.log)",
+ is_flag=True, default=False)
+@click.help_option('--help', '-h')
+# pylint: disable=too-many-arguments
+# pylint: disable=line-too-long
+# Main CLI entrypoint, not much we can do about too many arguments.
+def cli(ctx, unattended, configuration, ansible_playbook_directory, ansible_log_path, verbose, debug):
+ """
+ atomic-openshift-installer makes the process for installing OSE or AEP
+ easier by interactively gathering the data needed to run on each host.
+ It can also be run in unattended mode if provided with a configuration file.
+
+ Further reading: https://docs.openshift.com/enterprise/latest/install_config/install/quick_install.html
+ """
+ if debug:
+ # DEFAULT log level threshold is set to CRITICAL (the
+ # highest), anything below that (we only use debug/warning
+ # presently) is not logged. If '-d' is given though, we'll
+ # lower the threshold to debug (almost everything gets through)
+ INSTALLER_LOG.setLevel(logging.DEBUG)
+ INSTALLER_LOG.debug("Quick Installer debugging initialized")
+
+ ctx.obj = {}
+ ctx.obj['unattended'] = unattended
+ ctx.obj['configuration'] = configuration
+ ctx.obj['ansible_log_path'] = ansible_log_path
+ ctx.obj['verbose'] = verbose
+
+ try:
+ oo_cfg = OOConfig(ctx.obj['configuration'])
+ except OOConfigInvalidHostError as err:
+ click.echo(err)
+ sys.exit(1)
+
+ # If no playbook dir on the CLI, check the config:
+ if not ansible_playbook_directory:
+ ansible_playbook_directory = oo_cfg.settings.get('ansible_playbook_directory', '')
+ # If still no playbook dir, check for the default location:
+ if not ansible_playbook_directory and os.path.exists(DEFAULT_PLAYBOOK_DIR):
+ ansible_playbook_directory = DEFAULT_PLAYBOOK_DIR
+ validate_ansible_dir(ansible_playbook_directory)
+ oo_cfg.settings['ansible_playbook_directory'] = ansible_playbook_directory
+ oo_cfg.ansible_playbook_directory = ansible_playbook_directory
+ ctx.obj['ansible_playbook_directory'] = ansible_playbook_directory
+
+ if os.path.exists(DEFAULT_ANSIBLE_CONFIG):
+ # If we're installed by RPM this file should exist and we can use it as our default:
+ oo_cfg.settings['ansible_config'] = DEFAULT_ANSIBLE_CONFIG
+
+ if not verbose and os.path.exists(QUIET_ANSIBLE_CONFIG):
+ oo_cfg.settings['ansible_quiet_config'] = QUIET_ANSIBLE_CONFIG
+
+ oo_cfg.settings['ansible_log_path'] = ctx.obj['ansible_log_path']
+
+ ctx.obj['oo_cfg'] = oo_cfg
+ openshift_ansible.set_config(oo_cfg)
+
+
+@click.command()
+@click.pass_context
+def uninstall(ctx):
+ oo_cfg = ctx.obj['oo_cfg']
+ verbose = ctx.obj['verbose']
+
+ if hasattr(oo_cfg, 'deployment'):
+ hosts = oo_cfg.deployment.hosts
+ elif hasattr(oo_cfg, 'hosts'):
+ hosts = oo_cfg.hosts
+ else:
+ click.echo("No hosts defined in: %s" % oo_cfg.config_path)
+ sys.exit(1)
+
+ click.echo("OpenShift will be uninstalled from the following hosts:\n")
+ if not ctx.obj['unattended']:
+ # Prompt interactively to confirm:
+ for host in hosts:
+ click.echo(" * %s" % host.connect_to)
+ proceed = click.confirm("\nDo you want to proceed?")
+ if not proceed:
+ click.echo("Uninstall cancelled.")
+ sys.exit(0)
+
+ openshift_ansible.run_uninstall_playbook(hosts, verbose)
+
+
+@click.command()
+@click.option('--latest-minor', '-l', is_flag=True, default=False)
+@click.option('--next-major', '-n', is_flag=True, default=False)
+@click.pass_context
+# pylint: disable=too-many-statements,too-many-branches
+def upgrade(ctx, latest_minor, next_major):
+ oo_cfg = ctx.obj['oo_cfg']
+
+ if len(oo_cfg.deployment.hosts) == 0:
+ click.echo("No hosts defined in: %s" % oo_cfg.config_path)
+ sys.exit(1)
+
+ variant = oo_cfg.settings['variant']
+ if find_variant(variant)[0] is None:
+ click.echo("%s is not a supported variant for upgrade." % variant)
+ sys.exit(0)
+
+ old_version = oo_cfg.settings['variant_version']
+
+ try:
+ mapping = UPGRADE_MAPPINGS[old_version]
+ except KeyError:
+ click.echo('No upgrades available for %s %s' % (variant, old_version))
+ sys.exit(0)
+
+ message = """
+ This tool will help you upgrade your existing OpenShift installation.
+ Currently running: %s %s
+"""
+ click.echo(message % (variant, old_version))
+
+ # Map the dynamic upgrade options to the playbook to run for each.
+ # Index offset by 1.
+ # List contains tuples of booleans for (latest_minor, next_major)
+ selections = []
+ if not (latest_minor or next_major):
+ i = 0
+ if 'minor_playbook' in mapping:
+ click.echo("(%s) Update to latest %s" % (i + 1, old_version))
+ selections.append((True, False))
+ i += 1
+ if 'major_playbook' in mapping:
+ click.echo("(%s) Upgrade to next release: %s" % (i + 1, mapping['major_version']))
+ selections.append((False, True))
+ i += 1
+
+ response = click.prompt("\nChoose an option from above",
+ type=click.Choice(list(map(str, range(1, len(selections) + 1)))))
+ latest_minor, next_major = selections[int(response) - 1]
+
+ if next_major:
+ if 'major_playbook' not in mapping:
+ click.echo("No major upgrade supported for %s %s with this version "
+ "of atomic-openshift-utils." % (variant, old_version))
+ sys.exit(0)
+ playbook = mapping['major_playbook']
+ new_version = mapping['major_version']
+ # Update config to reflect the version we're targeting, we'll write
+ # to disk once Ansible completes successfully, not before.
+ oo_cfg.settings['variant_version'] = new_version
+ if oo_cfg.settings['variant'] == 'enterprise':
+ oo_cfg.settings['variant'] = 'openshift-enterprise'
+
+ if latest_minor:
+ if 'minor_playbook' not in mapping:
+ click.echo("No minor upgrade supported for %s %s with this version "
+ "of atomic-openshift-utils." % (variant, old_version))
+ sys.exit(0)
+ playbook = mapping['minor_playbook']
+ new_version = old_version
+
+ click.echo("OpenShift will be upgraded from %s %s to latest %s %s on the following hosts:\n" % (
+ variant, old_version, oo_cfg.settings['variant'], new_version))
+ for host in oo_cfg.deployment.hosts:
+ click.echo(" * %s" % host.connect_to)
+
+ if not ctx.obj['unattended']:
+ # Prompt interactively to confirm:
+ if not click.confirm("\nDo you want to proceed?"):
+ click.echo("Upgrade cancelled.")
+ sys.exit(0)
+
+ retcode = openshift_ansible.run_upgrade_playbook(oo_cfg.deployment.hosts,
+ playbook,
+ ctx.obj['verbose'])
+ if retcode > 0:
+ click.echo("Errors encountered during upgrade, please check %s." %
+ oo_cfg.settings['ansible_log_path'])
+ else:
+ oo_cfg.save_to_disk()
+ click.echo("Upgrade completed! Rebooting all hosts is recommended.")
+
+
+@click.command()
+@click.option('--force', '-f', is_flag=True, default=False)
+@click.option('--gen-inventory', is_flag=True, default=False,
+ help="Generate an Ansible inventory file and exit.")
+@click.pass_context
+def install(ctx, force, gen_inventory):
+ oo_cfg = ctx.obj['oo_cfg']
+ verbose = ctx.obj['verbose']
+ unattended = ctx.obj['unattended']
+
+ if unattended:
+ error_if_missing_info(oo_cfg)
+ else:
+ oo_cfg = get_missing_info_from_user(oo_cfg)
+
+ check_hosts_config(oo_cfg, unattended)
+
+ print_installation_summary(oo_cfg.deployment.hosts,
+ oo_cfg.settings.get('variant_version', None))
+ click.echo('Gathering information from hosts...')
+ callback_facts, error = openshift_ansible.default_facts(oo_cfg.deployment.hosts,
+ verbose)
+
+ if error or callback_facts is None:
+ click.echo("There was a problem fetching the required information. "
+ "Please see {} for details.".format(oo_cfg.settings['ansible_log_path']))
+ sys.exit(1)
+
+ hosts_to_run_on, callback_facts = get_hosts_to_run_on(oo_cfg,
+ callback_facts,
+ unattended,
+ force)
+
+ # We already verified this is not the case for unattended installs, so this can
+ # only trigger for live CLI users:
+ if not ctx.obj['unattended'] and len(oo_cfg.calc_missing_facts()) > 0:
+ confirm_hosts_facts(oo_cfg, callback_facts)
+
+ # Write quick installer config file to disk:
+ oo_cfg.save_to_disk()
+
+ run_config_playbook(oo_cfg, hosts_to_run_on, unattended, verbose, gen_inventory)
+
+
+@click.command()
+@click.option('--gen-inventory', is_flag=True, default=False,
+ help="Generate an Ansible inventory file and exit.")
+@click.pass_context
+def scaleup(ctx, gen_inventory):
+ oo_cfg = ctx.obj['oo_cfg']
+ verbose = ctx.obj['verbose']
+ unattended = ctx.obj['unattended']
+
+ installed_hosts = list(oo_cfg.deployment.hosts)
+
+ if len(installed_hosts) == 0:
+ click.echo('No hosts specified.')
+ sys.exit(1)
+
+ click.echo('Welcome to the OpenShift Enterprise 3 Scaleup utility.')
+
+ print_installation_summary(installed_hosts,
+ oo_cfg.settings['variant_version'],
+ verbose=False,)
+ message = """
+---
+
+We have detected this previously installed OpenShift environment.
+
+This tool will guide you through the process of adding additional
+nodes to your cluster.
+"""
+ confirm_continue(message)
+
+ error_if_missing_info(oo_cfg)
+ check_hosts_config(oo_cfg, True)
+
+ installed_masters = [host for host in installed_hosts if host.is_master()]
+ new_nodes = collect_new_nodes(oo_cfg)
+
+ oo_cfg.deployment.hosts.extend(new_nodes)
+ hosts_to_run_on = installed_masters + new_nodes
+
+ openshift_ansible.set_config(oo_cfg)
+ click.echo('Gathering information from hosts...')
+ callback_facts, error = openshift_ansible.default_facts(oo_cfg.deployment.hosts, verbose)
+ if error or callback_facts is None:
+ click.echo("There was a problem fetching the required information. See "
+ "{} for details.".format(oo_cfg.settings['ansible_log_path']))
+ sys.exit(1)
+
+ print_installation_summary(oo_cfg.deployment.hosts,
+ oo_cfg.settings.get('variant_version', None))
+ click.echo('Gathering information from hosts...')
+ callback_facts, error = openshift_ansible.default_facts(oo_cfg.deployment.hosts,
+ verbose)
+
+ if error or callback_facts is None:
+ click.echo("There was a problem fetching the required information. "
+ "Please see {} for details.".format(oo_cfg.settings['ansible_log_path']))
+ sys.exit(1)
+
+ # We already verified this is not the case for unattended installs, so this can
+ # only trigger for live CLI users:
+ if not ctx.obj['unattended'] and len(oo_cfg.calc_missing_facts()) > 0:
+ confirm_hosts_facts(oo_cfg, callback_facts)
+
+ # Write quick installer config file to disk:
+ oo_cfg.save_to_disk()
+ run_config_playbook(oo_cfg, hosts_to_run_on, unattended, verbose, gen_inventory)
+
+
+cli.add_command(install)
+cli.add_command(scaleup)
+cli.add_command(upgrade)
+cli.add_command(uninstall)
+
+if __name__ == '__main__':
+ # This is expected behaviour for context passing with click library:
+ # pylint: disable=unexpected-keyword-arg
+ cli(obj={})
diff --git a/utils/src/ooinstall/oo_config.py b/utils/src/ooinstall/oo_config.py
new file mode 100644
index 000000000..64eb340f3
--- /dev/null
+++ b/utils/src/ooinstall/oo_config.py
@@ -0,0 +1,450 @@
+# pylint: disable=bad-continuation,missing-docstring,no-self-use,invalid-name,too-many-instance-attributes,too-few-public-methods
+
+import os
+import sys
+import logging
+import yaml
+from pkg_resources import resource_filename
+
+
+installer_log = logging.getLogger('installer')
+
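+# For reference, a minimal v2 installer config (illustrative values only)
+# looks roughly like this:
+#
+#   variant: openshift-enterprise
+#   version: v2
+#   deployment:
+#     ansible_ssh_user: root
+#     hosts:
+#     - connect_to: 10.0.0.1
+#       ip: 10.0.0.1
+#       hostname: master-private.example.com
+#       public_ip: 24.222.0.1
+#       public_hostname: master.example.com
+#       roles:
+#       - master
+#       - node
+#     roles:
+#       master:
+#       node: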
+CONFIG_PERSIST_SETTINGS = [
+ 'ansible_ssh_user',
+ 'ansible_callback_facts_yaml',
+ 'ansible_inventory_path',
+ 'ansible_log_path',
+ 'deployment',
+ 'version',
+ 'variant',
+ 'variant_subtype',
+ 'variant_version',
+]
+
+DEPLOYMENT_VARIABLES_BLACKLIST = [
+ 'hosts',
+ 'roles',
+]
+
+HOST_VARIABLES_BLACKLIST = [
+ 'ip',
+ 'public_ip',
+ 'hostname',
+ 'public_hostname',
+ 'node_labels',
+ 'containerized',
+ 'preconfigured',
+ 'schedulable',
+ 'other_variables',
+ 'roles',
+]
+
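+# Facts every host must report; preconfigured (externally managed) hosts only need the hostname facts.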
+DEFAULT_REQUIRED_FACTS = ['ip', 'public_ip', 'hostname', 'public_hostname']
+PRECONFIGURED_REQUIRED_FACTS = ['hostname', 'public_hostname']
+
+
+def print_read_config_error(error, path='the configuration file'):
+ message = """
+Error loading config. {}.
+
+See https://docs.openshift.com/enterprise/latest/install_config/install/quick_install.html#defining-an-installation-configuration-file
+for information on creating a configuration file or delete {} and re-run the installer.
+"""
+ print message.format(error, path)
+
+
+class OOConfigFileError(Exception):
+ """The provided config file path can't be read/written
+ """
+ pass
+
+
+class OOConfigInvalidHostError(Exception):
+ """ Host in config is missing both ip and hostname. """
+ pass
+
+
+class Host(object):
+ """ A system we will or have installed OpenShift on. """
+ def __init__(self, **kwargs):
+ self.ip = kwargs.get('ip', None)
+ self.hostname = kwargs.get('hostname', None)
+ self.public_ip = kwargs.get('public_ip', None)
+ self.public_hostname = kwargs.get('public_hostname', None)
+ self.connect_to = kwargs.get('connect_to', None)
+
+ self.preconfigured = kwargs.get('preconfigured', None)
+ self.schedulable = kwargs.get('schedulable', None)
+ self.new_host = kwargs.get('new_host', None)
+ self.containerized = kwargs.get('containerized', False)
+ self.node_labels = kwargs.get('node_labels', '')
+
+ # allowable roles: master, node, etcd, storage, master_lb
+ self.roles = kwargs.get('roles', [])
+
+ self.other_variables = kwargs.get('other_variables', {})
+
+ if self.connect_to is None:
+ raise OOConfigInvalidHostError(
+ "You must specify either an ip or hostname as 'connect_to'")
+
+ def __str__(self):
+ return self.connect_to
+
+ def __repr__(self):
+ return self.connect_to
+
+ def to_dict(self):
+ """ Used when exporting to yaml. """
+ d = {}
+
+ for prop in ['ip', 'hostname', 'public_ip', 'public_hostname', 'connect_to',
+ 'preconfigured', 'containerized', 'schedulable', 'roles', 'node_labels', ]:
+ # If the property is defined (not None or False), export it:
+ if getattr(self, prop):
+ d[prop] = getattr(self, prop)
+ for variable, value in self.other_variables.iteritems():
+ d[variable] = value
+
+ return d
+
+ def is_master(self):
+ return 'master' in self.roles
+
+ def is_node(self):
+ return 'node' in self.roles
+
+ def is_master_lb(self):
+ return 'master_lb' in self.roles
+
+ def is_storage(self):
+ return 'storage' in self.roles
+
+ def is_etcd(self):
+        """ Does this host have the etcd role? """
+ return 'etcd' in self.roles
+
+ def is_etcd_member(self, all_hosts):
+        """ Will this host be a member of a standalone etcd cluster? """
+ if not self.is_master():
+ return False
+ masters = [host for host in all_hosts if host.is_master()]
+ if len(masters) > 1:
+ return True
+ return False
+
+ def is_dedicated_node(self):
+        """ Will this host be a dedicated node (not a master)? """
+ return self.is_node() and not self.is_master()
+
+ def is_schedulable_node(self, all_hosts):
+        """ Will this host be a node marked as schedulable? """
+ if not self.is_node():
+ return False
+ if not self.is_master():
+ return True
+
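+        # Heuristic: when the master and node counts match, the masters are the
+        # only nodes and must remain schedulable.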
+ masters = [host for host in all_hosts if host.is_master()]
+ nodes = [host for host in all_hosts if host.is_node()]
+ if len(masters) == len(nodes):
+ return True
+ return False
+
+
+class Role(object):
+ """ A role that will be applied to a host. """
+ def __init__(self, name, variables):
+ self.name = name
+ self.variables = variables
+
+ def __str__(self):
+ return self.name
+
+ def __repr__(self):
+ return self.name
+
+ def to_dict(self):
+ """ Used when exporting to yaml. """
+ d = {}
+ for prop in ['name', 'variables']:
+ # If the property is defined (not None or False), export it:
+ if getattr(self, prop):
+ d[prop] = getattr(self, prop)
+ return d
+
+
+class Deployment(object):
+ def __init__(self, **kwargs):
+ self.hosts = kwargs.get('hosts', [])
+ self.roles = kwargs.get('roles', {})
+ self.variables = kwargs.get('variables', {})
+
+
+class OOConfig(object):
+ default_dir = os.path.normpath(
+ os.environ.get('XDG_CONFIG_HOME',
+ os.environ['HOME'] + '/.config/') + '/openshift/')
+ default_file = '/installer.cfg.yml'
+
+ def __init__(self, config_path):
+ if config_path:
+ self.config_path = os.path.normpath(config_path)
+ else:
+ self.config_path = os.path.normpath(self.default_dir +
+ self.default_file)
+ self.deployment = Deployment(hosts=[], roles={}, variables={})
+ self.settings = {}
+ self._read_config()
+ self._set_defaults()
+
+ # pylint: disable=too-many-branches
+ # Lots of different checks ran in a single method, could
+ # use a little refactoring-love some time
+ def _read_config(self):
+ installer_log.debug("Attempting to read the OO Config")
+ try:
+ installer_log.debug("Attempting to see if the provided config file exists: %s", self.config_path)
+ if os.path.exists(self.config_path):
+ installer_log.debug("We think the config file exists: %s", self.config_path)
+ with open(self.config_path, 'r') as cfgfile:
+ loaded_config = yaml.safe_load(cfgfile.read())
+
+ if 'version' not in loaded_config:
+ print_read_config_error('Legacy configuration file found', self.config_path)
+ sys.exit(0)
+
+ if loaded_config.get('version', '') == 'v1':
+ loaded_config = self._upgrade_v1_config(loaded_config)
+
+ try:
+ host_list = loaded_config['deployment']['hosts']
+ role_list = loaded_config['deployment']['roles']
+ except KeyError as e:
+ print_read_config_error("No such key: {}".format(e), self.config_path)
+ sys.exit(0)
+
+ for setting in CONFIG_PERSIST_SETTINGS:
+ persisted_value = loaded_config.get(setting)
+ if persisted_value is not None:
+ self.settings[setting] = str(persisted_value)
+
+ # We've loaded any persisted configs, let's verify any
+ # paths which are required for a correct and complete
+ # install
+
+ # - ansible_callback_facts_yaml - Settings from a
+            # previous run. If the file doesn't exist then we
+ # will just warn about it for now and recollect the
+ # facts.
+ if self.settings.get('ansible_callback_facts_yaml', None) is not None:
+ if not os.path.exists(self.settings['ansible_callback_facts_yaml']):
+ # Cached callback facts file does not exist
+                    installer_log.warning("The specified 'ansible_callback_facts_yaml' "
+                                          "file does not exist (%s)",
+ self.settings['ansible_callback_facts_yaml'])
+ installer_log.debug("Remote system facts will be collected again later")
+ self.settings.pop('ansible_callback_facts_yaml')
+
+ for setting in loaded_config['deployment']:
+ try:
+ if setting not in DEPLOYMENT_VARIABLES_BLACKLIST:
+ self.deployment.variables[setting] = \
+ str(loaded_config['deployment'][setting])
+ except KeyError:
+ continue
+
+ # Parse the hosts into DTO objects:
+ for host in host_list:
+ host['other_variables'] = {}
+ for variable, value in host.iteritems():
+ if variable not in HOST_VARIABLES_BLACKLIST:
+ host['other_variables'][variable] = value
+ self.deployment.hosts.append(Host(**host))
+
+ # Parse the roles into Objects
+ for name, variables in role_list.iteritems():
+ self.deployment.roles.update({name: Role(name, variables)})
+
+ except IOError, ferr:
+ raise OOConfigFileError('Cannot open config file "{}": {}'.format(ferr.filename,
+ ferr.strerror))
+ except yaml.scanner.ScannerError:
+ raise OOConfigFileError(
+ 'Config file "{}" is not a valid YAML document'.format(self.config_path))
+ installer_log.debug("Parsed the config file")
+
+ def _upgrade_v1_config(self, config):
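+        # Translate a legacy v1 config into the v2 layout: per-host role booleans
+        # become a per-host 'roles' list plus a deployment-level role map.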
+ new_config_data = {}
+ new_config_data['deployment'] = {}
+ new_config_data['deployment']['hosts'] = []
+ new_config_data['deployment']['roles'] = {}
+ new_config_data['deployment']['variables'] = {}
+
+ role_list = {}
+
+ if config.get('ansible_ssh_user', False):
+ new_config_data['deployment']['ansible_ssh_user'] = config['ansible_ssh_user']
+
+ if config.get('variant', False):
+ new_config_data['variant'] = config['variant']
+
+ if config.get('variant_version', False):
+ new_config_data['variant_version'] = config['variant_version']
+
+ for host in config['hosts']:
+ host_props = {}
+ host_props['roles'] = []
+ host_props['connect_to'] = host['connect_to']
+
+ for prop in ['ip', 'public_ip', 'hostname', 'public_hostname', 'containerized', 'preconfigured']:
+ host_props[prop] = host.get(prop, None)
+
+ for role in ['master', 'node', 'master_lb', 'storage', 'etcd']:
+ if host.get(role, False):
+ host_props['roles'].append(role)
+ role_list[role] = ''
+
+ new_config_data['deployment']['hosts'].append(host_props)
+
+ new_config_data['deployment']['roles'] = role_list
+
+ return new_config_data
+
+ def _set_defaults(self):
+ installer_log.debug("Setting defaults, current OOConfig settings: %s", self.settings)
+
+ if 'ansible_inventory_directory' not in self.settings:
+ self.settings['ansible_inventory_directory'] = self._default_ansible_inv_dir()
+
+ if not os.path.exists(self.settings['ansible_inventory_directory']):
+ installer_log.debug("'ansible_inventory_directory' does not exist, "
+ "creating it now (%s)",
+ self.settings['ansible_inventory_directory'])
+ os.makedirs(self.settings['ansible_inventory_directory'])
+ else:
+ installer_log.debug("We think this 'ansible_inventory_directory' "
+ "is OK: %s",
+ self.settings['ansible_inventory_directory'])
+
+ if 'ansible_plugins_directory' not in self.settings:
+ self.settings['ansible_plugins_directory'] = \
+ resource_filename(__name__, 'ansible_plugins')
+ installer_log.debug("We think the ansible plugins directory should be: %s (it is not already set)",
+ self.settings['ansible_plugins_directory'])
+ else:
+ installer_log.debug("The ansible plugins directory is already set: %s",
+ self.settings['ansible_plugins_directory'])
+
+ if 'version' not in self.settings:
+ self.settings['version'] = 'v2'
+
+ if 'ansible_callback_facts_yaml' not in self.settings:
+ installer_log.debug("No 'ansible_callback_facts_yaml' in self.settings")
+ self.settings['ansible_callback_facts_yaml'] = '%s/callback_facts.yaml' % \
+ self.settings['ansible_inventory_directory']
+ installer_log.debug("Value: %s", self.settings['ansible_callback_facts_yaml'])
+ else:
+ installer_log.debug("'ansible_callback_facts_yaml' already set "
+ "in self.settings: %s",
+ self.settings['ansible_callback_facts_yaml'])
+
+ if 'ansible_ssh_user' not in self.settings:
+ self.settings['ansible_ssh_user'] = ''
+
+ self.settings['ansible_inventory_path'] = \
+ '{}/hosts'.format(os.path.dirname(self.config_path))
+
+ # pylint: disable=consider-iterating-dictionary
+ # Disabled because we shouldn't alter the container we're
+ # iterating over
+ #
+ # clean up any empty sets
+ for setting in self.settings.keys():
+ if not self.settings[setting]:
+ self.settings.pop(setting)
+
+ installer_log.debug("Updated OOConfig settings: %s", self.settings)
+
+ def _default_ansible_inv_dir(self):
+ return os.path.normpath(
+ os.path.dirname(self.config_path) + "/.ansible")
+
+ def calc_missing_facts(self):
+ """
+ Determine which host facts are not defined in the config.
+
+        Returns a dict mapping each host's connect_to value to a list of its missing facts.
+ """
+ result = {}
+
+ for host in self.deployment.hosts:
+ missing_facts = []
+ if host.preconfigured:
+ required_facts = PRECONFIGURED_REQUIRED_FACTS
+ else:
+ required_facts = DEFAULT_REQUIRED_FACTS
+
+ for required_fact in required_facts:
+ if not getattr(host, required_fact):
+ missing_facts.append(required_fact)
+ if len(missing_facts) > 0:
+ result[host.connect_to] = missing_facts
+ return result
+
+ def save_to_disk(self):
+ out_file = open(self.config_path, 'w')
+ out_file.write(self.yaml())
+ out_file.close()
+
+ def persist_settings(self):
+ p_settings = {}
+
+ for setting in CONFIG_PERSIST_SETTINGS:
+ if setting in self.settings and self.settings[setting]:
+ p_settings[setting] = self.settings[setting]
+
+ p_settings['deployment'] = {}
+ p_settings['deployment']['hosts'] = []
+ p_settings['deployment']['roles'] = {}
+
+ for host in self.deployment.hosts:
+ p_settings['deployment']['hosts'].append(host.to_dict())
+
+ for name, role in self.deployment.roles.iteritems():
+ p_settings['deployment']['roles'][name] = role.variables
+
+ for setting in self.deployment.variables:
+ if setting not in DEPLOYMENT_VARIABLES_BLACKLIST:
+ p_settings['deployment'][setting] = self.deployment.variables[setting]
+
+ try:
+ p_settings['variant'] = self.settings['variant']
+ p_settings['variant_version'] = self.settings['variant_version']
+
+ if self.settings['ansible_inventory_directory'] != self._default_ansible_inv_dir():
+ p_settings['ansible_inventory_directory'] = self.settings['ansible_inventory_directory']
+ except KeyError as e:
+ print "Error persisting settings: {}".format(e)
+ sys.exit(0)
+
+ return p_settings
+
+ def yaml(self):
+ return yaml.safe_dump(self.persist_settings(), default_flow_style=False)
+
+ def __str__(self):
+ return self.yaml()
+
+ def get_host(self, name):
+ for host in self.deployment.hosts:
+ if host.connect_to == name:
+ return host
+ return None
+
+ def get_host_roles_set(self):
+ roles_set = set()
+ for host in self.deployment.hosts:
+ for role in host.roles:
+ roles_set.add(role)
+
+ return roles_set
diff --git a/utils/src/ooinstall/openshift_ansible.py b/utils/src/ooinstall/openshift_ansible.py
new file mode 100644
index 000000000..f542fb376
--- /dev/null
+++ b/utils/src/ooinstall/openshift_ansible.py
@@ -0,0 +1,339 @@
+# pylint: disable=bad-continuation,missing-docstring,no-self-use,invalid-name,global-statement,global-variable-not-assigned
+
+import socket
+import subprocess
+import sys
+import os
+import logging
+import yaml
+from ooinstall.variants import find_variant
+from ooinstall.utils import debug_env
+
+installer_log = logging.getLogger('installer')
+
+CFG = None
+
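+# Map installer host roles to the inventory group names expected by the openshift-ansible playbooks.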
+ROLES_TO_GROUPS_MAP = {
+ 'master': 'masters',
+ 'node': 'nodes',
+ 'etcd': 'etcd',
+ 'storage': 'nfs',
+ 'master_lb': 'lb'
+}
+
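+# Installer-level settings translated into openshift-ansible inventory variable names.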
+VARIABLES_MAP = {
+ 'ansible_ssh_user': 'ansible_ssh_user',
+ 'deployment_type': 'deployment_type',
+ 'variant_subtype': 'deployment_subtype',
+ 'master_routingconfig_subdomain': 'openshift_master_default_subdomain',
+ 'proxy_http': 'openshift_http_proxy',
+ 'proxy_https': 'openshift_https_proxy',
+ 'proxy_exclude_hosts': 'openshift_no_proxy',
+}
+
+HOST_VARIABLES_MAP = {
+ 'ip': 'openshift_ip',
+ 'public_ip': 'openshift_public_ip',
+ 'hostname': 'openshift_hostname',
+ 'public_hostname': 'openshift_public_hostname',
+ 'containerized': 'containerized',
+}
+
+
+def set_config(cfg):
+ global CFG
+ CFG = cfg
+
+
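+# The generated inventory roughly follows this shape (illustrative, not a
+# literal transcript of any one run):
+#
+#   [OSEv3:children]
+#   masters
+#   nodes
+#
+#   [OSEv3:vars]
+#   ansible_ssh_user=root
+#   deployment_type=openshift-enterprise
+#
+#   [masters]
+#   master.example.com openshift_ip=10.0.0.1 openshift_hostname=master-private.example.com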
+def generate_inventory(hosts):
+ global CFG
+
+ new_nodes = [host for host in hosts if host.is_node() and host.new_host]
+ scaleup = len(new_nodes) > 0
+
+ lb = determine_lb_configuration(hosts)
+
+ base_inventory_path = CFG.settings['ansible_inventory_path']
+ base_inventory = open(base_inventory_path, 'w')
+
+ write_inventory_children(base_inventory, scaleup)
+
+ write_inventory_vars(base_inventory, lb)
+
+ # write_inventory_hosts
+ for role in CFG.deployment.roles:
+ # write group block
+ group = ROLES_TO_GROUPS_MAP.get(role, role)
+ base_inventory.write("\n[{}]\n".format(group))
+ # write each host
+ group_hosts = [host for host in hosts if role in host.roles]
+ for host in group_hosts:
+ schedulable = host.is_schedulable_node(hosts)
+ write_host(host, role, base_inventory, schedulable)
+
+ if scaleup:
+ base_inventory.write('\n[new_nodes]\n')
+ for node in new_nodes:
+ write_host(node, 'new_nodes', base_inventory)
+
+ base_inventory.close()
+ return base_inventory_path
+
+
+def determine_lb_configuration(hosts):
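+    # A load balancer may be referenced only by its connect_to address; fall back
+    # to it for both hostname fields.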
+ lb = next((host for host in hosts if host.is_master_lb()), None)
+ if lb:
+ if lb.hostname is None:
+ lb.hostname = lb.connect_to
+ lb.public_hostname = lb.connect_to
+
+ return lb
+
+
+def write_inventory_children(base_inventory, scaleup):
+ global CFG
+
+ base_inventory.write('\n[OSEv3:children]\n')
+ for role in CFG.deployment.roles:
+ child = ROLES_TO_GROUPS_MAP.get(role, role)
+ base_inventory.write('{}\n'.format(child))
+
+ if scaleup:
+ base_inventory.write('new_nodes\n')
+
+
+# pylint: disable=too-many-branches
+def write_inventory_vars(base_inventory, lb):
+ global CFG
+ base_inventory.write('\n[OSEv3:vars]\n')
+
+ for variable, value in CFG.settings.iteritems():
+ inventory_var = VARIABLES_MAP.get(variable, None)
+ if inventory_var and value:
+ base_inventory.write('{}={}\n'.format(inventory_var, value))
+
+ for variable, value in CFG.deployment.variables.iteritems():
+ inventory_var = VARIABLES_MAP.get(variable, variable)
+ if value:
+ base_inventory.write('{}={}\n'.format(inventory_var, value))
+
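+    # A non-root SSH user needs privilege escalation on the remote hosts.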
+ if CFG.deployment.variables['ansible_ssh_user'] != 'root':
+ base_inventory.write('ansible_become=yes\n')
+
+ if lb is not None:
+ base_inventory.write('openshift_master_cluster_method=native\n')
+ base_inventory.write("openshift_master_cluster_hostname={}\n".format(lb.hostname))
+ base_inventory.write(
+ "openshift_master_cluster_public_hostname={}\n".format(lb.public_hostname))
+
+ if CFG.settings.get('variant_version', None) == '3.1':
+ # base_inventory.write('openshift_image_tag=v{}\n'.format(CFG.settings.get('variant_version')))
+ base_inventory.write('openshift_image_tag=v{}\n'.format('3.1.1.6'))
+
+ write_proxy_settings(base_inventory)
+
+ # Find the correct deployment type for ansible:
+ ver = find_variant(CFG.settings['variant'],
+ version=CFG.settings.get('variant_version', None))[1]
+ base_inventory.write('deployment_type={}\n'.format(ver.ansible_key))
+    if getattr(ver, 'subtype', False):
+        base_inventory.write('deployment_subtype={}\n'.format(ver.subtype))
+
+ if 'OO_INSTALL_ADDITIONAL_REGISTRIES' in os.environ:
+ base_inventory.write('openshift_docker_additional_registries={}\n'.format(
+ os.environ['OO_INSTALL_ADDITIONAL_REGISTRIES']))
+ if 'OO_INSTALL_INSECURE_REGISTRIES' in os.environ:
+ base_inventory.write('openshift_docker_insecure_registries={}\n'.format(
+ os.environ['OO_INSTALL_INSECURE_REGISTRIES']))
+ if 'OO_INSTALL_PUDDLE_REPO' in os.environ:
+ # We have to double the '{' here for literals
+ base_inventory.write("openshift_additional_repos=[{{'id': 'ose-devel', "
+ "'name': 'ose-devel', "
+ "'baseurl': '{}', "
+ "'enabled': 1, 'gpgcheck': 0}}]\n".format(os.environ['OO_INSTALL_PUDDLE_REPO']))
+
+ for name, role_obj in CFG.deployment.roles.iteritems():
+ if role_obj.variables:
+ group_name = ROLES_TO_GROUPS_MAP.get(name, name)
+ base_inventory.write("\n[{}:vars]\n".format(group_name))
+ for variable, value in role_obj.variables.iteritems():
+ inventory_var = VARIABLES_MAP.get(variable, variable)
+ if value:
+ base_inventory.write('{}={}\n'.format(inventory_var, value))
+ base_inventory.write("\n")
+
+
+def write_proxy_settings(base_inventory):
+ try:
+ base_inventory.write("openshift_http_proxy={}\n".format(
+ CFG.settings['openshift_http_proxy']))
+ except KeyError:
+ pass
+ try:
+ base_inventory.write("openshift_https_proxy={}\n".format(
+ CFG.settings['openshift_https_proxy']))
+ except KeyError:
+ pass
+ try:
+ base_inventory.write("openshift_no_proxy={}\n".format(
+ CFG.settings['openshift_no_proxy']))
+ except KeyError:
+ pass
+
+
+def write_host(host, role, inventory, schedulable=None):
+ global CFG
+
+ if host.preconfigured:
+ return
+
+ facts = ''
+ for prop in HOST_VARIABLES_MAP:
+ if getattr(host, prop):
+ facts += ' {}={}'.format(HOST_VARIABLES_MAP.get(prop), getattr(host, prop))
+
+ if host.other_variables:
+ for variable, value in host.other_variables.iteritems():
+ facts += " {}={}".format(variable, value)
+
+ if host.node_labels and role == 'node':
+ facts += ' openshift_node_labels="{}"'.format(host.node_labels)
+
+    # Distinguish between three states: no schedulability specified (use default),
+ # explicitly set to True, or explicitly set to False:
+ if role != 'node' or schedulable is None:
+ pass
+ else:
+ facts += " openshift_schedulable={}".format(schedulable)
+
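+    # When the installer machine is itself a target host, connect locally and
+    # require passwordless sudo if not running as root.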
+ installer_host = socket.gethostname()
+ if installer_host in [host.connect_to, host.hostname, host.public_hostname]:
+ facts += ' ansible_connection=local'
+ if os.geteuid() != 0:
+ no_pwd_sudo = subprocess.call(['sudo', '-n', 'echo', 'openshift'])
+ if no_pwd_sudo == 1:
+ print 'The atomic-openshift-installer requires sudo access without a password.'
+ sys.exit(1)
+ facts += ' ansible_become=yes'
+
+ inventory.write('{} {}\n'.format(host.connect_to, facts))
+
+
+def load_system_facts(inventory_file, os_facts_path, env_vars, verbose=False):
+ """
+ Retrieves system facts from the remote systems.
+ """
+ installer_log.debug("Inside load_system_facts")
+ installer_log.debug("load_system_facts will run with Ansible/Openshift environment variables:")
+ debug_env(env_vars)
+
+ FNULL = open(os.devnull, 'w')
+ args = ['ansible-playbook', '-v'] if verbose \
+ else ['ansible-playbook']
+ args.extend([
+ '--inventory-file={}'.format(inventory_file),
+ os_facts_path])
+ installer_log.debug("Going to subprocess out to ansible now with these args: %s", ' '.join(args))
+ installer_log.debug("Subprocess will run with Ansible/Openshift environment variables:")
+ debug_env(env_vars)
+ status = subprocess.call(args, env=env_vars, stdout=FNULL)
+ if status != 0:
+ installer_log.debug("Exit status from subprocess was not 0")
+ return [], 1
+
+ with open(CFG.settings['ansible_callback_facts_yaml'], 'r') as callback_facts_file:
+ installer_log.debug("Going to try to read this file: %s", CFG.settings['ansible_callback_facts_yaml'])
+ try:
+ callback_facts = yaml.safe_load(callback_facts_file)
+ except yaml.YAMLError, exc:
+ print "Error in {}".format(CFG.settings['ansible_callback_facts_yaml']), exc
+ print "Try deleting and rerunning the atomic-openshift-installer"
+ sys.exit(1)
+
+ return callback_facts, 0
+
+
+def default_facts(hosts, verbose=False):
+ global CFG
+ installer_log.debug("Current global CFG vars here: %s", CFG)
+ inventory_file = generate_inventory(hosts)
+ os_facts_path = '{}/playbooks/byo/openshift_facts.yml'.format(CFG.ansible_playbook_directory)
+
+ facts_env = os.environ.copy()
+ facts_env["OO_INSTALL_CALLBACK_FACTS_YAML"] = CFG.settings['ansible_callback_facts_yaml']
+ facts_env["ANSIBLE_CALLBACK_PLUGINS"] = CFG.settings['ansible_plugins_directory']
+ facts_env["OPENSHIFT_MASTER_CLUSTER_METHOD"] = 'native'
+ if 'ansible_log_path' in CFG.settings:
+ facts_env["ANSIBLE_LOG_PATH"] = CFG.settings['ansible_log_path']
+ if 'ansible_config' in CFG.settings:
+ facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_config']
+
+ installer_log.debug("facts_env: %s", facts_env)
+ installer_log.debug("Going to 'load_system_facts' next")
+ return load_system_facts(inventory_file, os_facts_path, facts_env, verbose)
+
+
+def run_main_playbook(inventory_file, hosts, hosts_to_run_on, verbose=False):
+ global CFG
+ if len(hosts_to_run_on) != len(hosts):
+ main_playbook_path = os.path.join(CFG.ansible_playbook_directory,
+ 'playbooks/byo/openshift-node/scaleup.yml')
+ else:
+ main_playbook_path = os.path.join(CFG.ansible_playbook_directory,
+ 'playbooks/byo/openshift-cluster/config.yml')
+ facts_env = os.environ.copy()
+ if 'ansible_log_path' in CFG.settings:
+ facts_env['ANSIBLE_LOG_PATH'] = CFG.settings['ansible_log_path']
+
+ # override the ansible config for our main playbook run
+ if 'ansible_quiet_config' in CFG.settings:
+ facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_quiet_config']
+
+ return run_ansible(main_playbook_path, inventory_file, facts_env, verbose)
+
+
+def run_ansible(playbook, inventory, env_vars, verbose=False):
+ installer_log.debug("run_ansible will run with Ansible/Openshift environment variables:")
+ debug_env(env_vars)
+
+ args = ['ansible-playbook', '-v'] if verbose \
+ else ['ansible-playbook']
+ args.extend([
+ '--inventory-file={}'.format(inventory),
+ playbook])
+ installer_log.debug("Going to subprocess out to ansible now with these args: %s", ' '.join(args))
+ return subprocess.call(args, env=env_vars)
+
+
+def run_uninstall_playbook(hosts, verbose=False):
+ playbook = os.path.join(CFG.settings['ansible_playbook_directory'],
+ 'playbooks/adhoc/uninstall.yml')
+ inventory_file = generate_inventory(hosts)
+ facts_env = os.environ.copy()
+ if 'ansible_log_path' in CFG.settings:
+ facts_env['ANSIBLE_LOG_PATH'] = CFG.settings['ansible_log_path']
+ if 'ansible_config' in CFG.settings:
+ facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_config']
+ # override the ansible config for our main playbook run
+ if 'ansible_quiet_config' in CFG.settings:
+ facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_quiet_config']
+
+ return run_ansible(playbook, inventory_file, facts_env, verbose)
+
+
+def run_upgrade_playbook(hosts, playbook, verbose=False):
+ playbook = os.path.join(CFG.settings['ansible_playbook_directory'],
+ 'playbooks/byo/openshift-cluster/upgrades/{}'.format(playbook))
+
+ # TODO: Upgrade inventory for upgrade?
+ inventory_file = generate_inventory(hosts)
+ facts_env = os.environ.copy()
+ if 'ansible_log_path' in CFG.settings:
+ facts_env['ANSIBLE_LOG_PATH'] = CFG.settings['ansible_log_path']
+ if 'ansible_config' in CFG.settings:
+ facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_config']
+ # override the ansible config for our main playbook run
+ if 'ansible_quiet_config' in CFG.settings:
+ facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_quiet_config']
+
+ return run_ansible(playbook, inventory_file, facts_env, verbose)
diff --git a/utils/src/ooinstall/utils.py b/utils/src/ooinstall/utils.py
new file mode 100644
index 000000000..85a77c75e
--- /dev/null
+++ b/utils/src/ooinstall/utils.py
@@ -0,0 +1,21 @@
+import logging
+import re
+
+
+installer_log = logging.getLogger('installer')
+
+
+def debug_env(env):
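+    # Log only the environment variables relevant to the installer (OPENSHIFT*, ANSIBLE*, OO*).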
+ for k in sorted(env.keys()):
+ if k.startswith("OPENSHIFT") or k.startswith("ANSIBLE") or k.startswith("OO"):
+ installer_log.debug("{key}: {value}".format(
+ key=k, value=env[k]))
+
+
+def is_valid_hostname(hostname):
+ if not hostname or len(hostname) > 255:
+ return False
+ if hostname[-1] == ".":
+ hostname = hostname[:-1] # strip exactly one dot from the right, if present
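+    # Each dot-separated label must be 1-63 alphanumeric or hyphen characters and
+    # may not begin or end with a hyphen.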
+ allowed = re.compile(r"(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE)
+ return all(allowed.match(x) for x in hostname.split("."))
diff --git a/utils/src/ooinstall/variants.py b/utils/src/ooinstall/variants.py
new file mode 100644
index 000000000..39772bb2e
--- /dev/null
+++ b/utils/src/ooinstall/variants.py
@@ -0,0 +1,96 @@
+# TODO: Temporarily disabled due to importing old code into openshift-ansible
+# repo. We will work on these over time.
+# pylint: disable=bad-continuation,missing-docstring,no-self-use,invalid-name,too-few-public-methods
+
+"""
+Defines the variants and versions the installer supports, along with the
+metadata required to run Ansible correctly.
+
+This module needs to be updated for each major release to allow the new version
+to be specified by the user, and to point the generic variants to the latest
+version.
+"""
+
+import logging
+installer_log = logging.getLogger('installer')
+
+
+class Version(object):
+ def __init__(self, name, ansible_key, subtype=''):
+        self.name = name  # e.g. 3.0, 3.1
+
+ self.ansible_key = ansible_key
+ self.subtype = subtype
+
+
+class Variant(object):
+ def __init__(self, name, description, versions):
+ # Supported variant name:
+ self.name = name
+
+ # Friendly name for the variant:
+ self.description = description
+
+ self.versions = versions
+
+ def latest_version(self):
+ return self.versions[0]
+
+
+# WARNING: Keep the versions ordered, most recent first:
+OSE = Variant('openshift-enterprise', 'OpenShift Container Platform',
+ [
+ Version('3.4', 'openshift-enterprise'),
+ ]
+)
+
+REG = Variant('openshift-enterprise', 'Registry',
+ [
+ Version('3.4', 'openshift-enterprise', 'registry'),
+ ]
+)
+
+origin = Variant('origin', 'OpenShift Origin',
+ [
+ Version('1.4', 'origin'),
+ ]
+)
+
+LEGACY = Variant('openshift-enterprise', 'OpenShift Container Platform',
+ [
+ Version('3.3', 'openshift-enterprise'),
+ Version('3.2', 'openshift-enterprise'),
+ Version('3.1', 'openshift-enterprise'),
+ Version('3.0', 'openshift-enterprise'),
+ ]
+)
+
+# Ordered list of variants we can install, first is the default.
+SUPPORTED_VARIANTS = (OSE, REG, origin, LEGACY)
+DISPLAY_VARIANTS = (OSE, REG,)
+
+
+def find_variant(name, version=None):
+ """
+ Locate the variant object for the variant given in config file, and
+ the correct version to use for it.
+ Return (None, None) if we can't find a match.
+ """
+ prod = None
+ for prod in SUPPORTED_VARIANTS:
+ if prod.name == name:
+ if version is None:
+ return (prod, prod.latest_version())
+ for v in prod.versions:
+ if v.name == version:
+ return (prod, v)
+
+ return (None, None)
+
+
+def get_variant_version_combos():
+ combos = []
+ for variant in DISPLAY_VARIANTS:
+ for ver in variant.versions:
+ combos.append((variant, ver))
+ return combos
diff --git a/utils/test-requirements.txt b/utils/test-requirements.txt
new file mode 100644
index 000000000..af91ab6a7
--- /dev/null
+++ b/utils/test-requirements.txt
@@ -0,0 +1,12 @@
+enum
+configparser
+pylint
+pep8
+nose
+coverage
+mock
+flake8
+PyYAML
+click
+backports.functools_lru_cache
+pyOpenSSL
diff --git a/utils/test/__init__.py b/utils/test/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/utils/test/__init__.py
diff --git a/utils/test/cli_installer_tests.py b/utils/test/cli_installer_tests.py
new file mode 100644
index 000000000..36dc18034
--- /dev/null
+++ b/utils/test/cli_installer_tests.py
@@ -0,0 +1,1132 @@
+# TODO: Temporarily disabled due to importing old code into openshift-ansible
+# repo. We will work on these over time.
+# pylint: disable=bad-continuation,missing-docstring,no-self-use,invalid-name,too-many-lines
+
+import copy
+import os
+import ConfigParser
+
+import ooinstall.cli_installer as cli
+
+from test.fixture import OOCliFixture, SAMPLE_CONFIG, build_input, read_yaml
+from mock import patch
+
+
+MOCK_FACTS = {
+ '10.0.0.1': {
+ 'common': {
+ 'ip': '10.0.0.1',
+ 'public_ip': '10.0.0.1',
+ 'hostname': 'master-private.example.com',
+ 'public_hostname': 'master.example.com'
+ }
+ },
+ '10.0.0.2': {
+ 'common': {
+ 'ip': '10.0.0.2',
+ 'public_ip': '10.0.0.2',
+ 'hostname': 'node1-private.example.com',
+ 'public_hostname': 'node1.example.com'
+ }
+ },
+ '10.0.0.3': {
+ 'common': {
+ 'ip': '10.0.0.3',
+ 'public_ip': '10.0.0.3',
+ 'hostname': 'node2-private.example.com',
+ 'public_hostname': 'node2.example.com'
+ }
+ },
+ '10.1.0.1': {
+ 'common': {
+ 'ip': '10.1.0.1',
+ 'public_ip': '10.1.0.1',
+ 'hostname': 'storage-private.example.com',
+ 'public_hostname': 'storage.example.com'
+ }
+ },
+}
+
+MOCK_FACTS_QUICKHA = {
+ '10.0.0.1': {
+ 'common': {
+ 'ip': '10.0.0.1',
+ 'public_ip': '10.0.0.1',
+ 'hostname': 'master-private.example.com',
+ 'public_hostname': 'master.example.com'
+ }
+ },
+ '10.0.0.2': {
+ 'common': {
+ 'ip': '10.0.0.2',
+ 'public_ip': '10.0.0.2',
+ 'hostname': 'node1-private.example.com',
+ 'public_hostname': 'node1.example.com'
+ }
+ },
+ '10.0.0.3': {
+ 'common': {
+ 'ip': '10.0.0.3',
+ 'public_ip': '10.0.0.3',
+ 'hostname': 'node2-private.example.com',
+ 'public_hostname': 'node2.example.com'
+ }
+ },
+ '10.0.0.4': {
+ 'common': {
+ 'ip': '10.0.0.4',
+ 'public_ip': '10.0.0.4',
+ 'hostname': 'node3-private.example.com',
+ 'public_hostname': 'node3.example.com'
+ }
+ },
+ '10.0.0.5': {
+ 'common': {
+ 'ip': '10.0.0.5',
+ 'public_ip': '10.0.0.5',
+ 'hostname': 'proxy-private.example.com',
+ 'public_hostname': 'proxy.example.com'
+ }
+ },
+ '10.1.0.1': {
+ 'common': {
+ 'ip': '10.1.0.1',
+ 'public_ip': '10.1.0.1',
+ 'hostname': 'storage-private.example.com',
+ 'public_hostname': 'storage.example.com'
+ }
+ },
+}
+
+# Missing connect_to on some hosts:
+BAD_CONFIG = """
+variant: %s
+version: v2
+deployment:
+ ansible_ssh_user: root
+ hosts:
+ - connect_to: 10.0.0.1
+ ip: 10.0.0.1
+ hostname: master-private.example.com
+ public_ip: 24.222.0.1
+ public_hostname: master.example.com
+ roles:
+ - master
+ - node
+ - ip: 10.0.0.2
+ hostname: node1-private.example.com
+ public_ip: 24.222.0.2
+ public_hostname: node1.example.com
+ roles:
+ - node
+ - connect_to: 10.0.0.3
+ ip: 10.0.0.3
+ hostname: node2-private.example.com
+ public_ip: 24.222.0.3
+ public_hostname: node2.example.com
+ roles:
+ - node
+ roles:
+ master:
+ node:
+"""
+
+QUICKHA_CONFIG = """
+variant: %s
+version: v2
+deployment:
+ ansible_ssh_user: root
+ hosts:
+ - connect_to: 10.0.0.1
+ ip: 10.0.0.1
+ hostname: master-private.example.com
+ public_ip: 24.222.0.1
+ public_hostname: master.example.com
+ roles:
+ - master
+ - node
+ - connect_to: 10.0.0.2
+ ip: 10.0.0.2
+ hostname: node1-private.example.com
+ public_ip: 24.222.0.2
+ public_hostname: node1.example.com
+ roles:
+ - master
+ - node
+ - connect_to: 10.0.0.3
+ ip: 10.0.0.3
+ hostname: node2-private.example.com
+ public_ip: 24.222.0.3
+ public_hostname: node2.example.com
+ roles:
+ - master
+ - node
+ - connect_to: 10.0.0.4
+ ip: 10.0.0.4
+ hostname: node3-private.example.com
+ public_ip: 24.222.0.4
+ public_hostname: node3.example.com
+ roles:
+ - node
+ - connect_to: 10.0.0.5
+ ip: 10.0.0.5
+ hostname: proxy-private.example.com
+ public_ip: 24.222.0.5
+ public_hostname: proxy.example.com
+ roles:
+ - master_lb
+ - connect_to: 10.1.0.1
+ ip: 10.1.0.1
+ hostname: storage-private.example.com
+ public_ip: 24.222.0.6
+ public_hostname: storage.example.com
+ roles:
+ - storage
+ roles:
+ master:
+ master_lb:
+ node:
+ storage:
+"""
+
+QUICKHA_2_MASTER_CONFIG = """
+variant: %s
+version: v2
+deployment:
+ ansible_ssh_user: root
+ hosts:
+ - connect_to: 10.0.0.1
+ ip: 10.0.0.1
+ hostname: master-private.example.com
+ public_ip: 24.222.0.1
+ public_hostname: master.example.com
+ roles:
+ - master
+ - node
+ - connect_to: 10.0.0.2
+ ip: 10.0.0.2
+ hostname: node1-private.example.com
+ public_ip: 24.222.0.2
+ public_hostname: node1.example.com
+ roles:
+ - master
+ - node
+ - connect_to: 10.0.0.4
+ ip: 10.0.0.4
+ hostname: node3-private.example.com
+ public_ip: 24.222.0.4
+ public_hostname: node3.example.com
+ roles:
+ - node
+ - connect_to: 10.0.0.5
+ ip: 10.0.0.5
+ hostname: proxy-private.example.com
+ public_ip: 24.222.0.5
+ public_hostname: proxy.example.com
+ roles:
+ - master_lb
+ - connect_to: 10.1.0.1
+ ip: 10.1.0.1
+ hostname: storage-private.example.com
+ public_ip: 24.222.0.6
+ public_hostname: storage.example.com
+ roles:
+ - storage
+ roles:
+ master:
+ master_lb:
+ node:
+ storage:
+"""
+
+QUICKHA_CONFIG_REUSED_LB = """
+variant: %s
+version: v2
+deployment:
+ ansible_ssh_user: root
+ hosts:
+ - connect_to: 10.0.0.1
+ ip: 10.0.0.1
+ hostname: master-private.example.com
+ public_ip: 24.222.0.1
+ public_hostname: master.example.com
+ roles:
+ - master
+ - node
+ - connect_to: 10.0.0.2
+ ip: 10.0.0.2
+ hostname: node1-private.example.com
+ public_ip: 24.222.0.2
+ public_hostname: node1.example.com
+ roles:
+ - master
+ - node
+ - master_lb
+ - connect_to: 10.0.0.3
+ ip: 10.0.0.3
+ hostname: node2-private.example.com
+ public_ip: 24.222.0.3
+ public_hostname: node2.example.com
+ roles:
+ - master
+ - node
+ - connect_to: 10.1.0.1
+ ip: 10.1.0.1
+ hostname: storage-private.example.com
+ public_ip: 24.222.0.6
+ public_hostname: storage.example.com
+ roles:
+ - storage
+ roles:
+ master:
+ node:
+ storage:
+"""
+
+QUICKHA_CONFIG_NO_LB = """
+variant: %s
+version: v2
+deployment:
+ ansible_ssh_user: root
+ hosts:
+ - connect_to: 10.0.0.1
+ ip: 10.0.0.1
+ hostname: master-private.example.com
+ public_ip: 24.222.0.1
+ public_hostname: master.example.com
+ roles:
+ - master
+ - node
+ - connect_to: 10.0.0.2
+ ip: 10.0.0.2
+ hostname: node1-private.example.com
+ public_ip: 24.222.0.2
+ public_hostname: node1.example.com
+ roles:
+ - master
+ - node
+ - connect_to: 10.0.0.3
+ ip: 10.0.0.3
+ hostname: node2-private.example.com
+ public_ip: 24.222.0.3
+ public_hostname: node2.example.com
+ roles:
+ - master
+ - node
+ - connect_to: 10.1.0.1
+ ip: 10.1.0.1
+ hostname: storage-private.example.com
+ public_ip: 24.222.0.6
+ public_hostname: storage.example.com
+ roles:
+ - storage
+ roles:
+ master:
+ node:
+ storage:
+"""
+
+QUICKHA_CONFIG_PRECONFIGURED_LB = """
+variant: %s
+version: v2
+deployment:
+ ansible_ssh_user: root
+ hosts:
+ - connect_to: 10.0.0.1
+ ip: 10.0.0.1
+ hostname: master-private.example.com
+ public_ip: 24.222.0.1
+ public_hostname: master.example.com
+ roles:
+ - master
+ - node
+ - connect_to: 10.0.0.2
+ ip: 10.0.0.2
+ hostname: node1-private.example.com
+ public_ip: 24.222.0.2
+ public_hostname: node1.example.com
+ roles:
+ - master
+ - node
+ - connect_to: 10.0.0.3
+ ip: 10.0.0.3
+ hostname: node2-private.example.com
+ public_ip: 24.222.0.3
+ public_hostname: node2.example.com
+ roles:
+ - master
+ - node
+ - connect_to: 10.0.0.4
+ ip: 10.0.0.4
+ hostname: node3-private.example.com
+ public_ip: 24.222.0.4
+ public_hostname: node3.example.com
+ roles:
+ - node
+ - connect_to: proxy-private.example.com
+ hostname: proxy-private.example.com
+ public_hostname: proxy.example.com
+ preconfigured: true
+ roles:
+ - master_lb
+ - connect_to: 10.1.0.1
+ ip: 10.1.0.1
+ hostname: storage-private.example.com
+ public_ip: 24.222.0.6
+ public_hostname: storage.example.com
+ roles:
+ - storage
+ roles:
+ master:
+ master_lb:
+ node:
+ storage:
+"""
+
+class UnattendedCliTests(OOCliFixture):
+
+ def setUp(self):
+ OOCliFixture.setUp(self)
+ self.cli_args.append("-u")
+
+ # unattended with config file and all installed hosts (without --force)
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_get_hosts_to_run_on1(self, load_facts_mock, run_playbook_mock):
+ mock_facts = copy.deepcopy(MOCK_FACTS)
+ mock_facts['10.0.0.1']['common']['version'] = "3.0.0"
+ mock_facts['10.0.0.2']['common']['version'] = "3.0.0"
+ mock_facts['10.0.0.3']['common']['version'] = "3.0.0"
+
+ load_facts_mock.return_value = (mock_facts, 0)
+ run_playbook_mock.return_value = 0
+
+ config_file = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'), SAMPLE_CONFIG % 'openshift-enterprise')
+
+ self.cli_args.extend(["-c", config_file, "install"])
+ result = self.runner.invoke(cli.cli, self.cli_args)
+
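+ # All three hosts already report an installed version and --force was
+ # not given, so the installer is expected to bail out with exit code 1.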
+ if result.exception is None or result.exit_code != 1:
+ print "Exit code: %s" % result.exit_code
+ self.fail("Unexpected CLI return")
+
+ # unattended with config file and all installed hosts (with --force)
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_get_hosts_to_run_on2(self, load_facts_mock, run_playbook_mock):
+ mock_facts = copy.deepcopy(MOCK_FACTS)
+ mock_facts['10.0.0.1']['common']['version'] = "3.0.0"
+ mock_facts['10.0.0.2']['common']['version'] = "3.0.0"
+ mock_facts['10.0.0.3']['common']['version'] = "3.0.0"
+ self._verify_get_hosts_to_run_on(mock_facts, load_facts_mock, run_playbook_mock,
+ cli_input=None,
+ exp_hosts_len=3,
+ exp_hosts_to_run_on_len=3,
+ force=True)
+
+ # unattended with config file and no installed hosts (without --force)
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_get_hosts_to_run_on3(self, load_facts_mock, run_playbook_mock):
+ load_facts_mock.return_value = (MOCK_FACTS, 0)
+ run_playbook_mock.return_value = 0
+ self._verify_get_hosts_to_run_on(MOCK_FACTS, load_facts_mock, run_playbook_mock,
+ cli_input=None,
+ exp_hosts_len=3,
+ exp_hosts_to_run_on_len=3,
+ force=False)
+
+ # unattended with config file and no installed hosts (with --force)
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_get_hosts_to_run_on4(self, load_facts_mock, run_playbook_mock):
+ load_facts_mock.return_value = (MOCK_FACTS, 0)
+ run_playbook_mock.return_value = 0
+ self._verify_get_hosts_to_run_on(MOCK_FACTS, load_facts_mock, run_playbook_mock,
+ cli_input=None,
+ exp_hosts_len=3,
+ exp_hosts_to_run_on_len=3,
+ force=True)
+
+ # unattended with config file and some installed, some uninstalled hosts (without --force)
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_get_hosts_to_run_on5(self, load_facts_mock, run_playbook_mock):
+ mock_facts = copy.deepcopy(MOCK_FACTS)
+ mock_facts['10.0.0.1']['common']['version'] = "3.0.0"
+ mock_facts['10.0.0.2']['common']['version'] = "3.0.0"
+ self._verify_get_hosts_to_run_on(mock_facts, load_facts_mock, run_playbook_mock,
+ cli_input=None,
+ exp_hosts_len=3,
+ exp_hosts_to_run_on_len=2,
+ force=False)
+
+ # unattended with config file and some installed, some uninstalled hosts (with --force)
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_get_hosts_to_run_on6(self, load_facts_mock, run_playbook_mock):
+ mock_facts = copy.deepcopy(MOCK_FACTS)
+ mock_facts['10.0.0.1']['common']['version'] = "3.0.0"
+ mock_facts['10.0.0.2']['common']['version'] = "3.0.0"
+ self._verify_get_hosts_to_run_on(mock_facts, load_facts_mock, run_playbook_mock,
+ cli_input=None,
+ exp_hosts_len=3,
+ exp_hosts_to_run_on_len=3,
+ force=True)
+
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_cfg_full_run(self, load_facts_mock, run_playbook_mock):
+ load_facts_mock.return_value = (MOCK_FACTS, 0)
+ run_playbook_mock.return_value = 0
+
+ config_file = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'), SAMPLE_CONFIG % 'openshift-enterprise')
+
+ self.cli_args.extend(["-c", config_file, "install"])
+ result = self.runner.invoke(cli.cli, self.cli_args)
+ self.assert_result(result, 0)
+
+ load_facts_args = load_facts_mock.call_args[0]
+ self.assertEquals(os.path.join(self.work_dir, "hosts"),
+ load_facts_args[0])
+ self.assertEquals(os.path.join(self.work_dir,
+ "playbooks/byo/openshift_facts.yml"), load_facts_args[1])
+ env_vars = load_facts_args[2]
+ self.assertEquals(os.path.join(self.work_dir,
+ '.ansible/callback_facts.yaml'),
+ env_vars['OO_INSTALL_CALLBACK_FACTS_YAML'])
+ self.assertEqual('/tmp/ansible.log', env_vars['ANSIBLE_LOG_PATH'])
+ # If the user running the test has the rpm installed, this might be set to the default:
+ self.assertTrue('ANSIBLE_CONFIG' not in env_vars or
+ env_vars['ANSIBLE_CONFIG'] == cli.DEFAULT_ANSIBLE_CONFIG)
+
+ # Make sure we ran on the expected masters and nodes:
+ hosts = run_playbook_mock.call_args[0][1]
+ hosts_to_run_on = run_playbook_mock.call_args[0][2]
+ self.assertEquals(3, len(hosts))
+ self.assertEquals(3, len(hosts_to_run_on))
+
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_inventory_write(self, load_facts_mock, run_playbook_mock):
+ merged_config = SAMPLE_CONFIG % 'openshift-enterprise'
+ load_facts_mock.return_value = (MOCK_FACTS, 0)
+ run_playbook_mock.return_value = 0
+
+ config_file = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'), merged_config)
+
+ self.cli_args.extend(["-c", config_file, "install"])
+ result = self.runner.invoke(cli.cli, self.cli_args)
+ self.assert_result(result, 0)
+
+ # Check the inventory file looks as we would expect:
+ inventory = ConfigParser.ConfigParser(allow_no_value=True)
+ inventory.read(os.path.join(self.work_dir, 'hosts'))
+ self.assertEquals('root',
+ inventory.get('OSEv3:vars', 'ansible_ssh_user'))
+ self.assertEquals('openshift-enterprise',
+ inventory.get('OSEv3:vars', 'deployment_type'))
+
+ # Check the masters:
+ self.assertEquals(1, len(inventory.items('masters')))
+ self.assertEquals(3, len(inventory.items('nodes')))
+
+ for item in inventory.items('masters'):
+ # ansible host lines do NOT parse nicely:
+ master_line = item[0]
+ if item[1] is not None:
+ master_line = "%s=%s" % (master_line, item[1])
+ self.assertTrue('openshift_ip' in master_line)
+ self.assertTrue('openshift_public_ip' in master_line)
+ self.assertTrue('openshift_hostname' in master_line)
+ self.assertTrue('openshift_public_hostname' in master_line)
+
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_variant_version_latest_assumed(self, load_facts_mock,
+ run_playbook_mock):
+ load_facts_mock.return_value = (MOCK_FACTS, 0)
+ run_playbook_mock.return_value = 0
+
+ config_file = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'), SAMPLE_CONFIG % 'openshift-enterprise')
+
+ self.cli_args.extend(["-c", config_file, "install"])
+ result = self.runner.invoke(cli.cli, self.cli_args)
+ self.assert_result(result, 0)
+
+ written_config = read_yaml(config_file)
+
+ self.assertEquals('openshift-enterprise', written_config['variant'])
+ # We didn't specify a version so the latest should have been assumed,
+ # and written to disk:
+ self.assertEquals('3.3', written_config['variant_version'])
+
+ # Make sure the correct value was passed to ansible:
+ inventory = ConfigParser.ConfigParser(allow_no_value=True)
+ inventory.read(os.path.join(self.work_dir, 'hosts'))
+ self.assertEquals('openshift-enterprise',
+ inventory.get('OSEv3:vars', 'deployment_type'))
+
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_variant_version_preserved(self, load_facts_mock,
+ run_playbook_mock):
+ load_facts_mock.return_value = (MOCK_FACTS, 0)
+ run_playbook_mock.return_value = 0
+
+ config = SAMPLE_CONFIG % 'openshift-enterprise'
+ config = '%s\n%s' % (config, 'variant_version: 3.3')
+ config_file = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'), config)
+
+ self.cli_args.extend(["-c", config_file, "install"])
+ result = self.runner.invoke(cli.cli, self.cli_args)
+ self.assert_result(result, 0)
+
+ written_config = read_yaml(config_file)
+
+ self.assertEquals('openshift-enterprise', written_config['variant'])
+ # Make sure the version specified in the config was preserved
+ # and written to disk:
+ self.assertEquals('3.3', written_config['variant_version'])
+
+ inventory = ConfigParser.ConfigParser(allow_no_value=True)
+ inventory.read(os.path.join(self.work_dir, 'hosts'))
+ self.assertEquals('openshift-enterprise',
+ inventory.get('OSEv3:vars', 'deployment_type'))
+
+ # 2016-09-26 - tbielawa - COMMENTING OUT these tests FOR NOW while
+ # we wait to see if anyone notices that we took away their ability
+ # to set the ansible_config parameter in the command line options
+ # and in the installer config file.
+ #
+ # We have removed the ability to set the ansible config file
+ # manually so that our new quieter output mode is the default and
+ # only output mode.
+ #
+ # RE: https://trello.com/c/DSwwizwP - atomic-openshift-install
+ # should only output relevant information.
+
+ # @patch('ooinstall.openshift_ansible.run_ansible')
+ # @patch('ooinstall.openshift_ansible.load_system_facts')
+ # def test_no_ansible_config_specified(self, load_facts_mock, run_ansible_mock):
+ # load_facts_mock.return_value = (MOCK_FACTS, 0)
+ # run_ansible_mock.return_value = 0
+
+ # config = SAMPLE_CONFIG % 'openshift-enterprise'
+
+ # self._ansible_config_test(load_facts_mock, run_ansible_mock,
+ # config, None, None)
+
+ # @patch('ooinstall.openshift_ansible.run_ansible')
+ # @patch('ooinstall.openshift_ansible.load_system_facts')
+ # def test_ansible_config_specified_cli(self, load_facts_mock, run_ansible_mock):
+ # load_facts_mock.return_value = (MOCK_FACTS, 0)
+ # run_ansible_mock.return_value = 0
+
+ # config = SAMPLE_CONFIG % 'openshift-enterprise'
+ # ansible_config = os.path.join(self.work_dir, 'ansible.cfg')
+
+ # self._ansible_config_test(load_facts_mock, run_ansible_mock,
+ # config, ansible_config, ansible_config)
+
+ # @patch('ooinstall.openshift_ansible.run_ansible')
+ # @patch('ooinstall.openshift_ansible.load_system_facts')
+ # def test_ansible_config_specified_in_installer_config(self,
+ # load_facts_mock, run_ansible_mock):
+
+ # load_facts_mock.return_value = (MOCK_FACTS, 0)
+ # run_ansible_mock.return_value = 0
+
+ # ansible_config = os.path.join(self.work_dir, 'ansible.cfg')
+ # config = SAMPLE_CONFIG % 'openshift-enterprise'
+ # config = "%s\nansible_config: %s" % (config, ansible_config)
+ # self._ansible_config_test(load_facts_mock, run_ansible_mock,
+ # config, None, ansible_config)
+
+ # #pylint: disable=too-many-arguments
+ # # This method allows drastically simpler tests to be written, and the
+ # # args are all useful.
+ # def _ansible_config_test(self, load_facts_mock, run_ansible_mock,
+ # installer_config, ansible_config_cli=None, expected_result=None):
+ # """
+ # Utility method for testing the ways you can specify the ansible config.
+ # """
+
+ # load_facts_mock.return_value = (MOCK_FACTS, 0)
+ # run_ansible_mock.return_value = 0
+
+ # config_file = self.write_config(os.path.join(self.work_dir,
+ # 'ooinstall.conf'), installer_config)
+
+ # self.cli_args.extend(["-c", config_file])
+ # if ansible_config_cli:
+ # self.cli_args.extend(["--ansible-config", ansible_config_cli])
+ # self.cli_args.append("install")
+ # result = self.runner.invoke(cli.cli, self.cli_args)
+ # self.assert_result(result, 0)
+
+ # # Test the env vars for facts playbook:
+ # facts_env_vars = load_facts_mock.call_args[0][2]
+ # if expected_result:
+ # self.assertEquals(expected_result, facts_env_vars['ANSIBLE_CONFIG'])
+ # else:
+ # # If user running test has rpm installed, this might be set to default:
+ # self.assertTrue('ANSIBLE_CONFIG' not in facts_env_vars or
+ # facts_env_vars['ANSIBLE_CONFIG'] == cli.DEFAULT_ANSIBLE_CONFIG)
+
+ # # Test the env vars for main playbook:
+ # env_vars = run_ansible_mock.call_args[0][2]
+ # if expected_result:
+ # self.assertEquals(expected_result, env_vars['ANSIBLE_CONFIG'])
+ # else:
+ # # If user running test has rpm installed, this might be set to default:
+ # #
+ # # By default we will use the quiet config
+ # self.assertTrue('ANSIBLE_CONFIG' not in env_vars or
+ # env_vars['ANSIBLE_CONFIG'] == cli.QUIET_ANSIBLE_CONFIG)
+
+ # unattended with bad config file and no installed hosts (without --force)
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_bad_config(self, load_facts_mock, run_playbook_mock):
+ load_facts_mock.return_value = (MOCK_FACTS, 0)
+ run_playbook_mock.return_value = 0
+
+ config_file = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'), BAD_CONFIG % 'openshift-enterprise')
+
+ self.cli_args.extend(["-c", config_file, "install"])
+ result = self.runner.invoke(cli.cli, self.cli_args)
+
+ self.assertEquals(1, result.exit_code)
+ self.assertTrue("You must specify either an ip or hostname"
+ in result.output)
+
+ #unattended with three masters, one node, and haproxy
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_quick_ha_full_run(self, load_facts_mock, run_playbook_mock):
+ load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0)
+ run_playbook_mock.return_value = 0
+
+ config_file = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'), QUICKHA_CONFIG % 'openshift-enterprise')
+
+ self.cli_args.extend(["-c", config_file, "install"])
+ result = self.runner.invoke(cli.cli, self.cli_args)
+ self.assert_result(result, 0)
+
+ # Make sure we ran on the expected masters and nodes:
+ hosts = run_playbook_mock.call_args[0][1]
+ hosts_to_run_on = run_playbook_mock.call_args[0][2]
+ self.assertEquals(6, len(hosts))
+ self.assertEquals(6, len(hosts_to_run_on))
+
+ #unattended with two masters, one node, and haproxy
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_quick_ha_only_2_masters(self, load_facts_mock, run_playbook_mock):
+ load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0)
+ run_playbook_mock.return_value = 0
+
+ config_file = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'), QUICKHA_2_MASTER_CONFIG % 'openshift-enterprise')
+
+ self.cli_args.extend(["-c", config_file, "install"])
+ result = self.runner.invoke(cli.cli, self.cli_args)
+
+ # This is an invalid config:
+ self.assert_result(result, 1)
+ self.assertTrue("A minimum of 3 masters are required" in result.output)
+
+ #unattended with three masters, one node, but no load balancer specified:
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_quick_ha_no_lb(self, load_facts_mock, run_playbook_mock):
+ load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0)
+ run_playbook_mock.return_value = 0
+
+ config_file = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'), QUICKHA_CONFIG_NO_LB % 'openshift-enterprise')
+
+ self.cli_args.extend(["-c", config_file, "install"])
+ result = self.runner.invoke(cli.cli, self.cli_args)
+
+ # This is not a valid input:
+ self.assert_result(result, 1)
+ self.assertTrue('No master load balancer specified in config' in result.output)
+
+ #unattended with three masters, one node, and one of the masters reused as load balancer:
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_quick_ha_reused_lb(self, load_facts_mock, run_playbook_mock):
+ load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0)
+ run_playbook_mock.return_value = 0
+
+ config_file = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'), QUICKHA_CONFIG_REUSED_LB % 'openshift-enterprise')
+
+ self.cli_args.extend(["-c", config_file, "install"])
+ result = self.runner.invoke(cli.cli, self.cli_args)
+
+ # This is not a valid configuration:
+ self.assert_result(result, 1)
+
+ #unattended with preconfigured lb
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_quick_ha_preconfigured_lb(self, load_facts_mock, run_playbook_mock):
+ load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0)
+ run_playbook_mock.return_value = 0
+
+ config_file = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'), QUICKHA_CONFIG_PRECONFIGURED_LB % 'openshift-enterprise')
+
+ self.cli_args.extend(["-c", config_file, "install"])
+ result = self.runner.invoke(cli.cli, self.cli_args)
+ self.assert_result(result, 0)
+
+ # Make sure we ran on the expected masters and nodes:
+ hosts = run_playbook_mock.call_args[0][1]
+ hosts_to_run_on = run_playbook_mock.call_args[0][2]
+ self.assertEquals(6, len(hosts))
+ self.assertEquals(6, len(hosts_to_run_on))
+
+class AttendedCliTests(OOCliFixture):
+
+ def setUp(self):
+ OOCliFixture.setUp(self)
+ # Doesn't exist, but keeps us from reading the local user's config:
+ self.config_file = os.path.join(self.work_dir, 'config.yml')
+ self.cli_args.extend(["-c", self.config_file])
+
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_full_run(self, load_facts_mock, run_playbook_mock):
+ load_facts_mock.return_value = (MOCK_FACTS, 0)
+ run_playbook_mock.return_value = 0
+
+ cli_input = build_input(hosts=[
+ ('10.0.0.1', True, False),
+ ('10.0.0.2', False, False),
+ ('10.0.0.3', False, False)],
+ ssh_user='root',
+ variant_num=1,
+ confirm_facts='y',
+ storage='10.1.0.1',)
+ self.cli_args.append("install")
+ result = self.runner.invoke(cli.cli, self.cli_args,
+ input=cli_input)
+ self.assert_result(result, 0)
+
+ self._verify_load_facts(load_facts_mock)
+ self._verify_run_playbook(run_playbook_mock, 4, 4)
+
+ written_config = read_yaml(self.config_file)
+ self._verify_config_hosts(written_config, 4)
+
+ inventory = ConfigParser.ConfigParser(allow_no_value=True)
+ inventory.read(os.path.join(self.work_dir, 'hosts'))
+ self.assert_inventory_host_var(inventory, 'nodes', '10.0.0.1',
+ 'openshift_schedulable=False')
+ self.assert_inventory_host_var_unset(inventory, 'nodes', '10.0.0.2',
+ 'openshift_schedulable=True')
+ self.assert_inventory_host_var_unset(inventory, 'nodes', '10.0.0.3',
+ 'openshift_schedulable=True')
+
+ # interactive with config file and some installed, some uninstalled hosts
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_scaleup_hint(self, load_facts_mock, run_playbook_mock):
+
+ # Modify the mock facts to return a version indicating OpenShift
+ # is already installed on our master, and the first node.
+ mock_facts = copy.deepcopy(MOCK_FACTS)
+ mock_facts['10.0.0.1']['common']['version'] = "3.0.0"
+ mock_facts['10.0.0.2']['common']['version'] = "3.0.0"
+
+ load_facts_mock.return_value = (mock_facts, 0)
+ run_playbook_mock.return_value = 0
+
+ cli_input = build_input(hosts=[
+ ('10.0.0.1', True, False),
+ ('10.0.0.2', False, False),
+ ],
+ add_nodes=[('10.0.0.3', False, False)],
+ ssh_user='root',
+ variant_num=1,
+ confirm_facts='y',
+ storage='10.0.0.1',)
+ self.cli_args.append("install")
+ result = self.runner.invoke(cli.cli,
+ self.cli_args,
+ input=cli_input)
+
+ # This is testing the install workflow so we want to make sure we
+ # exit with the appropriate hint.
+ self.assertTrue('scaleup' in result.output)
+ self.assert_result(result, 1)
+
+
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_fresh_install_with_config(self, load_facts_mock, run_playbook_mock):
+ load_facts_mock.return_value = (MOCK_FACTS, 0)
+ run_playbook_mock.return_value = 0
+
+ config_file = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'),
+ SAMPLE_CONFIG % 'openshift-enterprise')
+ cli_input = build_input(confirm_facts='y')
+ self.cli_args.extend(["-c", config_file])
+ self.cli_args.append("install")
+ result = self.runner.invoke(cli.cli,
+ self.cli_args,
+ input=cli_input)
+ self.assert_result(result, 0)
+
+ self._verify_load_facts(load_facts_mock)
+ self._verify_run_playbook(run_playbook_mock, 3, 3)
+
+ written_config = read_yaml(config_file)
+ self._verify_config_hosts(written_config, 3)
+
+# #interactive with config file and all installed hosts
+# @patch('ooinstall.openshift_ansible.run_main_playbook')
+# @patch('ooinstall.openshift_ansible.load_system_facts')
+# def test_get_hosts_to_run_on(self, load_facts_mock, run_playbook_mock):
+# mock_facts = copy.deepcopy(MOCK_FACTS)
+# mock_facts['10.0.0.1']['common']['version'] = "3.0.0"
+# mock_facts['10.0.0.2']['common']['version'] = "3.0.0"
+#
+# cli_input = build_input(hosts=[
+# ('10.0.0.1', True, False),
+# ],
+# add_nodes=[('10.0.0.2', False, False)],
+# ssh_user='root',
+# variant_num=1,
+# schedulable_masters_ok=True,
+# confirm_facts='y',
+# storage='10.0.0.1',)
+#
+# self._verify_get_hosts_to_run_on(mock_facts, load_facts_mock,
+# run_playbook_mock,
+# cli_input,
+# exp_hosts_len=2,
+# exp_hosts_to_run_on_len=2,
+# force=False)
+
+ #interactive multimaster: one more node than master
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_ha_dedicated_node(self, load_facts_mock, run_playbook_mock):
+ load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0)
+ run_playbook_mock.return_value = 0
+
+ cli_input = build_input(hosts=[
+ ('10.0.0.1', True, False),
+ ('10.0.0.2', True, False),
+ ('10.0.0.3', True, False),
+ ('10.0.0.4', False, False)],
+ ssh_user='root',
+ variant_num=1,
+ confirm_facts='y',
+ master_lb=('10.0.0.5', False),
+ storage='10.1.0.1',)
+ self.cli_args.append("install")
+ result = self.runner.invoke(cli.cli, self.cli_args,
+ input=cli_input)
+ self.assert_result(result, 0)
+
+ self._verify_load_facts(load_facts_mock)
+ self._verify_run_playbook(run_playbook_mock, 6, 6)
+
+ written_config = read_yaml(self.config_file)
+ self._verify_config_hosts(written_config, 6)
+
+ inventory = ConfigParser.ConfigParser(allow_no_value=True)
+ inventory.read(os.path.join(self.work_dir, 'hosts'))
+ self.assert_inventory_host_var(inventory, 'nodes', '10.0.0.1',
+ 'openshift_schedulable=False')
+ self.assert_inventory_host_var(inventory, 'nodes', '10.0.0.2',
+ 'openshift_schedulable=False')
+ self.assert_inventory_host_var(inventory, 'nodes', '10.0.0.3',
+ 'openshift_schedulable=False')
+ self.assert_inventory_host_var_unset(inventory, 'nodes', '10.0.0.4',
+ 'openshift_schedulable=True')
+
+ self.assertTrue(inventory.has_section('etcd'))
+ self.assertEquals(3, len(inventory.items('etcd')))
+
+ #interactive multimaster: identical masters and nodes
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_ha_no_dedicated_nodes(self, load_facts_mock, run_playbook_mock):
+ load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0)
+ run_playbook_mock.return_value = 0
+
+ cli_input = build_input(hosts=[
+ ('10.0.0.1', True, False),
+ ('10.0.0.2', True, False),
+ ('10.0.0.3', True, False)],
+ ssh_user='root',
+ variant_num=1,
+ confirm_facts='y',
+ master_lb=('10.0.0.5', False),
+ storage='10.1.0.1',)
+ self.cli_args.append("install")
+ result = self.runner.invoke(cli.cli, self.cli_args,
+ input=cli_input)
+ self.assert_result(result, 0)
+
+ self._verify_load_facts(load_facts_mock)
+ self._verify_run_playbook(run_playbook_mock, 5, 5)
+
+ written_config = read_yaml(self.config_file)
+ self._verify_config_hosts(written_config, 5)
+
+ inventory = ConfigParser.ConfigParser(allow_no_value=True)
+ inventory.read(os.path.join(self.work_dir, 'hosts'))
+ self.assert_inventory_host_var(inventory, 'nodes', '10.0.0.1',
+ 'openshift_schedulable=True')
+ self.assert_inventory_host_var(inventory, 'nodes', '10.0.0.2',
+ 'openshift_schedulable=True')
+ self.assert_inventory_host_var(inventory, 'nodes', '10.0.0.3',
+ 'openshift_schedulable=True')
+
+ # Checks that the given inventory section (as a ConfigParser) contains the
+ # host and that the host's line includes the given variable assignment.
+ def assert_inventory_host_var(self, inventory, section, host, variable):
+ # Config parser splits on the first "=", so we end up with:
+ # 'hostname key1' -> 'val1 key2=val2 key3=val3'
+ #
+ # Convert to something easier to test:
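+ # For example (illustrative): an inventory line such as
+ # 10.0.0.1 openshift_ip=10.0.0.1 openshift_schedulable=False
+ # comes back from ConfigParser as the pair
+ # ('10.0.0.1 openshift_ip', '10.0.0.1 openshift_schedulable=False'),
+ # and the join below rebuilds it into one whitespace-splittable line.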
+ for (a, b) in inventory.items(section):
+ full_line = "%s=%s" % (a, b)
+ tokens = full_line.split()
+ if tokens[0] == host:
+ found = False
+ for token in tokens:
+ if token == variable:
+ found = True
+ continue
+ self.assertTrue("Unable to find %s in line: %s" %
+ (variable, full_line), found)
+ return
+ self.fail("unable to find host %s in inventory" % host)
+
+ def assert_inventory_host_var_unset(self, inventory, section, host, variable):
+ # Config parser splits on the first "=", so we end up with:
+ # 'hostname key1' -> 'val1 key2=val2 key3=val3'
+ #
+ # Convert to something easier to test:
+ for (a, b) in inventory.items(section):
+ full_line = "%s=%s" % (a, b)
+ tokens = full_line.split()
+ if tokens[0] == host:
+ self.assertFalse(("%s=" % variable) in full_line,
+ msg='%s host variable was set: %s' %
+ (variable, full_line))
+ return
+ self.fail("unable to find host %s in inventory" % host)
+
+
+ #interactive multimaster: attempting to use a master as the load balancer should be rejected and the user re-prompted:
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_ha_reuse_master_as_lb(self, load_facts_mock, run_playbook_mock):
+ load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0)
+ run_playbook_mock.return_value = 0
+
+ cli_input = build_input(hosts=[
+ ('10.0.0.1', True, False),
+ ('10.0.0.2', True, False),
+ ('10.0.0.3', False, False),
+ ('10.0.0.4', True, False)],
+ ssh_user='root',
+ variant_num=1,
+ confirm_facts='y',
+ master_lb=(['10.0.0.2', '10.0.0.5'], False),
+ storage='10.1.0.1')
+ self.cli_args.append("install")
+ result = self.runner.invoke(cli.cli, self.cli_args,
+ input=cli_input)
+ self.assert_result(result, 0)
+
+ #interactive all-in-one
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_all_in_one(self, load_facts_mock, run_playbook_mock):
+ load_facts_mock.return_value = (MOCK_FACTS, 0)
+ run_playbook_mock.return_value = 0
+
+ cli_input = build_input(hosts=[
+ ('10.0.0.1', True, False)],
+ ssh_user='root',
+ variant_num=1,
+ confirm_facts='y',
+ storage='10.0.0.1')
+ self.cli_args.append("install")
+ result = self.runner.invoke(cli.cli, self.cli_args,
+ input=cli_input)
+ self.assert_result(result, 0)
+
+ self._verify_load_facts(load_facts_mock)
+ self._verify_run_playbook(run_playbook_mock, 1, 1)
+
+ written_config = read_yaml(self.config_file)
+ self._verify_config_hosts(written_config, 1)
+
+ inventory = ConfigParser.ConfigParser(allow_no_value=True)
+ inventory.read(os.path.join(self.work_dir, 'hosts'))
+ self.assert_inventory_host_var(inventory, 'nodes', '10.0.0.1',
+ 'openshift_schedulable=True')
+
+
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_gen_inventory(self, load_facts_mock, run_playbook_mock):
+ load_facts_mock.return_value = (MOCK_FACTS, 0)
+ run_playbook_mock.return_value = 0
+
+ cli_input = build_input(hosts=[
+ ('10.0.0.1', True, False),
+ ('10.0.0.2', False, False),
+ ('10.0.0.3', False, False)],
+ ssh_user='root',
+ variant_num=1,
+ confirm_facts='y',
+ storage='10.1.0.1',)
+ self.cli_args.append("install")
+ self.cli_args.append("--gen-inventory")
+ result = self.runner.invoke(cli.cli, self.cli_args,
+ input=cli_input)
+ self.assert_result(result, 0)
+
+ self._verify_load_facts(load_facts_mock)
+
+ # Make sure run playbook wasn't called:
+ self.assertEquals(0, len(run_playbook_mock.mock_calls))
+
+ written_config = read_yaml(self.config_file)
+ self._verify_config_hosts(written_config, 4)
+
+ inventory = ConfigParser.ConfigParser(allow_no_value=True)
+ inventory.read(os.path.join(self.work_dir, 'hosts'))
+ self.assert_inventory_host_var(inventory, 'nodes', '10.0.0.1',
+ 'openshift_schedulable=False')
+ self.assert_inventory_host_var_unset(inventory, 'nodes', '10.0.0.2',
+ 'openshift_schedulable=True')
+ self.assert_inventory_host_var_unset(inventory, 'nodes', '10.0.0.3',
+ 'openshift_schedulable=True')
+
+
+# TODO: test with config file, attended add node
+# TODO: test with config file, attended new node already in config file
+# TODO: test with config file, attended new node already in config file, plus manually added nodes
+# TODO: test with config file, attended reject facts
diff --git a/utils/test/fixture.py b/utils/test/fixture.py
new file mode 100644
index 000000000..62135c761
--- /dev/null
+++ b/utils/test/fixture.py
@@ -0,0 +1,254 @@
+# pylint: disable=missing-docstring
+import os
+import yaml
+
+import ooinstall.cli_installer as cli
+
+from test.oo_config_tests import OOInstallFixture
+from click.testing import CliRunner
+
+# Substitute in a product name before use:
+SAMPLE_CONFIG = """
+variant: %s
+variant_version: 3.3
+master_routingconfig_subdomain: example.com
+version: v2
+deployment:
+ ansible_ssh_user: root
+ hosts:
+ - connect_to: 10.0.0.1
+ ip: 10.0.0.1
+ hostname: master-private.example.com
+ public_ip: 24.222.0.1
+ public_hostname: master.example.com
+ roles:
+ - master
+ - node
+ - connect_to: 10.0.0.2
+ ip: 10.0.0.2
+ hostname: node1-private.example.com
+ public_ip: 24.222.0.2
+ public_hostname: node1.example.com
+ roles:
+ - node
+ - connect_to: 10.0.0.3
+ ip: 10.0.0.3
+ hostname: node2-private.example.com
+ public_ip: 24.222.0.3
+ public_hostname: node2.example.com
+ roles:
+ - node
+ roles:
+ master:
+ node:
+"""
+
+def read_yaml(config_file_path):
+ cfg_f = open(config_file_path, 'r')
+ config = yaml.safe_load(cfg_f.read())
+ cfg_f.close()
+ return config
+
+
+class OOCliFixture(OOInstallFixture):
+
+ def setUp(self):
+ OOInstallFixture.setUp(self)
+ self.runner = CliRunner()
+
+ # Add any arguments you would like to test here; the defaults ensure
+ # we only do unattended invocations and use temporary files/dirs.
+ self.cli_args = ["-a", self.work_dir]
+
+ def run_cli(self):
+ return self.runner.invoke(cli.cli, self.cli_args)
+
+ def assert_result(self, result, exit_code):
+ if result.exit_code != exit_code:
+ print "Unexpected result from CLI execution"
+ print "Exit code: %s" % result.exit_code
+ print "Exception: %s" % result.exception
+ print result.exc_info
+ import traceback
+ traceback.print_exception(*result.exc_info)
+ print "Output:\n%s" % result.output
+ self.fail("Exception during CLI execution")
+
+ def _verify_load_facts(self, load_facts_mock):
+ """ Check that we ran load facts with expected inputs. """
+ load_facts_args = load_facts_mock.call_args[0]
+ self.assertEquals(os.path.join(self.work_dir, "hosts"),
+ load_facts_args[0])
+ self.assertEquals(os.path.join(self.work_dir,
+ "playbooks/byo/openshift_facts.yml"),
+ load_facts_args[1])
+ env_vars = load_facts_args[2]
+ self.assertEquals(os.path.join(self.work_dir,
+ '.ansible/callback_facts.yaml'),
+ env_vars['OO_INSTALL_CALLBACK_FACTS_YAML'])
+ self.assertEqual('/tmp/ansible.log', env_vars['ANSIBLE_LOG_PATH'])
+
+ def _verify_run_playbook(self, run_playbook_mock, exp_hosts_len, exp_hosts_to_run_on_len):
+ """ Check that we ran playbook with expected inputs. """
+ hosts = run_playbook_mock.call_args[0][1]
+ hosts_to_run_on = run_playbook_mock.call_args[0][2]
+ self.assertEquals(exp_hosts_len, len(hosts))
+ self.assertEquals(exp_hosts_to_run_on_len, len(hosts_to_run_on))
+
+ def _verify_config_hosts(self, written_config, host_count):
+ self.assertEquals(host_count, len(written_config['deployment']['hosts']))
+ for host in written_config['deployment']['hosts']:
+ self.assertTrue('hostname' in host)
+ self.assertTrue('public_hostname' in host)
+ if 'preconfigured' not in host:
+ if 'roles' in host:
+ self.assertTrue('node' in host['roles'] or 'storage' in host['roles'])
+ self.assertTrue('ip' in host)
+ self.assertTrue('public_ip' in host)
+
+ #pylint: disable=too-many-arguments
+ def _verify_get_hosts_to_run_on(self, mock_facts, load_facts_mock,
+ run_playbook_mock, cli_input,
+ exp_hosts_len=None, exp_hosts_to_run_on_len=None,
+ force=None):
+ """
+ Tests cli_installer.py:get_hosts_to_run_on. That method has quite a
+ few subtle branches in the logic. The goal with this method is simply
+ to handle all the messy stuff here and allow the main test cases to be
+ easily read. The basic idea is to modify mock_facts to return a
+ version indicating OpenShift is already installed on particular hosts.
+ """
+ load_facts_mock.return_value = (mock_facts, 0)
+ run_playbook_mock.return_value = 0
+
+ if cli_input:
+ self.cli_args.append("install")
+ result = self.runner.invoke(cli.cli,
+ self.cli_args,
+ input=cli_input)
+ else:
+ config_file = self.write_config(
+ os.path.join(self.work_dir,
+ 'ooinstall.conf'), SAMPLE_CONFIG % 'openshift-enterprise')
+
+ self.cli_args.extend(["-c", config_file, "install"])
+ if force:
+ self.cli_args.append("--force")
+ result = self.runner.invoke(cli.cli, self.cli_args)
+ written_config = read_yaml(config_file)
+ self._verify_config_hosts(written_config, exp_hosts_len)
+
+ if "If you want to force reinstall" in result.output:
+ # verify we exited on seeing installed hosts
+ self.assertEqual(result.exit_code, 1)
+ else:
+ self.assert_result(result, 0)
+ self._verify_load_facts(load_facts_mock)
+ self._verify_run_playbook(run_playbook_mock, exp_hosts_len, exp_hosts_to_run_on_len)
+
+ # Make sure we ran on the expected masters and nodes:
+ hosts = run_playbook_mock.call_args[0][1]
+ hosts_to_run_on = run_playbook_mock.call_args[0][2]
+ self.assertEquals(exp_hosts_len, len(hosts))
+ self.assertEquals(exp_hosts_to_run_on_len, len(hosts_to_run_on))
+
+
+#pylint: disable=too-many-arguments,too-many-branches,too-many-statements
+def build_input(ssh_user=None, hosts=None, variant_num=None,
+ add_nodes=None, confirm_facts=None, schedulable_masters_ok=None,
+ master_lb=('', False), storage=None):
+ """
+ Build an input string simulating a user entering values in an interactive
+ attended install.
+
+ This is intended to give us one place to update when the CLI prompts change.
+ We should aim to keep this dependent on optional keyword arguments with
+ sensible defaults to keep things from getting too fragile.
+ """
+
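+ # Illustrative sketch (not part of any test): a minimal call looks like
+ # build_input(hosts=[('10.0.0.1', True, False)], ssh_user='root',
+ # variant_num=1, confirm_facts='y', storage='10.0.0.1')
+ # and the resulting newline-joined string is passed to click's CliRunner
+ # via its `input` keyword, as the attended tests above do.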
+ inputs = [
+ 'y', # let's proceed
+ ]
+ if ssh_user:
+ inputs.append(ssh_user)
+
+ if variant_num:
+ inputs.append(str(variant_num)) # Choose variant + version
+
+ num_masters = 0
+ if hosts:
+ i = 0
+ for (host, is_master, is_containerized) in hosts:
+ inputs.append(host)
+ if is_master:
+ inputs.append('y')
+ num_masters += 1
+ else:
+ inputs.append('n')
+
+ if is_containerized:
+ inputs.append('container')
+ else:
+ inputs.append('rpm')
+
+ #inputs.append('rpm')
+ # We should not be prompted to add more hosts if we're currently at
+ # two masters; that is an invalid HA configuration, so the question
+ # is not asked and the user must enter the next host:
+ if num_masters != 2:
+ if i < len(hosts) - 1:
+ if num_masters >= 1:
+ inputs.append('y') # Add more hosts
+ else:
+ inputs.append('n') # Done adding hosts
+ i += 1
+
+ # You can pass a single master_lb or a list if you intend for one to get rejected:
+ if isinstance(master_lb[0], list) or isinstance(master_lb[0], tuple):
+ inputs.extend(master_lb[0])
+ else:
+ inputs.append(master_lb[0])
+ if master_lb[0]:
+ inputs.append('y' if master_lb[1] else 'n')
+
+ if storage:
+ inputs.append(storage)
+
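+ # The remaining answers presumably correspond to the global prompts that
+ # follow host entry: the default app subdomain, the HTTP and HTTPS proxy
+ # hosts, and the proxy exclusion list.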
+ inputs.append('subdomain.example.com')
+ inputs.append('proxy.example.com')
+ inputs.append('proxy-private.example.com')
+ inputs.append('exclude.example.com')
+
+ # TODO: support option 2, fresh install
+ if add_nodes:
+ if schedulable_masters_ok:
+ inputs.append('y')
+ inputs.append('1') # Add more nodes
+ i = 0
+ for (host, is_master, is_containerized) in add_nodes:
+ inputs.append(host)
+ if is_containerized:
+ inputs.append('container')
+ else:
+ inputs.append('rpm')
+ #inputs.append('rpm')
+ if i < len(add_nodes) - 1:
+ inputs.append('y') # Add more hosts
+ else:
+ inputs.append('n') # Done adding hosts
+ i += 1
+
+ if add_nodes is None:
+ total_hosts = hosts
+ else:
+ total_hosts = hosts + add_nodes
+ if total_hosts is not None and num_masters == len(total_hosts):
+ inputs.append('y')
+
+ inputs.extend([
+ confirm_facts,
+ 'y', # let's do this
+ 'y',
+ ])
+
+ return '\n'.join(inputs)
diff --git a/utils/test/oo_config_tests.py b/utils/test/oo_config_tests.py
new file mode 100644
index 000000000..56fd82408
--- /dev/null
+++ b/utils/test/oo_config_tests.py
@@ -0,0 +1,306 @@
+# TODO: Temporarily disabled due to importing old code into openshift-ansible
+# repo. We will work on these over time.
+# pylint: disable=bad-continuation,missing-docstring,no-self-use,invalid-name
+
+import cStringIO
+import os
+import unittest
+import tempfile
+import shutil
+import yaml
+
+from ooinstall.oo_config import OOConfig, Host, OOConfigInvalidHostError
+import ooinstall.openshift_ansible
+
+SAMPLE_CONFIG = """
+variant: openshift-enterprise
+variant_version: 3.3
+version: v2
+deployment:
+ ansible_ssh_user: root
+ hosts:
+ - connect_to: master-private.example.com
+ ip: 10.0.0.1
+ hostname: master-private.example.com
+ public_ip: 24.222.0.1
+ public_hostname: master.example.com
+ roles:
+ - master
+ - node
+ - connect_to: node1-private.example.com
+ ip: 10.0.0.2
+ hostname: node1-private.example.com
+ public_ip: 24.222.0.2
+ public_hostname: node1.example.com
+ roles:
+ - node
+ - connect_to: node2-private.example.com
+ ip: 10.0.0.3
+ hostname: node2-private.example.com
+ public_ip: 24.222.0.3
+ public_hostname: node2.example.com
+ roles:
+ - node
+ roles:
+ master:
+ node:
+"""
+
+
+CONFIG_INCOMPLETE_FACTS = """
+version: v2
+deployment:
+ ansible_ssh_user: root
+ hosts:
+ - connect_to: 10.0.0.1
+ ip: 10.0.0.1
+ hostname: master-private.example.com
+ public_ip: 24.222.0.1
+ public_hostname: master.example.com
+ roles:
+ - master
+ - connect_to: 10.0.0.2
+ ip: 10.0.0.2
+ hostname: 24.222.0.2
+ public_ip: 24.222.0.2
+ roles:
+ - node
+ - connect_to: 10.0.0.3
+ ip: 10.0.0.3
+ roles:
+ - node
+ roles:
+ master:
+ node:
+"""
+
+CONFIG_BAD = """
+variant: openshift-enterprise
+version: v2
+deployment:
+ ansible_ssh_user: root
+ hosts:
+ - connect_to: master-private.example.com
+ ip: 10.0.0.1
+ hostname: master-private.example.com
+ public_ip: 24.222.0.1
+ public_hostname: master.example.com
+ roles:
+ - master
+ - node
+ - ip: 10.0.0.2
+ hostname: node1-private.example.com
+ public_ip: 24.222.0.2
+ public_hostname: node1.example.com
+ roles:
+ - node
+ - connect_to: node2-private.example.com
+ ip: 10.0.0.3
+ hostname: node2-private.example.com
+ public_ip: 24.222.0.3
+ public_hostname: node2.example.com
+ roles:
+ - node
+ roles:
+ master:
+ node:
+"""
+
+class OOInstallFixture(unittest.TestCase):
+
+ def setUp(self):
+ self.tempfiles = []
+ self.work_dir = tempfile.mkdtemp(prefix='ooconfigtests')
+ self.tempfiles.append(self.work_dir)
+
+ def tearDown(self):
+ for path in self.tempfiles:
+ if os.path.isdir(path):
+ shutil.rmtree(path)
+ else:
+ os.remove(path)
+
+ def write_config(self, path, config_str):
+ """
+ Write given config to a temporary file which will be cleaned
+ up in teardown.
+ Returns full path to the file.
+ """
+ cfg_file = open(path, 'w')
+ cfg_file.write(config_str)
+ cfg_file.close()
+ return path
+
+
+
+class OOConfigTests(OOInstallFixture):
+
+ def test_load_config(self):
+
+ cfg_path = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'), SAMPLE_CONFIG)
+ ooconfig = OOConfig(cfg_path)
+
+ self.assertEquals(3, len(ooconfig.deployment.hosts))
+ self.assertEquals("master-private.example.com", ooconfig.deployment.hosts[0].connect_to)
+ self.assertEquals("10.0.0.1", ooconfig.deployment.hosts[0].ip)
+ self.assertEquals("master-private.example.com", ooconfig.deployment.hosts[0].hostname)
+
+ self.assertEquals(["10.0.0.1", "10.0.0.2", "10.0.0.3"],
+ [host.ip for host in ooconfig.deployment.hosts])
+
+ self.assertEquals('openshift-enterprise', ooconfig.settings['variant'])
+ self.assertEquals('v2', ooconfig.settings['version'])
+
+ def test_load_bad_config(self):
+
+ cfg_path = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'), CONFIG_BAD)
+ self.assertRaises(OOConfigInvalidHostError, OOConfig, cfg_path)
+
+
+ def test_load_complete_facts(self):
+ cfg_path = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'), SAMPLE_CONFIG)
+ ooconfig = OOConfig(cfg_path)
+ missing_host_facts = ooconfig.calc_missing_facts()
+ self.assertEquals(0, len(missing_host_facts))
+
+ # Test missing optional facts the user must confirm:
+ def test_load_host_incomplete_facts(self):
+ cfg_path = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'), CONFIG_INCOMPLETE_FACTS)
+ ooconfig = OOConfig(cfg_path)
+ missing_host_facts = ooconfig.calc_missing_facts()
+ self.assertEquals(2, len(missing_host_facts))
+ self.assertEquals(1, len(missing_host_facts['10.0.0.2']))
+ self.assertEquals(3, len(missing_host_facts['10.0.0.3']))
+
+ def test_write_config(self):
+ cfg_path = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'), SAMPLE_CONFIG)
+ ooconfig = OOConfig(cfg_path)
+ ooconfig.save_to_disk()
+
+ f = open(cfg_path, 'r')
+ written_config = yaml.safe_load(f.read())
+ f.close()
+
+ self.assertEquals(3, len(written_config['deployment']['hosts']))
+ for h in written_config['deployment']['hosts']:
+ self.assertTrue('ip' in h)
+ self.assertTrue('public_ip' in h)
+ self.assertTrue('hostname' in h)
+ self.assertTrue('public_hostname' in h)
+
+ self.assertTrue('ansible_ssh_user' in written_config['deployment'])
+ self.assertTrue('variant' in written_config)
+ self.assertEquals('v2', written_config['version'])
+
+ # Some advanced settings should not get written out if they
+ # were not specified by the user:
+ self.assertFalse('ansible_inventory_directory' in written_config)
+
+
+class HostTests(OOInstallFixture):
+
+ def test_load_host_no_ip_or_hostname(self):
+ yaml_props = {
+ 'public_ip': '192.168.0.1',
+ 'public_hostname': 'a.example.com',
+ 'master': True
+ }
+ self.assertRaises(OOConfigInvalidHostError, Host, **yaml_props)
+
+ def test_load_host_no_master_or_node_specified(self):
+ yaml_props = {
+ 'ip': '192.168.0.1',
+ 'hostname': 'a.example.com',
+ 'public_ip': '192.168.0.1',
+ 'public_hostname': 'a.example.com',
+ }
+ self.assertRaises(OOConfigInvalidHostError, Host, **yaml_props)
+
+ def test_inventory_file_quotes_node_labels(self):
+ """Verify a host entry wraps openshift_node_labels value in double quotes"""
+ yaml_props = {
+ 'ip': '192.168.0.1',
+ 'hostname': 'a.example.com',
+ 'connect_to': 'a-private.example.com',
+ 'public_ip': '192.168.0.1',
+ 'public_hostname': 'a.example.com',
+ 'new_host': True,
+ 'roles': ['node'],
+ 'node_labels': {
+ 'region': 'infra'
+ },
+
+ }
+
+ new_node = Host(**yaml_props)
+ inventory = cStringIO.StringIO()
+ # This is what the 'write_host' function generates. write_host has no
+ # return value; it writes directly to the file-like 'inventory', which
+ # in this test case is a StringIO object.
+ ooinstall.openshift_ansible.write_host(
+ new_node,
+ 'node',
+ inventory,
+ schedulable=True)
+ # read the value of what was written to the inventory "file"
+ legacy_inventory_line = inventory.getvalue()
+
+ # Given the `yaml_props` above we should see a line like this:
+ # openshift_node_labels="{'region': 'infra'}"
+ node_labels_expected = '''openshift_node_labels="{'region': 'infra'}"''' # Quotes around the hash
+ node_labels_bad = '''openshift_node_labels={'region': 'infra'}''' # No quotes around the hash
+
+ # The good line is present in the written inventory line
+ self.assertIn(node_labels_expected, legacy_inventory_line)
+ # An unquoted version is not present
+ self.assertNotIn(node_labels_bad, legacy_inventory_line)
+
+
+ # def test_new_write_inventory_same_as_legacy(self):
+ # """Verify the original write_host function produces the same output as the new method"""
+ # yaml_props = {
+ # 'ip': '192.168.0.1',
+ # 'hostname': 'a.example.com',
+ # 'connect_to': 'a-private.example.com',
+ # 'public_ip': '192.168.0.1',
+ # 'public_hostname': 'a.example.com',
+ # 'new_host': True,
+ # 'roles': ['node'],
+ # 'other_variables': {
+ # 'zzz': 'last',
+ # 'foo': 'bar',
+ # 'aaa': 'first',
+ # },
+ # }
+
+ # new_node = Host(**yaml_props)
+ # inventory = cStringIO.StringIO()
+
+ # # This is what the original 'write_host' function will
+ # # generate. write_host has no return value, it just writes
+ # # directly to the file 'inventory' which in this test-case is
+ # # a StringIO object
+ # ooinstall.openshift_ansible.write_host(
+ # new_node,
+ # 'node',
+ # inventory,
+ # schedulable=True)
+ # legacy_inventory_line = inventory.getvalue()
+
+ # # This is what the new method in the Host class generates
+ # new_inventory_line = new_node.inventory_string('node', schedulable=True)
+
+ # self.assertEqual(
+ # legacy_inventory_line,
+ # new_inventory_line)
diff --git a/utils/test/test_utils.py b/utils/test/test_utils.py
new file mode 100644
index 000000000..2e59d86f2
--- /dev/null
+++ b/utils/test/test_utils.py
@@ -0,0 +1,100 @@
+"""
+Unittests for ooinstall utils.
+"""
+
+import unittest
+import logging
+import sys
+import copy
+from ooinstall.utils import debug_env, is_valid_hostname
+import mock
+
+
+class TestUtils(unittest.TestCase):
+ """
+ Tests for the ooinstall.utils helper functions.
+ """
+
+ def setUp(self):
+ self.debug_all_params = {
+ 'OPENSHIFT_FOO': 'bar',
+ 'ANSIBLE_FOO': 'bar',
+ 'OO_FOO': 'bar'
+ }
+
+ self.expected = [
+ mock.call('ANSIBLE_FOO: bar'),
+ mock.call('OPENSHIFT_FOO: bar'),
+ mock.call('OO_FOO: bar'),
+ ]
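+ # debug_env presumably only logs variables whose names start with
+ # OPENSHIFT_, ANSIBLE_, or OO_, which is why MG_FRBBR is skipped in
+ # test_utils_debug_env_some_debugged below.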
+
+ # python 2.x has assertItemsEqual, python 3.x has assertCountEqual
+ if sys.version_info.major >= 3:
+ self.assertItemsEqual = self.assertCountEqual
+
+ ######################################################################
+ # Validate ooinstall.utils.debug_env functionality
+
+ def test_utils_debug_env_all_debugged(self):
+ """Verify debug_env debugs specific env variables"""
+
+ with mock.patch('ooinstall.utils.installer_log') as _il:
+ debug_env(self.debug_all_params)
+ print _il.debug.call_args_list
+
+ # Debug was called for each item we expect
+ self.assertEqual(
+ len(self.debug_all_params),
+ _il.debug.call_count)
+
+ # Each item we expect was logged
+ self.assertItemsEqual(
+ self.expected,
+ _il.debug.call_args_list)
+
+ def test_utils_debug_env_some_debugged(self):
+ """Verify debug_env skips non-wanted env variables"""
+ debug_some_params = copy.deepcopy(self.debug_all_params)
+ # This will not be logged by debug_env
+ debug_some_params['MG_FRBBR'] = "SKIPPED"
+
+ with mock.patch('ooinstall.utils.installer_log') as _il:
+ debug_env(debug_some_params)
+
+ # The actual number of debug calls was less than the
+ # number of items passed to debug_env
+ self.assertLess(
+ _il.debug.call_count,
+ len(debug_some_params))
+
+ self.assertItemsEqual(
+ self.expected,
+ _il.debug.call_args_list)
+
+ ######################################################################
+ def test_utils_is_valid_hostname_invalid(self):
+ """Verify is_valid_hostname can detect None or too-long hostnames"""
+ # A hostname that's empty, None, or more than 255 chars is invalid
+ empty_hostname = ''
+ res = is_valid_hostname(empty_hostname)
+ self.assertFalse(res)
+
+ none_hostname = None
+ res = is_valid_hostname(none_hostname)
+ self.assertFalse(res)
+
+ too_long_hostname = "a" * 256
+ res = is_valid_hostname(too_long_hostname)
+ self.assertFalse(res)
+
+ def test_utils_is_valid_hostname_ends_with_dot(self):
+ """Verify is_valid_hostname can parse hostnames with trailing periods"""
+ hostname = "foo.example.com."
+ res = is_valid_hostname(hostname)
+ self.assertTrue(res)
+
+ def test_utils_is_valid_hostname_normal_hostname(self):
+ """Verify is_valid_hostname can parse regular hostnames"""
+ hostname = "foo.example.com"
+ res = is_valid_hostname(hostname)
+ self.assertTrue(res)