-rw-r--r--  .tito/packages/openshift-ansible-bin  |    1
-rw-r--r--  .tito/packages/openshift-ansible-inventory  |    1
-rw-r--r--  .tito/releasers.conf  |    4
-rw-r--r--  README.md  |    2
-rw-r--r--  README_AWS.md  |    2
-rw-r--r--  README_vagrant.md  |    4
-rw-r--r--  Vagrantfile  |   21
-rwxr-xr-x  bin/cluster  |   12
-rw-r--r--  filter_plugins/oo_filters.py  |   60
-rw-r--r--  filter_plugins/oo_zabbix_filters.py  |   51
-rwxr-xr-x  git/pylint.sh  |    2
-rw-r--r--  openshift-ansible.spec  |   32
-rw-r--r--  playbooks/adhoc/uninstall.yml  |   16
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/files/pre-upgrade-check  |   16
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml  |  127
-rw-r--r--  playbooks/common/openshift-master/config.yml  |    3
-rw-r--r--  playbooks/libvirt/openshift-cluster/list.yml  |    8
-rw-r--r--  playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml  |    2
-rw-r--r--  playbooks/libvirt/openshift-cluster/templates/domain.xml  |    1
-rw-r--r--  playbooks/openstack/openshift-cluster/list.yml  |    8
-rw-r--r--  rel-eng/packages/.readme  |    3
-rw-r--r--  rel-eng/packages/openshift-ansible-bin  |    1
-rw-r--r--  rel-eng/packages/openshift-ansible-inventory  |    1
-rw-r--r--  rel-eng/tito.props  |    5
-rw-r--r--  roles/copr_cli/README.md  |   38
-rw-r--r--  roles/copr_cli/defaults/main.yml  |    2
-rw-r--r--  roles/copr_cli/handlers/main.yml  |    2
-rw-r--r--  roles/copr_cli/meta/main.yml  |   14
-rw-r--r--  roles/copr_cli/tasks/main.yml  |    4
-rw-r--r--  roles/copr_cli/vars/main.yml  |    2
-rw-r--r--  roles/lib_zabbix/library/zbx_item.py  |   20
-rw-r--r--  roles/lib_zabbix/library/zbx_itemprototype.py  |   20
-rw-r--r--  roles/lib_zabbix/library/zbx_itservice.py  |  263
-rw-r--r--  roles/lib_zabbix/library/zbx_trigger.py  |    5
-rw-r--r--  roles/lib_zabbix/tasks/create_template.yml  |    2
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py  |   68
-rw-r--r--  roles/openshift_facts/tasks/main.yml  |    3
-rw-r--r--  roles/openshift_node/tasks/storage_plugins/glusterfs.yml  |    9
-rw-r--r--  roles/os_zabbix/vars/template_aws.yml  |    2
-rw-r--r--  roles/os_zabbix/vars/template_openshift_master.yml  |   30
-rw-r--r--  roles/rhel_subscribe/tasks/main.yml  |   11
-rw-r--r--  roles/tito/README.md  |   38
-rw-r--r--  roles/tito/defaults/main.yml  |    2
-rw-r--r--  roles/tito/handlers/main.yml  |    2
-rw-r--r--  roles/tito/meta/main.yml  |   14
-rw-r--r--  roles/tito/tasks/main.yml  |    4
-rw-r--r--  roles/tito/vars/main.yml  |    2
-rw-r--r--  utils/src/ooinstall/cli_installer.py  |   12
-rw-r--r--  utils/src/ooinstall/openshift_ansible.py  |    2
-rw-r--r--  utils/test/cli_installer_tests.py  |  215
50 files changed, 1023 insertions, 146 deletions
diff --git a/.tito/packages/openshift-ansible-bin b/.tito/packages/openshift-ansible-bin
deleted file mode 100644
index 5275dfcf9..000000000
--- a/.tito/packages/openshift-ansible-bin
+++ /dev/null
@@ -1 +0,0 @@
-0.0.21-1 bin/
diff --git a/.tito/packages/openshift-ansible-inventory b/.tito/packages/openshift-ansible-inventory
deleted file mode 100644
index 85502438a..000000000
--- a/.tito/packages/openshift-ansible-inventory
+++ /dev/null
@@ -1 +0,0 @@
-0.0.11-1 inventory/
diff --git a/.tito/releasers.conf b/.tito/releasers.conf
index f863ce9b1..a9116291a 100644
--- a/.tito/releasers.conf
+++ b/.tito/releasers.conf
@@ -11,3 +11,7 @@ srpm_disttag = .el7ose
releaser = tito.release.DistGitReleaser
branches = rhaos-3.1-rhel-7
srpm_disttag = .el7aos
+
+[copr-openshift-ansible]
+releaser = tito.release.CoprReleaser
+project_name = openshift-ansible
diff --git a/README.md b/README.md
index 635df36a0..cef2ed0b6 100644
--- a/README.md
+++ b/README.md
@@ -6,7 +6,7 @@ This repo contains Ansible code for OpenShift and Atomic Enterprise.
- Install base dependencies:
- Fedora:
```
- yum install -y ansible rubygem-thor rubygem-parseconfig util-linux
+ dnf install -y ansible rubygem-thor rubygem-parseconfig util-linux pyOpenSSL libffi-devel python-cryptography
```
- OSX:
```
diff --git a/README_AWS.md b/README_AWS.md
index 6757e2892..d9e2ac5a9 100644
--- a/README_AWS.md
+++ b/README_AWS.md
@@ -105,7 +105,7 @@ Install Dependencies
1. Ansible requires python-boto for aws operations:
RHEL/CentOS/Fedora
```
- yum install -y ansible python-boto
+ yum install -y ansible python-boto pyOpenSSL
```
OSX:
```
diff --git a/README_vagrant.md b/README_vagrant.md
index f3e4cfc18..73fd31476 100644
--- a/README_vagrant.md
+++ b/README_vagrant.md
@@ -3,7 +3,6 @@ Requirements
- ansible (the latest 1.9 release is preferred, but any version greater than 1.9.1 should be sufficient).
- vagrant (tested against version 1.7.2)
- vagrant-hostmanager plugin (tested against version 1.5.0)
-- vagrant-registration plugin (only required for enterprise deployment type)
- vagrant-libvirt (tested against version 0.0.26)
- Only required if using libvirt instead of virtualbox
@@ -44,7 +43,8 @@ The following environment variables can be overriden:
- ``OPENSHIFT_DEPLOYMENT_TYPE`` (defaults to origin, choices: origin, enterprise, online)
- ``OPENSHIFT_NUM_NODES`` (the number of nodes to create, defaults to 2)
-For ``enterprise`` deployment types these env variables should also be specified:
+Note that if ``OPENSHIFT_DEPLOYMENT_TYPE`` is ``enterprise`` you should also specify environment variables related to ``subscription-manager`` which are used by the ``rhel_subscribe`` role:
+
- ``rhel_subscription_user``: rhsm user
- ``rhel_subscription_pass``: rhsm password
- (optional) ``rhel_subscription_pool``: poolID to attach a specific subscription besides what auto-attach detects
diff --git a/Vagrantfile b/Vagrantfile
index 33532cd63..362e1ff48 100644
--- a/Vagrantfile
+++ b/Vagrantfile
@@ -16,27 +16,6 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
config.hostmanager.include_offline = true
config.ssh.insert_key = false
- if deployment_type === 'enterprise'
- unless Vagrant.has_plugin?('vagrant-registration')
- raise 'vagrant-registration-plugin is required for enterprise deployment'
- end
- username = ENV['rhel_subscription_user']
- password = ENV['rhel_subscription_pass']
- unless username and password
- raise 'rhel_subscription_user and rhel_subscription_pass are required'
- end
- config.registration.username = username
- config.registration.password = password
- # FIXME this is temporary until vagrant/ansible registration modules
- # are capable of handling specific subscription pools
- if not ENV['rhel_subscription_pool'].nil?
- config.vm.provision "shell" do |s|
- s.inline = "subscription-manager attach --pool=$1 || true"
- s.args = "#{ENV['rhel_subscription_pool']}"
- end
- end
- end
-
config.vm.provider "virtualbox" do |vbox, override|
override.vm.box = "centos/7"
vbox.memory = 1024
diff --git a/bin/cluster b/bin/cluster
index 59a6755d3..220f11d49 100755
--- a/bin/cluster
+++ b/bin/cluster
@@ -57,7 +57,7 @@ class Cluster(object):
"""
env = {'cluster_id': args.cluster_id,
'deployment_type': self.get_deployment_type(args)}
- playbook = "playbooks/{}/openshift-cluster/launch.yml".format(args.provider)
+ playbook = "playbooks/{0}/openshift-cluster/launch.yml".format(args.provider)
inventory = self.setup_provider(args.provider)
env['num_masters'] = args.masters
@@ -74,7 +74,7 @@ class Cluster(object):
"""
env = {'cluster_id': args.cluster_id,
'deployment_type': self.get_deployment_type(args)}
- playbook = "playbooks/{}/openshift-cluster/terminate.yml".format(args.provider)
+ playbook = "playbooks/{0}/openshift-cluster/terminate.yml".format(args.provider)
inventory = self.setup_provider(args.provider)
self.action(args, inventory, env, playbook)
@@ -86,7 +86,7 @@ class Cluster(object):
"""
env = {'cluster_id': args.cluster_id,
'deployment_type': self.get_deployment_type(args)}
- playbook = "playbooks/{}/openshift-cluster/list.yml".format(args.provider)
+ playbook = "playbooks/{0}/openshift-cluster/list.yml".format(args.provider)
inventory = self.setup_provider(args.provider)
self.action(args, inventory, env, playbook)
@@ -98,7 +98,7 @@ class Cluster(object):
"""
env = {'cluster_id': args.cluster_id,
'deployment_type': self.get_deployment_type(args)}
- playbook = "playbooks/{}/openshift-cluster/config.yml".format(args.provider)
+ playbook = "playbooks/{0}/openshift-cluster/config.yml".format(args.provider)
inventory = self.setup_provider(args.provider)
self.action(args, inventory, env, playbook)
@@ -110,7 +110,7 @@ class Cluster(object):
"""
env = {'cluster_id': args.cluster_id,
'deployment_type': self.get_deployment_type(args)}
- playbook = "playbooks/{}/openshift-cluster/update.yml".format(args.provider)
+ playbook = "playbooks/{0}/openshift-cluster/update.yml".format(args.provider)
inventory = self.setup_provider(args.provider)
self.action(args, inventory, env, playbook)
@@ -124,7 +124,7 @@ class Cluster(object):
'deployment_type': self.get_deployment_type(args),
'new_cluster_state': args.state}
- playbook = "playbooks/{}/openshift-cluster/service.yml".format(args.provider)
+ playbook = "playbooks/{0}/openshift-cluster/service.yml".format(args.provider)
inventory = self.setup_provider(args.provider)
self.action(args, inventory, env, playbook)
diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py
index 9a17913c4..2386b5878 100644
--- a/filter_plugins/oo_filters.py
+++ b/filter_plugins/oo_filters.py
@@ -401,6 +401,63 @@ class FilterModule(object):
"certificate names in host inventory"))
return certificates
+ @staticmethod
+ def oo_pretty_print_cluster(data):
+ ''' Read a subset of hostvars and build a summary of the cluster
+ in the following layout:
+
+"c_id": {
+ "master": [
+ { "name": "c_id-master-12345", "public IP": "172.16.0.1", "private IP": "192.168.0.1", "subtype": "default" }]
+ "node": [
+ { "name": "c_id-node-infra-23456", "public IP": "172.16.0.2", "private IP": "192.168.0.2", "subtype": "infra" },
+ { "name": "c_id-node-compute-23456", "public IP": "172.16.0.3", "private IP": "192.168.0.3", "subtype": "compute" },
+ ...
+ ]}
+ '''
+
+ def _get_tag_value(tags, key):
+ ''' Extract values of a map implemented as a set.
+ Ex: tags = { 'tag_foo_value1', 'tag_bar_value2', 'tag_baz_value3' }
+ key = 'bar'
+ returns 'value2'
+ '''
+ for tag in tags:
+ # Skip tag_env-host-type to avoid ambiguity with tag_env
+ if tag[:17] == 'tag_env-host-type':
+ continue
+ if tag[:len(key)+4] == 'tag_' + key:
+ return tag[len(key)+5:]
+ raise KeyError(key)
+
+ def _add_host(clusters,
+ env,
+ host_type,
+ sub_host_type,
+ host):
+ ''' Add a new host in the clusters data structure '''
+ if env not in clusters:
+ clusters[env] = {}
+ if host_type not in clusters[env]:
+ clusters[env][host_type] = {}
+ if sub_host_type not in clusters[env][host_type]:
+ clusters[env][host_type][sub_host_type] = []
+ clusters[env][host_type][sub_host_type].append(host)
+
+ clusters = {}
+ for host in data:
+ try:
+ _add_host(clusters=clusters,
+ env=_get_tag_value(host['group_names'], 'env'),
+ host_type=_get_tag_value(host['group_names'], 'host-type'),
+ sub_host_type=_get_tag_value(host['group_names'], 'sub-host-type'),
+ host={'name': host['inventory_hostname'],
+ 'public IP': host['ansible_ssh_host'],
+ 'private IP': host['ansible_default_ipv4']['address']})
+ except KeyError:
+ pass
+ return clusters
+
def filters(self):
''' returns a mapping of filters to methods '''
return {
@@ -418,5 +475,6 @@ class FilterModule(object):
"oo_filter_list": self.oo_filter_list,
"oo_parse_heat_stack_outputs": self.oo_parse_heat_stack_outputs,
"oo_parse_certificate_names": self.oo_parse_certificate_names,
- "oo_haproxy_backend_masters": self.oo_haproxy_backend_masters
+ "oo_haproxy_backend_masters": self.oo_haproxy_backend_masters,
+ "oo_pretty_print_cluster": self.oo_pretty_print_cluster
}
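
A minimal standalone sketch of the grouping the new oo_pretty_print_cluster filter performs on hostvars. All host names, groups, and addresses below are invented for illustration, and the tag parsing is a simplified restatement of _get_tag_value rather than the filter itself:

```
# Illustration only: mimic the grouping oo_pretty_print_cluster performs.
sample_hosts = [
    {'inventory_hostname': 'c1-master-12345',
     'ansible_ssh_host': '172.16.0.1',
     'ansible_default_ipv4': {'address': '192.168.0.1'},
     'group_names': ['tag_env-c1', 'tag_host-type-master',
                     'tag_env-host-type-c1-openshift-master',
                     'tag_sub-host-type-default']},
    {'inventory_hostname': 'c1-node-infra-23456',
     'ansible_ssh_host': '172.16.0.2',
     'ansible_default_ipv4': {'address': '192.168.0.2'},
     'group_names': ['tag_env-c1', 'tag_host-type-node',
                     'tag_env-host-type-c1-openshift-node',
                     'tag_sub-host-type-infra']},
]

def tag_value(tags, key):
    # Simplified _get_tag_value: skip the ambiguous tag_env-host-type
    # group, then strip the 'tag_<key>-' prefix.
    for tag in tags:
        if tag.startswith('tag_env-host-type'):
            continue
        if tag.startswith('tag_' + key + '-'):
            return tag[len('tag_' + key + '-'):]
    raise KeyError(key)

clusters = {}
for host in sample_hosts:
    env = tag_value(host['group_names'], 'env')
    host_type = tag_value(host['group_names'], 'host-type')
    sub_type = tag_value(host['group_names'], 'sub-host-type')
    entry = {'name': host['inventory_hostname'],
             'public IP': host['ansible_ssh_host'],
             'private IP': host['ansible_default_ipv4']['address']}
    clusters.setdefault(env, {}).setdefault(host_type, {}) \
            .setdefault(sub_type, []).append(entry)

print(clusters)
# -> {'c1': {'master': {'default': [...]}, 'node': {'infra': [...]}}}
```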
diff --git a/filter_plugins/oo_zabbix_filters.py b/filter_plugins/oo_zabbix_filters.py
index c44b874e8..fcfe43777 100644
--- a/filter_plugins/oo_zabbix_filters.py
+++ b/filter_plugins/oo_zabbix_filters.py
@@ -95,6 +95,54 @@ class FilterModule(object):
return data
+ @staticmethod
+ def itservice_results_builder(data, clusters, keys):
+        '''Loop through each result and, for each cluster, build an entry
+        containing the cluster id and the requested keys, e.g.:
+        [{'clusterid': 'cluster1', 'key': 111}]
+ '''
+ r_list = []
+ for cluster in clusters:
+ for results in data:
+ if cluster == results['item'][0]:
+ results = results['results']
+ if results and len(results) > 0 and all([results[0].has_key(_key) for _key in keys]):
+ tmp = {}
+ tmp['clusterid'] = cluster
+ for key in keys:
+ tmp[key] = results[0][key]
+ r_list.append(tmp)
+
+ return r_list
+
+ @staticmethod
+ def itservice_dependency_builder(data, cluster):
+        '''Loop through the results and, for the given cluster, build a list
+        of hard dependency entries of the form:
+        [{'name': '<clusterid> - <description>', 'dep_type': 'hard'}]
+ '''
+ r_list = []
+ for dep in data:
+ if cluster == dep['clusterid']:
+ r_list.append({'name': '%s - %s' % (dep['clusterid'], dep['description']), 'dep_type': 'hard'})
+
+ return r_list
+
+ @staticmethod
+ def itservice_dep_builder_list(data):
+        '''Loop through a list of names and build a list of hard
+        dependency entries of the form:
+        [{'name': '<name>', 'dep_type': 'hard'}]
+ '''
+ r_list = []
+ for dep in data:
+ r_list.append({'name': '%s' % dep, 'dep_type': 'hard'})
+
+ return r_list
+
def filters(self):
''' returns a mapping of filters to methods '''
return {
@@ -105,4 +153,7 @@ class FilterModule(object):
"create_data": self.create_data,
"oo_build_zabbix_collect": self.oo_build_zabbix_collect,
"oo_remove_attr_from_list_dict": self.oo_remove_attr_from_list_dict,
+ "itservice_results_builder": self.itservice_results_builder,
+ "itservice_dependency_builder": self.itservice_dependency_builder,
+ "itservice_dep_builder_list": self.itservice_dep_builder_list,
}
diff --git a/git/pylint.sh b/git/pylint.sh
index 55e8b6131..f29c055dc 100755
--- a/git/pylint.sh
+++ b/git/pylint.sh
@@ -40,6 +40,8 @@ for PY_FILE in $PY_DIFF; do
fi
done
+export PYTHONPATH=${WORKSPACE}/utils/src/:${WORKSPACE}/utils/test/
+
if [ "${FILES_TO_TEST}" != "" ]; then
echo "Testing files: ${FILES_TO_TEST}"
exec ${PYTHON} -m pylint --rcfile ${WORKSPACE}/git/.pylintrc ${FILES_TO_TEST}
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index f2623cdd7..a005320af 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -13,7 +13,8 @@ URL: https://github.com/openshift/openshift-ansible
Source0: https://github.com/openshift/openshift-ansible/archive/%{commit}/%{name}-%{version}.tar.gz
BuildArch: noarch
-Requires: ansible
+Requires: ansible >= 1.9.3
+Requires: python2
%description
Openshift and Atomic Enterprise Ansible
@@ -96,8 +97,9 @@ popd
# ----------------------------------------------------------------------------------
%package bin
Summary: Openshift and Atomic Enterprise Ansible Scripts for working with metadata hosts
-Requires: %{name}-inventory
-Requires: python2
+Requires: %{name} = %{version}
+Requires: %{name}-inventory = %{version}
+Requires: %{name}-playbooks = %{version}
BuildRequires: python2-devel
BuildArch: noarch
@@ -117,7 +119,7 @@ Scripts to make it nicer when working with hosts that are defined only by metada
# ----------------------------------------------------------------------------------
%package docs
Summary: Openshift and Atomic Enterprise Ansible documents
-Requires: %{name}
+Requires: %{name} = %{version}
BuildArch: noarch
%description docs
@@ -131,7 +133,7 @@ BuildArch: noarch
# ----------------------------------------------------------------------------------
%package inventory
Summary: Openshift and Atomic Enterprise Ansible Inventories
-Requires: python2
+Requires: %{name} = %{version}
BuildArch: noarch
%description inventory
@@ -144,7 +146,7 @@ Ansible Inventories used with the openshift-ansible scripts and playbooks.
%package inventory-aws
Summary: Openshift and Atomic Enterprise Ansible Inventories for AWS
-Requires: %{name}-inventory
+Requires: %{name}-inventory = %{version}
Requires: python-boto
BuildArch: noarch
@@ -156,7 +158,7 @@ Ansible Inventories for AWS used with the openshift-ansible scripts and playbook
%package inventory-gce
Summary: Openshift and Atomic Enterprise Ansible Inventories for GCE
-Requires: %{name}-inventory
+Requires: %{name}-inventory = %{version}
Requires: python-libcloud >= 0.13
BuildArch: noarch
@@ -172,10 +174,10 @@ Ansible Inventories for GCE used with the openshift-ansible scripts and playbook
# ----------------------------------------------------------------------------------
%package playbooks
Summary: Openshift and Atomic Enterprise Ansible Playbooks
-Requires: %{name}
-Requires: %{name}-roles
-Requires: %{name}-lookup-plugins
-Requires: %{name}-filter-plugins
+Requires: %{name} = %{version}
+Requires: %{name}-roles = %{version}
+Requires: %{name}-lookup-plugins = %{version}
+Requires: %{name}-filter-plugins = %{version}
BuildArch: noarch
%description playbooks
@@ -191,8 +193,8 @@ BuildArch: noarch
%package roles
Summary: Openshift and Atomic Enterprise Ansible roles
Requires: %{name}
-Requires: %{name}-lookup-plugins
-Requires: %{name}-filter-plugins
+Requires: %{name}-lookup-plugins = %{version}
+Requires: %{name}-filter-plugins = %{version}
BuildArch: noarch
%description roles
@@ -238,9 +240,7 @@ BuildArch: noarch
%package -n atomic-openshift-utils
Summary: Atomic OpenShift Utilities
BuildRequires: python-setuptools
-Requires: openshift-ansible-playbooks
-Requires: openshift-ansible-roles
-Requires: ansible
+Requires: %{name}-playbooks >= %{version}
Requires: python-click
Requires: python-setuptools
Requires: PyYAML
diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml
index e0dbad900..565bb20a8 100644
--- a/playbooks/adhoc/uninstall.yml
+++ b/playbooks/adhoc/uninstall.yml
@@ -103,7 +103,7 @@
- shell: find /var/lib/openshift/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
changed_when: False
- - shell: docker rm -f "{{ item }}"-master "{{ item }}"-node
+ - shell: docker rm -f "{{ item }}"-master "{{ item }}"-node
changed_when: False
failed_when: False
with_items:
@@ -152,10 +152,14 @@
- /etc/sysconfig/atomic-enterprise-master
- /etc/sysconfig/atomic-enterprise-node
- /etc/sysconfig/atomic-openshift-master
+ - /etc/sysconfig/atomic-openshift-master-api
+ - /etc/sysconfig/atomic-openshift-master-controllers
- /etc/sysconfig/atomic-openshift-node
- /etc/sysconfig/openshift-master
- /etc/sysconfig/openshift-node
- /etc/sysconfig/origin-master
+ - /etc/sysconfig/origin-master-api
+ - /etc/sysconfig/origin-master-controllers
- /etc/sysconfig/origin-node
- /root/.kube
- /run/openshift-sdn
@@ -165,6 +169,16 @@
- /var/lib/openshift
- /var/lib/origin
- /var/lib/pacemaker
+ - /usr/lib/systemd/system/atomic-openshift-master-api.service
+ - /usr/lib/systemd/system/atomic-openshift-master-controllers.service
+ - /usr/lib/systemd/system/origin-master-api.service
+ - /usr/lib/systemd/system/origin-master-controllers.service
+
+  # Since we are potentially removing the systemd unit files for the separated
+  # master-api and master-controllers services, we need to reload the
+  # systemd configuration manager
+ - name: Reload systemd manager configuration
+ command: systemctl daemon-reload
- name: restart docker
service: name=docker state=restarted
diff --git a/playbooks/common/openshift-cluster/upgrades/files/pre-upgrade-check b/playbooks/common/openshift-cluster/upgrades/files/pre-upgrade-check
index ed4ab6d1b..b5459f312 100644
--- a/playbooks/common/openshift-cluster/upgrades/files/pre-upgrade-check
+++ b/playbooks/common/openshift-cluster/upgrades/files/pre-upgrade-check
@@ -83,7 +83,7 @@ def get(obj, *paths):
# pylint: disable=too-many-arguments
-def pretty_print_errors(namespace, kind, item_name, container_name, port_name, valid):
+def pretty_print_errors(namespace, kind, item_name, container_name, invalid_label, port_name, valid):
"""
Prints out results in human friendly way.
@@ -93,15 +93,16 @@ def pretty_print_errors(namespace, kind, item_name, container_name, port_name, v
- `item_name`: Name of the resource
- `container_name`: Name of the container. May be "" when kind=Service.
- `port_name`: Name of the port
+ - `invalid_label`: The label of the invalid port. Port.name/targetPort
- `valid`: True if the port is valid
"""
if not valid:
if len(container_name) > 0:
- print('%s/%s -n %s (Container="%s" Port="%s")' % (
- kind, item_name, namespace, container_name, port_name))
+ print('%s/%s -n %s (Container="%s" %s="%s")' % (
+ kind, item_name, namespace, container_name, invalid_label, port_name))
else:
- print('%s/%s -n %s (Port="%s")' % (
- kind, item_name, namespace, port_name))
+ print('%s/%s -n %s (%s="%s")' % (
+ kind, item_name, namespace, invalid_label, port_name))
def print_validation_header():
@@ -160,7 +161,7 @@ def main():
print_validation_header()
pretty_print_errors(
namespace, kind, item_name,
- container_name, port_name, valid)
+ container_name, "Port.name", port_name, valid)
# Services follow a different flow
for item in list_items('services'):
@@ -176,7 +177,8 @@ def main():
first_error = False
print_validation_header()
pretty_print_errors(
- namespace, "services", item_name, "", port_name, valid)
+ namespace, "services", item_name, "",
+ "targetPort", port_name, valid)
# If we had at least 1 error then exit with 1
if not first_error:
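
For reference, the invented sample below shows the error lines the check now prints with the extra invalid_label argument ("Port.name" for containers, "targetPort" for services); the resource names are hypothetical:

```
# Hypothetical output of pretty_print_errors() after this change.
print('%s/%s -n %s (Container="%s" %s="%s")' % (
    'deploymentconfigs', 'router', 'default', 'router', 'Port.name', '80-tcp'))
# deploymentconfigs/router -n default (Container="router" Port.name="80-tcp")
print('%s/%s -n %s (%s="%s")' % (
    'services', 'frontend', 'myproject', 'targetPort', '8080'))
# services/frontend -n myproject (targetPort="8080")
```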
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
index 78797f8b8..eea147229 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
@@ -10,7 +10,7 @@
roles:
- openshift_facts
-- name: Evaluate etcd_hosts_to_backup
+- name: Evaluate additional groups for upgrade
hosts: localhost
tasks:
- name: Evaluate etcd_hosts_to_backup
@@ -52,7 +52,7 @@
- name: Verify upgrade can proceed
- hosts: masters:nodes
+ hosts: oo_masters_to_config:oo_nodes_to_config
tasks:
- name: Clean yum cache
command: yum clean all
@@ -78,6 +78,29 @@
msg: Atomic OpenShift 3.1 packages not found
when: g_aos_versions.curr_version | version_compare('3.0.2.900','<') and (g_aos_versions.avail_version is none or g_aos_versions.avail_version | version_compare('3.0.2.900','<'))
+ - set_fact:
+ pre_upgrade_complete: True
+
+
+##############################################################################
+# Gate on pre-upgrade checks
+##############################################################################
+- name: Gate on pre-upgrade checks
+ hosts: localhost
+ vars:
+ pre_upgrade_hosts: "{{ groups.oo_masters_to_config | union(groups.oo_nodes_to_config) }}"
+ tasks:
+ - set_fact:
+ pre_upgrade_completed: "{{ hostvars
+ | oo_select_keys(pre_upgrade_hosts)
+ | oo_collect('inventory_hostname', {'pre_upgrade_complete': true}) }}"
+ - set_fact:
+ pre_upgrade_failed: "{{ pre_upgrade_hosts | difference(pre_upgrade_completed) }}"
+ - fail:
+ msg: "Upgrade cannot continue. The following hosts did not complete pre-upgrade checks: {{ pre_upgrade_failed | join(',') }}"
+ when: pre_upgrade_failed | length > 0
+
+
###############################################################################
# Backup etcd
@@ -90,6 +113,7 @@
roles:
- openshift_facts
tasks:
+ # Ensure we persist the etcd role for this host in openshift_facts
- openshift_facts:
role: etcd
local_facts: {}
@@ -134,11 +158,32 @@
etcdctl backup --data-dir={{ openshift.etcd.etcd_data_dir }}
--backup-dir={{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}
+ - set_fact:
+ etcd_backup_complete: True
+
- name: Display location of etcd backup
debug:
msg: "Etcd backup created in {{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}"
+##############################################################################
+# Gate on etcd backup
+##############################################################################
+- name: Gate on etcd backup
+ hosts: localhost
+ tasks:
+ - set_fact:
+ etcd_backup_completed: "{{ hostvars
+ | oo_select_keys(groups.etcd_hosts_to_backup)
+ | oo_collect('inventory_hostname', {'etcd_backup_complete': true}) }}"
+ - set_fact:
+ etcd_backup_failed: "{{ groups.etcd_hosts_to_backup | difference(etcd_backup_completed) }}"
+ - fail:
+ msg: "Upgrade cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}"
+ when: etcd_backup_failed | length > 0
+
+
+
###############################################################################
# Upgrade Masters
###############################################################################
@@ -152,7 +197,7 @@
changed_when: False
- name: Update deployment type
- hosts: OSEv3
+ hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config
roles:
- openshift_facts
post_tasks:
@@ -161,6 +206,16 @@
local_facts:
deployment_type: "{{ deployment_type }}"
+- name: Update master facts
+ hosts: oo_masters_to_config
+ roles:
+ - openshift_facts
+ post_tasks:
+ - openshift_facts:
+ role: master
+ local_facts:
+ cluster_method: "{{ openshift_master_cluster_method | default(None) }}"
+
- name: Upgrade master packages and configuration
hosts: oo_masters_to_config
vars:
@@ -290,6 +345,30 @@
changed_when: False
+- name: Set master update status to complete
+ hosts: oo_masters_to_config
+ tasks:
+ - set_fact:
+ master_update_complete: True
+
+
+##############################################################################
+# Gate on master update complete
+##############################################################################
+- name: Gate on master update
+ hosts: localhost
+ tasks:
+ - set_fact:
+ master_update_completed: "{{ hostvars
+ | oo_select_keys(groups.oo_masters_to_config)
+ | oo_collect('inventory_hostname', {'master_update_complete': true}) }}"
+ - set_fact:
+ master_update_failed: "{{ groups.oo_masters_to_config | difference(master_update_completed) }}"
+ - fail:
+ msg: "Upgrade cannot continue. The following masters did not finish updating: {{ master_update_failed | join(',') }}"
+ when: master_update_failed | length > 0
+
+
###############################################################################
# Upgrade Nodes
###############################################################################
@@ -309,6 +388,26 @@
- name: Ensure node service enabled
service: name="{{ openshift.common.service_type }}-node" state=started enabled=yes
+ - set_fact:
+ node_update_complete: True
+
+
+##############################################################################
+# Gate on nodes update
+##############################################################################
+- name: Gate on nodes update
+ hosts: localhost
+ tasks:
+ - set_fact:
+ node_update_completed: "{{ hostvars
+ | oo_select_keys(groups.oo_nodes_to_config)
+ | oo_collect('inventory_hostname', {'node_update_complete': true}) }}"
+ - set_fact:
+ node_update_failed: "{{ groups.oo_nodes_to_config | difference(node_update_completed) }}"
+ - fail:
+ msg: "Upgrade cannot continue. The following nodes did not finish updating: {{ node_update_failed | join(',') }}"
+ when: node_update_failed | length > 0
+
###############################################################################
# Post upgrade - Reconcile Cluster Roles and Cluster Role Bindings
@@ -356,6 +455,28 @@
when: openshift_master_ha | bool
run_once: true
+ - set_fact:
+ reconcile_complete: True
+
+
+##############################################################################
+# Gate on reconcile
+##############################################################################
+- name: Gate on reconcile
+ hosts: localhost
+ tasks:
+ - set_fact:
+ reconcile_completed: "{{ hostvars
+ | oo_select_keys(groups.oo_masters_to_config)
+ | oo_collect('inventory_hostname', {'reconcile_complete': true}) }}"
+ - set_fact:
+ reconcile_failed: "{{ groups.oo_masters_to_config | difference(reconcile_completed) }}"
+ - fail:
+ msg: "Upgrade cannot continue. The following masters did not finish reconciling: {{ reconcile_failed | join(',') }}"
+ when: reconcile_failed | length > 0
+
+
+
###############################################################################
# Post upgrade - Upgrade default router, default registry and examples
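
Each of the new gate plays above follows the same pattern: every target host sets a <phase>_complete fact, then a localhost play collects which hosts actually set it (via the oo_select_keys and oo_collect filters) and fails the run if any host is missing. A rough Python sketch of that set arithmetic, with made-up host names, not the Ansible filters themselves:

```
# Sketch of the gating logic used for pre-upgrade, etcd backup, master,
# node and reconcile phases.
hostvars = {
    'master1.example.com': {'master_update_complete': True},
    'master2.example.com': {},  # pretend this host died mid-play
}
oo_masters_to_config = ['master1.example.com', 'master2.example.com']

# oo_select_keys + oo_collect: hosts whose master_update_complete is True
completed = [host for host in oo_masters_to_config
             if hostvars.get(host, {}).get('master_update_complete') is True]

# difference filter: hosts that never reported completion
failed = [host for host in oo_masters_to_config if host not in completed]

if failed:
    raise SystemExit('Upgrade cannot continue. The following masters did not '
                     'finish updating: %s' % ','.join(failed))
```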
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index 1b3fba3aa..b1da85d5d 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -51,9 +51,6 @@
console_url: "{{ openshift_master_console_url | default(None) }}"
console_use_ssl: "{{ openshift_master_console_use_ssl | default(None) }}"
public_console_url: "{{ openshift_master_public_console_url | default(None) }}"
- - role: etcd
- local_facts: {}
- when: openshift.master.embedded_etcd | bool
- name: Check status of external etcd certificatees
stat:
path: "{{ openshift.common.config_base }}/master/{{ item }}"
diff --git a/playbooks/libvirt/openshift-cluster/list.yml b/playbooks/libvirt/openshift-cluster/list.yml
index eaedc4d0d..5954bb01e 100644
--- a/playbooks/libvirt/openshift-cluster/list.yml
+++ b/playbooks/libvirt/openshift-cluster/list.yml
@@ -18,6 +18,12 @@
- name: List Hosts
hosts: oo_list_hosts
+
+- name: List Hosts
+ hosts: localhost
+ gather_facts: no
+ vars_files:
+ - vars.yml
tasks:
- debug:
- msg: 'public:{{ansible_default_ipv4.address}} private:{{ansible_default_ipv4.address}}'
+ msg: "{{ hostvars | oo_select_keys(groups[scratch_group] | default([])) | oo_pretty_print_cluster }}"
diff --git a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
index 4b91c6da8..4825207c9 100644
--- a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
+++ b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
@@ -81,7 +81,7 @@
ansible_ssh_host: '{{ item.1 }}'
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
- groups: 'tag_env-{{ cluster }}, tag_host-type-{{ type }}, tag_env-host-type-{{ cluster }}-openshift-{{ type }}'
+ groups: 'tag_env-{{ cluster }}, tag_host-type-{{ type }}, tag_env-host-type-{{ cluster }}-openshift-{{ type }}, tag_sub-host-type-{{ g_sub_host_type }}'
with_together:
- instances
- ips
diff --git a/playbooks/libvirt/openshift-cluster/templates/domain.xml b/playbooks/libvirt/openshift-cluster/templates/domain.xml
index df200e374..870bcf2a6 100644
--- a/playbooks/libvirt/openshift-cluster/templates/domain.xml
+++ b/playbooks/libvirt/openshift-cluster/templates/domain.xml
@@ -6,6 +6,7 @@
<ansible:tag>env-{{ cluster }}</ansible:tag>
<ansible:tag>env-host-type-{{ cluster }}-openshift-{{ type }}</ansible:tag>
<ansible:tag>host-type-{{ type }}</ansible:tag>
+ <ansible:tag>sub-host-type-{{ g_sub_host_type }}</ansible:tag>
</ansible:tags>
</metadata>
<currentMemory unit='GiB'>1</currentMemory>
diff --git a/playbooks/openstack/openshift-cluster/list.yml b/playbooks/openstack/openshift-cluster/list.yml
index a75e350c7..fa194b072 100644
--- a/playbooks/openstack/openshift-cluster/list.yml
+++ b/playbooks/openstack/openshift-cluster/list.yml
@@ -19,6 +19,12 @@
- name: List Hosts
hosts: oo_list_hosts
+
+- name: List Hosts
+ hosts: localhost
+ gather_facts: no
+ vars_files:
+ - vars.yml
tasks:
- debug:
- msg: 'public:{{ansible_ssh_host}} private:{{ansible_default_ipv4.address}}'
+ msg: "{{ hostvars | oo_select_keys(groups[scratch_group] | default([])) | oo_pretty_print_cluster }}"
diff --git a/rel-eng/packages/.readme b/rel-eng/packages/.readme
deleted file mode 100644
index 8999c8dbc..000000000
--- a/rel-eng/packages/.readme
+++ /dev/null
@@ -1,3 +0,0 @@
-the rel-eng/packages directory contains metadata files
-named after their packages. Each file has the latest tagged
-version and the project's relative directory.
diff --git a/rel-eng/packages/openshift-ansible-bin b/rel-eng/packages/openshift-ansible-bin
deleted file mode 100644
index 11c2906f0..000000000
--- a/rel-eng/packages/openshift-ansible-bin
+++ /dev/null
@@ -1 +0,0 @@
-0.0.19-1 bin/
diff --git a/rel-eng/packages/openshift-ansible-inventory b/rel-eng/packages/openshift-ansible-inventory
deleted file mode 100644
index 733c626cf..000000000
--- a/rel-eng/packages/openshift-ansible-inventory
+++ /dev/null
@@ -1 +0,0 @@
-0.0.9-1 inventory/
diff --git a/rel-eng/tito.props b/rel-eng/tito.props
deleted file mode 100644
index eab3f190d..000000000
--- a/rel-eng/tito.props
+++ /dev/null
@@ -1,5 +0,0 @@
-[buildconfig]
-builder = tito.builder.Builder
-tagger = tito.tagger.VersionTagger
-changelog_do_not_remove_cherrypick = 0
-changelog_format = %s (%ae)
diff --git a/roles/copr_cli/README.md b/roles/copr_cli/README.md
new file mode 100644
index 000000000..edc68454e
--- /dev/null
+++ b/roles/copr_cli/README.md
@@ -0,0 +1,38 @@
+Role Name
+=========
+
+This role manages Copr CLI.
+
+https://apps.fedoraproject.org/packages/copr-cli/
+
+Requirements
+------------
+
+None
+
+Role Variables
+--------------
+
+None
+
+Dependencies
+------------
+
+None
+
+Example Playbook
+----------------
+
+ - hosts: servers
+ roles:
+ - role: copr_cli
+
+License
+-------
+
+Apache License, Version 2.0
+
+Author Information
+------------------
+
+Thomas Wiest
diff --git a/roles/copr_cli/defaults/main.yml b/roles/copr_cli/defaults/main.yml
new file mode 100644
index 000000000..3b8adf910
--- /dev/null
+++ b/roles/copr_cli/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+# defaults file for copr_cli
diff --git a/roles/copr_cli/handlers/main.yml b/roles/copr_cli/handlers/main.yml
new file mode 100644
index 000000000..c3dec5a4c
--- /dev/null
+++ b/roles/copr_cli/handlers/main.yml
@@ -0,0 +1,2 @@
+---
+# handlers file for copr_cli
diff --git a/roles/copr_cli/meta/main.yml b/roles/copr_cli/meta/main.yml
new file mode 100644
index 000000000..f050281fd
--- /dev/null
+++ b/roles/copr_cli/meta/main.yml
@@ -0,0 +1,14 @@
+---
+galaxy_info:
+ author: Thomas Wiest
+ description: Manages Copr CLI
+ company: Red Hat
+ license: Apache License, Version 2.0
+ min_ansible_version: 1.2
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - packaging
+dependencies: []
diff --git a/roles/copr_cli/tasks/main.yml b/roles/copr_cli/tasks/main.yml
new file mode 100644
index 000000000..f7ef1c26e
--- /dev/null
+++ b/roles/copr_cli/tasks/main.yml
@@ -0,0 +1,4 @@
+---
+- yum:
+ name: copr-cli
+ state: present
diff --git a/roles/copr_cli/vars/main.yml b/roles/copr_cli/vars/main.yml
new file mode 100644
index 000000000..1522c94d9
--- /dev/null
+++ b/roles/copr_cli/vars/main.yml
@@ -0,0 +1,2 @@
+---
+# vars file for copr_cli
diff --git a/roles/lib_zabbix/library/zbx_item.py b/roles/lib_zabbix/library/zbx_item.py
index 5dc3cff9b..996c98fae 100644
--- a/roles/lib_zabbix/library/zbx_item.py
+++ b/roles/lib_zabbix/library/zbx_item.py
@@ -41,6 +41,24 @@ def exists(content, key='result'):
return True
+def get_data_type(data_type):
+ '''
+ Possible values:
+ 0 - decimal;
+ 1 - octal;
+ 2 - hexadecimal;
+ 3 - bool;
+ '''
+ vtype = 0
+ if 'octal' in data_type:
+ vtype = 1
+ elif 'hexadecimal' in data_type:
+ vtype = 2
+ elif 'bool' in data_type:
+ vtype = 3
+
+ return vtype
+
def get_value_type(value_type):
'''
Possible values:
@@ -158,6 +176,7 @@ def main():
template_name=dict(default=None, type='str'),
zabbix_type=dict(default='trapper', type='str'),
value_type=dict(default='int', type='str'),
+ data_type=dict(default='decimal', type='str'),
interval=dict(default=60, type='int'),
delta=dict(default=0, type='int'),
multiplier=dict(default=None, type='str'),
@@ -219,6 +238,7 @@ def main():
'hostid': templateid[0],
'type': get_zabbix_type(module.params['zabbix_type']),
'value_type': get_value_type(module.params['value_type']),
+ 'data_type': get_data_type(module.params['data_type']),
'applications': get_app_ids(module.params['applications'], app_name_ids),
'formula': formula,
'multiplier': use_multiplier,
diff --git a/roles/lib_zabbix/library/zbx_itemprototype.py b/roles/lib_zabbix/library/zbx_itemprototype.py
index 43498c015..aca9c8336 100644
--- a/roles/lib_zabbix/library/zbx_itemprototype.py
+++ b/roles/lib_zabbix/library/zbx_itemprototype.py
@@ -116,6 +116,24 @@ def get_zabbix_type(ztype):
return _vtype
+def get_data_type(data_type):
+ '''
+ Possible values:
+ 0 - decimal;
+ 1 - octal;
+ 2 - hexadecimal;
+ 3 - bool;
+ '''
+ vtype = 0
+ if 'octal' in data_type:
+ vtype = 1
+ elif 'hexadecimal' in data_type:
+ vtype = 2
+ elif 'bool' in data_type:
+ vtype = 3
+
+ return vtype
+
def get_value_type(value_type):
'''
Possible values:
@@ -175,6 +193,7 @@ def main():
interfaceid=dict(default=None, type='int'),
zabbix_type=dict(default='trapper', type='str'),
value_type=dict(default='float', type='str'),
+ data_type=dict(default='decimal', type='str'),
delay=dict(default=60, type='int'),
lifetime=dict(default=30, type='int'),
state=dict(default='present', type='str'),
@@ -238,6 +257,7 @@ def main():
'ruleid': get_rule_id(zapi, module.params['discoveryrule_key'], template['templateid']),
'type': get_zabbix_type(module.params['zabbix_type']),
'value_type': get_value_type(module.params['value_type']),
+ 'data_type': get_data_type(module.params['data_type']),
'applications': get_app_ids(zapi, module.params['applications'], template['templateid']),
'formula': formula,
'multiplier': use_multiplier,
diff --git a/roles/lib_zabbix/library/zbx_itservice.py b/roles/lib_zabbix/library/zbx_itservice.py
new file mode 100644
index 000000000..a5ee97e15
--- /dev/null
+++ b/roles/lib_zabbix/library/zbx_itservice.py
@@ -0,0 +1,263 @@
+#!/usr/bin/env python
+'''
+ Ansible module for zabbix itservices
+'''
+# vim: expandtab:tabstop=4:shiftwidth=4
+#
+# Zabbix itservice ansible module
+#
+#
+# Copyright 2015 Red Hat Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# This is in place because each module looks similar to each other.
+# These need duplicate code as their behavior is very similar
+# but different for each zabbix class.
+# pylint: disable=duplicate-code
+
+# pylint: disable=import-error
+from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection
+
+def exists(content, key='result'):
+ ''' Check if key exists in content or the size of content[key] > 0
+ '''
+ if not content.has_key(key):
+ return False
+
+ if not content[key]:
+ return False
+
+ return True
+
+def get_parent(dependencies):
+ '''Put dependencies into the proper update format'''
+ rval = None
+ for dep in dependencies:
+ if dep['relationship'] == 'parent':
+ return dep
+ return rval
+
+def format_dependencies(dependencies):
+ '''Put dependencies into the proper update format'''
+ rval = []
+ for dep in dependencies:
+ rval.append({'dependsOnServiceid': dep['serviceid'],
+ 'soft': get_dependency_type(dep['dep_type']),
+ })
+
+ return rval
+
+def get_dependency_type(dep_type):
+ '''Determine the dependency type'''
+ rval = 0
+ if 'soft' == dep_type:
+ rval = 1
+
+ return rval
+
+def get_service_id_by_name(zapi, dependencies):
+ '''Fetch the service id for an itservice'''
+ deps = []
+ for dep in dependencies:
+ if dep['name'] == 'root':
+ deps.append(dep)
+ continue
+
+ content = zapi.get_content('service',
+ 'get',
+ {'filter': {'name': dep['name']},
+ 'selectDependencies': 'extend',
+ })
+ if content.has_key('result') and content['result']:
+ dep['serviceid'] = content['result'][0]['serviceid']
+ deps.append(dep)
+
+ return deps
+
+def add_dependencies(zapi, service_name, dependencies):
+ '''Fetch the service id for an itservice
+
+ Add a dependency on the parent for this current service item.
+ '''
+
+ results = get_service_id_by_name(zapi, [{'name': service_name}])
+
+ content = {}
+ for dep in dependencies:
+ content = zapi.get_content('service',
+ 'adddependencies',
+ {'serviceid': results[0]['serviceid'],
+ 'dependsOnServiceid': dep['serviceid'],
+ 'soft': get_dependency_type(dep['dep_type']),
+ })
+ if content.has_key('result') and content['result']:
+ continue
+ else:
+ break
+
+ return content
+
+def get_show_sla(inc_sla):
+    ''' Determine the showsla parameter
+ '''
+ rval = 1
+    if 'do not calculate' in inc_sla:
+ rval = 0
+ return rval
+
+def get_algorithm(inc_algorithm_str):
+ '''
+ Determine which type algorithm
+ '''
+ rval = 0
+ if 'at least one' in inc_algorithm_str:
+ rval = 1
+ elif 'all' in inc_algorithm_str:
+ rval = 2
+
+ return rval
+
+# The branches are needed for CRUD and error handling
+# pylint: disable=too-many-branches
+def main():
+ '''
+ ansible zabbix module for zbx_itservice
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ zbx_server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
+ zbx_user=dict(default=os.environ.get('ZABBIX_USER', None), type='str'),
+ zbx_password=dict(default=os.environ.get('ZABBIX_PASSWORD', None), type='str'),
+ zbx_debug=dict(default=False, type='bool'),
+ name=dict(default=None, type='str'),
+ algorithm=dict(default='do not calculate', choices=['do not calculate', 'at least one', 'all'], type='str'),
+ show_sla=dict(default='calculate', choices=['do not calculate', 'calculate'], type='str'),
+ good_sla=dict(default='99.9', type='float'),
+ sort_order=dict(default=1, type='int'),
+ state=dict(default='present', type='str'),
+ trigger_id=dict(default=None, type='int'),
+ dependencies=dict(default=[], type='list'),
+ dep_type=dict(default='hard', choices=['hard', 'soft'], type='str'),
+ ),
+ #supports_check_mode=True
+ )
+
+ zapi = ZabbixAPI(ZabbixConnection(module.params['zbx_server'],
+ module.params['zbx_user'],
+ module.params['zbx_password'],
+ module.params['zbx_debug']))
+
+ #Set the instance and the template for the rest of the calls
+ zbx_class_name = 'service'
+ state = module.params['state']
+
+ content = zapi.get_content(zbx_class_name,
+ 'get',
+ {'filter': {'name': module.params['name']},
+ 'selectDependencies': 'extend',
+ })
+
+ #******#
+ # GET
+ #******#
+ if state == 'list':
+ module.exit_json(changed=False, results=content['result'], state="list")
+
+ #******#
+ # DELETE
+ #******#
+ if state == 'absent':
+ if not exists(content):
+ module.exit_json(changed=False, state="absent")
+
+ content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0]['serviceid']])
+ module.exit_json(changed=True, results=content['result'], state="absent")
+
+ # Create and Update
+ if state == 'present':
+
+ dependencies = get_service_id_by_name(zapi, module.params['dependencies'])
+ params = {'name': module.params['name'],
+ 'algorithm': get_algorithm(module.params['algorithm']),
+ 'showsla': get_show_sla(module.params['show_sla']),
+ 'goodsla': module.params['good_sla'],
+ 'sortorder': module.params['sort_order'],
+ 'triggerid': module.params['trigger_id']
+ }
+
+ # Remove any None valued params
+ _ = [params.pop(key, None) for key in params.keys() if params[key] is None]
+
+ #******#
+ # CREATE
+ #******#
+ if not exists(content):
+ content = zapi.get_content(zbx_class_name, 'create', params)
+
+ if content.has_key('error'):
+ module.exit_json(failed=True, changed=True, results=content['error'], state="present")
+
+ if dependencies:
+ content = add_dependencies(zapi, module.params['name'], dependencies)
+
+ if content.has_key('error'):
+ module.exit_json(failed=True, changed=True, results=content['error'], state="present")
+
+ module.exit_json(changed=True, results=content['result'], state='present')
+
+
+ ########
+ # UPDATE
+ ########
+ params['dependencies'] = dependencies
+ differences = {}
+ zab_results = content['result'][0]
+ for key, value in params.items():
+
+ if key == 'goodsla':
+ if float(value) != float(zab_results[key]):
+ differences[key] = value
+
+ elif key == 'dependencies':
+ zab_dep_ids = [item['serviceid'] for item in zab_results[key]]
+ user_dep_ids = [item['serviceid'] for item in dependencies]
+ if set(zab_dep_ids) != set(user_dep_ids):
+ differences[key] = format_dependencies(dependencies)
+
+ elif zab_results[key] != value and zab_results[key] != str(value):
+ differences[key] = value
+
+ if not differences:
+ module.exit_json(changed=False, results=zab_results, state="present")
+
+ differences['serviceid'] = zab_results['serviceid']
+ content = zapi.get_content(zbx_class_name, 'update', differences)
+
+ if content.has_key('error'):
+ module.exit_json(failed=True, changed=False, results=content['error'], state="present")
+
+ module.exit_json(changed=True, results=content['result'], state="present")
+
+ module.exit_json(failed=True,
+ changed=False,
+ results='Unknown state passed. %s' % state,
+ state="unknown")
+
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
+# import module snippets. These are required
+from ansible.module_utils.basic import *
+
+main()
diff --git a/roles/lib_zabbix/library/zbx_trigger.py b/roles/lib_zabbix/library/zbx_trigger.py
index ab7731faa..b5faefa70 100644
--- a/roles/lib_zabbix/library/zbx_trigger.py
+++ b/roles/lib_zabbix/library/zbx_trigger.py
@@ -136,6 +136,8 @@ def main():
status=dict(default=None, type='str'),
state=dict(default='present', type='str'),
template_name=dict(default=None, type='str'),
+ hostgroup_name=dict(default=None, type='str'),
+ query_type=dict(default='filter', choices=['filter', 'search'], type='str'),
),
#supports_check_mode=True
)
@@ -157,10 +159,11 @@ def main():
content = zapi.get_content(zbx_class_name,
'get',
- {'filter': {'description': tname},
+ {module.params['query_type']: {'description': tname},
'expandExpression': True,
'selectDependencies': 'triggerid',
'templateids': templateid,
+ 'group': module.params['hostgroup_name'],
})
# Get
diff --git a/roles/lib_zabbix/tasks/create_template.yml b/roles/lib_zabbix/tasks/create_template.yml
index 44c4e6766..2992505bf 100644
--- a/roles/lib_zabbix/tasks/create_template.yml
+++ b/roles/lib_zabbix/tasks/create_template.yml
@@ -33,6 +33,7 @@
key: "{{ item.key }}"
name: "{{ item.name | default(item.key, true) }}"
value_type: "{{ item.value_type | default('int') }}"
+ data_type: "{{ item.data_type | default('decimal') }}"
description: "{{ item.description | default('', True) }}"
multiplier: "{{ item.multiplier | default('', True) }}"
units: "{{ item.units | default('', True) }}"
@@ -81,6 +82,7 @@
key: "{{ item.key }}"
discoveryrule_key: "{{ item.discoveryrule_key }}"
value_type: "{{ item.value_type }}"
+ data_type: "{{ item.data_type | default('decimal') }}"
template_name: "{{ template.name }}"
applications: "{{ item.applications }}"
description: "{{ item.description | default('', True) }}"
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 828ddc49b..51e3ef1c0 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -24,8 +24,23 @@ import StringIO
import yaml
from distutils.util import strtobool
from distutils.version import LooseVersion
-from netaddr import IPNetwork
+import struct
+import socket
+def first_ip(network):
+ """ Return the first IPv4 address in network
+
+ Args:
+ network (str): network in CIDR format
+ Returns:
+ str: first IPv4 address
+ """
+ atoi = lambda addr: struct.unpack("!I", socket.inet_aton(addr))[0]
+ itoa = lambda addr: socket.inet_ntoa(struct.pack("!I", addr))
+
+ (address, netmask) = network.split('/')
+ netmask_i = (0xffffffff << (32 - atoi(netmask))) & 0xffffffff
+ return itoa((atoi(address) & netmask_i) + 1)
def hostname_valid(hostname):
""" Test if specified hostname should be considered valid
@@ -525,7 +540,7 @@ def set_aggregate_facts(facts):
'kubernetes.default.svc', 'kubernetes.default.svc.' + cluster_domain]
all_hostnames.update(svc_names)
internal_hostnames.update(svc_names)
- first_svc_ip = str(IPNetwork(facts['master']['portal_net'])[1])
+ first_svc_ip = first_ip(facts['master']['portal_net'])
all_hostnames.add(first_svc_ip)
internal_hostnames.add(first_svc_ip)
@@ -543,8 +558,10 @@ def set_etcd_facts_if_unset(facts):
If anything goes wrong parsing these, the fact will not be set.
"""
- if 'etcd' in facts:
- if 'master' in facts and facts['master']['embedded_etcd']:
+ if 'master' in facts and facts['master']['embedded_etcd']:
+ etcd_facts = facts['etcd'] if 'etcd' in facts else dict()
+
+ if 'etcd_data_dir' not in etcd_facts:
try:
# Parse master config to find actual etcd data dir:
master_cfg_path = os.path.join(facts['common']['config_base'],
@@ -553,28 +570,37 @@ def set_etcd_facts_if_unset(facts):
config = yaml.safe_load(master_cfg_f.read())
master_cfg_f.close()
- facts['etcd']['etcd_data_dir'] = \
+ etcd_facts['etcd_data_dir'] = \
config['etcdConfig']['storageDirectory']
+
+ facts['etcd'] = etcd_facts
+
# We don't want exceptions bubbling up here:
# pylint: disable=broad-except
except Exception:
pass
- else:
- # Read ETCD_DATA_DIR from /etc/etcd/etcd.conf:
- try:
- # Add a fake section for parsing:
- ini_str = '[root]\n' + open('/etc/etcd/etcd.conf', 'r').read()
- ini_fp = StringIO.StringIO(ini_str)
- config = ConfigParser.RawConfigParser()
- config.readfp(ini_fp)
- etcd_data_dir = config.get('root', 'ETCD_DATA_DIR')
- if etcd_data_dir.startswith('"') and etcd_data_dir.endswith('"'):
- etcd_data_dir = etcd_data_dir[1:-1]
- facts['etcd']['etcd_data_dir'] = etcd_data_dir
- # We don't want exceptions bubbling up here:
- # pylint: disable=broad-except
- except Exception:
- pass
+ else:
+ etcd_facts = facts['etcd'] if 'etcd' in facts else dict()
+
+ # Read ETCD_DATA_DIR from /etc/etcd/etcd.conf:
+ try:
+ # Add a fake section for parsing:
+ ini_str = '[root]\n' + open('/etc/etcd/etcd.conf', 'r').read()
+ ini_fp = StringIO.StringIO(ini_str)
+ config = ConfigParser.RawConfigParser()
+ config.readfp(ini_fp)
+ etcd_data_dir = config.get('root', 'ETCD_DATA_DIR')
+ if etcd_data_dir.startswith('"') and etcd_data_dir.endswith('"'):
+ etcd_data_dir = etcd_data_dir[1:-1]
+
+ etcd_facts['etcd_data_dir'] = etcd_data_dir
+ facts['etcd'] = etcd_facts
+
+ # We don't want exceptions bubbling up here:
+ # pylint: disable=broad-except
+ except Exception:
+ pass
+
return facts
def set_deployment_facts_if_unset(facts):
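
The first_ip helper added above removes the python-netaddr dependency by computing the first address of the portal_net CIDR with socket/struct. A standalone restatement for quick verification; it parses the prefix length with int() instead of the inet_aton trick used in the module, and the sample network is only an example:

```
import socket
import struct

def first_ip(network):
    # Mask the address down to the network, then add one to get the
    # first usable IP (e.g. the kubernetes service IP in portal_net).
    atoi = lambda addr: struct.unpack("!I", socket.inet_aton(addr))[0]
    itoa = lambda addr: socket.inet_ntoa(struct.pack("!I", addr))
    address, prefix = network.split('/')
    netmask = (0xffffffff << (32 - int(prefix))) & 0xffffffff
    return itoa((atoi(address) & netmask) + 1)

print(first_ip('172.30.0.0/16'))  # -> 172.30.0.1
```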
diff --git a/roles/openshift_facts/tasks/main.yml b/roles/openshift_facts/tasks/main.yml
index a28aa7ba2..913f0dc78 100644
--- a/roles/openshift_facts/tasks/main.yml
+++ b/roles/openshift_facts/tasks/main.yml
@@ -6,10 +6,9 @@
- ansible_version | version_compare('1.9.0', 'ne')
- ansible_version | version_compare('1.9.0.1', 'ne')
-- name: Ensure python-netaddr and PyYaml are installed
+- name: Ensure PyYaml is installed
yum: pkg={{ item }} state=installed
with_items:
- - python-netaddr
- PyYAML
- name: Gather Cluster facts
diff --git a/roles/openshift_node/tasks/storage_plugins/glusterfs.yml b/roles/openshift_node/tasks/storage_plugins/glusterfs.yml
index b812e81df..5cd4a6041 100644
--- a/roles/openshift_node/tasks/storage_plugins/glusterfs.yml
+++ b/roles/openshift_node/tasks/storage_plugins/glusterfs.yml
@@ -4,9 +4,14 @@
pkg: glusterfs-fuse
state: installed
-- name: Set seboolean to allow gluster storage plugin access from containers
+- name: Set sebooleans to allow gluster storage plugin access from containers
seboolean:
- name: virt_use_fusefs
+ name: "{{ item }}"
state: yes
persistent: yes
when: ansible_selinux and ansible_selinux.status == "enabled"
+ with_items:
+ - virt_use_fusefs
+ - virt_sandbox_use_fusefs
+ register: sebool_result
+ failed_when: "'state' not in sebool_result and 'msg' in sebool_result and 'SELinux boolean item does not exist' not in sebool_result.msg"
diff --git a/roles/os_zabbix/vars/template_aws.yml b/roles/os_zabbix/vars/template_aws.yml
index 0ed682128..57832a3fe 100644
--- a/roles/os_zabbix/vars/template_aws.yml
+++ b/roles/os_zabbix/vars/template_aws.yml
@@ -4,7 +4,7 @@ g_template_aws:
zdiscoveryrules:
- name: disc.aws
key: disc.aws
- lifetime: 1
+ lifetime: 14
description: "Dynamically register AWS bucket info"
zitemprototypes:
diff --git a/roles/os_zabbix/vars/template_openshift_master.yml b/roles/os_zabbix/vars/template_openshift_master.yml
index 6defc4989..174486e15 100644
--- a/roles/os_zabbix/vars/template_openshift_master.yml
+++ b/roles/os_zabbix/vars/template_openshift_master.yml
@@ -13,6 +13,13 @@ g_template_openshift_master:
applications:
- Openshift Master
+ - key: openshift.master.api.healthz
+ description: "Checks the healthz check of the master's api: https://master_host/healthz"
+ type: int
+ data_type: bool
+ applications:
+ - Openshift Master
+
- key: openshift.master.user.count
description: Shows number of users in a cluster
type: int
@@ -24,8 +31,20 @@ g_template_openshift_master:
type: int
applications:
- Openshift Master
-
- - key: openshift.project.counter
+
+ - key: openshift.master.pod.user.running.count
+ description: Shows number of user pods running (non infrastructure pods)
+ type: int
+ applications:
+ - Openshift Master
+
+ - key: openshift.master.pod.total.count
+ description: Shows total number of pods (running and non running)
+ type: int
+ applications:
+ - Openshift Master
+
+ - key: openshift.project.count
description: Shows number of projects on a cluster
type: int
applications:
@@ -109,6 +128,11 @@ g_template_openshift_master:
url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_create_app.asciidoc'
priority: avg
+ - name: 'Openshift Master API health check is failing on {HOST.NAME}'
+ expression: '{Template Openshift Master:openshift.master.api.healthz.max(#3)}<1'
+ url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc'
+ priority: high
+
- name: 'Openshift Master process not running on {HOST.NAME}'
expression: '{Template Openshift Master:openshift.master.process.count.max(#3)}<1'
url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc'
@@ -125,7 +149,7 @@ g_template_openshift_master:
priority: info
- name: 'There are no projects running on {HOST.NAME}'
- expression: '{Template Openshift Master:openshift.project.counter.last()}=0'
+ expression: '{Template Openshift Master:openshift.project.count.last()}=0'
url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc'
priority: info
diff --git a/roles/rhel_subscribe/tasks/main.yml b/roles/rhel_subscribe/tasks/main.yml
index 8fb2fc042..30c0920a1 100644
--- a/roles/rhel_subscribe/tasks/main.yml
+++ b/roles/rhel_subscribe/tasks/main.yml
@@ -6,19 +6,26 @@
- set_fact:
rhel_subscription_user: "{{ lookup('oo_option', 'rhel_subscription_user') | default(rhsub_user, True) | default(omit, True) }}"
rhel_subscription_pass: "{{ lookup('oo_option', 'rhel_subscription_pass') | default(rhsub_pass, True) | default(omit, True) }}"
+ rhel_subscription_server: "{{ lookup('oo_option', 'rhel_subscription_server') | default(rhsub_server) }}"
- fail:
msg: "This role is only supported for Red Hat hosts"
when: ansible_distribution != 'RedHat'
- fail:
- msg: Either rsub_user or the rhel_subscription_user env variable are required for this role.
+ msg: Either rhsub_user or the rhel_subscription_user env variable are required for this role.
when: rhel_subscription_user is not defined
- fail:
- msg: Either rsub_pass or the rhel_subscription_pass env variable are required for this role.
+ msg: Either rhsub_pass or the rhel_subscription_pass env variable are required for this role.
when: rhel_subscription_pass is not defined
+- name: Satellite preparation
+ command: "rpm -Uvh http://{{ rhel_subscription_server }}/pub/katello-ca-consumer-latest.noarch.rpm"
+ args:
+ creates: /etc/rhsm/ca/katello-server-ca.pem
+ when: rhel_subscription_server is defined and rhel_subscription_server
+
- name: RedHat subscriptions
redhat_subscription:
username: "{{ rhel_subscription_user }}"
diff --git a/roles/tito/README.md b/roles/tito/README.md
new file mode 100644
index 000000000..c4e2856dc
--- /dev/null
+++ b/roles/tito/README.md
@@ -0,0 +1,38 @@
+Role Name
+=========
+
+This role manages Tito.
+
+https://github.com/dgoodwin/tito
+
+Requirements
+------------
+
+None
+
+Role Variables
+--------------
+
+None
+
+Dependencies
+------------
+
+None
+
+Example Playbook
+----------------
+
+ - hosts: servers
+ roles:
+ - role: tito
+
+License
+-------
+
+Apache License, Version 2.0
+
+Author Information
+------------------
+
+Thomas Wiest
diff --git a/roles/tito/defaults/main.yml b/roles/tito/defaults/main.yml
new file mode 100644
index 000000000..dd7cd269e
--- /dev/null
+++ b/roles/tito/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+# defaults file for tito
diff --git a/roles/tito/handlers/main.yml b/roles/tito/handlers/main.yml
new file mode 100644
index 000000000..e9ce609d5
--- /dev/null
+++ b/roles/tito/handlers/main.yml
@@ -0,0 +1,2 @@
+---
+# handlers file for tito
diff --git a/roles/tito/meta/main.yml b/roles/tito/meta/main.yml
new file mode 100644
index 000000000..fb121c08e
--- /dev/null
+++ b/roles/tito/meta/main.yml
@@ -0,0 +1,14 @@
+---
+galaxy_info:
+ author: Thomas Wiest
+ description: Manages Tito
+ company: Red Hat
+ license: Apache License, Version 2.0
+ min_ansible_version: 1.2
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - packaging
+dependencies: []
diff --git a/roles/tito/tasks/main.yml b/roles/tito/tasks/main.yml
new file mode 100644
index 000000000..f7b4ef363
--- /dev/null
+++ b/roles/tito/tasks/main.yml
@@ -0,0 +1,4 @@
+---
+- yum:
+ name: tito
+ state: present
diff --git a/roles/tito/vars/main.yml b/roles/tito/vars/main.yml
new file mode 100644
index 000000000..8a1aafc41
--- /dev/null
+++ b/roles/tito/vars/main.yml
@@ -0,0 +1,2 @@
+---
+# vars file for tito
diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py
index 10949763f..f34255234 100644
--- a/utils/src/ooinstall/cli_installer.py
+++ b/utils/src/ooinstall/cli_installer.py
@@ -190,7 +190,7 @@ Notes:
facts_confirmed = click.confirm("Do the above facts look correct?")
if not facts_confirmed:
message = """
-Edit %s with the desired values and rerun atomic-openshift-installer with --unattended .
+Edit %s with the desired values and run `atomic-openshift-installer --unattended install` to restart the install.
""" % oo_cfg.config_path
click.echo(message)
# Make sure we actually write out the config file.
@@ -338,7 +338,9 @@ def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force, verbose):
if not unattended:
click.echo('By default the installer only adds new nodes to an installed environment.')
response = click.prompt('Do you want to (1) only add additional nodes or ' \
- '(2) perform a clean install?', type=int)
+                                '(2) reinstall the existing hosts, ' \
+ 'potentially erasing any custom changes?',
+ type=int)
# TODO: this should be reworked with error handling.
# Click can certainly do this for us.
# This should be refactored as soon as we add a 3rd option.
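The TODO just above notes that click could handle this validation itself. A hypothetical sketch of that refinement (not part of this change; click.prompt re-prompts when the supplied type rejects the answer):

    import click

    # Hypothetical rework of the prompt above: constraining the type to the
    # range 1-2 removes the need for manual error handling of the choice.
    response = click.prompt(
        'Do you want to (1) only add additional nodes or '
        '(2) reinstall the existing hosts, potentially erasing any custom changes?',
        type=click.IntRange(1, 2))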
@@ -431,8 +433,10 @@ def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force, verbose):
# Main CLI entrypoint, not much we can do about too many arguments.
def cli(ctx, unattended, configuration, ansible_playbook_directory, ansible_config, ansible_log_path, verbose):
"""
- The main click CLI module. Responsible for handling most common CLI options,
- assigning any defaults and adding to the context for the sub-commands.
+    atomic-openshift-installer makes installing OSE or AEP easier by interactively gathering the data needed to perform the installation on each host.
+ It can also be run in unattended mode if provided with a configuration file.
+
+ Further reading: https://docs.openshift.com/enterprise/latest/install_config/install/quick_install.html
"""
ctx.obj = {}
ctx.obj['unattended'] = unattended
diff --git a/utils/src/ooinstall/openshift_ansible.py b/utils/src/ooinstall/openshift_ansible.py
index e4c808e85..372f27bda 100644
--- a/utils/src/ooinstall/openshift_ansible.py
+++ b/utils/src/ooinstall/openshift_ansible.py
@@ -82,7 +82,7 @@ def write_host(host, inventory, scheduleable=True):
if installer_host in [host.connect_to, host.hostname, host.public_hostname]:
facts += ' ansible_connection=local'
if os.geteuid() != 0:
- no_pwd_sudo = subprocess.call(['sudo', '-v', '-n'])
+ no_pwd_sudo = subprocess.call(['sudo', '-n', 'echo', 'openshift'])
if no_pwd_sudo == 1:
print 'The atomic-openshift-installer requires sudo access without a password.'
sys.exit(1)
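The hunk above swaps the credential-cache check (`sudo -v -n`) for running a harmless command non-interactively. A minimal standalone sketch of that probe, assuming only the Python standard library:

    import subprocess
    import sys

    def have_passwordless_sudo():
        # `sudo -n` exits non-zero instead of prompting when a password would
        # be required; `echo openshift` is just a harmless command to run.
        return subprocess.call(['sudo', '-n', 'echo', 'openshift']) == 0

    if not have_passwordless_sudo():
        print 'The atomic-openshift-installer requires sudo access without a password.'
        sys.exit(1)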
diff --git a/utils/test/cli_installer_tests.py b/utils/test/cli_installer_tests.py
index d58539b18..fc16d9ceb 100644
--- a/utils/test/cli_installer_tests.py
+++ b/utils/test/cli_installer_tests.py
@@ -46,20 +46,20 @@ SAMPLE_CONFIG = """
variant: %s
ansible_ssh_user: root
hosts:
- - connect_to: master-private.example.com
+ - connect_to: 10.0.0.1
ip: 10.0.0.1
hostname: master-private.example.com
public_ip: 24.222.0.1
public_hostname: master.example.com
master: true
node: true
- - connect_to: node1-private.example.com
+ - connect_to: 10.0.0.2
ip: 10.0.0.2
hostname: node1-private.example.com
public_ip: 24.222.0.2
public_hostname: node1.example.com
node: true
- - connect_to: node2-private.example.com
+ - connect_to: 10.0.0.3
ip: 10.0.0.3
hostname: node2-private.example.com
public_ip: 24.222.0.3
@@ -98,6 +98,76 @@ class OOCliFixture(OOInstallFixture):
f.close()
return config
+ def _verify_load_facts(self, load_facts_mock):
+ """ Check that we ran load facts with expected inputs. """
+ load_facts_args = load_facts_mock.call_args[0]
+ self.assertEquals(os.path.join(self.work_dir, ".ansible/hosts"),
+ load_facts_args[0])
+ self.assertEquals(os.path.join(self.work_dir,
+ "playbooks/byo/openshift_facts.yml"), load_facts_args[1])
+ env_vars = load_facts_args[2]
+ self.assertEquals(os.path.join(self.work_dir,
+ '.ansible/callback_facts.yaml'),
+ env_vars['OO_INSTALL_CALLBACK_FACTS_YAML'])
+ self.assertEqual('/tmp/ansible.log', env_vars['ANSIBLE_LOG_PATH'])
+
+ def _verify_run_playbook(self, run_playbook_mock, exp_hosts_len, exp_hosts_to_run_on_len):
+ """ Check that we ran playbook with expected inputs. """
+ hosts = run_playbook_mock.call_args[0][0]
+ hosts_to_run_on = run_playbook_mock.call_args[0][1]
+ self.assertEquals(exp_hosts_len, len(hosts))
+ self.assertEquals(exp_hosts_to_run_on_len, len(hosts_to_run_on))
+
+ def _verify_config_hosts(self, written_config, host_count):
+ print written_config['hosts']
+ self.assertEquals(host_count, len(written_config['hosts']))
+ for h in written_config['hosts']:
+ self.assertTrue(h['node'])
+ self.assertTrue('ip' in h)
+ self.assertTrue('hostname' in h)
+ self.assertTrue('public_ip' in h)
+ self.assertTrue('public_hostname' in h)
+
+ #pylint: disable=too-many-arguments
+ def _verify_get_hosts_to_run_on(self, mock_facts, load_facts_mock,
+ run_playbook_mock, cli_input,
+ exp_hosts_len=None, exp_hosts_to_run_on_len=None,
+ force=None):
+ """
+ Tests cli_installer.py:get_hosts_to_run_on. That method has quite a
+ few subtle branches in the logic. The goal with this method is simply
+ to handle all the messy stuff here and allow the main test cases to be
+ easily read. The basic idea is to modify mock_facts to return a
+ version indicating OpenShift is already installed on particular hosts.
+ """
+ load_facts_mock.return_value = (mock_facts, 0)
+ run_playbook_mock.return_value = 0
+
+ if cli_input:
+ self.cli_args.append("install")
+ result = self.runner.invoke(cli.cli,
+ self.cli_args,
+ input=cli_input)
+ else:
+ config_file = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'), SAMPLE_CONFIG % 'openshift-enterprise')
+
+ self.cli_args.extend(["-c", config_file, "install"])
+ if force:
+ self.cli_args.append("--force")
+ result = self.runner.invoke(cli.cli, self.cli_args)
+ written_config = self._read_yaml(config_file)
+ self._verify_config_hosts(written_config, exp_hosts_len)
+
+ self.assert_result(result, 0)
+ self._verify_load_facts(load_facts_mock)
+ self._verify_run_playbook(run_playbook_mock, exp_hosts_len, exp_hosts_to_run_on_len)
+
+ # Make sure we ran on the expected masters and nodes:
+ hosts = run_playbook_mock.call_args[0][0]
+ hosts_to_run_on = run_playbook_mock.call_args[0][1]
+ self.assertEquals(exp_hosts_len, len(hosts))
+ self.assertEquals(exp_hosts_to_run_on_len, len(hosts_to_run_on))
class UnattendedCliTests(OOCliFixture):
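The _verify_get_hosts_to_run_on docstring above sums up the trick used throughout these tests: a host whose mocked facts carry a version is treated as already installed. A minimal sketch of that predicate (a hypothetical helper, not code from this change):

    # Hypothetical helper showing how the tests mark hosts as installed:
    # setting facts['<ip>']['common']['version'] makes the installer treat
    # that host as part of an existing environment.
    def is_installed(callback_facts, connect_to):
        return bool(callback_facts.get(connect_to, {})
                                  .get('common', {})
                                  .get('version'))

    # e.g. after mock_facts['10.0.0.1']['common']['version'] = "3.0.0",
    # is_installed(mock_facts, '10.0.0.1') returns True.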
@@ -105,6 +175,92 @@ class UnattendedCliTests(OOCliFixture):
OOCliFixture.setUp(self)
self.cli_args.append("-u")
+ # unattended with config file and all installed hosts (without --force)
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_get_hosts_to_run_on1(self, load_facts_mock, run_playbook_mock):
+ mock_facts = copy.deepcopy(MOCK_FACTS)
+ mock_facts['10.0.0.1']['common']['version'] = "3.0.0"
+ mock_facts['10.0.0.2']['common']['version'] = "3.0.0"
+ mock_facts['10.0.0.3']['common']['version'] = "3.0.0"
+
+ load_facts_mock.return_value = (mock_facts, 0)
+ run_playbook_mock.return_value = 0
+
+ config_file = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'), SAMPLE_CONFIG % 'openshift-enterprise')
+
+ self.cli_args.extend(["-c", config_file, "install"])
+ result = self.runner.invoke(cli.cli, self.cli_args)
+
+ if result.exception is None or result.exit_code != 1:
+ print "Exit code: %s" % result.exit_code
+ self.fail("Unexpected CLI return")
+
+ # unattended with config file and all installed hosts (with --force)
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_get_hosts_to_run_on2(self, load_facts_mock, run_playbook_mock):
+ mock_facts = copy.deepcopy(MOCK_FACTS)
+ mock_facts['10.0.0.1']['common']['version'] = "3.0.0"
+ mock_facts['10.0.0.2']['common']['version'] = "3.0.0"
+ mock_facts['10.0.0.3']['common']['version'] = "3.0.0"
+ self._verify_get_hosts_to_run_on(mock_facts, load_facts_mock, run_playbook_mock,
+ cli_input=None,
+ exp_hosts_len=3,
+ exp_hosts_to_run_on_len=3,
+ force=True)
+
+ # unattended with config file and no installed hosts (without --force)
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_get_hosts_to_run_on3(self, load_facts_mock, run_playbook_mock):
+ load_facts_mock.return_value = (MOCK_FACTS, 0)
+ run_playbook_mock.return_value = 0
+ self._verify_get_hosts_to_run_on(MOCK_FACTS, load_facts_mock, run_playbook_mock,
+ cli_input=None,
+ exp_hosts_len=3,
+ exp_hosts_to_run_on_len=3,
+ force=False)
+
+ # unattended with config file and no installed hosts (with --force)
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_get_hosts_to_run_on4(self, load_facts_mock, run_playbook_mock):
+ load_facts_mock.return_value = (MOCK_FACTS, 0)
+ run_playbook_mock.return_value = 0
+ self._verify_get_hosts_to_run_on(MOCK_FACTS, load_facts_mock, run_playbook_mock,
+ cli_input=None,
+ exp_hosts_len=3,
+ exp_hosts_to_run_on_len=3,
+ force=True)
+
+    # unattended with config file and some installed, some uninstalled hosts (without --force)
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_get_hosts_to_run_on5(self, load_facts_mock, run_playbook_mock):
+ mock_facts = copy.deepcopy(MOCK_FACTS)
+ mock_facts['10.0.0.1']['common']['version'] = "3.0.0"
+ mock_facts['10.0.0.2']['common']['version'] = "3.0.0"
+ self._verify_get_hosts_to_run_on(mock_facts, load_facts_mock, run_playbook_mock,
+ cli_input=None,
+ exp_hosts_len=3,
+ exp_hosts_to_run_on_len=2,
+ force=False)
+
+    # unattended with config file and some installed, some uninstalled hosts (with --force)
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_get_hosts_to_run_on6(self, load_facts_mock, run_playbook_mock):
+ mock_facts = copy.deepcopy(MOCK_FACTS)
+ mock_facts['10.0.0.1']['common']['version'] = "3.0.0"
+ mock_facts['10.0.0.2']['common']['version'] = "3.0.0"
+ self._verify_get_hosts_to_run_on(mock_facts, load_facts_mock, run_playbook_mock,
+ cli_input=None,
+ exp_hosts_len=3,
+ exp_hosts_to_run_on_len=3,
+ force=True)
+
@patch('ooinstall.openshift_ansible.run_main_playbook')
@patch('ooinstall.openshift_ansible.load_system_facts')
def test_cfg_full_run(self, load_facts_mock, run_playbook_mock):
@@ -363,35 +519,6 @@ class AttendedCliTests(OOCliFixture):
return '\n'.join(inputs)
- def _verify_load_facts(self, load_facts_mock):
- """ Check that we ran load facts with expected inputs. """
- load_facts_args = load_facts_mock.call_args[0]
- self.assertEquals(os.path.join(self.work_dir, ".ansible/hosts"),
- load_facts_args[0])
- self.assertEquals(os.path.join(self.work_dir,
- "playbooks/byo/openshift_facts.yml"), load_facts_args[1])
- env_vars = load_facts_args[2]
- self.assertEquals(os.path.join(self.work_dir,
- '.ansible/callback_facts.yaml'),
- env_vars['OO_INSTALL_CALLBACK_FACTS_YAML'])
- self.assertEqual('/tmp/ansible.log', env_vars['ANSIBLE_LOG_PATH'])
-
- def _verify_run_playbook(self, run_playbook_mock, exp_hosts_len, exp_hosts_to_run_on_len):
- """ Check that we ran playbook with expected inputs. """
- hosts = run_playbook_mock.call_args[0][0]
- hosts_to_run_on = run_playbook_mock.call_args[0][1]
- self.assertEquals(exp_hosts_len, len(hosts))
- self.assertEquals(exp_hosts_to_run_on_len, len(hosts_to_run_on))
-
- def _verify_config_hosts(self, written_config, host_count):
- self.assertEquals(host_count, len(written_config['hosts']))
- for h in written_config['hosts']:
- self.assertTrue(h['node'])
- self.assertTrue('ip' in h)
- self.assertTrue('hostname' in h)
- self.assertTrue('public_ip' in h)
- self.assertTrue('public_hostname' in h)
-
@patch('ooinstall.openshift_ansible.run_main_playbook')
@patch('ooinstall.openshift_ansible.load_system_facts')
def test_full_run(self, load_facts_mock, run_playbook_mock):
@@ -416,6 +543,7 @@ class AttendedCliTests(OOCliFixture):
written_config = self._read_yaml(self.config_file)
self._verify_config_hosts(written_config, 3)
+    # interactive with config file and some installed, some uninstalled hosts
@patch('ooinstall.openshift_ansible.run_main_playbook')
@patch('ooinstall.openshift_ansible.load_system_facts')
def test_add_nodes(self, load_facts_mock, run_playbook_mock):
@@ -472,6 +600,29 @@ class AttendedCliTests(OOCliFixture):
written_config = self._read_yaml(config_file)
self._verify_config_hosts(written_config, 3)
+    # interactive with config file and all installed hosts
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_get_hosts_to_run_on(self, load_facts_mock, run_playbook_mock):
+ mock_facts = copy.deepcopy(MOCK_FACTS)
+ mock_facts['10.0.0.1']['common']['version'] = "3.0.0"
+ mock_facts['10.0.0.2']['common']['version'] = "3.0.0"
+
+ cli_input = self._build_input(hosts=[
+ ('10.0.0.1', True),
+ ],
+ add_nodes=[('10.0.0.2', False)],
+ ssh_user='root',
+ variant_num=1,
+ confirm_facts='y')
+
+ self._verify_get_hosts_to_run_on(mock_facts, load_facts_mock,
+ run_playbook_mock,
+ cli_input,
+ exp_hosts_len=2,
+ exp_hosts_to_run_on_len=2,
+ force=False)
+
# TODO: test with config file, attended add node
# TODO: test with config file, attended new node already in config file
# TODO: test with config file, attended new node already in config file, plus manually added nodes