-rw-r--r--  .gitignore  1
-rw-r--r--  .tito/packages/openshift-ansible  2
-rw-r--r--  README_vagrant.md  1
-rw-r--r--  bin/README_SHELL_COMPLETION  2
-rw-r--r--  bin/openshift_ansible.conf.example  2
-rw-r--r--  bin/openshift_ansible/awsutil.py  11
l---------  bin/openshift_ansible/multi_ec2.py  1
l---------  bin/openshift_ansible/multi_inventory.py  1
-rwxr-xr-x  bin/ossh_bash_completion  20
-rw-r--r--  bin/ossh_zsh_completion  10
-rw-r--r--  bin/zsh_functions/_ossh  4
-rw-r--r--  filter_plugins/oo_filters.py  80
-rw-r--r--  inventory/byo/hosts.example  57
-rwxr-xr-x  inventory/gce/hosts/gce.py  32
-rw-r--r--  inventory/multi_ec2.yaml.example  32
-rwxr-xr-x  inventory/multi_inventory.py (renamed from inventory/multi_ec2.py)  144
-rw-r--r--  inventory/multi_inventory.yaml.example  51
-rw-r--r--  openshift-ansible.spec  222
-rw-r--r--  playbooks/adhoc/uninstall.yml  13
l---------  playbooks/adhoc/upgrades/filter_plugins  1
l---------  playbooks/adhoc/upgrades/lookup_plugins  1
l---------  playbooks/adhoc/upgrades/roles  1
-rw-r--r--  playbooks/aws/openshift-cluster/config.yml  1
-rw-r--r--  playbooks/aws/openshift-cluster/launch.yml  8
-rw-r--r--  playbooks/byo/openshift-cluster/config.yml  1
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/README.md  8
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_0_minor/README.md (renamed from playbooks/adhoc/upgrades/README.md)  10
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_0_minor/upgrade.yml  9
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/README.md  17
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml  9
-rw-r--r--  playbooks/common/openshift-cluster/config.yml  67
-rw-r--r--  playbooks/common/openshift-cluster/evaluate_groups.yml  76
-rw-r--r--  playbooks/common/openshift-cluster/scaleup.yml  16
-rw-r--r--  playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml (renamed from playbooks/common/openshift-cluster/set_etcd_launch_facts_tasks.yml)  0
-rw-r--r--  playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml (renamed from playbooks/common/openshift-cluster/set_master_launch_facts_tasks.yml)  0
-rw-r--r--  playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml (renamed from playbooks/common/openshift-cluster/set_node_launch_facts_tasks.yml)  0
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/files/pre-upgrade-check  188
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/files/versions.sh  10
l---------  playbooks/common/openshift-cluster/upgrades/filter_plugins  1
-rwxr-xr-x  playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py  154
l---------  playbooks/common/openshift-cluster/upgrades/lookup_plugins  1
l---------  playbooks/common/openshift-cluster/upgrades/roles  1
l---------  playbooks/common/openshift-cluster/upgrades/v3_0_minor/filter_plugins  1
l---------  playbooks/common/openshift-cluster/upgrades/v3_0_minor/library  1
l---------  playbooks/common/openshift-cluster/upgrades/v3_0_minor/lookup_plugins  1
l---------  playbooks/common/openshift-cluster/upgrades/v3_0_minor/roles  1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml (renamed from playbooks/adhoc/upgrades/upgrade.yml)  36
l---------  playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/filter_plugins  1
l---------  playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/library  1
l---------  playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/lookup_plugins  1
l---------  playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/roles  1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml  407
-rw-r--r--  playbooks/common/openshift-etcd/config.yml  2
-rw-r--r--  playbooks/common/openshift-master/config.yml  108
-rw-r--r--  playbooks/common/openshift-node/config.yml  78
-rw-r--r--  playbooks/gce/openshift-cluster/config.yml  1
-rw-r--r--  playbooks/gce/openshift-cluster/join_node.yml  2
-rw-r--r--  playbooks/gce/openshift-cluster/launch.yml  6
-rw-r--r--  playbooks/libvirt/openshift-cluster/config.yml  1
-rw-r--r--  playbooks/libvirt/openshift-cluster/launch.yml  8
-rw-r--r--  playbooks/libvirt/openshift-cluster/templates/user-data  6
-rw-r--r--  playbooks/openstack/openshift-cluster/config.yml  1
-rw-r--r--  roles/flannel/README.md  45
-rw-r--r--  roles/flannel/defaults/main.yaml  8
-rw-r--r--  roles/flannel/handlers/main.yml  8
-rw-r--r--  roles/flannel/meta/main.yml  16
-rw-r--r--  roles/flannel/tasks/main.yml  43
-rw-r--r--  roles/flannel_register/README.md  47
-rw-r--r--  roles/flannel_register/defaults/main.yaml  11
-rw-r--r--  roles/flannel_register/meta/main.yml  16
-rw-r--r--  roles/flannel_register/tasks/main.yml  14
-rw-r--r--  roles/flannel_register/templates/flannel-config.json  8
-rw-r--r--  roles/haproxy/README.md  34
-rw-r--r--  roles/haproxy/defaults/main.yml  21
-rw-r--r--  roles/haproxy/handlers/main.yml  5
-rw-r--r--  roles/haproxy/meta/main.yml  14
-rw-r--r--  roles/haproxy/tasks/main.yml  25
-rw-r--r--  roles/haproxy/templates/haproxy.cfg.j2  76
-rw-r--r--  roles/kube_nfs_volumes/README.md  3
-rw-r--r--  roles/kube_nfs_volumes/defaults/main.yml  6
-rw-r--r--  roles/kube_nfs_volumes/tasks/main.yml  13
l---------  roles/kube_nfs_volumes/templates/v1/nfs.json.j2  1
-rw-r--r--  roles/kube_nfs_volumes/templates/v1beta3/nfs.json.j2 (renamed from roles/kube_nfs_volumes/templates/nfs.json.j2)  0
-rw-r--r--  roles/lib_zabbix/library/zbx_itemprototype.py  39
-rw-r--r--  roles/lib_zabbix/tasks/create_template.yml  4
-rw-r--r--  roles/openshift_ansible_inventory/tasks/main.yml  16
-rw-r--r--  roles/openshift_common/tasks/main.yml  5
-rw-r--r--  roles/openshift_examples/defaults/main.yml  2
-rwxr-xr-x  roles/openshift_examples/examples-sync.sh  8
-rw-r--r--  roles/openshift_examples/files/examples/infrastructure-templates/enterprise/logging-deployer.yaml  151
-rw-r--r--  roles/openshift_examples/files/examples/infrastructure-templates/enterprise/metrics-deployer.yaml  116
-rw-r--r--  roles/openshift_examples/files/examples/infrastructure-templates/origin/logging-deployer.yaml  151
-rw-r--r--  roles/openshift_examples/files/examples/infrastructure-templates/origin/metrics-deployer.yaml  116
-rw-r--r--  roles/openshift_examples/tasks/main.yml  16
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py  144
-rw-r--r--  roles/openshift_facts/tasks/main.yml  7
-rw-r--r--  roles/openshift_master/handlers/main.yml  10
-rw-r--r--  roles/openshift_master/tasks/main.yml  115
-rw-r--r--  roles/openshift_master/templates/atomic-openshift-master-api.j2  9
-rw-r--r--  roles/openshift_master/templates/atomic-openshift-master-api.service.j2  21
-rw-r--r--  roles/openshift_master/templates/atomic-openshift-master-controllers.j2  9
-rw-r--r--  roles/openshift_master/templates/atomic-openshift-master-controllers.service.j2  22
-rw-r--r--  roles/openshift_master/templates/master.yaml.v1.j2  36
-rw-r--r--  roles/openshift_master/templates/sessionSecretsFile.yaml.v1.j2  7
-rw-r--r--  roles/openshift_master/vars/main.yml  1
-rw-r--r--  roles/openshift_master_ca/tasks/main.yml  2
-rw-r--r--  roles/openshift_master_certificates/tasks/main.yml  24
-rw-r--r--  roles/openshift_master_cluster/tasks/configure_deferred.yml  8
-rw-r--r--  roles/openshift_master_cluster/tasks/main.yml  5
-rw-r--r--  roles/openshift_node/meta/main.yml  1
-rw-r--r--  roles/openshift_node/tasks/main.yml  15
-rw-r--r--  roles/openshift_node/templates/node.yaml.v1.j2  2
-rw-r--r--  roles/openshift_repos/tasks/main.yaml  2
-rw-r--r--  roles/os_zabbix/vars/template_os_linux.yml  46
-rw-r--r--  test/units/README.md  2
-rwxr-xr-x  test/units/multi_inventory_test.py  114
-rwxr-xr-x  test/units/mutli_ec2_test.py  95
-rw-r--r--  utils/docs/config.md  8
-rw-r--r--  utils/src/ooinstall/cli_installer.py  193
-rw-r--r--  utils/src/ooinstall/oo_config.py  69
-rw-r--r--  utils/src/ooinstall/openshift_ansible.py  100
-rw-r--r--  utils/src/ooinstall/variants.py  5
-rw-r--r--  utils/test/cli_installer_tests.py  13
-rw-r--r--  utils/test/oo_config_tests.py  86
124 files changed, 3566 insertions, 573 deletions
diff --git a/.gitignore b/.gitignore
index cacc711a1..8f46c269f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -15,4 +15,5 @@
.DS_Store
gce.ini
multi_ec2.yaml
+multi_inventory.yaml
.vagrant
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index 92f545b25..abeaa06a3 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.0.6-1 ./
+3.0.9-1 ./
diff --git a/README_vagrant.md b/README_vagrant.md
index 5f87d6633..f3e4cfc18 100644
--- a/README_vagrant.md
+++ b/README_vagrant.md
@@ -1,5 +1,6 @@
Requirements
------------
+- ansible (the latest 1.9 release is preferred, but any version greater than 1.9.1 should be sufficient).
- vagrant (tested against version 1.7.2)
- vagrant-hostmanager plugin (tested against version 1.5.0)
- vagrant-registration plugin (only required for enterprise deployment type)
diff --git a/bin/README_SHELL_COMPLETION b/bin/README_SHELL_COMPLETION
index 5f05df7fc..49bba3acc 100644
--- a/bin/README_SHELL_COMPLETION
+++ b/bin/README_SHELL_COMPLETION
@@ -14,7 +14,7 @@ will populate the cache file and the completions should
become available.
This script will look at the cached version of your
-multi_ec2 results in ~/.ansible/tmp/multi_ec2_inventory.cache.
+multi_inventory results in ~/.ansible/tmp/multi_inventory.cache.
It will then parse a few {host}.{env} out of the json
and return them to be completable.
diff --git a/bin/openshift_ansible.conf.example b/bin/openshift_ansible.conf.example
index e891b855a..8786dfc13 100644
--- a/bin/openshift_ansible.conf.example
+++ b/bin/openshift_ansible.conf.example
@@ -1,5 +1,5 @@
#[main]
-#inventory = /usr/share/ansible/inventory/multi_ec2.py
+#inventory = /usr/share/ansible/inventory/multi_inventory.py
#[host_type_aliases]
#host-type-one = aliasa,aliasb
diff --git a/bin/openshift_ansible/awsutil.py b/bin/openshift_ansible/awsutil.py
index 9df034f57..45345007c 100644
--- a/bin/openshift_ansible/awsutil.py
+++ b/bin/openshift_ansible/awsutil.py
@@ -4,7 +4,10 @@
import os
import re
-from openshift_ansible import multi_ec2
+
+# Buildbot does not have multi_inventory installed
+#pylint: disable=no-name-in-module
+from openshift_ansible import multi_inventory
class ArgumentError(Exception):
"""This class is raised when improper arguments are passed."""
@@ -49,9 +52,9 @@ class AwsUtil(object):
Keyword arguments:
args -- optional arguments to pass to the inventory script
"""
- mec2 = multi_ec2.MultiEc2(args)
- mec2.run()
- return mec2.result
+ minv = multi_inventory.MultiInventory(args)
+ minv.run()
+ return minv.result
def get_environments(self):
"""Searches for env tags in the inventory and returns all of the envs found."""
diff --git a/bin/openshift_ansible/multi_ec2.py b/bin/openshift_ansible/multi_ec2.py
deleted file mode 120000
index 660a0418e..000000000
--- a/bin/openshift_ansible/multi_ec2.py
+++ /dev/null
@@ -1 +0,0 @@
-../../inventory/multi_ec2.py \ No newline at end of file
diff --git a/bin/openshift_ansible/multi_inventory.py b/bin/openshift_ansible/multi_inventory.py
new file mode 120000
index 000000000..b40feec07
--- /dev/null
+++ b/bin/openshift_ansible/multi_inventory.py
@@ -0,0 +1 @@
+../../inventory/multi_inventory.py \ No newline at end of file
diff --git a/bin/ossh_bash_completion b/bin/ossh_bash_completion
index 5072161f0..997ff0f9c 100755
--- a/bin/ossh_bash_completion
+++ b/bin/ossh_bash_completion
@@ -1,12 +1,12 @@
__ossh_known_hosts(){
if python -c 'import openshift_ansible' &>/dev/null; then
- /usr/bin/python -c 'from openshift_ansible import multi_ec2; m=multi_ec2.MultiEc2(); m.run(); z=m.result; print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])'
+ /usr/bin/python -c 'from openshift_ansible import multi_inventory; m=multi_inventory.MultiInventory(); m.run(); z=m.result; print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])'
- elif [[ -f /dev/shm/.ansible/tmp/multi_ec2_inventory.cache ]]; then
- /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_ec2_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])'
+ elif [[ -f /dev/shm/.ansible/tmp/multi_inventory.cache ]]; then
+ /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])'
- elif [[ -f ~/.ansible/tmp/multi_ec2_inventory.cache ]]; then
- /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_ec2_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])'
+ elif [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then
+ /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])'
fi
}
@@ -26,13 +26,13 @@ complete -F _ossh ossh oscp
__opssh_known_hosts(){
if python -c 'import openshift_ansible' &>/dev/null; then
- /usr/bin/python -c 'from openshift_ansible.multi_ec2 import MultiEc2; m=MultiEc2(); m.run(); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in m.result["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])'
+ /usr/bin/python -c 'from openshift_ansible.multi_inventory import MultiInventory; m=MultiInventory(); m.run(); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in m.result["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])'
- elif [[ -f /dev/shm/.ansible/tmp/multi_ec2_inventory.cache ]]; then
- /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_ec2_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in z["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])'
+ elif [[ -f /dev/shm/.ansible/tmp/multi_inventory.cache ]]; then
+ /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in z["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])'
- elif [[ -f ~/.ansible/tmp/multi_ec2_inventory.cache ]]; then
- /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("/dev/shm/.ansible/tmp/multi_ec2_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in z["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])'
+ elif [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then
+ /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("/dev/shm/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in z["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])'
fi
}
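
The completion helpers above pack the cache lookup into python one-liners. As a more readable reference, here is a minimal standalone sketch of the same lookup (Python 2, matching the codebase), assuming the default cache path and the ec2_tag_* hostvars shown above:

#!/usr/bin/env python2
# Sketch: print "{name}.{environment}" completions from the inventory cache.
import json
import os

CACHE = os.path.expanduser('~/.ansible/tmp/multi_inventory.cache')

with open(CACHE) as cache_file:
    inventory = json.load(cache_file)

for host in inventory['_meta']['hostvars'].values():
    if 'ec2_tag_Name' in host and 'ec2_tag_environment' in host:
        print "%s.%s" % (host['ec2_tag_Name'], host['ec2_tag_environment'])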
diff --git a/bin/ossh_zsh_completion b/bin/ossh_zsh_completion
index 44500c618..3c4018636 100644
--- a/bin/ossh_zsh_completion
+++ b/bin/ossh_zsh_completion
@@ -2,13 +2,13 @@
_ossh_known_hosts(){
if python -c 'import openshift_ansible' &>/dev/null; then
- print $(/usr/bin/python -c 'from openshift_ansible import multi_ec2; m=multi_ec2.MultiEc2(); m.run(); z=m.result; print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])')
+ print $(/usr/bin/python -c 'from openshift_ansible import multi_inventory; m=multi_inventory.MultiInventory(); m.run(); z=m.result; print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])')
- elif [[ -f /dev/shm/.ansible/tmp/multi_ec2_inventory.cache ]]; then
- print $(/usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_ec2_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])')
+ elif [[ -f /dev/shm/.ansible/tmp/multi_inventory.cache ]]; then
+ print $(/usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])')
- elif [[ -f ~/.ansible/tmp/multi_ec2_inventory.cache ]]; then
- print $(/usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_ec2_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])')
+ elif [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then
+ print $(/usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])')
fi
diff --git a/bin/zsh_functions/_ossh b/bin/zsh_functions/_ossh
index 7c6cb7b0b..d205e1055 100644
--- a/bin/zsh_functions/_ossh
+++ b/bin/zsh_functions/_ossh
@@ -1,8 +1,8 @@
#compdef ossh oscp
_ossh_known_hosts(){
- if [[ -f ~/.ansible/tmp/multi_ec2_inventory.cache ]]; then
- print $(/usr/bin/python -c 'import json,os; z = json.loads(open("%s"%os.path.expanduser("~/.ansible/tmp/multi_ec2_inventory.cache")).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items()])')
+ if [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then
+ print $(/usr/bin/python -c 'import json,os; z = json.loads(open("%s"%os.path.expanduser("~/.ansible/tmp/multi_inventory.cache")).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items()])')
fi
}
diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py
index a57b0f895..9a17913c4 100644
--- a/filter_plugins/oo_filters.py
+++ b/filter_plugins/oo_filters.py
@@ -7,6 +7,8 @@ Custom filters for use in openshift-ansible
from ansible import errors
from operator import itemgetter
+import OpenSSL.crypto
+import os.path
import pdb
import re
import json
@@ -241,6 +243,21 @@ class FilterModule(object):
return string.split(separator)
@staticmethod
+ def oo_haproxy_backend_masters(hosts):
+ ''' This takes an array of dicts and returns an array of dicts
+ to be used as a backend for the haproxy role
+ '''
+ servers = []
+ for idx, host_info in enumerate(hosts):
+ server = dict(name="master%s" % idx)
+ server_ip = host_info['openshift']['common']['ip']
+ server_port = host_info['openshift']['master']['api_port']
+ server['address'] = "%s:%s" % (server_ip, server_port)
+ server['opts'] = 'check'
+ servers.append(server)
+ return servers
+
+ @staticmethod
def oo_filter_list(data, filter_attr=None):
''' This returns a list, which contains all items where filter_attr
evaluates to true
@@ -258,7 +275,7 @@ class FilterModule(object):
raise errors.AnsibleFilterError("|failed expects filter_attr is a str")
# Gather up the values for the list of keys passed in
- return [x for x in data if x[filter_attr]]
+ return [x for x in data if x.has_key(filter_attr) and x[filter_attr]]
@staticmethod
def oo_parse_heat_stack_outputs(data):
@@ -327,6 +344,63 @@ class FilterModule(object):
return revamped_outputs
+ @staticmethod
+ # pylint: disable=too-many-branches
+ def oo_parse_certificate_names(certificates, data_dir, internal_hostnames):
+ ''' Parses names from list of certificate hashes.
+
+ Ex: certificates = [{ "certfile": "/etc/origin/master/custom1.crt",
+ "keyfile": "/etc/origin/master/custom1.key" },
+ { "certfile": "custom2.crt",
+ "keyfile": "custom2.key" }]
+
+ returns [{ "certfile": "/etc/origin/master/custom1.crt",
+ "keyfile": "/etc/origin/master/custom1.key",
+ "names": [ "public-master-host.com",
+ "other-master-host.com" ] },
+ { "certfile": "/etc/origin/master/custom2.crt",
+ "keyfile": "/etc/origin/master/custom2.key",
+ "names": [ "some-hostname.com" ] }]
+ '''
+ if not issubclass(type(certificates), list):
+ raise errors.AnsibleFilterError("|failed expects certificates is a list")
+
+ if not issubclass(type(data_dir), unicode):
+ raise errors.AnsibleFilterError("|failed expects data_dir is unicode")
+
+ if not issubclass(type(internal_hostnames), list):
+ raise errors.AnsibleFilterError("|failed expects internal_hostnames is list")
+
+ for certificate in certificates:
+ if 'names' in certificate.keys():
+ continue
+ else:
+ certificate['names'] = []
+
+ if not os.path.isfile(certificate['certfile']) or not os.path.isfile(certificate['keyfile']):
+ raise errors.AnsibleFilterError("|certificate and/or key does not exist '%s', '%s'" %
+ (certificate['certfile'], certificate['keyfile']))
+
+ try:
+ st_cert = open(certificate['certfile'], 'rt').read()
+ cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, st_cert)
+ certificate['names'].append(str(cert.get_subject().commonName.decode()))
+ for i in range(cert.get_extension_count()):
+ if cert.get_extension(i).get_short_name() == 'subjectAltName':
+ for name in str(cert.get_extension(i)).replace('DNS:', '').split(', '):
+ certificate['names'].append(name)
+ except:
+ raise errors.AnsibleFilterError(("|failed to parse certificate '%s', " % certificate['certfile'] +
+ "please specify certificate names in host inventory"))
+
+ certificate['names'] = [name for name in certificate['names'] if name not in internal_hostnames]
+ certificate['names'] = list(set(certificate['names']))
+ if not certificate['names']:
+ raise errors.AnsibleFilterError(("|failed to parse certificate '%s' or " % certificate['certfile'] +
+ "detected a collision with internal hostname, please specify " +
+ "certificate names in host inventory"))
+ return certificates
+
def filters(self):
''' returns a mapping of filters to methods '''
return {
@@ -342,5 +416,7 @@ class FilterModule(object):
"oo_combine_dict": self.oo_combine_dict,
"oo_split": self.oo_split,
"oo_filter_list": self.oo_filter_list,
- "oo_parse_heat_stack_outputs": self.oo_parse_heat_stack_outputs
+ "oo_parse_heat_stack_outputs": self.oo_parse_heat_stack_outputs,
+ "oo_parse_certificate_names": self.oo_parse_certificate_names,
+ "oo_haproxy_backend_masters": self.oo_haproxy_backend_masters
}
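
The new oo_haproxy_backend_masters filter maps openshift facts onto haproxy backend entries. A standalone rendition of that logic follows (Python 2, matching the codebase); the host facts are invented for illustration:

# Sketch: the oo_haproxy_backend_masters transformation with fabricated input.
def haproxy_backend_masters(hosts):
    servers = []
    for idx, host_info in enumerate(hosts):
        server = dict(name="master%s" % idx)
        server_ip = host_info['openshift']['common']['ip']
        server_port = host_info['openshift']['master']['api_port']
        server['address'] = "%s:%s" % (server_ip, server_port)
        server['opts'] = 'check'
        servers.append(server)
    return servers

hosts = [
    {'openshift': {'common': {'ip': '10.0.0.10'}, 'master': {'api_port': 8443}}},
    {'openshift': {'common': {'ip': '10.0.0.11'}, 'master': {'api_port': 8443}}},
]
print haproxy_backend_masters(hosts)
# [{'name': 'master0', 'address': '10.0.0.10:8443', 'opts': 'check'},
#  {'name': 'master1', 'address': '10.0.0.11:8443', 'opts': 'check'}]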
diff --git a/inventory/byo/hosts.example b/inventory/byo/hosts.example
index ad19fe116..56bbb9612 100644
--- a/inventory/byo/hosts.example
+++ b/inventory/byo/hosts.example
@@ -5,6 +5,7 @@
masters
nodes
etcd
+lb
# Set variables common for all OSEv3 hosts
[OSEv3:vars]
@@ -41,6 +42,9 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Allow all auth
#openshift_master_identity_providers=[{'name': 'allow_all', 'login': 'true', 'challenge': 'true', 'kind': 'AllowAllPasswordIdentityProvider'}]
+# LDAP auth
+#openshift_master_identity_providers=[{'name': 'my_ldap_provider', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider', 'attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': '', 'bindPassword': '', 'ca': '', 'insecure': 'false', 'url': 'ldap://ldap.example.com:389/ou=users,dc=example,dc=com?uid'}]
+
# Project Configuration
#osm_project_request_message=''
#osm_project_request_template=''
@@ -57,21 +61,29 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Set cockpit plugins
#osm_cockpit_plugins=['cockpit-kubernetes']
-# master cluster ha variables using pacemaker or RHEL HA
+# Native high availability cluster method with optional load balancer.
+# If no lb group is defined, the installer assumes that a load balancer
+# has been preconfigured. For installation, the value of
+# openshift_master_cluster_hostname must resolve to the load balancer
+# or to one or all of the masters defined in the inventory if no load
+# balancer is present.
+#openshift_master_cluster_method=native
+#openshift_master_cluster_hostname=openshift-ansible.test.example.com
+#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com
+
+# Pacemaker high availability cluster method.
+# A Pacemaker HA environment must be able to self-provision the
+# configured VIP. For installation, openshift_master_cluster_hostname
+# must resolve to the configured VIP.
+#openshift_master_cluster_method=pacemaker
#openshift_master_cluster_password=openshift_cluster
#openshift_master_cluster_vip=192.168.133.25
#openshift_master_cluster_public_vip=192.168.133.25
#openshift_master_cluster_hostname=openshift-ansible.test.example.com
#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com
-# master cluster ha variables when using a different HA solution
-# For installation the value of openshift_master_cluster_hostname must resolve
-# to the first master defined in the inventory.
-# The HA solution must be manually configured after installation and must ensure
-# that the master is running on a single master host.
-#openshift_master_cluster_hostname=openshift-ansible.test.example.com
-#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com
-#openshift_master_cluster_defer_ha=True
+# Override the default controller lease ttl
+#osm_controller_lease_ttl=30
# default subdomain to use for exposed routes
#osm_default_subdomain=apps.test.example.com
@@ -99,6 +111,30 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# set RPM version for debugging purposes
#openshift_pkg_version=-3.0.0.0
+# Configure custom master certificates
+#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key"}]
+# Detected names may be overridden by specifying the "names" key
+#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "names": ["public-master-host.com"]}]
+
+# Session options
+#openshift_master_session_name=ssn
+#openshift_master_session_max_seconds=3600
+
+# An authentication and encryption secret will be generated if secrets
+# are not provided. If provided, openshift_master_session_auth_secrets
+# and openshift_master_session_encryption_secrets must be of equal length.
+#
+# Signing secrets, used to authenticate sessions using
+# HMAC. Secrets of 32 or 64 bytes are recommended.
+#openshift_master_session_auth_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO']
+#
+# Encrypting secrets, used to encrypt sessions. Must be 16, 24, or 32
+# characters long, to select AES-128, AES-192, or AES-256.
+#openshift_master_session_encryption_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO']
+
+# configure how often node iptables rules are refreshed
+#openshift_node_iptables_sync_period=5s
+
# host group for masters
[masters]
ose3-master[1:3]-ansible.test.example.com
@@ -106,6 +142,9 @@ ose3-master[1:3]-ansible.test.example.com
[etcd]
ose3-etcd[1:3]-ansible.test.example.com
+[lb]
+ose3-lb-ansible.test.example.com
+
# NOTE: Currently we require that masters be part of the SDN which requires that they also be nodes
# However, in order to ensure that your masters are not burdened with running pods you should
# make them unschedulable by adding openshift_scheduleable=False to any node that's also a master.
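
The session secret comments above constrain lengths: signing secrets of 32 or 64 bytes, encryption secrets of 16, 24, or 32 characters to select AES-128/192/256. A minimal sketch of generating conforming values (any generator producing the stated lengths works; this one just uses os.urandom and base64, Python 2):

# Sketch: generate values fitting the session secret options above.
import base64
import os

# base64 of 24 random bytes yields exactly 32 characters (no padding).
auth_secret = base64.b64encode(os.urandom(24))        # 32-byte signing secret
encryption_secret = base64.b64encode(os.urandom(24))  # 32 chars -> AES-256

print "openshift_master_session_auth_secrets=['%s']" % auth_secret
print "openshift_master_session_encryption_secrets=['%s']" % encryption_secret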
diff --git a/inventory/gce/hosts/gce.py b/inventory/gce/hosts/gce.py
index 6ed12e011..99746cdbf 100755
--- a/inventory/gce/hosts/gce.py
+++ b/inventory/gce/hosts/gce.py
@@ -66,12 +66,22 @@ Examples:
$ ansible -i gce.py us-central1-a -m shell -a "/bin/uname -a"
Use the GCE inventory script to print out instance specific information
- $ plugins/inventory/gce.py --host my_instance
+ $ contrib/inventory/gce.py --host my_instance
Author: Eric Johnson <erjohnso@google.com>
Version: 0.0.1
'''
+__requires__ = ['pycrypto>=2.6']
+try:
+ import pkg_resources
+except ImportError:
+ # Use pkg_resources to find the correct versions of libraries and set
+ # sys.path appropriately when there are multiversion installs. We don't
+ # fail here as there is code that better expresses the errors where the
+ # library is used.
+ pass
+
USER_AGENT_PRODUCT="Ansible-gce_inventory_plugin"
USER_AGENT_VERSION="v1"
@@ -102,9 +112,9 @@ class GceInventory(object):
# Just display data for specific host
if self.args.host:
- print self.json_format_dict(self.node_to_dict(
+ print(self.json_format_dict(self.node_to_dict(
self.get_instance(self.args.host)),
- pretty=self.args.pretty)
+ pretty=self.args.pretty))
sys.exit(0)
# Otherwise, assume user wants all instances grouped
@@ -120,7 +130,6 @@ class GceInventory(object):
os.path.dirname(os.path.realpath(__file__)), "gce.ini")
gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path)
-
# Create a ConfigParser.
# This provides empty defaults to each key, so that environment
# variable configuration (as opposed to INI configuration) is able
@@ -174,7 +183,6 @@ class GceInventory(object):
args[1] = os.environ.get('GCE_PEM_FILE_PATH', args[1])
kwargs['project'] = os.environ.get('GCE_PROJECT', kwargs['project'])
-
# Retrieve and return the GCE driver.
gce = get_driver(Provider.GCE)(*args, **kwargs)
gce.connection.user_agent_append(
@@ -213,8 +221,7 @@ class GceInventory(object):
'gce_image': inst.image,
'gce_machine_type': inst.size,
'gce_private_ip': inst.private_ips[0],
- # Hosts don't always have a public IP name
- #'gce_public_ip': inst.public_ips[0],
+ 'gce_public_ip': inst.public_ips[0] if len(inst.public_ips) >= 1 else None,
'gce_name': inst.name,
'gce_description': inst.extra['description'],
'gce_status': inst.extra['status'],
@@ -222,15 +229,15 @@ class GceInventory(object):
'gce_tags': inst.extra['tags'],
'gce_metadata': md,
'gce_network': net,
- # Hosts don't always have a public IP name
- #'ansible_ssh_host': inst.public_ips[0]
+ # Hosts may not have a public IP, so fall back to the private IP
+ 'ansible_ssh_host': inst.public_ips[0] if len(inst.public_ips) >= 1 else inst.private_ips[0]
}
def get_instance(self, instance_name):
'''Gets details about a specific instance '''
try:
return self.driver.ex_get_node(instance_name)
- except Exception, e:
+ except Exception as e:
return None
def group_instances(self):
@@ -250,7 +257,10 @@ class GceInventory(object):
tags = node.extra['tags']
for t in tags:
- tag = 'tag_%s' % t
+ if t.startswith('group-'):
+ tag = t[6:]
+ else:
+ tag = 'tag_%s' % t
if groups.has_key(tag): groups[tag].append(name)
else: groups[tag] = [name]
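
The tag handling change above is small but subtle: a 'group-' prefix now names the inventory group directly, while any other tag keeps the legacy 'tag_' prefix. A minimal sketch of that mapping (Python 2; the tag values are invented):

# Sketch: the tag-to-group mapping implemented above.
def tag_to_group(tag):
    if tag.startswith('group-'):
        return tag[len('group-'):]
    return 'tag_%s' % tag

assert tag_to_group('group-masters') == 'masters'
assert tag_to_group('infra') == 'tag_infra'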
diff --git a/inventory/multi_ec2.yaml.example b/inventory/multi_ec2.yaml.example
deleted file mode 100644
index bbd81ad20..000000000
--- a/inventory/multi_ec2.yaml.example
+++ /dev/null
@@ -1,32 +0,0 @@
-# multi ec2 inventory configs
-#
-cache_location: ~/.ansible/tmp/multi_ec2_inventory.cache
-
-accounts:
- - name: aws1
- provider: aws/hosts/ec2.py
- provider_config:
- ec2:
- regions: all
- regions_exclude: us-gov-west-1,cn-north-1
- destination_variable: public_dns_name
- route53: False
- cache_path: ~/.ansible/tmp
- cache_max_age: 300
- vpc_destination_variable: ip_address
- env_vars:
- AWS_ACCESS_KEY_ID: XXXXXXXXXXXXXXXXXXXX
- AWS_SECRET_ACCESS_KEY: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
- all_group: ec2
- extra_vars:
- cloud: aws
- account: aws1
-
-- name: aws2
- provider: aws/hosts/ec2.py
- env_vars:
- AWS_ACCESS_KEY_ID: XXXXXXXXXXXXXXXXXXXX
- AWS_SECRET_ACCESS_KEY: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
- EC2_INI_PATH: /etc/ansible/ec2.ini
-
-cache_max_age: 60
diff --git a/inventory/multi_ec2.py b/inventory/multi_inventory.py
index 98dde3f3c..354a8c10c 100755
--- a/inventory/multi_ec2.py
+++ b/inventory/multi_inventory.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python2
'''
- Fetch and combine multiple ec2 account settings into a single
+ Fetch and combine multiple inventory account settings into a single
json hash.
'''
# vim: expandtab:tabstop=4:shiftwidth=4
@@ -15,13 +15,19 @@ import errno
import fcntl
import tempfile
import copy
+from string import Template
+import shutil
-CONFIG_FILE_NAME = 'multi_ec2.yaml'
-DEFAULT_CACHE_PATH = os.path.expanduser('~/.ansible/tmp/multi_ec2_inventory.cache')
+CONFIG_FILE_NAME = 'multi_inventory.yaml'
+DEFAULT_CACHE_PATH = os.path.expanduser('~/.ansible/tmp/multi_inventory.cache')
-class MultiEc2(object):
+class MultiInventoryException(Exception):
+ '''Exceptions for MultiInventory class'''
+ pass
+
+class MultiInventory(object):
'''
- MultiEc2 class:
+ MultiInventory class:
Opens a yaml config file and reads aws credentials.
Stores a json hash of resources in result.
'''
@@ -35,7 +41,7 @@ class MultiEc2(object):
self.cache_path = DEFAULT_CACHE_PATH
self.config = None
- self.all_ec2_results = {}
+ self.all_inventory_results = {}
self.result = {}
self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
@@ -56,7 +62,7 @@ class MultiEc2(object):
cache is valid for the inventory.
if the cache is valid; return cache
- else the credentials are loaded from multi_ec2.yaml or from the env
+ else the credentials are loaded from multi_inventory.yaml or from the env
and we attempt to get the inventory from the provider specified.
'''
# load yaml
@@ -111,6 +117,10 @@ class MultiEc2(object):
with open(conf_file) as conf:
config = yaml.safe_load(conf)
+ # Provide a check for unique account names
+ if len(set([acc['name'] for acc in config['accounts']])) != len(config['accounts']):
+ raise MultiInventoryException('Duplicate account names in config file')
+
return config
def get_provider_tags(self, provider, env=None):
@@ -136,23 +146,25 @@ class MultiEc2(object):
else:
cmds.append('--list')
- cmds.append('--refresh-cache')
+ if 'aws' in provider.lower():
+ cmds.append('--refresh-cache')
return subprocess.Popen(cmds, stderr=subprocess.PIPE, \
stdout=subprocess.PIPE, env=env)
@staticmethod
- def generate_config(config_data):
- """Generate the ec2.ini file in as a secure temp file.
- Once generated, pass it to the ec2.py as an environment variable.
+ def generate_config(provider_files):
+ """Generate the provider_files in a temporary directory.
"""
- fildes, tmp_file_path = tempfile.mkstemp(prefix='multi_ec2.ini.')
- for section, values in config_data.items():
- os.write(fildes, "[%s]\n" % section)
- for option, value in values.items():
- os.write(fildes, "%s = %s\n" % (option, value))
- os.close(fildes)
- return tmp_file_path
+ prefix = 'multi_inventory.'
+ tmp_dir_path = tempfile.mkdtemp(prefix=prefix)
+ for provider_file in provider_files:
+ filedes = open(os.path.join(tmp_dir_path, provider_file['name']), 'w+')
+ content = Template(provider_file['contents']).substitute(tmpdir=tmp_dir_path)
+ filedes.write(content)
+ filedes.close()
+
+ return tmp_dir_path
def run_provider(self):
'''Setup the provider call with proper variables
@@ -160,13 +172,21 @@ class MultiEc2(object):
'''
try:
all_results = []
- tmp_file_paths = []
+ tmp_dir_paths = []
processes = {}
for account in self.config['accounts']:
- env = account['env_vars']
- if account.has_key('provider_config'):
- tmp_file_paths.append(MultiEc2.generate_config(account['provider_config']))
- env['EC2_INI_PATH'] = tmp_file_paths[-1]
+ tmp_dir = None
+ if account.has_key('provider_files'):
+ tmp_dir = MultiInventory.generate_config(account['provider_files'])
+ tmp_dir_paths.append(tmp_dir)
+
+ # Update env vars after creating provider_files
+ # so that we can grab the tmp_dir if it exists
+ env = account.get('env_vars', {})
+ if env and tmp_dir:
+ for key, value in env.items():
+ env[key] = Template(value).substitute(tmpdir=tmp_dir)
+
name = account['name']
provider = account['provider']
processes[name] = self.get_provider_tags(provider, env)
@@ -182,9 +202,9 @@ class MultiEc2(object):
})
finally:
- # Clean up the mkstemp file
- for tmp_file in tmp_file_paths:
- os.unlink(tmp_file)
+ # Clean up the mkdtemp dirs
+ for tmp_dir in tmp_dir_paths:
+ shutil.rmtree(tmp_dir)
return all_results
@@ -223,7 +243,7 @@ class MultiEc2(object):
]
raise RuntimeError('\n'.join(err_msg).format(**result))
else:
- self.all_ec2_results[result['name']] = json.loads(result['out'])
+ self.all_inventory_results[result['name']] = json.loads(result['out'])
# Check if user wants extra vars in yaml by
# having hostvars and all_group defined
@@ -231,29 +251,52 @@ class MultiEc2(object):
self.apply_account_config(acc_config)
# Build results by merging all dictionaries
- values = self.all_ec2_results.values()
+ values = self.all_inventory_results.values()
values.insert(0, self.result)
for result in values:
- MultiEc2.merge_destructively(self.result, result)
+ MultiInventory.merge_destructively(self.result, result)
+
+ def add_entry(self, data, keys, item):
+ ''' Add an item to a dictionary with key notation a.b.c
+ d = {'a': {'b': 'c'}}
+ keys = a.b
+ item = c
+ '''
+ if "." in keys:
+ key, rest = keys.split(".", 1)
+ if key not in data:
+ data[key] = {}
+ self.add_entry(data[key], rest, item)
+ else:
+ data[keys] = item
+
+ def get_entry(self, data, keys):
+ ''' Get an item from a dictionary with key notation a.b.c
+ d = {'a': {'b': 'c'}}
+ keys = a.b
+ return c
+ '''
+ if keys and "." in keys:
+ key, rest = keys.split(".", 1)
+ return self.get_entry(data[key], rest)
+ else:
+ return data.get(keys, None)
def apply_account_config(self, acc_config):
''' Apply account config settings
'''
- results = self.all_ec2_results[acc_config['name']]
+ results = self.all_inventory_results[acc_config['name']]
+ results['all_hosts'] = results['_meta']['hostvars'].keys()
# Update each hostvar with the newly desired key: value from extra_*
- for _extra in ['extra_groups', 'extra_vars']:
+ for _extra in ['extra_vars', 'extra_groups']:
for new_var, value in acc_config.get(_extra, {}).items():
- # Verify the account results look sane
- # by checking for these keys ('_meta' and 'hostvars' exist)
- if results.has_key('_meta') and results['_meta'].has_key('hostvars'):
- for data in results['_meta']['hostvars'].values():
- data[str(new_var)] = str(value)
+ for data in results['_meta']['hostvars'].values():
+ self.add_entry(data, new_var, value)
# Add this group
- if _extra == 'extra_groups' and results.has_key(acc_config['all_group']):
- results["%s_%s" % (new_var, value)] = \
- copy.copy(results[acc_config['all_group']])
+ if _extra == 'extra_groups':
+ results["%s_%s" % (new_var, value)] = copy.copy(results['all_hosts'])
# Clone groups goes here
for to_name, from_name in acc_config.get('clone_groups', {}).items():
@@ -262,14 +305,11 @@ class MultiEc2(object):
# Clone vars goes here
for to_name, from_name in acc_config.get('clone_vars', {}).items():
- # Verify the account results look sane
- # by checking for these keys ('_meta' and 'hostvars' exist)
- if results.has_key('_meta') and results['_meta'].has_key('hostvars'):
- for data in results['_meta']['hostvars'].values():
- data[str(to_name)] = data.get(str(from_name), 'nil')
+ for data in results['_meta']['hostvars'].values():
+ self.add_entry(data, to_name, self.get_entry(data, from_name))
- # store the results back into all_ec2_results
- self.all_ec2_results[acc_config['name']] = results
+ # store the results back into all_inventory_results
+ self.all_inventory_results[acc_config['name']] = results
@staticmethod
def merge_destructively(input_a, input_b):
@@ -277,7 +317,7 @@ class MultiEc2(object):
for key in input_b:
if key in input_a:
if isinstance(input_a[key], dict) and isinstance(input_b[key], dict):
- MultiEc2.merge_destructively(input_a[key], input_b[key])
+ MultiInventory.merge_destructively(input_a[key], input_b[key])
elif input_a[key] == input_b[key]:
pass # same leaf value
# both lists so add each element in b to a if it does ! exist
@@ -333,7 +373,7 @@ class MultiEc2(object):
if exc.errno != errno.EEXIST or not os.path.isdir(path):
raise
- json_data = MultiEc2.json_format_dict(self.result, True)
+ json_data = MultiInventory.json_format_dict(self.result, True)
with open(self.cache_path, 'w') as cache:
try:
fcntl.flock(cache, fcntl.LOCK_EX)
@@ -369,7 +409,7 @@ class MultiEc2(object):
if __name__ == "__main__":
- MEC2 = MultiEc2()
- MEC2.parse_cli_args()
- MEC2.run()
- print MEC2.result_str()
+ MI2 = MultiInventory()
+ MI2.parse_cli_args()
+ MI2.run()
+ print MI2.result_str()
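
The new add_entry/get_entry helpers give extra_vars and clone_vars dotted-key access into nested hostvars. A self-contained sketch of the same logic (Python 2), with the docstring example worked through:

# Sketch: standalone version of the dotted-key helpers added above.
def add_entry(data, keys, item):
    '''Set item under dotted keys, e.g. keys='a.b' -> data['a']['b'] = item.'''
    if '.' in keys:
        key, rest = keys.split('.', 1)
        data.setdefault(key, {})
        add_entry(data[key], rest, item)
    else:
        data[keys] = item

def get_entry(data, keys):
    '''Fetch the item under dotted keys; None when the leaf is absent.'''
    if keys and '.' in keys:
        key, rest = keys.split('.', 1)
        return get_entry(data[key], rest)
    return data.get(keys, None)

d = {}
add_entry(d, 'a.b', 'c')
assert d == {'a': {'b': 'c'}}
assert get_entry(d, 'a.b') == 'c'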
diff --git a/inventory/multi_inventory.yaml.example b/inventory/multi_inventory.yaml.example
new file mode 100644
index 000000000..0f0788d18
--- /dev/null
+++ b/inventory/multi_inventory.yaml.example
@@ -0,0 +1,51 @@
+# multi inventory configs
+#
+cache_location: ~/.ansible/tmp/multi_inventory.cache
+
+accounts:
+ - name: aws1
+ provider: aws/ec2.py
+ provider_files:
+ - name: ec2.ini
+ contents: |-
+ [ec2]
+ regions = all
+ regions_exclude = us-gov-west-1,cn-north-1
+ destination_variable = public_dns_name
+ route53 = False
+ cache_path = ~/.ansible/tmp
+ cache_max_age = 300
+ vpc_destination_variable = ip_address
+ env_vars:
+ AWS_ACCESS_KEY_ID: XXXXXXXXXXXXXXXXXXXX
+ AWS_SECRET_ACCESS_KEY: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ EC2_INI_PATH: ${tmpdir}/ec2.ini # we replace ${tmpdir} with the temporary directory that we've created for the provider.
+ extra_vars:
+ cloud: aws
+ account: aws1
+
+- name: mygce
+ extra_vars:
+ cloud: gce
+ account: gce1
+ env_vars:
+ GCE_INI_PATH: ${tmpdir}/gce.ini # we replace ${tmpdir} with the temporary directory that we've created for the provider.
+ provider: gce/gce.py
+ provider_files:
+ - name: priv_key.pem
+ contents: |-
+ -----BEGIN PRIVATE KEY-----
+ yourprivatekeydatahere
+ -----END PRIVATE KEY-----
+ - name: gce.ini
+ contents: |-
+ [gce]
+ gce_service_account_email_address = <uuid>@developer.gserviceaccount.com
+ gce_service_account_pem_file_path = ${tmpdir}/priv_key.pem # we replace ${tmpdir} with the temporary directory that we've created for the provider.
+ gce_project_id = gce-project
+ zone = us-central1-a
+ network = default
+ gce_machine_type = n1-standard-2
+ gce_machine_image = rhel7
+
+cache_max_age: 600
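
The ${tmpdir} placeholders above are expanded with string.Template when generate_config writes the provider files out; env_vars get the same substitution. A minimal sketch of that expansion (Python 2; the temporary path is invented, the real one comes from tempfile.mkdtemp):

# Sketch: how ${tmpdir} in env_vars/provider_files is expanded.
from string import Template

tmp_dir = '/tmp/multi_inventory.abc123'  # invented; really from tempfile.mkdtemp()
env = {'GCE_INI_PATH': '${tmpdir}/gce.ini'}

for key, value in env.items():
    env[key] = Template(value).substitute(tmpdir=tmp_dir)

assert env['GCE_INI_PATH'] == '/tmp/multi_inventory.abc123/gce.ini'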
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index d034e6d84..8b69c4926 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -5,7 +5,7 @@
}
Name: openshift-ansible
-Version: 3.0.6
+Version: 3.0.9
Release: 1%{?dist}
Summary: Openshift and Atomic Enterprise Ansible
License: ASL 2.0
@@ -47,9 +47,9 @@ cp -pP bin/openshift_ansible/* %{buildroot}%{python_sitelib}/openshift_ansible
cp -p bin/ossh_bash_completion %{buildroot}/etc/bash_completion.d
cp -p bin/openshift_ansible.conf.example %{buildroot}/etc/openshift_ansible/openshift_ansible.conf
# Fix links
-rm -f %{buildroot}%{python_sitelib}/openshift_ansible/multi_ec2.py
+rm -f %{buildroot}%{python_sitelib}/openshift_ansible/multi_inventory.py
rm -f %{buildroot}%{python_sitelib}/openshift_ansible/aws
-ln -sf %{_datadir}/ansible/inventory/multi_ec2.py %{buildroot}%{python_sitelib}/openshift_ansible/multi_ec2.py
+ln -sf %{_datadir}/ansible/inventory/multi_inventory.py %{buildroot}%{python_sitelib}/openshift_ansible/multi_inventory.py
ln -sf %{_datadir}/ansible/inventory/aws %{buildroot}%{python_sitelib}/openshift_ansible/aws
# openshift-ansible-docs install
@@ -60,8 +60,8 @@ mkdir -p %{buildroot}/etc/ansible
mkdir -p %{buildroot}%{_datadir}/ansible/inventory
mkdir -p %{buildroot}%{_datadir}/ansible/inventory/aws
mkdir -p %{buildroot}%{_datadir}/ansible/inventory/gce
-cp -p inventory/multi_ec2.py %{buildroot}%{_datadir}/ansible/inventory
-cp -p inventory/multi_ec2.yaml.example %{buildroot}/etc/ansible/multi_ec2.yaml
+cp -p inventory/multi_inventory.py %{buildroot}%{_datadir}/ansible/inventory
+cp -p inventory/multi_inventory.yaml.example %{buildroot}/etc/ansible/multi_inventory.yaml
cp -p inventory/aws/hosts/ec2.py %{buildroot}%{_datadir}/ansible/inventory/aws
cp -p inventory/gce/hosts/gce.py %{buildroot}%{_datadir}/ansible/inventory/gce
@@ -82,6 +82,8 @@ pushd utils
%{__python} setup.py install --skip-build --root %{buildroot}
# Remove this line once the name change has happened
mv -f %{buildroot}%{_bindir}/oo-install %{buildroot}%{_bindir}/atomic-openshift-installer
+mkdir -p %{buildroot}%{_datadir}/atomic-openshift-utils/
+cp etc/ansible.cfg %{buildroot}%{_datadir}/atomic-openshift-utils/ansible.cfg
popd
# Base openshift-ansible files
@@ -104,6 +106,7 @@ Scripts to make it nicer when working with hosts that are defined only by metada
%files bin
%{_bindir}/*
+%exclude %{_bindir}/atomic-openshift-installer
%{python_sitelib}/openshift_ansible/
/etc/bash_completion.d/*
%config(noreplace) /etc/openshift_ansible/
@@ -137,7 +140,7 @@ Ansible Inventories used with the openshift-ansible scripts and playbooks.
%files inventory
%config(noreplace) /etc/ansible/*
%dir %{_datadir}/ansible/inventory
-%{_datadir}/ansible/inventory/multi_ec2.py*
+%{_datadir}/ansible/inventory/multi_inventory.py*
%package inventory-aws
Summary: Openshift and Atomic Enterprise Ansible Inventories for AWS
Ansible Inventories for GCE used with the openshift-ansible scripts and playbooks.
%package playbooks
Summary: Openshift and Atomic Enterprise Ansible Playbooks
Requires: %{name}
+Requires: %{name}-roles
+Requires: %{name}-lookup-plugins
+Requires: %{name}-filter-plugins
BuildArch: noarch
%description playbooks
@@ -185,6 +191,8 @@ BuildArch: noarch
%package roles
Summary: Openshift and Atomic Enterprise Ansible roles
Requires: %{name}
+Requires: %{name}-lookup-plugins
+Requires: %{name}-filter-plugins
BuildArch: noarch
%description roles
@@ -246,9 +254,211 @@ Atomic OpenShift Utilities includes
%files -n atomic-openshift-utils
%{python_sitelib}/ooinstall*
%{_bindir}/atomic-openshift-installer
+%{_datadir}/atomic-openshift-utils/ansible.cfg
%changelog
+* Wed Nov 11 2015 Brenton Leanhardt <bleanhar@redhat.com> 3.0.9-1
+- Refactor upgrade playbook(s) (jdetiber@redhat.com)
+
+* Tue Nov 10 2015 Scott Dodson <sdodson@redhat.com> 3.0.8-1
+- Add origin-clients to uninstall playbook. (abutcher@redhat.com)
+- examples: include logging and metrics infrastructure (lmeyer@redhat.com)
+- Add separate step to enable services during upgrade. (dgoodwin@redhat.com)
+- Update tests now that cli is not asking for rpm/container install
+ (smunilla@redhat.com)
+- atomic-openshift-installer: Remove question for container install
+ (smunilla@redhat.com)
+- Remove references to multi_ec2.py (jdetiber@redhat.com)
+- 1279746: Fix leftover disabled features line in config template.
+ (dgoodwin@redhat.com)
+- 1279734: Ensure services are enabled after upgrade. (dgoodwin@redhat.com)
+- Fix missing etcd_data_dir bug. (dgoodwin@redhat.com)
+- Package the default ansible.cfg with atomic-openshift-utils.
+ (dgoodwin@redhat.com)
+- Add ldap auth identity provider to example inventory. (abutcher@redhat.com)
+- Read etcd data dir from appropriate config file. (dgoodwin@redhat.com)
+- atomic-openshift-installer: Generate inventory off hosts_to_run_on
+ (smunilla@redhat.com)
+- Various fixes related to connect_to (bleanhar@redhat.com)
+- Remove upgrade playbook restriction on 3.0.2. (dgoodwin@redhat.com)
+- Conditionals for flannel etcd client certs. (abutcher@redhat.com)
+- New `iptablesSyncPeriod` field in node configuration (abutcher@redhat.com)
+- Fix indentation on when (jdetiber@redhat.com)
+- Bug 1278863 - Error using openshift_pkg_version (jdetiber@redhat.com)
+- more cleanup of names (mwoodson@redhat.com)
+- Missing conditionals for api/controller sysconfig. (abutcher@redhat.com)
+- Updating the atomic-openshift-isntaller local connection logic for the
+ connect_to addition. (bleanhar@redhat.com)
+- cleaned up network checks (mwoodson@redhat.com)
+- Minor upgrade improvements. (dgoodwin@redhat.com)
+- Wait for cluster to recover after pcs resource restart. (abutcher@redhat.com)
+- Bug 1278245 - Failed to add node to existing env using atomic-openshift-
+ installer (bleanhar@redhat.com)
+- remove debug statement (jdetiber@redhat.com)
+- Fix removal of kubernetesMasterConfig.apiLevels (jdetiber@redhat.com)
+- atomic-openshift-installer: Better specification of ansible connection point
+ (smunilla@redhat.com)
+- Fix issues related to upgrade packages being unavailable
+ (jdetiber@redhat.com)
+- added network checks. also updated item prototype code to support more
+ (mwoodson@redhat.com)
+- Fix data_dir for 3.0 deployments (jdetiber@redhat.com)
+- Fix apiLevels modifications (jdetiber@redhat.com)
+- Fix creation of origin symlink when dir already exists. (dgoodwin@redhat.com)
+- apiLevel changes (jdetiber@redhat.com)
+- Write new config to disk after successful upgrade. (dgoodwin@redhat.com)
+- Fix pylint errors with getting hosts to run on. (dgoodwin@redhat.com)
+- Remove v1beta3 by default for kube_nfs_volumes (jdetiber@redhat.com)
+- Add pre-upgrade script to be run on first master. (dgoodwin@redhat.com)
+- Start to handle pacemaker ha during upgrade (abutcher@redhat.com)
+- Fix lb group related errors (jdetiber@redhat.com)
+- Fix file check conditional. (abutcher@redhat.com)
+- Don't check for certs in data_dir just raise when they can't be found. Fix
+ typo. (abutcher@redhat.com)
+- exclude atomic-openshift-installer from bin subpackage (tdawson@redhat.com)
+- add master_hostnames definition for upgrade (jdetiber@redhat.com)
+- Additional upgrade enhancements (jdetiber@redhat.com)
+- Handle backups for separate etcd hosts if necessary. (dgoodwin@redhat.com)
+- Further upgrade improvements (jdetiber@redhat.com)
+- Upgrade improvements (dgoodwin@redhat.com)
+- Bug 1278243 - Confusing prompt from atomic-openshift-installer
+ (bleanhar@redhat.com)
+- Bug 1278244 - Previously there was no way to add a node in unattended mode
+ (bleanhar@redhat.com)
+- Revert to defaults (abutcher@redhat.com)
+- Bug 1278244 - Incorrect node information gathered by atomic-openshift-
+ installer (bleanhar@redhat.com)
+- atomic-openshift-installer's unattended mode wasn't work with --force for all
+ cases (bleanhar@redhat.com)
+- Making it easier to use pre-release content (bleanhar@redhat.com)
+- The uninstall playbook needs to remove /run/openshift-sdn
+ (bleanhar@redhat.com)
+- Various HA changes for pacemaker and native methods. (abutcher@redhat.com)
+- Bug 1274201 - Fixing non-root installations if using a local connection
+ (bleanhar@redhat.com)
+- Bug 1274201 - Fixing sudo non-interactive test (bleanhar@redhat.com)
+- Bug 1277592 - SDN MTU has hardcoded default (jdetiber@redhat.com)
+- Atomic Enterprise/OpenShift Enterprise merge update (jdetiber@redhat.com)
+- fix dueling controllers - without controllerLeaseTTL set in config, multiple
+ controllers will attempt to start (jdetiber@redhat.com)
+- default to source persistence for haproxy (jdetiber@redhat.com)
+- hardcode openshift binaries for now (jdetiber@redhat.com)
+- more tweaks (jdetiber@redhat.com)
+- more tweaks (jdetiber@redhat.com)
+- additional ha related updates (jdetiber@redhat.com)
+- additional native ha changes (abutcher@redhat.com)
+- Start of true master ha (jdetiber@redhat.com)
+- Atomic Enterprise related changes. (avagarwa@redhat.com)
+- Remove pacemaker bits. (abutcher@redhat.com)
+- Override hosts deployment_type fact for version we're upgrading to.
+ (dgoodwin@redhat.com)
+- Pylint fixes for config upgrade module. (dgoodwin@redhat.com)
+- Disable proxy cert config upgrade until certs being generated.
+ (dgoodwin@redhat.com)
+- remove debug line (florian.lambert@enovance.com)
+- [roles/openshift_master_certificates/tasks/main.yml] Fix variable
+ openshift.master.all_hostnames to openshift.common.all_hostnames
+ (florian.lambert@enovance.com)
+- Fix bug with not upgrading openshift-master to atomic-openshift-master.
+ (dgoodwin@redhat.com)
+- Adding aws and gce packages to ansible-inventory (kwoodson@redhat.com)
+- Fix subpackage dependencies (jdetiber@redhat.com)
+- Refactor common group evaluation to avoid duplication (jdetiber@redhat.com)
+- common/openshift-cluster: Scaleup playbook (smunilla@redhat.com)
+- Fix bug from module rename. (dgoodwin@redhat.com)
+- Fix bug with default ansible playbook dir. (dgoodwin@redhat.com)
+- Use the base package upgrade version so we can check things earlier.
+ (dgoodwin@redhat.com)
+- Skip fail if enterprise deployment type depending on version.
+ (dgoodwin@redhat.com)
+- Add debug output for location of etcd backup. (dgoodwin@redhat.com)
+- Filter internal hostnames from the list of parsed names.
+ (abutcher@redhat.com)
+- Move config upgrade to correct place, fix node facts. (dgoodwin@redhat.com)
+- Add custom certificates to serving info in master configuration.
+ (abutcher@redhat.com)
+- Add in proxyClientInfo if missing during config upgrade.
+ (dgoodwin@redhat.com)
+- Implement master-config.yaml upgrade for v1beta3 apiLevel removal.
+ (dgoodwin@redhat.com)
+- Fix installer upgrade bug following pylint fix. (dgoodwin@redhat.com)
+- Document the new version field for installer config. (dgoodwin@redhat.com)
+- Remove my username from some test data. (dgoodwin@redhat.com)
+- Add a simple version for the installer config file. (dgoodwin@redhat.com)
+- Pylint fix. (dgoodwin@redhat.com)
+- Fix issue with master.proxy-client.{crt,key} and omit. (abutcher@redhat.com)
+- initial module framework (jdetiber@redhat.com)
+- Better info prior to initiating upgrade. (dgoodwin@redhat.com)
+- Fix etcd backup bug with not-yet-created /var/lib/origin symlink
+ (dgoodwin@redhat.com)
+- Print info after upgrade completes. (dgoodwin@redhat.com)
+- Automatically upgrade legacy config files. (dgoodwin@redhat.com)
+- Remove devel fail and let upgrade proceed. (dgoodwin@redhat.com)
+- Add utils subpackage missing dep on openshift-ansible-roles.
+ (dgoodwin@redhat.com)
+- Generate timestamped etcd backups. (dgoodwin@redhat.com)
+- Add etcd_data_dir fact. (dgoodwin@redhat.com)
+- Functional disk space checking for etcd backup. (dgoodwin@redhat.com)
+- First cut at checking available disk space for etcd backup.
+ (dgoodwin@redhat.com)
+- Block upgrade if targetting enterprise deployment type. (dgoodwin@redhat.com)
+- Change flannel registration default values (sbaubeau@redhat.com)
+- Remove empty notify section (sbaubeau@redhat.com)
+- Check etcd certs exist for flannel when its support is enabled
+ (sbaubeau@redhat.com)
+- Fix when neither use_openshift_sdn nor use_flannel are specified
+ (sbaubeau@redhat.com)
+- Generate etcd certificats for flannel when is not embedded
+ (sbaubeau@redhat.com)
+- Add missing 2nd true parameters to default Jinja filter (sbaubeau@redhat.com)
+- Use 'command' module instead of 'shell' (sbaubeau@redhat.com)
+- Add flannel modules documentation (sbaubeau@redhat.com)
+- Only remove IPv4 address from docker bridge (sbaubeau@redhat.com)
+- Remove multiple use_flannel fact definition (sbaubeau@redhat.com)
+- Ensure openshift-sdn and flannel can't be used at the same time
+ (sbaubeau@redhat.com)
+- Add flannel support (sbaubeau@redhat.com)
+
+* Wed Nov 04 2015 Kenny Woodson <kwoodson@redhat.com> 3.0.7-1
+- added the %%util in zabbix (mwoodson@redhat.com)
+- atomic-openshift-installer: Correct default playbook directory
+ (smunilla@redhat.com)
+- Support for gce (kwoodson@redhat.com)
+- fixed a dumb naming mistake (mwoodson@redhat.com)
+- added disk tps checks to zabbix (mwoodson@redhat.com)
+- atomic-openshift-installer: Correct inaccurate prompt (smunilla@redhat.com)
+- atomic-openshift-installer: Add default openshift-ansible-playbook
+ (smunilla@redhat.com)
+- ooinstall: Add check for nopwd sudo (smunilla@redhat.com)
+- ooinstall: Update local install check (smunilla@redhat.com)
+- oo-install: Support running on the host to be deployed (smunilla@redhat.com)
+- Moving to Openshift Etcd application (mmahut@redhat.com)
+- Add all the possible servicenames to openshift_all_hostnames for masters
+ (sdodson@redhat.com)
+- Adding openshift.node.etcd items (mmahut@redhat.com)
+- Fix etcd cert generation when etcd_interface is defined (jdetiber@redhat.com)
+- get zabbix ready to start tracking status of pcp (jdiaz@redhat.com)
+- split inventory into subpackages (tdawson@redhat.com)
+- changed the cpu alert to only alert if cpu idle more than 5x. Change alert to
+ warning (mwoodson@redhat.com)
+- Rename install_transactions module to openshift_ansible.
+ (dgoodwin@redhat.com)
+- atomic-openshift-installer: Text improvements (smunilla@redhat.com)
+- Add utils subpackage missing dep on openshift-ansible-roles.
+ (dgoodwin@redhat.com)
+- Disable requiretty for only the openshift user (error@ioerror.us)
+- Don't require tty to run sudo (error@ioerror.us)
+- Attempt to remove the various interfaces left over from an install
+ (bleanhar@redhat.com)
+- Pulling latest gce.py module from ansible (kwoodson@redhat.com)
+- Disable OpenShift features if installing Atomic Enterprise
+ (jdetiber@redhat.com)
+- Use default playbooks if available. (dgoodwin@redhat.com)
+- Add uninstall subcommand. (dgoodwin@redhat.com)
+- Add subcommands to CLI. (dgoodwin@redhat.com)
+- Remove images options in oadm command (nakayamakenjiro@gmail.com)
+
* Fri Oct 30 2015 Kenny Woodson <kwoodson@redhat.com> 3.0.6-1
- Adding python-boto and python-libcloud to openshift-ansible-inventory
dependency (kwoodson@redhat.com)
diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml
index 0503b7cd4..e0dbad900 100644
--- a/playbooks/adhoc/uninstall.yml
+++ b/playbooks/adhoc/uninstall.yml
@@ -45,6 +45,7 @@
- origin-master-api
- origin-master-controllers
- origin-node
+ - pcsd
- yum: name={{ item }} state=absent
when: not is_atomic | bool
@@ -58,6 +59,7 @@
- atomic-openshift-master
- atomic-openshift-node
- atomic-openshift-sdn-ovs
+ - corosync
- etcd
- openshift
- openshift-master
@@ -66,9 +68,12 @@
- openshift-sdn-ovs
- openvswitch
- origin
+ - origin-clients
- origin-master
- origin-node
- origin-sdn-ovs
+ - pacemaker
+ - pcs
- tuned-profiles-atomic-enterprise-node
- tuned-profiles-atomic-openshift-node
- tuned-profiles-openshift-node
@@ -136,8 +141,10 @@
- file: path={{ item }} state=absent
with_items:
+ - "~{{ ansible_ssh_user }}/.kube"
- /etc/ansible/facts.d/openshift.fact
- /etc/atomic-enterprise
+ - /etc/corosync
- /etc/etcd
- /etc/openshift
- /etc/openshift-sdn
@@ -151,9 +158,13 @@
- /etc/sysconfig/origin-master
- /etc/sysconfig/origin-node
- /root/.kube
- - "~{{ ansible_ssh_user }}/.kube"
+ - /run/openshift-sdn
- /usr/share/openshift/examples
- /var/lib/atomic-enterprise
- /var/lib/etcd
- /var/lib/openshift
- /var/lib/origin
+ - /var/lib/pacemaker
+
+ - name: restart docker
+ service: name=docker state=restarted
diff --git a/playbooks/adhoc/upgrades/filter_plugins b/playbooks/adhoc/upgrades/filter_plugins
deleted file mode 120000
index b0b7a3414..000000000
--- a/playbooks/adhoc/upgrades/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../filter_plugins/ \ No newline at end of file
diff --git a/playbooks/adhoc/upgrades/lookup_plugins b/playbooks/adhoc/upgrades/lookup_plugins
deleted file mode 120000
index 73cafffe5..000000000
--- a/playbooks/adhoc/upgrades/lookup_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../lookup_plugins/ \ No newline at end of file
diff --git a/playbooks/adhoc/upgrades/roles b/playbooks/adhoc/upgrades/roles
deleted file mode 120000
index e2b799b9d..000000000
--- a/playbooks/adhoc/upgrades/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../roles/ \ No newline at end of file
diff --git a/playbooks/aws/openshift-cluster/config.yml b/playbooks/aws/openshift-cluster/config.yml
index a8e3e27bb..5aa6b0f9b 100644
--- a/playbooks/aws/openshift-cluster/config.yml
+++ b/playbooks/aws/openshift-cluster/config.yml
@@ -11,6 +11,7 @@
- include: ../../common/openshift-cluster/config.yml
vars:
g_etcd_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-etcd' }}"
+ g_lb_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-lb' }}"
g_masters_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-master' }}"
g_nodes_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-node' }}"
g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}"
diff --git a/playbooks/aws/openshift-cluster/launch.yml b/playbooks/aws/openshift-cluster/launch.yml
index 786918929..09bf34666 100644
--- a/playbooks/aws/openshift-cluster/launch.yml
+++ b/playbooks/aws/openshift-cluster/launch.yml
@@ -11,7 +11,7 @@
msg: Deployment type not supported for aws provider yet
when: deployment_type == 'enterprise'
- - include: ../../common/openshift-cluster/set_etcd_launch_facts_tasks.yml
+ - include: ../../common/openshift-cluster/tasks/set_etcd_launch_facts.yml
- include: tasks/launch_instances.yml
vars:
instances: "{{ etcd_names }}"
@@ -19,7 +19,7 @@
type: "{{ k8s_type }}"
g_sub_host_type: "default"
- - include: ../../common/openshift-cluster/set_master_launch_facts_tasks.yml
+ - include: ../../common/openshift-cluster/tasks/set_master_launch_facts.yml
- include: tasks/launch_instances.yml
vars:
instances: "{{ master_names }}"
@@ -27,7 +27,7 @@
type: "{{ k8s_type }}"
g_sub_host_type: "default"
- - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+ - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
vars:
type: "compute"
count: "{{ num_nodes }}"
@@ -38,7 +38,7 @@
type: "{{ k8s_type }}"
g_sub_host_type: "{{ sub_host_type }}"
- - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+ - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
vars:
type: "infra"
count: "{{ num_infra }}"
diff --git a/playbooks/byo/openshift-cluster/config.yml b/playbooks/byo/openshift-cluster/config.yml
index 9e50a4a18..411c7e660 100644
--- a/playbooks/byo/openshift-cluster/config.yml
+++ b/playbooks/byo/openshift-cluster/config.yml
@@ -4,6 +4,7 @@
g_etcd_group: "{{ 'etcd' }}"
g_masters_group: "{{ 'masters' }}"
g_nodes_group: "{{ 'nodes' }}"
+ g_lb_group: "{{ 'lb' }}"
openshift_cluster_id: "{{ cluster_id | default('default') }}"
openshift_debug_level: 2
openshift_deployment_type: "{{ deployment_type }}"
diff --git a/playbooks/byo/openshift-cluster/upgrades/README.md b/playbooks/byo/openshift-cluster/upgrades/README.md
new file mode 100644
index 000000000..ce7aebf8e
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/README.md
@@ -0,0 +1,8 @@
+# Upgrade playbooks
+The playbooks provided in this directory can be used for upgrading an existing
+environment. Additional notes for the associated upgrade playbooks are
+provided in their respective directories.
+
+# Upgrades available
+- [OpenShift Enterprise 3.0 to latest minor release](v3_0_minor/README.md)
+- [OpenShift Enterprise 3.0 to 3.1](v3_0_to_v3_1/README.md)
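+
+For example, to run the minor upgrade against an existing inventory (the
+inventory path shown is illustrative):
+
+ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_0_minor/upgrade.yml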
diff --git a/playbooks/adhoc/upgrades/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_0_minor/README.md
index 6de8a970f..c91a6cb96 100644
--- a/playbooks/adhoc/upgrades/README.md
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_0_minor/README.md
@@ -1,11 +1,11 @@
-# [NOTE]
-This playbook will re-run installation steps overwriting any local
+# v3.0 minor upgrade playbook
+**Note:** This playbook will re-run installation steps, overwriting any local
modifications. You should ensure that your inventory has been updated with any
modifications you've made after your initial installation. If you find any items
that cannot be configured via ansible please open an issue at
https://github.com/openshift/openshift-ansible
-# Overview
+## Overview
This playbook is available as a technical preview. It currently performs the
following steps.
@@ -17,5 +17,5 @@ following steps.
* Updates the default registry if one exists
* Updates image streams and quickstarts
-# Usage
-ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/adhoc/upgrades/upgrade.yml
+## Usage
+ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_0_minor/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_0_minor/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_0_minor/upgrade.yml
new file mode 100644
index 000000000..76fa9ba22
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_0_minor/upgrade.yml
@@ -0,0 +1,9 @@
+---
+- include: ../../../../common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml
+ vars:
+ g_etcd_group: "{{ 'etcd' }}"
+ g_masters_group: "{{ 'masters' }}"
+ g_nodes_group: "{{ 'nodes' }}"
+ g_lb_group: "{{ 'lb' }}"
+ openshift_cluster_id: "{{ cluster_id | default('default') }}"
+ openshift_deployment_type: "{{ deployment_type }}"
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/README.md
new file mode 100644
index 000000000..c434be5b7
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/README.md
@@ -0,0 +1,17 @@
+# v3.0 to v3.1 upgrade playbook
+
+## Overview
+This playbook currently performs the
+following steps.
+
+**TODO: update for current steps**
+ * Upgrade and restart master services
+ * Upgrade and restart node services
+ * Modifies the subset of the configuration necessary
+ * Applies the latest cluster policies
+ * Updates the default router if one exists
+ * Updates the default registry if one exists
+ * Updates image streams and quickstarts
+
+## Usage
+ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
new file mode 100644
index 000000000..b06442366
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
@@ -0,0 +1,9 @@
+---
+- include: ../../../../common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
+ vars:
+ g_etcd_group: "{{ 'etcd' }}"
+ g_masters_group: "{{ 'masters' }}"
+ g_nodes_group: "{{ 'nodes' }}"
+ g_lb_group: "{{ 'lb' }}"
+ openshift_cluster_id: "{{ cluster_id | default('default') }}"
+ openshift_deployment_type: "{{ deployment_type }}"
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
index 4c74f96db..a8bd634d3 100644
--- a/playbooks/common/openshift-cluster/config.yml
+++ b/playbooks/common/openshift-cluster/config.yml
@@ -1,68 +1,5 @@
---
-- name: Populate config host groups
- hosts: localhost
- gather_facts: no
- tasks:
- - fail:
- msg: This playbook rquires g_etcd_group to be set
- when: g_etcd_group is not defined
-
- - fail:
- msg: This playbook rquires g_masters_group to be set
- when: g_masters_group is not defined
-
- - fail:
- msg: This playbook rquires g_nodes_group to be set
- when: g_nodes_group is not defined
-
- - name: Evaluate oo_etcd_to_config
- add_host:
- name: "{{ item }}"
- groups: oo_etcd_to_config
- ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_sudo: "{{ g_sudo | default(omit) }}"
- with_items: groups[g_etcd_group] | default([])
-
- - name: Evaluate oo_masters_to_config
- add_host:
- name: "{{ item }}"
- groups: oo_masters_to_config
- ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_sudo: "{{ g_sudo | default(omit) }}"
- with_items: groups[g_masters_group] | default([])
-
- - name: Evaluate oo_nodes_to_config
- add_host:
- name: "{{ item }}"
- groups: oo_nodes_to_config
- ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_sudo: "{{ g_sudo | default(omit) }}"
- with_items: groups[g_nodes_group] | default([])
-
- - name: Evaluate oo_nodes_to_config
- add_host:
- name: "{{ item }}"
- groups: oo_nodes_to_config
- ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_sudo: "{{ g_sudo | default(omit) }}"
- with_items: groups[g_masters_group] | default([])
- when: g_nodeonmaster is defined and g_nodeonmaster == true
-
- - name: Evaluate oo_first_etcd
- add_host:
- name: "{{ groups[g_etcd_group][0] }}"
- groups: oo_first_etcd
- ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_sudo: "{{ g_sudo | default(omit) }}"
- when: g_etcd_group in groups and (groups[g_etcd_group] | length) > 0
-
- - name: Evaluate oo_first_master
- add_host:
- name: "{{ groups[g_masters_group][0] }}"
- groups: oo_first_master
- ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_sudo: "{{ g_sudo | default(omit) }}"
- when: g_masters_group in groups and (groups[g_masters_group] | length) > 0
+- include: evaluate_groups.yml
- include: ../openshift-etcd/config.yml
@@ -71,4 +8,4 @@
- include: ../openshift-node/config.yml
vars:
osn_cluster_dns_domain: "{{ hostvars[groups.oo_first_master.0].openshift.dns.domain }}"
- osn_cluster_dns_ip: "{{ hostvars[groups.oo_first_master.0].openshift.dns.ip }}"
+ osn_cluster_dns_ip: "{{ hostvars[groups.oo_first_master.0].cluster_dns_ip }}"
diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml
new file mode 100644
index 000000000..2bb69614f
--- /dev/null
+++ b/playbooks/common/openshift-cluster/evaluate_groups.yml
@@ -0,0 +1,76 @@
+---
+- name: Populate config host groups
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - fail:
+ msg: This playbook requires g_etcd_group to be set
+ when: g_etcd_group is not defined
+
+ - fail:
+ msg: This playbook requires g_masters_group to be set
+ when: g_masters_group is not defined
+
+ - fail:
+ msg: This playbook requires g_nodes_group to be set
+ when: g_nodes_group is not defined
+
+ - fail:
+ msg: This playbook requires g_lb_group to be set
+ when: g_lb_group is not defined
+
+ - name: Evaluate oo_etcd_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_etcd_to_config
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_sudo: "{{ g_sudo | default(omit) }}"
+ with_items: groups[g_etcd_group] | default([])
+
+ - name: Evaluate oo_masters_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_masters_to_config
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_sudo: "{{ g_sudo | default(omit) }}"
+ with_items: groups[g_masters_group] | default([])
+
+ - name: Evaluate oo_nodes_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_nodes_to_config
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_sudo: "{{ g_sudo | default(omit) }}"
+ with_items: groups[g_nodes_group] | default([])
+
+ - name: Evaluate oo_nodes_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_nodes_to_config
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_sudo: "{{ g_sudo | default(omit) }}"
+ with_items: groups[g_masters_group] | default([])
+ when: g_nodeonmaster is defined and g_nodeonmaster == true
+
+ - name: Evaluate oo_first_etcd
+ add_host:
+ name: "{{ groups[g_etcd_group][0] }}"
+ groups: oo_first_etcd
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ when: g_etcd_group in groups and (groups[g_etcd_group] | length) > 0
+
+ - name: Evaluate oo_first_master
+ add_host:
+ name: "{{ groups[g_masters_group][0] }}"
+ groups: oo_first_master
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_sudo: "{{ g_sudo | default(omit) }}"
+ when: g_masters_group in groups and (groups[g_masters_group] | length) > 0
+
+ - name: Evaluate oo_lb_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_lb_to_config
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_sudo: "{{ g_sudo | default(omit) }}"
+ with_items: groups[g_lb_group] | default([])
diff --git a/playbooks/common/openshift-cluster/scaleup.yml b/playbooks/common/openshift-cluster/scaleup.yml
new file mode 100644
index 000000000..6d2777732
--- /dev/null
+++ b/playbooks/common/openshift-cluster/scaleup.yml
@@ -0,0 +1,16 @@
+---
+- include: evaluate_groups.yml
+ vars:
+ g_etcd_group: "{{ 'etcd' }}"
+ g_masters_group: "{{ 'masters' }}"
+ g_nodes_group: "{{ 'nodes' }}"
+ g_lb_group: "{{ 'lb' }}"
+ openshift_cluster_id: "{{ cluster_id | default('default') }}"
+ openshift_debug_level: 2
+ openshift_deployment_type: "{{ deployment_type }}"
+
+- include: ../openshift-node/config.yml
+ vars:
+ osn_cluster_dns_domain: "{{ hostvars[groups.oo_first_master.0].openshift.dns.domain }}"
+ osn_cluster_dns_ip: "{{ hostvars[groups.oo_first_master.0].openshift.dns.ip }}"
+ openshift_deployment_type: "{{ deployment_type }}"
diff --git a/playbooks/common/openshift-cluster/set_etcd_launch_facts_tasks.yml b/playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml
index 1a6580795..1a6580795 100644
--- a/playbooks/common/openshift-cluster/set_etcd_launch_facts_tasks.yml
+++ b/playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml
diff --git a/playbooks/common/openshift-cluster/set_master_launch_facts_tasks.yml b/playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml
index 36d7b7870..36d7b7870 100644
--- a/playbooks/common/openshift-cluster/set_master_launch_facts_tasks.yml
+++ b/playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml
diff --git a/playbooks/common/openshift-cluster/set_node_launch_facts_tasks.yml b/playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml
index 278942f8b..278942f8b 100644
--- a/playbooks/common/openshift-cluster/set_node_launch_facts_tasks.yml
+++ b/playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/files/pre-upgrade-check b/playbooks/common/openshift-cluster/upgrades/files/pre-upgrade-check
new file mode 100644
index 000000000..ed4ab6d1b
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/files/pre-upgrade-check
@@ -0,0 +1,188 @@
+#!/usr/bin/env python
+"""
+Pre-upgrade checks that must be run on a master before proceeding with upgrade.
+"""
+# This is a script not a python module:
+# pylint: disable=invalid-name
+
+# NOTE: This script should not require any python libs other than what is
+# in the standard library.
+
+__license__ = "ASL 2.0"
+
+import json
+import os
+import subprocess
+import re
+
+# The maximum length of container.ports.name
+ALLOWED_LENGTH = 15
+# The valid structure of container.ports.name
+ALLOWED_CHARS = re.compile('^[a-z0-9][a-z0-9\\-]*[a-z0-9]$')
+AT_LEAST_ONE_LETTER = re.compile('[a-z]')
+# Look at OC_PATH for the full path. Defaults to 'oc'
+OC_PATH = os.getenv('OC_PATH', 'oc')
+
+
+def validate(value):
+ """
+ validate verifies that value matches required conventions
+
+ Rules of container.ports.name validation:
+
+    * must be less than 16 chars
+    * at least one letter
+    * only a-z0-9-
+    * hyphens cannot be leading or trailing, or next to each other
+
+ :Parameters:
+ - `value`: Value to validate
+ """
+ if len(value) > ALLOWED_LENGTH:
+ return False
+
+ if '--' in value:
+ return False
+
+ # We search since it can be anywhere
+ if not AT_LEAST_ONE_LETTER.search(value):
+ return False
+
+ # We match because it must start at the beginning
+ if not ALLOWED_CHARS.match(value):
+ return False
+ return True
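+
+# A quick sketch of the rules above (port names here are illustrative):
+#   validate('web')      -> True   (simple lowercase name)
+#   validate('9090')     -> False  (no letter)
+#   validate('db--port') -> False  (adjacent hyphens)
+#   validate('-http')    -> False  (leading hyphen)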
+
+
+def list_items(kind):
+ """
+ list_items returns a list of items from the api
+
+ :Parameters:
+ - `kind`: Kind of item to access
+ """
+ response = subprocess.check_output([OC_PATH, 'get', '--all-namespaces', '-o', 'json', kind])
+ items = json.loads(response)
+ return items.get("items", [])
+
+
+def get(obj, *paths):
+ """
+    Walks obj along the given keys, returning [] if any key is missing
+
+    :Parameters:
+    - `obj`: A dictionary structure
+    - `paths`: The keys to traverse, given as non-keyword arguments
+ """
+ ret_obj = obj
+ for path in paths:
+ if ret_obj.get(path, None) is None:
+ return []
+ ret_obj = ret_obj[path]
+ return ret_obj
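+
+# For example (illustrative data): get(item, 'spec', 'containers') returns the
+# container list, while a missing key anywhere along the path yields [] rather
+# than raising a KeyError.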
+
+
+# pylint: disable=too-many-arguments
+def pretty_print_errors(namespace, kind, item_name, container_name, port_name, valid):
+ """
+ Prints out results in human friendly way.
+
+ :Parameters:
+ - `namespace`: Namespace of the resource
+ - `kind`: Kind of the resource
+ - `item_name`: Name of the resource
+ - `container_name`: Name of the container. May be "" when kind=Service.
+ - `port_name`: Name of the port
+ - `valid`: True if the port is valid
+ """
+ if not valid:
+ if len(container_name) > 0:
+ print('%s/%s -n %s (Container="%s" Port="%s")' % (
+ kind, item_name, namespace, container_name, port_name))
+ else:
+ print('%s/%s -n %s (Port="%s")' % (
+ kind, item_name, namespace, port_name))
+
+
+def print_validation_header():
+ """
+ Prints the error header. Should run on the first error to avoid
+ overwhelming the user.
+ """
+ print """\
+At least one port name does not validate. Valid port names:
+
+ * must be less than 16 chars
+ * have at least one letter
+ * only a-z0-9-
+ * do not start or end with -
+ * Dashes may not be next to each other ('--')
+"""
+
+
+def main():
+ """
+ main is the main entry point to this script
+ """
+ try:
+ # the comma at the end suppresses the newline
+ print "Checking for oc ...",
+ subprocess.check_output([OC_PATH, 'whoami'])
+ print "found"
+ except:
+ print(
+ 'Unable to run "%s whoami"\n'
+ 'Please ensure OpenShift is running, and "oc" is on your system '
+ 'path.\n'
+ 'You can override the path with the OC_PATH environment variable.'
+ % OC_PATH)
+ raise SystemExit(1)
+
+    # Validate container port names across the resource kinds below
+ first_error = True
+ for kind, path in [
+ ('replicationcontrollers', ("spec", "template", "spec", "containers")),
+ ('pods', ("spec", "containers")),
+ ('deploymentconfigs', ("spec", "template", "spec", "containers"))]:
+ for item in list_items(kind):
+ namespace = item["metadata"]["namespace"]
+ item_name = item["metadata"]["name"]
+ for container in get(item, *path):
+ container_name = container["name"]
+ for port in get(container, "ports"):
+ port_name = port.get("name", None)
+ if not port_name:
+ # Unnamed ports are OK
+ continue
+ valid = validate(port_name)
+ if not valid and first_error:
+ first_error = False
+ print_validation_header()
+ pretty_print_errors(
+ namespace, kind, item_name,
+ container_name, port_name, valid)
+
+ # Services follow a different flow
+ for item in list_items('services'):
+ namespace = item["metadata"]["namespace"]
+ item_name = item["metadata"]["name"]
+ for port in get(item, "spec", "ports"):
+ port_name = port.get("targetPort", None)
+ if isinstance(port_name, int) or port_name is None:
+ # Integer only or unnamed ports are OK
+ continue
+ valid = validate(port_name)
+ if not valid and first_error:
+ first_error = False
+ print_validation_header()
+ pretty_print_errors(
+ namespace, "services", item_name, "", port_name, valid)
+
+ # If we had at least 1 error then exit with 1
+ if not first_error:
+ raise SystemExit(1)
+
+
+if __name__ == '__main__':
+ main()
+
diff --git a/playbooks/common/openshift-cluster/upgrades/files/versions.sh b/playbooks/common/openshift-cluster/upgrades/files/versions.sh
new file mode 100644
index 000000000..f90719cab
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/files/versions.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+yum_installed=$(yum list installed "$@" 2>&1 | tail -n +2 | grep -v 'Installed Packages' | grep -v 'Red Hat Subscription Management' | grep -v 'Error:' | awk '{ print $2 }' | tr '\n' ' ')
+
+yum_available=$(yum list available "$@" 2>&1 | tail -n +2 | grep -v 'Available Packages' | grep -v 'Red Hat Subscription Management' | grep -v 'el7ose' | grep -v 'Error:' | awk '{ print $2 }' | tr '\n' ' ')
+
+
+echo "---"
+echo "curr_version: ${yum_installed}"
+echo "avail_version: ${yum_available}"
diff --git a/playbooks/common/openshift-cluster/upgrades/filter_plugins b/playbooks/common/openshift-cluster/upgrades/filter_plugins
new file mode 120000
index 000000000..b1213dedb
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/filter_plugins
@@ -0,0 +1 @@
+../../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py b/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py
new file mode 100755
index 000000000..a6721bb92
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py
@@ -0,0 +1,154 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# vim: expandtab:tabstop=4:shiftwidth=4
+
+"""Ansible module for modifying OpenShift configs during an upgrade"""
+
+import os
+import yaml
+
+DOCUMENTATION = '''
+---
+module: openshift_upgrade_config
+short_description: OpenShift Upgrade Config
+author: Jason DeTiberus
+requirements: [ ]
+'''
+EXAMPLES = '''
+'''
+
+def modify_api_levels(level_list, remove, ensure, msg_prepend='',
+ msg_append=''):
+ """ modify_api_levels """
+ changed = False
+ changes = []
+
+ if not isinstance(remove, list):
+ remove = []
+
+ if not isinstance(ensure, list):
+ ensure = []
+
+ if not isinstance(level_list, list):
+ new_list = []
+ changed = True
+ changes.append("%s created missing %s" % (msg_prepend, msg_append))
+ else:
+ new_list = level_list
+ for level in remove:
+ if level in new_list:
+ new_list.remove(level)
+ changed = True
+ changes.append("%s removed %s %s" % (msg_prepend, level, msg_append))
+
+ for level in ensure:
+ if level not in new_list:
+ new_list.append(level)
+ changed = True
+ changes.append("%s added %s %s" % (msg_prepend, level, msg_append))
+
+ return {'new_list': new_list, 'changed': changed, 'changes': changes}
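+
+# An illustrative call mirroring the 3.0 -> 3.1 lists used below:
+#   modify_api_levels(['v1beta3', 'v1'], ['v1beta1', 'v1beta2', 'v1beta3'], ['v1'])
+# returns new_list == ['v1'] with changed == True and one change recorded.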
+
+
+def upgrade_master_3_0_to_3_1(ansible_module, config_base, backup):
+ """Main upgrade method for 3.0 to 3.1."""
+ changes = []
+
+ # Facts do not get transferred to the hosts where custom modules run,
+ # need to make some assumptions here.
+ master_config = os.path.join(config_base, 'master/master-config.yaml')
+
+ master_cfg_file = open(master_config, 'r')
+ config = yaml.safe_load(master_cfg_file.read())
+ master_cfg_file.close()
+
+
+ # Remove unsupported api versions and ensure supported api versions from
+ # master config
+ unsupported_levels = ['v1beta1', 'v1beta2', 'v1beta3']
+ supported_levels = ['v1']
+
+ result = modify_api_levels(config.get('apiLevels'), unsupported_levels,
+ supported_levels, 'master-config.yaml:', 'from apiLevels')
+ if result['changed']:
+ config['apiLevels'] = result['new_list']
+ changes.append(result['changes'])
+
+ if 'kubernetesMasterConfig' in config and 'apiLevels' in config['kubernetesMasterConfig']:
+ config['kubernetesMasterConfig'].pop('apiLevels')
+ changes.append('master-config.yaml: removed kubernetesMasterConfig.apiLevels')
+
+ # Add proxyClientInfo to master-config
+ if 'proxyClientInfo' not in config['kubernetesMasterConfig']:
+ config['kubernetesMasterConfig']['proxyClientInfo'] = {
+ 'certFile': 'master.proxy-client.crt',
+ 'keyFile': 'master.proxy-client.key'
+ }
+ changes.append("master-config.yaml: added proxyClientInfo")
+
+ if len(changes) > 0:
+ if backup:
+ # TODO: Check success:
+ ansible_module.backup_local(master_config)
+
+ # Write the modified config:
+ out_file = open(master_config, 'w')
+ out_file.write(yaml.safe_dump(config, default_flow_style=False))
+ out_file.close()
+
+ return changes
+
+
+def upgrade_master(ansible_module, config_base, from_version, to_version, backup):
+ """Upgrade entry point."""
+ if from_version == '3.0':
+ if to_version == '3.1':
+ return upgrade_master_3_0_to_3_1(ansible_module, config_base, backup)
+
+
+def main():
+ """ main """
+ # disabling pylint errors for global-variable-undefined and invalid-name
+ # for 'global module' usage, since it is required to use ansible_facts
+ # pylint: disable=global-variable-undefined, invalid-name,
+ # redefined-outer-name
+ global module
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ config_base=dict(required=True),
+ from_version=dict(required=True, choices=['3.0']),
+ to_version=dict(required=True, choices=['3.1']),
+ role=dict(required=True, choices=['master']),
+ backup=dict(required=False, default=True, type='bool')
+ ),
+ supports_check_mode=True,
+ )
+
+ from_version = module.params['from_version']
+ to_version = module.params['to_version']
+ role = module.params['role']
+ backup = module.params['backup']
+ config_base = module.params['config_base']
+
+ try:
+ changes = []
+ if role == 'master':
+ changes = upgrade_master(module, config_base, from_version,
+ to_version, backup)
+
+ changed = len(changes) > 0
+ return module.exit_json(changed=changed, changes=changes)
+
+ # ignore broad-except error to avoid stack trace to ansible user
+ # pylint: disable=broad-except
+ except Exception, e:
+ return module.fail_json(msg=str(e))
+
+# ignore pylint errors related to the module_utils import
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
+# import module snippets
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/playbooks/common/openshift-cluster/upgrades/lookup_plugins b/playbooks/common/openshift-cluster/upgrades/lookup_plugins
new file mode 120000
index 000000000..aff753026
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/lookup_plugins
@@ -0,0 +1 @@
+../../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/roles b/playbooks/common/openshift-cluster/upgrades/roles
new file mode 120000
index 000000000..4bdbcbad3
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/roles
@@ -0,0 +1 @@
+../../../../roles \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_minor/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/filter_plugins
new file mode 120000
index 000000000..27ddaa18b
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/filter_plugins
@@ -0,0 +1 @@
+../../../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_minor/library b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/library
new file mode 120000
index 000000000..53bed9684
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/library
@@ -0,0 +1 @@
+../library \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_minor/lookup_plugins b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/lookup_plugins
new file mode 120000
index 000000000..cf407f69b
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/lookup_plugins
@@ -0,0 +1 @@
+../../../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_minor/roles b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/roles
new file mode 120000
index 000000000..6bc1a7aef
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/roles
@@ -0,0 +1 @@
+../../../../../roles \ No newline at end of file
diff --git a/playbooks/adhoc/upgrades/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml
index ae1d0127c..9f7e49b93 100644
--- a/playbooks/adhoc/upgrades/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml
@@ -1,25 +1,12 @@
---
-- name: Upgrade base package on masters
- hosts: masters
- roles:
- - openshift_facts
- vars:
- openshift_version: "{{ openshift_pkg_version | default('') }}"
- tasks:
- - name: Upgrade base package
- yum: pkg={{ openshift.common.service_type }}{{ openshift_version }} state=latest
+- name: Evaluate groups
+ include: ../../evaluate_groups.yml
- name: Re-Run cluster configuration to apply latest configuration changes
- include: ../../common/openshift-cluster/config.yml
- vars:
- g_etcd_group: "{{ 'etcd' }}"
- g_masters_group: "{{ 'masters' }}"
- g_nodes_group: "{{ 'nodes' }}"
- openshift_cluster_id: "{{ cluster_id | default('default') }}"
- openshift_deployment_type: "{{ deployment_type }}"
+ include: ../../config.yml
- name: Upgrade masters
- hosts: masters
+ hosts: oo_masters_to_config
vars:
openshift_version: "{{ openshift_pkg_version | default('') }}"
tasks:
@@ -29,7 +16,7 @@
service: name="{{ openshift.common.service_type}}-master" state=restarted
- name: Upgrade nodes
- hosts: nodes
+ hosts: oo_nodes_to_config
vars:
openshift_version: "{{ openshift_pkg_version | default('') }}"
tasks:
@@ -60,19 +47,6 @@
{{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
policy reconcile-cluster-roles --confirm
-- name: Update cluster policy bindings
- hosts: oo_first_master
- tasks:
- - name: oadm policy reconcile-cluster-role-bindings --confirm
- command: >
- {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
- policy reconcile-cluster-role-bindings
- --exclude-groups=system:authenticated
- --exclude-groups=system:unauthenticated
- --exclude-users=system:anonymous
- --additive-only=true --confirm
- when: ( _new_version.stdout | version_compare('1.0.6', '>') and _new_version.stdout | version_compare('3.0','<') ) or _new_version.stdout | version_compare('3.0.2','>')
-
- name: Upgrade default router
hosts: oo_first_master
vars:
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/filter_plugins
new file mode 120000
index 000000000..27ddaa18b
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/filter_plugins
@@ -0,0 +1 @@
+../../../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/library b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/library
new file mode 120000
index 000000000..53bed9684
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/library
@@ -0,0 +1 @@
+../library \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/lookup_plugins b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/lookup_plugins
new file mode 120000
index 000000000..cf407f69b
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/lookup_plugins
@@ -0,0 +1 @@
+../../../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/roles b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/roles
new file mode 120000
index 000000000..6bc1a7aef
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/roles
@@ -0,0 +1 @@
+../../../../../roles \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
new file mode 100644
index 000000000..dd6979ab7
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
@@ -0,0 +1,407 @@
+---
+- name: Evaluate host groups
+ include: ../../evaluate_groups.yml
+
+- name: Load openshift_facts from the environment
+ hosts: oo_masters_to_config oo_nodes_to_config oo_etcd_to_config oo_lb_to_config
+ roles:
+ - openshift_facts
+
+- name: Verify upgrade can proceed
+ hosts: oo_first_master
+ vars:
+ openshift_master_ha: "{{ groups['masters'] | length > 1 }}"
+ gather_facts: no
+ tasks:
+ # Pacemaker is currently the only supported upgrade path for multiple masters
+ - fail:
+ msg: "openshift_master_cluster_method must be set to 'pacemaker'"
+ when: openshift_master_ha | bool and ((openshift_master_cluster_method is not defined) or (openshift_master_cluster_method is defined and openshift_master_cluster_method != "pacemaker"))
+ - fail:
+ msg: >
+ This upgrade is only supported for origin and openshift-enterprise
+ deployment types
+ when: deployment_type not in ['origin','openshift-enterprise']
+ - fail:
+ msg: >
+ openshift_pkg_version is {{ openshift_pkg_version }} which is not a
+ valid version for a 3.1 upgrade
+ when: openshift_pkg_version is defined and openshift_pkg_version.split('-',1).1 | version_compare('3.0.2.900','<')
+
+ # If this script errors out ansible will show the default stdout/stderr
+ # which contains details for the user:
+ - script: ../files/pre-upgrade-check
+
+- name: Evaluate etcd_hosts_to_backup
+ hosts: localhost
+ tasks:
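+  # With an external etcd group, back up every etcd host; otherwise fall
+  # back to the first master, where embedded etcd keeps its data.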
+ - name: Evaluate etcd_hosts_to_backup
+ add_host:
+ name: "{{ item }}"
+ groups: etcd_hosts_to_backup
+ with_items: groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master
+
+- name: Backup etcd
+ hosts: etcd_hosts_to_backup
+ vars:
+ embedded_etcd: "{{ openshift.master.embedded_etcd }}"
+ timestamp: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
+ roles:
+ - openshift_facts
+ tasks:
+ - openshift_facts:
+ role: etcd
+ local_facts: {}
+ when: "'etcd' not in openshift"
+
+ - stat: path=/var/lib/openshift
+ register: var_lib_openshift
+
+ - stat: path=/var/lib/origin
+ register: var_lib_origin
+
+ - name: Create origin symlink if necessary
+ file: src=/var/lib/openshift/ dest=/var/lib/origin state=link
+ when: var_lib_openshift.stat.exists == True and var_lib_origin.stat.exists == False
+
+ - name: Check available disk space for etcd backup
+ # We assume to be using the data dir for all backups.
+ shell: >
+ df --output=avail -k {{ openshift.common.data_dir }} | tail -n 1
+ register: avail_disk
+
+ - name: Check current embedded etcd disk usage
+ shell: >
+ du -k {{ openshift.etcd.etcd_data_dir }} | tail -n 1 | cut -f1
+ register: etcd_disk_usage
+ when: embedded_etcd | bool
+
+ - name: Abort if insufficient disk space for etcd backup
+ fail:
+ msg: >
+ {{ etcd_disk_usage.stdout }} Kb disk space required for etcd backup,
+ {{ avail_disk.stdout }} Kb available.
+ when: (embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int)
+
+ - name: Install etcd (for etcdctl)
+ yum:
+ pkg: etcd
+ state: latest
+
+ - name: Generate etcd backup
+ command: >
+ etcdctl backup --data-dir={{ openshift.etcd.etcd_data_dir }}
+ --backup-dir={{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}
+
+ - name: Display location of etcd backup
+ debug:
+ msg: "Etcd backup created in {{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}"
+
+
+- name: Update deployment type
+ hosts: OSEv3
+ roles:
+ - openshift_facts
+ post_tasks:
+ - openshift_facts:
+ role: common
+ local_facts:
+ deployment_type: "{{ deployment_type }}"
+
+
+- name: Perform upgrade version checking
+ hosts: masters[0]
+ tasks:
+ - name: Clean yum cache
+ command: yum clean all
+
+ - name: Determine available versions
+ script: ../files/versions.sh {{ openshift.common.service_type }} openshift
+ register: g_versions_result
+
+ - set_fact:
+ g_aos_versions: "{{ g_versions_result.stdout | from_yaml }}"
+
+ - set_fact:
+ g_new_version: "{{ g_aos_versions.curr_version.split('-', 1).0 if g_aos_versions.avail_version is none else g_aos_versions.avail_version.split('-', 1).0 }}"
+
+ - fail:
+ msg: This playbook requires Origin 1.0.6 or later
+ when: deployment_type == 'origin' and g_aos_versions.curr_version | version_compare('1.0.6','<')
+
+ - fail:
+ msg: Atomic OpenShift 3.1 packages not found
+ when: g_aos_versions.curr_version | version_compare('3.0.2.900','<') and (g_aos_versions.avail_version is none or g_aos_versions.avail_version | version_compare('3.0.2.900','<'))
+
+- name: Upgrade masters
+ hosts: masters
+ vars:
+ openshift_version: "{{ openshift_pkg_version | default('') }}"
+ tasks:
+ - name: Upgrade to latest available kernel
+ yum:
+ pkg: kernel
+ state: latest
+
+ - name: Upgrade master packages
+ command: yum update -y {{ openshift.common.service_type }}-master{{ openshift_version }}
+
+ - name: Ensure python-yaml present for config upgrade
+ yum:
+ pkg: PyYAML
+ state: installed
+
+ - name: Upgrade master configuration
+ openshift_upgrade_config:
+ from_version: '3.0'
+ to_version: '3.1'
+ role: master
+ config_base: "{{ hostvars[inventory_hostname].openshift.common.config_base }}"
+
+ - set_fact:
+ master_certs_missing: True
+ master_cert_subdir: master-{{ openshift.common.hostname }}
+ master_cert_config_dir: "{{ openshift.common.config_base }}/master"
+
+- name: Create temp directory for syncing certs
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - name: Create local temp directory for syncing certs
+ local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX
+ register: g_master_mktemp
+ changed_when: False
+
+- name: Generate missing master certificates
+ hosts: masters[0]
+ vars:
+ master_hostnames: "{{ hostvars
+ | oo_select_keys(groups.masters)
+ | oo_collect('openshift.common.all_hostnames')
+ | oo_flatten | unique }}"
+ master_generated_certs_dir: "{{ openshift.common.config_base }}/generated-configs"
+ masters_needing_certs: "{{ hostvars
+ | oo_select_keys(groups.masters)
+ | difference([groups.masters.0]) }}"
+ sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
+ openshift_deployment_type: "{{ deployment_type }}"
+ roles:
+ - openshift_master_certificates
+ post_tasks:
+ - name: Remove generated etcd client certs when using external etcd
+ file:
+ path: "{{ master_generated_certs_dir }}/{{ item.0.master_cert_subdir }}/{{ item.1 }}"
+ state: absent
+ when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config
+ with_nested:
+ - masters_needing_certs
+ - - master.etcd-client.crt
+ - master.etcd-client.key
+
+ - name: Create a tarball of the master certs
+ command: >
+ tar -czvf {{ master_generated_certs_dir }}/{{ item.master_cert_subdir }}.tgz
+ -C {{ master_generated_certs_dir }}/{{ item.master_cert_subdir }} .
+ with_items: masters_needing_certs
+
+ - name: Retrieve the master cert tarball from the master
+ fetch:
+ src: "{{ master_generated_certs_dir }}/{{ item.master_cert_subdir }}.tgz"
+ dest: "{{ sync_tmpdir }}/"
+ flat: yes
+ fail_on_missing: yes
+ validate_checksum: yes
+ with_items: masters_needing_certs
+
+- name: Sync certs and restart masters post configuration change
+ hosts: masters
+ vars:
+ sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
+ openshift_master_ha: "{{ groups['masters'] | length > 1 }}"
+ tasks:
+ - name: Unarchive the tarball on the master
+ unarchive:
+ src: "{{ sync_tmpdir }}/{{ master_cert_subdir }}.tgz"
+ dest: "{{ master_cert_config_dir }}"
+ when: inventory_hostname != groups.masters.0
+
+ - name: Restart master services
+ service: name="{{ openshift.common.service_type}}-master" state=restarted
+ when: not openshift_master_ha | bool
+
+- name: Destroy cluster
+ hosts: masters[0]
+ vars:
+ openshift_master_ha: "{{ groups['masters'] | length > 1 }}"
+ openshift_deployment_type: "{{ deployment_type }}"
+ pre_tasks:
+ - name: Check for configured cluster
+ stat:
+ path: /etc/corosync/corosync.conf
+ register: corosync_conf
+ when: openshift_master_ha | bool
+ - name: Destroy cluster
+ command: pcs cluster destroy --all
+ when: openshift_master_ha | bool and corosync_conf.stat.exists == true
+
+- name: Start pcsd on masters
+ hosts: masters
+ vars:
+ openshift_master_ha: "{{ groups['masters'] | length > 1 }}"
+ tasks:
+ - name: Start pcsd
+ service: name=pcsd enabled=yes state=started
+ when: openshift_master_ha | bool
+
+- name: Re-create cluster
+ hosts: masters[0]
+ vars:
+ openshift_master_ha: "{{ groups['masters'] | length > 1 }}"
+ openshift_deployment_type: "{{ deployment_type }}"
+ omc_cluster_hosts: "{{ groups.masters | join(' ') }}"
+ roles:
+ - role: openshift_master_cluster
+ when: openshift_master_ha | bool
+
+- name: Delete temporary directory on localhost
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - file: name={{ g_master_mktemp.stdout }} state=absent
+ changed_when: False
+
+
+- name: Upgrade nodes
+ hosts: nodes
+ vars:
+ openshift_version: "{{ openshift_pkg_version | default('') }}"
+ roles:
+ - openshift_facts
+ tasks:
+ - name: Upgrade node packages
+ command: yum update -y {{ openshift.common.service_type }}-node{{ openshift_version }}
+ - name: Restart node services
+ service: name="{{ openshift.common.service_type }}-node" state=restarted
+
+- name: Update cluster policy and policy bindings
+ hosts: masters[0]
+ vars:
+ origin_reconcile_bindings: "{{ deployment_type == 'origin' and g_new_version | version_compare('1.0.6', '>') }}"
+ ent_reconcile_bindings: true
+ tasks:
+ - name: oadm policy reconcile-cluster-roles --confirm
+ command: >
+ {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ policy reconcile-cluster-roles --confirm
+
+ - name: oadm policy reconcile-cluster-role-bindings --confirm
+ command: >
+ {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ policy reconcile-cluster-role-bindings
+ --exclude-groups=system:authenticated
+ --exclude-groups=system:unauthenticated
+ --exclude-users=system:anonymous
+ --additive-only=true --confirm
+ when: origin_reconcile_bindings | bool or ent_reconcile_bindings | bool
+
+
+- name: Restart masters post reconcile
+ hosts: masters
+ vars:
+ openshift_master_ha: "{{ groups['masters'] | length > 1 }}"
+ tasks:
+ - name: Restart master services
+ service: name="{{ openshift.common.service_type}}-master" state=restarted
+ when: not openshift_master_ha | bool
+
+- name: Restart cluster post reconcile
+ hosts: masters[0]
+ vars:
+ openshift_master_ha: "{{ groups['masters'] | length > 1 }}"
+ tasks:
+ - name: Restart master cluster
+ command: pcs resource restart master
+ when: openshift_master_ha | bool
+ - name: Wait for the clustered master service to be available
+ wait_for:
+ host: "{{ openshift_master_cluster_vip }}"
+ port: 8443
+ state: started
+ timeout: 180
+ delay: 90
+ when: openshift_master_ha | bool
+
+- name: Upgrade default router and registry
+ hosts: masters[0]
+ vars:
+ - registry_image: "{{ openshift.master.registry_url | replace( '${component}', 'docker-registry' ) | replace ( '${version}', 'v' + g_new_version ) }}"
+ - router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', 'v' + g_new_version ) }}"
+ - oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
+ tasks:
+ - name: Check for default router
+ command: >
+ {{ oc_cmd }} get -n default dc/router
+ register: _default_router
+ failed_when: false
+ changed_when: false
+ - name: Check for allowHostNetwork and allowHostPorts
+ when: _default_router.rc == 0
+ shell: >
+ {{ oc_cmd }} get -o yaml scc/privileged | /usr/bin/grep -e allowHostPorts -e allowHostNetwork
+ register: _scc
+ - name: Grant allowHostNetwork and allowHostPorts
+ when:
+ - _default_router.rc == 0
+ - "'false' in _scc.stdout"
+ command: >
+ {{ oc_cmd }} patch scc/privileged -p '{"allowHostPorts":true,"allowHostNetwork":true}' --loglevel=9
+ - name: Update deployment config to 1.0.4/3.0.1 spec
+ when: _default_router.rc == 0
+ command: >
+ {{ oc_cmd }} patch dc/router -p
+ '{"spec":{"strategy":{"rollingParams":{"updatePercent":-10},"spec":{"serviceAccount":"router","serviceAccountName":"router"}}}}'
+ - name: Switch to hostNetwork=true
+ when: _default_router.rc == 0
+ command: >
+ {{ oc_cmd }} patch dc/router -p '{"spec":{"template":{"spec":{"hostNetwork":true}}}}'
+ - name: Update router image to current version
+ when: _default_router.rc == 0
+ command: >
+ {{ oc_cmd }} patch dc/router -p
+ '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}"}]}}}}'
+
+ - name: Check for default registry
+ command: >
+ {{ oc_cmd }} get -n default dc/docker-registry
+ register: _default_registry
+ failed_when: false
+ changed_when: false
+ - name: Update registry image to current version
+ when: _default_registry.rc == 0
+ command: >
+ {{ oc_cmd }} patch dc/docker-registry -p
+ '{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}'
+
+- name: Update image streams and templates
+ hosts: masters[0]
+ vars:
+ openshift_examples_import_command: "update"
+ openshift_deployment_type: "{{ deployment_type }}"
+ roles:
+ - openshift_examples
+
+- name: Ensure master services enabled
+ hosts: masters
+ vars:
+ openshift_master_ha: "{{ groups['masters'] | length > 1 }}"
+ tasks:
+ - name: Enable master services
+ service: name="{{ openshift.common.service_type}}-master" state=started enabled=yes
+ when: not openshift_master_ha | bool
+
+- name: Ensure node services enabled
+ hosts: nodes
+ tasks:
+ - name: Restart node services
+ service: name="{{ openshift.common.service_type }}-node" state=started enabled=yes
+
diff --git a/playbooks/common/openshift-etcd/config.yml b/playbooks/common/openshift-etcd/config.yml
index 952960652..ed23ada88 100644
--- a/playbooks/common/openshift-etcd/config.yml
+++ b/playbooks/common/openshift-etcd/config.yml
@@ -13,6 +13,8 @@
hostname: "{{ openshift_hostname | default(None) }}"
public_hostname: "{{ openshift_public_hostname | default(None) }}"
deployment_type: "{{ openshift_deployment_type }}"
+ - role: etcd
+ local_facts: {}
- name: Check status of etcd certificates
stat:
path: "{{ item }}"
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index 1dec923fc..1b3fba3aa 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -34,7 +34,9 @@
- role: common
local_facts:
hostname: "{{ openshift_hostname | default(None) }}"
+ ip: "{{ openshift_ip | default(None) }}"
public_hostname: "{{ openshift_public_hostname | default(None) }}"
+ public_ip: "{{ openshift_public_ip | default(None) }}"
deployment_type: "{{ openshift_deployment_type }}"
- role: master
local_facts:
@@ -44,12 +46,14 @@
public_api_url: "{{ openshift_master_public_api_url | default(None) }}"
cluster_hostname: "{{ openshift_master_cluster_hostname | default(None) }}"
cluster_public_hostname: "{{ openshift_master_cluster_public_hostname | default(None) }}"
- cluster_defer_ha: "{{ openshift_master_cluster_defer_ha | default(None) }}"
console_path: "{{ openshift_master_console_path | default(None) }}"
console_port: "{{ openshift_master_console_port | default(None) }}"
console_url: "{{ openshift_master_console_url | default(None) }}"
console_use_ssl: "{{ openshift_master_console_use_ssl | default(None) }}"
public_console_url: "{{ openshift_master_public_console_url | default(None) }}"
+ - role: etcd
+ local_facts: {}
+ when: openshift.master.embedded_etcd | bool
- name: Check status of external etcd certificates
stat:
path: "{{ openshift.common.config_base }}/master/{{ item }}"
@@ -168,6 +172,10 @@
masters_needing_certs: "{{ hostvars
| oo_select_keys(groups['oo_masters_to_config'] | difference(groups['oo_first_master']))
| oo_filter_list(filter_attr='master_certs_missing') }}"
+ master_hostnames: "{{ hostvars
+ | oo_select_keys(groups['oo_masters_to_config'])
+ | oo_collect('openshift.common.all_hostnames')
+ | oo_flatten | unique }}"
sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
roles:
- openshift_master_certificates
@@ -199,12 +207,84 @@
validate_checksum: yes
with_items: masters_needing_certs
+- name: Inspect named certificates
+ hosts: oo_first_master
+ tasks:
+ - name: Collect certificate names
+ set_fact:
+ parsed_named_certificates: "{{ openshift_master_named_certificates | oo_parse_certificate_names(master_cert_config_dir, openshift.common.internal_hostnames) }}"
+ when: openshift_master_named_certificates is defined
+
+- name: Compute haproxy_backend_servers
+ hosts: localhost
+ connection: local
+ sudo: false
+ gather_facts: no
+ tasks:
+ - set_fact:
+ haproxy_backend_servers: "{{ hostvars | oo_select_keys(groups['oo_masters_to_config']) | oo_haproxy_backend_masters }}"
+
+- name: Configure load balancers
+ hosts: oo_lb_to_config
+ vars:
+ sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
+ haproxy_frontends:
+ - name: atomic-openshift-api
+ mode: tcp
+ options:
+ - tcplog
+ binds:
+ - "*:{{ hostvars[groups.oo_first_master.0].openshift.master.api_port }}"
+ default_backend: atomic-openshift-api
+ haproxy_backends:
+ - name: atomic-openshift-api
+ mode: tcp
+ option: tcplog
+ balance: source
+ servers: "{{ hostvars.localhost.haproxy_backend_servers }}"
+ roles:
+ - role: haproxy
+ when: groups.oo_masters_to_config | length > 1
+
+- name: Generate master session keys
+ hosts: oo_first_master
+ tasks:
+ - fail:
+ msg: "Both openshift_master_session_auth_secrets and openshift_master_session_encryption_secrets must be provided if either variable is set"
+ when: (openshift_master_session_auth_secrets is defined and openshift_master_session_encryption_secrets is not defined) or (openshift_master_session_encryption_secrets is defined and openshift_master_session_auth_secrets is not defined)
+ - fail:
+ msg: "openshift_master_session_auth_secrets and openshift_master_encryption_secrets must be equal length"
+ when: (openshift_master_session_auth_secrets is defined and openshift_master_session_encryption_secrets is defined) and (openshift_master_session_auth_secrets | length != openshift_master_session_encryption_secrets | length)
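+  # openssl rand -base64 24 below emits 24 random bytes as a 32-character
+  # base64 string; each generation task only runs when the corresponding
+  # secret variable is undefined.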
+ - name: Generate session authentication key
+ command: /usr/bin/openssl rand -base64 24
+ register: session_auth_output
+ with_sequence: count=1
+ when: openshift_master_session_auth_secrets is undefined
+ - name: Generate session encryption key
+ command: /usr/bin/openssl rand -base64 24
+ register: session_encryption_output
+ with_sequence: count=1
+ when: openshift_master_session_encryption_secrets is undefined
+ - set_fact:
+ session_auth_secret: "{{ openshift_master_session_auth_secrets
+ | default(session_auth_output.results
+ | map(attribute='stdout')
+ | list) }}"
+ session_encryption_secret: "{{ openshift_master_session_encryption_secrets
+ | default(session_encryption_output.results
+ | map(attribute='stdout')
+ | list) }}"
+
- name: Configure master instances
hosts: oo_masters_to_config
+ serial: 1
vars:
+ named_certificates: "{{ hostvars[groups['oo_first_master'][0]]['parsed_named_certificates'] | default([])}}"
sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
- embedded_etcd: "{{ openshift.master.embedded_etcd }}"
+ openshift_master_count: "{{ groups.oo_masters_to_config | length }}"
+ openshift_master_session_auth_secrets: "{{ hostvars[groups['oo_first_master'][0]]['session_auth_secret'] }}"
+ openshift_master_session_encryption_secrets: "{{ hostvars[groups['oo_first_master'][0]]['session_encryption_secret'] }}"
pre_tasks:
- name: Ensure certificate directory exists
file:
@@ -233,11 +313,25 @@
omc_cluster_hosts: "{{ groups.oo_masters_to_config | join(' ')}}"
roles:
- role: openshift_master_cluster
- when: openshift_master_ha | bool
+ when: openshift_master_ha | bool and openshift.master.cluster_method == "pacemaker"
- openshift_examples
- role: openshift_cluster_metrics
when: openshift.common.use_cluster_metrics | bool
+- name: Determine cluster dns ip
+ hosts: oo_first_master
+ tasks:
+ - name: Get master service ip
+ command: "{{ openshift.common.client_binary }} get -o template svc kubernetes --template=\\{\\{.spec.clusterIP\\}\\}"
+ register: master_service_ip_output
+ when: openshift.common.version_greater_than_3_1_or_1_1 | bool
+ - set_fact:
+ cluster_dns_ip: "{{ hostvars[groups.oo_first_master.0].openshift.dns.ip }}"
+ when: not openshift.common.version_greater_than_3_1_or_1_1 | bool
+ - set_fact:
+ cluster_dns_ip: "{{ master_service_ip_output.stdout }}"
+ when: openshift.common.version_greater_than_3_1_or_1_1 | bool
+
- name: Enable cockpit
hosts: oo_first_master
vars:
@@ -247,6 +341,14 @@
when: ( deployment_type in ['atomic-enterprise','openshift-enterprise'] ) and
(osm_use_cockpit | bool or osm_use_cockpit is undefined )
+- name: Configure flannel
+ hosts: oo_first_master
+ vars:
+ etcd_urls: "{{ openshift.master.etcd_urls }}"
+ roles:
+ - role: flannel_register
+ when: openshift.common.use_flannel | bool
+
# Additional instance config for online deployments
- name: Additional instance config
hosts: oo_masters_deployment_type_online
diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml
index a14ca8e11..8da9e231f 100644
--- a/playbooks/common/openshift-node/config.yml
+++ b/playbooks/common/openshift-node/config.yml
@@ -38,6 +38,22 @@
node_subdir: node-{{ openshift.common.hostname }}
config_dir: "{{ openshift.common.config_base }}/generated-configs/node-{{ openshift.common.hostname }}"
node_cert_dir: "{{ openshift.common.config_base }}/node"
+ - name: Check status of flannel external etcd certificates
+ stat:
+ path: "{{ openshift.common.config_base }}/node/{{ item }}"
+ with_items:
+ - node.etcd-client.crt
+ - node.etcd-ca.crt
+ register: g_external_etcd_flannel_cert_stat_result
+ when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config and (openshift.common.use_flannel | bool)
+ - set_fact:
+ etcd_client_flannel_certs_missing: "{{ g_external_etcd_flannel_cert_stat_result.results
+ | map(attribute='stat.exists')
+ | list | intersect([false])}}"
+ etcd_cert_subdir: openshift-node-{{ openshift.common.hostname }}
+ etcd_cert_config_dir: "{{ openshift.common.config_base }}/node"
+ etcd_cert_prefix: node.etcd-
+ when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config and (openshift.common.use_flannel | bool)
- name: Create temp directory for syncing certs
hosts: localhost
@@ -50,6 +66,64 @@
register: mktemp
changed_when: False
+- name: Configure flannel etcd certificates
+ hosts: oo_first_etcd
+ vars:
+ etcd_generated_certs_dir: /etc/etcd/generated_certs
+ sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}"
+ pre_tasks:
+ - set_fact:
+ etcd_needing_client_certs: "{{ hostvars
+ | oo_select_keys(groups['oo_nodes_to_config'])
+ | oo_filter_list(filter_attr='etcd_client_flannel_certs_missing') | default([]) }}"
+ when: etcd_client_flannel_certs_missing is defined and etcd_client_flannel_certs_missing
+ roles:
+ - role: etcd_certificates
+ post_tasks:
+ - name: Create a tarball of the etcd flannel certs
+ command: >
+ tar -czvf {{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz
+ -C {{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }} .
+ args:
+ creates: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz"
+ with_items: etcd_needing_client_certs
+ when: etcd_client_flannel_certs_missing is defined and etcd_client_flannel_certs_missing
+ - name: Retrieve the etcd cert tarballs
+ fetch:
+ src: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz"
+ dest: "{{ sync_tmpdir }}/"
+ flat: yes
+ fail_on_missing: yes
+ validate_checksum: yes
+ with_items: etcd_needing_client_certs
+ when: etcd_client_flannel_certs_missing is defined and etcd_client_flannel_certs_missing
+
+- name: Copy the external etcd flannel certs to the nodes
+ hosts: oo_nodes_to_config
+ vars:
+ sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}"
+ tasks:
+ - name: Ensure certificate directory exists
+ file:
+ path: "{{ openshift.common.config_base }}/node"
+ state: directory
+ when: etcd_client_flannel_certs_missing is defined and etcd_client_flannel_certs_missing
+ - name: Unarchive the tarball on the master
+ unarchive:
+ src: "{{ sync_tmpdir }}/{{ etcd_cert_subdir }}.tgz"
+ dest: "{{ etcd_cert_config_dir }}"
+ when: etcd_client_flannel_certs_missing is defined and etcd_client_flannel_certs_missing
+ - file:
+ path: "{{ etcd_cert_config_dir }}/{{ item }}"
+ owner: root
+ group: root
+ mode: 0600
+ with_items:
+ - node.etcd-client.crt
+ - node.etcd-client.key
+ - node.etcd-ca.crt
+ when: etcd_client_flannel_certs_missing is defined and etcd_client_flannel_certs_missing
+
- name: Create node certificates
hosts: oo_first_master
vars:
@@ -84,6 +158,8 @@
vars:
sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}"
openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"
+ etcd_urls: "{{ hostvars[groups.oo_first_master.0].openshift.master.etcd_urls }}"
+ embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}"
pre_tasks:
- name: Ensure certificate directory exists
file:
@@ -100,6 +176,8 @@
when: certs_missing
roles:
- openshift_node
+ - role: flannel
+ when: openshift.common.use_flannel | bool
- role: nickhammond.logrotate
- role: fluentd_node
when: openshift.common.use_fluentd | bool
diff --git a/playbooks/gce/openshift-cluster/config.yml b/playbooks/gce/openshift-cluster/config.yml
index 6ca4f7395..745161bcb 100644
--- a/playbooks/gce/openshift-cluster/config.yml
+++ b/playbooks/gce/openshift-cluster/config.yml
@@ -16,6 +16,7 @@
- include: ../../common/openshift-cluster/config.yml
vars:
g_etcd_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-etcd' }}"
+ g_lb_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-lb' }}"
g_masters_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-master' }}"
g_nodes_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-node' }}"
g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}"
diff --git a/playbooks/gce/openshift-cluster/join_node.yml b/playbooks/gce/openshift-cluster/join_node.yml
index 0dfa3e9d7..c8f6065cd 100644
--- a/playbooks/gce/openshift-cluster/join_node.yml
+++ b/playbooks/gce/openshift-cluster/join_node.yml
@@ -46,4 +46,4 @@
openshift_node_labels: "{{ lookup('oo_option', 'openshift_node_labels') }} "
os_sdn_network_plugin_name: "redhat/openshift-ovs-subnet"
osn_cluster_dns_domain: "{{ hostvars[groups.oo_first_master.0].openshift.dns.domain }}"
- osn_cluster_dns_ip: "{{ hostvars[groups.oo_first_master.0].openshift.dns.ip }}"
+ osn_cluster_dns_ip: "{{ hostvars[groups.oo_first_master.0].cluster_dns_ip }}"
diff --git a/playbooks/gce/openshift-cluster/launch.yml b/playbooks/gce/openshift-cluster/launch.yml
index c22b897d5..8be5d53e7 100644
--- a/playbooks/gce/openshift-cluster/launch.yml
+++ b/playbooks/gce/openshift-cluster/launch.yml
@@ -9,7 +9,7 @@
- fail: msg="Deployment type not supported for gce provider yet"
when: deployment_type == 'enterprise'
- - include: ../../common/openshift-cluster/set_master_launch_facts_tasks.yml
+ - include: ../../common/openshift-cluster/tasks/set_master_launch_facts.yml
- include: tasks/launch_instances.yml
vars:
instances: "{{ master_names }}"
@@ -17,7 +17,7 @@
type: "{{ k8s_type }}"
g_sub_host_type: "default"
- - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+ - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
vars:
type: "compute"
count: "{{ num_nodes }}"
@@ -28,7 +28,7 @@
type: "{{ k8s_type }}"
g_sub_host_type: "{{ sub_host_type }}"
- - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+ - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
vars:
type: "infra"
count: "{{ num_infra }}"
diff --git a/playbooks/libvirt/openshift-cluster/config.yml b/playbooks/libvirt/openshift-cluster/config.yml
index c208eee81..4d1ae22ff 100644
--- a/playbooks/libvirt/openshift-cluster/config.yml
+++ b/playbooks/libvirt/openshift-cluster/config.yml
@@ -15,6 +15,7 @@
- include: ../../common/openshift-cluster/config.yml
vars:
g_etcd_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-etcd' }}"
+ g_lb_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-lb' }}"
g_masters_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-master' }}"
g_nodes_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-node' }}"
g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}"
diff --git a/playbooks/libvirt/openshift-cluster/launch.yml b/playbooks/libvirt/openshift-cluster/launch.yml
index d3e768de5..8d7949dd1 100644
--- a/playbooks/libvirt/openshift-cluster/launch.yml
+++ b/playbooks/libvirt/openshift-cluster/launch.yml
@@ -17,7 +17,7 @@
- include: tasks/configure_libvirt.yml
- - include: ../../common/openshift-cluster/set_etcd_launch_facts_tasks.yml
+ - include: ../../common/openshift-cluster/tasks/set_etcd_launch_facts.yml
- include: tasks/launch_instances.yml
vars:
instances: "{{ etcd_names }}"
@@ -25,7 +25,7 @@
type: "{{ k8s_type }}"
g_sub_host_type: "default"
- - include: ../../common/openshift-cluster/set_master_launch_facts_tasks.yml
+ - include: ../../common/openshift-cluster/tasks/set_master_launch_facts.yml
- include: tasks/launch_instances.yml
vars:
instances: "{{ master_names }}"
@@ -33,7 +33,7 @@
type: "{{ k8s_type }}"
g_sub_host_type: "default"
- - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+ - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
vars:
type: "compute"
count: "{{ num_nodes }}"
@@ -44,7 +44,7 @@
type: "{{ k8s_type }}"
g_sub_host_type: "{{ sub_host_type }}"
- - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+ - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
vars:
type: "infra"
count: "{{ num_infra }}"
diff --git a/playbooks/libvirt/openshift-cluster/templates/user-data b/playbooks/libvirt/openshift-cluster/templates/user-data
index eacae7c7e..e0c966e45 100644
--- a/playbooks/libvirt/openshift-cluster/templates/user-data
+++ b/playbooks/libvirt/openshift-cluster/templates/user-data
@@ -19,5 +19,11 @@ system_info:
ssh_authorized_keys:
- {{ lookup('file', '~/.ssh/id_rsa.pub') }}
+write_files:
+ - path: /etc/sudoers.d/00-openshift-no-requiretty
+    permissions: '0440'
+ content: |
+ Defaults:openshift !requiretty
+
runcmd:
- NETWORK_CONFIG=/etc/sysconfig/network-scripts/ifcfg-eth0; if ! grep DHCP_HOSTNAME ${NETWORK_CONFIG}; then echo 'DHCP_HOSTNAME="{{ item[0] }}.example.com"' >> ${NETWORK_CONFIG}; fi; pkill -9 dhclient; service network restart
diff --git a/playbooks/openstack/openshift-cluster/config.yml b/playbooks/openstack/openshift-cluster/config.yml
index a5ee2d6a5..888804e28 100644
--- a/playbooks/openstack/openshift-cluster/config.yml
+++ b/playbooks/openstack/openshift-cluster/config.yml
@@ -10,6 +10,7 @@
- include: ../../common/openshift-cluster/config.yml
vars:
g_etcd_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-etcd' }}"
+ g_lb_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-lb' }}"
g_masters_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-master' }}"
g_nodes_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-node' }}"
g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}"
diff --git a/roles/flannel/README.md b/roles/flannel/README.md
new file mode 100644
index 000000000..b8aa830ac
--- /dev/null
+++ b/roles/flannel/README.md
@@ -0,0 +1,45 @@
+flannel
+=======
+
+Configure flannel on openshift nodes
+
+Requirements
+------------
+
+This role assumes it's being deployed on a RHEL/Fedora based host with a
+'flannel' package, version 0.3 or later, available via yum.
+
+Role Variables
+--------------
+
+| Name | Default value | Description |
+|---------------------|-----------------------------------------|-----------------------------------------------|
+| flannel_interface | ansible_default_ipv4.interface | interface to use for inter-host communication |
+| flannel_etcd_key | /openshift.com/network | etcd prefix |
+| etcd_hosts | etcd_urls | a list of etcd endpoints |
+| etcd_conf_dir | {{ openshift.common.config_base }}/node | SSL certificates directory |
+| etcd_peer_ca_file | {{ etcd_conf_dir }}/ca.crt | SSL CA to use for etcd |
+| etcd_peer_cert_file | Openshift SSL cert | SSL cert to use for etcd |
+| etcd_peer_key_file | Openshift SSL key | SSL key to use for etcd |
+
+Dependencies
+------------
+
+openshift_facts
+
+Example Playbook
+----------------
+
+ - hosts: openshift_node
+ roles:
+ - { role: flannel, etcd_urls: ['https://127.0.0.1:2379'] }
+
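+With an external etcd cluster the same role can be pointed at the real
+endpoints; the certificate variables then resolve to the node.etcd-* files
+distributed by the node config playbook. A minimal sketch (the endpoint
+below is hypothetical):
+
+    - hosts: openshift_node
+      roles:
+      - role: flannel
+        etcd_urls: ['https://etcd0.example.com:2379']
+        embedded_etcd: false
+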
+License
+-------
+
+Apache License, Version 2.0
+
+Author Information
+------------------
+
+Sylvain Baubeau <sbaubeau@redhat.com>
diff --git a/roles/flannel/defaults/main.yaml b/roles/flannel/defaults/main.yaml
new file mode 100644
index 000000000..34cebda9c
--- /dev/null
+++ b/roles/flannel/defaults/main.yaml
@@ -0,0 +1,8 @@
+---
+flannel_interface: "{{ ansible_default_ipv4.interface }}"
+flannel_etcd_key: /openshift.com/network
+etcd_hosts: "{{ etcd_urls }}"
+etcd_conf_dir: "{{ openshift.common.config_base }}/node"
+etcd_peer_ca_file: "{{ etcd_conf_dir }}/{{ 'ca' if (embedded_etcd | bool) else 'node.etcd-ca' }}.crt"
+etcd_peer_cert_file: "{{ etcd_conf_dir }}/{{ 'system:node:' + openshift.common.hostname if (embedded_etcd | bool) else 'node.etcd-client' }}.crt"
+etcd_peer_key_file: "{{ etcd_conf_dir }}/{{ 'system:node:' + openshift.common.hostname if (embedded_etcd | bool) else 'node.etcd-client' }}.key"
diff --git a/roles/flannel/handlers/main.yml b/roles/flannel/handlers/main.yml
new file mode 100644
index 000000000..f9b9ae7f1
--- /dev/null
+++ b/roles/flannel/handlers/main.yml
@@ -0,0 +1,8 @@
+---
+- name: restart flanneld
+ sudo: true
+ service: name=flanneld state=restarted
+
+- name: restart docker
+ sudo: true
+ service: name=docker state=restarted
diff --git a/roles/flannel/meta/main.yml b/roles/flannel/meta/main.yml
new file mode 100644
index 000000000..909bdbfa4
--- /dev/null
+++ b/roles/flannel/meta/main.yml
@@ -0,0 +1,16 @@
+---
+galaxy_info:
+ author: Sylvain
+ description: flannel management
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 1.2
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+ - system
+dependencies:
+- { role: openshift_facts }
diff --git a/roles/flannel/tasks/main.yml b/roles/flannel/tasks/main.yml
new file mode 100644
index 000000000..acfb009ec
--- /dev/null
+++ b/roles/flannel/tasks/main.yml
@@ -0,0 +1,43 @@
+---
+- name: Install flannel
+ sudo: true
+ yum: pkg=flannel state=present
+
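+# The lineinfile edits below use backrefs, so they only rewrite settings that
+# already exist in /etc/sysconfig/flanneld as shipped by the flannel package.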
+- name: Set flannel etcd url
+ sudo: true
+ lineinfile:
+ dest: /etc/sysconfig/flanneld
+ backrefs: yes
+ regexp: "^(FLANNEL_ETCD=)"
+ line: '\1{{ etcd_hosts|join(",") }}'
+
+- name: Set flannel etcd key
+ sudo: true
+ lineinfile:
+ dest: /etc/sysconfig/flanneld
+ backrefs: yes
+ regexp: "^(FLANNEL_ETCD_KEY=)"
+ line: '\1{{ flannel_etcd_key }}'
+
+- name: Set flannel options
+ sudo: true
+ lineinfile:
+ dest: /etc/sysconfig/flanneld
+ backrefs: yes
+ regexp: "^#?(FLANNEL_OPTIONS=)"
+ line: '\1--iface {{ flannel_interface }} --etcd-cafile={{ etcd_peer_ca_file }} --etcd-keyfile={{ etcd_peer_key_file }} --etcd-certfile={{ etcd_peer_cert_file }}'
+
+- name: Enable flanneld
+ sudo: true
+ service:
+ name: flanneld
+ state: started
+ enabled: yes
+ register: start_result
+
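+# docker0 keeps the address the docker package assigned at install time; drop
+# it so the flannel-allocated subnet takes effect when docker restarts.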
+- name: Remove docker bridge ip
+ sudo: true
+ shell: ip a del `ip a show docker0 | grep "inet[[:space:]]" | awk '{print $2}'` dev docker0
+ notify:
+ - restart docker
+ - restart node
diff --git a/roles/flannel_register/README.md b/roles/flannel_register/README.md
new file mode 100644
index 000000000..ba7541ab1
--- /dev/null
+++ b/roles/flannel_register/README.md
@@ -0,0 +1,47 @@
+flannel_register
+================
+
+Register flannel configuration into etcd
+
+Requirements
+------------
+
+This role assumes it's being deployed on a RHEL/Fedora based host with a
+'flannel' package, version 0.3 or later, available via yum.
+
+Role Variables
+--------------
+
+| Name | Default value | Description |
+|---------------------|----------------------------------------------------|-------------------------------------------------|
+| flannel_network     | {{ openshift.master.portal_net }} or 172.30.0.0/16 | IP network to use for the flannel overlay        |
+| flannel_min_network | 172.30.5.0                                          | beginning of IP range for the subnet allocation  |
+| flannel_subnet_len  | 24                                                  | size of the subnet allocated to each host        |
+| flannel_etcd_key | /openshift.com/network | etcd prefix |
+| etcd_hosts | etcd_urls | a list of etcd endpoints |
+| etcd_conf_dir | {{ openshift.common.config_base }}/master | SSL certificates directory |
+| etcd_peer_ca_file | {{ etcd_conf_dir }}/ca.crt | SSL CA to use for etcd |
+| etcd_peer_cert_file | {{ etcd_conf_dir }}/master.etcd-client.crt | SSL cert to use for etcd |
+| etcd_peer_key_file | {{ etcd_conf_dir }}/master.etcd-client.key | SSL key to use for etcd |
+
+Dependencies
+------------
+
+openshift_facts
+
+Example Playbook
+----------------
+
+ - hosts: openshift_master
+ roles:
+      - { role: flannel_register, etcd_urls: ['https://127.0.0.1:2379'] }
+
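+With the defaults above, the document registered at
+{{ flannel_etcd_key }}/config comes out roughly as:
+
+    {
+      "Network": "172.30.0.0/16",
+      "SubnetLen": 24,
+      "SubnetMin": "172.30.5.0",
+      "Backend": {
+        "Type": "host-gw"
+      }
+    }
+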
+License
+-------
+
+Apache License, Version 2.0
+
+Author Information
+------------------
+
+Sylvain Baubeau <sbaubeau@redhat.com>
diff --git a/roles/flannel_register/defaults/main.yaml b/roles/flannel_register/defaults/main.yaml
new file mode 100644
index 000000000..269d1a17c
--- /dev/null
+++ b/roles/flannel_register/defaults/main.yaml
@@ -0,0 +1,11 @@
+---
+flannel_network: "{{ openshift.master.portal_net | default('172.30.0.0/16', true) }}"
+flannel_min_network: 172.30.5.0
+flannel_subnet_len: 24
+flannel_etcd_key: /openshift.com/network
+etcd_hosts: "{{ etcd_urls }}"
+etcd_conf_dir: "{{ openshift.common.config_base }}/master"
+etcd_peer_ca_file: "{{ etcd_conf_dir + '/ca.crt' if (openshift.master.embedded_etcd | bool) else etcd_conf_dir + '/master.etcd-ca.crt' }}"
+etcd_peer_cert_file: "{{ etcd_conf_dir }}/master.etcd-client.crt"
+etcd_peer_key_file: "{{ etcd_conf_dir }}/master.etcd-client.key"
+
diff --git a/roles/flannel_register/meta/main.yml b/roles/flannel_register/meta/main.yml
new file mode 100644
index 000000000..73bddcca4
--- /dev/null
+++ b/roles/flannel_register/meta/main.yml
@@ -0,0 +1,16 @@
+---
+galaxy_info:
+ author: Sylvain
+ description: register flannel configuration into etcd
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 1.2
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+ - system
+dependencies:
+- { role: openshift_facts }
diff --git a/roles/flannel_register/tasks/main.yml b/roles/flannel_register/tasks/main.yml
new file mode 100644
index 000000000..1629157c8
--- /dev/null
+++ b/roles/flannel_register/tasks/main.yml
@@ -0,0 +1,14 @@
+---
+- name: Ensure /etc/flannel directory exists
+ sudo: true
+ file: path=/etc/flannel state=directory
+
+- name: Generate flannel configuration for etcd
+ sudo: true
+ template:
+ src: "flannel-config.json"
+ dest: "/etc/flannel/config.json"
+
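+# PUT the JSON document under {{ flannel_etcd_key }}/config using the etcd v2
+# keys API, authenticated with the master's etcd client certificate.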
+- name: Insert flannel configuration into etcd
+ sudo: true
+ command: 'curl -L --cacert "{{ etcd_peer_ca_file }}" --cert "{{ etcd_peer_cert_file }}" --key "{{ etcd_peer_key_file }}" "{{ etcd_hosts[0] }}/v2/keys{{ flannel_etcd_key }}/config" -XPUT --data-urlencode value@/etc/flannel/config.json'
diff --git a/roles/flannel_register/templates/flannel-config.json b/roles/flannel_register/templates/flannel-config.json
new file mode 100644
index 000000000..89ce4c30b
--- /dev/null
+++ b/roles/flannel_register/templates/flannel-config.json
@@ -0,0 +1,8 @@
+{
+ "Network": "{{ flannel_network }}",
+ "SubnetLen": {{ flannel_subnet_len }},
+ "SubnetMin": "{{ flannel_min_network }}",
+ "Backend": {
+ "Type": "host-gw"
+ }
+}
diff --git a/roles/haproxy/README.md b/roles/haproxy/README.md
new file mode 100644
index 000000000..5bc415066
--- /dev/null
+++ b/roles/haproxy/README.md
@@ -0,0 +1,34 @@
+HAProxy
+=======
+
+Installs and configures haproxy; in this repository it fronts multi-master
+OpenShift clusters as the load balancer host group.
+
+Requirements
+------------
+
+A RHEL/CentOS 7 host with the 'haproxy' package available via yum.
+
+Role Variables
+--------------
+
+The main inputs are haproxy_frontends and haproxy_backends, lists of
+dictionaries rendered into haproxy.cfg; see defaults/main.yml for the
+expected shape of each entry.
+
+Dependencies
+------------
+
+os_firewall, openshift_repos
+
+Example Playbook
+----------------
+
+A minimal sketch reusing the structures from defaults/main.yml (the host
+group, server names, and addresses below are hypothetical):
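+
+    - hosts: lb
+      roles:
+      - role: haproxy
+        haproxy_frontends:
+        - name: atomic-openshift-api
+          mode: tcp
+          options:
+          - tcplog
+          binds:
+          - "*:8443"
+          default_backend: atomic-openshift-api
+        haproxy_backends:
+        - name: atomic-openshift-api
+          mode: tcp
+          balance: source
+          servers:
+          - name: master0
+            address: 192.168.122.10:8443
+            opts: check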
+
+License
+-------
+
+Apache License, Version 2.0
+
+Author Information
+------------------
+
+Jason DeTiberus (jdetiber@redhat.com)
diff --git a/roles/haproxy/defaults/main.yml b/roles/haproxy/defaults/main.yml
new file mode 100644
index 000000000..7ba5bd485
--- /dev/null
+++ b/roles/haproxy/defaults/main.yml
@@ -0,0 +1,21 @@
+---
+haproxy_frontends:
+- name: main
+ binds:
+ - "*:80"
+ default_backend: default
+
+haproxy_backends:
+- name: default
+ balance: roundrobin
+ servers:
+ - name: web01
+ address: 127.0.0.1:9000
+ opts: check
+
+os_firewall_use_firewalld: False
+os_firewall_allow:
+- service: haproxy stats
+ port: "9000/tcp"
+- service: haproxy balance
+ port: "8443/tcp"
diff --git a/roles/haproxy/handlers/main.yml b/roles/haproxy/handlers/main.yml
new file mode 100644
index 000000000..ee60adcab
--- /dev/null
+++ b/roles/haproxy/handlers/main.yml
@@ -0,0 +1,5 @@
+---
+- name: restart haproxy
+ service:
+ name: haproxy
+ state: restarted
diff --git a/roles/haproxy/meta/main.yml b/roles/haproxy/meta/main.yml
new file mode 100644
index 000000000..0fad106a9
--- /dev/null
+++ b/roles/haproxy/meta/main.yml
@@ -0,0 +1,14 @@
+---
+galaxy_info:
+ author: Jason DeTiberus
+ description: HAProxy
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 1.9
+ platforms:
+ - name: EL
+ versions:
+ - 7
+dependencies:
+- { role: os_firewall }
+- { role: openshift_repos }
diff --git a/roles/haproxy/tasks/main.yml b/roles/haproxy/tasks/main.yml
new file mode 100644
index 000000000..5638b7313
--- /dev/null
+++ b/roles/haproxy/tasks/main.yml
@@ -0,0 +1,25 @@
+---
+- name: Install haproxy
+ yum:
+ pkg: haproxy
+ state: present
+
+- name: Configure haproxy
+ template:
+ src: haproxy.cfg.j2
+ dest: /etc/haproxy/haproxy.cfg
+ owner: root
+ group: root
+ mode: 0644
+ notify: restart haproxy
+
+- name: Enable and start haproxy
+ service:
+ name: haproxy
+ state: started
+ enabled: yes
+ register: start_result
+
+- name: Pause 30 seconds if haproxy was just started
+ pause: seconds=30
+ when: start_result | changed
diff --git a/roles/haproxy/templates/haproxy.cfg.j2 b/roles/haproxy/templates/haproxy.cfg.j2
new file mode 100644
index 000000000..c932af72f
--- /dev/null
+++ b/roles/haproxy/templates/haproxy.cfg.j2
@@ -0,0 +1,76 @@
+# Global settings
+#---------------------------------------------------------------------
+global
+ chroot /var/lib/haproxy
+ pidfile /var/run/haproxy.pid
+ maxconn 4000
+ user haproxy
+ group haproxy
+ daemon
+
+ # turn on stats unix socket
+ stats socket /var/lib/haproxy/stats
+
+#---------------------------------------------------------------------
+# common defaults that all the 'listen' and 'backend' sections will
+# use if not designated in their block
+#---------------------------------------------------------------------
+defaults
+ mode http
+ log global
+ option httplog
+ option dontlognull
+ option http-server-close
+ option forwardfor except 127.0.0.0/8
+ option redispatch
+ retries 3
+ timeout http-request 10s
+ timeout queue 1m
+ timeout connect 10s
+ timeout client 300s
+ timeout server 300s
+ timeout http-keep-alive 10s
+ timeout check 10s
+ maxconn 3000
+
+listen stats :9000
+ mode http
+ stats enable
+ stats uri /
+
+{% for frontend in haproxy_frontends %}
+frontend {{ frontend.name }}
+{% for bind in frontend.binds %}
+ bind {{ bind }}
+{% endfor %}
+ default_backend {{ frontend.default_backend }}
+{% if 'mode' in frontend %}
+ mode {{ frontend.mode }}
+{% endif %}
+{% if 'options' in frontend %}
+{% for option in frontend.options %}
+ option {{ option }}
+{% endfor %}
+{% endif %}
+{% if 'redirects' in frontend %}
+{% for redirect in frontend.redirects %}
+ redirect {{ redirect }}
+{% endfor %}
+{% endif %}
+{% endfor %}
+
+{% for backend in haproxy_backends %}
+backend {{ backend.name }}
+ balance {{ backend.balance }}
+{% if 'mode' in backend %}
+ mode {{ backend.mode }}
+{% endif %}
+{% if 'options' in backend %}
+{% for option in backend.options %}
+ option {{ option }}
+{% endfor %}
+{% endif %}
+{% for server in backend.servers %}
+ server {{ server.name }} {{ server.address }} {{ server.opts }}
+{% endfor %}
+{% endfor %}
diff --git a/roles/kube_nfs_volumes/README.md b/roles/kube_nfs_volumes/README.md
index 56c69c286..1520f79b2 100644
--- a/roles/kube_nfs_volumes/README.md
+++ b/roles/kube_nfs_volumes/README.md
@@ -44,6 +44,9 @@ kubernetes_url: https://10.245.1.2:6443
# Token to use for authentication to the API server
kubernetes_token: tJdce6Fn3cL1112YoIJ5m2exzAbzcPZX
+
+# API Version to use for kubernetes
+kube_api_version: v1
```
## Dependencies
diff --git a/roles/kube_nfs_volumes/defaults/main.yml b/roles/kube_nfs_volumes/defaults/main.yml
index e296492f9..bdd994d07 100644
--- a/roles/kube_nfs_volumes/defaults/main.yml
+++ b/roles/kube_nfs_volumes/defaults/main.yml
@@ -1,4 +1,10 @@
---
+kubernetes_url: https://172.30.0.1:443
+
+kube_api_version: v1
+
+kube_req_template: "../templates/{{ kube_api_version }}/nfs.json.j2"
+
# Options of NFS exports.
nfs_export_options: "*(rw,no_root_squash,insecure,no_subtree_check)"
diff --git a/roles/kube_nfs_volumes/tasks/main.yml b/roles/kube_nfs_volumes/tasks/main.yml
index f4a506234..d1dcf261a 100644
--- a/roles/kube_nfs_volumes/tasks/main.yml
+++ b/roles/kube_nfs_volumes/tasks/main.yml
@@ -16,10 +16,11 @@
- include: nfs.yml
- name: export physical volumes
- uri: url={{ kubernetes_url }}/api/v1beta3/persistentvolumes
- method=POST
- body='{{ lookup("template", "../templates/nfs.json.j2") }}'
- body_format=json
- status_code=201
- HEADER_Authorization="Bearer {{ kubernetes_token }}"
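+  # POST the rendered PV definition to the API server; only 201 Created is
+  # treated as success.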
+ uri:
+ url: "{{ kubernetes_url }}/api/{{ kube_api_version }}/persistentvolumes"
+ method: POST
+ body: "{{ lookup('template', kube_req_template) }}"
+ body_format: json
+ status_code: 201
+ HEADER_Authorization: "Bearer {{ kubernetes_token }}"
with_items: partition_pool
diff --git a/roles/kube_nfs_volumes/templates/v1/nfs.json.j2 b/roles/kube_nfs_volumes/templates/v1/nfs.json.j2
new file mode 120000
index 000000000..49c1191bc
--- /dev/null
+++ b/roles/kube_nfs_volumes/templates/v1/nfs.json.j2
@@ -0,0 +1 @@
+../v1beta3/nfs.json.j2 \ No newline at end of file
diff --git a/roles/kube_nfs_volumes/templates/nfs.json.j2 b/roles/kube_nfs_volumes/templates/v1beta3/nfs.json.j2
index b42886ef1..b42886ef1 100644
--- a/roles/kube_nfs_volumes/templates/nfs.json.j2
+++ b/roles/kube_nfs_volumes/templates/v1beta3/nfs.json.j2
diff --git a/roles/lib_zabbix/library/zbx_itemprototype.py b/roles/lib_zabbix/library/zbx_itemprototype.py
index e7fd6fa21..43498c015 100644
--- a/roles/lib_zabbix/library/zbx_itemprototype.py
+++ b/roles/lib_zabbix/library/zbx_itemprototype.py
@@ -67,7 +67,24 @@ def get_template(zapi, template_name):
return None
return content['result'][0]
-def get_type(ztype):
+def get_multiplier(inval):
+    ''' Determine the multiplier: returns a (formula, use_multiplier) tuple
+        where use_multiplier is 1 only when inval parses as an integer.
+    '''
+    if inval is None or inval == '':
+ return None, 0
+
+ rval = None
+ try:
+ rval = int(inval)
+ except ValueError:
+ pass
+
+ if rval:
+ return rval, 1
+
+ return rval, 0
+
+def get_zabbix_type(ztype):
'''
    Determine which Zabbix item type this is
'''
@@ -87,6 +104,7 @@ def get_type(ztype):
'telnet': 14,
'calculated': 15,
'JMX': 16,
+ 'SNMP trap': 17,
}
for typ in _types.keys():
@@ -153,16 +171,21 @@ def main():
name=dict(default=None, type='str'),
key=dict(default=None, type='str'),
description=dict(default=None, type='str'),
+ template_name=dict(default=None, type='str'),
interfaceid=dict(default=None, type='int'),
- ztype=dict(default='trapper', type='str'),
+ zabbix_type=dict(default='trapper', type='str'),
value_type=dict(default='float', type='str'),
delay=dict(default=60, type='int'),
lifetime=dict(default=30, type='int'),
state=dict(default='present', type='str'),
status=dict(default='enabled', type='str'),
applications=dict(default=[], type='list'),
- template_name=dict(default=None, type='str'),
discoveryrule_key=dict(default=None, type='str'),
+ interval=dict(default=60, type='int'),
+ delta=dict(default=0, type='int'),
+ multiplier=dict(default=None, type='str'),
+ units=dict(default=None, type='str'),
+
),
#supports_check_mode=True
)
@@ -205,15 +228,23 @@ def main():
# Create and Update
if state == 'present':
+
+ formula, use_multiplier = get_multiplier(module.params['multiplier'])
+
params = {'name': module.params['name'],
'key_': module.params['key'],
'hostid': template['templateid'],
'interfaceid': module.params['interfaceid'],
'ruleid': get_rule_id(zapi, module.params['discoveryrule_key'], template['templateid']),
- 'type': get_type(module.params['ztype']),
+ 'type': get_zabbix_type(module.params['zabbix_type']),
'value_type': get_value_type(module.params['value_type']),
'applications': get_app_ids(zapi, module.params['applications'], template['templateid']),
+ 'formula': formula,
+ 'multiplier': use_multiplier,
'description': module.params['description'],
+ 'units': module.params['units'],
+ 'delay': module.params['interval'],
+ 'delta': module.params['delta'],
}
if params['type'] in [2, 5, 7, 8, 11, 15]:
diff --git a/roles/lib_zabbix/tasks/create_template.yml b/roles/lib_zabbix/tasks/create_template.yml
index ac9cf756b..44c4e6766 100644
--- a/roles/lib_zabbix/tasks/create_template.yml
+++ b/roles/lib_zabbix/tasks/create_template.yml
@@ -84,6 +84,10 @@
template_name: "{{ template.name }}"
applications: "{{ item.applications }}"
description: "{{ item.description | default('', True) }}"
+ multiplier: "{{ item.multiplier | default('', True) }}"
+ units: "{{ item.units | default('', True) }}"
+ interval: "{{ item.interval | default(60, True) }}"
+ delta: "{{ item.delta | default(0, True) }}"
with_items: template.zitemprototypes
when: template.zitemprototypes is defined
diff --git a/roles/openshift_ansible_inventory/tasks/main.yml b/roles/openshift_ansible_inventory/tasks/main.yml
index 9cc15c0a8..f6919dada 100644
--- a/roles/openshift_ansible_inventory/tasks/main.yml
+++ b/roles/openshift_ansible_inventory/tasks/main.yml
@@ -1,12 +1,16 @@
---
- yum:
- name: openshift-ansible-inventory
+ name: "{{ item }}"
state: present
+ with_items:
+ - openshift-ansible-inventory
+ - openshift-ansible-inventory-aws
+ - openshift-ansible-inventory-gce
- name:
copy:
content: "{{ oo_inventory_accounts | to_nice_yaml }}"
- dest: /etc/ansible/multi_ec2.yaml
+ dest: /etc/ansible/multi_inventory.yaml
group: "{{ oo_inventory_group }}"
owner: "{{ oo_inventory_owner }}"
mode: "0640"
@@ -20,17 +24,17 @@
- file:
state: link
- src: /usr/share/ansible/inventory/multi_ec2.py
- dest: /etc/ansible/inventory/multi_ec2.py
+ src: /usr/share/ansible/inventory/multi_inventory.py
+ dest: /etc/ansible/inventory/multi_inventory.py
owner: root
group: libra_ops
# This cron uses the above location to call its job
- name: Cron to keep cache fresh
cron:
- name: 'multi_ec2_inventory'
+ name: 'multi_inventory'
minute: '*/10'
- job: '/usr/share/ansible/inventory/multi_ec2.py --refresh-cache &> /dev/null'
+ job: '/usr/share/ansible/inventory/multi_inventory.py --refresh-cache &> /dev/null'
when: oo_cron_refresh_cache is defined and oo_cron_refresh_cache
- name: Set cache location
diff --git a/roles/openshift_common/tasks/main.yml b/roles/openshift_common/tasks/main.yml
index 73bd28630..38d5a08e4 100644
--- a/roles/openshift_common/tasks/main.yml
+++ b/roles/openshift_common/tasks/main.yml
@@ -1,4 +1,8 @@
---
+- fail:
+    msg: Flannel cannot be used with the OpenShift SDN
+ when: openshift_use_openshift_sdn | default(false) | bool and openshift_use_flannel | default(false) | bool
+
- name: Set common Cluster facts
openshift_facts:
role: common
@@ -13,6 +17,7 @@
sdn_network_plugin_name: "{{ os_sdn_network_plugin_name | default(None) }}"
deployment_type: "{{ openshift_deployment_type }}"
use_fluentd: "{{ openshift_use_fluentd | default(None) }}"
+ use_flannel: "{{ openshift_use_flannel | default(None) }}"
- name: Set hostname
hostname: name={{ openshift.common.hostname }}
diff --git a/roles/openshift_examples/defaults/main.yml b/roles/openshift_examples/defaults/main.yml
index 2043985ec..8e8bc6868 100644
--- a/roles/openshift_examples/defaults/main.yml
+++ b/roles/openshift_examples/defaults/main.yml
@@ -14,5 +14,7 @@ db_templates_base: "{{ examples_base }}/db-templates"
xpaas_image_streams: "{{ examples_base }}/xpaas-streams/jboss-image-streams.json"
xpaas_templates_base: "{{ examples_base }}/xpaas-templates"
quickstarts_base: "{{ examples_base }}/quickstart-templates"
+infrastructure_origin_base: "{{ examples_base }}/infrastructure-templates/origin"
+infrastructure_enterprise_base: "{{ examples_base }}/infrastructure-templates/enterprise"
openshift_examples_import_command: "create"
diff --git a/roles/openshift_examples/examples-sync.sh b/roles/openshift_examples/examples-sync.sh
index 21137e31b..3fda86907 100755
--- a/roles/openshift_examples/examples-sync.sh
+++ b/roles/openshift_examples/examples-sync.sh
@@ -7,8 +7,10 @@
EXAMPLES_BASE=$(pwd)/files/examples
find files/examples -name '*.json' -delete
+find files/examples -name '*.yaml' -delete
TEMP=`mktemp -d`
pushd $TEMP
+
wget https://github.com/openshift/origin/archive/master.zip -O origin-master.zip
wget https://github.com/openshift/django-ex/archive/master.zip -O django-ex-master.zip
wget https://github.com/openshift/rails-ex/archive/master.zip -O rails-ex-master.zip
@@ -33,5 +35,11 @@ cp dancer-ex-master/openshift/templates/* ${EXAMPLES_BASE}/quickstart-templates/
cp cakephp-ex-master/openshift/templates/* ${EXAMPLES_BASE}/quickstart-templates/
mv application-templates-master/jboss-image-streams.json ${EXAMPLES_BASE}/xpaas-streams/
find application-templates-master/ -name '*.json' ! -wholename '*secret*' -exec mv {} ${EXAMPLES_BASE}/xpaas-templates/ \;
+
+wget https://raw.githubusercontent.com/openshift/origin-metrics/master/metrics.yaml -O ${EXAMPLES_BASE}/infrastructure-templates/origin/metrics-deployer.yaml
+cp ${EXAMPLES_BASE}/infrastructure-templates/origin/metrics-*.yaml ${EXAMPLES_BASE}/infrastructure-templates/enterprise/
+wget https://raw.githubusercontent.com/openshift/origin-aggregated-logging/master/deployment/deployer.yaml -O ${EXAMPLES_BASE}/infrastructure-templates/origin/logging-deployer.yaml
+wget https://raw.githubusercontent.com/openshift/origin-aggregated-logging/enterprise/deployment/deployer.yaml -O ${EXAMPLES_BASE}/infrastructure-templates/enterprise/logging-deployer.yaml
+
popd
git diff files/examples
diff --git a/roles/openshift_examples/files/examples/infrastructure-templates/enterprise/logging-deployer.yaml b/roles/openshift_examples/files/examples/infrastructure-templates/enterprise/logging-deployer.yaml
new file mode 100644
index 000000000..b3b60bf9b
--- /dev/null
+++ b/roles/openshift_examples/files/examples/infrastructure-templates/enterprise/logging-deployer.yaml
@@ -0,0 +1,151 @@
+apiVersion: "v1"
+kind: "Template"
+metadata:
+ name: logging-deployer-template
+ annotations:
+ description: "Template for deploying everything needed for aggregated logging. Requires cluster-admin 'logging-deployer' service account and 'logging-deployer' secret."
+ tags: "infrastructure"
+labels:
+ logging-infra: deployer
+ provider: openshift
+ component: deployer
+objects:
+-
+ apiVersion: v1
+ kind: Pod
+ metadata:
+ generateName: logging-deployer-
+ spec:
+ containers:
+ - image: ${IMAGE_PREFIX}logging-deployment:${IMAGE_VERSION}
+ imagePullPolicy: Always
+ name: deployer
+ volumeMounts:
+ - name: secret
+ mountPath: /secret
+ readOnly: true
+ - name: empty
+ mountPath: /etc/deploy
+ env:
+ - name: PROJECT
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: IMAGE_PREFIX
+ value: ${IMAGE_PREFIX}
+ - name: IMAGE_VERSION
+ value: ${IMAGE_VERSION}
+ - name: ENABLE_OPS_CLUSTER
+ value: ${ENABLE_OPS_CLUSTER}
+ - name: KIBANA_HOSTNAME
+ value: ${KIBANA_HOSTNAME}
+ - name: KIBANA_OPS_HOSTNAME
+ value: ${KIBANA_OPS_HOSTNAME}
+ - name: PUBLIC_MASTER_URL
+ value: ${PUBLIC_MASTER_URL}
+ - name: MASTER_URL
+ value: ${MASTER_URL}
+ - name: ES_INSTANCE_RAM
+ value: ${ES_INSTANCE_RAM}
+ - name: ES_CLUSTER_SIZE
+ value: ${ES_CLUSTER_SIZE}
+ - name: ES_NODE_QUORUM
+ value: ${ES_NODE_QUORUM}
+ - name: ES_RECOVER_AFTER_NODES
+ value: ${ES_RECOVER_AFTER_NODES}
+ - name: ES_RECOVER_EXPECTED_NODES
+ value: ${ES_RECOVER_EXPECTED_NODES}
+ - name: ES_RECOVER_AFTER_TIME
+ value: ${ES_RECOVER_AFTER_TIME}
+ - name: ES_OPS_INSTANCE_RAM
+ value: ${ES_OPS_INSTANCE_RAM}
+ - name: ES_OPS_CLUSTER_SIZE
+ value: ${ES_OPS_CLUSTER_SIZE}
+ - name: ES_OPS_NODE_QUORUM
+ value: ${ES_OPS_NODE_QUORUM}
+ - name: ES_OPS_RECOVER_AFTER_NODES
+ value: ${ES_OPS_RECOVER_AFTER_NODES}
+ - name: ES_OPS_RECOVER_EXPECTED_NODES
+ value: ${ES_OPS_RECOVER_EXPECTED_NODES}
+ - name: ES_OPS_RECOVER_AFTER_TIME
+ value: ${ES_OPS_RECOVER_AFTER_TIME}
+ dnsPolicy: ClusterFirst
+ restartPolicy: Never
+ serviceAccount: logging-deployer
+ volumes:
+ - name: empty
+ emptyDir: {}
+ - name: secret
+ secret:
+ secretName: logging-deployer
+parameters:
+-
+ description: 'Specify prefix for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set prefix "openshift/origin-"'
+ name: IMAGE_PREFIX
+ value: "registry.access.redhat.com/openshift3/"
+-
+ description: 'Specify version for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set version "v1.1"'
+ name: IMAGE_VERSION
+ value: "3.1.0"
+-
+ description: "If true, set up to use a second ES cluster for ops logs."
+ name: ENABLE_OPS_CLUSTER
+ value: "false"
+-
+ description: "External hostname where clients will reach kibana"
+ name: KIBANA_HOSTNAME
+ required: true
+-
+ description: "External hostname at which admins will visit the ops Kibana."
+ name: KIBANA_OPS_HOSTNAME
+ value: kibana-ops.example.com
+-
+ description: "External URL for the master, for OAuth purposes"
+ name: PUBLIC_MASTER_URL
+ required: true
+-
+ description: "Internal URL for the master, for authentication retrieval"
+ name: MASTER_URL
+ value: "https://kubernetes.default.svc.cluster.local"
+-
+ description: "Amount of RAM to reserve per ElasticSearch instance."
+ name: ES_INSTANCE_RAM
+ value: "8G"
+-
+ description: "How many instances of ElasticSearch to deploy."
+ name: ES_CLUSTER_SIZE
+ required: true
+-
+ description: "Number of nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1."
+ name: ES_NODE_QUORUM
+-
+ description: "Number of nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_CLUSTER_SIZE."
+ name: ES_RECOVER_AFTER_NODES
+-
+ description: "Number of nodes desired to be present before the cluster will recover from a full restart. By default, ES_CLUSTER_SIZE."
+ name: ES_RECOVER_EXPECTED_NODES
+-
+ description: "Timeout for *expected* nodes to be present when cluster is recovering from a full restart."
+ name: ES_RECOVER_AFTER_TIME
+ value: "5m"
+-
+ description: "Amount of RAM to reserve per ops ElasticSearch instance."
+ name: ES_OPS_INSTANCE_RAM
+ value: "8G"
+-
+ description: "How many ops instances of ElasticSearch to deploy. By default, ES_CLUSTER_SIZE."
+ name: ES_OPS_CLUSTER_SIZE
+-
+ description: "Number of ops nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1."
+ name: ES_OPS_NODE_QUORUM
+-
+ description: "Number of ops nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_OPS_CLUSTER_SIZE."
+ name: ES_OPS_RECOVER_AFTER_NODES
+-
+ description: "Number of ops nodes desired to be present before the cluster will recover from a full restart. By default, ES_OPS_CLUSTER_SIZE."
+ name: ES_OPS_RECOVER_EXPECTED_NODES
+-
+ description: "Timeout for *expected* ops nodes to be present when cluster is recovering from a full restart."
+ name: ES_OPS_RECOVER_AFTER_TIME
+ value: "5m"
+
diff --git a/roles/openshift_examples/files/examples/infrastructure-templates/enterprise/metrics-deployer.yaml b/roles/openshift_examples/files/examples/infrastructure-templates/enterprise/metrics-deployer.yaml
new file mode 100644
index 000000000..d823b2587
--- /dev/null
+++ b/roles/openshift_examples/files/examples/infrastructure-templates/enterprise/metrics-deployer.yaml
@@ -0,0 +1,116 @@
+#!/bin/bash
+#
+# Copyright 2014-2015 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+apiVersion: "v1"
+kind: "Template"
+metadata:
+ name: metrics-deployer-template
+ annotations:
+ description: "Template for deploying the required Metrics integration. Requires cluster-admin 'metrics-deployer' service account and 'metrics-deployer' secret."
+ tags: "infrastructure"
+labels:
+ metrics-infra: deployer
+ provider: openshift
+ component: deployer
+objects:
+-
+ apiVersion: v1
+ kind: Pod
+ metadata:
+ generateName: metrics-deployer-
+ spec:
+ containers:
+ - image: ${IMAGE_PREFIX}metrics-deployer:${IMAGE_VERSION}
+ name: deployer
+ volumeMounts:
+ - name: secret
+ mountPath: /secret
+ readOnly: true
+ - name: empty
+ mountPath: /etc/deploy
+ env:
+ - name: PROJECT
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: IMAGE_PREFIX
+ value: ${IMAGE_PREFIX}
+ - name: IMAGE_VERSION
+ value: ${IMAGE_VERSION}
+ - name: PUBLIC_MASTER_URL
+ value: ${PUBLIC_MASTER_URL}
+ - name: MASTER_URL
+ value: ${MASTER_URL}
+ - name: REDEPLOY
+ value: ${REDEPLOY}
+ - name: USE_PERSISTENT_STORAGE
+ value: ${USE_PERSISTENT_STORAGE}
+ - name: HAWKULAR_METRICS_HOSTNAME
+ value: ${HAWKULAR_METRICS_HOSTNAME}
+ - name: CASSANDRA_NODES
+ value: ${CASSANDRA_NODES}
+ - name: CASSANDRA_PV_SIZE
+ value: ${CASSANDRA_PV_SIZE}
+ - name: METRIC_DURATION
+ value: ${METRIC_DURATION}
+ dnsPolicy: ClusterFirst
+ restartPolicy: Never
+ serviceAccount: metrics-deployer
+ volumes:
+ - name: empty
+ emptyDir: {}
+ - name: secret
+ secret:
+ secretName: metrics-deployer
+parameters:
+-
+ description: 'Specify prefix for metrics components; e.g. for "openshift/origin-metrics-deployer:v1.1", set prefix "openshift/origin-"'
+ name: IMAGE_PREFIX
+ value: "hawkular/"
+-
+ description: 'Specify version for metrics components; e.g. for "openshift/origin-metrics-deployer:v1.1", set version "v1.1"'
+ name: IMAGE_VERSION
+ value: "0.7.0-SNAPSHOT"
+-
+ description: "Internal URL for the master, for authentication retrieval"
+ name: MASTER_URL
+ value: "https://kubernetes.default.svc:443"
+-
+ description: "External hostname where clients will reach Hawkular Metrics"
+ name: HAWKULAR_METRICS_HOSTNAME
+ required: true
+-
+ description: "If set to true the deployer will try and delete all the existing components before trying to redeploy."
+ name: REDEPLOY
+ value: "false"
+-
+ description: "Set to true for persistent storage, set to false to use non persistent storage"
+ name: USE_PERSISTENT_STORAGE
+ value: "true"
+-
+ description: "The number of Cassandra Nodes to deploy for the initial cluster"
+ name: CASSANDRA_NODES
+ value: "1"
+-
+ description: "The persistent volume size for each of the Cassandra nodes"
+ name: CASSANDRA_PV_SIZE
+ value: "1Gi"
+-
+ description: "How many days metrics should be stored for."
+ name: METRIC_DURATION
+ value: "7"
diff --git a/roles/openshift_examples/files/examples/infrastructure-templates/origin/logging-deployer.yaml b/roles/openshift_examples/files/examples/infrastructure-templates/origin/logging-deployer.yaml
new file mode 100644
index 000000000..4c798e148
--- /dev/null
+++ b/roles/openshift_examples/files/examples/infrastructure-templates/origin/logging-deployer.yaml
@@ -0,0 +1,151 @@
+apiVersion: "v1"
+kind: "Template"
+metadata:
+ name: logging-deployer-template
+ annotations:
+ description: "Template for deploying everything needed for aggregated logging. Requires cluster-admin 'logging-deployer' service account and 'logging-deployer' secret."
+ tags: "infrastructure"
+labels:
+ logging-infra: deployer
+ provider: openshift
+ component: deployer
+objects:
+-
+ apiVersion: v1
+ kind: Pod
+ metadata:
+ generateName: logging-deployer-
+ spec:
+ containers:
+ - image: ${IMAGE_PREFIX}logging-deployment:${IMAGE_VERSION}
+ imagePullPolicy: Always
+ name: deployer
+ volumeMounts:
+ - name: secret
+ mountPath: /secret
+ readOnly: true
+ - name: empty
+ mountPath: /etc/deploy
+ env:
+ - name: PROJECT
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: IMAGE_PREFIX
+ value: ${IMAGE_PREFIX}
+ - name: IMAGE_VERSION
+ value: ${IMAGE_VERSION}
+ - name: ENABLE_OPS_CLUSTER
+ value: ${ENABLE_OPS_CLUSTER}
+ - name: KIBANA_HOSTNAME
+ value: ${KIBANA_HOSTNAME}
+ - name: KIBANA_OPS_HOSTNAME
+ value: ${KIBANA_OPS_HOSTNAME}
+ - name: PUBLIC_MASTER_URL
+ value: ${PUBLIC_MASTER_URL}
+ - name: MASTER_URL
+ value: ${MASTER_URL}
+ - name: ES_INSTANCE_RAM
+ value: ${ES_INSTANCE_RAM}
+ - name: ES_CLUSTER_SIZE
+ value: ${ES_CLUSTER_SIZE}
+ - name: ES_NODE_QUORUM
+ value: ${ES_NODE_QUORUM}
+ - name: ES_RECOVER_AFTER_NODES
+ value: ${ES_RECOVER_AFTER_NODES}
+ - name: ES_RECOVER_EXPECTED_NODES
+ value: ${ES_RECOVER_EXPECTED_NODES}
+ - name: ES_RECOVER_AFTER_TIME
+ value: ${ES_RECOVER_AFTER_TIME}
+ - name: ES_OPS_INSTANCE_RAM
+ value: ${ES_OPS_INSTANCE_RAM}
+ - name: ES_OPS_CLUSTER_SIZE
+ value: ${ES_OPS_CLUSTER_SIZE}
+ - name: ES_OPS_NODE_QUORUM
+ value: ${ES_OPS_NODE_QUORUM}
+ - name: ES_OPS_RECOVER_AFTER_NODES
+ value: ${ES_OPS_RECOVER_AFTER_NODES}
+ - name: ES_OPS_RECOVER_EXPECTED_NODES
+ value: ${ES_OPS_RECOVER_EXPECTED_NODES}
+ - name: ES_OPS_RECOVER_AFTER_TIME
+ value: ${ES_OPS_RECOVER_AFTER_TIME}
+ dnsPolicy: ClusterFirst
+ restartPolicy: Never
+ serviceAccount: logging-deployer
+ volumes:
+ - name: empty
+ emptyDir: {}
+ - name: secret
+ secret:
+ secretName: logging-deployer
+parameters:
+-
+ description: 'Specify prefix for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set prefix "openshift/origin-"'
+ name: IMAGE_PREFIX
+ value: "docker.io/openshift/origin-"
+-
+ description: 'Specify version for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set version "v1.1"'
+ name: IMAGE_VERSION
+ value: "latest"
+-
+ description: "If true, set up to use a second ES cluster for ops logs."
+ name: ENABLE_OPS_CLUSTER
+ value: "false"
+-
+ description: "External hostname where clients will reach kibana"
+ name: KIBANA_HOSTNAME
+ required: true
+-
+ description: "External hostname at which admins will visit the ops Kibana."
+ name: KIBANA_OPS_HOSTNAME
+ value: kibana-ops.example.com
+-
+ description: "External URL for the master, for OAuth purposes"
+ name: PUBLIC_MASTER_URL
+ required: true
+-
+ description: "Internal URL for the master, for authentication retrieval"
+ name: MASTER_URL
+ value: "https://kubernetes.default.svc.cluster.local"
+-
+ description: "Amount of RAM to reserve per ElasticSearch instance."
+ name: ES_INSTANCE_RAM
+ value: "8G"
+-
+ description: "How many instances of ElasticSearch to deploy."
+ name: ES_CLUSTER_SIZE
+ required: true
+-
+ description: "Number of nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1."
+ name: ES_NODE_QUORUM
+-
+ description: "Number of nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_CLUSTER_SIZE."
+ name: ES_RECOVER_AFTER_NODES
+-
+ description: "Number of nodes desired to be present before the cluster will recover from a full restart. By default, ES_CLUSTER_SIZE."
+ name: ES_RECOVER_EXPECTED_NODES
+-
+ description: "Timeout for *expected* nodes to be present when cluster is recovering from a full restart."
+ name: ES_RECOVER_AFTER_TIME
+ value: "5m"
+-
+ description: "Amount of RAM to reserve per ops ElasticSearch instance."
+ name: ES_OPS_INSTANCE_RAM
+ value: "8G"
+-
+ description: "How many ops instances of ElasticSearch to deploy. By default, ES_CLUSTER_SIZE."
+ name: ES_OPS_CLUSTER_SIZE
+-
+ description: "Number of ops nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1."
+ name: ES_OPS_NODE_QUORUM
+-
+ description: "Number of ops nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_OPS_CLUSTER_SIZE."
+ name: ES_OPS_RECOVER_AFTER_NODES
+-
+ description: "Number of ops nodes desired to be present before the cluster will recover from a full restart. By default, ES_OPS_CLUSTER_SIZE."
+ name: ES_OPS_RECOVER_EXPECTED_NODES
+-
+ description: "Timeout for *expected* ops nodes to be present when cluster is recovering from a full restart."
+ name: ES_OPS_RECOVER_AFTER_TIME
+ value: "5m"
+
diff --git a/roles/openshift_examples/files/examples/infrastructure-templates/origin/metrics-deployer.yaml b/roles/openshift_examples/files/examples/infrastructure-templates/origin/metrics-deployer.yaml
new file mode 100644
index 000000000..d823b2587
--- /dev/null
+++ b/roles/openshift_examples/files/examples/infrastructure-templates/origin/metrics-deployer.yaml
@@ -0,0 +1,116 @@
+#!/bin/bash
+#
+# Copyright 2014-2015 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+apiVersion: "v1"
+kind: "Template"
+metadata:
+ name: metrics-deployer-template
+ annotations:
+ description: "Template for deploying the required Metrics integration. Requires cluster-admin 'metrics-deployer' service account and 'metrics-deployer' secret."
+ tags: "infrastructure"
+labels:
+ metrics-infra: deployer
+ provider: openshift
+ component: deployer
+objects:
+-
+ apiVersion: v1
+ kind: Pod
+ metadata:
+ generateName: metrics-deployer-
+ spec:
+ containers:
+ - image: ${IMAGE_PREFIX}metrics-deployer:${IMAGE_VERSION}
+ name: deployer
+ volumeMounts:
+ - name: secret
+ mountPath: /secret
+ readOnly: true
+ - name: empty
+ mountPath: /etc/deploy
+ env:
+ - name: PROJECT
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: IMAGE_PREFIX
+ value: ${IMAGE_PREFIX}
+ - name: IMAGE_VERSION
+ value: ${IMAGE_VERSION}
+ - name: PUBLIC_MASTER_URL
+ value: ${PUBLIC_MASTER_URL}
+ - name: MASTER_URL
+ value: ${MASTER_URL}
+ - name: REDEPLOY
+ value: ${REDEPLOY}
+ - name: USE_PERSISTENT_STORAGE
+ value: ${USE_PERSISTENT_STORAGE}
+ - name: HAWKULAR_METRICS_HOSTNAME
+ value: ${HAWKULAR_METRICS_HOSTNAME}
+ - name: CASSANDRA_NODES
+ value: ${CASSANDRA_NODES}
+ - name: CASSANDRA_PV_SIZE
+ value: ${CASSANDRA_PV_SIZE}
+ - name: METRIC_DURATION
+ value: ${METRIC_DURATION}
+ dnsPolicy: ClusterFirst
+ restartPolicy: Never
+ serviceAccount: metrics-deployer
+ volumes:
+ - name: empty
+ emptyDir: {}
+ - name: secret
+ secret:
+ secretName: metrics-deployer
+parameters:
+-
+ description: 'Specify prefix for metrics components; e.g. for "openshift/origin-metrics-deployer:v1.1", set prefix "openshift/origin-"'
+ name: IMAGE_PREFIX
+ value: "hawkular/"
+-
+ description: 'Specify version for metrics components; e.g. for "openshift/origin-metrics-deployer:v1.1", set version "v1.1"'
+ name: IMAGE_VERSION
+ value: "0.7.0-SNAPSHOT"
+-
+ description: "Internal URL for the master, for authentication retrieval"
+ name: MASTER_URL
+ value: "https://kubernetes.default.svc:443"
+-
+ description: "External hostname where clients will reach Hawkular Metrics"
+ name: HAWKULAR_METRICS_HOSTNAME
+ required: true
+-
+ description: "If set to true the deployer will try and delete all the existing components before trying to redeploy."
+ name: REDEPLOY
+ value: "false"
+-
+ description: "Set to true for persistent storage, set to false to use non persistent storage"
+ name: USE_PERSISTENT_STORAGE
+ value: "true"
+-
+ description: "The number of Cassandra Nodes to deploy for the initial cluster"
+ name: CASSANDRA_NODES
+ value: "1"
+-
+ description: "The persistent volume size for each of the Cassandra nodes"
+ name: CASSANDRA_PV_SIZE
+ value: "1Gi"
+-
+ description: "How many days metrics should be stored for."
+ name: METRIC_DURATION
+ value: "7"
diff --git a/roles/openshift_examples/tasks/main.yml b/roles/openshift_examples/tasks/main.yml
index 40b7a5d6e..f48e207e7 100644
--- a/roles/openshift_examples/tasks/main.yml
+++ b/roles/openshift_examples/tasks/main.yml
@@ -37,6 +37,22 @@
failed_when: "'already exists' not in oex_import_quickstarts.stderr and oex_import_quickstarts.rc != 0"
changed_when: false
+- name: Import origin infrastructure-templates
+ command: >
+ {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} -n openshift -f {{ infrastructure_origin_base }}
+ when: openshift_examples_load_centos | bool
+ register: oex_import_infrastructure
+ failed_when: "'already exists' not in oex_import_infrastructure.stderr and oex_import_infrastructure.rc != 0"
+ changed_when: false
+
+- name: Import enterprise infrastructure-templates
+ command: >
+ {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} -n openshift -f {{ infrastructure_enterprise_base }}
+ when: openshift_examples_load_rhel | bool
+ register: oex_import_infrastructure
+ failed_when: "'already exists' not in oex_import_infrastructure.stderr and oex_import_infrastructure.rc != 0"
+ changed_when: false
+
- name: Import xPaas image streams
command: >
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 163e67f62..2e1075aca 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -20,6 +20,8 @@ EXAMPLES = '''
import ConfigParser
import copy
import os
+import StringIO
+import yaml
from distutils.util import strtobool
from distutils.version import LooseVersion
from netaddr import IPNetwork
@@ -307,6 +309,23 @@ def set_fluentd_facts_if_unset(facts):
facts['common']['use_fluentd'] = use_fluentd
return facts
+def set_flannel_facts_if_unset(facts):
+    """ Set flannel facts if not already present in facts dict
+        Args:
+            facts (dict): existing facts
+        Returns:
+            dict: the facts dict updated with the flannel facts if
+            they were not already present
+    """
+ if 'common' in facts:
+ if 'use_flannel' not in facts['common']:
+ use_flannel = False
+ facts['common']['use_flannel'] = use_flannel
+ return facts
+
def set_node_schedulability(facts):
""" Set schedulable facts if not already present in facts dict
Args:
@@ -407,7 +426,7 @@ def set_identity_providers_if_unset(facts):
name='allow_all', challenge=True, login=True,
kind='AllowAllPasswordIdentityProvider'
)
- if deployment_type == 'enterprise':
+ if deployment_type in ['enterprise', 'atomic-enterprise', 'openshift-enterprise']:
identity_provider = dict(
name='deny_all', challenge=True, login=True,
kind='DenyAllPasswordIdentityProvider'
@@ -484,12 +503,16 @@ def set_aggregate_facts(facts):
dict: the facts dict updated with aggregated facts
"""
all_hostnames = set()
+ internal_hostnames = set()
if 'common' in facts:
all_hostnames.add(facts['common']['hostname'])
all_hostnames.add(facts['common']['public_hostname'])
all_hostnames.add(facts['common']['ip'])
all_hostnames.add(facts['common']['public_ip'])
+ internal_hostnames.add(facts['common']['hostname'])
+ internal_hostnames.add(facts['common']['ip'])
+
if 'master' in facts:
# FIXME: not sure why but facts['dns']['domain'] fails
cluster_domain = 'cluster.local'
@@ -497,14 +520,61 @@ def set_aggregate_facts(facts):
all_hostnames.add(facts['master']['cluster_hostname'])
if 'cluster_public_hostname' in facts['master']:
all_hostnames.add(facts['master']['cluster_public_hostname'])
- all_hostnames.update(['openshift', 'openshift.default', 'openshift.default.svc',
- 'openshift.default.svc.' + cluster_domain, 'kubernetes', 'kubernetes.default',
- 'kubernetes.default.svc', 'kubernetes.default.svc.' + cluster_domain])
+ svc_names = ['openshift', 'openshift.default', 'openshift.default.svc',
+ 'openshift.default.svc.' + cluster_domain, 'kubernetes', 'kubernetes.default',
+ 'kubernetes.default.svc', 'kubernetes.default.svc.' + cluster_domain]
+ all_hostnames.update(svc_names)
+ internal_hostnames.update(svc_names)
first_svc_ip = str(IPNetwork(facts['master']['portal_net'])[1])
all_hostnames.add(first_svc_ip)
+ internal_hostnames.add(first_svc_ip)
facts['common']['all_hostnames'] = list(all_hostnames)
+ facts['common']['internal_hostnames'] = list(internal_hostnames)
+
+ return facts
+
+
+def set_etcd_facts_if_unset(facts):
+ """
+ If using embedded etcd, loads the data directory from master-config.yaml.
+
+ If using standalone etcd, loads ETCD_DATA_DIR from etcd.conf.
+ If anything goes wrong parsing these, the fact will not be set.
+ """
+ if 'etcd' in facts:
+ if 'master' in facts and facts['master']['embedded_etcd']:
+ try:
+ # Parse master config to find actual etcd data dir:
+ master_cfg_path = os.path.join(facts['common']['config_base'],
+ 'master/master-config.yaml')
+ master_cfg_f = open(master_cfg_path, 'r')
+ config = yaml.safe_load(master_cfg_f.read())
+ master_cfg_f.close()
+
+ facts['etcd']['etcd_data_dir'] = \
+ config['etcdConfig']['storageDirectory']
+ # We don't want exceptions bubbling up here:
+ # pylint: disable=broad-except
+ except Exception:
+ pass
+ else:
+ # Read ETCD_DATA_DIR from /etc/etcd/etcd.conf:
+ try:
+ # Add a fake section for parsing:
+ ini_str = '[root]\n' + open('/etc/etcd/etcd.conf', 'r').read()
+ ini_fp = StringIO.StringIO(ini_str)
+ config = ConfigParser.RawConfigParser()
+ config.readfp(ini_fp)
+ etcd_data_dir = config.get('root', 'ETCD_DATA_DIR')
+ if etcd_data_dir.startswith('"') and etcd_data_dir.endswith('"'):
+ etcd_data_dir = etcd_data_dir[1:-1]
+ facts['etcd']['etcd_data_dir'] = etcd_data_dir
+ # We don't want exceptions bubbling up here:
+ # pylint: disable=broad-except
+ except Exception:
+ pass
return facts
def set_deployment_facts_if_unset(facts):
@@ -534,21 +604,18 @@ def set_deployment_facts_if_unset(facts):
config_base = '/etc/origin'
if deployment_type in ['enterprise', 'online']:
config_base = '/etc/openshift'
+ # Handle upgrade scenarios when symlinks don't yet exist:
+ if not os.path.exists(config_base) and os.path.exists('/etc/openshift'):
+ config_base = '/etc/openshift'
facts['common']['config_base'] = config_base
if 'data_dir' not in facts['common']:
data_dir = '/var/lib/origin'
if deployment_type in ['enterprise', 'online']:
data_dir = '/var/lib/openshift'
+ # Handle upgrade scenarios when symlinks don't yet exist:
+ if not os.path.exists(data_dir) and os.path.exists('/var/lib/openshift'):
+ data_dir = '/var/lib/openshift'
facts['common']['data_dir'] = data_dir
- facts['common']['version'] = version = get_openshift_version()
- if version is not None:
- if deployment_type == 'origin':
- version_gt_3_1_or_1_1 = LooseVersion(version) > LooseVersion('1.0.6')
- else:
- version_gt_3_1_or_1_1 = LooseVersion(version) > LooseVersion('3.0.2.900')
- else:
- version_gt_3_1_or_1_1 = True
- facts['common']['version_greater_than_3_1_or_1_1'] = version_gt_3_1_or_1_1
for role in ('master', 'node'):
if role in facts:
@@ -582,12 +649,34 @@ def set_deployment_facts_if_unset(facts):
return facts
+def set_version_facts_if_unset(facts):
+ """ Set version facts. This currently includes common.version and
+ common.version_greater_than_3_1_or_1_1.
-def set_sdn_facts_if_unset(facts):
+ Args:
+ facts (dict): existing facts
+ Returns:
+ dict: the facts dict updated with version facts.
+ """
+ if 'common' in facts:
+ deployment_type = facts['common']['deployment_type']
+ facts['common']['version'] = version = get_openshift_version()
+ if version is not None:
+ if deployment_type == 'origin':
+ version_gt_3_1_or_1_1 = LooseVersion(version) > LooseVersion('1.0.6')
+ else:
+ version_gt_3_1_or_1_1 = LooseVersion(version) > LooseVersion('3.0.2.900')
+ else:
+ version_gt_3_1_or_1_1 = True
+ facts['common']['version_greater_than_3_1_or_1_1'] = version_gt_3_1_or_1_1
+ return facts
+
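A note on the thresholds above: '1.0.6' and '3.0.2.900' appear to be the last version strings published before Origin 1.1 and Enterprise 3.1 respectively, so comparing "greater than" either one identifies the newer release stream. A quick illustration of the LooseVersion semantics this relies on (the version strings are illustrative):

    from distutils.version import LooseVersion

    print LooseVersion('1.1.0') > LooseVersion('1.0.6')      # True
    print LooseVersion('1.0.6') > LooseVersion('1.0.6')      # False
    print LooseVersion('3.1.0') > LooseVersion('3.0.2.900')  # True
    # A shorter version that is a prefix of a longer one compares less:
    print LooseVersion('3.0.2') > LooseVersion('3.0.2.900')  # False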
+def set_sdn_facts_if_unset(facts, system_facts):
""" Set sdn facts if not already present in facts dict
Args:
facts (dict): existing facts
+ system_facts (dict): ansible_facts
Returns:
dict: the facts dict updated with the generated sdn facts if they
were not already present
@@ -606,9 +695,18 @@ def set_sdn_facts_if_unset(facts):
if 'sdn_host_subnet_length' not in facts['master']:
facts['master']['sdn_host_subnet_length'] = '8'
- if 'node' in facts:
- if 'sdn_mtu' not in facts['node']:
- facts['node']['sdn_mtu'] = '1450'
+ if 'node' in facts and 'sdn_mtu' not in facts['node']:
+ node_ip = facts['common']['ip']
+
+ # default MTU if interface MTU cannot be detected
+ facts['node']['sdn_mtu'] = '1450'
+
+ for val in system_facts.itervalues():
+ if isinstance(val, dict) and 'mtu' in val:
+ mtu = val['mtu']
+
+ if 'ipv4' in val and val['ipv4'].get('address') == node_ip:
+ facts['node']['sdn_mtu'] = str(mtu - 50)
return facts
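The mtu - 50 above reserves headroom for SDN encapsulation: the default OpenShift SDN is VXLAN-based, and VXLAN framing consumes roughly 50 bytes per packet. A standalone sketch of the same interface scan, with a hand-built dict standing in for the Ansible system facts:

    node_ip = '10.0.0.5'  # hypothetical value of openshift.common.ip
    system_facts = {
        'ansible_lo': {'mtu': 65536, 'ipv4': {'address': '127.0.0.1'}},
        'ansible_eth0': {'mtu': 1500, 'ipv4': {'address': '10.0.0.5'}},
    }

    sdn_mtu = '1450'  # fallback when no interface matches the node IP
    for val in system_facts.itervalues():
        if isinstance(val, dict) and 'mtu' in val:
            if 'ipv4' in val and val['ipv4'].get('address') == node_ip:
                sdn_mtu = str(val['mtu'] - 50)
    print sdn_mtu  # -> 1450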
@@ -841,7 +939,7 @@ class OpenShiftFacts(object):
Raises:
OpenShiftFactsUnsupportedRoleError:
"""
- known_roles = ['common', 'master', 'node', 'master_sdn', 'node_sdn', 'dns']
+ known_roles = ['common', 'master', 'node', 'master_sdn', 'node_sdn', 'dns', 'etcd']
def __init__(self, role, filename, local_facts):
self.changed = False
@@ -875,13 +973,16 @@ class OpenShiftFacts(object):
facts = set_url_facts_if_unset(facts)
facts = set_project_cfg_facts_if_unset(facts)
facts = set_fluentd_facts_if_unset(facts)
+ facts = set_flannel_facts_if_unset(facts)
facts = set_node_schedulability(facts)
facts = set_master_selectors(facts)
facts = set_metrics_facts_if_unset(facts)
facts = set_identity_providers_if_unset(facts)
- facts = set_sdn_facts_if_unset(facts)
+ facts = set_sdn_facts_if_unset(facts, self.system_facts)
facts = set_deployment_facts_if_unset(facts)
+ facts = set_version_facts_if_unset(facts)
facts = set_aggregate_facts(facts)
+ facts = set_etcd_facts_if_unset(facts)
return dict(openshift=facts)
def get_defaults(self, roles):
@@ -920,11 +1021,12 @@ class OpenShiftFacts(object):
session_name='ssn', session_secrets_file='',
access_token_max_seconds=86400,
auth_token_max_seconds=500,
- oauth_grant_method='auto', cluster_defer_ha=False)
+ oauth_grant_method='auto')
defaults['master'] = master
if 'node' in roles:
- node = dict(labels={}, annotations={}, portal_net='172.30.0.0/16')
+ node = dict(labels={}, annotations={}, portal_net='172.30.0.0/16',
+ iptables_sync_period='5s')
defaults['node'] = node
return defaults
diff --git a/roles/openshift_facts/tasks/main.yml b/roles/openshift_facts/tasks/main.yml
index a46b45b8c..a28aa7ba2 100644
--- a/roles/openshift_facts/tasks/main.yml
+++ b/roles/openshift_facts/tasks/main.yml
@@ -6,8 +6,11 @@
- ansible_version | version_compare('1.9.0', 'ne')
- ansible_version | version_compare('1.9.0.1', 'ne')
-- name: Ensure python-netaddr is installed
- yum: pkg=python-netaddr state=installed
+- name: Ensure python-netaddr and PyYaml are installed
+ yum: pkg={{ item }} state=installed
+ with_items:
+ - python-netaddr
+ - PyYAML
- name: Gather Cluster facts
openshift_facts:
diff --git a/roles/openshift_master/handlers/main.yml b/roles/openshift_master/handlers/main.yml
index 37028e0f6..4b9500cbd 100644
--- a/roles/openshift_master/handlers/main.yml
+++ b/roles/openshift_master/handlers/main.yml
@@ -2,3 +2,13 @@
- name: restart master
service: name={{ openshift.common.service_type }}-master state=restarted
when: (not openshift_master_ha | bool) and (not master_service_status_changed | default(false))
+
+- name: restart master api
+ service: name={{ openshift.common.service_type }}-master-api state=restarted
+ when: (openshift_master_ha | bool) and (not master_api_service_status_changed | default(false)) and openshift.master.cluster_method == 'native'
+
+# TODO: need to fix up ignore_errors here
+- name: restart master controllers
+ service: name={{ openshift.common.service_type }}-master-controllers state=restarted
+ when: (openshift_master_ha | bool) and (not master_controllers_service_status_changed | default(false)) and openshift.master.cluster_method == 'native'
+ ignore_errors: yes
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index 3a886935f..185bfb8f3 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -9,16 +9,22 @@
when: openshift_master_oauth_grant_method is defined
- fail:
+ msg: "openshift_master_cluster_method must be set to either 'native' or 'pacemaker' for multi-master installations"
+ when: openshift_master_ha | bool and ((openshift_master_cluster_method is not defined) or (openshift_master_cluster_method is defined and openshift_master_cluster_method not in ["native", "pacemaker"]))
+- fail:
+ msg: "'native' high availability is not supported for the requested OpenShift version"
+ when: openshift_master_ha | bool and openshift_master_cluster_method == "native" and not openshift.common.version_greater_than_3_1_or_1_1 | bool
+- fail:
msg: "openshift_master_cluster_password must be set for multi-master installations"
- when: openshift_master_ha | bool and not openshift.master.cluster_defer_ha | bool and openshift_master_cluster_password is not defined
+ when: openshift_master_ha | bool and openshift_master_cluster_method == "pacemaker" and (openshift_master_cluster_password is not defined or not openshift_master_cluster_password)
- name: Set master facts
openshift_facts:
role: master
local_facts:
+ cluster_method: "{{ openshift_master_cluster_method | default(None) }}"
cluster_hostname: "{{ openshift_master_cluster_hostname | default(None) }}"
cluster_public_hostname: "{{ openshift_master_cluster_public_hostname | default(None) }}"
- cluster_defer_ha: "{{ openshift_master_cluster_defer_ha | default(None) }}"
debug_level: "{{ openshift_master_debug_level | default(openshift.common.debug_level) }}"
api_port: "{{ openshift_master_api_port | default(None) }}"
api_url: "{{ openshift_master_api_url | default(None) }}"
@@ -41,6 +47,8 @@
portal_net: "{{ openshift_master_portal_net | default(None) }}"
session_max_seconds: "{{ openshift_master_session_max_seconds | default(None) }}"
session_name: "{{ openshift_master_session_name | default(None) }}"
+ session_auth_secrets: "{{ openshift_master_session_auth_secrets | default(None) }}"
+ session_encryption_secrets: "{{ openshift_master_session_encryption_secrets | default(None) }}"
session_secrets_file: "{{ openshift_master_session_secrets_file | default(None) }}"
access_token_max_seconds: "{{ openshift_master_access_token_max_seconds | default(None) }}"
auth_token_max_seconds: "{{ openshift_master_auth_token_max_seconds | default(None) }}"
@@ -63,6 +71,8 @@
controller_args: "{{ osm_controller_args | default(None) }}"
infra_nodes: "{{ num_infra | default(None) }}"
disabled_features: "{{ osm_disabled_features | default(None) }}"
+ master_count: "{{ openshift_master_count | default(None) }}"
+ controller_lease_ttl: "{{ osm_controller_lease_ttl | default(None) }}"
- name: Install Master package
yum: pkg={{ openshift.common.service_type }}-master{{ openshift_version }} state=present
@@ -77,7 +87,7 @@
domain: cluster.local
when: openshift.master.embedded_dns
-- name: Create config parent directory if it doesn't exist
+- name: Create config parent directory if it does not exist
file:
path: "{{ openshift_master_config_dir }}"
state: directory
@@ -90,6 +100,8 @@
creates: "{{ openshift_master_policy }}"
notify:
- restart master
+ - restart master api
+ - restart master controllers
- name: Create the scheduler config
template:
@@ -98,6 +110,8 @@
backup: true
notify:
- restart master
+ - restart master api
+ - restart master controllers
- name: Install httpd-tools if needed
yum: pkg=httpd-tools state=present
@@ -120,6 +134,44 @@
when: item.kind == 'HTPasswdPasswordIdentityProvider'
with_items: openshift.master.identity_providers
+# workaround for missing systemd unit files for controllers/api
+- name: Create the api service file
+ template:
+ src: atomic-openshift-master-api.service.j2
+ dest: /usr/lib/systemd/system/{{ openshift.common.service_type }}-master-api.service
+ force: no
+ when: openshift_master_ha | bool and openshift_master_cluster_method == "native"
+- name: Create the controllers service file
+ template:
+ src: atomic-openshift-master-controllers.service.j2
+ dest: /usr/lib/systemd/system/{{ openshift.common.service_type }}-master-controllers.service
+ force: no
+ when: openshift_master_ha | bool and openshift_master_cluster_method == "native"
+- name: Create the api env file
+ template:
+ src: atomic-openshift-master-api.j2
+ dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-api
+ force: no
+ when: openshift_master_ha | bool and openshift_master_cluster_method == "native"
+- name: Create the controllers env file
+ template:
+ src: atomic-openshift-master-controllers.j2
+ dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
+ force: no
+ when: openshift_master_ha | bool and openshift_master_cluster_method == "native"
+- command: systemctl daemon-reload
+ when: openshift_master_ha | bool and openshift_master_cluster_method == "native"
+# end workaround for missing systemd unit files
+
+- name: Create session secrets file
+ template:
+ dest: "{{ openshift.master.session_secrets_file }}"
+ src: sessionSecretsFile.yaml.v1.j2
+ force: no
+ notify:
+ - restart master
+ - restart master api
+
# TODO: add the validate parameter when there is a validation command to run
- name: Create master config
template:
@@ -128,12 +180,15 @@
backup: true
notify:
- restart master
+ - restart master api
+ - restart master controllers
- name: Configure master settings
lineinfile:
dest: /etc/sysconfig/{{ openshift.common.service_type }}-master
regexp: "{{ item.regex }}"
line: "{{ item.line }}"
+ create: yes
with_items:
- regex: '^OPTIONS='
line: "OPTIONS=--loglevel={{ openshift.master.debug_level }}"
@@ -142,6 +197,34 @@
notify:
- restart master
+- name: Configure master api settings
+ lineinfile:
+ dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-api
+ regexp: "{{ item.regex }}"
+ line: "{{ item.line }}"
+ with_items:
+ - regex: '^OPTIONS='
+ line: "OPTIONS=--loglevel={{ openshift.master.debug_level }} --listen=https://0.0.0.0:8443 --master=https://{{ openshift.common.ip }}:8443"
+ - regex: '^CONFIG_FILE='
+ line: "CONFIG_FILE={{ openshift_master_config_file }}"
+ when: openshift_master_ha | bool and openshift_master_cluster_method == "native"
+ notify:
+ - restart master api
+
+- name: Configure master controller settings
+ lineinfile:
+ dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
+ regexp: "{{ item.regex }}"
+ line: "{{ item.line }}"
+ with_items:
+ - regex: '^OPTIONS='
+ line: "OPTIONS=--loglevel={{ openshift.master.debug_level }} --listen=https://0.0.0.0:8444"
+ - regex: '^CONFIG_FILE='
+ line: "CONFIG_FILE={{ openshift_master_config_file }}"
+ when: openshift_master_ha | bool and openshift_master_cluster_method == "native"
+ notify:
+ - restart master controllers
+
- name: Start and enable master
service: name={{ openshift.common.service_type }}-master enabled=yes state=started
when: not openshift_master_ha | bool
@@ -149,15 +232,37 @@
- set_fact:
master_service_status_changed: "{{ start_result | changed }}"
+ when: not openshift_master_ha | bool
+
+- name: Start and enable master api
+ service: name={{ openshift.common.service_type }}-master-api enabled=yes state=started
+ when: openshift_master_ha | bool and openshift.master.cluster_method == 'native'
+ register: start_result
+
+- set_fact:
+    master_api_service_status_changed: "{{ start_result | changed }}"
+ when: openshift_master_ha | bool and openshift.master.cluster_method == 'native'
+
+# TODO: fix the ugly workaround of setting ignore_errors
+# the controllers service tries to start even if it is already started
+- name: Start and enable master controllers
+ service: name={{ openshift.common.service_type }}-master-controllers enabled=yes state=started
+ when: openshift_master_ha | bool and openshift.master.cluster_method == 'native'
+ register: start_result
+ ignore_errors: yes
+
+- set_fact:
+    master_controllers_service_status_changed: "{{ start_result | changed }}"
+ when: openshift_master_ha | bool and openshift.master.cluster_method == 'native'
- name: Install cluster packages
yum: pkg=pcs state=present
- when: openshift_master_ha | bool and not openshift.master.cluster_defer_ha | bool
+ when: openshift_master_ha | bool and openshift.master.cluster_method == 'pacemaker'
register: install_result
- name: Start and enable cluster service
service: name=pcsd enabled=yes state=started
- when: openshift_master_ha | bool and not openshift.master.cluster_defer_ha | bool
+ when: openshift_master_ha | bool and openshift.master.cluster_method == 'pacemaker'
- name: Set the cluster user password
shell: echo {{ openshift_master_cluster_password | quote }} | passwd --stdin hacluster
diff --git a/roles/openshift_master/templates/atomic-openshift-master-api.j2 b/roles/openshift_master/templates/atomic-openshift-master-api.j2
new file mode 100644
index 000000000..205934248
--- /dev/null
+++ b/roles/openshift_master/templates/atomic-openshift-master-api.j2
@@ -0,0 +1,9 @@
+OPTIONS=
+CONFIG_FILE={{ openshift_master_config_dir }}/master-config.yaml
+
+# Proxy configuration
+# Origin uses standard HTTP_PROXY environment variables. Be sure to set
+# NO_PROXY for your master
+#NO_PROXY=master.example.com
+#HTTP_PROXY=http://USER:PASSWORD@IPADDR:PORT
+#HTTPS_PROXY=https://USER:PASSWORD@IPADDR:PORT
diff --git a/roles/openshift_master/templates/atomic-openshift-master-api.service.j2 b/roles/openshift_master/templates/atomic-openshift-master-api.service.j2
new file mode 100644
index 000000000..ba19fb348
--- /dev/null
+++ b/roles/openshift_master/templates/atomic-openshift-master-api.service.j2
@@ -0,0 +1,21 @@
+[Unit]
+Description=Atomic OpenShift Master API
+Documentation=https://github.com/openshift/origin
+After=network.target
+After=etcd.service
+Before={{ openshift.common.service_type }}-node.service
+Requires=network.target
+
+[Service]
+Type=notify
+EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-api
+Environment=GOTRACEBACK=crash
+ExecStart=/usr/bin/openshift start master api --config=${CONFIG_FILE} $OPTIONS
+LimitNOFILE=131072
+LimitCORE=infinity
+WorkingDirectory={{ openshift.common.data_dir }}
+SyslogIdentifier=atomic-openshift-master-api
+
+[Install]
+WantedBy=multi-user.target
+WantedBy={{ openshift.common.service_type }}-node.service
diff --git a/roles/openshift_master/templates/atomic-openshift-master-controllers.j2 b/roles/openshift_master/templates/atomic-openshift-master-controllers.j2
new file mode 100644
index 000000000..205934248
--- /dev/null
+++ b/roles/openshift_master/templates/atomic-openshift-master-controllers.j2
@@ -0,0 +1,9 @@
+OPTIONS=
+CONFIG_FILE={{ openshift_master_config_dir }}/master-config.yaml
+
+# Proxy configuration
+# Origin uses standard HTTP_PROXY environment variables. Be sure to set
+# NO_PROXY for your master
+#NO_PROXY=master.example.com
+#HTTP_PROXY=http://USER:PASSWORD@IPADDR:PORT
+#HTTPS_PROXY=https://USER:PASSWORD@IPADDR:PORT
diff --git a/roles/openshift_master/templates/atomic-openshift-master-controllers.service.j2 b/roles/openshift_master/templates/atomic-openshift-master-controllers.service.j2
new file mode 100644
index 000000000..8952c86ef
--- /dev/null
+++ b/roles/openshift_master/templates/atomic-openshift-master-controllers.service.j2
@@ -0,0 +1,22 @@
+[Unit]
+Description=Atomic OpenShift Master Controllers
+Documentation=https://github.com/openshift/origin
+After=network.target
+After={{ openshift.common.service_type }}-master-api.service
+Before={{ openshift.common.service_type }}-node.service
+Requires=network.target
+
+[Service]
+Type=notify
+EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
+Environment=GOTRACEBACK=crash
+ExecStart=/usr/bin/openshift start master controllers --config=${CONFIG_FILE} $OPTIONS
+LimitNOFILE=131072
+LimitCORE=infinity
+WorkingDirectory={{ openshift.common.data_dir }}
+SyslogIdentifier={{ openshift.common.service_type }}-master-controllers
+Restart=on-failure
+
+[Install]
+WantedBy=multi-user.target
+WantedBy={{ openshift.common.service_type }}-node.service
diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2
index 73a0bc6cc..bb12a0a0f 100644
--- a/roles/openshift_master/templates/master.yaml.v1.j2
+++ b/roles/openshift_master/templates/master.yaml.v1.j2
@@ -1,5 +1,5 @@
apiLevels:
-{% if openshift.common.deployment_type == "enterprise" %}
+{% if not openshift.common.version_greater_than_3_1_or_1_1 | bool %}
- v1beta3
{% endif %}
- v1
@@ -10,24 +10,33 @@ assetConfig:
publicURL: {{ openshift.master.public_console_url }}/
servingInfo:
bindAddress: {{ openshift.master.bind_addr }}:{{ openshift.master.console_port }}
+ bindNetwork: tcp4
certFile: master.server.crt
clientCA: ""
keyFile: master.server.key
maxRequestsInFlight: 0
requestTimeoutSeconds: 0
+{% if openshift_master_ha | bool %}
+controllerLeaseTTL: {{ openshift.master.controller_lease_ttl | default('30') }}
+{% endif %}
+controllers: '*'
corsAllowedOrigins:
-{% for origin in ['127.0.0.1', 'localhost', openshift.common.hostname, openshift.common.ip, openshift.common.public_hostname, openshift.common.public_ip] %}
+{% for origin in ['127.0.0.1', 'localhost', openshift.common.ip, openshift.common.public_ip] | union(openshift.common.all_hostnames) | unique %}
- {{ origin }}
{% endfor %}
{% for custom_origin in openshift.master.custom_cors_origins | default("") %}
- {{ custom_origin }}
{% endfor %}
+{% for name in (named_certificates | map(attribute='names')) | list | oo_flatten %}
+ - {{ name }}
+{% endfor %}
{% if 'disabled_features' in openshift.master %}
disabledFeatures: {{ openshift.master.disabled_features | to_json }}
{% endif %}
{% if openshift.master.embedded_dns | bool %}
dnsConfig:
bindAddress: {{ openshift.master.bind_addr }}:{{ openshift.master.dns_port }}
+ bindNetwork: tcp4
{% endif %}
etcdClientInfo:
ca: {{ "ca.crt" if (openshift.master.embedded_etcd | bool) else "master.etcd-ca.crt" }}
@@ -70,16 +79,15 @@ kubeletClientInfo:
port: 10250
{% if openshift.master.embedded_kube | bool %}
kubernetesMasterConfig:
+{% if not openshift.common.version_greater_than_3_1_or_1_1 | bool %}
apiLevels:
-{% if openshift.common.deployment_type == "enterprise" %}
- v1beta3
-{% endif %}
- v1
+{% endif %}
apiServerArguments: {{ api_server_args if api_server_args is defined else 'null' }}
controllerArguments: {{ controller_args if controller_args is defined else 'null' }}
-{# TODO: support overriding masterCount #}
- masterCount: 1
- masterIP: ""
+ masterCount: {{ openshift.master.master_count }}
+ masterIP: {{ openshift.common.ip }}
podEvictionTimeout: ""
proxyClientInfo:
certFile: master.proxy-client.crt
@@ -103,6 +111,7 @@ networkConfig:
# serviceNetworkCIDR must match kubernetesMasterConfig.servicesSubnet
serviceNetworkCIDR: {{ openshift.master.portal_net }}
{% include 'v1_partials/oauthConfig.j2' %}
+pauseControllers: false
policyConfig:
bootstrapPolicyFile: {{ openshift_master_policy }}
openshiftInfrastructureNamespace: openshift-infra
@@ -118,6 +127,7 @@ projectConfig:
routingConfig:
subdomain: "{{ openshift.master.default_subdomain | default("") }}"
serviceAccountConfig:
+ limitSecretReferences: false
managedNames:
- default
- builder
@@ -128,8 +138,20 @@ serviceAccountConfig:
- serviceaccounts.public.key
servingInfo:
bindAddress: {{ openshift.master.bind_addr }}:{{ openshift.master.api_port }}
+ bindNetwork: tcp4
certFile: master.server.crt
clientCA: ca.crt
keyFile: master.server.key
maxRequestsInFlight: 500
requestTimeoutSeconds: 3600
+{% if named_certificates %}
+ namedCertificates:
+{% for named_certificate in named_certificates %}
+ - certFile: {{ named_certificate['certfile'] }}
+ keyFile: {{ named_certificate['keyfile'] }}
+ names:
+{% for name in named_certificate['names'] %}
+ - "{{ name }}"
+{% endfor %}
+{% endfor %}
+{% endif %}
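For reference, a hypothetical value such as named_certificates=[{'certfile': 'custom.crt', 'keyfile': 'custom.key', 'names': ['api.example.com']}] would render the block above as:

      namedCertificates:
      - certFile: custom.crt
        keyFile: custom.key
        names:
          - "api.example.com"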
diff --git a/roles/openshift_master/templates/sessionSecretsFile.yaml.v1.j2 b/roles/openshift_master/templates/sessionSecretsFile.yaml.v1.j2
new file mode 100644
index 000000000..d12d9db90
--- /dev/null
+++ b/roles/openshift_master/templates/sessionSecretsFile.yaml.v1.j2
@@ -0,0 +1,7 @@
+apiVersion: v1
+kind: SessionSecrets
+secrets:
+{% for secret in openshift_master_session_auth_secrets %}
+- authentication: "{{ secret }}"
+ encryption: "{{ openshift_master_session_encryption_secrets[loop.index0] }}"
+{% endfor %}
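As a concrete illustration, with openshift_master_session_auth_secrets=['auth1', 'auth2'] and openshift_master_session_encryption_secrets=['enc1', 'enc2'] (hypothetical values; the two lists must be the same length since entries are paired by index), the template renders:

    apiVersion: v1
    kind: SessionSecrets
    secrets:
    - authentication: "auth1"
      encryption: "enc1"
    - authentication: "auth2"
      encryption: "enc2"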
diff --git a/roles/openshift_master/vars/main.yml b/roles/openshift_master/vars/main.yml
index ecdb4f883..534465451 100644
--- a/roles/openshift_master/vars/main.yml
+++ b/roles/openshift_master/vars/main.yml
@@ -2,6 +2,7 @@
openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
openshift_master_config_file: "{{ openshift_master_config_dir }}/master-config.yaml"
openshift_master_scheduler_conf: "{{ openshift_master_config_dir }}/scheduler.json"
+openshift_master_session_secrets_file: "{{ openshift_master_config_dir }}/session-secrets.yaml"
openshift_master_policy: "{{ openshift_master_config_dir }}/policy.json"
openshift_version: "{{ openshift_pkg_version | default('') }}"
diff --git a/roles/openshift_master_ca/tasks/main.yml b/roles/openshift_master_ca/tasks/main.yml
index cfd1ceabf..314f068e7 100644
--- a/roles/openshift_master_ca/tasks/main.yml
+++ b/roles/openshift_master_ca/tasks/main.yml
@@ -14,7 +14,7 @@
- name: Create the master certificates if they do not already exist
command: >
{{ openshift.common.admin_binary }} create-master-certs
- --hostnames={{ openshift.common.all_hostnames | join(',') }}
+ --hostnames={{ master_hostnames | join(',') }}
--master={{ openshift.master.api_url }}
--public-master={{ openshift.master.public_api_url }}
--cert-dir={{ openshift_master_config_dir }} --overwrite=false
diff --git a/roles/openshift_master_certificates/tasks/main.yml b/roles/openshift_master_certificates/tasks/main.yml
index 4b39b043a..13e5d7a4b 100644
--- a/roles/openshift_master_certificates/tasks/main.yml
+++ b/roles/openshift_master_certificates/tasks/main.yml
@@ -6,13 +6,9 @@
mode: 0700
with_items: masters_needing_certs
-- file:
- src: "{{ openshift_master_config_dir }}/{{ item.1 }}"
- dest: "{{ openshift_generated_configs_dir }}/{{ item.0.master_cert_subdir }}/{{ item.1 }}"
- state: hard
- with_nested:
- - masters_needing_certs
- - - ca.crt
+- set_fact:
+ master_certificates:
+ - ca.crt
- ca.key
- ca.serial.txt
- admin.crt
@@ -20,8 +16,6 @@
- admin.kubeconfig
- master.kubelet-client.crt
- master.kubelet-client.key
- - "{{ 'master.proxy-client.crt' if openshift.common.version_greater_than_3_1_or_1_1 else omit }}"
- - "{{ 'master.proxy-client.key' if openshift.common.version_greater_than_3_1_or_1_1 else omit }}"
- openshift-master.crt
- openshift-master.key
- openshift-master.kubeconfig
@@ -33,9 +27,17 @@
- openshift-router.kubeconfig
- serviceaccounts.private.key
- serviceaccounts.public.key
+ master_31_certificates:
+ - master.proxy-client.crt
+ - master.proxy-client.key
-- debug: msg="{{ item.openshift.master.all_hostnames | join (',') }}"
- with_items: masters_needing_certs
+- file:
+ src: "{{ openshift_master_config_dir }}/{{ item.1 }}"
+ dest: "{{ openshift_generated_configs_dir }}/{{ item.0.master_cert_subdir }}/{{ item.1 }}"
+ state: hard
+ with_nested:
+ - masters_needing_certs
+ - "{{ master_certificates | union(master_31_certificates) if openshift.common.version_greater_than_3_1_or_1_1 | bool else master_certificates }}"
- name: Create the master certificates if they do not already exist
command: >
diff --git a/roles/openshift_master_cluster/tasks/configure_deferred.yml b/roles/openshift_master_cluster/tasks/configure_deferred.yml
deleted file mode 100644
index 3b416005b..000000000
--- a/roles/openshift_master_cluster/tasks/configure_deferred.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-- debug: msg="Deferring config"
-
-- name: Start and enable the master
- service:
- name: "{{ openshift.common.service_type }}-master"
- state: started
- enabled: yes
diff --git a/roles/openshift_master_cluster/tasks/main.yml b/roles/openshift_master_cluster/tasks/main.yml
index 315947183..6303a6e46 100644
--- a/roles/openshift_master_cluster/tasks/main.yml
+++ b/roles/openshift_master_cluster/tasks/main.yml
@@ -4,10 +4,7 @@
register: pcs_status
changed_when: false
failed_when: false
- when: not openshift.master.cluster_defer_ha | bool
+ when: openshift.master.cluster_method == "pacemaker"
- include: configure.yml
when: "pcs_status | failed and 'Error: cluster is not currently running on this node' in pcs_status.stderr"
-
-- include: configure_deferred.yml
- when: openshift.master.cluster_defer_ha | bool
diff --git a/roles/openshift_node/meta/main.yml b/roles/openshift_node/meta/main.yml
index c92008a77..9d40ae3b3 100644
--- a/roles/openshift_node/meta/main.yml
+++ b/roles/openshift_node/meta/main.yml
@@ -13,3 +13,4 @@ galaxy_info:
- cloud
dependencies:
- { role: openshift_common }
+- { role: docker }
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index aea60b75c..d11bc5123 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -8,7 +8,7 @@
when: osn_cluster_dns_ip is not defined or not osn_cluster_dns_ip
- fail:
msg: "SELinux is disabled, This deployment type requires that SELinux is enabled."
- when: (not ansible_selinux or ansible_selinux.status != 'enabled') and deployment_type in ['enterprise', 'online']
+ when: (not ansible_selinux or ansible_selinux.status != 'enabled') and deployment_type in ['enterprise', 'online', 'atomic-enterprise', 'openshift-enterprise']
- name: Set node facts
openshift_facts:
@@ -22,16 +22,17 @@
deployment_type: "{{ openshift_deployment_type }}"
- role: node
local_facts:
- labels: "{{ lookup('oo_option', 'openshift_node_labels') | default( openshift_node_labels | default(none), true) }}"
annotations: "{{ openshift_node_annotations | default(none) }}"
- registry_url: "{{ oreg_url | default(none) }}"
debug_level: "{{ openshift_node_debug_level | default(openshift.common.debug_level) }}"
- portal_net: "{{ openshift_master_portal_net | default(None) }}"
- kubelet_args: "{{ openshift_node_kubelet_args | default(None) }}"
- sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}"
- schedulable: "{{ openshift_schedulable | default(openshift_scheduleable) | default(None) }}"
docker_log_driver: "{{ lookup( 'oo_option' , 'docker_log_driver' ) | default('',True) }}"
docker_log_options: "{{ lookup( 'oo_option' , 'docker_log_options' ) | default('',True) }}"
+ iptables_sync_period: "{{ openshift_node_iptables_sync_period | default(None) }}"
+ kubelet_args: "{{ openshift_node_kubelet_args | default(None) }}"
+ labels: "{{ lookup('oo_option', 'openshift_node_labels') | default( openshift_node_labels | default(none), true) }}"
+ portal_net: "{{ openshift_master_portal_net | default(None) }}"
+ registry_url: "{{ oreg_url | default(none) }}"
+ schedulable: "{{ openshift_schedulable | default(openshift_scheduleable) | default(None) }}"
+ sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}"
storage_plugin_deps: "{{ osn_storage_plugin_deps | default(None) }}"
# We have to add tuned-profiles in the same transaction otherwise we run into depsolving
diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2
index 4931d127e..7d2f506e3 100644
--- a/roles/openshift_node/templates/node.yaml.v1.j2
+++ b/roles/openshift_node/templates/node.yaml.v1.j2
@@ -4,6 +4,7 @@ dnsDomain: {{ osn_cluster_dns_domain }}
dnsIP: {{ osn_cluster_dns_ip }}
dockerConfig:
execHandlerName: ""
+iptablesSyncPeriod: "{{ openshift.node.iptables_sync_period }}"
imageConfig:
format: {{ openshift.node.registry_url }}
latest: false
@@ -22,6 +23,7 @@ networkConfig:
{% if openshift.common.use_openshift_sdn %}
networkPluginName: {{ openshift.common.sdn_network_plugin_name }}
{% endif %}
+nodeIP: {{ openshift.common.ip }}
nodeName: {{ openshift.common.hostname | lower }}
podManifestConfig:
servingInfo:
diff --git a/roles/openshift_repos/tasks/main.yaml b/roles/openshift_repos/tasks/main.yaml
index 12e98b7a1..aa696ae12 100644
--- a/roles/openshift_repos/tasks/main.yaml
+++ b/roles/openshift_repos/tasks/main.yaml
@@ -8,7 +8,7 @@
# proper repos correctly.
- assert:
- that: openshift_deployment_type in known_openshift_deployment_types
+ that: openshift.common.deployment_type in known_openshift_deployment_types
- name: Ensure libselinux-python is installed
yum:
diff --git a/roles/os_zabbix/vars/template_os_linux.yml b/roles/os_zabbix/vars/template_os_linux.yml
index aeeec4b8d..04665be62 100644
--- a/roles/os_zabbix/vars/template_os_linux.yml
+++ b/roles/os_zabbix/vars/template_os_linux.yml
@@ -194,6 +194,16 @@ g_template_os_linux:
lifetime: 1
description: "Dynamically register the filesystems"
+ - name: disc.disk
+ key: disc.disk
+ lifetime: 1
+ description: "Dynamically register disks on a node"
+
+ - name: disc.network
+ key: disc.network
+ lifetime: 1
+ description: "Dynamically register network interfaces on a node"
+
zitemprototypes:
- discoveryrule_key: disc.filesys
name: "disc.filesys.full.{#OSO_FILESYS}"
@@ -211,6 +221,42 @@ g_template_os_linux:
applications:
- Disk
+ - discoveryrule_key: disc.disk
+ name: "TPS (IOPS) for disk {#OSO_DISK}"
+ key: "disc.disk.tps[{#OSO_DISK}]"
+ value_type: int
+    description: "PCP disk.dev.totals metric measured over a period of time. This shows how many disk transactions per second the disk is handling"
+ applications:
+ - Disk
+
+ - discoveryrule_key: disc.disk
+ name: "Percent Utilized for disk {#OSO_DISK}"
+ key: "disc.disk.putil[{#OSO_DISK}]"
+ value_type: float
+ description: "PCP disk.dev.avactive metric measured over a period of time. This is the '%util' in the iostat command"
+ applications:
+ - Disk
+
+ - discoveryrule_key: disc.network
+ name: "Bytes per second IN on network interface {#OSO_NET_INTERFACE}"
+ key: "disc.network.in.bytes[{#OSO_NET_INTERFACE}]"
+ value_type: int
+ units: B
+ delta: 1
+    description: "PCP network.interface.in.bytes metric. This is set up as a delta in Zabbix to measure the per-second rate"
+ applications:
+ - Network
+
+ - discoveryrule_key: disc.network
+ name: "Bytes per second OUT on network interface {#OSO_NET_INTERFACE}"
+ key: "disc.network.out.bytes[{#OSO_NET_INTERFACE}]"
+ value_type: int
+ units: B
+ delta: 1
+    description: "PCP network.interface.out.bytes metric. This is set up as a delta in Zabbix to measure the per-second rate"
+ applications:
+ - Network
+
ztriggerprototypes:
- name: 'Filesystem: {#OSO_FILESYS} has less than 15% free disk space on {HOST.NAME}'
expression: '{Template OS Linux:disc.filesys.full[{#OSO_FILESYS}].last()}>85'
diff --git a/test/units/README.md b/test/units/README.md
index 3bed227eb..78a02c3ea 100644
--- a/test/units/README.md
+++ b/test/units/README.md
@@ -4,4 +4,4 @@ These should be run by sourcing the env-setup:
$ source test/env-setup
Then navigate to the test/units/ directory.
-$ python -m unittest multi_ec2_test
+$ python -m unittest multi_inventory_test
diff --git a/test/units/multi_inventory_test.py b/test/units/multi_inventory_test.py
new file mode 100755
index 000000000..168cd82b7
--- /dev/null
+++ b/test/units/multi_inventory_test.py
@@ -0,0 +1,114 @@
+#!/usr/bin/env python2
+'''
+ Unit tests for MultiInventory
+'''
+
+import unittest
+import multi_inventory
+
+# Allow short, otherwise-invalid variable names so the tests stay brief
+# pylint: disable=invalid-name
+class MultiInventoryTest(unittest.TestCase):
+ '''
+    Test class for MultiInventory
+ '''
+
+# def setUp(self):
+# '''setup method'''
+# pass
+
+ def test_merge_simple_1(self):
+ '''Testing a simple merge of 2 dictionaries'''
+ a = {"key1" : 1}
+ b = {"key1" : 2}
+ result = {}
+ _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
+ self.assertEqual(result, {"key1": [1, 2]})
+
+ def test_merge_b_empty(self):
+        '''Testing a merge where the second dictionary is empty'''
+ a = {"key1" : 1}
+ b = {}
+ result = {}
+ _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
+ self.assertEqual(result, {"key1": 1})
+
+ def test_merge_a_empty(self):
+        '''Testing a merge where the first dictionary is empty'''
+ b = {"key1" : 1}
+ a = {}
+ result = {}
+ _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
+ self.assertEqual(result, {"key1": 1})
+
+ def test_merge_hash_array(self):
+ '''Testing a merge of a dictionary and a dictionary with an array'''
+ a = {"key1" : {"hasha": 1}}
+ b = {"key1" : [1, 2]}
+ result = {}
+ _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
+ self.assertEqual(result, {"key1": [{"hasha": 1}, 1, 2]})
+
+ def test_merge_array_hash(self):
+ '''Testing a merge of a dictionary with an array and a dictionary with a hash'''
+ a = {"key1" : [1, 2]}
+ b = {"key1" : {"hasha": 1}}
+ result = {}
+ _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
+ self.assertEqual(result, {"key1": [1, 2, {"hasha": 1}]})
+
+ def test_merge_keys_1(self):
+ '''Testing a merge on a dictionary for keys'''
+ a = {"key1" : [1, 2], "key2" : {"hasha": 2}}
+ b = {"key2" : {"hashb": 1}}
+ result = {}
+ _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
+ self.assertEqual(result, {"key1": [1, 2], "key2": {"hasha": 2, "hashb": 1}})
+
+ def test_merge_recursive_1(self):
+ '''Testing a recursive merge'''
+ a = {"a" : {"b": {"c": 1}}}
+ b = {"a" : {"b": {"c": 2}}}
+ result = {}
+ _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
+ self.assertEqual(result, {"a": {"b": {"c": [1, 2]}}})
+
+ def test_merge_recursive_array_item(self):
+ '''Testing a recursive merge for an array'''
+ a = {"a" : {"b": {"c": [1]}}}
+ b = {"a" : {"b": {"c": 2}}}
+ result = {}
+ _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
+ self.assertEqual(result, {"a": {"b": {"c": [1, 2]}}})
+
+ def test_merge_recursive_hash_item(self):
+ '''Testing a recursive merge for a hash'''
+ a = {"a" : {"b": {"c": {"d": 1}}}}
+ b = {"a" : {"b": {"c": 2}}}
+ result = {}
+ _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
+ self.assertEqual(result, {"a": {"b": {"c": [{"d": 1}, 2]}}})
+
+ def test_merge_recursive_array_hash(self):
+        '''Testing a recursive merge of an array with a hash it already contains'''
+ a = {"a" : [{"b": {"c": 1}}]}
+ b = {"a" : {"b": {"c": 1}}}
+ result = {}
+ _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
+ self.assertEqual(result, {"a": [{"b": {"c": 1}}]})
+
+ def test_merge_recursive_hash_array(self):
+        '''Testing a recursive merge of a hash with an array that already contains it'''
+ a = {"a" : {"b": {"c": 1}}}
+ b = {"a" : [{"b": {"c": 1}}]}
+ result = {}
+ _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
+ self.assertEqual(result, {"a": [{"b": {"c": 1}}]})
+
+# def tearDown(self):
+# '''TearDown method'''
+# pass
+
+if __name__ == "__main__":
+ unittest.main()
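MultiInventory.merge_destructively itself is outside this diff; a minimal sketch consistent with the assertions above (recursive merge for nested dicts, conflicting scalars collected into lists, and values appended to lists only when not already present; the real module may differ in details):

    def merge_destructively(inventory, new_inventory):
        '''Merge new_inventory into inventory, combining conflicting values into lists.'''
        for key in new_inventory:
            if key not in inventory:
                inventory[key] = new_inventory[key]
            elif isinstance(inventory[key], dict) and isinstance(new_inventory[key], dict):
                merge_destructively(inventory[key], new_inventory[key])
            elif isinstance(inventory[key], list) and isinstance(new_inventory[key], list):
                inventory[key] += [x for x in new_inventory[key] if x not in inventory[key]]
            elif isinstance(inventory[key], list):
                if new_inventory[key] not in inventory[key]:
                    inventory[key].append(new_inventory[key])
            elif isinstance(new_inventory[key], list):
                if inventory[key] in new_inventory[key]:
                    inventory[key] = new_inventory[key]
                else:
                    inventory[key] = [inventory[key]] + new_inventory[key]
            else:
                inventory[key] = [inventory[key], new_inventory[key]]
        return inventory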
diff --git a/test/units/mutli_ec2_test.py b/test/units/mutli_ec2_test.py
deleted file mode 100755
index 95df93cd2..000000000
--- a/test/units/mutli_ec2_test.py
+++ /dev/null
@@ -1,95 +0,0 @@
-#!/usr/bin/env python2
-
-import unittest
-import sys
-import os
-import sys
-import multi_ec2
-
-class MultiEc2Test(unittest.TestCase):
-
- def setUp(self):
- pass
-
- def test_merge_simple_1(self):
- a = {"key1" : 1}
- b = {"key1" : 2}
- result = {}
- [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]]
- self.assertEqual(result, {"key1": [1,2]})
-
- def test_merge_b_empty(self):
- a = {"key1" : 1}
- b = {}
- result = {}
- [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]]
- self.assertEqual(result, {"key1": 1})
-
- def test_merge_a_empty(self):
- b = {"key1" : 1}
- a = {}
- result = {}
- [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]]
- self.assertEqual(result, {"key1": 1})
-
- def test_merge_hash_array(self):
- a = {"key1" : {"hasha": 1}}
- b = {"key1" : [1,2]}
- result = {}
- [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]]
- self.assertEqual(result, {"key1": [{"hasha": 1}, 1,2]})
-
- def test_merge_array_hash(self):
- a = {"key1" : [1,2]}
- b = {"key1" : {"hasha": 1}}
- result = {}
- [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]]
- self.assertEqual(result, {"key1": [1,2, {"hasha": 1}]})
-
- def test_merge_keys_1(self):
- a = {"key1" : [1,2], "key2" : {"hasha": 2}}
- b = {"key2" : {"hashb": 1}}
- result = {}
- [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]]
- self.assertEqual(result, {"key1": [1,2], "key2": {"hasha": 2, "hashb": 1}})
-
- def test_merge_recursive_1(self):
- a = {"a" : {"b": {"c": 1}}}
- b = {"a" : {"b": {"c": 2}}}
- result = {}
- [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]]
- self.assertEqual(result, {"a": {"b": {"c": [1,2]}}})
-
- def test_merge_recursive_array_item(self):
- a = {"a" : {"b": {"c": [1]}}}
- b = {"a" : {"b": {"c": 2}}}
- result = {}
- [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]]
- self.assertEqual(result, {"a": {"b": {"c": [1,2]}}})
-
- def test_merge_recursive_hash_item(self):
- a = {"a" : {"b": {"c": {"d": 1}}}}
- b = {"a" : {"b": {"c": 2}}}
- result = {}
- [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]]
- self.assertEqual(result, {"a": {"b": {"c": [{"d": 1}, 2]}}})
-
- def test_merge_recursive_array_hash(self):
- a = {"a" : [{"b": {"c": 1}}]}
- b = {"a" : {"b": {"c": 1}}}
- result = {}
- [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]]
- self.assertEqual(result, {"a": [{"b": {"c": 1}}]})
-
- def test_merge_recursive_hash_array(self):
- a = {"a" : {"b": {"c": 1}}}
- b = {"a" : [{"b": {"c": 1}}]}
- result = {}
- [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]]
- self.assertEqual(result, {"a": [{"b": {"c": 1}}]})
-
- def tearDown(self):
- pass
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/utils/docs/config.md b/utils/docs/config.md
index 9399409dd..2729f8d37 100644
--- a/utils/docs/config.md
+++ b/utils/docs/config.md
@@ -7,6 +7,7 @@ The default location this config file will be written to ~/.config/openshift/ins
## Example
```
+version: v1
variant: openshift-enterprise
variant_version: 3.0
ansible_ssh_user: root
@@ -18,20 +19,27 @@ hosts:
master: true
node: true
containerized: true
+ connect_to: 24.222.0.1
- ip: 10.0.0.2
hostname: node1-private.example.com
public_ip: 24.222.0.2
public_hostname: node1.example.com
node: true
+ connect_to: 10.0.0.2
- ip: 10.0.0.3
hostname: node2-private.example.com
public_ip: 24.222.0.3
public_hostname: node2.example.com
node: true
+ connect_to: 10.0.0.3
```
## Primary Settings
+### version
+
+Indicates the version of the configuration format this file was written with. The current format version is v1.
+
### variant
The OpenShift variant to install. Currently valid options are:
diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py
index ffcfc5db2..6cdc19f20 100644
--- a/utils/src/ooinstall/cli_installer.py
+++ b/utils/src/ooinstall/cli_installer.py
@@ -11,7 +11,7 @@ from ooinstall import OOConfig
from ooinstall.oo_config import Host
from ooinstall.variants import find_variant, get_variant_version_combos
-DEFAULT_ANSIBLE_CONFIG = '/usr/share/atomic-openshift-util/ansible.cfg'
+DEFAULT_ANSIBLE_CONFIG = '/usr/share/atomic-openshift-utils/ansible.cfg'
DEFAULT_PLAYBOOK_DIR = '/usr/share/ansible/openshift-ansible/'
def validate_ansible_dir(path):
@@ -101,29 +101,26 @@ http://docs.openshift.com/enterprise/latest/architecture/infrastructure_componen
hosts = []
more_hosts = True
- ip_regex = re.compile(r'^\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}$')
-
while more_hosts:
host_props = {}
hostname_or_ip = click.prompt('Enter hostname or IP address:',
default='',
value_proc=validate_prompt_hostname)
- if ip_regex.match(hostname_or_ip):
- host_props['ip'] = hostname_or_ip
- else:
- host_props['hostname'] = hostname_or_ip
+ host_props['connect_to'] = hostname_or_ip
host_props['master'] = click.confirm('Will this host be an OpenShift Master?')
host_props['node'] = True
- rpm_or_container = click.prompt('Will this host be RPM or Container based (rpm/container)?',
- type=click.Choice(['rpm', 'container']),
- default='rpm')
- if rpm_or_container == 'container':
- host_props['containerized'] = True
- else:
- host_props['containerized'] = False
+ #TODO: Reenable this option once container installs are out of tech preview
+ #rpm_or_container = click.prompt('Will this host be RPM or Container based (rpm/container)?',
+ # type=click.Choice(['rpm', 'container']),
+ # default='rpm')
+ #if rpm_or_container == 'container':
+ # host_props['containerized'] = True
+ #else:
+ # host_props['containerized'] = False
+ host_props['containerized'] = False
host = Host(**host_props)
@@ -150,7 +147,7 @@ Please confirm that they are correct before moving forward.
notes = """
Format:
-IP,public IP,hostname,public hostname
+connect_to,IP,public IP,hostname,public hostname
Notes:
* The installation host is the hostname from the installer's perspective.
@@ -168,20 +165,20 @@ Notes:
default_facts_lines = []
default_facts = {}
- validated_facts = {}
for h in hosts:
- default_facts[h] = {}
- h.ip = callback_facts[str(h)]["common"]["ip"]
- h.public_ip = callback_facts[str(h)]["common"]["public_ip"]
- h.hostname = callback_facts[str(h)]["common"]["hostname"]
- h.public_hostname = callback_facts[str(h)]["common"]["public_hostname"]
-
- validated_facts[h] = {}
- default_facts_lines.append(",".join([h.ip,
+ default_facts[h.connect_to] = {}
+ h.ip = callback_facts[h.connect_to]["common"]["ip"]
+ h.public_ip = callback_facts[h.connect_to]["common"]["public_ip"]
+ h.hostname = callback_facts[h.connect_to]["common"]["hostname"]
+ h.public_hostname = callback_facts[h.connect_to]["common"]["public_hostname"]
+
+ default_facts_lines.append(",".join([h.connect_to,
+ h.ip,
h.public_ip,
h.hostname,
h.public_hostname]))
- output = "%s\n%s" % (output, ",".join([h.ip,
+ output = "%s\n%s" % (output, ",".join([h.connect_to,
+ h.ip,
h.public_ip,
h.hostname,
h.public_hostname]))
@@ -316,14 +313,16 @@ Add new nodes here
def get_installed_hosts(hosts, callback_facts):
installed_hosts = []
for host in hosts:
- if(host.name in callback_facts.keys()
- and 'common' in callback_facts[host.name].keys()
- and callback_facts[host.name]['common'].get('version', '')
- and callback_facts[host.name]['common'].get('version', '') != 'None'):
+ if(host.connect_to in callback_facts.keys()
+ and 'common' in callback_facts[host.connect_to].keys()
+ and callback_facts[host.connect_to]['common'].get('version', '')
+ and callback_facts[host.connect_to]['common'].get('version', '') != 'None'):
installed_hosts.append(host)
return installed_hosts
-def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force):
+# pylint: disable=too-many-branches
+# This pylint error will be corrected shortly in separate PR.
+def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force, verbose):
# Copy the list of existing hosts so we can remove any already installed nodes.
hosts_to_run_on = list(oo_cfg.hosts)
@@ -331,7 +330,22 @@ def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force):
# Check if master or nodes already have something installed
installed_hosts = get_installed_hosts(oo_cfg.hosts, callback_facts)
if len(installed_hosts) > 0:
- # present a message listing already installed hosts
+ click.echo('Installed environment detected.')
+ # This check has to happen before we start removing hosts later in this method
+ if not force:
+ if not unattended:
+ click.echo('By default the installer only adds new nodes to an installed environment.')
+ response = click.prompt('Do you want to (1) only add additional nodes or ' \
+ '(2) perform a clean install?', type=int)
+ # TODO: this should be reworked with error handling.
+ # Click can certainly do this for us.
+ # This should be refactored as soon as we add a 3rd option.
+ if response == 1:
+ force = False
+ if response == 2:
+ force = True
+
+ # present a message listing already installed hosts and remove hosts if needed
for host in installed_hosts:
if host.master:
click.echo("{} is already an OpenShift Master".format(host))
@@ -339,32 +353,42 @@ def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force):
# new nodes.
elif host.node:
click.echo("{} is already an OpenShift Node".format(host))
- hosts_to_run_on.remove(host)
- # for unattended either continue if they force install or exit if they didn't
- if unattended:
- if not force:
- click.echo('Installed environment detected and no additional nodes specified: ' \
- 'aborting. If you want a fresh install, use --force')
- sys.exit(1)
- # for attended ask the user what to do
+ # force is only used for reinstalls so we don't want to remove
+ # anything.
+ if not force:
+ hosts_to_run_on.remove(host)
+
+ # Handle the cases where we know about uninstalled systems
+ new_hosts = set(hosts_to_run_on) - set(installed_hosts)
+ if len(new_hosts) > 0:
+ for new_host in new_hosts:
+ click.echo("{} is currently uninstalled".format(new_host))
+
+ # Fall through
+ click.echo('Adding additional nodes...')
else:
- click.echo('Installed environment detected and no additional nodes specified. ')
- response = click.prompt('Do you want to (1) add more nodes or ' \
- '(2) perform a clean install?', type=int)
- if response == 1: # add more nodes
- new_nodes = collect_new_nodes()
-
- hosts_to_run_on.extend(new_nodes)
- oo_cfg.hosts.extend(new_nodes)
-
- openshift_ansible.set_config(oo_cfg)
- callback_facts, error = openshift_ansible.default_facts(oo_cfg.hosts)
- if error:
- click.echo("There was a problem fetching the required information. " \
- "See {} for details.".format(oo_cfg.settings['ansible_log_path']))
+ if unattended:
+ if not force:
+ click.echo('Installed environment detected and no additional nodes specified: ' \
+ 'aborting. If you want a fresh install, use ' \
+ '`atomic-openshift-installer install --force`')
sys.exit(1)
else:
- pass # proceeding as normal should do a clean install
+ if not force:
+ new_nodes = collect_new_nodes()
+
+ hosts_to_run_on.extend(new_nodes)
+ oo_cfg.hosts.extend(new_nodes)
+
+ openshift_ansible.set_config(oo_cfg)
+ click.echo('Gathering information from hosts...')
+ callback_facts, error = openshift_ansible.default_facts(oo_cfg.hosts, verbose)
+ if error:
+ click.echo("There was a problem fetching the required information. " \
+ "See {} for details.".format(oo_cfg.settings['ansible_log_path']))
+ sys.exit(1)
+ else:
+ pass # proceeding as normal should do a clean install
return hosts_to_run_on, callback_facts
@@ -385,7 +409,7 @@ def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force):
dir_okay=True,
readable=True),
# callback=validate_ansible_dir,
- default='/usr/share/openshift-ansible/',
+ default=DEFAULT_PLAYBOOK_DIR,
envvar='OO_ANSIBLE_PLAYBOOK_DIRECTORY')
@click.option('--ansible-config',
type=click.Path(file_okay=True,
@@ -399,9 +423,11 @@ def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force):
writable=True,
readable=True),
default="/tmp/ansible.log")
+@click.option('-v', '--verbose',
+ is_flag=True, default=False)
#pylint: disable=too-many-arguments
# Main CLI entrypoint, not much we can do about too many arguments.
-def cli(ctx, unattended, configuration, ansible_playbook_directory, ansible_config, ansible_log_path):
+def cli(ctx, unattended, configuration, ansible_playbook_directory, ansible_config, ansible_log_path, verbose):
"""
The main click CLI module. Responsible for handling most common CLI options,
assigning any defaults and adding to the context for the sub-commands.
@@ -411,6 +437,7 @@ def cli(ctx, unattended, configuration, ansible_playbook_directory, ansible_conf
ctx.obj['configuration'] = configuration
ctx.obj['ansible_config'] = ansible_config
ctx.obj['ansible_log_path'] = ansible_log_path
+ ctx.obj['verbose'] = verbose
oo_cfg = OOConfig(ctx.obj['configuration'])
@@ -441,6 +468,7 @@ def cli(ctx, unattended, configuration, ansible_playbook_directory, ansible_conf
@click.pass_context
def uninstall(ctx):
oo_cfg = ctx.obj['oo_cfg']
+ verbose = ctx.obj['verbose']
if len(oo_cfg.hosts) == 0:
click.echo("No hosts defined in: %s" % oo_cfg['configuration'])
@@ -450,14 +478,53 @@ def uninstall(ctx):
if not ctx.obj['unattended']:
# Prompt interactively to confirm:
for host in oo_cfg.hosts:
- click.echo(" * %s" % host.name)
+ click.echo(" * %s" % host.connect_to)
proceed = click.confirm("\nDo you wish to proceed?")
if not proceed:
click.echo("Uninstall cancelled.")
sys.exit(0)
- openshift_ansible.run_uninstall_playbook()
+ openshift_ansible.run_uninstall_playbook(verbose)
+
+
+@click.command()
+@click.pass_context
+def upgrade(ctx):
+ oo_cfg = ctx.obj['oo_cfg']
+ verbose = ctx.obj['verbose']
+
+ if len(oo_cfg.hosts) == 0:
+ click.echo("No hosts defined in: %s" % oo_cfg.config_path)
+ sys.exit(1)
+    # Update config to reflect the version we're targeting; we'll write
+ # to disk once ansible completes successfully, not before.
+ old_variant = oo_cfg.settings['variant']
+ old_version = oo_cfg.settings['variant_version']
+ if oo_cfg.settings['variant'] == 'enterprise':
+ oo_cfg.settings['variant'] = 'openshift-enterprise'
+ version = find_variant(oo_cfg.settings['variant'])[1]
+ oo_cfg.settings['variant_version'] = version.name
+    click.echo("OpenShift will be upgraded from %s %s to %s %s on the following hosts:\n" % (
+ old_variant, old_version, oo_cfg.settings['variant'],
+ oo_cfg.settings['variant_version']))
+ for host in oo_cfg.hosts:
+ click.echo(" * %s" % host.connect_to)
+
+ if not ctx.obj['unattended']:
+ # Prompt interactively to confirm:
+ proceed = click.confirm("\nDo you wish to proceed?")
+ if not proceed:
+ click.echo("Upgrade cancelled.")
+ sys.exit(0)
+
+ retcode = openshift_ansible.run_upgrade_playbook(verbose)
+ if retcode > 0:
+ click.echo("Errors encountered during upgrade, please check %s." %
+ oo_cfg.settings['ansible_log_path'])
+ else:
+ oo_cfg.save_to_disk()
+ click.echo("Upgrade completed! Rebooting all hosts is recommended.")
@click.command()
@@ -465,6 +532,7 @@ def uninstall(ctx):
@click.pass_context
def install(ctx, force):
oo_cfg = ctx.obj['oo_cfg']
+ verbose = ctx.obj['verbose']
if ctx.obj['unattended']:
error_if_missing_info(oo_cfg)
@@ -472,13 +540,15 @@ def install(ctx, force):
oo_cfg = get_missing_info_from_user(oo_cfg)
click.echo('Gathering information from hosts...')
- callback_facts, error = openshift_ansible.default_facts(oo_cfg.hosts)
+ callback_facts, error = openshift_ansible.default_facts(oo_cfg.hosts,
+ verbose)
if error:
click.echo("There was a problem fetching the required information. " \
"Please see {} for details.".format(oo_cfg.settings['ansible_log_path']))
sys.exit(1)
- hosts_to_run_on, callback_facts = get_hosts_to_run_on(oo_cfg, callback_facts, ctx.obj['unattended'], force)
+ hosts_to_run_on, callback_facts = get_hosts_to_run_on(
+ oo_cfg, callback_facts, ctx.obj['unattended'], force, verbose)
click.echo('Writing config to: %s' % oo_cfg.config_path)
@@ -500,7 +570,7 @@ If changes are needed to the values recorded by the installer please update {}.
confirm_continue(message)
error = openshift_ansible.run_main_playbook(oo_cfg.hosts,
- hosts_to_run_on)
+ hosts_to_run_on, verbose)
if error:
# The bootstrap script will print out the log location.
message = """
@@ -523,6 +593,7 @@ http://docs.openshift.com/enterprise/latest/admin_guide/overview.html
click.pause()
cli.add_command(install)
+cli.add_command(upgrade)
cli.add_command(uninstall)
if __name__ == '__main__':
diff --git a/utils/src/ooinstall/oo_config.py b/utils/src/ooinstall/oo_config.py
index a2f53cf78..9c97e6e93 100644
--- a/utils/src/ooinstall/oo_config.py
+++ b/utils/src/ooinstall/oo_config.py
@@ -12,6 +12,7 @@ PERSIST_SETTINGS = [
'ansible_log_path',
'variant',
'variant_version',
+ 'version',
]
REQUIRED_FACTS = ['ip', 'public_ip', 'hostname', 'public_hostname']
@@ -34,6 +35,7 @@ class Host(object):
self.hostname = kwargs.get('hostname', None)
self.public_ip = kwargs.get('public_ip', None)
self.public_hostname = kwargs.get('public_hostname', None)
+ self.connect_to = kwargs.get('connect_to', None)
# Should this host run as an OpenShift master:
self.master = kwargs.get('master', False)
@@ -42,30 +44,25 @@ class Host(object):
self.node = kwargs.get('node', False)
self.containerized = kwargs.get('containerized', False)
- if self.ip is None and self.hostname is None:
- raise OOConfigInvalidHostError("You must specify either 'ip' or 'hostname'")
+ if self.connect_to is None:
+        raise OOConfigInvalidHostError("You must specify either an 'ip' " \
+ "or 'hostname' to connect to.")
if self.master is False and self.node is False:
raise OOConfigInvalidHostError(
"You must specify each host as either a master or a node.")
- # Hosts can be specified with an ip, hostname, or both. However we need
- # something authoritative we can connect to and refer to the host by.
- # Preference given to the IP if specified as this is more specific.
- # We know one must be set by this point.
- self.name = self.ip if self.ip is not None else self.hostname
-
def __str__(self):
- return self.name
+ return self.connect_to
def __repr__(self):
- return self.name
+ return self.connect_to
def to_dict(self):
""" Used when exporting to yaml. """
d = {}
for prop in ['ip', 'hostname', 'public_ip', 'public_hostname',
- 'master', 'node', 'containerized']:
+ 'master', 'node', 'containerized', 'connect_to']:
# If the property is defined (not None or False), export it:
if getattr(self, prop):
d[prop] = getattr(self, prop)
@@ -73,7 +70,6 @@ class Host(object):
class OOConfig(object):
- new_config = True
default_dir = os.path.normpath(
os.environ.get('XDG_CONFIG_HOME',
os.environ['HOME'] + '/.config/') + '/openshift/')
@@ -86,19 +82,22 @@ class OOConfig(object):
self.config_path = os.path.normpath(self.default_dir +
self.default_file)
self.settings = {}
- self.read_config()
- self.set_defaults()
+ self._read_config()
+ self._set_defaults()
- def read_config(self, is_new=False):
+ def _read_config(self):
self.hosts = []
try:
- new_settings = None
if os.path.exists(self.config_path):
cfgfile = open(self.config_path, 'r')
- new_settings = yaml.safe_load(cfgfile.read())
+ self.settings = yaml.safe_load(cfgfile.read())
cfgfile.close()
- if new_settings:
- self.settings = new_settings
+
+ # Use the presence of a Description as an indicator this is
+ # a legacy config file:
+ if 'Description' in self.settings:
+ self._upgrade_legacy_config()
+
# Parse the hosts into DTO objects:
if 'hosts' in self.settings:
for host in self.settings['hosts']:
@@ -114,9 +113,31 @@ class OOConfig(object):
ferr.strerror))
except yaml.scanner.ScannerError:
raise OOConfigFileError('Config file "{}" is not a valid YAML document'.format(self.config_path))
- self.new_config = is_new
- def set_defaults(self):
+ def _upgrade_legacy_config(self):
+ new_hosts = []
+ remove_settings = ['validated_facts', 'Description', 'Name',
+ 'Subscription', 'Vendor', 'Version', 'masters', 'nodes']
+
+ if 'validated_facts' in self.settings:
+ for key, value in self.settings['validated_facts'].iteritems():
+ value['connect_to'] = key
+ if 'masters' in self.settings and key in self.settings['masters']:
+ value['master'] = True
+ if 'nodes' in self.settings and key in self.settings['nodes']:
+ value['node'] = True
+ new_hosts.append(value)
+ self.settings['hosts'] = new_hosts
+
+ for s in remove_settings:
+ if s in self.settings:
+ del self.settings[s]
+
+ # A legacy config implies openshift-enterprise 3.0:
+ self.settings['variant'] = 'openshift-enterprise'
+ self.settings['variant_version'] = '3.0'
+
+ def _set_defaults(self):
if 'ansible_inventory_directory' not in self.settings:
self.settings['ansible_inventory_directory'] = \
@@ -125,6 +146,8 @@ class OOConfig(object):
os.makedirs(self.settings['ansible_inventory_directory'])
if 'ansible_plugins_directory' not in self.settings:
self.settings['ansible_plugins_directory'] = resource_filename(__name__, 'ansible_plugins')
+ if 'version' not in self.settings:
+ self.settings['version'] = 'v1'
if 'ansible_callback_facts_yaml' not in self.settings:
self.settings['ansible_callback_facts_yaml'] = '%s/callback_facts.yaml' % \
@@ -158,7 +181,7 @@ class OOConfig(object):
if not getattr(host, required_fact):
missing_facts.append(required_fact)
if len(missing_facts) > 0:
- result[host.name] = missing_facts
+ result[host.connect_to] = missing_facts
return result
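# Shape of the missing-facts report built above, keyed by each host's
# connect_to address (illustrative values only, not from the source):
result = {'master-private.example.com': ['public_ip', 'public_hostname']}
for connect_to, missing in result.items():
    print('{}: missing {}'.format(connect_to, ', '.join(missing)))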
def save_to_disk(self):
@@ -190,6 +213,6 @@ class OOConfig(object):
def get_host(self, name):
for host in self.hosts:
- if host.name == name:
+ if host.connect_to == name:
return host
return None
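# A minimal standalone sketch of the legacy-config upgrade performed by
# _upgrade_legacy_config above: each key of validated_facts becomes that
# host's connect_to address, and membership in the legacy masters/nodes
# lists sets the role flags. The data here is hand-written for
# illustration, mirroring the LEGACY_CONFIG fixture in the tests below.
legacy = {
    'masters': ['10.0.0.1'],
    'nodes': ['10.0.0.2'],
    'validated_facts': {
        '10.0.0.1': {'hostname': 'master-private.example.com', 'ip': '10.0.0.1'},
        '10.0.0.2': {'hostname': 'node1-private.example.com', 'ip': '10.0.0.2'},
    },
}
hosts = []
for key, value in legacy['validated_facts'].items():
    value['connect_to'] = key
    if key in legacy.get('masters', []):
        value['master'] = True
    if key in legacy.get('nodes', []):
        value['node'] = True
    hosts.append(value)
# hosts[0] -> {'hostname': 'master-private.example.com', 'ip': '10.0.0.1',
#              'connect_to': '10.0.0.1', 'master': True}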
diff --git a/utils/src/ooinstall/openshift_ansible.py b/utils/src/ooinstall/openshift_ansible.py
index 0def72cfd..e4c808e85 100644
--- a/utils/src/ooinstall/openshift_ansible.py
+++ b/utils/src/ooinstall/openshift_ansible.py
@@ -16,10 +16,8 @@ def set_config(cfg):
CFG = cfg
def generate_inventory(hosts):
- print hosts
global CFG
- installer_host = socket.gethostname()
base_inventory_path = CFG.settings['ansible_inventory_path']
base_inventory = open(base_inventory_path, 'w')
base_inventory.write('\n[OSEv3:children]\nmasters\nnodes\n')
@@ -33,25 +31,18 @@ def generate_inventory(hosts):
version=CFG.settings.get('variant_version', None))[1]
base_inventory.write('deployment_type={}\n'.format(ver.ansible_key))
- if 'OO_INSTALL_DEVEL_REGISTRY' in os.environ:
- base_inventory.write('oreg_url=rcm-img-docker01.build.eng.bos.redhat.com:'
- '5001/openshift3/ose-${component}:${version}\n')
- if 'OO_INSTALL_PUDDLE_REPO_ENABLE' in os.environ:
- base_inventory.write("openshift_additional_repos=[{'id': 'ose-devel', "
+ if 'OO_INSTALL_ADDITIONAL_REGISTRIES' in os.environ:
+ base_inventory.write('cli_docker_additional_registries={}\n'
+ .format(os.environ['OO_INSTALL_ADDITIONAL_REGISTRIES']))
+ if 'OO_INSTALL_INSECURE_REGISTRIES' in os.environ:
+ base_inventory.write('cli_docker_insecure_registries={}\n'
+ .format(os.environ['OO_INSTALL_INSECURE_REGISTRIES']))
+ if 'OO_INSTALL_PUDDLE_REPO' in os.environ:
+ # We have to double the '{' here for literals
+ base_inventory.write("openshift_additional_repos=[{{'id': 'ose-devel', "
"'name': 'ose-devel', "
- "'baseurl': 'http://buildvm-devops.usersys.redhat.com"
- "/puddle/build/AtomicOpenShift/3.1/latest/RH7-RHAOS-3.1/$basearch/os', "
- "'enabled': 1, 'gpgcheck': 0}]\n")
- if 'OO_INSTALL_STAGE_REGISTRY' in os.environ:
- base_inventory.write('oreg_url=registry.access.stage.redhat.com/openshift3/ose-${component}:${version}\n')
-
- if any(host.hostname == installer_host or host.public_hostname == installer_host
- for host in hosts):
- no_pwd_sudo = subprocess.call(['sudo', '-v', '--non-interactive'])
- if no_pwd_sudo == 1:
- print 'The atomic-openshift-installer requires sudo access without a password.'
- sys.exit(1)
- base_inventory.write("ansible_connection=local\n")
+ "'baseurl': '{}', "
+ "'enabled': 1, 'gpgcheck': 0}}]\n".format(os.environ['OO_INSTALL_PUDDLE_REPO']))
base_inventory.write('\n[masters]\n')
masters = (host for host in hosts if host.master)
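# Why the braces are doubled in the openshift_additional_repos line above:
# str.format() treats '{' and '}' as placeholder delimiters, so literal
# braces in the output must be escaped by writing '{{' and '}}'.
template = "openshift_additional_repos=[{{'id': 'ose-devel', 'baseurl': '{}'}}]"
print(template.format('http://example.com/repo'))
# -> openshift_additional_repos=[{'id': 'ose-devel', 'baseurl': 'http://example.com/repo'}]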
@@ -73,6 +64,7 @@ def generate_inventory(hosts):
def write_host(host, inventory, scheduleable=True):
global CFG
+
facts = ''
if host.ip:
facts += ' openshift_ip={}'.format(host.ip)
@@ -86,19 +78,30 @@ def write_host(host, inventory, scheduleable=True):
# Technically only nodes will ever need this.
if not scheduleable:
facts += ' openshift_scheduleable=False'
- inventory.write('{} {}\n'.format(host, facts))
+ installer_host = socket.gethostname()
+ if installer_host in [host.connect_to, host.hostname, host.public_hostname]:
+ facts += ' ansible_connection=local'
+ if os.geteuid() != 0:
+ no_pwd_sudo = subprocess.call(['sudo', '-v', '-n'])
+ if no_pwd_sudo == 1:
+ print 'The atomic-openshift-installer requires sudo access without a password.'
+ sys.exit(1)
+ facts += ' ansible_become=true'
+
+ inventory.write('{} {}\n'.format(host.connect_to, facts))
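# What the local-connection branch above verifies: 'sudo -v' validates the
# user's cached sudo credentials and '-n' forbids prompting, so a failing
# exit status means sudo would need a password. A simplified standalone
# check (the code above tests for exit status 1 specifically):
import subprocess
if subprocess.call(['sudo', '-v', '-n']) != 0:
    raise SystemExit('passwordless sudo is required when installing the local host')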
-def load_system_facts(inventory_file, os_facts_path, env_vars):
+def load_system_facts(inventory_file, os_facts_path, env_vars, verbose=False):
"""
Retrieves system facts from the remote systems.
"""
FNULL = open(os.devnull, 'w')
- status = subprocess.call(['ansible-playbook',
- '--inventory-file={}'.format(inventory_file),
- os_facts_path],
- env=env_vars,
- stdout=FNULL)
+ args = ['ansible-playbook', '-v'] if verbose \
+ else ['ansible-playbook']
+ args.extend([
+ '--inventory-file={}'.format(inventory_file),
+ os_facts_path])
+ status = subprocess.call(args, env=env_vars, stdout=FNULL)
if not status == 0:
return [], 1
callback_facts_file = open(CFG.settings['ansible_callback_facts_yaml'], 'r')
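# The verbose handling above reads as: start from the base command,
# optionally add '-v', then append the inventory and playbook. A
# hypothetical helper (not in the source) showing the equivalent logic:
def build_playbook_args(playbook, inventory, verbose=False):
    args = ['ansible-playbook']
    if verbose:
        args.append('-v')
    args.extend(['--inventory-file={}'.format(inventory), playbook])
    return args
# build_playbook_args('site.yml', 'hosts', verbose=True)
# -> ['ansible-playbook', '-v', '--inventory-file=hosts', 'site.yml']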
@@ -107,7 +110,7 @@ def load_system_facts(inventory_file, os_facts_path, env_vars):
return callback_facts, 0
-def default_facts(hosts):
+def default_facts(hosts, verbose=False):
global CFG
inventory_file = generate_inventory(hosts)
os_facts_path = '{}/playbooks/byo/openshift_facts.yml'.format(CFG.ansible_playbook_directory)
@@ -119,12 +122,12 @@ def default_facts(hosts):
facts_env["ANSIBLE_LOG_PATH"] = CFG.settings['ansible_log_path']
if 'ansible_config' in CFG.settings:
facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_config']
- return load_system_facts(inventory_file, os_facts_path, facts_env)
+ return load_system_facts(inventory_file, os_facts_path, facts_env, verbose)
-def run_main_playbook(hosts, hosts_to_run_on):
+def run_main_playbook(hosts, hosts_to_run_on, verbose=False):
global CFG
- inventory_file = generate_inventory(hosts)
+ inventory_file = generate_inventory(hosts_to_run_on)
if len(hosts_to_run_on) != len(hosts):
main_playbook_path = os.path.join(CFG.ansible_playbook_directory,
'playbooks/common/openshift-cluster/scaleup.yml')
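# The list-length comparison above is how a scale-up is detected: the
# caller passes the full host list plus the subset that still needs work,
# and a mismatch means new hosts are being added to an existing cluster.
# Illustrative only; the non-scale-up playbook path is elided in this hunk.
def is_scaleup(hosts, hosts_to_run_on):
    return len(hosts_to_run_on) != len(hosts)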
@@ -136,16 +139,19 @@ def run_main_playbook(hosts, hosts_to_run_on):
facts_env['ANSIBLE_LOG_PATH'] = CFG.settings['ansible_log_path']
if 'ansible_config' in CFG.settings:
facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_config']
- return run_ansible(main_playbook_path, inventory_file, facts_env)
+ return run_ansible(main_playbook_path, inventory_file, facts_env, verbose)
+
+def run_ansible(playbook, inventory, env_vars, verbose=False):
+ args = ['ansible-playbook', '-v'] if verbose \
+ else ['ansible-playbook']
+ args.extend([
+ '--inventory-file={}'.format(inventory),
+ playbook])
+ return subprocess.call(args, env=env_vars)
-def run_ansible(playbook, inventory, env_vars):
- return subprocess.call(['ansible-playbook',
- '--inventory-file={}'.format(inventory),
- playbook],
- env=env_vars)
-def run_uninstall_playbook():
+def run_uninstall_playbook(verbose=False):
playbook = os.path.join(CFG.settings['ansible_playbook_directory'],
'playbooks/adhoc/uninstall.yml')
inventory_file = generate_inventory(CFG.hosts)
@@ -154,4 +160,20 @@ def run_uninstall_playbook():
facts_env['ANSIBLE_LOG_PATH'] = CFG.settings['ansible_log_path']
if 'ansible_config' in CFG.settings:
facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_config']
- return run_ansible(playbook, inventory_file, facts_env)
+ return run_ansible(playbook, inventory_file, facts_env, verbose)
+
+
+def run_upgrade_playbook(verbose=False):
+ # TODO: do not hardcode the upgrade playbook, add ability to select the
+ # right playbook depending on the type of upgrade.
+ playbook = os.path.join(CFG.settings['ansible_playbook_directory'],
+ 'playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml')
+ # TODO: Upgrade inventory for upgrade?
+ inventory_file = generate_inventory(CFG.hosts)
+ facts_env = os.environ.copy()
+ if 'ansible_log_path' in CFG.settings:
+ facts_env['ANSIBLE_LOG_PATH'] = CFG.settings['ansible_log_path']
+ if 'ansible_config' in CFG.settings:
+ facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_config']
+ return run_ansible(playbook, inventory_file, facts_env, verbose)
+
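# The ANSIBLE_LOG_PATH / ANSIBLE_CONFIG plumbing recurs in default_facts,
# run_main_playbook, run_uninstall_playbook and run_upgrade_playbook; a
# hypothetical helper (not in the source) factoring out the pattern:
import os

def ansible_env(settings):
    env = os.environ.copy()
    if 'ansible_log_path' in settings:
        env['ANSIBLE_LOG_PATH'] = settings['ansible_log_path']
    if 'ansible_config' in settings:
        env['ANSIBLE_CONFIG'] = settings['ansible_config']
    return env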
diff --git a/utils/src/ooinstall/variants.py b/utils/src/ooinstall/variants.py
index 05281d654..3bb61dddb 100644
--- a/utils/src/ooinstall/variants.py
+++ b/utils/src/ooinstall/variants.py
@@ -29,6 +29,9 @@ class Variant(object):
self.versions = versions
+ def latest_version(self):
+ return self.versions[-1]
+
# WARNING: Keep the versions ordered, most recent last:
OSE = Variant('openshift-enterprise', 'OpenShift Enterprise',
@@ -58,7 +61,7 @@ def find_variant(name, version=None):
for prod in SUPPORTED_VARIANTS:
if prod.name == name:
if version is None:
- return (prod, prod.versions[-1])
+ return (prod, prod.latest_version())
for v in prod.versions:
if v.name == version:
return (prod, v)
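# Usage sketch for the lookup above (assuming this module is importable as
# ooinstall.variants and that a '3.0' version entry exists, per the legacy
# default set in oo_config.py): with no version, find_variant falls back
# to latest_version(); with an explicit version it scans the ordered list.
from ooinstall.variants import find_variant
variant, version = find_variant('openshift-enterprise')                 # latest version
variant, version = find_variant('openshift-enterprise', version='3.0')  # pinned version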
diff --git a/utils/test/cli_installer_tests.py b/utils/test/cli_installer_tests.py
index b183f0acb..d58539b18 100644
--- a/utils/test/cli_installer_tests.py
+++ b/utils/test/cli_installer_tests.py
@@ -46,18 +46,21 @@ SAMPLE_CONFIG = """
variant: %s
ansible_ssh_user: root
hosts:
- - ip: 10.0.0.1
+ - connect_to: master-private.example.com
+ ip: 10.0.0.1
hostname: master-private.example.com
public_ip: 24.222.0.1
public_hostname: master.example.com
master: true
node: true
- - ip: 10.0.0.2
+ - connect_to: node1-private.example.com
+ ip: 10.0.0.2
hostname: node1-private.example.com
public_ip: 24.222.0.2
public_hostname: node1.example.com
node: true
- - ip: 10.0.0.3
+ - connect_to: node2-private.example.com
+ ip: 10.0.0.3
hostname: node2-private.example.com
public_ip: 24.222.0.3
public_hostname: node2.example.com
@@ -329,7 +332,7 @@ class AttendedCliTests(OOCliFixture):
for (host, is_master) in hosts:
inputs.append(host)
inputs.append('y' if is_master else 'n')
- inputs.append('rpm')
+ #inputs.append('rpm')
if i < len(hosts) - 1:
inputs.append('y') # Add more hosts
else:
@@ -346,7 +349,7 @@ class AttendedCliTests(OOCliFixture):
for (host, is_master) in add_nodes:
inputs.append(host)
inputs.append('y' if is_master else 'n')
- inputs.append('rpm')
+ #inputs.append('rpm')
if i < len(add_nodes) - 1:
inputs.append('y') # Add more hosts
else:
diff --git a/utils/test/oo_config_tests.py b/utils/test/oo_config_tests.py
index 01af33fd9..0dd4a30e9 100644
--- a/utils/test/oo_config_tests.py
+++ b/utils/test/oo_config_tests.py
@@ -14,36 +14,62 @@ SAMPLE_CONFIG = """
variant: openshift-enterprise
ansible_ssh_user: root
hosts:
- - ip: 10.0.0.1
+ - connect_to: master-private.example.com
+ ip: 10.0.0.1
hostname: master-private.example.com
public_ip: 24.222.0.1
public_hostname: master.example.com
master: true
node: true
- - ip: 10.0.0.2
+ - connect_to: node1-private.example.com
+ ip: 10.0.0.2
hostname: node1-private.example.com
public_ip: 24.222.0.2
public_hostname: node1.example.com
node: true
- - ip: 10.0.0.3
+ - connect_to: node2-private.example.com
+ ip: 10.0.0.3
hostname: node2-private.example.com
public_ip: 24.222.0.3
public_hostname: node2.example.com
node: true
"""
+# Used to test automatic upgrading of config:
+LEGACY_CONFIG = """
+Description: This is the configuration file for the OpenShift Ansible-Based Installer.
+Name: OpenShift Ansible-Based Installer Configuration
+Subscription: {type: none}
+Vendor: OpenShift Community
+Version: 0.0.1
+ansible_config: /tmp/notreal/ansible.cfg
+ansible_inventory_directory: /tmp/notreal/.config/openshift/.ansible
+ansible_log_path: /tmp/ansible.log
+ansible_plugins_directory: /tmp/notreal/.python-eggs/ooinstall-3.0.0-py2.7.egg-tmp/ooinstall/ansible_plugins
+masters: [10.0.0.1]
+nodes: [10.0.0.2, 10.0.0.3]
+validated_facts:
+ 10.0.0.1: {hostname: master-private.example.com, ip: 10.0.0.1, public_hostname: master.example.com, public_ip: 24.222.0.1}
+ 10.0.0.2: {hostname: node1-private.example.com, ip: 10.0.0.2, public_hostname: node1.example.com, public_ip: 24.222.0.2}
+ 10.0.0.3: {hostname: node2-private.example.com, ip: 10.0.0.3, public_hostname: node2.example.com, public_ip: 24.222.0.3}
+"""
+
+
CONFIG_INCOMPLETE_FACTS = """
hosts:
- - ip: 10.0.0.1
+ - connect_to: 10.0.0.1
+ ip: 10.0.0.1
hostname: master-private.example.com
public_ip: 24.222.0.1
public_hostname: master.example.com
master: true
- - ip: 10.0.0.2
- hostname: node1-private.example.com
+ - connect_to: 10.0.0.2
+ ip: 10.0.0.2
+ hostname: 24.222.0.2
public_ip: 24.222.0.2
node: true
- - ip: 10.0.0.3
+ - connect_to: 10.0.0.3
+ ip: 10.0.0.3
node: true
"""
@@ -74,6 +100,48 @@ class OOInstallFixture(unittest.TestCase):
return path
+class LegacyOOConfigTests(OOInstallFixture):
+
+ def setUp(self):
+ OOInstallFixture.setUp(self)
+ self.cfg_path = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'), LEGACY_CONFIG)
+ self.cfg = OOConfig(self.cfg_path)
+
+ def test_load_config_memory(self):
+ self.assertEquals('openshift-enterprise', self.cfg.settings['variant'])
+ self.assertEquals('3.0', self.cfg.settings['variant_version'])
+ self.assertEquals('v1', self.cfg.settings['version'])
+
+ self.assertEquals(3, len(self.cfg.hosts))
+ h1 = self.cfg.get_host('10.0.0.1')
+ self.assertEquals('10.0.0.1', h1.ip)
+ self.assertEquals('24.222.0.1', h1.public_ip)
+ self.assertEquals('master-private.example.com', h1.hostname)
+ self.assertEquals('master.example.com', h1.public_hostname)
+
+ h2 = self.cfg.get_host('10.0.0.2')
+ self.assertEquals('10.0.0.2', h2.ip)
+ self.assertEquals('24.222.0.2', h2.public_ip)
+ self.assertEquals('node1-private.example.com', h2.hostname)
+ self.assertEquals('node1.example.com', h2.public_hostname)
+
+ h3 = self.cfg.get_host('10.0.0.3')
+ self.assertEquals('10.0.0.3', h3.ip)
+ self.assertEquals('24.222.0.3', h3.public_ip)
+ self.assertEquals('node2-private.example.com', h3.hostname)
+ self.assertEquals('node2.example.com', h3.public_hostname)
+
+ self.assertFalse('masters' in self.cfg.settings)
+ self.assertFalse('nodes' in self.cfg.settings)
+ self.assertFalse('Description' in self.cfg.settings)
+ self.assertFalse('Name' in self.cfg.settings)
+ self.assertFalse('Subscription' in self.cfg.settings)
+ self.assertFalse('Vendor' in self.cfg.settings)
+ self.assertFalse('Version' in self.cfg.settings)
+ self.assertFalse('validated_facts' in self.cfg.settings)
+
+
class OOConfigTests(OOInstallFixture):
def test_load_config(self):
@@ -83,7 +151,7 @@ class OOConfigTests(OOInstallFixture):
ooconfig = OOConfig(cfg_path)
self.assertEquals(3, len(ooconfig.hosts))
- self.assertEquals("10.0.0.1", ooconfig.hosts[0].name)
+ self.assertEquals("master-private.example.com", ooconfig.hosts[0].connect_to)
self.assertEquals("10.0.0.1", ooconfig.hosts[0].ip)
self.assertEquals("master-private.example.com", ooconfig.hosts[0].hostname)
@@ -91,6 +159,7 @@ class OOConfigTests(OOInstallFixture):
[host['ip'] for host in ooconfig.settings['hosts']])
self.assertEquals('openshift-enterprise', ooconfig.settings['variant'])
+ self.assertEquals('v1', ooconfig.settings['version'])
def test_load_complete_facts(self):
cfg_path = self.write_config(os.path.join(self.work_dir,
@@ -128,6 +197,7 @@ class OOConfigTests(OOInstallFixture):
self.assertTrue('ansible_ssh_user' in written_config)
self.assertTrue('variant' in written_config)
+ self.assertEquals('v1', written_config['version'])
# Some advanced settings should not get written out if they
# were not specified by the user: