-rw-r--r--  .tito/packages/openshift-ansible | 2
-rw-r--r--  filter_plugins/oo_filters.py | 83
-rw-r--r--  inventory/byo/hosts.example | 51
-rw-r--r--  openshift-ansible.spec | 47
-rw-r--r--  playbooks/adhoc/uninstall.yml | 12
-rwxr-xr-x  playbooks/adhoc/upgrades/library/openshift_upgrade_config.py | 117
-rw-r--r--  playbooks/adhoc/upgrades/upgrade.yml | 121
-rw-r--r--  playbooks/aws/openshift-cluster/config.yml | 1
-rw-r--r--  playbooks/aws/openshift-cluster/launch.yml | 8
-rw-r--r--  playbooks/byo/openshift-cluster/config.yml | 1
-rw-r--r--  playbooks/common/openshift-cluster/config.yml | 67
-rw-r--r--  playbooks/common/openshift-cluster/evaluate_groups.yml | 76
-rw-r--r--  playbooks/common/openshift-cluster/scaleup.yml | 16
-rw-r--r--  playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml (renamed from playbooks/common/openshift-cluster/set_etcd_launch_facts_tasks.yml) | 0
-rw-r--r--  playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml (renamed from playbooks/common/openshift-cluster/set_master_launch_facts_tasks.yml) | 0
-rw-r--r--  playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml (renamed from playbooks/common/openshift-cluster/set_node_launch_facts_tasks.yml) | 0
-rw-r--r--  playbooks/common/openshift-master/config.yml | 97
-rw-r--r--  playbooks/gce/openshift-cluster/config.yml | 1
-rw-r--r--  playbooks/gce/openshift-cluster/join_node.yml | 2
-rw-r--r--  playbooks/gce/openshift-cluster/launch.yml | 6
-rw-r--r--  playbooks/libvirt/openshift-cluster/config.yml | 1
-rw-r--r--  playbooks/libvirt/openshift-cluster/launch.yml | 8
-rw-r--r--  playbooks/openstack/openshift-cluster/config.yml | 1
-rw-r--r--  roles/haproxy/README.md | 34
-rw-r--r--  roles/haproxy/defaults/main.yml | 21
-rw-r--r--  roles/haproxy/handlers/main.yml | 5
-rw-r--r--  roles/haproxy/meta/main.yml | 14
-rw-r--r--  roles/haproxy/tasks/main.yml | 25
-rw-r--r--  roles/haproxy/templates/haproxy.cfg.j2 | 76
-rw-r--r--  roles/kube_nfs_volumes/README.md | 3
-rw-r--r--  roles/kube_nfs_volumes/defaults/main.yml | 6
-rw-r--r--  roles/kube_nfs_volumes/tasks/main.yml | 13
l---------  roles/kube_nfs_volumes/templates/v1/nfs.json.j2 | 1
-rw-r--r--  roles/kube_nfs_volumes/templates/v1beta3/nfs.json.j2 (renamed from roles/kube_nfs_volumes/templates/nfs.json.j2) | 0
-rw-r--r--  roles/openshift_ansible_inventory/tasks/main.yml | 6
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py | 75
-rw-r--r--  roles/openshift_master/handlers/main.yml | 10
-rw-r--r--  roles/openshift_master/tasks/main.yml | 108
-rw-r--r--  roles/openshift_master/templates/atomic-openshift-master-api.j2 | 9
-rw-r--r--  roles/openshift_master/templates/atomic-openshift-master-api.service.j2 | 21
-rw-r--r--  roles/openshift_master/templates/atomic-openshift-master-controllers.j2 | 9
-rw-r--r--  roles/openshift_master/templates/atomic-openshift-master-controllers.service.j2 | 22
-rw-r--r--  roles/openshift_master/templates/master.yaml.v1.j2 | 31
-rw-r--r--  roles/openshift_master/templates/sessionSecretsFile.yaml.v1.j2 | 7
-rw-r--r--  roles/openshift_master/vars/main.yml | 1
-rw-r--r--  roles/openshift_master_ca/tasks/main.yml | 4
-rw-r--r--  roles/openshift_master_certificates/tasks/main.yml | 3
-rw-r--r--  roles/openshift_master_cluster/tasks/configure_deferred.yml | 8
-rw-r--r--  roles/openshift_master_cluster/tasks/main.yml | 5
-rw-r--r--  roles/openshift_node/meta/main.yml | 1
-rw-r--r--  roles/openshift_node/tasks/main.yml | 11
-rw-r--r--  roles/openshift_node/templates/node.yaml.v1.j2 | 1
-rw-r--r--  roles/openshift_repos/tasks/main.yaml | 2
-rw-r--r--  utils/docs/config.md | 8
-rw-r--r--  utils/src/ooinstall/cli_installer.py | 149
-rw-r--r--  utils/src/ooinstall/oo_config.py | 66
-rw-r--r--  utils/src/ooinstall/openshift_ansible.py | 59
-rw-r--r--  utils/src/ooinstall/variants.py | 5
-rw-r--r--  utils/test/oo_config_tests.py | 64
59 files changed, 1341 insertions, 260 deletions
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index 92f545b25..6046a1a86 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.0.6-1 ./
+3.0.7-1 ./
diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py
index a57b0f895..f494c0ae5 100644
--- a/filter_plugins/oo_filters.py
+++ b/filter_plugins/oo_filters.py
@@ -7,6 +7,8 @@ Custom filters for use in openshift-ansible
from ansible import errors
from operator import itemgetter
+import OpenSSL.crypto
+import os.path
import pdb
import re
import json
@@ -241,6 +243,21 @@ class FilterModule(object):
return string.split(separator)
@staticmethod
+ def oo_haproxy_backend_masters(hosts):
+ ''' This takes an array of dicts and returns an array of dicts
+ to be used as a backend for the haproxy role
+ '''
+ servers = []
+ for idx, host_info in enumerate(hosts):
+ server = dict(name="master%s" % idx)
+ server_ip = host_info['openshift']['common']['ip']
+ server_port = host_info['openshift']['master']['api_port']
+ server['address'] = "%s:%s" % (server_ip, server_port)
+ server['opts'] = 'check'
+ servers.append(server)
+ return servers
+
+ @staticmethod
def oo_filter_list(data, filter_attr=None):
''' This returns a list, which contains all items where filter_attr
evaluates to true
@@ -327,6 +344,68 @@ class FilterModule(object):
return revamped_outputs
+ @staticmethod
+ # pylint: disable=too-many-branches
+ def oo_parse_certificate_names(certificates, data_dir, internal_hostnames):
+ ''' Parses names from list of certificate hashes.
+
+ Ex: certificates = [{ "certfile": "/etc/origin/master/custom1.crt",
+ "keyfile": "/etc/origin/master/custom1.key" },
+ { "certfile": "custom2.crt",
+ "keyfile": "custom2.key" }]
+
+ returns [{ "certfile": "/etc/origin/master/custom1.crt",
+ "keyfile": "/etc/origin/master/custom1.key",
+ "names": [ "public-master-host.com",
+ "other-master-host.com" ] },
+ { "certfile": "/etc/origin/master/custom2.crt",
+ "keyfile": "/etc/origin/master/custom2.key",
+ "names": [ "some-hostname.com" ] }]
+ '''
+ if not issubclass(type(certificates), list):
+ raise errors.AnsibleFilterError("|failed expects certificates is a list")
+
+ if not issubclass(type(data_dir), unicode):
+ raise errors.AnsibleFilterError("|failed expects data_dir is unicode")
+
+ if not issubclass(type(internal_hostnames), list):
+ raise errors.AnsibleFilterError("|failed expects internal_hostnames is list")
+
+ for certificate in certificates:
+ if 'names' in certificate.keys():
+ continue
+ else:
+ certificate['names'] = []
+
+ if not os.path.isfile(certificate['certfile']) and not os.path.isfile(certificate['keyfile']):
+ # Unable to find cert/key, try to prepend data_dir to paths
+ certificate['certfile'] = os.path.join(data_dir, certificate['certfile'])
+ certificate['keyfile'] = os.path.join(data_dir, certificate['keyfile'])
+ if not os.path.isfile(certificate['certfile']) and not os.path.isfile(certificate['keyfile']):
+ # Unable to find cert/key in data_dir
+ raise errors.AnsibleFilterError("|certificate and/or key does not exist '%s', '%s'" %
+ (certificate['certfile'], certificate['keyfile']))
+
+ try:
+ st_cert = open(certificate['certfile'], 'rt').read()
+ cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, st_cert)
+ certificate['names'].append(str(cert.get_subject().commonName.decode()))
+ for i in range(cert.get_extension_count()):
+ if cert.get_extension(i).get_short_name() == 'subjectAltName':
+ for name in str(cert.get_extension(i)).replace('DNS:', '').split(', '):
+ certificate['names'].append(name)
+ except:
+ raise errors.AnsibleFilterError(("|failed to parse certificate '%s', " % certificate['certfile'] +
+ "please specify certificate names in host inventory"))
+
+ certificate['names'] = [name for name in certificate['names'] if name not in internal_hostnames]
+ certificate['names'] = list(set(certificate['names']))
+ if not certificate['names']:
+ raise errors.AnsibleFilterError(("|failed to parse certificate '%s' or " % certificate['certfile'] +
+ "detected a collision with internal hostname, please specify " +
+ "certificate names in host inventory"))
+ return certificates
+
def filters(self):
''' returns a mapping of filters to methods '''
return {
@@ -342,5 +421,7 @@ class FilterModule(object):
"oo_combine_dict": self.oo_combine_dict,
"oo_split": self.oo_split,
"oo_filter_list": self.oo_filter_list,
- "oo_parse_heat_stack_outputs": self.oo_parse_heat_stack_outputs
+ "oo_parse_heat_stack_outputs": self.oo_parse_heat_stack_outputs,
+ "oo_parse_certificate_names": self.oo_parse_certificate_names,
+ "oo_haproxy_backend_masters": self.oo_haproxy_backend_masters
}
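For context, here is a minimal sketch of what the new `oo_haproxy_backend_masters` filter produces; the two hostvars entries below are hypothetical and include only the keys the filter reads:

```python
# Hypothetical hostvars entries for two masters.
hosts = [
    {'openshift': {'common': {'ip': '192.168.133.10'},
                   'master': {'api_port': '8443'}}},
    {'openshift': {'common': {'ip': '192.168.133.11'},
                   'master': {'api_port': '8443'}}},
]

# Same logic as the filter body above.
servers = []
for idx, host_info in enumerate(hosts):
    server = dict(name="master%s" % idx)
    server_ip = host_info['openshift']['common']['ip']
    server_port = host_info['openshift']['master']['api_port']
    server['address'] = "%s:%s" % (server_ip, server_port)
    server['opts'] = 'check'
    servers.append(server)

print(servers)
# [{'name': 'master0', 'address': '192.168.133.10:8443', 'opts': 'check'},
#  {'name': 'master1', 'address': '192.168.133.11:8443', 'opts': 'check'}]
```

Each resulting dict maps directly onto a `server` line in the haproxy backend template added later in this commit.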
diff --git a/inventory/byo/hosts.example b/inventory/byo/hosts.example
index ad19fe116..11f076a8a 100644
--- a/inventory/byo/hosts.example
+++ b/inventory/byo/hosts.example
@@ -5,6 +5,7 @@
masters
nodes
etcd
+lb
# Set variables common for all OSEv3 hosts
[OSEv3:vars]
@@ -57,21 +58,29 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Set cockpit plugins
#osm_cockpit_plugins=['cockpit-kubernetes']
-# master cluster ha variables using pacemaker or RHEL HA
+# Native high availability cluster method with optional load balancer.
+# If no lb group is defined, the installer assumes that a load balancer has
+# been preconfigured. For installation, the value of
+# openshift_master_cluster_hostname must resolve to the load balancer
+# or to one or all of the masters defined in the inventory if no load
+# balancer is present.
+#openshift_master_cluster_method=native
+#openshift_master_cluster_hostname=openshift-ansible.test.example.com
+#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com
+
+# Pacemaker high availability cluster method.
+# A Pacemaker HA environment must be able to self-provision the
+# configured VIP. For installation, openshift_master_cluster_hostname
+# must resolve to the configured VIP.
+#openshift_master_cluster_method=pacemaker
#openshift_master_cluster_password=openshift_cluster
#openshift_master_cluster_vip=192.168.133.25
#openshift_master_cluster_public_vip=192.168.133.25
#openshift_master_cluster_hostname=openshift-ansible.test.example.com
#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com
-# master cluster ha variables when using a different HA solution
-# For installation the value of openshift_master_cluster_hostname must resolve
-# to the first master defined in the inventory.
-# The HA solution must be manually configured after installation and must ensure
-# that the master is running on a single master host.
-#openshift_master_cluster_hostname=openshift-ansible.test.example.com
-#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com
-#openshift_master_cluster_defer_ha=True
+# Override the default controller lease ttl
+#osm_controller_lease_ttl=30
# default subdomain to use for exposed routes
#osm_default_subdomain=apps.test.example.com
@@ -99,6 +108,27 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# set RPM version for debugging purposes
#openshift_pkg_version=-3.0.0.0
+# Configure custom master certificates
+#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key"}]
+# Detected names may be overridden by specifying the "names" key
+#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "names": ["public-master-host.com"]}]
+
+# Session options
+#openshift_master_session_name=ssn
+#openshift_master_session_max_seconds=3600
+
+# An authentication and encryption secret will be generated if secrets
+# are not provided. If provided, openshift_master_session_auth_secrets
+# and openshift_master_session_encryption_secrets must be of equal length.
+#
+# Signing secrets, used to authenticate sessions using
+# HMAC. Secrets of 32 or 64 bytes are recommended.
+#openshift_master_session_auth_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO']
+#
+# Encryption secrets, used to encrypt sessions. Must be 16, 24, or 32
+# characters long, to select AES-128, AES-192, or AES-256.
+#openshift_master_session_encryption_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO']
+
# host group for masters
[masters]
ose3-master[1:3]-ansible.test.example.com
@@ -106,6 +136,9 @@ ose3-master[1:3]-ansible.test.example.com
[etcd]
ose3-etcd[1:3]-ansible.test.example.com
+[lb]
+ose3-lb-ansible.test.example.com
+
# NOTE: Currently we require that masters be part of the SDN which requires that they also be nodes
# However, in order to ensure that your masters are not burdened with running pods you should
# make them unschedulable by adding openshift_scheduleable=False to any node that's also a master.
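The session secret requirements above can be satisfied with the same recipe the installer itself uses (`openssl rand -base64 24`); a minimal Python sketch, assuming you want one secret of each kind:

```python
import base64
import os

def gen_secret(num_bytes=24):
    # base64 of 24 random bytes yields a 32-character string, which
    # satisfies the AES-256 length requirement for the encryption secret.
    return base64.b64encode(os.urandom(num_bytes)).decode('ascii')

auth_secret = gen_secret()        # session signing (HMAC) secret
encryption_secret = gen_secret()  # session encryption secret
assert len(encryption_secret) == 32

print("openshift_master_session_auth_secrets=['%s']" % auth_secret)
print("openshift_master_session_encryption_secrets=['%s']" % encryption_secret)
```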
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index 8ea9120f2..10a53d921 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -5,7 +5,7 @@
}
Name: openshift-ansible
-Version: 3.0.6
+Version: 3.0.7
Release: 1%{?dist}
Summary: Openshift and Atomic Enterprise Ansible
License: ASL 2.0
@@ -104,6 +104,7 @@ Scripts to make it nicer when working with hosts that are defined only by metada
%files bin
%{_bindir}/*
+%exclude %{_bindir}/atomic-openshift-installer
%{python_sitelib}/openshift_ansible/
/etc/bash_completion.d/*
%config(noreplace) /etc/openshift_ansible/
@@ -170,6 +171,9 @@ Ansible Inventories for GCE used with the openshift-ansible scripts and playbook
%package playbooks
Summary: Openshift and Atomic Enterprise Ansible Playbooks
Requires: %{name}
+Requires: %{name}-roles
+Requires: %{name}-lookup-plugins
+Requires: %{name}-filter-plugins
BuildArch: noarch
%description playbooks
@@ -185,6 +189,8 @@ BuildArch: noarch
%package roles
Summary: Openshift and Atomic Enterprise Ansible roles
Requires: %{name}
+Requires: %{name}-lookup-plugins
+Requires: %{name}-filter-plugins
BuildArch: noarch
%description roles
@@ -249,6 +255,45 @@ Atomic OpenShift Utilities includes
%changelog
+* Wed Nov 04 2015 Kenny Woodson <kwoodson@redhat.com> 3.0.7-1
+- added the %%util in zabbix (mwoodson@redhat.com)
+- atomic-openshift-installer: Correct default playbook directory
+ (smunilla@redhat.com)
+- Support for gce (kwoodson@redhat.com)
+- fixed a dumb naming mistake (mwoodson@redhat.com)
+- added disk tps checks to zabbix (mwoodson@redhat.com)
+- atomic-openshift-installer: Correct inaccurate prompt (smunilla@redhat.com)
+- atomic-openshift-installer: Add default openshift-ansible-playbook
+ (smunilla@redhat.com)
+- ooinstall: Add check for nopwd sudo (smunilla@redhat.com)
+- ooinstall: Update local install check (smunilla@redhat.com)
+- oo-install: Support running on the host to be deployed (smunilla@redhat.com)
+- Moving to Openshift Etcd application (mmahut@redhat.com)
+- Add all the possible servicenames to openshift_all_hostnames for masters
+ (sdodson@redhat.com)
+- Adding openshift.node.etcd items (mmahut@redhat.com)
+- Fix etcd cert generation when etcd_interface is defined (jdetiber@redhat.com)
+- get zabbix ready to start tracking status of pcp (jdiaz@redhat.com)
+- split inventory into subpackages (tdawson@redhat.com)
+- changed the cpu alert to only alert if cpu idle more than 5x. Change alert to
+ warning (mwoodson@redhat.com)
+- Rename install_transactions module to openshift_ansible.
+ (dgoodwin@redhat.com)
+- atomic-openshift-installer: Text improvements (smunilla@redhat.com)
+- Add utils subpackage missing dep on openshift-ansible-roles.
+ (dgoodwin@redhat.com)
+- Disable requiretty for only the openshift user (error@ioerror.us)
+- Don't require tty to run sudo (error@ioerror.us)
+- Attempt to remove the various interfaces left over from an install
+ (bleanhar@redhat.com)
+- Pulling latest gce.py module from ansible (kwoodson@redhat.com)
+- Disable OpenShift features if installing Atomic Enterprise
+ (jdetiber@redhat.com)
+- Use default playbooks if available. (dgoodwin@redhat.com)
+- Add uninstall subcommand. (dgoodwin@redhat.com)
+- Add subcommands to CLI. (dgoodwin@redhat.com)
+- Remove images options in oadm command (nakayamakenjiro@gmail.com)
+
* Fri Oct 30 2015 Kenny Woodson <kwoodson@redhat.com> 3.0.6-1
- Adding python-boto and python-libcloud to openshift-ansible-inventory
dependency (kwoodson@redhat.com)
diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml
index 0503b7cd4..e05ab43f8 100644
--- a/playbooks/adhoc/uninstall.yml
+++ b/playbooks/adhoc/uninstall.yml
@@ -45,6 +45,7 @@
- origin-master-api
- origin-master-controllers
- origin-node
+ - pcsd
- yum: name={{ item }} state=absent
when: not is_atomic | bool
@@ -59,6 +60,7 @@
- atomic-openshift-node
- atomic-openshift-sdn-ovs
- etcd
+ - corosync
- openshift
- openshift-master
- openshift-node
@@ -69,6 +71,8 @@
- origin-master
- origin-node
- origin-sdn-ovs
+ - pacemaker
+ - pcs
- tuned-profiles-atomic-enterprise-node
- tuned-profiles-atomic-openshift-node
- tuned-profiles-openshift-node
@@ -136,8 +140,10 @@
- file: path={{ item }} state=absent
with_items:
+ - "~{{ ansible_ssh_user }}/.kube"
- /etc/ansible/facts.d/openshift.fact
- /etc/atomic-enterprise
+ - /etc/corosync
- /etc/etcd
- /etc/openshift
- /etc/openshift-sdn
@@ -151,9 +157,13 @@
- /etc/sysconfig/origin-master
- /etc/sysconfig/origin-node
- /root/.kube
- - "~{{ ansible_ssh_user }}/.kube"
+ - /run/openshift-sdn
- /usr/share/openshift/examples
- /var/lib/atomic-enterprise
- /var/lib/etcd
- /var/lib/openshift
- /var/lib/origin
+ - /var/lib/pacemaker
+
+ - name: restart docker
+ service: name=docker state=restarted
diff --git a/playbooks/adhoc/upgrades/library/openshift_upgrade_config.py b/playbooks/adhoc/upgrades/library/openshift_upgrade_config.py
new file mode 100755
index 000000000..60f4fd8b8
--- /dev/null
+++ b/playbooks/adhoc/upgrades/library/openshift_upgrade_config.py
@@ -0,0 +1,117 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# vim: expandtab:tabstop=4:shiftwidth=4
+
+"""Ansible module for modifying OpenShift configs during an upgrade"""
+
+import os
+import shutil
+import yaml
+
+from datetime import datetime
+
+DOCUMENTATION = '''
+---
+module: openshift_upgrade_config
+short_description: OpenShift Upgrade Config
+author: Jason DeTiberus
+requirements: [ ]
+'''
+EXAMPLES = '''
+'''
+
+def get_cfg_dir():
+ """Return the correct config directory to use."""
+ cfg_path = '/etc/origin/'
+ if not os.path.exists(cfg_path):
+ cfg_path = '/etc/openshift/'
+ return cfg_path
+
+
+def upgrade_master_3_0_to_3_1(backup):
+ """Main upgrade method for 3.0 to 3.1."""
+ changed = False
+
+ # Facts do not get transferred to the hosts where custom modules run,
+ # need to make some assumptions here.
+ master_config = os.path.join(get_cfg_dir(), 'master/master-config.yaml')
+
+ master_cfg_file = open(master_config, 'r')
+ config = yaml.safe_load(master_cfg_file.read())
+ master_cfg_file.close()
+
+ # Remove v1beta3 from apiLevels:
+ if 'apiLevels' in config and \
+ 'v1beta3' in config['apiLevels']:
+ config['apiLevels'].remove('v1beta3')
+ changed = True
+ if 'apiLevels' in config['kubernetesMasterConfig'] and \
+ 'v1beta3' in config['kubernetesMasterConfig']['apiLevels']:
+ config['kubernetesMasterConfig']['apiLevels'].remove('v1beta3')
+ changed = True
+
+ # Add the new master proxy client certs:
+ # TODO: re-enable this once these certs are generated during upgrade:
+# if 'proxyClientInfo' not in config['kubernetesMasterConfig']:
+# config['kubernetesMasterConfig']['proxyClientInfo'] = {
+# 'certFile': 'master.proxy-client.crt',
+# 'keyFile': 'master.proxy-client.key'
+# }
+
+ if changed:
+ if backup:
+ timestamp = datetime.now().strftime('%Y%m%d%H%M%S')
+ basedir = os.path.split(master_config)[0]
+ backup_file = os.path.join(basedir, 'master-config.yaml.bak-%s'
+ % timestamp)
+ shutil.copyfile(master_config, backup_file)
+ # Write the modified config:
+ out_file = open(master_config, 'w')
+ out_file.write(yaml.safe_dump(config, default_flow_style=False))
+ out_file.close()
+
+ return changed
+
+
+def upgrade_master(from_version, to_version, backup):
+ """Upgrade entry point."""
+ if from_version == '3.0':
+ if to_version == '3.1':
+ return upgrade_master_3_0_to_3_1(backup)
+
+
+def main():
+ """ main """
+ # disabling pylint errors for global-variable-undefined and invalid-name
+ # for 'global module' usage, since it is required to use ansible_facts
+ # pylint: disable=global-variable-undefined, invalid-name
+ global module
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ from_version=dict(required=True, choices=['3.0']),
+ to_version=dict(required=True, choices=['3.1']),
+ role=dict(required=True, choices=['master']),
+ backup=dict(required=False, default=True, type='bool')
+ ),
+ supports_check_mode=True,
+ )
+
+ from_version = module.params['from_version']
+ to_version = module.params['to_version']
+ role = module.params['role']
+ backup = module.params['backup']
+
+ changed = False
+ if role == 'master':
+ changed = upgrade_master(from_version, to_version, backup)
+
+ return module.exit_json(changed=changed)
+
+# ignore pylint errors related to the module_utils import
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
+# import module snippets
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
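To illustrate the transformation `upgrade_master_3_0_to_3_1` performs, here is a standalone sketch of the apiLevels pruning applied to a hypothetical master-config fragment (the module reads the real file from /etc/origin/master/master-config.yaml, falling back to /etc/openshift/):

```python
import yaml

# Hypothetical master-config fragment.
config = yaml.safe_load("""
apiLevels:
- v1beta3
- v1
kubernetesMasterConfig:
  apiLevels:
  - v1beta3
  - v1
""")

# Same pruning the module applies at top level and under
# kubernetesMasterConfig.
for section in (config, config['kubernetesMasterConfig']):
    if 'apiLevels' in section and 'v1beta3' in section['apiLevels']:
        section['apiLevels'].remove('v1beta3')

print(yaml.safe_dump(config, default_flow_style=False))
# apiLevels:
# - v1
# kubernetesMasterConfig:
#   apiLevels:
#   - v1
```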
diff --git a/playbooks/adhoc/upgrades/upgrade.yml b/playbooks/adhoc/upgrades/upgrade.yml
index ae1d0127c..09f991b1d 100644
--- a/playbooks/adhoc/upgrades/upgrade.yml
+++ b/playbooks/adhoc/upgrades/upgrade.yml
@@ -1,4 +1,57 @@
---
+- name: Update deployment type
+ hosts: OSEv3
+ roles:
+ - openshift_facts
+ post_tasks: # technically tasks are run after roles, but post_tasks is a bit more explicit.
+ - openshift_facts:
+ role: common
+ local_facts:
+ deployment_type: "{{ deployment_type }}"
+
+- name: Verify upgrade can proceed
+ hosts: masters
+ tasks:
+ # Checking the global deployment type rather than host facts, this is about
+ # what the user is requesting.
+ - fail: msg="Deployment type enterprise not supported for upgrade"
+ when: deployment_type == "enterprise"
+
+- name: Backup etcd
+ hosts: masters
+ vars:
+ embedded_etcd: "{{ openshift.master.embedded_etcd }}"
+ timestamp: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
+ roles:
+ - openshift_facts
+ tasks:
+ - stat: path=/var/lib/openshift
+ register: var_lib_openshift
+ - name: Create origin symlink if necessary
+ file: src=/var/lib/openshift/ dest=/var/lib/origin state=link
+ when: var_lib_openshift.stat.exists == True
+ - name: Check available disk space for etcd backup
+      # We assume the data dir is used for all backups.
+ shell: >
+ df --output=avail -k {{ openshift.common.data_dir }} | tail -n 1
+ register: avail_disk
+
+ - name: Check current embedded etcd disk usage
+ shell: >
+ du -k {{ openshift.master.etcd_data_dir }} | tail -n 1 | cut -f1
+ register: etcd_disk_usage
+ when: embedded_etcd | bool
+
+ - name: Abort if insufficient disk space for etcd backup
+ fail: msg="{{ etcd_disk_usage.stdout }} Kb disk space required for etcd backup, {{ avail_disk.stdout }} Kb available."
+ when: (embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int)
+ - name: Install etcd (for etcdctl)
+ yum: pkg=etcd state=latest
+ - name: Generate etcd backup
+ command: etcdctl backup --data-dir={{ openshift.master.etcd_data_dir }} --backup-dir={{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}
+ - name: Display location of etcd backup
+ debug: msg="Etcd backup created in {{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}"
+
- name: Upgrade base package on masters
hosts: masters
roles:
@@ -9,22 +62,58 @@
- name: Upgrade base package
yum: pkg={{ openshift.common.service_type }}{{ openshift_version }} state=latest
-- name: Re-Run cluster configuration to apply latest configuration changes
- include: ../../common/openshift-cluster/config.yml
+- name: Evaluate oo_first_master
+ hosts: localhost
vars:
- g_etcd_group: "{{ 'etcd' }}"
g_masters_group: "{{ 'masters' }}"
- g_nodes_group: "{{ 'nodes' }}"
- openshift_cluster_id: "{{ cluster_id | default('default') }}"
- openshift_deployment_type: "{{ deployment_type }}"
+ tasks:
+ - name: Evaluate oo_first_master
+ add_host:
+ name: "{{ groups[g_masters_group][0] }}"
+ groups: oo_first_master
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_sudo: "{{ g_sudo | default(omit) }}"
+ when: g_masters_group in groups and (groups[g_masters_group] | length) > 0
+
+# TODO: ideally we would check the new version, without installing it. (some
+# kind of yum repoquery? would need to handle openshift -> atomic-openshift
+# package rename)
+- name: Perform upgrade version checking
+ hosts: oo_first_master
+ tasks:
+ - name: Determine new version
+ command: >
+ rpm -q --queryformat '%{version}' {{ openshift.common.service_type }}
+ register: _new_version
+
+- name: Ensure AOS 3.0.2 or Origin 1.0.6
+ hosts: oo_first_master
+ tasks:
+  - fail:
+      msg: This playbook requires Origin 1.0.6 or Atomic OpenShift 3.0.2 or later
+    when: _new_version.stdout | version_compare('1.0.6','<') or (_new_version.stdout | version_compare('3.0','>=') and _new_version.stdout | version_compare('3.0.2','<'))
+
+- name: Verify upgrade can proceed
+ hosts: oo_first_master
+ tasks:
+ # Checking the global deployment type rather than host facts, this is about
+ # what the user is requesting.
+ - fail: msg="Deployment type 'enterprise' must be updated to 'openshift-enterprise' for upgrade to proceed"
+ when: deployment_type == "enterprise" and (_new_version.stdout | version_compare('1.0.7', '>=') or _new_version.stdout | version_compare('3.1', '>='))
- name: Upgrade masters
hosts: masters
vars:
openshift_version: "{{ openshift_pkg_version | default('') }}"
tasks:
+ - name: Upgrade to latest available kernel
+ yum: pkg=kernel state=latest
+    - name: Display hostvars (including deployment_type) for the current host
+ debug:
+ var: hostvars[inventory_hostname]
- name: Upgrade master packages
- yum: pkg={{ openshift.common.service_type }}-master{{ openshift_version }} state=latest
+ command: yum update -y {{ openshift.common.service_type }}-master{{ openshift_version }}
+ - name: Upgrade master configuration.
+ openshift_upgrade_config: from_version=3.0 to_version=3.1 role=master
- name: Restart master services
service: name="{{ openshift.common.service_type}}-master" state=restarted
@@ -32,26 +121,14 @@
hosts: nodes
vars:
openshift_version: "{{ openshift_pkg_version | default('') }}"
+ roles:
+ - openshift_facts
tasks:
- name: Upgrade node packages
- yum: pkg={{ openshift.common.service_type }}-node{{ openshift_version }} state=latest
+ command: yum update -y {{ openshift.common.service_type }}-node{{ openshift_version }}
- name: Restart node services
service: name="{{ openshift.common.service_type }}-node" state=restarted
-- name: Determine new master version
- hosts: oo_first_master
- tasks:
- - name: Determine new version
- command: >
- rpm -q --queryformat '%{version}' {{ openshift.common.service_type }}-master
- register: _new_version
-
-- name: Ensure AOS 3.0.2 or Origin 1.0.6
- hosts: oo_first_master
- tasks:
- fail: This playbook requires Origin 1.0.6 or Atomic OpenShift 3.0.2 or later
- when: _new_version.stdout | version_compare('1.0.6','<') or ( _new_version.stdout | version_compare('3.0','>=' and _new_version.stdout | version_compare('3.0.2','<') )
-
- name: Update cluster policy
hosts: oo_first_master
tasks:
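The disk-space guard in the etcd backup play boils down to comparing `du` of the etcd data dir against `df` of the backup destination; a rough standalone equivalent, assuming the default paths openshift_facts sets for an embedded etcd:

```python
import subprocess

# Defaults for an embedded etcd; substitute your
# openshift.common.data_dir / openshift.master.etcd_data_dir.
data_dir = '/var/lib/origin'
etcd_data_dir = data_dir + '/openshift.local.etcd'

# Same shell pipelines as the playbook tasks above.
avail_kb = int(subprocess.check_output(
    "df --output=avail -k %s | tail -n 1" % data_dir, shell=True).decode())
used_kb = int(subprocess.check_output(
    "du -k %s | tail -n 1 | cut -f1" % etcd_data_dir, shell=True).decode())

if used_kb > avail_kb:
    raise SystemExit("%d KB required for etcd backup, %d KB available"
                     % (used_kb, avail_kb))
```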
diff --git a/playbooks/aws/openshift-cluster/config.yml b/playbooks/aws/openshift-cluster/config.yml
index a8e3e27bb..5aa6b0f9b 100644
--- a/playbooks/aws/openshift-cluster/config.yml
+++ b/playbooks/aws/openshift-cluster/config.yml
@@ -11,6 +11,7 @@
- include: ../../common/openshift-cluster/config.yml
vars:
g_etcd_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-etcd' }}"
+ g_lb_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-lb' }}"
g_masters_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-master' }}"
g_nodes_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-node' }}"
g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}"
diff --git a/playbooks/aws/openshift-cluster/launch.yml b/playbooks/aws/openshift-cluster/launch.yml
index 786918929..09bf34666 100644
--- a/playbooks/aws/openshift-cluster/launch.yml
+++ b/playbooks/aws/openshift-cluster/launch.yml
@@ -11,7 +11,7 @@
msg: Deployment type not supported for aws provider yet
when: deployment_type == 'enterprise'
- - include: ../../common/openshift-cluster/set_etcd_launch_facts_tasks.yml
+ - include: ../../common/openshift-cluster/tasks/set_etcd_launch_facts.yml
- include: tasks/launch_instances.yml
vars:
instances: "{{ etcd_names }}"
@@ -19,7 +19,7 @@
type: "{{ k8s_type }}"
g_sub_host_type: "default"
- - include: ../../common/openshift-cluster/set_master_launch_facts_tasks.yml
+ - include: ../../common/openshift-cluster/tasks/set_master_launch_facts.yml
- include: tasks/launch_instances.yml
vars:
instances: "{{ master_names }}"
@@ -27,7 +27,7 @@
type: "{{ k8s_type }}"
g_sub_host_type: "default"
- - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+ - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
vars:
type: "compute"
count: "{{ num_nodes }}"
@@ -38,7 +38,7 @@
type: "{{ k8s_type }}"
g_sub_host_type: "{{ sub_host_type }}"
- - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+ - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
vars:
type: "infra"
count: "{{ num_infra }}"
diff --git a/playbooks/byo/openshift-cluster/config.yml b/playbooks/byo/openshift-cluster/config.yml
index 9e50a4a18..411c7e660 100644
--- a/playbooks/byo/openshift-cluster/config.yml
+++ b/playbooks/byo/openshift-cluster/config.yml
@@ -4,6 +4,7 @@
g_etcd_group: "{{ 'etcd' }}"
g_masters_group: "{{ 'masters' }}"
g_nodes_group: "{{ 'nodes' }}"
+ g_lb_group: "{{ 'lb' }}"
openshift_cluster_id: "{{ cluster_id | default('default') }}"
openshift_debug_level: 2
openshift_deployment_type: "{{ deployment_type }}"
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
index 4c74f96db..a8bd634d3 100644
--- a/playbooks/common/openshift-cluster/config.yml
+++ b/playbooks/common/openshift-cluster/config.yml
@@ -1,68 +1,5 @@
---
-- name: Populate config host groups
- hosts: localhost
- gather_facts: no
- tasks:
- - fail:
- msg: This playbook rquires g_etcd_group to be set
- when: g_etcd_group is not defined
-
- - fail:
- msg: This playbook rquires g_masters_group to be set
- when: g_masters_group is not defined
-
- - fail:
- msg: This playbook rquires g_nodes_group to be set
- when: g_nodes_group is not defined
-
- - name: Evaluate oo_etcd_to_config
- add_host:
- name: "{{ item }}"
- groups: oo_etcd_to_config
- ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_sudo: "{{ g_sudo | default(omit) }}"
- with_items: groups[g_etcd_group] | default([])
-
- - name: Evaluate oo_masters_to_config
- add_host:
- name: "{{ item }}"
- groups: oo_masters_to_config
- ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_sudo: "{{ g_sudo | default(omit) }}"
- with_items: groups[g_masters_group] | default([])
-
- - name: Evaluate oo_nodes_to_config
- add_host:
- name: "{{ item }}"
- groups: oo_nodes_to_config
- ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_sudo: "{{ g_sudo | default(omit) }}"
- with_items: groups[g_nodes_group] | default([])
-
- - name: Evaluate oo_nodes_to_config
- add_host:
- name: "{{ item }}"
- groups: oo_nodes_to_config
- ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_sudo: "{{ g_sudo | default(omit) }}"
- with_items: groups[g_masters_group] | default([])
- when: g_nodeonmaster is defined and g_nodeonmaster == true
-
- - name: Evaluate oo_first_etcd
- add_host:
- name: "{{ groups[g_etcd_group][0] }}"
- groups: oo_first_etcd
- ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_sudo: "{{ g_sudo | default(omit) }}"
- when: g_etcd_group in groups and (groups[g_etcd_group] | length) > 0
-
- - name: Evaluate oo_first_master
- add_host:
- name: "{{ groups[g_masters_group][0] }}"
- groups: oo_first_master
- ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_sudo: "{{ g_sudo | default(omit) }}"
- when: g_masters_group in groups and (groups[g_masters_group] | length) > 0
+- include: evaluate_groups.yml
- include: ../openshift-etcd/config.yml
@@ -71,4 +8,4 @@
- include: ../openshift-node/config.yml
vars:
osn_cluster_dns_domain: "{{ hostvars[groups.oo_first_master.0].openshift.dns.domain }}"
- osn_cluster_dns_ip: "{{ hostvars[groups.oo_first_master.0].openshift.dns.ip }}"
+ osn_cluster_dns_ip: "{{ hostvars[groups.oo_first_master.0].cluster_dns_ip }}"
diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml
new file mode 100644
index 000000000..2bb69614f
--- /dev/null
+++ b/playbooks/common/openshift-cluster/evaluate_groups.yml
@@ -0,0 +1,76 @@
+---
+- name: Populate config host groups
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - fail:
+ msg: This playbook requires g_etcd_group to be set
+ when: g_etcd_group is not defined
+
+ - fail:
+ msg: This playbook requires g_masters_group to be set
+ when: g_masters_group is not defined
+
+ - fail:
+ msg: This playbook requires g_nodes_group to be set
+ when: g_nodes_group is not defined
+
+ - fail:
+ msg: This playbook requires g_lb_group to be set
+ when: g_lb_group is not defined
+
+ - name: Evaluate oo_etcd_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_etcd_to_config
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_sudo: "{{ g_sudo | default(omit) }}"
+ with_items: groups[g_etcd_group] | default([])
+
+ - name: Evaluate oo_masters_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_masters_to_config
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_sudo: "{{ g_sudo | default(omit) }}"
+ with_items: groups[g_masters_group] | default([])
+
+ - name: Evaluate oo_nodes_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_nodes_to_config
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_sudo: "{{ g_sudo | default(omit) }}"
+ with_items: groups[g_nodes_group] | default([])
+
+ - name: Evaluate oo_nodes_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_nodes_to_config
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_sudo: "{{ g_sudo | default(omit) }}"
+ with_items: groups[g_masters_group] | default([])
+ when: g_nodeonmaster is defined and g_nodeonmaster == true
+
+ - name: Evaluate oo_first_etcd
+ add_host:
+ name: "{{ groups[g_etcd_group][0] }}"
+ groups: oo_first_etcd
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ when: g_etcd_group in groups and (groups[g_etcd_group] | length) > 0
+
+ - name: Evaluate oo_first_master
+ add_host:
+ name: "{{ groups[g_masters_group][0] }}"
+ groups: oo_first_master
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_sudo: "{{ g_sudo | default(omit) }}"
+ when: g_masters_group in groups and (groups[g_masters_group] | length) > 0
+
+ - name: Evaluate oo_lb_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_lb_to_config
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_sudo: "{{ g_sudo | default(omit) }}"
+ with_items: groups[g_lb_group] | default([])
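Conceptually, evaluate_groups.yml maps the static inventory groups onto the runtime oo_* groups that the later plays target; a toy sketch of that mapping, with hypothetical hostnames standing in for Ansible's `groups` dict:

```python
inventory = {
    'etcd':    ['etcd1.example.com'],
    'masters': ['master1.example.com', 'master2.example.com'],
    'nodes':   ['node1.example.com', 'node2.example.com'],
    'lb':      ['lb1.example.com'],
}

oo_groups = {
    'oo_etcd_to_config':    inventory.get('etcd', []),
    'oo_masters_to_config': inventory.get('masters', []),
    # masters are also added to oo_nodes_to_config when g_nodeonmaster is set
    'oo_nodes_to_config':   inventory.get('nodes', []),
    'oo_lb_to_config':      inventory.get('lb', []),
    'oo_first_etcd':        inventory.get('etcd', [])[:1],
    'oo_first_master':      inventory.get('masters', [])[:1],
}

print(oo_groups['oo_first_master'])  # ['master1.example.com']
```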
diff --git a/playbooks/common/openshift-cluster/scaleup.yml b/playbooks/common/openshift-cluster/scaleup.yml
new file mode 100644
index 000000000..6d2777732
--- /dev/null
+++ b/playbooks/common/openshift-cluster/scaleup.yml
@@ -0,0 +1,16 @@
+---
+- include: evaluate_groups.yml
+ vars:
+ g_etcd_group: "{{ 'etcd' }}"
+ g_masters_group: "{{ 'masters' }}"
+ g_nodes_group: "{{ 'nodes' }}"
+ g_lb_group: "{{ 'lb' }}"
+ openshift_cluster_id: "{{ cluster_id | default('default') }}"
+ openshift_debug_level: 2
+ openshift_deployment_type: "{{ deployment_type }}"
+
+- include: ../openshift-node/config.yml
+ vars:
+ osn_cluster_dns_domain: "{{ hostvars[groups.oo_first_master.0].openshift.dns.domain }}"
+ osn_cluster_dns_ip: "{{ hostvars[groups.oo_first_master.0].openshift.dns.ip }}"
+ openshift_deployment_type: "{{ deployment_type }}"
diff --git a/playbooks/common/openshift-cluster/set_etcd_launch_facts_tasks.yml b/playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml
index 1a6580795..1a6580795 100644
--- a/playbooks/common/openshift-cluster/set_etcd_launch_facts_tasks.yml
+++ b/playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml
diff --git a/playbooks/common/openshift-cluster/set_master_launch_facts_tasks.yml b/playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml
index 36d7b7870..36d7b7870 100644
--- a/playbooks/common/openshift-cluster/set_master_launch_facts_tasks.yml
+++ b/playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml
diff --git a/playbooks/common/openshift-cluster/set_node_launch_facts_tasks.yml b/playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml
index 278942f8b..278942f8b 100644
--- a/playbooks/common/openshift-cluster/set_node_launch_facts_tasks.yml
+++ b/playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index 1dec923fc..64376040f 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -34,7 +34,9 @@
- role: common
local_facts:
hostname: "{{ openshift_hostname | default(None) }}"
+ ip: "{{ openshift_ip | default(None) }}"
public_hostname: "{{ openshift_public_hostname | default(None) }}"
+ public_ip: "{{ openshift_public_ip | default(None) }}"
deployment_type: "{{ openshift_deployment_type }}"
- role: master
local_facts:
@@ -44,7 +46,6 @@
public_api_url: "{{ openshift_master_public_api_url | default(None) }}"
cluster_hostname: "{{ openshift_master_cluster_hostname | default(None) }}"
cluster_public_hostname: "{{ openshift_master_cluster_public_hostname | default(None) }}"
- cluster_defer_ha: "{{ openshift_master_cluster_defer_ha | default(None) }}"
console_path: "{{ openshift_master_console_path | default(None) }}"
console_port: "{{ openshift_master_console_port | default(None) }}"
console_url: "{{ openshift_master_console_url | default(None) }}"
@@ -168,6 +169,10 @@
masters_needing_certs: "{{ hostvars
| oo_select_keys(groups['oo_masters_to_config'] | difference(groups['oo_first_master']))
| oo_filter_list(filter_attr='master_certs_missing') }}"
+ master_hostnames: "{{ hostvars
+ | oo_select_keys(groups['oo_masters_to_config'])
+ | oo_collect('openshift.common.all_hostnames')
+ | oo_flatten | unique }}"
sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
roles:
- openshift_master_certificates
@@ -199,12 +204,84 @@
validate_checksum: yes
with_items: masters_needing_certs
+- name: Inspect named certificates
+ hosts: oo_first_master
+ tasks:
+ - name: Collect certificate names
+ set_fact:
+ parsed_named_certificates: "{{ openshift_master_named_certificates | oo_parse_certificate_names(master_cert_config_dir, openshift.common.internal_hostnames) }}"
+ when: openshift_master_named_certificates is defined
+
+- name: Compute haproxy_backend_servers
+ hosts: localhost
+ connection: local
+ sudo: false
+ gather_facts: no
+ tasks:
+ - set_fact:
+ haproxy_backend_servers: "{{ hostvars | oo_select_keys(groups['oo_masters_to_config']) | oo_haproxy_backend_masters }}"
+
+- name: Configure load balancers
+ hosts: oo_lb_to_config
+ vars:
+ sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
+ haproxy_frontends:
+ - name: atomic-openshift-api
+ mode: tcp
+ options:
+ - tcplog
+ binds:
+ - "*:{{ hostvars[groups.oo_first_master.0].openshift.master.api_port }}"
+ default_backend: atomic-openshift-api
+ haproxy_backends:
+ - name: atomic-openshift-api
+ mode: tcp
+ option: tcplog
+ balance: source
+ servers: "{{ hostvars.localhost.haproxy_backend_servers }}"
+ roles:
+ - role: haproxy
+ when: groups.oo_masters_to_config | length > 1
+
+- name: Generate master session keys
+ hosts: oo_first_master
+ tasks:
+ - fail:
+ msg: "Both openshift_master_session_auth_secrets and openshift_master_session_encryption_secrets must be provided if either variable is set"
+ when: (openshift_master_session_auth_secrets is defined and openshift_master_session_encryption_secrets is not defined) or (openshift_master_session_encryption_secrets is defined and openshift_master_session_auth_secrets is not defined)
+ - fail:
+ msg: "openshift_master_session_auth_secrets and openshift_master_encryption_secrets must be equal length"
+ when: (openshift_master_session_auth_secrets is defined and openshift_master_session_encryption_secrets is defined) and (openshift_master_session_auth_secrets | length != openshift_master_session_encryption_secrets | length)
+ - name: Generate session authentication key
+ command: /usr/bin/openssl rand -base64 24
+ register: session_auth_output
+ with_sequence: count=1
+ when: openshift_master_session_auth_secrets is undefined
+ - name: Generate session encryption key
+ command: /usr/bin/openssl rand -base64 24
+ register: session_encryption_output
+ with_sequence: count=1
+ when: openshift_master_session_encryption_secrets is undefined
+ - set_fact:
+ session_auth_secret: "{{ openshift_master_session_auth_secrets
+ | default(session_auth_output.results
+ | map(attribute='stdout')
+ | list) }}"
+ session_encryption_secret: "{{ openshift_master_session_encryption_secrets
+ | default(session_encryption_output.results
+ | map(attribute='stdout')
+ | list) }}"
+
- name: Configure master instances
hosts: oo_masters_to_config
+ serial: 1
vars:
+ named_certificates: "{{ hostvars[groups['oo_first_master'][0]]['parsed_named_certificates'] | default([])}}"
sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
- embedded_etcd: "{{ openshift.master.embedded_etcd }}"
+ openshift_master_count: "{{ groups.oo_masters_to_config | length }}"
+ openshift_master_session_auth_secrets: "{{ hostvars[groups['oo_first_master'][0]]['session_auth_secret'] }}"
+ openshift_master_session_encryption_secrets: "{{ hostvars[groups['oo_first_master'][0]]['session_encryption_secret'] }}"
pre_tasks:
- name: Ensure certificate directory exists
file:
@@ -233,11 +310,25 @@
omc_cluster_hosts: "{{ groups.oo_masters_to_config | join(' ')}}"
roles:
- role: openshift_master_cluster
- when: openshift_master_ha | bool
+ when: openshift_master_ha | bool and openshift.master.cluster_method == "pacemaker"
- openshift_examples
- role: openshift_cluster_metrics
when: openshift.common.use_cluster_metrics | bool
+- name: Determine cluster dns ip
+ hosts: oo_first_master
+ tasks:
+ - name: Get master service ip
+ command: "{{ openshift.common.client_binary }} get -o template svc kubernetes --template=\\{\\{.spec.clusterIP\\}\\}"
+ register: master_service_ip_output
+ when: openshift.common.version_greater_than_3_1_or_1_1 | bool
+ - set_fact:
+ cluster_dns_ip: "{{ hostvars[groups.oo_first_master.0].openshift.dns.ip }}"
+ when: not openshift.common.version_greater_than_3_1_or_1_1 | bool
+ - set_fact:
+ cluster_dns_ip: "{{ master_service_ip_output.stdout }}"
+ when: openshift.common.version_greater_than_3_1_or_1_1 | bool
+
- name: Enable cockpit
hosts: oo_first_master
vars:
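The "Determine cluster dns ip" play above selects between two sources depending on release; a minimal sketch of that decision, where the flag corresponds to openshift.common.version_greater_than_3_1_or_1_1 and both IPs are hypothetical examples:

```python
def cluster_dns_ip(version_gt_3_1_or_1_1, master_service_ip, legacy_dns_ip):
    # Newer releases use the kubernetes service clusterIP
    # (from `oc get svc kubernetes`); older ones keep openshift.dns.ip.
    if version_gt_3_1_or_1_1:
        return master_service_ip
    return legacy_dns_ip

print(cluster_dns_ip(True, '172.30.0.1', '192.168.133.2'))   # 172.30.0.1
print(cluster_dns_ip(False, '172.30.0.1', '192.168.133.2'))  # 192.168.133.2
```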
diff --git a/playbooks/gce/openshift-cluster/config.yml b/playbooks/gce/openshift-cluster/config.yml
index 6ca4f7395..745161bcb 100644
--- a/playbooks/gce/openshift-cluster/config.yml
+++ b/playbooks/gce/openshift-cluster/config.yml
@@ -16,6 +16,7 @@
- include: ../../common/openshift-cluster/config.yml
vars:
g_etcd_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-etcd' }}"
+ g_lb_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-lb' }}"
g_masters_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-master' }}"
g_nodes_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-node' }}"
g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}"
diff --git a/playbooks/gce/openshift-cluster/join_node.yml b/playbooks/gce/openshift-cluster/join_node.yml
index 0dfa3e9d7..c8f6065cd 100644
--- a/playbooks/gce/openshift-cluster/join_node.yml
+++ b/playbooks/gce/openshift-cluster/join_node.yml
@@ -46,4 +46,4 @@
openshift_node_labels: "{{ lookup('oo_option', 'openshift_node_labels') }} "
os_sdn_network_plugin_name: "redhat/openshift-ovs-subnet"
osn_cluster_dns_domain: "{{ hostvars[groups.oo_first_master.0].openshift.dns.domain }}"
- osn_cluster_dns_ip: "{{ hostvars[groups.oo_first_master.0].openshift.dns.ip }}"
+ osn_cluster_dns_ip: "{{ hostvars[groups.oo_first_master.0].cluster_dns_ip }}"
diff --git a/playbooks/gce/openshift-cluster/launch.yml b/playbooks/gce/openshift-cluster/launch.yml
index c22b897d5..8be5d53e7 100644
--- a/playbooks/gce/openshift-cluster/launch.yml
+++ b/playbooks/gce/openshift-cluster/launch.yml
@@ -9,7 +9,7 @@
- fail: msg="Deployment type not supported for gce provider yet"
when: deployment_type == 'enterprise'
- - include: ../../common/openshift-cluster/set_master_launch_facts_tasks.yml
+ - include: ../../common/openshift-cluster/tasks/set_master_launch_facts.yml
- include: tasks/launch_instances.yml
vars:
instances: "{{ master_names }}"
@@ -17,7 +17,7 @@
type: "{{ k8s_type }}"
g_sub_host_type: "default"
- - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+ - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
vars:
type: "compute"
count: "{{ num_nodes }}"
@@ -28,7 +28,7 @@
type: "{{ k8s_type }}"
g_sub_host_type: "{{ sub_host_type }}"
- - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+ - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
vars:
type: "infra"
count: "{{ num_infra }}"
diff --git a/playbooks/libvirt/openshift-cluster/config.yml b/playbooks/libvirt/openshift-cluster/config.yml
index c208eee81..4d1ae22ff 100644
--- a/playbooks/libvirt/openshift-cluster/config.yml
+++ b/playbooks/libvirt/openshift-cluster/config.yml
@@ -15,6 +15,7 @@
- include: ../../common/openshift-cluster/config.yml
vars:
g_etcd_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-etcd' }}"
+ g_lb_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-lb' }}"
g_masters_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-master' }}"
g_nodes_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-node' }}"
g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}"
diff --git a/playbooks/libvirt/openshift-cluster/launch.yml b/playbooks/libvirt/openshift-cluster/launch.yml
index d3e768de5..8d7949dd1 100644
--- a/playbooks/libvirt/openshift-cluster/launch.yml
+++ b/playbooks/libvirt/openshift-cluster/launch.yml
@@ -17,7 +17,7 @@
- include: tasks/configure_libvirt.yml
- - include: ../../common/openshift-cluster/set_etcd_launch_facts_tasks.yml
+ - include: ../../common/openshift-cluster/tasks/set_etcd_launch_facts.yml
- include: tasks/launch_instances.yml
vars:
instances: "{{ etcd_names }}"
@@ -25,7 +25,7 @@
type: "{{ k8s_type }}"
g_sub_host_type: "default"
- - include: ../../common/openshift-cluster/set_master_launch_facts_tasks.yml
+ - include: ../../common/openshift-cluster/tasks/set_master_launch_facts.yml
- include: tasks/launch_instances.yml
vars:
instances: "{{ master_names }}"
@@ -33,7 +33,7 @@
type: "{{ k8s_type }}"
g_sub_host_type: "default"
- - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+ - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
vars:
type: "compute"
count: "{{ num_nodes }}"
@@ -44,7 +44,7 @@
type: "{{ k8s_type }}"
g_sub_host_type: "{{ sub_host_type }}"
- - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+ - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
vars:
type: "infra"
count: "{{ num_infra }}"
diff --git a/playbooks/openstack/openshift-cluster/config.yml b/playbooks/openstack/openshift-cluster/config.yml
index a5ee2d6a5..888804e28 100644
--- a/playbooks/openstack/openshift-cluster/config.yml
+++ b/playbooks/openstack/openshift-cluster/config.yml
@@ -10,6 +10,7 @@
- include: ../../common/openshift-cluster/config.yml
vars:
g_etcd_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-etcd' }}"
+ g_lb_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-lb' }}"
g_masters_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-master' }}"
g_nodes_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-node' }}"
g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}"
diff --git a/roles/haproxy/README.md b/roles/haproxy/README.md
new file mode 100644
index 000000000..5bc415066
--- /dev/null
+++ b/roles/haproxy/README.md
@@ -0,0 +1,34 @@
+HAProxy
+=======
+
+TODO
+
+Requirements
+------------
+
+TODO
+
+Role Variables
+--------------
+
+TODO
+
+Dependencies
+------------
+
+TODO
+
+Example Playbook
+----------------
+
+TODO
+
+License
+-------
+
+Apache License, Version 2.0
+
+Author Information
+------------------
+
+Jason DeTiberus (jdetiber@redhat.com)
diff --git a/roles/haproxy/defaults/main.yml b/roles/haproxy/defaults/main.yml
new file mode 100644
index 000000000..7ba5bd485
--- /dev/null
+++ b/roles/haproxy/defaults/main.yml
@@ -0,0 +1,21 @@
+---
+haproxy_frontends:
+- name: main
+ binds:
+ - "*:80"
+ default_backend: default
+
+haproxy_backends:
+- name: default
+ balance: roundrobin
+ servers:
+ - name: web01
+ address: 127.0.0.1:9000
+ opts: check
+
+os_firewall_use_firewalld: False
+os_firewall_allow:
+- service: haproxy stats
+ port: "9000/tcp"
+- service: haproxy balance
+ port: "8443/tcp"
diff --git a/roles/haproxy/handlers/main.yml b/roles/haproxy/handlers/main.yml
new file mode 100644
index 000000000..ee60adcab
--- /dev/null
+++ b/roles/haproxy/handlers/main.yml
@@ -0,0 +1,5 @@
+---
+- name: restart haproxy
+ service:
+ name: haproxy
+ state: restarted
diff --git a/roles/haproxy/meta/main.yml b/roles/haproxy/meta/main.yml
new file mode 100644
index 000000000..0fad106a9
--- /dev/null
+++ b/roles/haproxy/meta/main.yml
@@ -0,0 +1,14 @@
+---
+galaxy_info:
+ author: Jason DeTiberus
+ description: HAProxy
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 1.9
+ platforms:
+ - name: EL
+ versions:
+ - 7
+dependencies:
+- { role: os_firewall }
+- { role: openshift_repos }
diff --git a/roles/haproxy/tasks/main.yml b/roles/haproxy/tasks/main.yml
new file mode 100644
index 000000000..5638b7313
--- /dev/null
+++ b/roles/haproxy/tasks/main.yml
@@ -0,0 +1,25 @@
+---
+- name: Install haproxy
+ yum:
+ pkg: haproxy
+ state: present
+
+- name: Configure haproxy
+ template:
+ src: haproxy.cfg.j2
+ dest: /etc/haproxy/haproxy.cfg
+ owner: root
+ group: root
+ mode: 0644
+ notify: restart haproxy
+
+- name: Enable and start haproxy
+ service:
+ name: haproxy
+ state: started
+ enabled: yes
+ register: start_result
+
+- name: Pause 30 seconds if haproxy was just started
+ pause: seconds=30
+ when: start_result | changed
diff --git a/roles/haproxy/templates/haproxy.cfg.j2 b/roles/haproxy/templates/haproxy.cfg.j2
new file mode 100644
index 000000000..c932af72f
--- /dev/null
+++ b/roles/haproxy/templates/haproxy.cfg.j2
@@ -0,0 +1,76 @@
+# Global settings
+#---------------------------------------------------------------------
+global
+ chroot /var/lib/haproxy
+ pidfile /var/run/haproxy.pid
+ maxconn 4000
+ user haproxy
+ group haproxy
+ daemon
+
+ # turn on stats unix socket
+ stats socket /var/lib/haproxy/stats
+
+#---------------------------------------------------------------------
+# common defaults that all the 'listen' and 'backend' sections will
+# use if not designated in their block
+#---------------------------------------------------------------------
+defaults
+ mode http
+ log global
+ option httplog
+ option dontlognull
+ option http-server-close
+ option forwardfor except 127.0.0.0/8
+ option redispatch
+ retries 3
+ timeout http-request 10s
+ timeout queue 1m
+ timeout connect 10s
+ timeout client 300s
+ timeout server 300s
+ timeout http-keep-alive 10s
+ timeout check 10s
+ maxconn 3000
+
+listen stats :9000
+ mode http
+ stats enable
+ stats uri /
+
+{% for frontend in haproxy_frontends %}
+frontend {{ frontend.name }}
+{% for bind in frontend.binds %}
+ bind {{ bind }}
+{% endfor %}
+ default_backend {{ frontend.default_backend }}
+{% if 'mode' in frontend %}
+ mode {{ frontend.mode }}
+{% endif %}
+{% if 'options' in frontend %}
+{% for option in frontend.options %}
+ option {{ option }}
+{% endfor %}
+{% endif %}
+{% if 'redirects' in frontend %}
+{% for redirect in frontend.redirects %}
+ redirect {{ redirect }}
+{% endfor %}
+{% endif %}
+{% endfor %}
+
+{% for backend in haproxy_backends %}
+backend {{ backend.name }}
+ balance {{ backend.balance }}
+{% if 'mode' in backend %}
+ mode {{ backend.mode }}
+{% endif %}
+{% if 'options' in backend %}
+{% for option in backend.options %}
+ option {{ option }}
+{% endfor %}
+{% endif %}
+{% for server in backend.servers %}
+ server {{ server.name }} {{ server.address }} {{ server.opts }}
+{% endfor %}
+{% endfor %}
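A quick way to sanity-check the template is to render its loops with the role defaults; a trimmed sketch (requires Jinja2, and reproduces only the frontend/backend sections, omitting the global/defaults/stats blocks and the optional mode/option/redirect lines):

```python
import jinja2

tmpl = jinja2.Template(
    "{% for frontend in haproxy_frontends %}"
    "frontend {{ frontend.name }}\n"
    "{% for bind in frontend.binds %}  bind {{ bind }}\n{% endfor %}"
    "  default_backend {{ frontend.default_backend }}\n"
    "{% endfor %}"
    "{% for backend in haproxy_backends %}"
    "backend {{ backend.name }}\n"
    "  balance {{ backend.balance }}\n"
    "{% for server in backend.servers %}"
    "  server {{ server.name }} {{ server.address }} {{ server.opts }}\n"
    "{% endfor %}{% endfor %}")

# Values taken from roles/haproxy/defaults/main.yml above.
print(tmpl.render(
    haproxy_frontends=[{'name': 'main', 'binds': ['*:80'],
                        'default_backend': 'default'}],
    haproxy_backends=[{'name': 'default', 'balance': 'roundrobin',
                       'servers': [{'name': 'web01',
                                    'address': '127.0.0.1:9000',
                                    'opts': 'check'}]}]))
# frontend main
#   bind *:80
#   default_backend default
# backend default
#   balance roundrobin
#   server web01 127.0.0.1:9000 check
```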
diff --git a/roles/kube_nfs_volumes/README.md b/roles/kube_nfs_volumes/README.md
index 56c69c286..1520f79b2 100644
--- a/roles/kube_nfs_volumes/README.md
+++ b/roles/kube_nfs_volumes/README.md
@@ -44,6 +44,9 @@ kubernetes_url: https://10.245.1.2:6443
# Token to use for authentication to the API server
kubernetes_token: tJdce6Fn3cL1112YoIJ5m2exzAbzcPZX
+
+# API Version to use for kubernetes
+kube_api_version: v1
```
## Dependencies
diff --git a/roles/kube_nfs_volumes/defaults/main.yml b/roles/kube_nfs_volumes/defaults/main.yml
index e296492f9..bdd994d07 100644
--- a/roles/kube_nfs_volumes/defaults/main.yml
+++ b/roles/kube_nfs_volumes/defaults/main.yml
@@ -1,4 +1,10 @@
---
+kubernetes_url: https://172.30.0.1:443
+
+kube_api_version: v1
+
+kube_req_template: "../templates/{{ kube_api_version }}/nfs.json.j2"
+
# Options of NFS exports.
nfs_export_options: "*(rw,no_root_squash,insecure,no_subtree_check)"
diff --git a/roles/kube_nfs_volumes/tasks/main.yml b/roles/kube_nfs_volumes/tasks/main.yml
index f4a506234..d1dcf261a 100644
--- a/roles/kube_nfs_volumes/tasks/main.yml
+++ b/roles/kube_nfs_volumes/tasks/main.yml
@@ -16,10 +16,11 @@
- include: nfs.yml
- name: export physical volumes
- uri: url={{ kubernetes_url }}/api/v1beta3/persistentvolumes
- method=POST
- body='{{ lookup("template", "../templates/nfs.json.j2") }}'
- body_format=json
- status_code=201
- HEADER_Authorization="Bearer {{ kubernetes_token }}"
+ uri:
+ url: "{{ kubernetes_url }}/api/{{ kube_api_version }}/persistentvolumes"
+ method: POST
+ body: "{{ lookup('template', kube_req_template) }}"
+ body_format: json
+ status_code: 201
+ HEADER_Authorization: "Bearer {{ kubernetes_token }}"
with_items: partition_pool
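The reworked uri task amounts to one authenticated POST per entry in partition_pool; an equivalent sketch using `requests`, where the URL and token are the documented examples and the body shape is abbreviated (the real body is the rendered nfs.json.j2):

```python
import requests

kubernetes_url = 'https://172.30.0.1:443'  # role default
kube_api_version = 'v1'                    # role default
kubernetes_token = 'tJdce6Fn3cL1112YoIJ5m2exzAbzcPZX'  # example from the README

# Stand-in for one rendered nfs.json.j2 body.
body = {
    'apiVersion': kube_api_version,
    'kind': 'PersistentVolume',
    'metadata': {'name': 'nfs-pv-example'},
}

resp = requests.post(
    '%s/api/%s/persistentvolumes' % (kubernetes_url, kube_api_version),
    json=body,
    headers={'Authorization': 'Bearer %s' % kubernetes_token})
assert resp.status_code == 201  # the task requires HTTP 201 Created
```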
diff --git a/roles/kube_nfs_volumes/templates/v1/nfs.json.j2 b/roles/kube_nfs_volumes/templates/v1/nfs.json.j2
new file mode 120000
index 000000000..49c1191bc
--- /dev/null
+++ b/roles/kube_nfs_volumes/templates/v1/nfs.json.j2
@@ -0,0 +1 @@
+../v1beta3/nfs.json.j2
\ No newline at end of file
diff --git a/roles/kube_nfs_volumes/templates/nfs.json.j2 b/roles/kube_nfs_volumes/templates/v1beta3/nfs.json.j2
index b42886ef1..b42886ef1 100644
--- a/roles/kube_nfs_volumes/templates/nfs.json.j2
+++ b/roles/kube_nfs_volumes/templates/v1beta3/nfs.json.j2
diff --git a/roles/openshift_ansible_inventory/tasks/main.yml b/roles/openshift_ansible_inventory/tasks/main.yml
index bce6a8745..f6919dada 100644
--- a/roles/openshift_ansible_inventory/tasks/main.yml
+++ b/roles/openshift_ansible_inventory/tasks/main.yml
@@ -1,7 +1,11 @@
---
- yum:
- name: openshift-ansible-inventory
+ name: "{{ item }}"
state: present
+ with_items:
+ - openshift-ansible-inventory
+ - openshift-ansible-inventory-aws
+ - openshift-ansible-inventory-gce
- name:
copy:
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 163e67f62..1ba5fc13b 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -407,7 +407,7 @@ def set_identity_providers_if_unset(facts):
name='allow_all', challenge=True, login=True,
kind='AllowAllPasswordIdentityProvider'
)
- if deployment_type == 'enterprise':
+ if deployment_type in ['enterprise', 'atomic-enterprise', 'openshift-enterprise']:
identity_provider = dict(
name='deny_all', challenge=True, login=True,
kind='DenyAllPasswordIdentityProvider'
@@ -484,12 +484,16 @@ def set_aggregate_facts(facts):
dict: the facts dict updated with aggregated facts
"""
all_hostnames = set()
+ internal_hostnames = set()
if 'common' in facts:
all_hostnames.add(facts['common']['hostname'])
all_hostnames.add(facts['common']['public_hostname'])
all_hostnames.add(facts['common']['ip'])
all_hostnames.add(facts['common']['public_ip'])
+ internal_hostnames.add(facts['common']['hostname'])
+ internal_hostnames.add(facts['common']['ip'])
+
if 'master' in facts:
# FIXME: not sure why but facts['dns']['domain'] fails
cluster_domain = 'cluster.local'
@@ -497,13 +501,23 @@ def set_aggregate_facts(facts):
all_hostnames.add(facts['master']['cluster_hostname'])
if 'cluster_public_hostname' in facts['master']:
all_hostnames.add(facts['master']['cluster_public_hostname'])
- all_hostnames.update(['openshift', 'openshift.default', 'openshift.default.svc',
- 'openshift.default.svc.' + cluster_domain, 'kubernetes', 'kubernetes.default',
- 'kubernetes.default.svc', 'kubernetes.default.svc.' + cluster_domain])
+ svc_names = ['openshift', 'openshift.default', 'openshift.default.svc',
+ 'openshift.default.svc.' + cluster_domain, 'kubernetes', 'kubernetes.default',
+ 'kubernetes.default.svc', 'kubernetes.default.svc.' + cluster_domain]
+ all_hostnames.update(svc_names)
+ internal_hostnames.update(svc_names)
first_svc_ip = str(IPNetwork(facts['master']['portal_net'])[1])
all_hostnames.add(first_svc_ip)
+ internal_hostnames.add(first_svc_ip)
+
+ if facts['master']['embedded_etcd']:
+ facts['master']['etcd_data_dir'] = os.path.join(
+ facts['common']['data_dir'], 'openshift.local.etcd')
+ else:
+ facts['master']['etcd_data_dir'] = '/var/lib/etcd'
facts['common']['all_hostnames'] = list(all_hostnames)
+    facts['common']['internal_hostnames'] = list(internal_hostnames)
return facts
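
Note that the two sets diverge on purpose: `all_hostnames` collects every name a client might use (and later feeds certificate and CORS handling), while `internal_hostnames` keeps only names resolvable from inside the cluster. A self-contained sketch of the aggregation with fabricated facts:

```
# Sketch of the hostname aggregation above, with fabricated facts.
facts = {'common': {'hostname': 'master1.internal', 'ip': '10.0.0.1',
                    'public_hostname': 'master1.example.com',
                    'public_ip': '24.222.0.1'}}

all_hostnames = {facts['common']['hostname'], facts['common']['public_hostname'],
                 facts['common']['ip'], facts['common']['public_ip']}
internal_hostnames = {facts['common']['hostname'], facts['common']['ip']}

svc_names = ['kubernetes', 'kubernetes.default', 'kubernetes.default.svc']
all_hostnames.update(svc_names)
internal_hostnames.update(svc_names)

# Public names only ever appear in the external set:
assert 'master1.example.com' in all_hostnames
assert 'master1.example.com' not in internal_hostnames
```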
@@ -540,15 +554,6 @@ def set_deployment_facts_if_unset(facts):
if deployment_type in ['enterprise', 'online']:
data_dir = '/var/lib/openshift'
facts['common']['data_dir'] = data_dir
- facts['common']['version'] = version = get_openshift_version()
- if version is not None:
- if deployment_type == 'origin':
- version_gt_3_1_or_1_1 = LooseVersion(version) > LooseVersion('1.0.6')
- else:
- version_gt_3_1_or_1_1 = LooseVersion(version) > LooseVersion('3.0.2.900')
- else:
- version_gt_3_1_or_1_1 = True
- facts['common']['version_greater_than_3_1_or_1_1'] = version_gt_3_1_or_1_1
for role in ('master', 'node'):
if role in facts:
@@ -582,12 +587,34 @@ def set_deployment_facts_if_unset(facts):
return facts
+def set_version_facts_if_unset(facts):
+ """ Set version facts. This currently includes common.version and
+ common.version_greater_than_3_1_or_1_1.
-def set_sdn_facts_if_unset(facts):
+ Args:
+ facts (dict): existing facts
+ Returns:
+ dict: the facts dict updated with version facts.
+ """
+ if 'common' in facts:
+ deployment_type = facts['common']['deployment_type']
+ facts['common']['version'] = version = get_openshift_version()
+ if version is not None:
+ if deployment_type == 'origin':
+ version_gt_3_1_or_1_1 = LooseVersion(version) > LooseVersion('1.0.6')
+ else:
+ version_gt_3_1_or_1_1 = LooseVersion(version) > LooseVersion('3.0.2.900')
+ else:
+ version_gt_3_1_or_1_1 = True
+ facts['common']['version_greater_than_3_1_or_1_1'] = version_gt_3_1_or_1_1
+ return facts
+
+def set_sdn_facts_if_unset(facts, system_facts):
""" Set sdn facts if not already present in facts dict
Args:
facts (dict): existing facts
+ system_facts (dict): ansible_facts
Returns:
dict: the facts dict updated with the generated sdn facts if they
were not already present
@@ -606,9 +633,18 @@ def set_sdn_facts_if_unset(facts):
if 'sdn_host_subnet_length' not in facts['master']:
facts['master']['sdn_host_subnet_length'] = '8'
- if 'node' in facts:
- if 'sdn_mtu' not in facts['node']:
- facts['node']['sdn_mtu'] = '1450'
+ if 'node' in facts and 'sdn_mtu' not in facts['node']:
+ node_ip = facts['common']['ip']
+
+ # default MTU if interface MTU cannot be detected
+ facts['node']['sdn_mtu'] = '1450'
+
+ for val in system_facts.itervalues():
+ if isinstance(val, dict) and 'mtu' in val:
+ mtu = val['mtu']
+
+ if 'ipv4' in val and val['ipv4'].get('address') == node_ip:
+ facts['node']['sdn_mtu'] = str(mtu - 50)
return facts
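
The node MTU is now derived from whichever interface carries the node IP, minus 50 bytes of headroom for the SDN (VXLAN-style) encapsulation header, falling back to 1450 when no interface matches. A standalone sketch with fabricated `ansible_facts`-shaped data (the code above uses Python 2 `itervalues()`; `values()` below is the Python 3 spelling):

```
# Sketch of the MTU detection with fabricated system facts.
system_facts = {
    'ansible_eth0': {'mtu': 9000, 'ipv4': {'address': '10.0.0.1'}},
    'ansible_lo':   {'mtu': 65536, 'ipv4': {'address': '127.0.0.1'}},
}
node_ip = '10.0.0.1'

sdn_mtu = '1450'  # fallback when the interface MTU cannot be detected
for val in system_facts.values():
    if isinstance(val, dict) and 'mtu' in val:
        if 'ipv4' in val and val['ipv4'].get('address') == node_ip:
            sdn_mtu = str(val['mtu'] - 50)  # room for the SDN encapsulation

assert sdn_mtu == '8950'
```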
@@ -879,8 +915,9 @@ class OpenShiftFacts(object):
facts = set_master_selectors(facts)
facts = set_metrics_facts_if_unset(facts)
facts = set_identity_providers_if_unset(facts)
- facts = set_sdn_facts_if_unset(facts)
+ facts = set_sdn_facts_if_unset(facts, self.system_facts)
facts = set_deployment_facts_if_unset(facts)
+ facts = set_version_facts_if_unset(facts)
facts = set_aggregate_facts(facts)
return dict(openshift=facts)
@@ -920,7 +957,7 @@ class OpenShiftFacts(object):
session_name='ssn', session_secrets_file='',
access_token_max_seconds=86400,
auth_token_max_seconds=500,
- oauth_grant_method='auto', cluster_defer_ha=False)
+ oauth_grant_method='auto')
defaults['master'] = master
if 'node' in roles:
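
The extracted `set_version_facts_if_unset` reduces the 3.1/1.1 cutover to a LooseVersion comparison: Origin above 1.0.6 and enterprise builds above 3.0.2.900 count as "greater than 3.1 or 1.1", and an undetectable version is optimistically treated as new. A minimal sketch of that predicate:

```
# Sketch of the version cutover predicate above.
from distutils.version import LooseVersion

def gt_3_1_or_1_1(deployment_type, version):
    if version is None:
        return True  # unknown versions are assumed to be new
    cutoff = '1.0.6' if deployment_type == 'origin' else '3.0.2.900'
    return LooseVersion(version) > LooseVersion(cutoff)

assert not gt_3_1_or_1_1('origin', '1.0.6')
assert gt_3_1_or_1_1('origin', '1.0.7')
assert gt_3_1_or_1_1('openshift-enterprise', '3.1.0')
```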
diff --git a/roles/openshift_master/handlers/main.yml b/roles/openshift_master/handlers/main.yml
index 37028e0f6..4b9500cbd 100644
--- a/roles/openshift_master/handlers/main.yml
+++ b/roles/openshift_master/handlers/main.yml
@@ -2,3 +2,13 @@
- name: restart master
service: name={{ openshift.common.service_type }}-master state=restarted
when: (not openshift_master_ha | bool) and (not master_service_status_changed | default(false))
+
+- name: restart master api
+ service: name={{ openshift.common.service_type }}-master-api state=restarted
+ when: (openshift_master_ha | bool) and (not master_api_service_status_changed | default(false)) and openshift.master.cluster_method == 'native'
+
+# TODO: need to fix up ignore_errors here
+- name: restart master controllers
+ service: name={{ openshift.common.service_type }}-master-controllers state=restarted
+ when: (openshift_master_ha | bool) and (not master_controllers_service_status_changed | default(false)) and openshift.master.cluster_method == 'native'
+ ignore_errors: yes
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index 3a886935f..be77fce4a 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -9,16 +9,22 @@
when: openshift_master_oauth_grant_method is defined
- fail:
+ msg: "openshift_master_cluster_method must be set to either 'native' or 'pacemaker' for multi-master installations"
+  when: openshift_master_ha | bool and (openshift_master_cluster_method is not defined or openshift_master_cluster_method not in ["native", "pacemaker"])
+- fail:
+ msg: "'native' high availability is not supported for the requested OpenShift version"
+ when: openshift_master_ha | bool and openshift_master_cluster_method == "native" and not openshift.common.version_greater_than_3_1_or_1_1 | bool
+- fail:
msg: "openshift_master_cluster_password must be set for multi-master installations"
- when: openshift_master_ha | bool and not openshift.master.cluster_defer_ha | bool and openshift_master_cluster_password is not defined
+ when: openshift_master_ha | bool and openshift_master_cluster_method == "pacemaker" and (openshift_master_cluster_password is not defined or not openshift_master_cluster_password)
- name: Set master facts
openshift_facts:
role: master
local_facts:
+ cluster_method: "{{ openshift_master_cluster_method | default(None) }}"
cluster_hostname: "{{ openshift_master_cluster_hostname | default(None) }}"
cluster_public_hostname: "{{ openshift_master_cluster_public_hostname | default(None) }}"
- cluster_defer_ha: "{{ openshift_master_cluster_defer_ha | default(None) }}"
debug_level: "{{ openshift_master_debug_level | default(openshift.common.debug_level) }}"
api_port: "{{ openshift_master_api_port | default(None) }}"
api_url: "{{ openshift_master_api_url | default(None) }}"
@@ -41,6 +47,8 @@
portal_net: "{{ openshift_master_portal_net | default(None) }}"
session_max_seconds: "{{ openshift_master_session_max_seconds | default(None) }}"
session_name: "{{ openshift_master_session_name | default(None) }}"
+ session_auth_secrets: "{{ openshift_master_session_auth_secrets | default(None) }}"
+ session_encryption_secrets: "{{ openshift_master_session_encryption_secrets | default(None) }}"
session_secrets_file: "{{ openshift_master_session_secrets_file | default(None) }}"
access_token_max_seconds: "{{ openshift_master_access_token_max_seconds | default(None) }}"
auth_token_max_seconds: "{{ openshift_master_auth_token_max_seconds | default(None) }}"
@@ -63,6 +71,8 @@
controller_args: "{{ osm_controller_args | default(None) }}"
infra_nodes: "{{ num_infra | default(None) }}"
disabled_features: "{{ osm_disabled_features | default(None) }}"
+ master_count: "{{ openshift_master_count | default(None) }}"
+ controller_lease_ttl: "{{ osm_controller_lease_ttl | default(None) }}"
- name: Install Master package
yum: pkg={{ openshift.common.service_type }}-master{{ openshift_version }} state=present
@@ -77,7 +87,7 @@
domain: cluster.local
when: openshift.master.embedded_dns
-- name: Create config parent directory if it doesn't exist
+- name: Create config parent directory if it does not exist
file:
path: "{{ openshift_master_config_dir }}"
state: directory
@@ -90,6 +100,8 @@
creates: "{{ openshift_master_policy }}"
notify:
- restart master
+ - restart master api
+ - restart master controllers
- name: Create the scheduler config
template:
@@ -98,6 +110,8 @@
backup: true
notify:
- restart master
+ - restart master api
+ - restart master controllers
- name: Install httpd-tools if needed
yum: pkg=httpd-tools state=present
@@ -120,6 +134,39 @@
when: item.kind == 'HTPasswdPasswordIdentityProvider'
with_items: openshift.master.identity_providers
+# workaround for missing systemd unit files for controllers/api
+- name: Create the api service file
+ template:
+ src: atomic-openshift-master-api.service.j2
+ dest: /usr/lib/systemd/system/{{ openshift.common.service_type }}-master-api.service
+ force: no
+- name: Create the controllers service file
+ template:
+ src: atomic-openshift-master-controllers.service.j2
+ dest: /usr/lib/systemd/system/{{ openshift.common.service_type }}-master-controllers.service
+ force: no
+- name: Create the api env file
+ template:
+ src: atomic-openshift-master-api.j2
+ dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-api
+ force: no
+- name: Create the controllers env file
+ template:
+ src: atomic-openshift-master-controllers.j2
+ dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
+ force: no
+- command: systemctl daemon-reload
+# end workaround for missing systemd unit files
+
+- name: Create session secrets file
+ template:
+ dest: "{{ openshift.master.session_secrets_file }}"
+ src: sessionSecretsFile.yaml.v1.j2
+ force: no
+ notify:
+ - restart master
+ - restart master api
+
# TODO: add the validate parameter when there is a validation command to run
- name: Create master config
template:
@@ -128,12 +175,15 @@
backup: true
notify:
- restart master
+ - restart master api
+ - restart master controllers
- name: Configure master settings
lineinfile:
dest: /etc/sysconfig/{{ openshift.common.service_type }}-master
regexp: "{{ item.regex }}"
line: "{{ item.line }}"
+ create: yes
with_items:
- regex: '^OPTIONS='
line: "OPTIONS=--loglevel={{ openshift.master.debug_level }}"
@@ -142,6 +192,32 @@
notify:
- restart master
+- name: Configure master api settings
+ lineinfile:
+ dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-api
+ regexp: "{{ item.regex }}"
+ line: "{{ item.line }}"
+ with_items:
+ - regex: '^OPTIONS='
+ line: "OPTIONS=--loglevel={{ openshift.master.debug_level }} --listen=https://0.0.0.0:8443 --master=https://{{ openshift.common.ip }}:8443"
+ - regex: '^CONFIG_FILE='
+ line: "CONFIG_FILE={{ openshift_master_config_file }}"
+ notify:
+ - restart master api
+
+- name: Configure master controller settings
+ lineinfile:
+ dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
+ regexp: "{{ item.regex }}"
+ line: "{{ item.line }}"
+ with_items:
+ - regex: '^OPTIONS='
+ line: "OPTIONS=--loglevel={{ openshift.master.debug_level }} --listen=https://0.0.0.0:8444"
+ - regex: '^CONFIG_FILE='
+ line: "CONFIG_FILE={{ openshift_master_config_file }}"
+ notify:
+ - restart master controllers
+
- name: Start and enable master
service: name={{ openshift.common.service_type }}-master enabled=yes state=started
when: not openshift_master_ha | bool
@@ -149,15 +225,37 @@
- set_fact:
master_service_status_changed = start_result | changed
+ when: not openshift_master_ha | bool
+
+- name: Start and enable master api
+ service: name={{ openshift.common.service_type }}-master-api enabled=yes state=started
+ when: openshift_master_ha | bool and openshift.master.cluster_method == 'native'
+ register: start_result
+
+- set_fact:
+    master_api_service_status_changed: "{{ start_result | changed }}"
+ when: openshift_master_ha | bool and openshift.master.cluster_method == 'native'
+
+# TODO: fix the ugly workaround of setting ignore_errors
+# the controllers service tries to start even if it is already started
+- name: Start and enable master controller
+ service: name={{ openshift.common.service_type }}-master-controllers enabled=yes state=started
+ when: openshift_master_ha | bool and openshift.master.cluster_method == 'native'
+ register: start_result
+ ignore_errors: yes
+
+- set_fact:
+    master_controllers_service_status_changed: "{{ start_result | changed }}"
+ when: openshift_master_ha | bool and openshift.master.cluster_method == 'native'
- name: Install cluster packages
yum: pkg=pcs state=present
- when: openshift_master_ha | bool and not openshift.master.cluster_defer_ha | bool
+ when: openshift_master_ha | bool and openshift.master.cluster_method == 'pacemaker'
register: install_result
- name: Start and enable cluster service
service: name=pcsd enabled=yes state=started
- when: openshift_master_ha | bool and not openshift.master.cluster_defer_ha | bool
+ when: openshift_master_ha | bool and openshift.master.cluster_method == 'pacemaker'
- name: Set the cluster user password
shell: echo {{ openshift_master_cluster_password | quote }} | passwd --stdin hacluster
diff --git a/roles/openshift_master/templates/atomic-openshift-master-api.j2 b/roles/openshift_master/templates/atomic-openshift-master-api.j2
new file mode 100644
index 000000000..205934248
--- /dev/null
+++ b/roles/openshift_master/templates/atomic-openshift-master-api.j2
@@ -0,0 +1,9 @@
+OPTIONS=
+CONFIG_FILE={{ openshift_master_config_dir }}/master-config.yaml
+
+# Proxy configuration
+# Origin uses standard HTTP_PROXY environment variables. Be sure to set
+# NO_PROXY for your master
+#NO_PROXY=master.example.com
+#HTTP_PROXY=http://USER:PASSWORD@IPADDR:PORT
+#HTTPS_PROXY=https://USER:PASSWORD@IPADDR:PORT
diff --git a/roles/openshift_master/templates/atomic-openshift-master-api.service.j2 b/roles/openshift_master/templates/atomic-openshift-master-api.service.j2
new file mode 100644
index 000000000..ba19fb348
--- /dev/null
+++ b/roles/openshift_master/templates/atomic-openshift-master-api.service.j2
@@ -0,0 +1,21 @@
+[Unit]
+Description=Atomic OpenShift Master API
+Documentation=https://github.com/openshift/origin
+After=network.target
+After=etcd.service
+Before={{ openshift.common.service_type }}-node.service
+Requires=network.target
+
+[Service]
+Type=notify
+EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-api
+Environment=GOTRACEBACK=crash
+ExecStart=/usr/bin/openshift start master api --config=${CONFIG_FILE} $OPTIONS
+LimitNOFILE=131072
+LimitCORE=infinity
+WorkingDirectory={{ openshift.common.data_dir }}
+SyslogIdentifier=atomic-openshift-master-api
+
+[Install]
+WantedBy=multi-user.target
+WantedBy={{ openshift.common.service_type }}-node.service
diff --git a/roles/openshift_master/templates/atomic-openshift-master-controllers.j2 b/roles/openshift_master/templates/atomic-openshift-master-controllers.j2
new file mode 100644
index 000000000..205934248
--- /dev/null
+++ b/roles/openshift_master/templates/atomic-openshift-master-controllers.j2
@@ -0,0 +1,9 @@
+OPTIONS=
+CONFIG_FILE={{ openshift_master_config_dir }}/master-config.yaml
+
+# Proxy configuration
+# Origin uses standard HTTP_PROXY environment variables. Be sure to set
+# NO_PROXY for your master
+#NO_PROXY=master.example.com
+#HTTP_PROXY=http://USER:PASSWORD@IPADDR:PORT
+#HTTPS_PROXY=https://USER:PASSWORD@IPADDR:PORT
diff --git a/roles/openshift_master/templates/atomic-openshift-master-controllers.service.j2 b/roles/openshift_master/templates/atomic-openshift-master-controllers.service.j2
new file mode 100644
index 000000000..8952c86ef
--- /dev/null
+++ b/roles/openshift_master/templates/atomic-openshift-master-controllers.service.j2
@@ -0,0 +1,22 @@
+[Unit]
+Description=Atomic OpenShift Master Controllers
+Documentation=https://github.com/openshift/origin
+After=network.target
+After={{ openshift.common.service_type }}-master-api.service
+Before={{ openshift.common.service_type }}-node.service
+Requires=network.target
+
+[Service]
+Type=notify
+EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
+Environment=GOTRACEBACK=crash
+ExecStart=/usr/bin/openshift start master controllers --config=${CONFIG_FILE} $OPTIONS
+LimitNOFILE=131072
+LimitCORE=infinity
+WorkingDirectory={{ openshift.common.data_dir }}
+SyslogIdentifier={{ openshift.common.service_type }}-master-controllers
+Restart=on-failure
+
+[Install]
+WantedBy=multi-user.target
+WantedBy={{ openshift.common.service_type }}-node.service
diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2
index 73a0bc6cc..d4a6590ea 100644
--- a/roles/openshift_master/templates/master.yaml.v1.j2
+++ b/roles/openshift_master/templates/master.yaml.v1.j2
@@ -10,24 +10,34 @@ assetConfig:
publicURL: {{ openshift.master.public_console_url }}/
servingInfo:
bindAddress: {{ openshift.master.bind_addr }}:{{ openshift.master.console_port }}
+ bindNetwork: tcp4
certFile: master.server.crt
clientCA: ""
keyFile: master.server.key
maxRequestsInFlight: 0
requestTimeoutSeconds: 0
+{% if openshift_master_ha | bool %}
+controllerLeaseTTL: {{ openshift.master.controller_lease_ttl | default('30') }}
+{% endif %}
+controllers: '*'
corsAllowedOrigins:
-{% for origin in ['127.0.0.1', 'localhost', openshift.common.hostname, openshift.common.ip, openshift.common.public_hostname, openshift.common.public_ip] %}
+{% for origin in ['127.0.0.1', 'localhost', openshift.common.ip, openshift.common.public_ip] | union(openshift.common.all_hostnames) | unique %}
- {{ origin }}
{% endfor %}
{% for custom_origin in openshift.master.custom_cors_origins | default("") %}
- {{ custom_origin }}
{% endfor %}
+{% for name in (named_certificates | map(attribute='names')) | list | oo_flatten %}
+ - {{ name }}
+{% endfor %}
{% if 'disabled_features' in openshift.master %}
disabledFeatures: {{ openshift.master.disabled_features | to_json }}
{% endif %}
{% if openshift.master.embedded_dns | bool %}
+disabledFeatures: null
dnsConfig:
bindAddress: {{ openshift.master.bind_addr }}:{{ openshift.master.dns_port }}
+ bindNetwork: tcp4
{% endif %}
etcdClientInfo:
ca: {{ "ca.crt" if (openshift.master.embedded_etcd | bool) else "master.etcd-ca.crt" }}
@@ -77,9 +87,8 @@ kubernetesMasterConfig:
- v1
apiServerArguments: {{ api_server_args if api_server_args is defined else 'null' }}
controllerArguments: {{ controller_args if controller_args is defined else 'null' }}
-{# TODO: support overriding masterCount #}
- masterCount: 1
- masterIP: ""
+ masterCount: {{ openshift.master.master_count }}
+ masterIP: {{ openshift.common.ip }}
podEvictionTimeout: ""
proxyClientInfo:
certFile: master.proxy-client.crt
@@ -103,6 +112,7 @@ networkConfig:
# serviceNetworkCIDR must match kubernetesMasterConfig.servicesSubnet
serviceNetworkCIDR: {{ openshift.master.portal_net }}
{% include 'v1_partials/oauthConfig.j2' %}
+pauseControllers: false
policyConfig:
bootstrapPolicyFile: {{ openshift_master_policy }}
openshiftInfrastructureNamespace: openshift-infra
@@ -118,6 +128,7 @@ projectConfig:
routingConfig:
subdomain: "{{ openshift.master.default_subdomain | default("") }}"
serviceAccountConfig:
+ limitSecretReferences: false
managedNames:
- default
- builder
@@ -128,8 +139,20 @@ serviceAccountConfig:
- serviceaccounts.public.key
servingInfo:
bindAddress: {{ openshift.master.bind_addr }}:{{ openshift.master.api_port }}
+ bindNetwork: tcp4
certFile: master.server.crt
clientCA: ca.crt
keyFile: master.server.key
maxRequestsInFlight: 500
requestTimeoutSeconds: 3600
+{% if named_certificates %}
+ namedCertificates:
+{% for named_certificate in named_certificates %}
+ - certFile: {{ named_certificate['certfile'] }}
+ keyFile: {{ named_certificate['keyfile'] }}
+ names:
+{% for name in named_certificate['names'] %}
+ - "{{ name }}"
+{% endfor %}
+{% endfor %}
+{% endif %}
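
The new `namedCertificates` block expects a `named_certificates` variable shaped like the hypothetical value below; the same entries' `names` are also folded into corsAllowedOrigins by the `oo_flatten` loop earlier in the template:

```
# Hypothetical named_certificates value and the flattening the template
# applies to its 'names' attribute (paths are illustrative).
named_certificates = [
    {'certfile': '/etc/origin/master/named_certificates/wildcard.crt',
     'keyfile':  '/etc/origin/master/named_certificates/wildcard.key',
     'names': ['api.example.com', 'console.example.com']},
]

# Equivalent of: named_certificates | map(attribute='names') | oo_flatten
flattened = [n for cert in named_certificates for n in cert['names']]
assert flattened == ['api.example.com', 'console.example.com']
```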
diff --git a/roles/openshift_master/templates/sessionSecretsFile.yaml.v1.j2 b/roles/openshift_master/templates/sessionSecretsFile.yaml.v1.j2
new file mode 100644
index 000000000..d12d9db90
--- /dev/null
+++ b/roles/openshift_master/templates/sessionSecretsFile.yaml.v1.j2
@@ -0,0 +1,7 @@
+apiVersion: v1
+kind: SessionSecrets
+secrets:
+{% for secret in openshift_master_session_auth_secrets %}
+- authentication: "{{ openshift_master_session_auth_secrets[loop.index0] }}"
+ encryption: "{{ openshift_master_session_encryption_secrets[loop.index0] }}"
+{% endfor %}
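
The template pairs the two secret lists positionally, so they must have equal length and matching order. The same contract in Python terms, with placeholder secrets:

```
# The template walks both lists by index; zip() states the same contract.
auth_secrets = ['AUTH_SECRET_1', 'AUTH_SECRET_2']          # placeholders
encryption_secrets = ['ENC_SECRET_1', 'ENC_SECRET_2']      # placeholders

assert len(auth_secrets) == len(encryption_secrets)
for auth, enc in zip(auth_secrets, encryption_secrets):
    print('- authentication: "%s"' % auth)
    print('  encryption: "%s"' % enc)
```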
diff --git a/roles/openshift_master/vars/main.yml b/roles/openshift_master/vars/main.yml
index ecdb4f883..534465451 100644
--- a/roles/openshift_master/vars/main.yml
+++ b/roles/openshift_master/vars/main.yml
@@ -2,6 +2,7 @@
openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
openshift_master_config_file: "{{ openshift_master_config_dir }}/master-config.yaml"
openshift_master_scheduler_conf: "{{ openshift_master_config_dir }}/scheduler.json"
+openshift_master_session_secrets_file: "{{ openshift_master_config_dir }}/session-secrets.yaml"
openshift_master_policy: "{{ openshift_master_config_dir }}/policy.json"
openshift_version: "{{ openshift_pkg_version | default('') }}"
diff --git a/roles/openshift_master_ca/tasks/main.yml b/roles/openshift_master_ca/tasks/main.yml
index cfd1ceabf..0738048d3 100644
--- a/roles/openshift_master_ca/tasks/main.yml
+++ b/roles/openshift_master_ca/tasks/main.yml
@@ -1,6 +1,6 @@
---
- name: Install the base package for admin tooling
- yum: pkg={{ openshift.common.service_type }}{{ openshift_version }} state=present
+ yum: pkg={{ openshift.common.service_type }} state=present
register: install_result
- name: Reload generated facts
@@ -14,7 +14,7 @@
- name: Create the master certificates if they do not already exist
command: >
{{ openshift.common.admin_binary }} create-master-certs
- --hostnames={{ openshift.common.all_hostnames | join(',') }}
+ --hostnames={{ master_hostnames | join(',') }}
--master={{ openshift.master.api_url }}
--public-master={{ openshift.master.public_api_url }}
--cert-dir={{ openshift_master_config_dir }} --overwrite=false
diff --git a/roles/openshift_master_certificates/tasks/main.yml b/roles/openshift_master_certificates/tasks/main.yml
index 4b39b043a..e966e793e 100644
--- a/roles/openshift_master_certificates/tasks/main.yml
+++ b/roles/openshift_master_certificates/tasks/main.yml
@@ -34,9 +34,6 @@
- serviceaccounts.private.key
- serviceaccounts.public.key
-- debug: msg="{{ item.openshift.master.all_hostnames | join (',') }}"
- with_items: masters_needing_certs
-
- name: Create the master certificates if they do not already exist
command: >
{{ openshift.common.admin_binary }} create-master-certs
diff --git a/roles/openshift_master_cluster/tasks/configure_deferred.yml b/roles/openshift_master_cluster/tasks/configure_deferred.yml
deleted file mode 100644
index 3b416005b..000000000
--- a/roles/openshift_master_cluster/tasks/configure_deferred.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-- debug: msg="Deferring config"
-
-- name: Start and enable the master
- service:
- name: "{{ openshift.common.service_type }}-master"
- state: started
- enabled: yes
diff --git a/roles/openshift_master_cluster/tasks/main.yml b/roles/openshift_master_cluster/tasks/main.yml
index 315947183..6303a6e46 100644
--- a/roles/openshift_master_cluster/tasks/main.yml
+++ b/roles/openshift_master_cluster/tasks/main.yml
@@ -4,10 +4,7 @@
register: pcs_status
changed_when: false
failed_when: false
- when: not openshift.master.cluster_defer_ha | bool
+ when: openshift.master.cluster_method == "pacemaker"
- include: configure.yml
when: "pcs_status | failed and 'Error: cluster is not currently running on this node' in pcs_status.stderr"
-
-- include: configure_deferred.yml
- when: openshift.master.cluster_defer_ha | bool
diff --git a/roles/openshift_node/meta/main.yml b/roles/openshift_node/meta/main.yml
index c92008a77..9d40ae3b3 100644
--- a/roles/openshift_node/meta/main.yml
+++ b/roles/openshift_node/meta/main.yml
@@ -13,3 +13,4 @@ galaxy_info:
- cloud
dependencies:
- { role: openshift_common }
+- { role: docker }
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index aea60b75c..c455a09f1 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -8,7 +8,7 @@
when: osn_cluster_dns_ip is not defined or not osn_cluster_dns_ip
- fail:
msg: "SELinux is disabled, This deployment type requires that SELinux is enabled."
- when: (not ansible_selinux or ansible_selinux.status != 'enabled') and deployment_type in ['enterprise', 'online']
+ when: (not ansible_selinux or ansible_selinux.status != 'enabled') and deployment_type in ['enterprise', 'online', 'atomic-enterprise', 'openshift-enterprise']
- name: Set node facts
openshift_facts:
@@ -45,6 +45,15 @@
register: sdn_install_result
when: openshift.common.use_openshift_sdn
+- name: Install Node package
+ yum: pkg={{ openshift.common.service_type }}-node state=present
+ register: node_install_result
+
+- name: Install sdn-ovs package
+ yum: pkg={{ openshift.common.service_type }}-sdn-ovs state=present
+ register: sdn_install_result
+ when: openshift.common.use_openshift_sdn
+
# TODO: add the validate parameter when there is a validation command to run
- name: Create the Node config
template:
diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2
index 4931d127e..509cce2e0 100644
--- a/roles/openshift_node/templates/node.yaml.v1.j2
+++ b/roles/openshift_node/templates/node.yaml.v1.j2
@@ -22,6 +22,7 @@ networkConfig:
{% if openshift.common.use_openshift_sdn %}
networkPluginName: {{ openshift.common.sdn_network_plugin_name }}
{% endif %}
+nodeIP: {{ openshift.common.ip }}
nodeName: {{ openshift.common.hostname | lower }}
podManifestConfig:
servingInfo:
diff --git a/roles/openshift_repos/tasks/main.yaml b/roles/openshift_repos/tasks/main.yaml
index 12e98b7a1..aa696ae12 100644
--- a/roles/openshift_repos/tasks/main.yaml
+++ b/roles/openshift_repos/tasks/main.yaml
@@ -8,7 +8,7 @@
# proper repos correctly.
- assert:
- that: openshift_deployment_type in known_openshift_deployment_types
+ that: openshift.common.deployment_type in known_openshift_deployment_types
- name: Ensure libselinux-python is installed
yum:
diff --git a/utils/docs/config.md b/utils/docs/config.md
index 9399409dd..2729f8d37 100644
--- a/utils/docs/config.md
+++ b/utils/docs/config.md
@@ -7,6 +7,7 @@ The default location this config file will be written to ~/.config/openshift/ins
## Example
```
+version: v1
variant: openshift-enterprise
variant_version: 3.0
ansible_ssh_user: root
@@ -18,20 +19,27 @@ hosts:
master: true
node: true
containerized: true
+ connect_to: 24.222.0.1
- ip: 10.0.0.2
hostname: node1-private.example.com
public_ip: 24.222.0.2
public_hostname: node1.example.com
node: true
+ connect_to: 10.0.0.2
- ip: 10.0.0.3
hostname: node2-private.example.com
public_ip: 24.222.0.3
public_hostname: node2.example.com
node: true
+ connect_to: 10.0.0.3
```
## Primary Settings
+### version
+
+Indicates the version of the configuration format this file was written with; the current version is v1.
+
### variant
The OpenShift variant to install. Currently valid options are:
diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py
index d3ee8c51e..8c2421183 100644
--- a/utils/src/ooinstall/cli_installer.py
+++ b/utils/src/ooinstall/cli_installer.py
@@ -101,18 +101,13 @@ http://docs.openshift.com/enterprise/latest/architecture/infrastructure_componen
hosts = []
more_hosts = True
- ip_regex = re.compile(r'^\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}$')
-
while more_hosts:
host_props = {}
hostname_or_ip = click.prompt('Enter hostname or IP address:',
default='',
value_proc=validate_prompt_hostname)
- if ip_regex.match(hostname_or_ip):
- host_props['ip'] = hostname_or_ip
- else:
- host_props['hostname'] = hostname_or_ip
+ host_props['connect_to'] = hostname_or_ip
host_props['master'] = click.confirm('Will this host be an OpenShift Master?')
host_props['node'] = True
@@ -150,7 +145,7 @@ Please confirm that they are correct before moving forward.
notes = """
Format:
-IP,public IP,hostname,public hostname
+connect_to,IP,public IP,hostname,public hostname
Notes:
* The installation host is the hostname from the installer's perspective.
@@ -168,16 +163,15 @@ Notes:
default_facts_lines = []
default_facts = {}
- validated_facts = {}
for h in hosts:
- default_facts[h] = {}
- h.ip = callback_facts[str(h)]["common"]["ip"]
- h.public_ip = callback_facts[str(h)]["common"]["public_ip"]
- h.hostname = callback_facts[str(h)]["common"]["hostname"]
- h.public_hostname = callback_facts[str(h)]["common"]["public_hostname"]
-
- validated_facts[h] = {}
- default_facts_lines.append(",".join([h.ip,
+ default_facts[h.connect_to] = {}
+ h.ip = callback_facts[h.connect_to]["common"]["ip"]
+ h.public_ip = callback_facts[h.connect_to]["common"]["public_ip"]
+ h.hostname = callback_facts[h.connect_to]["common"]["hostname"]
+ h.public_hostname = callback_facts[h.connect_to]["common"]["public_hostname"]
+
+ default_facts_lines.append(",".join([h.connect_to,
+ h.ip,
h.public_ip,
h.hostname,
h.public_hostname]))
@@ -316,10 +310,10 @@ Add new nodes here
def get_installed_hosts(hosts, callback_facts):
installed_hosts = []
for host in hosts:
- if(host.name in callback_facts.keys()
- and 'common' in callback_facts[host.name].keys()
- and callback_facts[host.name]['common'].get('version', '')
- and callback_facts[host.name]['common'].get('version', '') != 'None'):
+ if(host.connect_to in callback_facts.keys()
+ and 'common' in callback_facts[host.connect_to].keys()
+ and callback_facts[host.connect_to]['common'].get('version', '')
+ and callback_facts[host.connect_to]['common'].get('version', '') != 'None'):
installed_hosts.append(host)
return installed_hosts
@@ -331,7 +325,22 @@ def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force):
# Check if master or nodes already have something installed
installed_hosts = get_installed_hosts(oo_cfg.hosts, callback_facts)
if len(installed_hosts) > 0:
- # present a message listing already installed hosts
+ click.echo('Installed environment detected.')
+ # This check has to happen before we start removing hosts later in this method
+ if not force:
+ if not unattended:
+ click.echo('By default the installer only adds new nodes to an installed environment.')
+ response = click.prompt('Do you want to (1) only add additional nodes or ' \
+ '(2) perform a clean install?', type=int)
+ # TODO: this should be reworked with error handling.
+ # Click can certainly do this for us.
+ # This should be refactored as soon as we add a 3rd option.
+ if response == 1:
+ force = False
+ if response == 2:
+ force = True
+
+ # present a message listing already installed hosts and remove hosts if needed
for host in installed_hosts:
if host.master:
click.echo("{} is already an OpenShift Master".format(host))
@@ -339,32 +348,42 @@ def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force):
# new nodes.
elif host.node:
click.echo("{} is already an OpenShift Node".format(host))
- hosts_to_run_on.remove(host)
- # for unattended either continue if they force install or exit if they didn't
- if unattended:
- if not force:
- click.echo('Installed environment detected and no additional nodes specified: ' \
- 'aborting. If you want a fresh install, use --force')
- sys.exit(1)
- # for attended ask the user what to do
+ # force is only used for reinstalls so we don't want to remove
+ # anything.
+ if not force:
+ hosts_to_run_on.remove(host)
+
+ # Handle the cases where we know about uninstalled systems
+ new_hosts = set(hosts_to_run_on) - set(installed_hosts)
+ if len(new_hosts) > 0:
+ for new_host in new_hosts:
+ click.echo("{} is currently uninstalled".format(new_host))
+
+ # Fall through
+ click.echo('Adding additional nodes...')
else:
- click.echo('Installed environment detected and no additional nodes specified. ')
- response = click.prompt('Do you want to (1) add more nodes or ' \
- '(2) perform a clean install?', type=int)
- if response == 1: # add more nodes
- new_nodes = collect_new_nodes()
-
- hosts_to_run_on.extend(new_nodes)
- oo_cfg.hosts.extend(new_nodes)
-
- openshift_ansible.set_config(oo_cfg)
- callback_facts, error = openshift_ansible.default_facts(oo_cfg.hosts)
- if error:
- click.echo("There was a problem fetching the required information. " \
- "See {} for details.".format(oo_cfg.settings['ansible_log_path']))
+ if unattended:
+ if not force:
+ click.echo('Installed environment detected and no additional nodes specified: ' \
+ 'aborting. If you want a fresh install, use ' \
+ '`atomic-openshift-installer install --force`')
sys.exit(1)
else:
- pass # proceeding as normal should do a clean install
+ if not force:
+ new_nodes = collect_new_nodes()
+
+ hosts_to_run_on.extend(new_nodes)
+ oo_cfg.hosts.extend(new_nodes)
+
+ openshift_ansible.set_config(oo_cfg)
+ click.echo('Gathering information from hosts...')
+ callback_facts, error = openshift_ansible.default_facts(oo_cfg.hosts)
+ if error:
+ click.echo("There was a problem fetching the required information. " \
+ "See {} for details.".format(oo_cfg.settings['ansible_log_path']))
+ sys.exit(1)
+ else:
+ pass # proceeding as normal should do a clean install
return hosts_to_run_on, callback_facts
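
Condensed, the reworked branches amount to the following decision table for environments where some hosts are already installed (a summary sketch of the flow above, not installer code):

```
# Summary of get_hosts_to_run_on outcomes when installed hosts exist.
def planned_action(unattended, force, has_new_hosts):
    if force:
        return 'clean install over all hosts'
    if has_new_hosts:
        return 'install only the uninstalled hosts as new nodes'
    if unattended:
        return 'abort: rerun with --force for a fresh install'
    return 'prompt for additional nodes, then install those'

assert planned_action(True, False, False).startswith('abort')
```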
@@ -385,7 +404,7 @@ def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force):
dir_okay=True,
readable=True),
# callback=validate_ansible_dir,
- default='/usr/share/ansible/openshift-ansible/',
+ default=DEFAULT_PLAYBOOK_DIR,
envvar='OO_ANSIBLE_PLAYBOOK_DIRECTORY')
@click.option('--ansible-config',
type=click.Path(file_okay=True,
@@ -450,7 +469,7 @@ def uninstall(ctx):
if not ctx.obj['unattended']:
# Prompt interactively to confirm:
for host in oo_cfg.hosts:
- click.echo(" * %s" % host.name)
+ click.echo(" * %s" % host.connect_to)
proceed = click.confirm("\nDo you wish to proceed?")
if not proceed:
click.echo("Uninstall cancelled.")
@@ -459,6 +478,43 @@ def uninstall(ctx):
openshift_ansible.run_uninstall_playbook()
+@click.command()
+@click.pass_context
+def upgrade(ctx):
+ oo_cfg = ctx.obj['oo_cfg']
+
+ if len(oo_cfg.hosts) == 0:
+        click.echo("No hosts defined in: %s" % oo_cfg.config_path)
+ sys.exit(1)
+
+    # Update config to reflect the version we're targeting; we'll write
+    # to disk once ansible completes successfully, not before.
+ old_variant = oo_cfg.settings['variant']
+ old_version = oo_cfg.settings['variant_version']
+ if oo_cfg.settings['variant'] == 'enterprise':
+ oo_cfg.settings['variant'] = 'openshift-enterprise'
+ version = find_variant(oo_cfg.settings['variant'])[1]
+ oo_cfg.settings['variant_version'] = version.name
+    click.echo("OpenShift will be upgraded from %s %s to %s %s on the following hosts:\n" % (
+ old_variant, old_version, oo_cfg.settings['variant'],
+ oo_cfg.settings['variant_version']))
+ for host in oo_cfg.hosts:
+ click.echo(" * %s" % host.connect_to)
+
+ if not ctx.obj['unattended']:
+ # Prompt interactively to confirm:
+ proceed = click.confirm("\nDo you wish to proceed?")
+ if not proceed:
+ click.echo("Upgrade cancelled.")
+ sys.exit(0)
+
+ retcode = openshift_ansible.run_upgrade_playbook()
+ if retcode > 0:
+ click.echo("Errors encountered during upgrade, please check %s." %
+ oo_cfg.settings['ansible_log_path'])
+ else:
+ click.echo("Upgrade completed! Rebooting all hosts is recommended.")
+
@click.command()
@click.option('--force', '-f', is_flag=True, default=False)
@@ -523,6 +579,7 @@ http://docs.openshift.com/enterprise/latest/admin_guide/overview.html
click.pause()
cli.add_command(install)
+cli.add_command(upgrade)
cli.add_command(uninstall)
if __name__ == '__main__':
diff --git a/utils/src/ooinstall/oo_config.py b/utils/src/ooinstall/oo_config.py
index a2f53cf78..f35a8f51b 100644
--- a/utils/src/ooinstall/oo_config.py
+++ b/utils/src/ooinstall/oo_config.py
@@ -12,6 +12,7 @@ PERSIST_SETTINGS = [
'ansible_log_path',
'variant',
'variant_version',
+ 'version',
]
REQUIRED_FACTS = ['ip', 'public_ip', 'hostname', 'public_hostname']
@@ -34,6 +35,7 @@ class Host(object):
self.hostname = kwargs.get('hostname', None)
self.public_ip = kwargs.get('public_ip', None)
self.public_hostname = kwargs.get('public_hostname', None)
+ self.connect_to = kwargs.get('connect_to', None)
# Should this host run as an OpenShift master:
self.master = kwargs.get('master', False)
@@ -42,30 +44,25 @@ class Host(object):
self.node = kwargs.get('node', False)
self.containerized = kwargs.get('containerized', False)
- if self.ip is None and self.hostname is None:
- raise OOConfigInvalidHostError("You must specify either 'ip' or 'hostname'")
+ if self.connect_to is None:
+            raise OOConfigInvalidHostError("You must specify either an 'ip' " \
+                "or 'hostname' to connect to.")
if self.master is False and self.node is False:
raise OOConfigInvalidHostError(
"You must specify each host as either a master or a node.")
- # Hosts can be specified with an ip, hostname, or both. However we need
- # something authoritative we can connect to and refer to the host by.
- # Preference given to the IP if specified as this is more specific.
- # We know one must be set by this point.
- self.name = self.ip if self.ip is not None else self.hostname
-
def __str__(self):
- return self.name
+ return self.connect_to
def __repr__(self):
- return self.name
+ return self.connect_to
def to_dict(self):
""" Used when exporting to yaml. """
d = {}
for prop in ['ip', 'hostname', 'public_ip', 'public_hostname',
- 'master', 'node', 'containerized']:
+ 'master', 'node', 'containerized', 'connect_to']:
# If the property is defined (not None or False), export it:
if getattr(self, prop):
d[prop] = getattr(self, prop)
@@ -73,7 +70,6 @@ class Host(object):
class OOConfig(object):
- new_config = True
default_dir = os.path.normpath(
os.environ.get('XDG_CONFIG_HOME',
os.environ['HOME'] + '/.config/') + '/openshift/')
@@ -86,19 +82,22 @@ class OOConfig(object):
self.config_path = os.path.normpath(self.default_dir +
self.default_file)
self.settings = {}
- self.read_config()
- self.set_defaults()
+ self._read_config()
+ self._set_defaults()
- def read_config(self, is_new=False):
+ def _read_config(self):
self.hosts = []
try:
- new_settings = None
if os.path.exists(self.config_path):
cfgfile = open(self.config_path, 'r')
- new_settings = yaml.safe_load(cfgfile.read())
+ self.settings = yaml.safe_load(cfgfile.read())
cfgfile.close()
- if new_settings:
- self.settings = new_settings
+
+ # Use the presence of a Description as an indicator this is
+ # a legacy config file:
+ if 'Description' in self.settings:
+ self._upgrade_legacy_config()
+
# Parse the hosts into DTO objects:
if 'hosts' in self.settings:
for host in self.settings['hosts']:
@@ -114,9 +113,28 @@ class OOConfig(object):
ferr.strerror))
except yaml.scanner.ScannerError:
raise OOConfigFileError('Config file "{}" is not a valid YAML document'.format(self.config_path))
- self.new_config = is_new
- def set_defaults(self):
+ def _upgrade_legacy_config(self):
+ new_hosts = []
+ if 'validated_facts' in self.settings:
+            for key, value in self.settings['validated_facts'].iteritems():
+                value['connect_to'] = key
+ if 'masters' in self.settings and key in self.settings['masters']:
+ value['master'] = True
+ if 'nodes' in self.settings and key in self.settings['nodes']:
+ value['node'] = True
+ new_hosts.append(value)
+ self.settings['hosts'] = new_hosts
+
+ remove_settings = ['validated_facts', 'Description', 'Name',
+ 'Subscription', 'Vendor', 'Version', 'masters', 'nodes']
+ for s in remove_settings:
+ del self.settings[s]
+
+ # A legacy config implies openshift-enterprise 3.0:
+ self.settings['variant'] = 'openshift-enterprise'
+ self.settings['variant_version'] = '3.0'
+
+ def _set_defaults(self):
if 'ansible_inventory_directory' not in self.settings:
self.settings['ansible_inventory_directory'] = \
@@ -125,6 +143,8 @@ class OOConfig(object):
os.makedirs(self.settings['ansible_inventory_directory'])
if 'ansible_plugins_directory' not in self.settings:
self.settings['ansible_plugins_directory'] = resource_filename(__name__, 'ansible_plugins')
+ if 'version' not in self.settings:
+ self.settings['version'] = 'v1'
if 'ansible_callback_facts_yaml' not in self.settings:
self.settings['ansible_callback_facts_yaml'] = '%s/callback_facts.yaml' % \
@@ -158,7 +178,7 @@ class OOConfig(object):
if not getattr(host, required_fact):
missing_facts.append(required_fact)
if len(missing_facts) > 0:
- result[host.name] = missing_facts
+ result[host.connect_to] = missing_facts
return result
def save_to_disk(self):
@@ -190,6 +210,6 @@ class OOConfig(object):
def get_host(self, name):
for host in self.hosts:
- if host.name == name:
+ if host.connect_to == name:
return host
return None
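
With hosts now keyed by `connect_to`, the legacy upgrade path composes naturally with `get_host`. A condensed Python sketch of the conversion, using fabricated data shaped like the LEGACY_CONFIG test fixture further down:

```
# Condensed sketch of the legacy-config upgrade above (fabricated data).
settings = {
    'Description': 'legacy installer config',
    'masters': ['10.0.0.1'],
    'nodes': ['10.0.0.2'],
    'validated_facts': {
        '10.0.0.1': {'ip': '10.0.0.1', 'hostname': 'master-private.example.com'},
        '10.0.0.2': {'ip': '10.0.0.2', 'hostname': 'node1-private.example.com'},
    },
}

new_hosts = []
for key, value in settings['validated_facts'].items():
    value['connect_to'] = key  # hosts are addressed by this key from now on
    if key in settings.get('masters', []):
        value['master'] = True
    if key in settings.get('nodes', []):
        value['node'] = True
    new_hosts.append(value)

assert all('connect_to' in h for h in new_hosts)
```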
diff --git a/utils/src/ooinstall/openshift_ansible.py b/utils/src/ooinstall/openshift_ansible.py
index 0def72cfd..d2399df5c 100644
--- a/utils/src/ooinstall/openshift_ansible.py
+++ b/utils/src/ooinstall/openshift_ansible.py
@@ -16,10 +16,8 @@ def set_config(cfg):
CFG = cfg
def generate_inventory(hosts):
- print hosts
global CFG
- installer_host = socket.gethostname()
base_inventory_path = CFG.settings['ansible_inventory_path']
base_inventory = open(base_inventory_path, 'w')
base_inventory.write('\n[OSEv3:children]\nmasters\nnodes\n')
@@ -33,25 +31,18 @@ def generate_inventory(hosts):
version=CFG.settings.get('variant_version', None))[1]
base_inventory.write('deployment_type={}\n'.format(ver.ansible_key))
- if 'OO_INSTALL_DEVEL_REGISTRY' in os.environ:
- base_inventory.write('oreg_url=rcm-img-docker01.build.eng.bos.redhat.com:'
- '5001/openshift3/ose-${component}:${version}\n')
- if 'OO_INSTALL_PUDDLE_REPO_ENABLE' in os.environ:
- base_inventory.write("openshift_additional_repos=[{'id': 'ose-devel', "
+ if 'OO_INSTALL_ADDITIONAL_REGISTRIES' in os.environ:
+ base_inventory.write('cli_docker_additional_registries={}\n'
+ .format(os.environ['OO_INSTALL_ADDITIONAL_REGISTRIES']))
+ if 'OO_INSTALL_INSECURE_REGISTRIES' in os.environ:
+ base_inventory.write('cli_docker_insecure_registries={}\n'
+ .format(os.environ['OO_INSTALL_INSECURE_REGISTRIES']))
+ if 'OO_INSTALL_PUDDLE_REPO' in os.environ:
+ # We have to double the '{' here for literals
+ base_inventory.write("openshift_additional_repos=[{{'id': 'ose-devel', "
"'name': 'ose-devel', "
- "'baseurl': 'http://buildvm-devops.usersys.redhat.com"
- "/puddle/build/AtomicOpenShift/3.1/latest/RH7-RHAOS-3.1/$basearch/os', "
- "'enabled': 1, 'gpgcheck': 0}]\n")
- if 'OO_INSTALL_STAGE_REGISTRY' in os.environ:
- base_inventory.write('oreg_url=registry.access.stage.redhat.com/openshift3/ose-${component}:${version}\n')
-
- if any(host.hostname == installer_host or host.public_hostname == installer_host
- for host in hosts):
- no_pwd_sudo = subprocess.call(['sudo', '-v', '--non-interactive'])
- if no_pwd_sudo == 1:
- print 'The atomic-openshift-installer requires sudo access without a password.'
- sys.exit(1)
- base_inventory.write("ansible_connection=local\n")
+ "'baseurl': '{}', "
+ "'enabled': 1, 'gpgcheck': 0}}]\n".format(os.environ['OO_INSTALL_PUDDLE_REPO']))
base_inventory.write('\n[masters]\n')
masters = (host for host in hosts if host.master)
@@ -73,6 +64,7 @@ def generate_inventory(hosts):
def write_host(host, inventory, scheduleable=True):
global CFG
+
facts = ''
if host.ip:
facts += ' openshift_ip={}'.format(host.ip)
@@ -86,7 +78,17 @@ def write_host(host, inventory, scheduleable=True):
# Technically only nodes will ever need this.
if not scheduleable:
facts += ' openshift_scheduleable=False'
- inventory.write('{} {}\n'.format(host, facts))
+ installer_host = socket.gethostname()
+ if host.hostname == installer_host or host.public_hostname == installer_host:
+ facts += ' ansible_connection=local'
+ if os.geteuid() != 0:
+ no_pwd_sudo = subprocess.call(['sudo', '-v', '-n'])
+ if no_pwd_sudo == 1:
+ print 'The atomic-openshift-installer requires sudo access without a password.'
+ sys.exit(1)
+ facts += ' ansible_become=true'
+
+ inventory.write('{} {}\n'.format(host.connect_to, facts))
def load_system_facts(inventory_file, os_facts_path, env_vars):
@@ -145,6 +147,7 @@ def run_ansible(playbook, inventory, env_vars):
playbook],
env=env_vars)
+
def run_uninstall_playbook():
playbook = os.path.join(CFG.settings['ansible_playbook_directory'],
'playbooks/adhoc/uninstall.yml')
@@ -155,3 +158,17 @@ def run_uninstall_playbook():
if 'ansible_config' in CFG.settings:
facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_config']
return run_ansible(playbook, inventory_file, facts_env)
+
+
+def run_upgrade_playbook():
+ playbook = os.path.join(CFG.settings['ansible_playbook_directory'],
+ 'playbooks/adhoc/upgrades/upgrade.yml')
+ # TODO: Upgrade inventory for upgrade?
+ inventory_file = generate_inventory(CFG.hosts)
+ facts_env = os.environ.copy()
+ if 'ansible_log_path' in CFG.settings:
+ facts_env['ANSIBLE_LOG_PATH'] = CFG.settings['ansible_log_path']
+ if 'ansible_config' in CFG.settings:
+ facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_config']
+ return run_ansible(playbook, inventory_file, facts_env)
+
diff --git a/utils/src/ooinstall/variants.py b/utils/src/ooinstall/variants.py
index 05281d654..3bb61dddb 100644
--- a/utils/src/ooinstall/variants.py
+++ b/utils/src/ooinstall/variants.py
@@ -29,6 +29,9 @@ class Variant(object):
self.versions = versions
+ def latest_version(self):
+ return self.versions[-1]
+
# WARNING: Keep the versions ordered, most recent last:
OSE = Variant('openshift-enterprise', 'OpenShift Enterprise',
@@ -58,7 +61,7 @@ def find_variant(name, version=None):
for prod in SUPPORTED_VARIANTS:
if prod.name == name:
if version is None:
- return (prod, prod.versions[-1])
+ return (prod, prod.latest_version())
for v in prod.versions:
if v.name == version:
return (prod, v)
diff --git a/utils/test/oo_config_tests.py b/utils/test/oo_config_tests.py
index 01af33fd9..6dc335a0e 100644
--- a/utils/test/oo_config_tests.py
+++ b/utils/test/oo_config_tests.py
@@ -32,6 +32,26 @@ hosts:
node: true
"""
+# Used to test automatic upgrading of config:
+LEGACY_CONFIG = """
+Description: This is the configuration file for the OpenShift Ansible-Based Installer.
+Name: OpenShift Ansible-Based Installer Configuration
+Subscription: {type: none}
+Vendor: OpenShift Community
+Version: 0.0.1
+ansible_config: /tmp/notreal/ansible.cfg
+ansible_inventory_directory: /tmp/notreal/.config/openshift/.ansible
+ansible_log_path: /tmp/ansible.log
+ansible_plugins_directory: /tmp/notreal/.python-eggs/ooinstall-3.0.0-py2.7.egg-tmp/ooinstall/ansible_plugins
+masters: [10.0.0.1]
+nodes: [10.0.0.2, 10.0.0.3]
+validated_facts:
+ 10.0.0.1: {hostname: master-private.example.com, ip: 10.0.0.1, public_hostname: master.example.com, public_ip: 24.222.0.1}
+ 10.0.0.2: {hostname: node1-private.example.com, ip: 10.0.0.2, public_hostname: node1.example.com, public_ip: 24.222.0.2}
+ 10.0.0.3: {hostname: node2-private.example.com, ip: 10.0.0.3, public_hostname: node2.example.com, public_ip: 24.222.0.3}
+"""
+
+
CONFIG_INCOMPLETE_FACTS = """
hosts:
- ip: 10.0.0.1
@@ -74,6 +94,48 @@ class OOInstallFixture(unittest.TestCase):
return path
+class LegacyOOConfigTests(OOInstallFixture):
+
+ def setUp(self):
+ OOInstallFixture.setUp(self)
+ self.cfg_path = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'), LEGACY_CONFIG)
+ self.cfg = OOConfig(self.cfg_path)
+
+ def test_load_config_memory(self):
+ self.assertEquals('openshift-enterprise', self.cfg.settings['variant'])
+ self.assertEquals('3.0', self.cfg.settings['variant_version'])
+ self.assertEquals('v1', self.cfg.settings['version'])
+
+ self.assertEquals(3, len(self.cfg.hosts))
+ h1 = self.cfg.get_host('10.0.0.1')
+ self.assertEquals('10.0.0.1', h1.ip)
+ self.assertEquals('24.222.0.1', h1.public_ip)
+ self.assertEquals('master-private.example.com', h1.hostname)
+ self.assertEquals('master.example.com', h1.public_hostname)
+
+ h2 = self.cfg.get_host('10.0.0.2')
+ self.assertEquals('10.0.0.2', h2.ip)
+ self.assertEquals('24.222.0.2', h2.public_ip)
+ self.assertEquals('node1-private.example.com', h2.hostname)
+ self.assertEquals('node1.example.com', h2.public_hostname)
+
+ h3 = self.cfg.get_host('10.0.0.3')
+ self.assertEquals('10.0.0.3', h3.ip)
+ self.assertEquals('24.222.0.3', h3.public_ip)
+ self.assertEquals('node2-private.example.com', h3.hostname)
+ self.assertEquals('node2.example.com', h3.public_hostname)
+
+ self.assertFalse('masters' in self.cfg.settings)
+ self.assertFalse('nodes' in self.cfg.settings)
+ self.assertFalse('Description' in self.cfg.settings)
+ self.assertFalse('Name' in self.cfg.settings)
+ self.assertFalse('Subscription' in self.cfg.settings)
+ self.assertFalse('Vendor' in self.cfg.settings)
+ self.assertFalse('Version' in self.cfg.settings)
+        self.assertFalse('validated_facts' in self.cfg.settings)
+
+
class OOConfigTests(OOInstallFixture):
def test_load_config(self):
@@ -91,6 +153,7 @@ class OOConfigTests(OOInstallFixture):
[host['ip'] for host in ooconfig.settings['hosts']])
self.assertEquals('openshift-enterprise', ooconfig.settings['variant'])
+ self.assertEquals('v1', ooconfig.settings['version'])
def test_load_complete_facts(self):
cfg_path = self.write_config(os.path.join(self.work_dir,
@@ -128,6 +191,7 @@ class OOConfigTests(OOInstallFixture):
self.assertTrue('ansible_ssh_user' in written_config)
self.assertTrue('variant' in written_config)
+ self.assertEquals('v1', written_config['version'])
# Some advanced settings should not get written out if they
# were not specified by the user: