author     Troy Dawson <tdawson@redhat.com>    2015-04-16 16:19:02 -0500
committer  Troy Dawson <tdawson@redhat.com>    2015-04-16 16:19:02 -0500
commit     7f7b582a7bc239e69c147b98c8c2512050f12851 (patch)
tree       f0701e3ce7a42761e9dfb59218057a46e48a901b /roles
parent     db9cf8ef4f030f30391e021f360fe0c3db1dce74 (diff)
parent     0722304b2f9c94a2f70054e0a3c7feceaedb195c (diff)
Merge pull request #158 from openshift/master
Merge master into INT for first v3 INT deploy
Diffstat (limited to 'roles')
-rw-r--r--  roles/ansible_tower/tasks/main.yaml | 7
-rw-r--r--  roles/docker/tasks/main.yml | 2
-rw-r--r--  roles/openshift_ansible_inventory/README.md | 41
-rw-r--r--  roles/openshift_ansible_inventory/defaults/main.yml | 4
-rw-r--r--  roles/openshift_ansible_inventory/handlers/main.yml | 2
-rw-r--r--  roles/openshift_ansible_inventory/meta/main.yml | 8
-rw-r--r--  roles/openshift_ansible_inventory/tasks/main.yml | 11
-rw-r--r--  roles/openshift_ansible_inventory/templates/multi_ec2.yaml.j2 | 11
-rw-r--r--  roles/openshift_ansible_inventory/vars/main.yml | 2
-rw-r--r--  roles/openshift_common/README.md | 20
-rw-r--r--  roles/openshift_common/defaults/main.yml | 7
-rw-r--r--  roles/openshift_common/meta/main.yml | 2
-rw-r--r--  roles/openshift_common/tasks/main.yml | 31
-rw-r--r--  roles/openshift_common/tasks/set_facts.yml | 9
-rw-r--r--  roles/openshift_common/vars/main.yml | 5
-rw-r--r--  roles/openshift_facts/README.md | 34
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py | 482
-rw-r--r--  roles/openshift_facts/meta/main.yml | 15
-rw-r--r--  roles/openshift_facts/tasks/main.yml | 3
-rw-r--r--  roles/openshift_master/README.md | 29
-rw-r--r--  roles/openshift_master/defaults/main.yml | 13
-rw-r--r--  roles/openshift_master/handlers/main.yml | 1
-rw-r--r--  roles/openshift_master/tasks/main.yml | 77
-rw-r--r--  roles/openshift_master/vars/main.yml | 2
-rw-r--r--  roles/openshift_node/README.md | 7
-rw-r--r--  roles/openshift_node/defaults/main.yml | 6
-rw-r--r--  roles/openshift_node/handlers/main.yml | 2
-rw-r--r--  roles/openshift_node/library/openshift_register_node.py | 211
-rw-r--r--  roles/openshift_node/tasks/main.yml | 84
-rw-r--r--  roles/openshift_node/vars/main.yml | 2
-rw-r--r--  roles/openshift_register_nodes/README.md | 34
-rw-r--r--  roles/openshift_register_nodes/defaults/main.yml | 5
-rwxr-xr-x  roles/openshift_register_nodes/library/kubernetes_register_node.py | 371
-rw-r--r--  roles/openshift_register_nodes/meta/main.yml | 17
-rw-r--r--  roles/openshift_register_nodes/tasks/main.yml | 67
-rw-r--r--  roles/openshift_repos/README.md | 38
-rw-r--r--  roles/openshift_repos/defaults/main.yaml (renamed from roles/repos/defaults/main.yaml) | 2
-rw-r--r--  roles/openshift_repos/files/online/RPM-GPG-KEY-redhat-beta (renamed from roles/repos/files/online/RPM-GPG-KEY-redhat-beta) | 0
-rw-r--r--  roles/openshift_repos/files/online/RPM-GPG-KEY-redhat-release (renamed from roles/repos/files/online/RPM-GPG-KEY-redhat-release) | 0
-rw-r--r--  roles/openshift_repos/files/online/epel7-kubernetes.repo (renamed from roles/repos/files/online/epel7-kubernetes.repo) | 0
-rw-r--r--  roles/openshift_repos/files/online/epel7-openshift.repo (renamed from roles/repos/files/online/epel7-openshift.repo) | 0
-rw-r--r--  roles/openshift_repos/files/online/oso-rhui-rhel-7-extras.repo (renamed from roles/repos/files/online/oso-rhui-rhel-7-extras.repo) | 0
-rw-r--r--  roles/openshift_repos/files/online/oso-rhui-rhel-7-server.repo (renamed from roles/repos/files/online/oso-rhui-rhel-7-server.repo) | 0
-rw-r--r--  roles/openshift_repos/files/online/rhel-7-libra-candidate.repo (renamed from roles/repos/files/online/rhel-7-libra-candidate.repo) | 0
-rw-r--r--  roles/openshift_repos/meta/main.yml | 15
-rw-r--r--  roles/openshift_repos/tasks/main.yaml (renamed from roles/repos/tasks/main.yaml) | 11
-rw-r--r--  roles/openshift_repos/templates/yum_repo.j2 (renamed from roles/repos/templates/yum_repo.j2) | 0
-rw-r--r--  roles/openshift_repos/vars/main.yml (renamed from roles/repos/vars/main.yml) | 0
-rw-r--r--  roles/openshift_sdn_master/defaults/main.yml | 2
-rw-r--r--  roles/openshift_sdn_master/meta/main.yml | 3
-rw-r--r--  roles/openshift_sdn_master/tasks/main.yml | 18
-rw-r--r--  roles/openshift_sdn_node/README.md | 9
-rw-r--r--  roles/openshift_sdn_node/defaults/main.yml | 2
-rw-r--r--  roles/openshift_sdn_node/meta/main.yml | 3
-rw-r--r--  roles/openshift_sdn_node/tasks/main.yml | 23
-rw-r--r--  roles/os_env_extras_node/tasks/main.yml | 5
-rwxr-xr-x  [-rw-r--r--]  roles/os_firewall/library/os_firewall_manage_iptables.py | 63
-rw-r--r--  roles/os_firewall/meta/main.yml | 1
-rw-r--r--  roles/os_firewall/tasks/firewall/firewalld.yml | 5
-rw-r--r--  roles/os_firewall/tasks/firewall/iptables.yml | 12
-rw-r--r--  roles/os_update_latest/tasks/main.yml | 3
-rwxr-xr-x  roles/os_zabbix/library/zbxapi.py | 273
-rw-r--r--  roles/yum_repos/README.md | 113
-rw-r--r--  roles/yum_repos/defaults/main.yml | 3
-rw-r--r--  roles/yum_repos/meta/main.yml | 8
-rw-r--r--  roles/yum_repos/tasks/main.yml | 47
-rw-r--r--  roles/yum_repos/templates/yumrepo.j2 | 18
67 files changed, 1842 insertions(+), 459 deletions(-)
diff --git a/roles/ansible_tower/tasks/main.yaml b/roles/ansible_tower/tasks/main.yaml
index f58a5b1c2..1d75a95e6 100644
--- a/roles/ansible_tower/tasks/main.yaml
+++ b/roles/ansible_tower/tasks/main.yaml
@@ -9,6 +9,7 @@
- ansible
- telnet
- ack
+ - python-ansible-tower-cli
- name: download Tower setup
get_url: url=http://releases.ansible.com/ansible-tower/setup/ansible-tower-setup-2.1.1.tar.gz dest=/opt/ force=no
@@ -25,3 +26,9 @@
- name: Open firewalld port for https
firewalld: port=8080/tcp permanent=true state=enabled
+- name: Set (httpd_can_network_connect) flag on and keep it persistent across reboots
+ seboolean: name=httpd_can_network_connect state=yes persistent=yes
+
+- name: Set (httpd_can_network_connect_db) flag on and keep it persistent across reboots
+ seboolean: name=httpd_can_network_connect_db state=yes persistent=yes
+
diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml
index 2ecefd588..ca700db17 100644
--- a/roles/docker/tasks/main.yml
+++ b/roles/docker/tasks/main.yml
@@ -11,5 +11,5 @@
# From the origin rpm there exists instructions on how to
# setup origin properly. The following steps come from there
- name: Change root to be in the Docker group
- user: name=root groups=docker append=yes
+ user: name=root groups=dockerroot append=yes
diff --git a/roles/openshift_ansible_inventory/README.md b/roles/openshift_ansible_inventory/README.md
new file mode 100644
index 000000000..69a07effd
--- /dev/null
+++ b/roles/openshift_ansible_inventory/README.md
@@ -0,0 +1,41 @@
+OpenShift Ansible Inventory
+=========
+
+Install and configure openshift-ansible-inventory.
+
+Requirements
+------------
+
+None
+
+Role Variables
+--------------
+
+oo_inventory_group: group that owns the generated /etc/ansible/multi_ec2.yaml (default: root)
+oo_inventory_owner: user that owns the generated /etc/ansible/multi_ec2.yaml (default: root)
+oo_inventory_accounts: list of accounts (name, provider, env_vars) to include in the inventory
+oo_inventory_cache_max_age: maximum age of the inventory cache, in seconds (default: 1800)
+
+Dependencies
+------------
+
+None
+
+Example Playbook
+----------------
+
+An example of applying this role, with a variable passed in as a parameter:
+
+    - hosts: servers
+      roles:
+        - { role: openshift_ansible_inventory, oo_inventory_cache_max_age: 900 }
+
+License
+-------
+
+ASL 2.0
+
+Author Information
+------------------
+
+OpenShift Operations, Red Hat, Inc.
diff --git a/roles/openshift_ansible_inventory/defaults/main.yml b/roles/openshift_ansible_inventory/defaults/main.yml
new file mode 100644
index 000000000..f53c00c80
--- /dev/null
+++ b/roles/openshift_ansible_inventory/defaults/main.yml
@@ -0,0 +1,4 @@
+---
+oo_inventory_group: root
+oo_inventory_owner: root
+oo_inventory_cache_max_age: 1800
diff --git a/roles/openshift_ansible_inventory/handlers/main.yml b/roles/openshift_ansible_inventory/handlers/main.yml
new file mode 100644
index 000000000..e2db43477
--- /dev/null
+++ b/roles/openshift_ansible_inventory/handlers/main.yml
@@ -0,0 +1,2 @@
+---
+# handlers file for openshift_ansible_inventory
diff --git a/roles/openshift_ansible_inventory/meta/main.yml b/roles/openshift_ansible_inventory/meta/main.yml
new file mode 100644
index 000000000..ff3df0a7d
--- /dev/null
+++ b/roles/openshift_ansible_inventory/meta/main.yml
@@ -0,0 +1,8 @@
+---
+galaxy_info:
+ author: Openshift
+ description: Install and configure openshift-ansible-inventory
+ company: Red Hat, Inc
+ license: ASL 2.0
+ min_ansible_version: 1.2
+dependencies: []
diff --git a/roles/openshift_ansible_inventory/tasks/main.yml b/roles/openshift_ansible_inventory/tasks/main.yml
new file mode 100644
index 000000000..3990d5750
--- /dev/null
+++ b/roles/openshift_ansible_inventory/tasks/main.yml
@@ -0,0 +1,11 @@
+---
+- yum:
+ name: openshift-ansible-inventory
+ state: present
+
+- template:
+ src: multi_ec2.yaml.j2
+ dest: /etc/ansible/multi_ec2.yaml
+ group: "{{ oo_inventory_group }}"
+ owner: "{{ oo_inventory_owner }}"
+ mode: "0640"
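The tasks above install the inventory package and render /etc/ansible/multi_ec2.yaml from the template that follows; `oo_inventory_accounts` is left to the caller. A minimal sketch of matching group_vars, with a hypothetical account name, placeholder provider value, and dummy credentials:

    ---
    oo_inventory_accounts:
      - name: aws-account-1     # hypothetical account name
        provider: ec2           # placeholder provider value
        env_vars:
          AWS_ACCESS_KEY_ID: AKIAXXXXXXXXXXXXXXXX
          AWS_SECRET_ACCESS_KEY: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx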
diff --git a/roles/openshift_ansible_inventory/templates/multi_ec2.yaml.j2 b/roles/openshift_ansible_inventory/templates/multi_ec2.yaml.j2
new file mode 100644
index 000000000..23dfe73b8
--- /dev/null
+++ b/roles/openshift_ansible_inventory/templates/multi_ec2.yaml.j2
@@ -0,0 +1,11 @@
+# multi ec2 inventory configs
+cache_max_age: {{ oo_inventory_cache_max_age }}
+accounts:
+{% for account in oo_inventory_accounts %}
+ - name: {{ account.name }}
+ provider: {{ account.provider }}
+ env_vars:
+ AWS_ACCESS_KEY_ID: {{ account.env_vars.AWS_ACCESS_KEY_ID }}
+ AWS_SECRET_ACCESS_KEY: {{ account.env_vars.AWS_SECRET_ACCESS_KEY }}
+
+{% endfor %}
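Given the group_vars sketched above and the default cache age, the rendered /etc/ansible/multi_ec2.yaml would look roughly like:

    # multi ec2 inventory configs
    cache_max_age: 1800
    accounts:
      - name: aws-account-1
        provider: ec2
        env_vars:
          AWS_ACCESS_KEY_ID: AKIAXXXXXXXXXXXXXXXX
          AWS_SECRET_ACCESS_KEY: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx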
diff --git a/roles/openshift_ansible_inventory/vars/main.yml b/roles/openshift_ansible_inventory/vars/main.yml
new file mode 100644
index 000000000..25c049282
--- /dev/null
+++ b/roles/openshift_ansible_inventory/vars/main.yml
@@ -0,0 +1,2 @@
+---
+# vars file for openshift_ansible_inventory
diff --git a/roles/openshift_common/README.md b/roles/openshift_common/README.md
index c2ae609ff..14c2037e4 100644
--- a/roles/openshift_common/README.md
+++ b/roles/openshift_common/README.md
@@ -12,19 +12,21 @@ rhel-7-server-extra-rpms, and rhel-7-server-ose-beta-rpms repos.
Role Variables
--------------
-| Name | Default value | |
-|-------------------------------|------------------------------|----------------------------------------|
-| openshift_bind_ip | ansible_default_ipv4.address | IP to use for local binding |
-| openshift_debug_level | 0 | Global openshift debug log verbosity |
-| openshift_hostname_workaround | True | Workaround needed to set hostname to IP address |
-| openshift_hostname | openshift_public_ip if openshift_hostname_workaround else ansible_fqdn | hostname to use for this instance |
-| openshift_public_ip | UNDEF (Required) | Public IP address to use for this host |
-| openshift_env | default | Envrionment name if multiple OpenShift instances |
+| Name | Default value | |
+|---------------------------|-------------------|---------------------------------------------|
+| openshift_cluster_id | default | Cluster name if multiple OpenShift clusters |
+| openshift_debug_level | 0 | Global openshift debug log verbosity |
+| openshift_hostname | UNDEF | Internal hostname to use for this host (this value will set the hostname on the system) |
+| openshift_ip | UNDEF | Internal IP address to use for this host |
+| openshift_public_hostname | UNDEF | Public hostname to use for this host |
+| openshift_public_ip | UNDEF | Public IP address to use for this host |
Dependencies
------------
os_firewall
+openshift_facts
+openshift_repos
Example Playbook
----------------
@@ -39,4 +41,4 @@ Apache License, Version 2.0
Author Information
------------------
-TODO
+Jason DeTiberus (jdetiber@redhat.com)
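For reference, a minimal play wiring up the variables documented above (cluster name, host names, and addresses are illustrative, not part of this patch):

    - hosts: masters
      vars:
        openshift_cluster_id: prod
        openshift_hostname: master.internal.example.com
        openshift_public_hostname: master.example.com
      roles:
        - openshift_common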
diff --git a/roles/openshift_common/defaults/main.yml b/roles/openshift_common/defaults/main.yml
index a541591fb..4d3e0fe9e 100644
--- a/roles/openshift_common/defaults/main.yml
+++ b/roles/openshift_common/defaults/main.yml
@@ -1,8 +1,3 @@
---
-openshift_bind_ip: "{{ ansible_default_ipv4.address }}"
+openshift_cluster_id: 'default'
openshift_debug_level: 0
-
-# TODO: Once openshift stops resolving hostnames for node queries remove
-# this...
-openshift_hostname_workaround: true
-openshift_hostname: "{{ openshift_public_ip if openshift_hostname_workaround else ansible_fqdn }}"
diff --git a/roles/openshift_common/meta/main.yml b/roles/openshift_common/meta/main.yml
index 88b7677d0..81363ec68 100644
--- a/roles/openshift_common/meta/main.yml
+++ b/roles/openshift_common/meta/main.yml
@@ -13,3 +13,5 @@ galaxy_info:
- cloud
dependencies:
- { role: os_firewall }
+- { role: openshift_facts }
+- { role: openshift_repos }
diff --git a/roles/openshift_common/tasks/main.yml b/roles/openshift_common/tasks/main.yml
index 728bba4e4..941190534 100644
--- a/roles/openshift_common/tasks/main.yml
+++ b/roles/openshift_common/tasks/main.yml
@@ -1,21 +1,16 @@
---
-# fixme: Once openshift stops resolving hostnames for node queries remove this...
-- name: Set hostname to IP Addr (WORKAROUND)
- hostname: name={{ openshift_bind_ip }}
- when: openshift_hostname_workaround
+- name: Set common OpenShift facts
+ openshift_facts:
+ role: 'common'
+ local_facts:
+ cluster_id: "{{ openshift_cluster_id | default('default') }}"
+ debug_level: "{{ openshift_debug_level | default(0) }}"
+ hostname: "{{ openshift_hostname | default(None) }}"
+ ip: "{{ openshift_ip | default(None) }}"
+ public_hostname: "{{ openshift_public_hostname | default(None) }}"
+ public_ip: "{{ openshift_public_ip | default(None) }}"
+ use_openshift_sdn: "{{ openshift_use_openshift_sdn | default(None) }}"
-- name: Configure local facts file
- file: path=/etc/ansible/facts.d/ state=directory mode=0750
+- name: Set hostname
+ hostname: name={{ openshift.common.hostname }}
-- name: Set common OpenShift facts
- include: set_facts.yml
- facts:
- - section: common
- option: env
- value: "{{ openshift_env | default('default') }}"
- - section: common
- option: host_type
- value: "{{ openshift_host_type }}"
- - section: common
- option: debug_level
- value: "{{ openshift_debug_level }}"
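The openshift_facts call above replaces the old ini-file approach: it persists the local facts and exposes the merged result under the `openshift` namespace, which the hostname task then consumes. A sketch of the resulting structure, assuming module defaults and illustrative addresses:

    openshift:
      common:
        cluster_id: default
        debug_level: 0
        hostname: master.internal.example.com
        ip: 10.0.0.1
        public_hostname: master.example.com
        public_ip: 203.0.113.10
        use_openshift_sdn: true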
diff --git a/roles/openshift_common/tasks/set_facts.yml b/roles/openshift_common/tasks/set_facts.yml
deleted file mode 100644
index 349eecd1d..000000000
--- a/roles/openshift_common/tasks/set_facts.yml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-- name: "Setting local_facts"
- ini_file:
- dest: /etc/ansible/facts.d/openshift.fact
- mode: 0640
- section: "{{ item.section }}"
- option: "{{ item.option }}"
- value: "{{ item.value }}"
- with_items: facts
diff --git a/roles/openshift_common/vars/main.yml b/roles/openshift_common/vars/main.yml
index 623aed9bf..50816d319 100644
--- a/roles/openshift_common/vars/main.yml
+++ b/roles/openshift_common/vars/main.yml
@@ -1,6 +1,7 @@
---
-openshift_master_credentials_dir: /var/lib/openshift/openshift.local.certificates/admin/
-
# TODO: Upstream kubernetes only supports iptables currently, if this changes,
# then these variable should be moved to defaults
+# TODO: it might be possible to still use firewalld if we wire up the created
+# chains with the public zone (or the zone associated with the correct
+# interfaces)
os_firewall_use_firewalld: False
diff --git a/roles/openshift_facts/README.md b/roles/openshift_facts/README.md
new file mode 100644
index 000000000..2fd50e236
--- /dev/null
+++ b/roles/openshift_facts/README.md
@@ -0,0 +1,34 @@
+OpenShift Facts
+===============
+
+Provides the openshift_facts module
+
+Requirements
+------------
+
+None
+
+Role Variables
+--------------
+
+None
+
+Dependencies
+------------
+
+None
+
+Example Playbook
+----------------
+
+TODO
+
+License
+-------
+
+Apache License, Version 2.0
+
+Author Information
+------------------
+
+Jason DeTiberus (jdetiber@redhat.com)
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
new file mode 100755
index 000000000..0dd343443
--- /dev/null
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -0,0 +1,482 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# vim: expandtab:tabstop=4:shiftwidth=4
+
+DOCUMENTATION = '''
+---
+module: openshift_facts
+short_description: OpenShift Facts
+author: Jason DeTiberus
+requirements: [ ]
+'''
+EXAMPLES = '''
+'''
+
+import ConfigParser
+import copy
+import os
+import re
+import json
+import urlparse
+
+class OpenShiftFactsUnsupportedRoleError(Exception):
+ pass
+
+class OpenShiftFactsFileWriteError(Exception):
+ pass
+
+class OpenShiftFacts():
+ known_roles = ['common', 'master', 'node', 'master_sdn', 'node_sdn']
+
+ def __init__(self, role, filename, local_facts):
+ self.changed = False
+ self.filename = filename
+ if role not in self.known_roles:
+ raise OpenShiftFactsUnsupportedRoleError("Role %s is not supported by this module" % role)
+ self.role = role
+ self.facts = self.generate_facts(local_facts)
+
+ def generate_facts(self, local_facts):
+ local_facts = self.init_local_facts(local_facts)
+ roles = local_facts.keys()
+
+ defaults = self.get_defaults(roles)
+ provider_facts = self.init_provider_facts()
+ facts = self.apply_provider_facts(defaults, provider_facts, roles)
+
+ facts = self.merge_facts(facts, local_facts)
+ facts['current_config'] = self.current_config(facts)
+ self.set_url_facts_if_unset(facts)
+ return dict(openshift=facts)
+
+
+ def set_url_facts_if_unset(self, facts):
+ if 'master' in facts:
+ for (url_var, use_ssl, port, default) in [
+ ('api_url',
+ facts['master']['api_use_ssl'],
+ facts['master']['api_port'],
+ facts['common']['hostname']),
+ ('public_api_url',
+ facts['master']['api_use_ssl'],
+ facts['master']['api_port'],
+ facts['common']['public_hostname']),
+ ('console_url',
+ facts['master']['console_use_ssl'],
+ facts['master']['console_port'],
+ facts['common']['hostname']),
+            ('public_console_url',
+ facts['master']['console_use_ssl'],
+ facts['master']['console_port'],
+ facts['common']['public_hostname'])]:
+ if url_var not in facts['master']:
+ scheme = 'https' if use_ssl else 'http'
+ netloc = default
+ if (scheme == 'https' and port != '443') or (scheme == 'http' and port != '80'):
+ netloc = "%s:%s" % (netloc, port)
+ facts['master'][url_var] = urlparse.urlunparse((scheme, netloc, '', '', '', ''))
+
+
+ # Query current OpenShift config and return a dictionary containing
+ # settings that may be valuable for determining actions that need to be
+ # taken in the playbooks/roles
+ def current_config(self, facts):
+ current_config=dict()
+ roles = [ role for role in facts if role not in ['common','provider'] ]
+ for role in roles:
+ if 'roles' in current_config:
+ current_config['roles'].append(role)
+ else:
+ current_config['roles'] = [role]
+
+ # TODO: parse the /etc/sysconfig/openshift-{master,node} config to
+ # determine the location of files.
+
+ # Query kubeconfig settings
+ kubeconfig_dir = '/var/lib/openshift/openshift.local.certificates'
+ if role == 'node':
+ kubeconfig_dir = os.path.join(kubeconfig_dir, "node-%s" % facts['common']['hostname'])
+
+ kubeconfig_path = os.path.join(kubeconfig_dir, '.kubeconfig')
+ if os.path.isfile('/usr/bin/openshift') and os.path.isfile(kubeconfig_path):
+ try:
+ _, output, error = module.run_command(["/usr/bin/openshift", "ex",
+ "config", "view", "-o",
+ "json",
+ "--kubeconfig=%s" % kubeconfig_path],
+ check_rc=False)
+ config = json.loads(output)
+
+ try:
+ for cluster in config['clusters']:
+ config['clusters'][cluster]['certificate-authority-data'] = 'masked'
+ except KeyError:
+ pass
+ try:
+ for user in config['users']:
+ config['users'][user]['client-certificate-data'] = 'masked'
+ config['users'][user]['client-key-data'] = 'masked'
+ except KeyError:
+ pass
+
+ current_config['kubeconfig'] = config
+ except Exception:
+ pass
+
+ return current_config
+
+
+ def apply_provider_facts(self, facts, provider_facts, roles):
+ if not provider_facts:
+ return facts
+
+ use_openshift_sdn = provider_facts.get('use_openshift_sdn')
+ if isinstance(use_openshift_sdn, bool):
+ facts['common']['use_openshift_sdn'] = use_openshift_sdn
+
+ common_vars = [('hostname', 'ip'), ('public_hostname', 'public_ip')]
+ for h_var, ip_var in common_vars:
+ ip_value = provider_facts['network'].get(ip_var)
+ if ip_value:
+ facts['common'][ip_var] = ip_value
+
+ facts['common'][h_var] = self.choose_hostname([provider_facts['network'].get(h_var)], facts['common'][ip_var])
+
+ if 'node' in roles:
+ ext_id = provider_facts.get('external_id')
+ if ext_id:
+ facts['node']['external_id'] = ext_id
+
+ facts['provider'] = provider_facts
+ return facts
+
+ def hostname_valid(self, hostname):
+ if (not hostname or
+ hostname.startswith('localhost') or
+ hostname.endswith('localdomain') or
+ len(hostname.split('.')) < 2):
+ return False
+
+ return True
+
+ def choose_hostname(self, hostnames=[], fallback=''):
+ hostname = fallback
+
+ ips = [ i for i in hostnames if i is not None and re.match(r'\A\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\Z', i) ]
+ hosts = [ i for i in hostnames if i is not None and i not in set(ips) ]
+
+ for host_list in (hosts, ips):
+ for h in host_list:
+ if self.hostname_valid(h):
+ return h
+
+ return hostname
+
+ def get_defaults(self, roles):
+ hardware_facts = self.get_hardware_facts()
+ net_facts = self.get_net_facts()
+ base_facts = self.get_base_facts()
+
+ defaults = dict()
+
+ common = dict(use_openshift_sdn=True)
+ ip = net_facts['default_ipv4']['address']
+ common['ip'] = ip
+ common['public_ip'] = ip
+
+ rc, output, error = module.run_command(['hostname', '-f'])
+ hostname_f = output.strip() if rc == 0 else ''
+ hostname_values = [hostname_f, base_facts['nodename'], base_facts['fqdn']]
+ hostname = self.choose_hostname(hostname_values)
+
+ common['hostname'] = hostname
+ common['public_hostname'] = hostname
+ defaults['common'] = common
+
+ if 'master' in roles:
+ # TODO: provide for a better way to override just the port, or just
+ # the urls, instead of forcing both, also to override the hostname
+ # without having to re-generate these urls later
+ master = dict(api_use_ssl=True, api_port='8443',
+ console_use_ssl=True, console_path='/console',
+ console_port='8443', etcd_use_ssl=False,
+ etcd_port='4001')
+ defaults['master'] = master
+
+ if 'node' in roles:
+ node = dict(external_id=common['hostname'], pod_cidr='',
+ labels={}, annotations={})
+ node['resources_cpu'] = hardware_facts['processor_cores']
+ node['resources_memory'] = int(int(hardware_facts['memtotal_mb']) * 1024 * 1024 * 0.75)
+ defaults['node'] = node
+
+ return defaults
+
+ def merge_facts(self, orig, new):
+ facts = dict()
+ for key, value in orig.iteritems():
+ if key in new:
+ if isinstance(value, dict):
+ facts[key] = self.merge_facts(value, new[key])
+ else:
+ facts[key] = copy.copy(new[key])
+ else:
+ facts[key] = copy.deepcopy(value)
+ new_keys = set(new.keys()) - set(orig.keys())
+ for key in new_keys:
+ facts[key] = copy.deepcopy(new[key])
+ return facts
+
+ def query_metadata(self, metadata_url, headers=None, expect_json=False):
+ r, info = fetch_url(module, metadata_url, headers=headers)
+ if info['status'] != 200:
+ module.fail_json(msg='Failed to query metadata', result=r,
+ info=info)
+ if expect_json:
+ return module.from_json(r.read())
+ else:
+ return [line.strip() for line in r.readlines()]
+
+ def walk_metadata(self, metadata_url, headers=None, expect_json=False):
+ metadata = dict()
+
+ for line in self.query_metadata(metadata_url, headers, expect_json):
+ if line.endswith('/') and not line == 'public-keys/':
+ key = line[:-1]
+ metadata[key]=self.walk_metadata(metadata_url + line, headers,
+ expect_json)
+ else:
+ results = self.query_metadata(metadata_url + line, headers,
+ expect_json)
+ if len(results) == 1:
+ metadata[line] = results.pop()
+ else:
+ metadata[line] = results
+ return metadata
+
+ def get_provider_metadata(self, metadata_url, supports_recursive=False,
+ headers=None, expect_json=False):
+ if supports_recursive:
+ metadata = self.query_metadata(metadata_url, headers, expect_json)
+ else:
+ metadata = self.walk_metadata(metadata_url, headers, expect_json)
+ return metadata
+
+ def get_hardware_facts(self):
+ if not hasattr(self, 'hardware_facts'):
+ self.hardware_facts = Hardware().populate()
+ return self.hardware_facts
+
+ def get_base_facts(self):
+ if not hasattr(self, 'base_facts'):
+ self.base_facts = Facts().populate()
+ return self.base_facts
+
+ def get_virt_facts(self):
+ if not hasattr(self, 'virt_facts'):
+ self.virt_facts = Virtual().populate()
+ return self.virt_facts
+
+ def get_net_facts(self):
+ if not hasattr(self, 'net_facts'):
+ self.net_facts = Network(module).populate()
+ return self.net_facts
+
+ def guess_host_provider(self):
+ # TODO: cloud provider facts should probably be submitted upstream
+ virt_facts = self.get_virt_facts()
+ hardware_facts = self.get_hardware_facts()
+ product_name = hardware_facts['product_name']
+ product_version = hardware_facts['product_version']
+ virt_type = virt_facts['virtualization_type']
+ virt_role = virt_facts['virtualization_role']
+ provider = None
+ metadata = None
+
+ # TODO: this is not exposed through module_utils/facts.py in ansible,
+ # need to create PR for ansible to expose it
+ bios_vendor = get_file_content('/sys/devices/virtual/dmi/id/bios_vendor')
+ if bios_vendor == 'Google':
+ provider = 'gce'
+ metadata_url = 'http://metadata.google.internal/computeMetadata/v1/?recursive=true'
+ headers = {'Metadata-Flavor': 'Google'}
+ metadata = self.get_provider_metadata(metadata_url, True, headers,
+ True)
+
+ # Filter sshKeys and serviceAccounts from gce metadata
+ metadata['project']['attributes'].pop('sshKeys', None)
+ metadata['instance'].pop('serviceAccounts', None)
+ elif virt_type == 'xen' and virt_role == 'guest' and re.match(r'.*\.amazon$', product_version):
+ provider = 'ec2'
+ metadata_url = 'http://169.254.169.254/latest/meta-data/'
+ metadata = self.get_provider_metadata(metadata_url)
+ elif re.search(r'OpenStack', product_name):
+ provider = 'openstack'
+ metadata_url = 'http://169.254.169.254/openstack/latest/meta_data.json'
+ metadata = self.get_provider_metadata(metadata_url, True, None, True)
+ ec2_compat_url = 'http://169.254.169.254/latest/meta-data/'
+ metadata['ec2_compat'] = self.get_provider_metadata(ec2_compat_url)
+
+ # Filter public_keys and random_seed from openstack metadata
+ metadata.pop('public_keys', None)
+ metadata.pop('random_seed', None)
+ return dict(name=provider, metadata=metadata)
+
+ def normalize_provider_facts(self, provider, metadata):
+ if provider is None or metadata is None:
+ return {}
+
+ # TODO: test for ipv6_enabled where possible (gce, aws do not support)
+ # and configure ipv6 facts if available
+
+ # TODO: add support for setting user_data if available
+
+ facts = dict(name=provider, metadata=metadata)
+ network = dict(interfaces=[], ipv6_enabled=False)
+ if provider == 'gce':
+ for interface in metadata['instance']['networkInterfaces']:
+ int_info = dict(ips=[interface['ip']], network_type=provider)
+ int_info['public_ips'] = [ ac['externalIp'] for ac in interface['accessConfigs'] ]
+ int_info['public_ips'].extend(interface['forwardedIps'])
+ _, _, network_id = interface['network'].rpartition('/')
+ int_info['network_id'] = network_id
+ network['interfaces'].append(int_info)
+ _, _, zone = metadata['instance']['zone'].rpartition('/')
+ facts['zone'] = zone
+ facts['external_id'] = metadata['instance']['id']
+
+ # Default to no sdn for GCE deployments
+ facts['use_openshift_sdn'] = False
+
+ # GCE currently only supports a single interface
+ network['ip'] = network['interfaces'][0]['ips'][0]
+ network['public_ip'] = network['interfaces'][0]['public_ips'][0]
+ network['hostname'] = metadata['instance']['hostname']
+
+ # TODO: attempt to resolve public_hostname
+ network['public_hostname'] = network['public_ip']
+ elif provider == 'ec2':
+ for interface in sorted(metadata['network']['interfaces']['macs'].values(),
+ key=lambda x: x['device-number']):
+ int_info = dict()
+ var_map = {'ips': 'local-ipv4s', 'public_ips': 'public-ipv4s'}
+ for ips_var, int_var in var_map.iteritems():
+ ips = interface[int_var]
+ int_info[ips_var] = [ips] if isinstance(ips, basestring) else ips
+ int_info['network_type'] = 'vpc' if 'vpc-id' in interface else 'classic'
+ int_info['network_id'] = interface['subnet-id'] if int_info['network_type'] == 'vpc' else None
+ network['interfaces'].append(int_info)
+ facts['zone'] = metadata['placement']['availability-zone']
+ facts['external_id'] = metadata['instance-id']
+
+ # TODO: actually attempt to determine default local and public ips
+ # by using the ansible default ip fact and the ipv4-associations
+ # form the ec2 metadata
+ network['ip'] = metadata['local-ipv4']
+ network['public_ip'] = metadata['public-ipv4']
+
+ # TODO: verify that local hostname makes sense and is resolvable
+ network['hostname'] = metadata['local-hostname']
+
+ # TODO: verify that public hostname makes sense and is resolvable
+ network['public_hostname'] = metadata['public-hostname']
+ elif provider == 'openstack':
+ # openstack ec2 compat api does not support network interfaces and
+ # the version tested on did not include the info in the openstack
+ # metadata api, should be updated if neutron exposes this.
+
+ facts['zone'] = metadata['availability_zone']
+ facts['external_id'] = metadata['uuid']
+ network['ip'] = metadata['ec2_compat']['local-ipv4']
+ network['public_ip'] = metadata['ec2_compat']['public-ipv4']
+
+ # TODO: verify local hostname makes sense and is resolvable
+ network['hostname'] = metadata['hostname']
+
+ # TODO: verify that public hostname makes sense and is resolvable
+ network['public_hostname'] = metadata['ec2_compat']['public-hostname']
+
+ facts['network'] = network
+ return facts
+
+ def init_provider_facts(self):
+ provider_info = self.guess_host_provider()
+ provider_facts = self.normalize_provider_facts(
+ provider_info.get('name'),
+ provider_info.get('metadata')
+ )
+ return provider_facts
+
+ def get_facts(self):
+ # TODO: transform facts into cleaner format (openshift_<blah> instead
+ # of openshift.<blah>
+ return self.facts
+
+ def init_local_facts(self, facts={}):
+ changed = False
+
+ local_facts = ConfigParser.SafeConfigParser()
+ local_facts.read(self.filename)
+
+ section = self.role
+ if not local_facts.has_section(section):
+ local_facts.add_section(section)
+ changed = True
+
+ for key, value in facts.iteritems():
+ if isinstance(value, bool):
+ value = str(value)
+ if not value:
+ continue
+ if not local_facts.has_option(section, key) or local_facts.get(section, key) != value:
+ local_facts.set(section, key, value)
+ changed = True
+
+ if changed and not module.check_mode:
+ try:
+ fact_dir = os.path.dirname(self.filename)
+ if not os.path.exists(fact_dir):
+ os.makedirs(fact_dir)
+ with open(self.filename, 'w') as fact_file:
+ local_facts.write(fact_file)
+ except (IOError, OSError) as e:
+ raise OpenShiftFactsFileWriteError("Could not create fact file: %s, error: %s" % (self.filename, e))
+ self.changed = changed
+
+ role_facts = dict()
+ for section in local_facts.sections():
+ role_facts[section] = dict()
+ for opt, val in local_facts.items(section):
+ role_facts[section][opt] = val
+ return role_facts
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec = dict(
+ role=dict(default='common',
+ choices=OpenShiftFacts.known_roles,
+ required=False),
+ local_facts=dict(default={}, type='dict', required=False),
+ ),
+ supports_check_mode=True,
+ add_file_common_args=True,
+ )
+
+ role = module.params['role']
+ local_facts = module.params['local_facts']
+ fact_file = '/etc/ansible/facts.d/openshift.fact'
+
+ openshift_facts = OpenShiftFacts(role, fact_file, local_facts)
+
+ file_params = module.params.copy()
+ file_params['path'] = fact_file
+ file_args = module.load_file_common_arguments(file_params)
+ changed = module.set_fs_attributes_if_different(file_args,
+ openshift_facts.changed)
+
+ return module.exit_json(changed=changed,
+ ansible_facts=openshift_facts.get_facts())
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.facts import *
+from ansible.module_utils.urls import *
+main()
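Per the argument_spec above, the module takes an optional `role` (one of common, master, node, master_sdn, node_sdn) and a `local_facts` dict, and returns the merged facts through ansible_facts, so later tasks can reference them directly. A short usage sketch:

    - name: Gather OpenShift facts
      openshift_facts:
        role: 'common'
        local_facts:
          cluster_id: 'default'

    - name: Show the derived hostname
      debug:
        msg: "{{ openshift.common.hostname }}"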
diff --git a/roles/openshift_facts/meta/main.yml b/roles/openshift_facts/meta/main.yml
new file mode 100644
index 000000000..0be3afd24
--- /dev/null
+++ b/roles/openshift_facts/meta/main.yml
@@ -0,0 +1,15 @@
+---
+galaxy_info:
+ author: Jason DeTiberus
+  description: Provides the openshift_facts module
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 1.8
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+ - system
+dependencies: []
diff --git a/roles/openshift_facts/tasks/main.yml b/roles/openshift_facts/tasks/main.yml
new file mode 100644
index 000000000..5a7d10d25
--- /dev/null
+++ b/roles/openshift_facts/tasks/main.yml
@@ -0,0 +1,3 @@
+---
+- name: Gather OpenShift facts
+ openshift_facts:
diff --git a/roles/openshift_master/README.md b/roles/openshift_master/README.md
index 5a1b889b2..9f9d0a613 100644
--- a/roles/openshift_master/README.md
+++ b/roles/openshift_master/README.md
@@ -13,21 +13,24 @@ Role Variables
--------------
From this role:
-| Name | Default value |
-|
-|------------------------------------------|-----------------------|----------------------------------------|
-| openshift_master_manage_service_externally | False | Should the openshift-master role manage the openshift-master service? |
-| openshift_master_debug_level | openshift_debug_level | Verbosity of the debug logs for openshift-master |
-| openshift_node_ips | [] | List of the openshift node ip addresses, that we want to pre-register to the system when openshift-master starts up |
-| openshift_registry_url | UNDEF (Optional) | Default docker registry to use |
+| Name | Default value | |
+|-------------------------------------|-----------------------|--------------------------------------------------|
+| openshift_master_debug_level | openshift_debug_level | Verbosity of the debug logs for openshift-master |
+| openshift_node_ips | [] | List of the openshift node ip addresses to pre-register when openshift-master starts up |
+| openshift_registry_url | UNDEF | Default docker registry to use |
+| openshift_master_api_port | UNDEF | |
+| openshift_master_console_port | UNDEF | |
+| openshift_master_api_url | UNDEF | |
+| openshift_master_console_url | UNDEF | |
+| openshift_master_public_api_url | UNDEF | |
+| openshift_master_public_console_url | UNDEF | |
From openshift_common:
-| Name | Default Value | |
-|-------------------------------|---------------------|---------------------|
-| openshift_debug_level | 0 | Global openshift debug log verbosity |
-| openshift_hostname_workaround | True | |
-| openshift_public_ip | UNDEF (Required) | Public IP address to use for this host |
-| openshift_hostname | openshift_public_ip if openshift_hostname_workaround else ansible_fqdn | hostname to use for this instance |
+| Name | Default Value | |
+|-------------------------------|----------------|----------------------------------------|
+| openshift_debug_level | 0 | Global openshift debug log verbosity |
+| openshift_public_ip | UNDEF | Public IP address to use for this host |
+| openshift_hostname | UNDEF | hostname to use for this instance |
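A sketch of host variables feeding the table above (URLs and ports are illustrative):

    openshift_master_api_port: '8443'
    openshift_master_public_api_url: https://openshift.example.com:8443
    openshift_master_public_console_url: https://openshift.example.com:8443/console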
Dependencies
------------
diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml
index 0159afbb5..87fb347a8 100644
--- a/roles/openshift_master/defaults/main.yml
+++ b/roles/openshift_master/defaults/main.yml
@@ -1,16 +1,17 @@
---
-openshift_master_manage_service_externally: false
-openshift_master_debug_level: "{{ openshift_debug_level | default(0) }}"
openshift_node_ips: []
+
+# TODO: update setting these values based on the facts
+# TODO: update for console port change
os_firewall_allow:
- service: etcd embedded
port: 4001/tcp
-- service: etcd peer
- port: 7001/tcp
- service: OpenShift api https
port: 8443/tcp
-- service: OpenShift web console https
- port: 8444/tcp
os_firewall_deny:
- service: OpenShift api http
port: 8080/tcp
+- service: former OpenShift web console port
+ port: 8444/tcp
+- service: former etcd peer port
+ port: 7001/tcp
diff --git a/roles/openshift_master/handlers/main.yml b/roles/openshift_master/handlers/main.yml
index 503d08d41..6fd4dfb51 100644
--- a/roles/openshift_master/handlers/main.yml
+++ b/roles/openshift_master/handlers/main.yml
@@ -1,4 +1,3 @@
---
- name: restart openshift-master
service: name=openshift-master state=restarted
- when: not openshift_master_manage_service_externally
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index d5f4776dc..aa615df39 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -1,17 +1,37 @@
---
+# TODO: actually have api_port, api_use_ssl, console_port, console_use_ssl,
+# etcd_use_ssl actually change the master config.
+
+- name: Set master OpenShift facts
+ openshift_facts:
+ role: 'master'
+ local_facts:
+ debug_level: "{{ openshift_master_debug_level | default(openshift.common.debug_level) }}"
+ api_port: "{{ openshift_master_api_port | default(None) }}"
+ api_url: "{{ openshift_master_api_url | default(None) }}"
+ api_use_ssl: "{{ openshift_master_api_use_ssl | default(None) }}"
+ public_api_url: "{{ openshift_master_public_api_url | default(None) }}"
+ console_port: "{{ openshift_master_console_port | default(None) }}"
+ console_url: "{{ openshift_master_console_url | default(None) }}"
+ console_use_ssl: "{{ openshift_master_console_use_ssl | default(None) }}"
+ public_console_url: "{{ openshift_master_public_console_url | default(None) }}"
+ etcd_use_ssl: "{{ openshift_master_etcd_use_ssl | default(None) }}"
+
- name: Install OpenShift Master package
yum: pkg=openshift-master state=installed
+# TODO: We should pre-generate the master config and point to the generated
+# config rather than setting command line flags here
- name: Configure OpenShift settings
lineinfile:
dest: /etc/sysconfig/openshift-master
regexp: '^OPTIONS='
- line: "OPTIONS=\"--public-master={{ openshift_hostname }} {% if
- openshift_node_ips %} --nodes={{ openshift_node_ips
- | join(',') }} {% endif %} --loglevel={{ openshift_master_debug_level }}\""
+ line: "OPTIONS=\"--master={{ openshift.common.hostname }} --public-master={{ openshift.common.public_hostname }} {% if openshift_node_ips %} --nodes={{ openshift_node_ips | join(',') }} {% endif %} --loglevel={{ openshift.master.debug_level }}\""
notify:
- restart openshift-master
+# TODO: should this be populated by a fact based on the deployment type
+# (origin, online, enterprise)?
- name: Set default registry url
lineinfile:
dest: /etc/sysconfig/openshift-master
@@ -21,61 +41,18 @@
notify:
- restart openshift-master
-- name: Set master OpenShift facts
- include: "{{ role_path | dirname }}/openshift_common/tasks/set_facts.yml"
- facts:
- - section: master
- option: debug_level
- value: "{{ openshift_master_debug_level }}"
- - section: master
- option: public_ip
- value: "{{ openshift_public_ip }}"
- - section: master
- option: externally_managed
- value: "{{ openshift_master_manage_service_externally }}"
-
-# TODO: remove this when origin PR #1298 has landed in OSE
-- name: Workaround for openshift-master taking longer than 90 seconds to issue sdNotify signal
- command: cp /usr/lib/systemd/system/openshift-master.service /etc/systemd/system/
- args:
- creates: /etc/systemd/system/openshift-master.service
-- ini_file:
- dest: /etc/systemd/system/openshift-master.service
- option: TimeoutStartSec
- section: Service
- value: 300
- state: present
- register: result
-- command: systemctl daemon-reload
- when: result | changed
-# End of workaround pending PR #1298
-
- name: Start and enable openshift-master
service: name=openshift-master enabled=yes state=started
- when: not openshift_master_manage_service_externally
- register: result
-
-#TODO: remove this when origin PR #1204 has landed in OSE
-- name: need to pause here, otherwise we attempt to copy certificates generated by the master before they are generated
- pause: seconds=30
- when: result | changed
-# End of workaround pending PR #1204
-- name: Disable openshift-master if openshift-master is managed externally
- service: name=openshift-master enabled=false
- when: openshift_master_manage_service_externally
-
-# TODO: create an os_vars role that has generic env related config and move
-# the root kubeconfig setting there, cannot use dependencies to force ordering
-# with openshift_node and openshift_master because the way conditional
-# dependencies work with current ansible would also exclude the
-# openshift_common dependency.
- name: Create .kube directory
file:
path: /root/.kube
state: directory
mode: 0700
+
+# TODO: Update this file if the contents of the source file are not present in
+# the dest file, will need to make sure to ignore things that could be added
- name: Configure root user kubeconfig
- command: cp /var/lib/openshift/openshift.local.certificates/admin/.kubeconfig /root/.kube/.kubeconfig
+ command: cp /var/lib/openshift/openshift.local.certificates/openshift-client/.kubeconfig /root/.kube/.kubeconfig
args:
creates: /root/.kube/.kubeconfig
diff --git a/roles/openshift_master/vars/main.yml b/roles/openshift_master/vars/main.yml
deleted file mode 100644
index 9a8c4bba2..000000000
--- a/roles/openshift_master/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-openshift_host_type: master
diff --git a/roles/openshift_node/README.md b/roles/openshift_node/README.md
index 9210bab16..83359f164 100644
--- a/roles/openshift_node/README.md
+++ b/roles/openshift_node/README.md
@@ -16,20 +16,15 @@ Role Variables
From this role:
| Name | Default value | |
|------------------------------------------|-----------------------|----------------------------------------|
-| openshift_node_manage_service_externally | False | Should the openshift-node role manage the openshift-node service? |
| openshift_node_debug_level | openshift_debug_level | Verbosity of the debug logs for openshift-node |
-| openshift_master_public_ips | UNDEF (Required) | List of the public IPs for the openhift-master hosts |
-| openshift_master_ips | UNDEF (Required) | List of IP addresses for the openshift-master hosts to be used for node -> master communication |
| openshift_registry_url | UNDEF (Optional) | Default docker registry to use |
-| openshift_node_resources | { capacity: { cpu: , memory: } } | Resource specification for this node, cpu is the number of CPUs to advertise and memory is the amount of memory in bytes to advertise. Default values chosen when not set are the number of logical CPUs for the host and 75% of total system memory |
From openshift_common:
| Name | Default Value | |
|-------------------------------|---------------------|---------------------|
| openshift_debug_level | 0 | Global openshift debug log verbosity |
-| openshift_hostname_workaround | True | |
| openshift_public_ip | UNDEF (Required) | Public IP address to use for this host |
-| openshift_hostname | openshift_public_ip if openshift_hostname_workaround else ansible_fqdn | hostname to use for this instance |
+| openshift_hostname | UNDEF (Required) | hostname to use for this instance |
Dependencies
------------
diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml
index c45524f16..df7ec41b6 100644
--- a/roles/openshift_node/defaults/main.yml
+++ b/roles/openshift_node/defaults/main.yml
@@ -1,10 +1,4 @@
---
-openshift_node_manage_service_externally: false
-openshift_node_debug_level: "{{ openshift_debug_level | default(0) }}"
os_firewall_allow:
- service: OpenShift kubelet
port: 10250/tcp
-openshift_node_resources:
- capacity:
- cpu:
- memory:
diff --git a/roles/openshift_node/handlers/main.yml b/roles/openshift_node/handlers/main.yml
index f7aa36d88..ca2992637 100644
--- a/roles/openshift_node/handlers/main.yml
+++ b/roles/openshift_node/handlers/main.yml
@@ -1,4 +1,4 @@
---
- name: restart openshift-node
service: name=openshift-node state=restarted
- when: not openshift_node_manage_service_externally
+ when: not openshift.common.use_openshift_sdn|bool
diff --git a/roles/openshift_node/library/openshift_register_node.py b/roles/openshift_node/library/openshift_register_node.py
deleted file mode 100644
index 63079e59b..000000000
--- a/roles/openshift_node/library/openshift_register_node.py
+++ /dev/null
@@ -1,211 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# vim: expandtab:tabstop=4:shiftwidth=4
-
-import os
-import multiprocessing
-import socket
-from subprocess import check_output, Popen
-
-DOCUMENTATION = '''
----
-module: openshift_register_node
-short_description: This module registers an openshift-node with an openshift-master
-author: Jason DeTiberus
-requirements: [ openshift-node ]
-notes: Node resources can be specified using either the resources option or the following options: cpu, memory
-options:
- name:
- description:
- - id for this node (usually the node fqdn)
- required: true
- hostIP:
- description:
- - ip address for this node
- required: false
- cpu:
- description:
- - number of CPUs for this node
- required: false
- default: number of logical CPUs detected
- memory:
- description:
- - Memory available for this node in bytes
- required: false
- default: 80% MemTotal
- resources:
- description:
- - A json string representing Node resources
- required: false
-'''
-EXAMPLES = '''
-# Minimal node registration
-- openshift_register_node: name=ose3.node.example.com
-
-# Node registration with all options (using cpu and memory options)
-- openshift_register_node:
- name: ose3.node.example.com
- hostIP: 192.168.1.1
- apiVersion: v1beta1
- cpu: 1
- memory: 1073741824
-
-# Node registration with all options (using resources option)
-- openshift_register_node:
- name: ose3.node.example.com
- hostIP: 192.168.1.1
- apiVersion: v1beta1
- resources:
- capacity:
- cpu: 1
- memory: 1073741824
-'''
-
-def main():
- module = AnsibleModule(
- argument_spec = dict(
- name = dict(required = True),
- hostIP = dict(),
- apiVersion = dict(),
- cpu = dict(),
- memory = dict(),
- resources = dict(),
- client_config = dict(),
- client_cluster = dict(default = 'master'),
- client_context = dict(default = 'master'),
- client_user = dict(default = 'admin')
- ),
- mutually_exclusive = [
- ['resources', 'cpu'],
- ['resources', 'memory']
- ],
- supports_check_mode=True
- )
-
- user_has_client_config = os.path.exists(os.path.expanduser('~/.kube/.kubeconfig'))
- if not (user_has_client_config or module.params['client_config']):
- module.fail_json(msg="Could not locate client configuration, "
- "client_config must be specified if "
- "~/.kube/.kubeconfig is not present")
-
- client_opts = []
- if module.params['client_config']:
- client_opts.append("--kubeconfig=%s" % module.params['client_config'])
-
- try:
- output = check_output(["/usr/bin/openshift", "ex", "config", "view",
- "-o", "json"] + client_opts,
- stderr=subprocess.STDOUT)
- except subprocess.CalledProcessError as e:
- module.fail_json(msg="Failed to get client configuration",
- command=e.cmd, returncode=e.returncode, output=e.output)
-
- config = json.loads(output)
- if not (bool(config['clusters']) or bool(config['contexts']) or
- bool(config['current-context']) or bool(config['users'])):
- module.fail_json(msg="Client config missing required values",
- output=output)
-
- client_context = module.params['client_context']
- if client_context:
- config_context = next((context for context in config['contexts']
- if context['name'] == client_context), None)
- if not config_context:
- module.fail_json(msg="Context %s not found in client config" %
- client_context)
- if not config['current-context'] or config['current-context'] != client_context:
- client_opts.append("--context=%s" % client_context)
-
- client_user = module.params['client_user']
- if client_user:
- config_user = next((user for user in config['users']
- if user['name'] == client_user), None)
- if not config_user:
- module.fail_json(msg="User %s not found in client config" %
- client_user)
- if client_user != config_context['context']['user']:
- client_opts.append("--user=%s" % client_user)
-
- client_cluster = module.params['client_cluster']
- if client_cluster:
- config_cluster = next((cluster for cluster in config['clusters']
- if cluster['name'] == client_cluster), None)
- if not client_cluster:
- module.fail_json(msg="Cluster %s not found in client config" %
- client_cluster)
- if client_cluster != config_context['context']['cluster']:
- client_opts.append("--cluster=%s" % client_cluster)
-
- node_def = dict(
- id = module.params['name'],
- kind = 'Node',
- apiVersion = 'v1beta1',
- resources = dict(
- capacity = dict()
- )
- )
-
- for key, value in module.params.iteritems():
- if key in ['cpu', 'memory']:
- node_def['resources']['capacity'][key] = value
- elif key == 'name':
- node_def['id'] = value
- elif key != 'client_config':
- if value:
- node_def[key] = value
-
- if not node_def['resources']['capacity']['cpu']:
- node_def['resources']['capacity']['cpu'] = multiprocessing.cpu_count()
-
- if not node_def['resources']['capacity']['memory']:
- with open('/proc/meminfo', 'r') as mem:
- for line in mem:
- entries = line.split()
- if str(entries.pop(0)) == 'MemTotal:':
- mem_total_kb = int(entries.pop(0))
- mem_capacity = int(mem_total_kb * 1024 * .75)
- node_def['resources']['capacity']['memory'] = mem_capacity
- break
-
- try:
- output = check_output(["/usr/bin/osc", "get", "nodes"] + client_opts,
- stderr=subprocess.STDOUT)
- except subprocess.CalledProcessError as e:
- module.fail_json(msg="Failed to get node list", command=e.cmd,
- returncode=e.returncode, output=e.output)
-
- if re.search(module.params['name'], output, re.MULTILINE):
- module.exit_json(changed=False, node_def=node_def)
- elif module.check_mode:
- module.exit_json(changed=True, node_def=node_def)
-
- config_def = dict(
- metadata = dict(
- name = "add-node-%s" % module.params['name']
- ),
- kind = 'Config',
- apiVersion = 'v1beta1',
- items = [node_def]
- )
-
- p = Popen(["/usr/bin/osc"] + client_opts + ["create", "node"] + ["-f", "-"],
- stdin=subprocess.PIPE, stdout=subprocess.PIPE,
- stderr=subprocess.PIPE, close_fds=True)
- (out, err) = p.communicate(module.jsonify(config_def))
- ret = p.returncode
-
- if ret != 0:
- if re.search("minion \"%s\" already exists" % module.params['name'],
- err):
- module.exit_json(changed=False,
- msg="node definition already exists", config_def=config_def)
- else:
- module.fail_json(msg="Node creation failed.", ret=ret, out=out,
- err=err, config_def=config_def)
-
- module.exit_json(changed=True, out=out, err=err, ret=ret,
- node_def=config_def)
-
-# import module snippets
-from ansible.module_utils.basic import *
-main()
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index 6721c7401..e3c04585b 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -1,27 +1,38 @@
---
+# TODO: allow for overriding default ports where possible
+# TODO: trigger the external service when restart is needed
+
+- name: Set node OpenShift facts
+ openshift_facts:
+ role: 'node'
+ local_facts:
+ debug_level: "{{ openshift_node_debug_level | default(openshift.common.debug_level) }}"
+
+- name: Test if node certs and config exist
+ stat: path={{ item }}
+ failed_when: not result.stat.exists
+ register: result
+ with_items:
+ - "{{ cert_path }}"
+ - "{{ cert_path }}/cert.crt"
+ - "{{ cert_path }}/key.key"
+ - "{{ cert_path }}/.kubeconfig"
+ - "{{ cert_path }}/server.crt"
+ - "{{ cert_path }}/server.key"
+ - "{{ cert_parent_path }}/ca/cert.crt"
+ #- "{{ cert_path }}/node.yaml"
+
- name: Install OpenShift Node package
yum: pkg=openshift-node state=installed
-- local_action: command /usr/bin/mktemp -d /tmp/openshift-ansible-XXXXXXX
- register: mktemp
-
-- name: Retrieve OpenShift Master credentials
- local_action: command /usr/bin/rsync --compress --archive --rsh 'ssh -S none -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null' root@{{ openshift_master_public_ips[0] }}:/var/lib/openshift/openshift.local.certificates/admin/ {{ mktemp.stdout }}
- ignore_errors: yes
-
-- file: path=/var/lib/openshift/openshift.local.certificates/admin state=directory
-
-- name: Store OpenShift Master credentials
- local_action: command /usr/bin/rsync --compress --archive --rsh 'ssh -S none -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null' {{ mktemp.stdout }}/ root@{{ openshift_public_ip }}:/var/lib/openshift/openshift.local.certificates/admin
- ignore_errors: yes
-
-- local_action: file name={{ mktemp.stdout }} state=absent
-
+# --create-certs=false is a temporary workaround until
+# https://github.com/openshift/origin/pull/1361 is merged upstream and it is
+# the default for nodes
- name: Configure OpenShift Node settings
lineinfile:
dest: /etc/sysconfig/openshift-node
regexp: '^OPTIONS='
- line: "OPTIONS=\"--master=https://{{ openshift_master_ips[0] }}:8443 --loglevel={{ openshift_node_debug_level }}\""
+ line: "OPTIONS=\"--hostname={{ openshift.common.hostname }} --loglevel={{ openshift.node.debug_level }} --create-certs=false\""
notify:
- restart openshift-node
@@ -34,45 +45,10 @@
notify:
- restart openshift-node
-- name: Set OpenShift node facts
- include: "{{ role_path | dirname }}/openshift_common/tasks/set_facts.yml"
- facts:
- - section: node
- option: debug_level
- value: "{{ openshift_node_debug_level }}"
- - section: node
- option: public_ip
- value: "{{ openshift_public_ip }}"
- - section: node
- option: externally_managed
- value: "{{ openshift_node_manage_service_externally }}"
-
-# fixme: Once the openshift_cluster playbook is published state should be started
-# Always bounce service to pick up new credentials
- name: Start and enable openshift-node
- service: name=openshift-node enabled=yes state=restarted
- when: not openshift_node_manage_service_externally
+ service: name=openshift-node enabled=yes state=started
+ when: not openshift.common.use_openshift_sdn|bool
- name: Disable openshift-node if openshift-node is managed externally
service: name=openshift-node enabled=false
- when: openshift_node_manage_service_externally
-
-# TODO: create an os_vars role that has generic env related config and move
-# the root kubeconfig setting there, cannot use dependencies to force ordering
-# with openshift_node and openshift_master because the way conditional
-# dependencies work with current ansible would also exclude the
-# openshift_common dependency.
-- name: Create .kube directory
- file:
- path: /root/.kube
- state: directory
- mode: 0700
-- name: Configure root user kubeconfig
- command: cp /var/lib/openshift/openshift.local.certificates/admin/.kubeconfig /root/.kube/.kubeconfig
- args:
- creates: /root/.kube/.kubeconfig
-
-- name: Register node (if not already registered)
- openshift_register_node:
- name: "{{ openshift_hostname }}"
- resources: "{{ openshift_node_resources }}"
+ when: openshift.common.use_openshift_sdn|bool
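Note that `cert_path` and `cert_parent_path`, referenced by the stat loop above, are not defined anywhere in this patch. A plausible definition, consistent with the openshift_register_nodes defaults and the node kubeconfig path used by openshift_facts.py (an assumption, not confirmed by this diff):

    cert_parent_path: /var/lib/openshift    # assumed; matches openshift_cert_dir_parent
    cert_path: "{{ cert_parent_path }}/openshift.local.certificates/node-{{ openshift.common.hostname }}"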
diff --git a/roles/openshift_node/vars/main.yml b/roles/openshift_node/vars/main.yml
deleted file mode 100644
index 9841d52f9..000000000
--- a/roles/openshift_node/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-openshift_host_type: node
diff --git a/roles/openshift_register_nodes/README.md b/roles/openshift_register_nodes/README.md
new file mode 100644
index 000000000..b96faa044
--- /dev/null
+++ b/roles/openshift_register_nodes/README.md
@@ -0,0 +1,34 @@
+OpenShift Register Nodes
+========================
+
+TODO
+
+Requirements
+------------
+
+TODO
+
+Role Variables
+--------------
+
+TODO
+
+Dependencies
+------------
+
+TODO
+
+Example Playbook
+----------------
+
+TODO
+
+License
+-------
+
+Apache License Version 2.0
+
+Author Information
+------------------
+
+Jason DeTiberus (jdetiber@redhat.com)
diff --git a/roles/openshift_register_nodes/defaults/main.yml b/roles/openshift_register_nodes/defaults/main.yml
new file mode 100644
index 000000000..3501e8922
--- /dev/null
+++ b/roles/openshift_register_nodes/defaults/main.yml
@@ -0,0 +1,5 @@
+---
+openshift_kube_api_version: v1beta1
+openshift_cert_dir: openshift.local.certificates
+openshift_cert_dir_parent: /var/lib/openshift
+openshift_cert_dir_abs: "{{ openshift_cert_dir_parent ~ '/' ~ openshift_cert_dir }}"
diff --git a/roles/openshift_register_nodes/library/kubernetes_register_node.py b/roles/openshift_register_nodes/library/kubernetes_register_node.py
new file mode 100755
index 000000000..8ebeb087a
--- /dev/null
+++ b/roles/openshift_register_nodes/library/kubernetes_register_node.py
@@ -0,0 +1,371 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# vim: expandtab:tabstop=4:shiftwidth=4
+
+import os
+import multiprocessing
+import socket
+from subprocess import check_output, Popen
+from decimal import *
+
+DOCUMENTATION = '''
+---
+module: kubernetes_register_node
+short_description: Registers a kubernetes node with a master
+description:
+ - Registers a kubernetes node with a master
+options:
+ name:
+ default: null
+ description:
+ - Identifier for this node (usually the node fqdn).
+ required: true
+    api_version:
+        choices: ['v1beta1', 'v1beta3']
+        default: 'v1beta1'
+        description:
+            - Kubernetes API version to use
+        required: false
+ host_ip:
+ default: null
+ description:
+ - IP Address to associate with the node when registering.
+ Available in the following API versions: v1beta1.
+ required: false
+ hostnames:
+ default: []
+ description:
+ - Valid hostnames for this node. Available in the following API
+ versions: v1beta3.
+ required: false
+ external_ips:
+ default: []
+ description:
+ - External IP Addresses for this node. Available in the following API
+ versions: v1beta3.
+ required: false
+ internal_ips:
+ default: []
+ description:
+ - Internal IP Addresses for this node. Available in the following API
+ versions: v1beta3.
+ required: false
+ cpu:
+ default: null
+ description:
+ - Number of CPUs to allocate for this node. When using the v1beta1
+ API, you must specify the CPU count as a floating point number
+ with no more than 3 decimal places. API version v1beta3 and newer
+ accepts arbitrary float values.
+ required: false
+ memory:
+ default: null
+ description:
+ - Memory available for this node. When using the v1beta1 API, you
+ must specify the memory size in bytes. API version v1beta3 and
+ newer accepts binary SI and decimal SI values.
+ required: false
+'''
+EXAMPLES = '''
+# Minimal node registration
+- kubernetes_register_node: name=ose3.node.example.com
+
+# Node registration using the v1beta1 API, assigning 1 CPU core and 500 MB of
+# memory
+- kubernetes_register_node:
+    name: ose3.node.example.com
+    api_version: v1beta1
+    host_ip: 192.168.1.1
+ cpu: 1
+ memory: 500000000
+
+# Node registration using the v1beta3 API, setting an alternate hostname,
+# internalIP, externalIP and assigning 3.5 CPU cores and 1 TiB of Memory
+- kubernetes_register_node:
+ name: ose3.node.example.com
+ api_version: v1beta3
+ external_ips: ['192.168.1.5']
+ internal_ips: ['10.0.0.5']
+ hostnames: ['ose2.node.internal.local']
+ cpu: 3.5
+ memory: 1Ti
+'''
+
+
+class ClientConfigException(Exception):
+ pass
+
+class ClientConfig:
+ def __init__(self, client_opts, module):
+ _, output, error = module.run_command(["/usr/bin/openshift", "ex",
+ "config", "view", "-o",
+ "json"] + client_opts,
+ check_rc = True)
+ self.config = json.loads(output)
+
+ if not (bool(self.config['clusters']) or
+ bool(self.config['contexts']) or
+ bool(self.config['current-context']) or
+ bool(self.config['users'])):
+            raise ClientConfigException("Client config missing required "
+                                        "values, output: %s" % output)
+
+ def current_context(self):
+ return self.config['current-context']
+
+ def section_has_value(self, section_name, value):
+ section = self.config[section_name]
+ if isinstance(section, dict):
+ return value in section
+ else:
+ val = next((item for item in section
+ if item['name'] == value), None)
+ return val is not None
+
+ def has_context(self, context):
+ return self.section_has_value('contexts', context)
+
+ def has_user(self, user):
+ return self.section_has_value('users', user)
+
+ def has_cluster(self, cluster):
+ return self.section_has_value('clusters', cluster)
+
+ def get_value_for_context(self, context, attribute):
+ contexts = self.config['contexts']
+ if isinstance(contexts, dict):
+ return contexts[context][attribute]
+ else:
+ return next((c['context'][attribute] for c in contexts
+ if c['name'] == context), None)
+
+ def get_user_for_context(self, context):
+ return self.get_value_for_context(context, 'user')
+
+ def get_cluster_for_context(self, context):
+ return self.get_value_for_context(context, 'cluster')
+
+class Util:
+ @staticmethod
+ def remove_empty_elements(mapping):
+ if isinstance(mapping, dict):
+ m = mapping.copy()
+ for key, val in mapping.iteritems():
+ if not val:
+ del m[key]
+ return m
+ else:
+ return mapping
+
+class NodeResources:
+ def __init__(self, version, cpu=None, memory=None):
+ if version == 'v1beta1':
+ self.resources = dict(capacity=dict())
+ self.resources['capacity']['cpu'] = cpu
+ self.resources['capacity']['memory'] = memory
+
+ def get_resources(self):
+ return Util.remove_empty_elements(self.resources)
+
+class NodeSpec:
+ def __init__(self, version, cpu=None, memory=None, cidr=None, externalID=None):
+ if version == 'v1beta3':
+ self.spec = dict(podCIDR=cidr, externalID=externalID,
+ capacity=dict())
+ self.spec['capacity']['cpu'] = cpu
+ self.spec['capacity']['memory'] = memory
+
+ def get_spec(self):
+ return Util.remove_empty_elements(self.spec)
+
+class NodeStatus:
+ def addAddresses(self, addressType, addresses):
+ addressList = []
+ for address in addresses:
+ addressList.append(dict(type=addressType, address=address))
+ return addressList
+
+ def __init__(self, version, externalIPs = [], internalIPs = [],
+ hostnames = []):
+ if version == 'v1beta3':
+            self.status = dict(addresses = self.addAddresses('ExternalIP',
+                                                             externalIPs) +
+                                           self.addAddresses('InternalIP',
+                                                             internalIPs) +
+                                           self.addAddresses('Hostname',
+                                                             hostnames))
+
+ def get_status(self):
+ return Util.remove_empty_elements(self.status)
+
+class Node:
+ def __init__(self, module, client_opts, version='v1beta1', name=None,
+ hostIP = None, hostnames=[], externalIPs=[], internalIPs=[],
+ cpu=None, memory=None, labels=dict(), annotations=dict(),
+ podCIDR=None, externalID=None):
+ self.module = module
+ self.client_opts = client_opts
+ if version == 'v1beta1':
+ self.node = dict(id = name,
+ kind = 'Node',
+ apiVersion = version,
+ hostIP = hostIP,
+ resources = NodeResources(version, cpu, memory),
+ cidr = podCIDR,
+ labels = labels,
+ annotations = annotations,
+ externalID = externalID
+ )
+ elif version == 'v1beta3':
+ metadata = dict(name = name,
+ labels = labels,
+ annotations = annotations
+ )
+ self.node = dict(kind = 'Node',
+ apiVersion = version,
+ metadata = metadata,
+ spec = NodeSpec(version, cpu, memory, podCIDR,
+ externalID),
+ status = NodeStatus(version, externalIPs,
+ internalIPs, hostnames),
+ )
+
+ def get_name(self):
+ if self.node['apiVersion'] == 'v1beta1':
+ return self.node['id']
+ elif self.node['apiVersion'] == 'v1beta3':
+            return self.node['metadata']['name']
+
+ def get_node(self):
+ node = self.node.copy()
+ if self.node['apiVersion'] == 'v1beta1':
+ node['resources'] = self.node['resources'].get_resources()
+ elif self.node['apiVersion'] == 'v1beta3':
+ node['spec'] = self.node['spec'].get_spec()
+ node['status'] = self.node['status'].get_status()
+ return Util.remove_empty_elements(node)
+
+ def exists(self):
+ _, output, error = self.module.run_command(["/usr/bin/osc", "get",
+ "nodes"] + self.client_opts,
+ check_rc = True)
+        if re.search(re.escape(self.module.params['name']), output,
+                     re.MULTILINE):
+ return True
+ return False
+
+ def create(self):
+ cmd = ['/usr/bin/osc'] + self.client_opts + ['create', 'node', '-f', '-']
+ rc, output, error = self.module.run_command(cmd,
+ data=self.module.jsonify(self.get_node()))
+ if rc != 0:
+ if re.search("minion \"%s\" already exists" % self.get_name(),
+ error):
+ self.module.exit_json(changed=False,
+ msg="node definition already exists",
+ node=self.get_node())
+ else:
+ self.module.fail_json(msg="Node creation failed.", rc=rc,
+ output=output, error=error,
+ node=self.get_node())
+ else:
+ return True
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ name = dict(required = True, type = 'str'),
+ host_ip = dict(type = 'str'),
+ hostnames = dict(type = 'list', default = []),
+ external_ips = dict(type = 'list', default = []),
+ internal_ips = dict(type = 'list', default = []),
+ api_version = dict(type = 'str', default = 'v1beta1', # TODO: after kube rebase, we can default to v1beta3
+ choices = ['v1beta1', 'v1beta3']),
+ cpu = dict(type = 'str'),
+ memory = dict(type = 'str'),
+            labels = dict(type = 'dict', default = {}), # TODO: needs documentation
+            annotations = dict(type = 'dict', default = {}), # TODO: needs documentation
+            pod_cidr = dict(type = 'str'), # TODO: needs documentation
+            external_id = dict(type = 'str'), # TODO: needs documentation
+            client_config = dict(type = 'str'), # TODO: needs documentation
+            client_cluster = dict(type = 'str', default = 'master'), # TODO: needs documentation
+            client_context = dict(type = 'str', default = 'master'), # TODO: needs documentation
+            client_user = dict(type = 'str', default = 'admin') # TODO: needs documentation
+ ),
+ mutually_exclusive = [
+ ['host_ip', 'external_ips'],
+ ['host_ip', 'internal_ips'],
+ ['host_ip', 'hostnames'],
+ ],
+ supports_check_mode=True
+ )
+
+ user_has_client_config = os.path.exists(os.path.expanduser('~/.kube/.kubeconfig'))
+ if not (user_has_client_config or module.params['client_config']):
+ module.fail_json(msg="Could not locate client configuration, "
+ "client_config must be specified if "
+ "~/.kube/.kubeconfig is not present")
+
+ client_opts = []
+ if module.params['client_config']:
+ client_opts.append("--kubeconfig=%s" % module.params['client_config'])
+
+ try:
+ config = ClientConfig(client_opts, module)
+ except ClientConfigException as e:
+ module.fail_json(msg="Failed to get client configuration", exception=e)
+
+ client_context = module.params['client_context']
+ if config.has_context(client_context):
+ if client_context != config.current_context():
+ client_opts.append("--context=%s" % client_context)
+ else:
+ module.fail_json(msg="Context %s not found in client config" %
+ client_context)
+
+ client_user = module.params['client_user']
+ if config.has_user(client_user):
+ if client_user != config.get_user_for_context(client_context):
+ client_opts.append("--user=%s" % client_user)
+ else:
+ module.fail_json(msg="User %s not found in client config" %
+ client_user)
+
+ client_cluster = module.params['client_cluster']
+ if config.has_cluster(client_cluster):
+        if client_cluster != config.get_cluster_for_context(client_context):
+ client_opts.append("--cluster=%s" % client_cluster)
+ else:
+ module.fail_json(msg="Cluster %s not found in client config" %
+ client_cluster)
+
+ # TODO: provide sane defaults for some (like hostname, externalIP,
+ # internalIP, etc)
+ node = Node(module, client_opts, module.params['api_version'],
+ module.params['name'], module.params['host_ip'],
+ module.params['hostnames'], module.params['external_ips'],
+ module.params['internal_ips'], module.params['cpu'],
+ module.params['memory'], module.params['labels'],
+ module.params['annotations'], module.params['pod_cidr'],
+ module.params['external_id'])
+
+ # TODO: attempt to support changing node settings where possible and/or
+ # modifying node resources
+ if node.exists():
+ module.exit_json(changed=False, node=node.get_node())
+ elif module.check_mode:
+ module.exit_json(changed=True, node=node.get_node())
+ else:
+ if node.create():
+ module.exit_json(changed=True,
+ msg="Node created successfully",
+ node=node.get_node())
+ else:
+ module.fail_json(msg="Unknown error creating node",
+ node=node.get_node())
+
+
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/roles/openshift_register_nodes/meta/main.yml b/roles/openshift_register_nodes/meta/main.yml
new file mode 100644
index 000000000..e40a152c1
--- /dev/null
+++ b/roles/openshift_register_nodes/meta/main.yml
@@ -0,0 +1,17 @@
+---
+galaxy_info:
+ author: Jason DeTiberus
+  description: OpenShift Register Nodes
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 1.8
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+ - system
+dependencies:
+- { role: openshift_facts }
+
diff --git a/roles/openshift_register_nodes/tasks/main.yml b/roles/openshift_register_nodes/tasks/main.yml
new file mode 100644
index 000000000..7319b88b1
--- /dev/null
+++ b/roles/openshift_register_nodes/tasks/main.yml
@@ -0,0 +1,67 @@
+---
+# TODO: support new create-config command to generate node certs and config
+# TODO: recreate master/node configs if settings that affect the configs
+# change (hostname, public_hostname, ip, public_ip, etc)
+
+# TODO: create a failed_when condition
+- name: Create node server certificates
+ command: >
+ /usr/bin/openshift admin create-server-cert
+ --overwrite=false
+ --cert={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}/server.crt
+ --key={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}/server.key
+ --hostnames={{ [item.openshift.common.hostname,
+ item.openshift.common.public_hostname]|unique|join(",") }}
+ args:
+ chdir: "{{ openshift_cert_dir_parent }}"
+ creates: "{{ openshift_cert_dir_abs }}/node-{{ item.openshift.common.hostname }}/server.crt"
+ with_items: openshift_nodes
+ register: server_cert_result
+
+# TODO: create a failed_when condition
+- name: Create node client certificates
+ command: >
+ /usr/bin/openshift admin create-node-cert
+ --overwrite=false
+ --cert={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}/cert.crt
+ --key={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}/key.key
+ --node-name={{ item.openshift.common.hostname }}
+ args:
+ chdir: "{{ openshift_cert_dir_parent }}"
+ creates: "{{ openshift_cert_dir_abs }}/node-{{ item.openshift.common.hostname }}/cert.crt"
+ with_items: openshift_nodes
+ register: node_cert_result
+
+# TODO: create a failed_when condition
+- name: Create kubeconfigs for nodes
+ command: >
+ /usr/bin/openshift admin create-kubeconfig
+ --client-certificate={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}/cert.crt
+ --client-key={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}/key.key
+ --kubeconfig={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}/.kubeconfig
+ --master={{ openshift.master.api_url }}
+ --public-master={{ openshift.master.public_api_url }}
+ args:
+ chdir: "{{ openshift_cert_dir_parent }}"
+ creates: "{{ openshift_cert_dir_abs }}/node-{{ item.openshift.common.hostname }}/.kubeconfig"
+ with_items: openshift_nodes
+ register: kubeconfig_result
+
+- name: Register unregistered nodes
+ kubernetes_register_node:
+ client_user: openshift-client
+ name: "{{ item.openshift.common.hostname }}"
+ api_version: "{{ openshift_kube_api_version }}"
+ cpu: "{{ item.openshift.node.resources_cpu | default(None) }}"
+ memory: "{{ item.openshift.node.resources_memory | default(None) }}"
+ pod_cidr: "{{ item.openshift.node.pod_cidr | default(None) }}"
+ host_ip: "{{ item.openshift.common.ip }}"
+ labels: "{{ item.openshift.node.labels | default({}) }}"
+ annotations: "{{ item.openshift.node.annotations | default({}) }}"
+ external_id: "{{ item.openshift.node.external_id }}"
+ # TODO: support customizing other attributes such as: client_config,
+ # client_cluster, client_context, client_user
+ # TODO: update for v1beta3 changes after rebase: hostnames, external_ips,
+ # internal_ips, external_id
+ with_items: openshift_nodes
+ register: register_result
diff --git a/roles/openshift_repos/README.md b/roles/openshift_repos/README.md
new file mode 100644
index 000000000..6713e11fc
--- /dev/null
+++ b/roles/openshift_repos/README.md
@@ -0,0 +1,38 @@
+OpenShift Repos
+================
+
+Configures repositories for an OpenShift installation
+
+Requirements
+------------
+
+A RHEL 7.1 host pre-configured with access to the rhel-7-server-rpms,
+rhel-7-server-extras-rpms, and rhel-7-server-ose-beta-rpms repos.
+
+Role Variables
+--------------
+
+| Name | Default value | |
+|-------------------------------|---------------|----------------------------------------------|
+| openshift_deployment_type     | online        | Possible values: enterprise, origin, online  |
+| openshift_additional_repos | {} | TODO |
+
+Dependencies
+------------
+
+None.
+
+Example Playbook
+----------------
+
+TODO
+
+License
+-------
+
+Apache License, Version 2.0
+
+Author Information
+------------------
+
+TODO
diff --git a/roles/repos/defaults/main.yaml b/roles/openshift_repos/defaults/main.yaml
index 6fe2bf621..1730207f4 100644
--- a/roles/repos/defaults/main.yaml
+++ b/roles/openshift_repos/defaults/main.yaml
@@ -1,5 +1,7 @@
---
# TODO: once we are able to configure/deploy origin using the openshift roles,
# then we should default to origin
+
+# TODO: push the defaulting of these values to the openshift_facts module
openshift_deployment_type: online
openshift_additional_repos: {}
diff --git a/roles/repos/files/online/RPM-GPG-KEY-redhat-beta b/roles/openshift_repos/files/online/RPM-GPG-KEY-redhat-beta
index 7b40671a4..7b40671a4 100644
--- a/roles/repos/files/online/RPM-GPG-KEY-redhat-beta
+++ b/roles/openshift_repos/files/online/RPM-GPG-KEY-redhat-beta
diff --git a/roles/repos/files/online/RPM-GPG-KEY-redhat-release b/roles/openshift_repos/files/online/RPM-GPG-KEY-redhat-release
index 0f83b622d..0f83b622d 100644
--- a/roles/repos/files/online/RPM-GPG-KEY-redhat-release
+++ b/roles/openshift_repos/files/online/RPM-GPG-KEY-redhat-release
diff --git a/roles/repos/files/online/epel7-kubernetes.repo b/roles/openshift_repos/files/online/epel7-kubernetes.repo
index 1deae2939..1deae2939 100644
--- a/roles/repos/files/online/epel7-kubernetes.repo
+++ b/roles/openshift_repos/files/online/epel7-kubernetes.repo
diff --git a/roles/repos/files/online/epel7-openshift.repo b/roles/openshift_repos/files/online/epel7-openshift.repo
index c7629872d..c7629872d 100644
--- a/roles/repos/files/online/epel7-openshift.repo
+++ b/roles/openshift_repos/files/online/epel7-openshift.repo
diff --git a/roles/repos/files/online/oso-rhui-rhel-7-extras.repo b/roles/openshift_repos/files/online/oso-rhui-rhel-7-extras.repo
index cfe41f691..cfe41f691 100644
--- a/roles/repos/files/online/oso-rhui-rhel-7-extras.repo
+++ b/roles/openshift_repos/files/online/oso-rhui-rhel-7-extras.repo
diff --git a/roles/repos/files/online/oso-rhui-rhel-7-server.repo b/roles/openshift_repos/files/online/oso-rhui-rhel-7-server.repo
index ddc93193d..ddc93193d 100644
--- a/roles/repos/files/online/oso-rhui-rhel-7-server.repo
+++ b/roles/openshift_repos/files/online/oso-rhui-rhel-7-server.repo
diff --git a/roles/repos/files/online/rhel-7-libra-candidate.repo b/roles/openshift_repos/files/online/rhel-7-libra-candidate.repo
index b4215679f..b4215679f 100644
--- a/roles/repos/files/online/rhel-7-libra-candidate.repo
+++ b/roles/openshift_repos/files/online/rhel-7-libra-candidate.repo
diff --git a/roles/openshift_repos/meta/main.yml b/roles/openshift_repos/meta/main.yml
new file mode 100644
index 000000000..0558b822c
--- /dev/null
+++ b/roles/openshift_repos/meta/main.yml
@@ -0,0 +1,15 @@
+---
+galaxy_info:
+ author: TODO
+ description: OpenShift Repositories
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 1.7
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+dependencies:
+- { role: openshift_facts }
diff --git a/roles/repos/tasks/main.yaml b/roles/openshift_repos/tasks/main.yaml
index 43786da41..bb1551d37 100644
--- a/roles/repos/tasks/main.yaml
+++ b/roles/openshift_repos/tasks/main.yaml
@@ -1,6 +1,12 @@
---
# TODO: Add flag for enabling EPEL repo, default to false
+# TODO: Add subscription-management config, with parameters
+# for username, password, poolid(name), and official repos to
+# enable/disable. Might need to make a module that extends the
+# subscription management module to take a poolid and enable/disable the
+# proper repos correctly.
+
- assert:
that: openshift_deployment_type in known_openshift_deployment_types
@@ -8,6 +14,11 @@
- fail: msg="OpenShift Origin support is not currently enabled"
when: openshift_deployment_type == 'origin'
+- name: Ensure libselinux-python is installed
+ yum:
+ pkg: libselinux-python
+ state: present
+
- name: Create any additional repos that are defined
template:
src: yum_repo.j2
diff --git a/roles/repos/templates/yum_repo.j2 b/roles/openshift_repos/templates/yum_repo.j2
index 7ea2c7460..7ea2c7460 100644
--- a/roles/repos/templates/yum_repo.j2
+++ b/roles/openshift_repos/templates/yum_repo.j2
diff --git a/roles/repos/vars/main.yml b/roles/openshift_repos/vars/main.yml
index bbb4c77e7..bbb4c77e7 100644
--- a/roles/repos/vars/main.yml
+++ b/roles/openshift_repos/vars/main.yml
diff --git a/roles/openshift_sdn_master/defaults/main.yml b/roles/openshift_sdn_master/defaults/main.yml
deleted file mode 100644
index da7655546..000000000
--- a/roles/openshift_sdn_master/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-openshift_sdn_master_debug_level: "{{ openshift_debug_level | default(0) }}"
diff --git a/roles/openshift_sdn_master/meta/main.yml b/roles/openshift_sdn_master/meta/main.yml
index e6e5514d1..5de32cc13 100644
--- a/roles/openshift_sdn_master/meta/main.yml
+++ b/roles/openshift_sdn_master/meta/main.yml
@@ -11,4 +11,5 @@ galaxy_info:
- 7
categories:
- cloud
-dependencies: []
+dependencies:
+- { role: openshift_common }
diff --git a/roles/openshift_sdn_master/tasks/main.yml b/roles/openshift_sdn_master/tasks/main.yml
index e1761afdc..f2d61043b 100644
--- a/roles/openshift_sdn_master/tasks/main.yml
+++ b/roles/openshift_sdn_master/tasks/main.yml
@@ -1,4 +1,13 @@
---
+# TODO: add task to set the sdn subnet if openshift-sdn-master hasn't been
+# started yet
+
+- name: Set master sdn OpenShift facts
+ openshift_facts:
+ role: 'master_sdn'
+ local_facts:
+ debug_level: "{{ openshift_master_sdn_debug_level | default(openshift.common.debug_level) }}"
+
- name: Install openshift-sdn-master
yum:
pkg: openshift-sdn-master
@@ -8,17 +17,10 @@
lineinfile:
dest: /etc/sysconfig/openshift-sdn-master
regexp: '^OPTIONS='
- line: "OPTIONS=\"-v={{ openshift_sdn_master_debug_level }}\""
+ line: "OPTIONS=\"-v={{ openshift.master_sdn.debug_level }}\""
notify:
- restart openshift-sdn-master
-- name: Set openshift-sdn-master facts
- include: "{{ role_path | dirname }}/openshift_common/tasks/set_facts.yml"
- facts:
- - section: sdn-master
- option: debug_level
- value: "{{ openshift_sdn_master_debug_level }}"
-
- name: Enable openshift-sdn-master
service:
name: openshift-sdn-master
diff --git a/roles/openshift_sdn_node/README.md b/roles/openshift_sdn_node/README.md
index 294550219..e6b6a9503 100644
--- a/roles/openshift_sdn_node/README.md
+++ b/roles/openshift_sdn_node/README.md
@@ -17,19 +17,12 @@ From this role:
| openshift_sdn_node_debug_level | openshift_debug_level | Verbosity of the debug logs for openshift-sdn-node |
-From openshift_node:
-| Name | Default value | |
-|-----------------------|------------------|--------------------------------------|
-| openshift_master_ips | UNDEF (Required) | List of IP addresses for the openshift-master hosts to be used for node -> master communication |
-
-
From openshift_common:
| Name | Default value | |
|-------------------------------|---------------------|----------------------------------------|
| openshift_debug_level | 0 | Global openshift debug log verbosity |
-| openshift_hostname_workaround | True | |
| openshift_public_ip | UNDEF (Required) | Public IP address to use for this host |
-| openshift_hostname | openshift_public_ip if openshift_hostname_workaround else ansible_fqdn | hostname to use for this instance |
+| openshift_hostname | UNDEF (Required) | hostname to use for this instance |
Dependencies
------------
diff --git a/roles/openshift_sdn_node/defaults/main.yml b/roles/openshift_sdn_node/defaults/main.yml
deleted file mode 100644
index 9612d9d91..000000000
--- a/roles/openshift_sdn_node/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-openshift_sdn_node_debug_level: "{{ openshift_debug_level | default(0) }}"
diff --git a/roles/openshift_sdn_node/meta/main.yml b/roles/openshift_sdn_node/meta/main.yml
index ab45ff51e..ffe10f836 100644
--- a/roles/openshift_sdn_node/meta/main.yml
+++ b/roles/openshift_sdn_node/meta/main.yml
@@ -11,4 +11,5 @@ galaxy_info:
- 7
categories:
- cloud
-dependencies: []
+dependencies:
+- { role: openshift_common }
diff --git a/roles/openshift_sdn_node/tasks/main.yml b/roles/openshift_sdn_node/tasks/main.yml
index ff05a6972..729c28879 100644
--- a/roles/openshift_sdn_node/tasks/main.yml
+++ b/roles/openshift_sdn_node/tasks/main.yml
@@ -1,4 +1,10 @@
---
+- name: Set node sdn OpenShift facts
+ openshift_facts:
+ role: 'node_sdn'
+ local_facts:
+ debug_level: "{{ openshift_node_sdn_debug_level | default(openshift.common.debug_level) }}"
+
- name: Install openshift-sdn-node
yum:
pkg: openshift-sdn-node
@@ -14,28 +20,19 @@
backrefs: yes
with_items:
- regex: '^(OPTIONS=)'
- line: '\1"-v={{ openshift_sdn_node_debug_level }} -hostname={{ openshift_hostname }}"'
+ line: '\1"-v={{ openshift.node_sdn.debug_level }} -hostname={{ openshift.common.hostname }}"'
- regex: '^(MASTER_URL=)'
- line: '\1"http://{{ openshift_master_ips | first }}:4001"'
+ line: '\1"{{ openshift_sdn_master_url }}"'
- regex: '^(MINION_IP=)'
- line: '\1"{{ openshift_public_ip }}"'
+ line: '\1"{{ openshift.common.ip }}"'
# TODO lock down the insecure-registry config to a more sane value than
# 0.0.0.0/0
- regex: '^(DOCKER_OPTIONS=)'
line: '\1"--insecure-registry=0.0.0.0/0 -b=lbr0 --mtu=1450 --selinux-enabled"'
notify: restart openshift-sdn-node
-- name: Set openshift-sdn-node facts
- include: "{{ role_path | dirname }}/openshift_common/tasks/set_facts.yml"
- facts:
- - section: sdn-node
- option: debug_level
- value: "{{ openshift_sdn_node_debug_level }}"
-
-# fixme: Once the openshift_cluster playbook is published state should be started
-# Always bounce service to pick up new credentials
- name: Start and enable openshift-sdn-node
service:
name: openshift-sdn-node
enabled: yes
- state: restarted
+ state: started
diff --git a/roles/os_env_extras_node/tasks/main.yml b/roles/os_env_extras_node/tasks/main.yml
new file mode 100644
index 000000000..208065df2
--- /dev/null
+++ b/roles/os_env_extras_node/tasks/main.yml
@@ -0,0 +1,5 @@
+---
+# The origin rpm ships with instructions on how to set up origin
+# properly; the following steps come from there.
+- name: Change root to be in the Docker group
+ user: name=root groups=dockerroot append=yes
diff --git a/roles/os_firewall/library/os_firewall_manage_iptables.py b/roles/os_firewall/library/os_firewall_manage_iptables.py
index fef710055..90588d2ae 100644..100755
--- a/roles/os_firewall/library/os_firewall_manage_iptables.py
+++ b/roles/os_firewall/library/os_firewall_manage_iptables.py
@@ -1,5 +1,6 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
+# vim: expandtab:tabstop=4:shiftwidth=4
from subprocess import call, check_output
@@ -51,11 +52,13 @@ class IpTablesCreateJumpRuleError(IpTablesError):
# exception was thrown later. for example, when the chain is created
# successfully, but the add/remove rule fails.
class IpTablesManager:
- def __init__(self, module, ip_version, check_mode, chain):
+ def __init__(self, module):
self.module = module
- self.ip_version = ip_version
- self.check_mode = check_mode
- self.chain = chain
+ self.ip_version = module.params['ip_version']
+ self.check_mode = module.check_mode
+ self.chain = module.params['chain']
+ self.create_jump_rule = module.params['create_jump_rule']
+ self.jump_rule_chain = module.params['jump_rule_chain']
self.cmd = self.gen_cmd()
self.save_cmd = self.gen_save_cmd()
self.output = []
@@ -70,13 +73,16 @@ class IpTablesManager:
msg="Failed to save iptables rules",
cmd=e.cmd, exit_code=e.returncode, output=e.output)
+ def verify_chain(self):
+ if not self.chain_exists():
+ self.create_chain()
+ if self.create_jump_rule and not self.jump_rule_exists():
+ self.create_jump()
+
def add_rule(self, port, proto):
rule = self.gen_rule(port, proto)
if not self.rule_exists(rule):
- if not self.chain_exists():
- self.create_chain()
- if not self.jump_rule_exists():
- self.create_jump_rule()
+ self.verify_chain()
if self.check_mode:
self.changed = True
@@ -121,13 +127,13 @@ class IpTablesManager:
return [self.chain, '-p', proto, '-m', 'state', '--state', 'NEW',
'-m', proto, '--dport', str(port), '-j', 'ACCEPT']
- def create_jump_rule(self):
+ def create_jump(self):
if self.check_mode:
self.changed = True
self.output.append("Create jump rule for chain %s" % self.chain)
else:
try:
- cmd = self.cmd + ['-L', 'INPUT', '--line-numbers']
+ cmd = self.cmd + ['-L', self.jump_rule_chain, '--line-numbers']
output = check_output(cmd, stderr=subprocess.STDOUT)
# break the input rules into rows and columns
@@ -144,11 +150,11 @@ class IpTablesManager:
continue
last_rule_target = rule[1]
- # Raise an exception if we do not find a valid INPUT rule
+ # Raise an exception if we do not find a valid rule
if not last_rule_num or not last_rule_target:
raise IpTablesCreateJumpRuleError(
chain=self.chain,
- msg="Failed to find existing INPUT rules",
+ msg="Failed to find existing %s rules" % self.jump_rule_chain,
cmd=None, exit_code=None, output=None)
# Naively assume that if the last row is a REJECT rule, then
@@ -156,19 +162,20 @@ class IpTablesManager:
# assume that we can just append the rule.
if last_rule_target == 'REJECT':
# insert rule
- cmd = self.cmd + ['-I', 'INPUT', str(last_rule_num)]
+ cmd = self.cmd + ['-I', self.jump_rule_chain, str(last_rule_num)]
else:
# append rule
- cmd = self.cmd + ['-A', 'INPUT']
+ cmd = self.cmd + ['-A', self.jump_rule_chain]
cmd += ['-j', self.chain]
output = check_output(cmd, stderr=subprocess.STDOUT)
self.changed = True
self.output.append(output)
+ self.save()
except subprocess.CalledProcessError as e:
if '--line-numbers' in e.cmd:
raise IpTablesCreateJumpRuleError(
chain=self.chain,
- msg="Failed to query existing INPUT rules to "
+ msg="Failed to query existing %s rules to " % self.jump_rule_chain +
"determine jump rule location",
cmd=e.cmd, exit_code=e.returncode,
output=e.output)
@@ -192,6 +199,7 @@ class IpTablesManager:
self.changed = True
self.output.append("Successfully created chain %s" %
self.chain)
+ self.save()
except subprocess.CalledProcessError as e:
raise IpTablesCreateChainError(
chain=self.chain,
@@ -200,7 +208,7 @@ class IpTablesManager:
)
def jump_rule_exists(self):
- cmd = self.cmd + ['-C', 'INPUT', '-j', self.chain]
+ cmd = self.cmd + ['-C', self.jump_rule_chain, '-j', self.chain]
return True if subprocess.call(cmd) == 0 else False
def chain_exists(self):
@@ -220,9 +228,12 @@ def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True),
- action=dict(required=True, choices=['add', 'remove']),
- protocol=dict(required=True, choices=['tcp', 'udp']),
- port=dict(required=True, type='int'),
+ action=dict(required=True, choices=['add', 'remove', 'verify_chain']),
+ chain=dict(required=False, default='OS_FIREWALL_ALLOW'),
+ create_jump_rule=dict(required=False, type='bool', default=True),
+ jump_rule_chain=dict(required=False, default='INPUT'),
+ protocol=dict(required=False, choices=['tcp', 'udp']),
+ port=dict(required=False, type='int'),
ip_version=dict(required=False, default='ipv4',
choices=['ipv4', 'ipv6']),
),
@@ -232,16 +243,24 @@ def main():
action = module.params['action']
protocol = module.params['protocol']
port = module.params['port']
- ip_version = module.params['ip_version']
- chain = 'OS_FIREWALL_ALLOW'
- iptables_manager = IpTablesManager(module, ip_version, module.check_mode, chain)
+ if action in ['add', 'remove']:
+ if not protocol:
+ error = "protocol is required when action is %s" % action
+ module.fail_json(msg=error)
+ if not port:
+ error = "port is required when action is %s" % action
+ module.fail_json(msg=error)
+
+ iptables_manager = IpTablesManager(module)
try:
if action == 'add':
iptables_manager.add_rule(port, protocol)
elif action == 'remove':
iptables_manager.remove_rule(port, protocol)
+ elif action == 'verify_chain':
+ iptables_manager.verify_chain()
except IpTablesError as e:
module.fail_json(msg=e.msg)
diff --git a/roles/os_firewall/meta/main.yml b/roles/os_firewall/meta/main.yml
index 7a8cef6c5..8592371e8 100644
--- a/roles/os_firewall/meta/main.yml
+++ b/roles/os_firewall/meta/main.yml
@@ -1,3 +1,4 @@
+---
galaxy_info:
author: Jason DeTiberus
description: os_firewall
diff --git a/roles/os_firewall/tasks/firewall/firewalld.yml b/roles/os_firewall/tasks/firewall/firewalld.yml
index 469cfab6f..b6bddd5c5 100644
--- a/roles/os_firewall/tasks/firewall/firewalld.yml
+++ b/roles/os_firewall/tasks/firewall/firewalld.yml
@@ -3,6 +3,7 @@
yum:
name: firewalld
state: present
+ register: install_result
- name: Check if iptables-services is installed
command: rpm -q iptables-services
@@ -20,6 +21,10 @@
- ip6tables
when: pkg_check.rc == 0
+- name: Reload systemd units
+ command: systemctl daemon-reload
+ when: install_result | changed
+
- name: Start and enable firewalld service
service:
name: firewalld
diff --git a/roles/os_firewall/tasks/firewall/iptables.yml b/roles/os_firewall/tasks/firewall/iptables.yml
index 87e77c083..7b5c00a9b 100644
--- a/roles/os_firewall/tasks/firewall/iptables.yml
+++ b/roles/os_firewall/tasks/firewall/iptables.yml
@@ -6,6 +6,7 @@
with_items:
- iptables
- iptables-services
+ register: install_result
- name: Check if firewalld is installed
command: rpm -q firewalld
@@ -20,14 +21,15 @@
enabled: no
when: pkg_check.rc == 0
-- name: Start and enable iptables services
+- name: Reload systemd units
+ command: systemctl daemon-reload
+ when: install_result | changed
+
+- name: Start and enable iptables service
service:
- name: "{{ item }}"
+ name: iptables
state: started
enabled: yes
- with_items:
- - iptables
- - ip6tables
register: result
- name: need to pause here, otherwise the iptables service starting can sometimes cause ssh to fail
diff --git a/roles/os_update_latest/tasks/main.yml b/roles/os_update_latest/tasks/main.yml
new file mode 100644
index 000000000..4a2c3d47a
--- /dev/null
+++ b/roles/os_update_latest/tasks/main.yml
@@ -0,0 +1,3 @@
+---
+- name: Update all packages
+ yum: name=* state=latest
diff --git a/roles/os_zabbix/library/zbxapi.py b/roles/os_zabbix/library/zbxapi.py
new file mode 100755
index 000000000..f4f52909b
--- /dev/null
+++ b/roles/os_zabbix/library/zbxapi.py
@@ -0,0 +1,273 @@
+#!/usr/bin/env python
+
+# Copyright 2015 Red Hat Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Purpose: An ansible module to communicate with zabbix.
+#
+
+import json
+import httplib2
+import sys
+import os
+import re
+
+class ZabbixAPI(object):
+ '''
+ ZabbixAPI class
+ '''
+ classes = {
+ 'Action': ['create', 'delete', 'get', 'update'],
+ 'Alert': ['get'],
+ 'Application': ['create', 'delete', 'get', 'massadd', 'update'],
+ 'Configuration': ['export', 'import'],
+ 'Dcheck': ['get'],
+ 'Dhost': ['get'],
+ 'Drule': ['copy', 'create', 'delete', 'get', 'isreadable', 'iswritable', 'update'],
+ 'Dservice': ['get'],
+ 'Event': ['acknowledge', 'get'],
+ 'Graph': ['create', 'delete', 'get', 'update'],
+ 'Graphitem': ['get'],
+ 'Graphprototype': ['create', 'delete', 'get', 'update'],
+ 'History': ['get'],
+ 'Hostgroup': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'massadd', 'massremove', 'massupdate', 'update'],
+ 'Hostinterface': ['create', 'delete', 'get', 'massadd', 'massremove', 'replacehostinterfaces', 'update'],
+ 'Host': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'massadd', 'massremove', 'massupdate', 'update'],
+ 'Hostprototype': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'update'],
+ 'Httptest': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'update'],
+ 'Iconmap': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'update'],
+ 'Image': ['create', 'delete', 'get', 'update'],
+ 'Item': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'update'],
+ 'Itemprototype': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'update'],
+ 'Maintenance': ['create', 'delete', 'get', 'update'],
+ 'Map': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'update'],
+ 'Mediatype': ['create', 'delete', 'get', 'update'],
+ 'Proxy': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'update'],
+ 'Screen': ['create', 'delete', 'get', 'update'],
+ 'Screenitem': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'update', 'updatebyposition'],
+ 'Script': ['create', 'delete', 'execute', 'get', 'getscriptsbyhosts', 'update'],
+ 'Service': ['adddependencies', 'addtimes', 'create', 'delete', 'deletedependencies', 'deletetimes', 'get', 'getsla', 'isreadable', 'iswritable', 'update'],
+ 'Template': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'massadd', 'massremove', 'massupdate', 'update'],
+ 'Templatescreen': ['copy', 'create', 'delete', 'get', 'isreadable', 'iswritable', 'update'],
+ 'Templatescreenitem': ['get'],
+ 'Trigger': ['adddependencies', 'create', 'delete', 'deletedependencies', 'get', 'isreadable', 'iswritable', 'update'],
+ 'Triggerprototype': ['create', 'delete', 'get', 'update'],
+ 'User': ['addmedia', 'create', 'delete', 'deletemedia', 'get', 'isreadable', 'iswritable', 'login', 'logout', 'update', 'updatemedia', 'updateprofile'],
+ 'Usergroup': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'massadd', 'massupdate', 'update'],
+ 'Usermacro': ['create', 'createglobal', 'delete', 'deleteglobal', 'get', 'update', 'updateglobal'],
+ 'Usermedia': ['get'],
+ }
+
+    def __init__(self, data=None):
+        data = data or {}
+        self.server = data.get('server')
+        self.username = data.get('user')
+        self.password = data.get('password')
+ if any(map(lambda value: value == None, [self.server, self.username, self.password])):
+ print 'Please specify zabbix server url, username, and password.'
+ sys.exit(1)
+
+ self.verbose = data.has_key('verbose')
+ self.use_ssl = data.has_key('use_ssl')
+ self.auth = None
+
+ for class_name, method_names in self.classes.items():
+ setattr(self, class_name.lower(), getattr(self, class_name)(self))
+
+ results = self.user.login(user=self.username, password=self.password)
+
+ if results[0]['status'] == '200':
+ if results[1].has_key('result'):
+ self.auth = results[1]['result']
+ elif results[1].has_key('error'):
+ print "Unable to authenticate with zabbix server. {0} ".format(results[1]['error'])
+ sys.exit(1)
+ else:
+ print "Error in call to zabbix. Http status: {0}.".format(results[0]['status'])
+ sys.exit(1)
+
+ def perform(self, method, params):
+ '''
+ This method calls your zabbix server.
+
+ It requires the following parameters in order for a proper request to be processed:
+
+ jsonrpc - the version of the JSON-RPC protocol used by the API; the Zabbix API implements JSON-RPC version 2.0;
+ method - the API method being called;
+ params - parameters that will be passed to the API method;
+ id - an arbitrary identifier of the request;
+ auth - a user authentication token; since we don't have one yet, it's set to null.
+ '''
+ http_method = "POST"
+ if params.has_key("http_method"):
+ http_method = params['http_method']
+
+ jsonrpc = "2.0"
+ if params.has_key('jsonrpc'):
+ jsonrpc = params['jsonrpc']
+
+ rid = 1
+ if params.has_key('id'):
+ rid = params['id']
+
+ http = None
+ if self.use_ssl:
+ http = httplib2.Http()
+ else:
+ http = httplib2.Http( disable_ssl_certificate_validation=True,)
+
+ headers = params.get('headers', {})
+ headers["Content-type"] = "application/json"
+
+ body = {
+ "jsonrpc": jsonrpc,
+ "method": method,
+ "params": params,
+ "id": rid,
+ 'auth': self.auth,
+ }
+
+ if method in ['user.login','api.version']:
+ del body['auth']
+
+ body = json.dumps(body)
+
+ if self.verbose:
+ print body
+ print method
+ print headers
+ httplib2.debuglevel = 1
+
+ response, results = http.request(self.server, http_method, body, headers)
+
+ if self.verbose:
+ print response
+ print results
+
+ try:
+ results = json.loads(results)
+ except ValueError as e:
+ results = {"error": e.message}
+
+ return response, results
+
+ '''
+ This bit of metaprogramming is where the ZabbixAPI subclasses are created.
+ For each of ZabbixAPI.classes we create a class from the key and methods
+ from the ZabbixAPI.classes values. We pass a reference to ZabbixAPI class
+ to each subclass in order for each to be able to call the perform method.
+ '''
+ @staticmethod
+ def meta(class_name, method_names):
+ # This meta method allows a class to add methods to it.
+ def meta_method(Class, method_name):
+ # This template method is a stub method for each of the subclass
+ # methods.
+ def template_method(self, **params):
+ return self.parent.perform(class_name.lower()+"."+method_name, params)
+ template_method.__doc__ = "https://www.zabbix.com/documentation/2.4/manual/api/reference/%s/%s" % (class_name.lower(), method_name)
+ template_method.__name__ = method_name
+ # this is where the template method is placed inside of the subclass
+ # e.g. setattr(User, "create", stub_method)
+ setattr(Class, template_method.__name__, template_method)
+
+ # This class call instantiates a subclass. e.g. User
+ Class=type(class_name, (object,), { '__doc__': "https://www.zabbix.com/documentation/2.4/manual/api/reference/%s" % class_name.lower() })
+ # This init method gets placed inside of the Class
+ # to allow it to be instantiated. A reference to the parent class(ZabbixAPI)
+ # is passed in to allow each class access to the perform method.
+ def __init__(self, parent):
+ self.parent = parent
+ # This attaches the init to the subclass. e.g. Create
+ setattr(Class, __init__.__name__, __init__)
+ # For each of our ZabbixAPI.classes dict values
+ # Create a method and attach it to our subclass.
+ # e.g. 'User': ['delete', 'get', 'updatemedia', 'updateprofile',
+ # 'update', 'iswritable', 'logout', 'addmedia', 'create',
+ # 'login', 'deletemedia', 'isreadable'],
+ # User.delete
+ # User.get
+ for method_name in method_names:
+ meta_method(Class, method_name)
+ # Return our subclass with all methods attached
+ return Class
+
+# Attach all ZabbixAPI.classes to ZabbixAPI class through metaprogramming
+for class_name, method_names in ZabbixAPI.classes.items():
+ setattr(ZabbixAPI, class_name, ZabbixAPI.meta(class_name, method_names))
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec = dict(
+ server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
+ user=dict(default=None, type='str'),
+ password=dict(default=None, type='str'),
+ zbx_class=dict(choices=ZabbixAPI.classes.keys()),
+ action=dict(default=None, type='str'),
+ params=dict(),
+ debug=dict(default=False, type='bool'),
+ ),
+ #supports_check_mode=True
+ )
+
+ user = module.params.get('user', None)
+ if not user:
+        user = os.environ.get('ZABBIX_USER')
+
+ pw = module.params.get('password', None)
+ if not pw:
+        pw = os.environ.get('ZABBIX_PASSWORD')
+
+ server = module.params['server']
+
+    api_data = {
+        'user': user,
+        'password': pw,
+        'server': server,
+    }
+
+    if module.params['debug']:
+        api_data['verbose'] = True
+
+ if not user or not pw or not server:
+ module.fail_json('Please specify the user, password, and the zabbix server.')
+
+ zapi = ZabbixAPI(api_data)
+
+ zbx_class = module.params.get('zbx_class')
+ action = module.params.get('action')
+ params = module.params.get('params', {})
+
+
+ # Get the instance we are trying to call
+ zbx_class_inst = zapi.__getattribute__(zbx_class.lower())
+ # Get the instance's method we are trying to call
+ zbx_action_method = zapi.__getattribute__(zbx_class.capitalize()).__dict__[action]
+ # Make the call with the incoming params
+ results = zbx_action_method(zbx_class_inst, **params)
+
+ # Results Section
+ changed_state = False
+ status = results[0]['status']
+ if status not in ['200', '201']:
+ #changed_state = False
+ module.fail_json(msg="Http response: [%s] - Error: %s" % (str(results[0]), results[1]))
+
+ module.exit_json(**{'results': results[1]['result']})
+
+from ansible.module_utils.basic import *
+
+main()
diff --git a/roles/yum_repos/README.md b/roles/yum_repos/README.md
new file mode 100644
index 000000000..51ecd5d34
--- /dev/null
+++ b/roles/yum_repos/README.md
@@ -0,0 +1,113 @@
+Yum Repos
+=========
+
+This role allows easy deployment of yum repository config files.
+
+Requirements
+------------
+
+Yum
+
+Role Variables
+--------------
+
+| Name | Default value | |
+|-------------------|---------------|--------------------------------------------|
+| repo_files        | None          | List of repo file definitions (see below)  |
+| repo_enabled | 1 | Should repos be enabled by default |
+| repo_gpgcheck | 1 | Should repo gpgcheck be enabled by default |
+
+Dependencies
+------------
+
+None.
+
+Example Playbook
+----------------
+
+A single repo file containing a single repo:
+ - hosts: servers
+ roles:
+ - role: yum_repos
+ repo_files:
+ - id: my_repo
+ repos:
+ - id: my_repo
+ name: My Awesome Repo
+ baseurl: https://my.awesome.repo/is/available/here
+ skip_if_unavailable: yes
+ gpgkey: https://my.awesome.repo/pubkey.gpg
+
+A single repo file containing a single repo, disabling gpgcheck:
+ - hosts: servers
+ roles:
+ - role: yum_repos
+ repo_files:
+ - id: my_other_repo
+ repos:
+ - id: my_other_repo
+ name: My Other Awesome Repo
+ baseurl: https://my.other.awesome.repo/is/available/here
+ gpgcheck: no
+
+A single repo file containing a single disabled repo:
+ - hosts: servers
+ roles:
+ - role: yum_repos
+ repo_files:
+ - id: my_other_repo
+ repos:
+ - id: my_other_repo
+ name: My Other Awesome Repo
+ baseurl: https://my.other.awesome.repo/is/available/here
+ enabled: no
+
+A single repo file containing multiple repos:
+ - hosts: servers
+ roles:
+ - role: yum_repos
+ repo_files:
+      - id: my_repos
+ repos:
+ - id: my_repo
+ name: My Awesome Repo
+ baseurl: https://my.awesome.repo/is/available/here
+ gpgkey: https://my.awesome.repo/pubkey.gpg
+ - id: my_other_repo
+ name: My Other Awesome Repo
+ baseurl: https://my.other.awesome.repo/is/available/here
+ gpgkey: https://my.other.awesome.repo/pubkey.gpg
+
+Multiple repo files containing multiple repos:
+ - hosts: servers
+ roles:
+ - role: yum_repos
+ repo_files:
+ - id: my_repos
+ repos:
+ - id: my_repo
+ name: My Awesome Repo
+ baseurl: https://my.awesome.repo/is/available/here
+ gpgkey: https://my.awesome.repo/pubkey.gpg
+ - id: my_other_repo
+ name: My Other Awesome Repo
+ baseurl: https://my.other.awesome.repo/is/available/here
+ gpgkey: https://my.other.awesome.repo/pubkey.gpg
+ - id: joes_repos
+ repos:
+ - id: joes_repo
+ name: Joe's Less Awesome Repo
+ baseurl: https://joes.repo/is/here
+ gpgkey: https://joes.repo/pubkey.gpg
+ - id: joes_otherrepo
+ name: Joe's Other Less Awesome Repo
+ baseurl: https://joes.repo/is/there
+ gpgkey: https://joes.repo/pubkey.gpg
+
+License
+-------
+
+ASL 2.0
+
+Author Information
+------------------
+
+openshift online operations
diff --git a/roles/yum_repos/defaults/main.yml b/roles/yum_repos/defaults/main.yml
new file mode 100644
index 000000000..515fb7a4a
--- /dev/null
+++ b/roles/yum_repos/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+repo_enabled: 1
+repo_gpgcheck: 1
diff --git a/roles/yum_repos/meta/main.yml b/roles/yum_repos/meta/main.yml
new file mode 100644
index 000000000..6b8374da9
--- /dev/null
+++ b/roles/yum_repos/meta/main.yml
@@ -0,0 +1,8 @@
+---
+galaxy_info:
+ author: openshift operations
+  description: Deploy yum repository config files
+ company: Red Hat, Inc.
+ license: ASL 2.0
+ min_ansible_version: 1.2
+dependencies: []
diff --git a/roles/yum_repos/tasks/main.yml b/roles/yum_repos/tasks/main.yml
new file mode 100644
index 000000000..a9903c6c6
--- /dev/null
+++ b/roles/yum_repos/tasks/main.yml
@@ -0,0 +1,47 @@
+---
+# Convert old params to new params
+- set_fact:
+ repo_files:
+ - id: "{{ repo_tag }}"
+ repos:
+ - id: "{{ repo_tag }}"
+ name: "{{ repo_name }}"
+ baseurl: "{{ repo_baseurl }}"
+ enabled: "{{ repo_enabled }}"
+ gpgcheck: "{{ repo_gpg_check | default(repo_gpgcheck) }}"
+ sslverify: "{{ repo_sslverify | default(None) }}"
+ sslclientcert: "{{ repo_sslclientcert | default(None) }}"
+ sslclientkey: "{{ repo_sslclientkey | default(None) }}"
+ gpgkey: "{{ repo_gpgkey | default(None) }}"
+ when: repo_files is not defined
+
+- name: Verify repo_files is a list
+ assert:
+ that:
+ - repo_files is iterable and repo_files is not string and repo_files is not mapping
+
+- name: Verify repo_files items have an id and a repos list
+ assert:
+ that:
+ - item is mapping
+ - "'id' in item"
+ - "'repos' in item"
+ - item.repos is iterable and item.repos is not string and item.repos is not mapping
+ with_items: repo_files
+
+- name: Verify that repo_files.repos have the required keys
+ assert:
+ that:
+ - item.1 is mapping
+ - "'id' in item.1"
+ - "'name' in item.1"
+ - "'baseurl' in item.1"
+ with_subelements:
+ - repo_files
+ - repos
+
+- name: Installing yum-repo template
+ template:
+ src: yumrepo.j2
+ dest: /etc/yum.repos.d/{{ item.id }}.repo
+ with_items: repo_files
diff --git a/roles/yum_repos/templates/yumrepo.j2 b/roles/yum_repos/templates/yumrepo.j2
new file mode 100644
index 000000000..0dfdbfe43
--- /dev/null
+++ b/roles/yum_repos/templates/yumrepo.j2
@@ -0,0 +1,18 @@
+{% set repos = item.repos %}
+{% for repo in repos %}
+[{{ repo.id }}]
+name={{ repo.name }}
+baseurl={{ repo.baseurl }}
+{% set repo_enabled_value = repo.enabled | default(repo_enabled) %}
+{% set enable_repo = 1 if (repo_enabled_value | int(0) == 1 or repo_enabled_value | lower in ['true', 'yes']) else 0 %}
+enabled={{ enable_repo }}
+{% set repo_gpgcheck_value = repo.gpgcheck | default(repo_gpgcheck) %}
+{% set enable_gpgcheck = 1 if (repo_gpgcheck_value | int(0) == 1 or repo_gpgcheck_value | lower in ['true', 'yes']) else 0 %}
+gpgcheck={{ enable_gpgcheck }}
+{% for key, value in repo.iteritems() %}
+{% if key not in ['id', 'name', 'baseurl', 'enabled', 'gpgcheck'] and value is defined and value != '' %}
+{{ key }}={{ value }}
+{% endif %}
+{% endfor %}
+
+{% endfor %}