Diffstat (limited to 'roles')
-rw-r--r--  roles/ansible_tower/tasks/main.yaml | 1
-rw-r--r--  roles/openshift_common/tasks/main.yml | 4
-rw-r--r--  roles/openshift_common/vars/main.yml | 4
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py | 92
-rw-r--r--  roles/openshift_master/tasks/main.yml | 86
-rw-r--r--  roles/openshift_master/vars/main.yml | 5
-rw-r--r--  roles/openshift_node/tasks/main.yml | 32
-rw-r--r--  roles/openshift_node/vars/main.yml | 2
-rw-r--r--  roles/openshift_register_nodes/defaults/main.yml | 3
-rwxr-xr-x  roles/openshift_register_nodes/library/kubernetes_register_node.py | 432
-rw-r--r--  roles/openshift_register_nodes/tasks/main.yml | 69
-rw-r--r--  roles/openshift_register_nodes/vars/main.yml | 7
-rw-r--r--  roles/openshift_repos/README.md | 2
-rw-r--r--  roles/openshift_repos/defaults/main.yaml | 5
-rw-r--r--  roles/openshift_repos/files/online/RPM-GPG-KEY-redhat-beta | 61
-rw-r--r--  roles/openshift_repos/files/online/RPM-GPG-KEY-redhat-release | 63
-rw-r--r--  roles/openshift_repos/files/online/epel7-kubernetes.repo | 6
-rw-r--r--  roles/openshift_repos/files/online/epel7-openshift.repo | 6
-rw-r--r--  roles/openshift_repos/files/online/oso-rhui-rhel-7-extras.repo | 23
-rw-r--r--  roles/openshift_repos/files/online/oso-rhui-rhel-7-server.repo | 21
-rw-r--r--  roles/openshift_repos/files/online/repos/enterprise-v3.repo | 10
-rw-r--r--  roles/openshift_repos/files/online/repos/rhel-7-libra-candidate.repo (renamed from roles/openshift_repos/files/online/rhel-7-libra-candidate.repo) | 0
-rw-r--r--  roles/openshift_repos/files/origin/repos/maxamillion-origin-next-epel-7.repo | 7
-rw-r--r--  roles/openshift_repos/files/removed/repos/epel7-openshift.repo | 0
-rw-r--r--  roles/openshift_repos/files/removed/repos/oso-rhui-rhel-7-extras.repo | 0
-rw-r--r--  roles/openshift_repos/files/removed/repos/oso-rhui-rhel-7-server.repo | 0
-rw-r--r--  roles/openshift_repos/tasks/main.yaml | 14
-rw-r--r--  roles/openshift_repos/templates/yum_repo.j2 | 1
-rw-r--r--  roles/openshift_sdn_master/tasks/main.yml | 11
-rw-r--r--  roles/openshift_sdn_node/tasks/main.yml | 32
-rwxr-xr-x  roles/os_firewall/library/os_firewall_manage_iptables.py | 92
-rw-r--r--  roles/os_firewall/tasks/firewall/firewalld.yml | 1
-rw-r--r--  roles/os_firewall/tasks/firewall/iptables.yml | 1
33 files changed, 613 insertions, 480 deletions
diff --git a/roles/ansible_tower/tasks/main.yaml b/roles/ansible_tower/tasks/main.yaml
index e9bde9478..1d75a95e6 100644
--- a/roles/ansible_tower/tasks/main.yaml
+++ b/roles/ansible_tower/tasks/main.yaml
@@ -9,6 +9,7 @@
- ansible
- telnet
- ack
+ - python-ansible-tower-cli
- name: download Tower setup
get_url: url=http://releases.ansible.com/ansible-tower/setup/ansible-tower-setup-2.1.1.tar.gz dest=/opt/ force=no
diff --git a/roles/openshift_common/tasks/main.yml b/roles/openshift_common/tasks/main.yml
index 941190534..c55677c3f 100644
--- a/roles/openshift_common/tasks/main.yml
+++ b/roles/openshift_common/tasks/main.yml
@@ -1,7 +1,7 @@
---
- name: Set common OpenShift facts
openshift_facts:
- role: 'common'
+ role: common
local_facts:
cluster_id: "{{ openshift_cluster_id | default('default') }}"
debug_level: "{{ openshift_debug_level | default(0) }}"
@@ -10,7 +10,7 @@
public_hostname: "{{ openshift_public_hostname | default(None) }}"
public_ip: "{{ openshift_public_ip | default(None) }}"
use_openshift_sdn: "{{ openshift_use_openshift_sdn | default(None) }}"
-
+ deployment_type: "{{ openshift_deployment_type }}"
- name: Set hostname
hostname: name={{ openshift.common.hostname }}
diff --git a/roles/openshift_common/vars/main.yml b/roles/openshift_common/vars/main.yml
index 50816d319..9f657a2c7 100644
--- a/roles/openshift_common/vars/main.yml
+++ b/roles/openshift_common/vars/main.yml
@@ -5,3 +5,7 @@
# chains with the public zone (or the zone associated with the correct
# interfaces)
os_firewall_use_firewalld: False
+
+openshift_cert_parent_dir: /var/lib/openshift
+openshift_cert_relative_dir: openshift.local.certificates
+openshift_cert_dir: "{{ openshift_cert_parent_dir }}/{{ openshift_cert_relative_dir }}"
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 0dd343443..1e0d5c605 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -21,8 +21,11 @@ class OpenShiftFactsUnsupportedRoleError(Exception):
class OpenShiftFactsFileWriteError(Exception):
pass
+class OpenShiftFactsMetadataUnavailableError(Exception):
+ pass
+
class OpenShiftFacts():
- known_roles = ['common', 'master', 'node', 'master_sdn', 'node_sdn']
+ known_roles = ['common', 'master', 'node', 'master_sdn', 'node_sdn', 'dns']
def __init__(self, role, filename, local_facts):
self.changed = False
@@ -169,20 +172,18 @@ class OpenShiftFacts():
return hostname
def get_defaults(self, roles):
- hardware_facts = self.get_hardware_facts()
- net_facts = self.get_net_facts()
- base_facts = self.get_base_facts()
+ ansible_facts = self.get_ansible_facts()
defaults = dict()
common = dict(use_openshift_sdn=True)
- ip = net_facts['default_ipv4']['address']
+ ip = ansible_facts['default_ipv4']['address']
common['ip'] = ip
common['public_ip'] = ip
rc, output, error = module.run_command(['hostname', '-f'])
hostname_f = output.strip() if rc == 0 else ''
- hostname_values = [hostname_f, base_facts['nodename'], base_facts['fqdn']]
+ hostname_values = [hostname_f, ansible_facts['nodename'], ansible_facts['fqdn']]
hostname = self.choose_hostname(hostname_values)
common['hostname'] = hostname
@@ -196,14 +197,14 @@ class OpenShiftFacts():
master = dict(api_use_ssl=True, api_port='8443',
console_use_ssl=True, console_path='/console',
console_port='8443', etcd_use_ssl=False,
- etcd_port='4001')
+ etcd_port='4001', portal_net='172.30.17.0/24')
defaults['master'] = master
if 'node' in roles:
node = dict(external_id=common['hostname'], pod_cidr='',
labels={}, annotations={})
- node['resources_cpu'] = hardware_facts['processor_cores']
- node['resources_memory'] = int(int(hardware_facts['memtotal_mb']) * 1024 * 1024 * 0.75)
+ node['resources_cpu'] = ansible_facts['processor_cores']
+ node['resources_memory'] = int(int(ansible_facts['memtotal_mb']) * 1024 * 1024 * 0.75)
defaults['node'] = node
return defaults
@@ -226,8 +227,7 @@ class OpenShiftFacts():
def query_metadata(self, metadata_url, headers=None, expect_json=False):
r, info = fetch_url(module, metadata_url, headers=headers)
if info['status'] != 200:
- module.fail_json(msg='Failed to query metadata', result=r,
- info=info)
+ raise OpenShiftFactsMetadataUnavailableError("Metadata unavailable")
if expect_json:
return module.from_json(r.read())
else:
@@ -252,40 +252,27 @@ class OpenShiftFacts():
def get_provider_metadata(self, metadata_url, supports_recursive=False,
headers=None, expect_json=False):
- if supports_recursive:
- metadata = self.query_metadata(metadata_url, headers, expect_json)
- else:
- metadata = self.walk_metadata(metadata_url, headers, expect_json)
+ try:
+ if supports_recursive:
+ metadata = self.query_metadata(metadata_url, headers, expect_json)
+ else:
+ metadata = self.walk_metadata(metadata_url, headers, expect_json)
+ except OpenShiftFactsMetadataUnavailableError as e:
+ metadata = None
return metadata
- def get_hardware_facts(self):
- if not hasattr(self, 'hardware_facts'):
- self.hardware_facts = Hardware().populate()
- return self.hardware_facts
-
- def get_base_facts(self):
- if not hasattr(self, 'base_facts'):
- self.base_facts = Facts().populate()
- return self.base_facts
-
- def get_virt_facts(self):
- if not hasattr(self, 'virt_facts'):
- self.virt_facts = Virtual().populate()
- return self.virt_facts
-
- def get_net_facts(self):
- if not hasattr(self, 'net_facts'):
- self.net_facts = Network(module).populate()
- return self.net_facts
+ def get_ansible_facts(self):
+ if not hasattr(self, 'ansible_facts'):
+ self.ansible_facts = ansible_facts(module)
+ return self.ansible_facts
def guess_host_provider(self):
# TODO: cloud provider facts should probably be submitted upstream
- virt_facts = self.get_virt_facts()
- hardware_facts = self.get_hardware_facts()
- product_name = hardware_facts['product_name']
- product_version = hardware_facts['product_version']
- virt_type = virt_facts['virtualization_type']
- virt_role = virt_facts['virtualization_role']
+ ansible_facts = self.get_ansible_facts()
+ product_name = ansible_facts['product_name']
+ product_version = ansible_facts['product_version']
+ virt_type = ansible_facts['virtualization_type']
+ virt_role = ansible_facts['virtualization_role']
provider = None
metadata = None
@@ -300,8 +287,9 @@ class OpenShiftFacts():
True)
# Filter sshKeys and serviceAccounts from gce metadata
- metadata['project']['attributes'].pop('sshKeys', None)
- metadata['instance'].pop('serviceAccounts', None)
+ if metadata:
+ metadata['project']['attributes'].pop('sshKeys', None)
+ metadata['instance'].pop('serviceAccounts', None)
elif virt_type == 'xen' and virt_role == 'guest' and re.match(r'.*\.amazon$', product_version):
provider = 'ec2'
metadata_url = 'http://169.254.169.254/latest/meta-data/'
@@ -310,12 +298,18 @@ class OpenShiftFacts():
provider = 'openstack'
metadata_url = 'http://169.254.169.254/openstack/latest/meta_data.json'
metadata = self.get_provider_metadata(metadata_url, True, None, True)
- ec2_compat_url = 'http://169.254.169.254/latest/meta-data/'
- metadata['ec2_compat'] = self.get_provider_metadata(ec2_compat_url)
- # Filter public_keys and random_seed from openstack metadata
- metadata.pop('public_keys', None)
- metadata.pop('random_seed', None)
+ if metadata:
+ ec2_compat_url = 'http://169.254.169.254/latest/meta-data/'
+ metadata['ec2_compat'] = self.get_provider_metadata(ec2_compat_url)
+
+ # Filter public_keys and random_seed from openstack metadata
+ metadata.pop('public_keys', None)
+ metadata.pop('random_seed', None)
+
+ if not metadata['ec2_compat']:
+ metadata = None
+
return dict(name=provider, metadata=metadata)
def normalize_provider_facts(self, provider, metadata):
@@ -479,4 +473,6 @@ def main():
from ansible.module_utils.basic import *
from ansible.module_utils.facts import *
from ansible.module_utils.urls import *
-main()
+
+if __name__ == '__main__':
+ main()
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index aa615df39..28bdda618 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -11,48 +11,96 @@
api_url: "{{ openshift_master_api_url | default(None) }}"
api_use_ssl: "{{ openshift_master_api_use_ssl | default(None) }}"
public_api_url: "{{ openshift_master_public_api_url | default(None) }}"
+ console_path: "{{ openshift_master_console_path | default(None) }}"
console_port: "{{ openshift_master_console_port | default(None) }}"
console_url: "{{ openshift_master_console_url | default(None) }}"
console_use_ssl: "{{ openshift_master_console_use_ssl | default(None) }}"
public_console_url: "{{ openshift_master_public_console_url | default(None) }}"
+ etcd_port: "{{ openshift_master_etcd_port | default(None) }}"
etcd_use_ssl: "{{ openshift_master_etcd_use_ssl | default(None) }}"
+ portal_net: "{{ openshift_master_portal_net | default(None) }}"
+
+# TODO: These values need to be configurable
+- name: Set dns OpenShift facts
+ openshift_facts:
+ role: 'dns'
+ local_facts:
+ ip: "{{ openshift.common.ip }}"
+ domain: local
- name: Install OpenShift Master package
yum: pkg=openshift-master state=installed
+ register: install_result
+
+- name: Reload systemd units
+ command: systemctl daemon-reload
+ when: install_result | changed
+
+- name: Create certificate parent directory if it doesn't exist
+ file:
+ path: "{{ openshift_cert_parent_dir }}"
+ state: directory
+
+- name: Create config parent directory if it doesn't exist
+ file:
+ path: "{{ openshift_master_config | dirname }}"
+ state: directory
+
+# TODO: should probably use a template lookup for this
+# TODO: should allow for setting --etcd, --kubernetes options
+# TODO: recreate config if values change
+- name: Use enterprise default for openshift_registry_url if not set
+ set_fact:
+ openshift_registry_url: "openshift3_beta/ose-${component}:${version}"
+ when: openshift.common.deployment_type == 'enterprise' and openshift_registry_url is not defined
+
+- name: Use online default for openshift_registry_url if not set
+ set_fact:
+ openshift_registry_url: "docker-registry.ops.rhcloud.com/openshift3_beta/ose-${component}:${version}"
+ when: openshift.common.deployment_type == 'online' and openshift_registry_url is not defined
+
+- name: Create master config
+ command: >
+ /usr/bin/openshift start master --write-config
+ --config={{ openshift_master_config }}
+ --portal-net={{ openshift.master.portal_net }}
+ --master={{ openshift.master.api_url }}
+ --public-master={{ openshift.master.public_api_url }}
+ --listen={{ 'https' if openshift.master.api_use_ssl else 'http' }}://0.0.0.0:{{ openshift.master.api_port }}
+ {{ ('--images=' ~ openshift_registry_url) if (openshift_registry_url | default('', true) != '') else '' }}
+ {{ ('--nodes=' ~ openshift_node_ips | join(',')) if (openshift_node_ips | default('', true) != '') else '' }}
+ args:
+ chdir: "{{ openshift_cert_parent_dir }}"
+ creates: "{{ openshift_master_config }}"
-# TODO: We should pre-generate the master config and point to the generated
-# config rather than setting command line flags here
- name: Configure OpenShift settings
lineinfile:
dest: /etc/sysconfig/openshift-master
regexp: '^OPTIONS='
- line: "OPTIONS=\"--master={{ openshift.common.hostname }} --public-master={{ openshift.common.public_hostname }} {% if openshift_node_ips %} --nodes={{ openshift_node_ips | join(',') }} {% endif %} --loglevel={{ openshift.master.debug_level }}\""
- notify:
- - restart openshift-master
-
-# TODO: should this be populated by a fact based on the deployment type
-# (origin, online, enterprise)?
-- name: Set default registry url
- lineinfile:
- dest: /etc/sysconfig/openshift-master
- regexp: '^IMAGES='
- line: "IMAGES={{ openshift_registry_url }}"
- when: openshift_registry_url is defined
+ line: "OPTIONS=\"--config={{ openshift_master_config }} --loglevel={{ openshift.master.debug_level }}\""
notify:
- restart openshift-master
- name: Start and enable openshift-master
service: name=openshift-master enabled=yes state=started
-- name: Create .kube directory
+- name: Create the OpenShift client config dir(s)
file:
- path: /root/.kube
+ path: "~{{ item }}/.config/openshift"
state: directory
mode: 0700
+ owner: "{{ item }}"
+ group: "{{ item }}"
+ with_items:
+ - root
+ - "{{ ansible_ssh_user }}"
# TODO: Update this file if the contents of the source file are not present in
# the dest file, will need to make sure to ignore things that could be added
-- name: Configure root user kubeconfig
- command: cp /var/lib/openshift/openshift.local.certificates/openshift-client/.kubeconfig /root/.kube/.kubeconfig
+- name: Create the OpenShift client config(s)
+ command: cp {{ openshift_cert_dir }}/openshift-client/.kubeconfig ~{{ item }}/.config/openshift/.config
args:
- creates: /root/.kube/.kubeconfig
+ creates: ~{{ item }}/.config/openshift/.config
+ with_items:
+ - root
+ - "{{ ansible_ssh_user }}"
diff --git a/roles/openshift_master/vars/main.yml b/roles/openshift_master/vars/main.yml
new file mode 100644
index 000000000..c52d957ac
--- /dev/null
+++ b/roles/openshift_master/vars/main.yml
@@ -0,0 +1,5 @@
+---
+openshift_master_config: /etc/openshift/master.yaml
+openshift_master_ca_dir: "{{ openshift_cert_dir }}/ca"
+openshift_master_ca_cert: "{{ openshift_master_ca_dir }}/cert.crt"
+openshift_master_ca_key: "{{ openshift_master_ca_dir }}/key.key"
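With the new vars file above, the "Configure OpenShift settings" task in roles/openshift_master/tasks/main.yml now renders /etc/sysconfig/openshift-master roughly as follows (a sketch only; the loglevel value assumes openshift_common's default debug_level of 0 and is not part of this change):

    OPTIONS="--config=/etc/openshift/master.yaml --loglevel=0"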
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index e3c04585b..3d56bdd67 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -13,17 +13,22 @@
failed_when: not result.stat.exists
register: result
with_items:
- - "{{ cert_path }}"
- - "{{ cert_path }}/cert.crt"
- - "{{ cert_path }}/key.key"
- - "{{ cert_path }}/.kubeconfig"
- - "{{ cert_path }}/server.crt"
- - "{{ cert_path }}/server.key"
- - "{{ cert_parent_path }}/ca/cert.crt"
- #- "{{ cert_path }}/node.yaml"
+ - "{{ openshift_node_cert_dir }}"
+ - "{{ openshift_node_cert_dir }}/ca.crt"
+ - "{{ openshift_node_cert_dir }}/client.crt"
+ - "{{ openshift_node_cert_dir }}/client.key"
+ - "{{ openshift_node_cert_dir }}/.kubeconfig"
+ - "{{ openshift_node_cert_dir }}/node-config.yaml"
+ - "{{ openshift_node_cert_dir }}/server.crt"
+ - "{{ openshift_node_cert_dir }}/server.key"
- name: Install OpenShift Node package
yum: pkg=openshift-node state=installed
+ register: install_result
+
+- name: Reload systemd units
+ command: systemctl daemon-reload
+ when: install_result | changed
# --create-certs=false is a temporary workaround until
# https://github.com/openshift/origin/pull/1361 is merged upstream and it is
@@ -32,16 +37,7 @@
lineinfile:
dest: /etc/sysconfig/openshift-node
regexp: '^OPTIONS='
- line: "OPTIONS=\"--hostname={{ openshift.common.hostname }} --loglevel={{ openshift.node.debug_level }} --create-certs=false\""
- notify:
- - restart openshift-node
-
-- name: Set default registry url
- lineinfile:
- dest: /etc/sysconfig/openshift-node
- regexp: '^IMAGES='
- line: "IMAGES={{ openshift_registry_url }}"
- when: openshift_registry_url is defined
+ line: "OPTIONS=\"--loglevel={{ openshift.node.debug_level }} --config={{ openshift_node_cert_dir }}/node-config.yaml\""
notify:
- restart openshift-node
diff --git a/roles/openshift_node/vars/main.yml b/roles/openshift_node/vars/main.yml
new file mode 100644
index 000000000..c6be83139
--- /dev/null
+++ b/roles/openshift_node/vars/main.yml
@@ -0,0 +1,2 @@
+---
+openshift_node_cert_dir: /etc/openshift/node
diff --git a/roles/openshift_register_nodes/defaults/main.yml b/roles/openshift_register_nodes/defaults/main.yml
index 3501e8922..a0befab44 100644
--- a/roles/openshift_register_nodes/defaults/main.yml
+++ b/roles/openshift_register_nodes/defaults/main.yml
@@ -1,5 +1,2 @@
---
openshift_kube_api_version: v1beta1
-openshift_cert_dir: openshift.local.certificates
-openshift_cert_dir_parent: /var/lib/openshift
-openshift_cert_dir_abs: "{{ openshift_cert_dir_parent ~ '/' ~ openshift_cert_dir }}"
diff --git a/roles/openshift_register_nodes/library/kubernetes_register_node.py b/roles/openshift_register_nodes/library/kubernetes_register_node.py
index 8ebeb087a..afa9eb27d 100755
--- a/roles/openshift_register_nodes/library/kubernetes_register_node.py
+++ b/roles/openshift_register_nodes/library/kubernetes_register_node.py
@@ -1,12 +1,21 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# vim: expandtab:tabstop=4:shiftwidth=4
+#
+# disable pylint checks
+# temporarily disabled until items can be addressed:
+# fixme - until all TODO comments have been addressed
+# permanently disabled unless someone wants to refactor the object model:
+# too-few-public-methods
+# no-self-use
+# too-many-arguments
+# too-many-locals
+# too-many-branches
+# pylint:disable=fixme, too-many-arguments, no-self-use
+# pylint:disable=too-many-locals, too-many-branches, too-few-public-methods
+"""Ansible module to register a kubernetes node to the cluster"""
import os
-import multiprocessing
-import socket
-from subprocess import check_output, Popen
-from decimal import *
DOCUMENTATION = '''
---
@@ -93,72 +102,170 @@ EXAMPLES = '''
class ClientConfigException(Exception):
+ """Client Configuration Exception"""
pass
-class ClientConfig:
+class ClientConfig(object):
+ """ Representation of a client config
+
+ Attributes:
+ config (dict): dictionary representing the client configuration
+
+ Args:
+ client_opts (list of str): client options to use
+ module (AnsibleModule):
+
+ Raises:
+ ClientConfigException:
+ """
def __init__(self, client_opts, module):
- _, output, error = module.run_command(["/usr/bin/openshift", "ex",
- "config", "view", "-o",
- "json"] + client_opts,
- check_rc = True)
+ kubectl = module.params['kubectl_cmd']
+ _, output, _ = module.run_command((kubectl +
+ ["config", "view", "-o", "json"] +
+ client_opts), check_rc=True)
self.config = json.loads(output)
if not (bool(self.config['clusters']) or
bool(self.config['contexts']) or
bool(self.config['current-context']) or
bool(self.config['users'])):
- raise ClientConfigException(msg="Client config missing required " \
- "values",
- output=output)
+ raise ClientConfigException(
+ "Client config missing required values: %s" % output
+ )
def current_context(self):
+ """ Gets the current context for the client config
+
+ Returns:
+ str: The current context as set in the config
+ """
return self.config['current-context']
def section_has_value(self, section_name, value):
+ """ Test if specified section contains a value
+
+ Args:
+ section_name (str): config section to test
+ value (str): value to test if present
+ Returns:
+ bool: True if successful, false otherwise
+ """
section = self.config[section_name]
if isinstance(section, dict):
return value in section
else:
val = next((item for item in section
- if item['name'] == value), None)
+ if item['name'] == value), None)
return val is not None
def has_context(self, context):
+ """ Test if specified context exists in config
+
+ Args:
+ context (str): value to test if present
+ Returns:
+ bool: True if successful, false otherwise
+ """
return self.section_has_value('contexts', context)
def has_user(self, user):
+ """ Test if specified user exists in config
+
+ Args:
+ context (str): value to test if present
+ Returns:
+ bool: True if successful, false otherwise
+ """
return self.section_has_value('users', user)
def has_cluster(self, cluster):
+ """ Test if specified cluster exists in config
+
+ Args:
+ context (str): value to test if present
+ Returns:
+ bool: True if successful, false otherwise
+ """
return self.section_has_value('clusters', cluster)
def get_value_for_context(self, context, attribute):
+ """ Get the value of attribute in context
+
+ Args:
+ context (str): context to search
+ attribute (str): attribute wanted
+ Returns:
+ str: The value for attribute in context
+ """
contexts = self.config['contexts']
if isinstance(contexts, dict):
return contexts[context][attribute]
else:
return next((c['context'][attribute] for c in contexts
- if c['name'] == context), None)
+ if c['name'] == context), None)
def get_user_for_context(self, context):
+ """ Get the user attribute in context
+
+ Args:
+ context (str): context to search
+ Returns:
+ str: The value for the attribute in context
+ """
return self.get_value_for_context(context, 'user')
def get_cluster_for_context(self, context):
+ """ Get the cluster attribute in context
+
+ Args:
+ context (str): context to search
+ Returns:
+ str: The value for the attribute in context
+ """
return self.get_value_for_context(context, 'cluster')
-class Util:
+ def get_namespace_for_context(self, context):
+ """ Get the namespace attribute in context
+
+ Args:
+ context (str): context to search
+ Returns:
+ str: The value for the attribute in context
+ """
+ return self.get_value_for_context(context, 'namespace')
+
+class Util(object):
+ """Utility methods"""
@staticmethod
def remove_empty_elements(mapping):
+ """ Recursively removes empty elements from a dict
+
+ Args:
+ mapping (dict): dict to remove empty attributes from
+ Returns:
+ dict: A copy of the dict with empty elements removed
+ """
if isinstance(mapping, dict):
- m = mapping.copy()
+ copy = mapping.copy()
for key, val in mapping.iteritems():
if not val:
- del m[key]
- return m
+ del copy[key]
+ return copy
else:
return mapping
-class NodeResources:
+class NodeResources(object):
+ """ Kubernetes Node Resources
+
+ Attributes:
+ resources (dict): A dictionary representing the node resources
+
+ Args:
+ version (str): kubernetes api version
+ cpu (str): string representation of the cpu resources for the node
+ memory (str): string representation of the memory resources for the
+ node
+ """
def __init__(self, version, cpu=None, memory=None):
if version == 'v1beta1':
self.resources = dict(capacity=dict())
@@ -166,10 +273,31 @@ class NodeResources:
self.resources['capacity']['memory'] = memory
def get_resources(self):
+ """ Get the dict representing the node resources
+
+ Returns:
+ dict: representation of the node resources with any empty
+ elements removed
+ """
return Util.remove_empty_elements(self.resources)
-class NodeSpec:
- def __init__(self, version, cpu=None, memory=None, cidr=None, externalID=None):
+class NodeSpec(object):
+ """ Kubernetes Node Spec
+
+ Attributes:
+ spec (dict): A dictionary representing the node resources
+
+ Args:
+ version (str): kubernetes api version
+ cpu (str): string representation of the cpu resources for the node
+ memory (str): string representation of the memory resources for the
+ node
+ cidr (str): string representation of the cidr block available for
+ the node
+ externalID (str): The external id of the node
+ """
+ def __init__(self, version, cpu=None, memory=None, cidr=None,
+ externalID=None):
if version == 'v1beta3':
self.spec = dict(podCIDR=cidr, externalID=externalID,
capacity=dict())
@@ -177,67 +305,128 @@ class NodeSpec:
self.spec['capacity']['memory'] = memory
def get_spec(self):
+ """ Get the dict representing the node spec
+
+ Returns:
+ dict: representation of the node spec with any empty elements
+ removed
+ """
return Util.remove_empty_elements(self.spec)
-class NodeStatus:
- def addAddresses(self, addressType, addresses):
- addressList = []
+class NodeStatus(object):
+ """ Kubernetes Node Status
+
+ Attributes:
+ status (dict): A dictionary representing the node status
+
+ Args:
+ version (str): kubernetes api version
+ externalIPs (list, optional): externalIPs for the node
+ internalIPs (list, optional): internalIPs for the node
+ hostnames (list, optional): hostnames for the node
+ """
+ def add_addresses(self, address_type, addresses):
+ """ Adds addresses of the specified type
+
+ Args:
+ address_type (str): address type
+ addresses (list): addresses to add
+ """
+ address_list = []
for address in addresses:
- addressList.append(dict(type=addressType, address=address))
- return addressList
+ address_list.append(dict(type=address_type, address=address))
+ return address_list
- def __init__(self, version, externalIPs = [], internalIPs = [],
- hostnames = []):
+ def __init__(self, version, externalIPs=None, internalIPs=None,
+ hostnames=None):
if version == 'v1beta3':
- self.status = dict(addresses = addAddresses('ExternalIP',
- externalIPs) +
- addAddresses('InternalIP',
- internalIPs) +
- addAddresses('Hostname',
- hostnames))
+ addresses = []
+ if externalIPs is not None:
+ addresses += self.add_addresses('ExternalIP', externalIPs)
+ if internalIPs is not None:
+ addresses += self.add_addresses('InternalIP', internalIPs)
+ if hostnames is not None:
+ addresses += self.add_addresses('Hostname', hostnames)
+
+ self.status = dict(addresses=addresses)
def get_status(self):
+ """ Get the dict representing the node status
+
+ Returns:
+ dict: representation of the node status with any empty elements
+ removed
+ """
return Util.remove_empty_elements(self.status)
-class Node:
- def __init__(self, module, client_opts, version='v1beta1', name=None,
- hostIP = None, hostnames=[], externalIPs=[], internalIPs=[],
- cpu=None, memory=None, labels=dict(), annotations=dict(),
- podCIDR=None, externalID=None):
+class Node(object):
+ """ Kubernetes Node
+
+ Attributes:
+ status (dict): A dictionary representing the node
+
+ Args:
+ module (AnsibleModule):
+ client_opts (list): client connection options
+ version (str, optional): kubernetes api version
+ node_name (str, optional): name for node
+ hostIP (str, optional): node host ip
+ hostnames (list, optional): hostnames for the node
+ externalIPs (list, optional): externalIPs for the node
+ internalIPs (list, optional): internalIPs for the node
+ cpu (str, optional): cpu resources for the node
+ memory (str, optional): memory resources for the node
+ labels (list, optional): labels for the node
+ annotations (list, optional): annotations for the node
+ podCIDR (list, optional): cidr block to use for pods
+ externalID (str, optional): external id of the node
+ """
+ def __init__(self, module, client_opts, version='v1beta1', node_name=None,
+ hostIP=None, hostnames=None, externalIPs=None,
+ internalIPs=None, cpu=None, memory=None, labels=None,
+ annotations=None, podCIDR=None, externalID=None):
self.module = module
self.client_opts = client_opts
if version == 'v1beta1':
- self.node = dict(id = name,
- kind = 'Node',
- apiVersion = version,
- hostIP = hostIP,
- resources = NodeResources(version, cpu, memory),
- cidr = podCIDR,
- labels = labels,
- annotations = annotations,
- externalID = externalID
- )
+ self.node = dict(id=node_name,
+ kind='Node',
+ apiVersion=version,
+ hostIP=hostIP,
+ resources=NodeResources(version, cpu, memory),
+ cidr=podCIDR,
+ labels=labels,
+ annotations=annotations,
+ externalID=externalID)
elif version == 'v1beta3':
- metadata = dict(name = name,
- labels = labels,
- annotations = annotations
- )
- self.node = dict(kind = 'Node',
- apiVersion = version,
- metadata = metadata,
- spec = NodeSpec(version, cpu, memory, podCIDR,
- externalID),
- status = NodeStatus(version, externalIPs,
- internalIPs, hostnames),
- )
+ metadata = dict(name=node_name,
+ labels=labels,
+ annotations=annotations)
+ self.node = dict(kind='Node',
+ apiVersion=version,
+ metadata=metadata,
+ spec=NodeSpec(version, cpu, memory, podCIDR,
+ externalID),
+ status=NodeStatus(version, externalIPs,
+ internalIPs, hostnames))
def get_name(self):
+ """ Get the name for the node
+
+ Returns:
+ str: node name
+ """
if self.node['apiVersion'] == 'v1beta1':
return self.node['id']
elif self.node['apiVersion'] == 'v1beta3':
return self.node['name']
def get_node(self):
+ """ Get the dict representing the node
+
+ Returns:
+ dict: representation of the node with any empty elements
+ removed
+ """
node = self.node.copy()
if self.node['apiVersion'] == 'v1beta1':
node['resources'] = self.node['resources'].get_resources()
@@ -247,52 +436,82 @@ class Node:
return Util.remove_empty_elements(node)
def exists(self):
- _, output, error = self.module.run_command(["/usr/bin/osc", "get",
- "nodes"] + self.client_opts,
- check_rc = True)
+ """ Tests if the node already exists
+
+ Returns:
+ bool: True if node exists, otherwise False
+ """
+ kubectl = self.module.params['kubectl_cmd']
+ _, output, _ = self.module.run_command((kubectl + ["get", "nodes"] +
+ self.client_opts),
+ check_rc=True)
if re.search(self.module.params['name'], output, re.MULTILINE):
return True
return False
def create(self):
- cmd = ['/usr/bin/osc'] + self.client_opts + ['create', 'node', '-f', '-']
- rc, output, error = self.module.run_command(cmd,
- data=self.module.jsonify(self.get_node()))
- if rc != 0:
+ """ Creates the node
+
+ Returns:
+ bool: True if node creation successful
+ """
+ kubectl = self.module.params['kubectl_cmd']
+ cmd = kubectl + self.client_opts + ['create', '-f', '-']
+ exit_code, output, error = self.module.run_command(
+ cmd, data=self.module.jsonify(self.get_node())
+ )
+ if exit_code != 0:
if re.search("minion \"%s\" already exists" % self.get_name(),
error):
- self.module.exit_json(changed=False,
- msg="node definition already exists",
- node=self.get_node())
+ self.module.exit_json(msg="node definition already exists",
+ changed=False, node=self.get_node())
else:
- self.module.fail_json(msg="Node creation failed.", rc=rc,
- output=output, error=error,
- node=self.get_node())
+ self.module.fail_json(msg="Node creation failed.",
+ exit_code=exit_code,
+ output=output, error=error,
+ node=self.get_node())
else:
return True
def main():
+ """ main """
module = AnsibleModule(
- argument_spec = dict(
- name = dict(required = True, type = 'str'),
- host_ip = dict(type = 'str'),
- hostnames = dict(type = 'list', default = []),
- external_ips = dict(type = 'list', default = []),
- internal_ips = dict(type = 'list', default = []),
- api_version = dict(type = 'str', default = 'v1beta1', # TODO: after kube rebase, we can default to v1beta3
- choices = ['v1beta1', 'v1beta3']),
- cpu = dict(type = 'str'),
- memory = dict(type = 'str'),
- labels = dict(type = 'dict', default = {}), # TODO: needs documented
- annotations = dict(type = 'dict', default = {}), # TODO: needs documented
- pod_cidr = dict(type = 'str'), # TODO: needs documented
- external_id = dict(type = 'str'), # TODO: needs documented
- client_config = dict(type = 'str'), # TODO: needs documented
- client_cluster = dict(type = 'str', default = 'master'), # TODO: needs documented
- client_context = dict(type = 'str', default = 'master'), # TODO: needs documented
- client_user = dict(type = 'str', default = 'admin') # TODO: needs documented
+ argument_spec=dict(
+ name=dict(required=True, type='str'),
+ host_ip=dict(type='str'),
+ hostnames=dict(type='list', default=[]),
+ external_ips=dict(type='list', default=[]),
+ internal_ips=dict(type='list', default=[]),
+ api_version=dict(type='str', default='v1beta1',
+ choices=['v1beta1', 'v1beta3']),
+ cpu=dict(type='str'),
+ memory=dict(type='str'),
+ # TODO: needs documented
+ labels=dict(type='dict', default={}),
+ # TODO: needs documented
+ annotations=dict(type='dict', default={}),
+ # TODO: needs documented
+ pod_cidr=dict(type='str'),
+ # TODO: needs documented
+ external_id=dict(type='str'),
+ # TODO: needs documented
+ client_config=dict(type='str'),
+ # TODO: needs documented
+ client_cluster=dict(type='str', default='master'),
+ # TODO: needs documented
+ client_context=dict(type='str', default='default'),
+ # TODO: needs documented
+ client_namespace=dict(type='str', default='default'),
+ # TODO: needs documented
+ client_user=dict(type='str', default='system:openshift-client'),
+ # TODO: needs documented
+ kubectl_cmd=dict(type='list', default=['kubectl']),
+ # TODO: needs documented
+ kubeconfig_flag=dict(type='str'),
+ # TODO: needs documented
+ default_client_config=dict(type='str')
),
- mutually_exclusive = [
+ mutually_exclusive=[
['host_ip', 'external_ips'],
['host_ip', 'internal_ips'],
['host_ip', 'hostnames'],
@@ -300,7 +519,10 @@ def main():
supports_check_mode=True
)
- user_has_client_config = os.path.exists(os.path.expanduser('~/.kube/.kubeconfig'))
+ client_config = '~/.kube/.kubeconfig'
+ if 'default_client_config' in module.params:
+ client_config = module.params['default_client_config']
+ user_has_client_config = os.path.exists(os.path.expanduser(client_config))
if not (user_has_client_config or module.params['client_config']):
module.fail_json(msg="Could not locate client configuration, "
"client_config must be specified if "
@@ -308,12 +530,17 @@ def main():
client_opts = []
if module.params['client_config']:
- client_opts.append("--kubeconfig=%s" % module.params['client_config'])
+ kubeconfig_flag = '--kubeconfig'
+ if 'kubeconfig_flag' in module.params:
+ kubeconfig_flag = module.params['kubeconfig_flag']
+ client_opts.append(kubeconfig_flag + '=' +
+ os.path.expanduser(module.params['client_config']))
try:
config = ClientConfig(client_opts, module)
- except ClientConfigException as e:
- module.fail_json(msg="Failed to get client configuration", exception=e)
+ except ClientConfigException as ex:
+ module.fail_json(msg="Failed to get client configuration",
+ exception=str(ex))
client_context = module.params['client_context']
if config.has_context(client_context):
@@ -333,14 +560,16 @@ def main():
client_cluster = module.params['client_cluster']
if config.has_cluster(client_cluster):
- if client_cluster != config.get_cluster_for_context(client_cluster):
+ if client_cluster != config.get_cluster_for_context(client_context):
client_opts.append("--cluster=%s" % client_cluster)
else:
module.fail_json(msg="Cluster %s not found in client config" %
client_cluster)
- # TODO: provide sane defaults for some (like hostname, externalIP,
- # internalIP, etc)
+ client_namespace = module.params['client_namespace']
+ if client_namespace != config.get_namespace_for_context(client_context):
+ client_opts.append("--namespace=%s" % client_namespace)
+
node = Node(module, client_opts, module.params['api_version'],
module.params['name'], module.params['host_ip'],
module.params['hostnames'], module.params['external_ips'],
@@ -364,7 +593,8 @@ def main():
module.fail_json(msg="Unknown error creating node",
node=node.get_node())
-
+# ignore pylint errors related to the module_utils import
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
diff --git a/roles/openshift_register_nodes/tasks/main.yml b/roles/openshift_register_nodes/tasks/main.yml
index 7319b88b1..d4d72d126 100644
--- a/roles/openshift_register_nodes/tasks/main.yml
+++ b/roles/openshift_register_nodes/tasks/main.yml
@@ -3,53 +3,44 @@
# TODO: recreate master/node configs if settings that affect the configs
# change (hostname, public_hostname, ip, public_ip, etc)
-# TODO: create a failed_when condition
-- name: Create node server certificates
- command: >
- /usr/bin/openshift admin create-server-cert
- --overwrite=false
- --cert={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}/server.crt
- --key={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}/server.key
- --hostnames={{ [item.openshift.common.hostname,
- item.openshift.common.public_hostname]|unique|join(",") }}
- args:
- chdir: "{{ openshift_cert_dir_parent }}"
- creates: "{{ openshift_cert_dir_abs }}/node-{{ item.openshift.common.hostname }}/server.crt"
- with_items: openshift_nodes
- register: server_cert_result
+# TODO: use a template lookup here
# TODO: create a failed_when condition
-- name: Create node client certificates
- command: >
- /usr/bin/openshift admin create-node-cert
- --overwrite=false
- --cert={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}/cert.crt
- --key={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}/key.key
- --node-name={{ item.openshift.common.hostname }}
- args:
- chdir: "{{ openshift_cert_dir_parent }}"
- creates: "{{ openshift_cert_dir_abs }}/node-{{ item.openshift.common.hostname }}/cert.crt"
- with_items: openshift_nodes
- register: node_cert_result
+- name: Use enterprise default for openshift_registry_url if not set
+ set_fact:
+ openshift_registry_url: "openshift3_beta/ose-${component}:${version}"
+ when: openshift.common.deployment_type == 'enterprise' and openshift_registry_url is not defined
-# TODO: create a failed_when condition
-- name: Create kubeconfigs for nodes
+- name: Use online default for openshift_registry_url if not set
+ set_fact:
+ openshift_registry_url: "docker-registry.ops.rhcloud.com/openshift3_beta/ose-${component}:${version}"
+ when: openshift.common.deployment_type == 'online' and openshift_registry_url is not defined
+
+- name: Create node config
command: >
- /usr/bin/openshift admin create-kubeconfig
- --client-certificate={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}/cert.crt
- --client-key={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}/key.key
- --kubeconfig={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}/.kubeconfig
- --master={{ openshift.master.api_url }}
- --public-master={{ openshift.master.public_api_url }}
+ /usr/bin/openshift admin create-node-config
+ --node-dir={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}
+ --node={{ item.openshift.common.hostname }}
+ --hostnames={{ [item.openshift.common.hostname, item.openshift.common.public_hostname]|unique|join(",") }}
+ --dns-domain={{ openshift.dns.domain }}
+ --dns-ip={{ openshift.dns.ip }}
+ --master={{ openshift.master.api_url }}
+ --signer-key={{ openshift_master_ca_key }}
+ --signer-cert={{ openshift_master_ca_cert }}
+ --certificate-authority={{ openshift_master_ca_cert }}
+ --signer-serial={{ openshift_master_ca_dir }}/serial.txt
+ --node-client-certificate-authority={{ openshift_master_ca_cert }}
+ {{ ('--images=' ~ openshift_registry_url) if openshift_registry_url is defined else '' }}
+ --listen=https://0.0.0.0:10250
args:
- chdir: "{{ openshift_cert_dir_parent }}"
- creates: "{{ openshift_cert_dir_abs }}/node-{{ item.openshift.common.hostname }}/.kubeconfig"
+ chdir: "{{ openshift_cert_parent_dir }}"
+ creates: "{{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}"
with_items: openshift_nodes
- register: kubeconfig_result
- name: Register unregistered nodes
kubernetes_register_node:
- client_user: openshift-client
+ kubectl_cmd: ['osc']
+ default_client_config: '~/.config/openshift/.config'
name: "{{ item.openshift.common.hostname }}"
api_version: "{{ openshift_kube_api_version }}"
cpu: "{{ item.openshift.node.resources_cpu | default(None) }}"
@@ -61,7 +52,5 @@
external_id: "{{ item.openshift.node.external_id }}"
# TODO: support customizing other attributes such as: client_config,
# client_cluster, client_context, client_user
- # TODO: update for v1beta3 changes after rebase: hostnames, external_ips,
- # internal_ips, external_id
with_items: openshift_nodes
register: register_result
diff --git a/roles/openshift_register_nodes/vars/main.yml b/roles/openshift_register_nodes/vars/main.yml
new file mode 100644
index 000000000..bd497f08f
--- /dev/null
+++ b/roles/openshift_register_nodes/vars/main.yml
@@ -0,0 +1,7 @@
+---
+openshift_cert_parent_dir: /var/lib/openshift
+openshift_cert_relative_dir: openshift.local.certificates
+openshift_cert_dir: "{{ openshift_cert_parent_dir }}/{{ openshift_cert_relative_dir }}"
+openshift_master_ca_dir: "{{ openshift_cert_dir }}/ca"
+openshift_master_ca_cert: "{{ openshift_master_ca_dir }}/cert.crt"
+openshift_master_ca_key: "{{ openshift_master_ca_dir }}/key.key"
diff --git a/roles/openshift_repos/README.md b/roles/openshift_repos/README.md
index 6713e11fc..6bbedd839 100644
--- a/roles/openshift_repos/README.md
+++ b/roles/openshift_repos/README.md
@@ -14,7 +14,7 @@ Role Variables
| Name | Default value | |
|-------------------------------|---------------|----------------------------------------------|
-| openshift_deployment_type | online | Possible values enterprise, origin, online |
+| openshift_deployment_type | None | Possible values enterprise, origin, online |
| openshift_additional_repos | {} | TODO |
Dependencies
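Because openshift_deployment_type no longer has a default (the defaults/main.yaml change below removes the old "online" value), callers now have to set it explicitly. A minimal sketch, assuming a conventional inventory group_vars file (the file path and the chosen value are illustrative, not part of this commit):

    # group_vars/all.yml (hypothetical)
    openshift_deployment_type: enterprise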
diff --git a/roles/openshift_repos/defaults/main.yaml b/roles/openshift_repos/defaults/main.yaml
index 1730207f4..7c5a14cd7 100644
--- a/roles/openshift_repos/defaults/main.yaml
+++ b/roles/openshift_repos/defaults/main.yaml
@@ -1,7 +1,2 @@
---
-# TODO: once we are able to configure/deploy origin using the openshift roles,
-# then we should default to origin
-
-# TODO: push the defaulting of these values to the openshift_facts module
-openshift_deployment_type: online
openshift_additional_repos: {}
diff --git a/roles/openshift_repos/files/online/RPM-GPG-KEY-redhat-beta b/roles/openshift_repos/files/online/RPM-GPG-KEY-redhat-beta
deleted file mode 100644
index 7b40671a4..000000000
--- a/roles/openshift_repos/files/online/RPM-GPG-KEY-redhat-beta
+++ /dev/null
@@ -1,61 +0,0 @@
------BEGIN PGP PUBLIC KEY BLOCK-----
-Version: GnuPG v1.2.6 (GNU/Linux)
-
-mQINBEmkAzABEAC2/c7bP1lHQ3XScxbIk0LQWe1YOiibQBRLwf8Si5PktgtuPibT
-kKpZjw8p4D+fM7jD1WUzUE0X7tXg2l/eUlMM4dw6XJAQ1AmEOtlwSg7rrMtTvM0A
-BEtI7Km6fC6sU6RtBMdcqD1cH/6dbsfh8muznVA7UlX+PRBHVzdWzj6y8h84dBjo
-gzcbYu9Hezqgj/lLzicqsSZPz9UdXiRTRAIhp8V30BD8uRaaa0KDDnD6IzJv3D9P
-xQWbFM4Z12GN9LyeZqmD7bpKzZmXG/3drvfXVisXaXp3M07t3NlBa3Dt8NFIKZ0D
-FRXBz5bvzxRVmdH6DtkDWXDPOt+Wdm1rZrCOrySFpBZQRpHw12eo1M1lirANIov7
-Z+V1Qh/aBxj5EUu32u9ZpjAPPNtQF6F/KjaoHHHmEQAuj4DLex4LY646Hv1rcv2i
-QFuCdvLKQGSiFBrfZH0j/IX3/0JXQlZzb3MuMFPxLXGAoAV9UP/Sw/WTmAuTzFVm
-G13UYFeMwrToOiqcX2VcK0aC1FCcTP2z4JW3PsWvU8rUDRUYfoXovc7eg4Vn5wHt
-0NBYsNhYiAAf320AUIHzQZYi38JgVwuJfFu43tJZE4Vig++RQq6tsEx9Ftz3EwRR
-fJ9z9mEvEiieZm+vbOvMvIuimFVPSCmLH+bI649K8eZlVRWsx3EXCVb0nQARAQAB
-tDBSZWQgSGF0LCBJbmMuIChiZXRhIGtleSAyKSA8c2VjdXJpdHlAcmVkaGF0LmNv
-bT6JAjYEEwECACAFAkpSM+cCGwMGCwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRCT
-ioDK8hVB6/9tEAC0+KmzeKceXQ/GTUoU6jy9vtkFCFrmv+c7ol4XpdTt0QhqBOwy
-6m2mKWwmm8KfYfy0cADQ4y/EcoXl7FtFBwYmkCuEQGXhTDn9DvVjhooIq59LEMBQ
-OW879RwwzRIZ8ebbjMUjDPF5MfPQqP2LBu9N4KvXlZp4voykwuuaJ+cbsKZR6pZ6
-0RQKPHKP+NgUFC0fff7XY9cuOZZWFAeKRhLN2K7bnRHKxp+kELWb6R9ZfrYwZjWc
-MIPbTd1khE53L4NTfpWfAnJRtkPSDOKEGVlVLtLq4HEAxQt07kbslqISRWyXER3u
-QOJj64D1ZiIMz6t6uZ424VE4ry9rBR0Jz55cMMx5O/ni9x3xzFUgH8Su2yM0r3jE
-Rf24+tbOaPf7tebyx4OKe+JW95hNVstWUDyGbs6K9qGfI/pICuO1nMMFTo6GqzQ6
-DwLZvJ9QdXo7ujEtySZnfu42aycaQ9ZLC2DOCQCUBY350Hx6FLW3O546TAvpTfk0
-B6x+DV7mJQH7MGmRXQsE7TLBJKjq28Cn4tVp04PmybQyTxZdGA/8zY6pPl6xyVMH
-V68hSBKEVT/rlouOHuxfdmZva1DhVvUC6Xj7+iTMTVJUAq/4Uyn31P1OJmA2a0PT
-CAqWkbJSgKFccsjPoTbLyxhuMSNkEZFHvlZrSK9vnPzmfiRH0Orx3wYpMQ==
-=21pb
------END PGP PUBLIC KEY BLOCK-----
-The following public key can be used to verify RPM packages built and
-signed by Red Hat, Inc. for this beta using `rpm -K' using the GNU GPG
-package. Questions about this key should be sent to security@redhat.com.
-
-
------BEGIN PGP PUBLIC KEY BLOCK-----
-Version: GnuPG v1.0.6 (GNU/Linux)
-Comment: For info see http://www.gnupg.org
-
-mQGiBDySTqsRBACzc7xuCIp10oj5B2PAV4XzDeVxprv/WTMreSNSK+iC0bEz0IBp
-Vnn++qtyiXfH+bGIE9jqZgIEnpttWhUOaU5LhcLFzy+m8NWfngIFP9QfGmGAe9Gd
-LFeAdhj4RmSG/vgr7vDd83Hz22dv403Ar/sliWO4vDOrMmZBG57WGYTWtwCgkMsi
-UUQuJ6slbzKn82w+bYxOlL0EAIylWJGaTkKOTL5DqVR3ik9aT0Dt3FNVYiuhcKBe
-II4E3KOIVA9kO8in1IZjx2gs6K2UV+GsoAVANdfKL7l9O+k+J8OxhE74oycvYJxW
-QzCgXMZkNcvW5wyXwEMcr6TVd/5BGztcMw8oT3/l2MtAEG/vn1XaWToRSO1XDMDz
-+AjUA/4m0mTkN8S4wjzJG8lqN7+quW3UOaiCe8J3SFrrrhE0XbY9cTJI/9nuXHU1
-VjqOSmXQYH2Db7UOroFTBiWhlAedA4O4yuK52AJnvSsHbnJSEmn9rpo5z1Q8F+qI
-mDlzriJdrIrVLeDiUeTlpH3kpG38D7007GhXBV72k1gpMoMcpbQ3UmVkIEhhdCwg
-SW5jLiAoQmV0YSBUZXN0IFNvZnR3YXJlKSA8cmF3aGlkZUByZWRoYXQuY29tPohX
-BBMRAgAXBQI8l5p/BQsHCgMEAxUDAgMWAgECF4AACgkQ/TcmiYl9oHqdeQCfZjw4
-F9sir3XfRAjVe9kYNcQ8hnIAn0WgyT7H5RriWYTOCfauOmd+cAW4iEYEEBECAAYF
-AjyXmqQACgkQIZGAzdtCpg5nDQCfepuRUyuVJvhuQkPWySETYvRw+WoAnjAWhx6q
-0npMx4OE1JGFi8ymKXktuQENBDySTq4QBADKL/mK7S8E3synxISlu7R6fUvu07Oc
-RoX96n0Di6T+BS99hC44XzHjMDhUX2ZzVvYS88EZXoUDDkB/8g7SwZrOJ/QE1zrI
-JmSVciNhSYWwqeT40Evs88ajZUfDiNbS/cSC6oui98iS4vxd7sE7IPY+FSx9vuAR
-xOa9vBnJY/dx0wADBQQAosm+Iltt2uigC6LJzxNOoIdB5r0GqTC1o5sHCeNqXJhU
-ExAG8m74uzMlYVLOpGZi4y4NwwAWvCWC0MWWnnu+LGFy1wKiJKRjhv5F+WkFutY5
-WHV5L44vp9jSIlBCRG+84jheTh8xqhndM9wOfPwWdYYu1vxrB8Tn6kA17PcYfHSI
-RgQYEQIABgUCPJJergAKCRD9NyaJiX2geiCPAJ4nEM4NtI9Uj8lONDk6FU86PmoL
-yACfb68fBd2pWEzLKsOk9imIobHHpzE=
-=gpIn
------END PGP PUBLIC KEY BLOCK-----
diff --git a/roles/openshift_repos/files/online/RPM-GPG-KEY-redhat-release b/roles/openshift_repos/files/online/RPM-GPG-KEY-redhat-release
deleted file mode 100644
index 0f83b622d..000000000
--- a/roles/openshift_repos/files/online/RPM-GPG-KEY-redhat-release
+++ /dev/null
@@ -1,63 +0,0 @@
-The following public key can be used to verify RPM packages built and
-signed by Red Hat, Inc. This key is used for packages in Red Hat
-products shipped after November 2009, and for all updates to those
-products.
-
-Questions about this key should be sent to security@redhat.com.
-
-pub 4096R/FD431D51 2009-10-22 Red Hat, Inc. (release key 2) <security@redhat.com>
-
------BEGIN PGP PUBLIC KEY BLOCK-----
-Version: GnuPG v1.2.6 (GNU/Linux)
-
-mQINBErgSTsBEACh2A4b0O9t+vzC9VrVtL1AKvUWi9OPCjkvR7Xd8DtJxeeMZ5eF
-0HtzIG58qDRybwUe89FZprB1ffuUKzdE+HcL3FbNWSSOXVjZIersdXyH3NvnLLLF
-0DNRB2ix3bXG9Rh/RXpFsNxDp2CEMdUvbYCzE79K1EnUTVh1L0Of023FtPSZXX0c
-u7Pb5DI5lX5YeoXO6RoodrIGYJsVBQWnrWw4xNTconUfNPk0EGZtEnzvH2zyPoJh
-XGF+Ncu9XwbalnYde10OCvSWAZ5zTCpoLMTvQjWpbCdWXJzCm6G+/hx9upke546H
-5IjtYm4dTIVTnc3wvDiODgBKRzOl9rEOCIgOuGtDxRxcQkjrC+xvg5Vkqn7vBUyW
-9pHedOU+PoF3DGOM+dqv+eNKBvh9YF9ugFAQBkcG7viZgvGEMGGUpzNgN7XnS1gj
-/DPo9mZESOYnKceve2tIC87p2hqjrxOHuI7fkZYeNIcAoa83rBltFXaBDYhWAKS1
-PcXS1/7JzP0ky7d0L6Xbu/If5kqWQpKwUInXtySRkuraVfuK3Bpa+X1XecWi24JY
-HVtlNX025xx1ewVzGNCTlWn1skQN2OOoQTV4C8/qFpTW6DTWYurd4+fE0OJFJZQF
-buhfXYwmRlVOgN5i77NTIJZJQfYFj38c/Iv5vZBPokO6mffrOTv3MHWVgQARAQAB
-tDNSZWQgSGF0LCBJbmMuIChyZWxlYXNlIGtleSAyKSA8c2VjdXJpdHlAcmVkaGF0
-LmNvbT6JAjYEEwECACAFAkrgSTsCGwMGCwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAK
-CRAZni+R/UMdUWzpD/9s5SFR/ZF3yjY5VLUFLMXIKUztNN3oc45fyLdTI3+UClKC
-2tEruzYjqNHhqAEXa2sN1fMrsuKec61Ll2NfvJjkLKDvgVIh7kM7aslNYVOP6BTf
-C/JJ7/ufz3UZmyViH/WDl+AYdgk3JqCIO5w5ryrC9IyBzYv2m0HqYbWfphY3uHw5
-un3ndLJcu8+BGP5F+ONQEGl+DRH58Il9Jp3HwbRa7dvkPgEhfFR+1hI+Btta2C7E
-0/2NKzCxZw7Lx3PBRcU92YKyaEihfy/aQKZCAuyfKiMvsmzs+4poIX7I9NQCJpyE
-IGfINoZ7VxqHwRn/d5mw2MZTJjbzSf+Um9YJyA0iEEyD6qjriWQRbuxpQXmlAJbh
-8okZ4gbVFv1F8MzK+4R8VvWJ0XxgtikSo72fHjwha7MAjqFnOq6eo6fEC/75g3NL
-Ght5VdpGuHk0vbdENHMC8wS99e5qXGNDued3hlTavDMlEAHl34q2H9nakTGRF5Ki
-JUfNh3DVRGhg8cMIti21njiRh7gyFI2OccATY7bBSr79JhuNwelHuxLrCFpY7V25
-OFktl15jZJaMxuQBqYdBgSay2G0U6D1+7VsWufpzd/Abx1/c3oi9ZaJvW22kAggq
-dzdA27UUYjWvx42w9menJwh/0jeQcTecIUd0d0rFcw/c1pvgMMl/Q73yzKgKYw==
-=zbHE
------END PGP PUBLIC KEY BLOCK-----
-The following public key can be used to verify RPM packages built and
-signed by Red Hat, Inc. This key is a supporting (auxiliary) key for
-Red Hat products shipped after November 2006 and for all updates to
-those products.
-
-Questions about this key should be sent to security@redhat.com.
-
------BEGIN PGP PUBLIC KEY BLOCK-----
-Version: GnuPG v1.2.6 (GNU/Linux)
-
-mQGiBEVwDGkRBACwPhZIpvkjI8wV9sFTDoqyPLx1ub8Sd/w+YuI5Ovm49mvvEQVT
-VLg8FgE5JlST59AbsLDyVtRa9CxIvN5syBVrWWWtHtDnnylFBcqG/A6J3bI4E9/A
-UtSL5Zxbav0+utP6f3wOpxQrxc+WIDVgpurdBKAQ3dsobGBqypeX6FXZ5wCgou6C
-yZpGIBqosJaDWLzNeOfb/70D/1thLkQyhW3JJ6cHCYJHNfBShvbLWBf6S231mgmu
-MyMlt8Kmipc9bw+saaAkSkVsQ/ZbfjrWB7e5kbMruKLVrH+nGhamlHYUGyAPtsPg
-Uj/NUSj5BmrCsOkMpn43ngTLssE9MLhSPj2nIHGFv9B+iVLvomDdwnaBRgQ1aK8z
-z6MAA/406yf5yVJ/MlTWs1/68VwDhosc9BtU1V5IE0NXgZUAfBJzzfVzzKQq6zJ2
-eZsMLhr96wbsW13zUZt1ing+ulwh2ee4meuJq6h/971JspFY/XBhcfq4qCNqVjsq
-SZnWoGdCO6J8CxPIemD2IUHzjoyyeEj3RVydup6pcWZAmhzkKrQzUmVkIEhhdCwg
-SW5jLiAoYXV4aWxpYXJ5IGtleSkgPHNlY3VyaXR5QHJlZGhhdC5jb20+iF4EExEC
-AB4FAkVwDGkCGwMGCwkIBwMCAxUCAwMWAgECHgECF4AACgkQRWiciC+mWOC1rQCg
-ooNLCFOzNPcvhd9Za8C801HmnsYAniCw3yzrCqtjYnxDDxlufH0FVTwX
-=d/bm
------END PGP PUBLIC KEY BLOCK-----
-
diff --git a/roles/openshift_repos/files/online/epel7-kubernetes.repo b/roles/openshift_repos/files/online/epel7-kubernetes.repo
deleted file mode 100644
index 1deae2939..000000000
--- a/roles/openshift_repos/files/online/epel7-kubernetes.repo
+++ /dev/null
@@ -1,6 +0,0 @@
-[maxamillion-epel7-kubernetes]
-name=Copr repo for epel7-kubernetes owned by maxamillion
-baseurl=http://copr-be.cloud.fedoraproject.org/results/maxamillion/epel7-kubernetes/epel-7-$basearch/
-skip_if_unavailable=True
-gpgcheck=0
-enabled=1
diff --git a/roles/openshift_repos/files/online/epel7-openshift.repo b/roles/openshift_repos/files/online/epel7-openshift.repo
deleted file mode 100644
index c7629872d..000000000
--- a/roles/openshift_repos/files/online/epel7-openshift.repo
+++ /dev/null
@@ -1,6 +0,0 @@
-[maxamillion-origin-next]
-name=Copr repo for origin-next owned by maxamillion
-baseurl=http://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/
-skip_if_unavailable=False
-gpgcheck=0
-enabled=1
diff --git a/roles/openshift_repos/files/online/oso-rhui-rhel-7-extras.repo b/roles/openshift_repos/files/online/oso-rhui-rhel-7-extras.repo
deleted file mode 100644
index cfe41f691..000000000
--- a/roles/openshift_repos/files/online/oso-rhui-rhel-7-extras.repo
+++ /dev/null
@@ -1,23 +0,0 @@
-[oso-rhui-rhel-server-extras]
-name=OpenShift Online RHUI Mirror RH Enterprise Linux - Extras
-baseurl=https://gce-mirror1.ops.rhcloud.com/libra/rhui-rhel-server-7-extras/
- https://mirror.ops.rhcloud.com/libra/rhui-rhel-server-7-extras/
-enabled=1
-gpgcheck=1
-gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release,file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-beta
-failovermethod=priority
-sslverify=False
-sslclientcert=/var/lib/yum/client-cert.pem
-sslclientkey=/var/lib/yum/client-key.pem
-
-[oso-rhui-rhel-server-extras-htb]
-name=OpenShift Online RHUI Mirror RH Enterprise Linux - Extras HTB
-baseurl=https://gce-mirror1.ops.rhcloud.com/libra/rhui-rhel-server-7-extras-htb/
- https://mirror.ops.rhcloud.com/libra/rhui-rhel-server-7-extras-htb/
-enabled=0
-gpgcheck=1
-gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release,file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-beta
-failovermethod=priority
-sslverify=False
-sslclientcert=/var/lib/yum/client-cert.pem
-sslclientkey=/var/lib/yum/client-key.pem
diff --git a/roles/openshift_repos/files/online/oso-rhui-rhel-7-server.repo b/roles/openshift_repos/files/online/oso-rhui-rhel-7-server.repo
deleted file mode 100644
index ddc93193d..000000000
--- a/roles/openshift_repos/files/online/oso-rhui-rhel-7-server.repo
+++ /dev/null
@@ -1,21 +0,0 @@
-[oso-rhui-rhel-server-releases]
-name=OpenShift Online RHUI Mirror RH Enterprise Linux 7
-baseurl=https://gce-mirror1.ops.rhcloud.com/libra/rhui-rhel-server-7-releases/
- https://mirror.ops.rhcloud.com/libra/rhui-rhel-server-7-releases/
-enabled=1
-gpgcheck=1
-gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release
-sslverify=False
-sslclientcert=/var/lib/yum/client-cert.pem
-sslclientkey=/var/lib/yum/client-key.pem
-
-[oso-rhui-rhel-server-releases-optional]
-name=OpenShift Online RHUI Mirror RH Enterprise Linux 7 - Optional
-baseurl=https://gce-mirror1.ops.rhcloud.com/libra/rhui-rhel-server-7-releases-optional/
- https://mirror.ops.rhcloud.com/libra/rhui-rhel-server-7-releases-optional/
-enabled=1
-gpgcheck=1
-gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release
-sslverify=False
-sslclientcert=/var/lib/yum/client-cert.pem
-sslclientkey=/var/lib/yum/client-key.pem
diff --git a/roles/openshift_repos/files/online/repos/enterprise-v3.repo b/roles/openshift_repos/files/online/repos/enterprise-v3.repo
new file mode 100644
index 000000000..d324c142a
--- /dev/null
+++ b/roles/openshift_repos/files/online/repos/enterprise-v3.repo
@@ -0,0 +1,10 @@
+[enterprise-v3]
+name=OpenShift Enterprise Beta3
+baseurl=https://gce-mirror1.ops.rhcloud.com/libra/libra-7-ose-beta3/
+ https://mirror.ops.rhcloud.com/libra/libra-7-ose-beta3/
+enabled=1
+gpgcheck=0
+failovermethod=priority
+sslverify=False
+sslclientcert=/var/lib/yum/client-cert.pem
+sslclientkey=/var/lib/yum/client-key.pem
\ No newline at end of file
diff --git a/roles/openshift_repos/files/online/rhel-7-libra-candidate.repo b/roles/openshift_repos/files/online/repos/rhel-7-libra-candidate.repo
index b4215679f..b4215679f 100644
--- a/roles/openshift_repos/files/online/rhel-7-libra-candidate.repo
+++ b/roles/openshift_repos/files/online/repos/rhel-7-libra-candidate.repo
diff --git a/roles/openshift_repos/files/origin/repos/maxamillion-origin-next-epel-7.repo b/roles/openshift_repos/files/origin/repos/maxamillion-origin-next-epel-7.repo
new file mode 100644
index 000000000..0b21e0a65
--- /dev/null
+++ b/roles/openshift_repos/files/origin/repos/maxamillion-origin-next-epel-7.repo
@@ -0,0 +1,7 @@
+[maxamillion-origin-next]
+name=Copr repo for origin-next owned by maxamillion
+baseurl=https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/
+skip_if_unavailable=True
+gpgcheck=1
+gpgkey=https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg
+enabled=1
diff --git a/roles/openshift_repos/files/removed/repos/epel7-openshift.repo b/roles/openshift_repos/files/removed/repos/epel7-openshift.repo
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/roles/openshift_repos/files/removed/repos/epel7-openshift.repo
diff --git a/roles/openshift_repos/files/removed/repos/oso-rhui-rhel-7-extras.repo b/roles/openshift_repos/files/removed/repos/oso-rhui-rhel-7-extras.repo
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/roles/openshift_repos/files/removed/repos/oso-rhui-rhel-7-extras.repo
diff --git a/roles/openshift_repos/files/removed/repos/oso-rhui-rhel-7-server.repo b/roles/openshift_repos/files/removed/repos/oso-rhui-rhel-7-server.repo
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/roles/openshift_repos/files/removed/repos/oso-rhui-rhel-7-server.repo
diff --git a/roles/openshift_repos/tasks/main.yaml b/roles/openshift_repos/tasks/main.yaml
index bb1551d37..12e98b7a1 100644
--- a/roles/openshift_repos/tasks/main.yaml
+++ b/roles/openshift_repos/tasks/main.yaml
@@ -10,10 +10,6 @@
- assert:
that: openshift_deployment_type in known_openshift_deployment_types
-# TODO: remove this when origin support actually works
-- fail: msg="OpenShift Origin support is not currently enabled"
- when: openshift_deployment_type == 'origin'
-
- name: Ensure libselinux-python is installed
yum:
pkg: libselinux-python
@@ -36,17 +32,15 @@
path: "/etc/yum.repos.d/{{ item | basename }}"
state: absent
with_fileglob:
- - '*/*'
- when: not (item | search("/files/" + openshift_deployment_type + "/")) and (item | search(".repo$"))
+ - '*/repos/*'
+ when: not (item | search("/files/" ~ openshift_deployment_type ~ "/repos"))
- name: Configure gpg keys if needed
copy: src={{ item }} dest=/etc/pki/rpm-gpg/
with_fileglob:
- - "{{ openshift_deployment_type }}/*"
- when: item | basename | match("RPM-GPG-KEY-")
+ - "{{ openshift_deployment_type }}/gpg_keys/*"
- name: Configure yum repositories
copy: src={{ item }} dest=/etc/yum.repos.d/
with_fileglob:
- - "{{ openshift_deployment_type }}/*"
- when: item | basename | search(".*\.repo$")
+ - "{{ openshift_deployment_type }}/repos/*"
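With this change the role expects per-deployment-type subdirectories under files/: repo files live in repos/ and GPG keys in gpg_keys/, and the cleanup task only removes files matching */repos/* that do not belong to the active deployment type. A sketch of the layout these globs assume, using file names that appear elsewhere in this diff (the exact contents of the gpg_keys directories are assumed):

    roles/openshift_repos/files/
        online/
            gpg_keys/RPM-GPG-KEY-redhat-release
            repos/enterprise-v3.repo
            repos/rhel-7-libra-candidate.repo
        origin/
            repos/maxamillion-origin-next-epel-7.repo
        removed/
            repos/epel7-openshift.repo
            repos/oso-rhui-rhel-7-extras.repo
            repos/oso-rhui-rhel-7-server.repo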
diff --git a/roles/openshift_repos/templates/yum_repo.j2 b/roles/openshift_repos/templates/yum_repo.j2
index 7ea2c7460..2d9243545 100644
--- a/roles/openshift_repos/templates/yum_repo.j2
+++ b/roles/openshift_repos/templates/yum_repo.j2
@@ -1,4 +1,3 @@
-# {{ ansible_managed }}
{% for repo in openshift_additional_repos %}
[{{ repo.id }}]
name={{ repo.name | default(repo.id) }}
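The template renders one [id] section per entry in openshift_additional_repos, with name falling back to the id when it is not given. A minimal sketch of the variable the loop expects; the baseurl key and all values are illustrative, since only the first lines of the template appear in this hunk:

    openshift_additional_repos:
    - id: example-extra
      name: Example Extra Repo
      baseurl: https://repo.example.com/extra/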
diff --git a/roles/openshift_sdn_master/tasks/main.yml b/roles/openshift_sdn_master/tasks/main.yml
index f2d61043b..77e7a80ba 100644
--- a/roles/openshift_sdn_master/tasks/main.yml
+++ b/roles/openshift_sdn_master/tasks/main.yml
@@ -12,12 +12,21 @@
yum:
pkg: openshift-sdn-master
state: installed
+ register: install_result
+- name: Reload systemd units
+ command: systemctl daemon-reload
+ when: install_result | changed
+
+# TODO: we should probably generate certs specifically for sdn
- name: Configure openshift-sdn-master settings
lineinfile:
dest: /etc/sysconfig/openshift-sdn-master
regexp: '^OPTIONS='
- line: "OPTIONS=\"-v={{ openshift.master_sdn.debug_level }}\""
+ line: "OPTIONS=\"-v={{ openshift.master_sdn.debug_level }} -etcd-endpoints={{ openshift_sdn_master_url}}
+ -etcd-cafile={{ openshift_cert_dir }}/ca/ca.crt
+ -etcd-certfile={{ openshift_cert_dir }}/openshift-client/cert.crt
+ -etcd-keyfile={{ openshift_cert_dir }}/openshift-client/key.key\""
notify:
- restart openshift-sdn-master
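Because the line value is a double-quoted YAML scalar spanning several lines, the line breaks fold into single spaces, so the whole value lands on a single OPTIONS= line in /etc/sysconfig/openshift-sdn-master. Roughly, with placeholder values standing in for the debug level, openshift_sdn_master_url, and openshift_cert_dir:

    OPTIONS="-v=0 -etcd-endpoints=https://master.example.com:4001 -etcd-cafile=/path/to/certs/ca/ca.crt -etcd-certfile=/path/to/certs/openshift-client/cert.crt -etcd-keyfile=/path/to/certs/openshift-client/key.key"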
diff --git a/roles/openshift_sdn_node/tasks/main.yml b/roles/openshift_sdn_node/tasks/main.yml
index 729c28879..37a30d019 100644
--- a/roles/openshift_sdn_node/tasks/main.yml
+++ b/roles/openshift_sdn_node/tasks/main.yml
@@ -9,9 +9,15 @@
yum:
pkg: openshift-sdn-node
state: installed
+ register: install_result
+
+- name: Reload systemd units
+ command: systemctl daemon-reload
+ when: install_result | changed
# TODO: we are specifying -hostname= for OPTIONS as a workaround for
# openshift-sdn-node not properly detecting the hostname.
+# TODO: we should probably generate certs specifically for sdn
- name: Configure openshift-sdn-node settings
lineinfile:
dest: /etc/sysconfig/openshift-sdn-node
@@ -20,17 +26,33 @@
backrefs: yes
with_items:
- regex: '^(OPTIONS=)'
- line: '\1"-v={{ openshift.node_sdn.debug_level }} -hostname={{ openshift.common.hostname }}"'
+ line: '\1"-v={{ openshift.node_sdn.debug_level }} -hostname={{ openshift.common.hostname }}
+ -etcd-cafile={{ openshift_node_cert_dir }}/ca.crt
+ -etcd-certfile={{ openshift_node_cert_dir }}/client.crt
+ -etcd-keyfile={{ openshift_node_cert_dir }}/client.key\"'
- regex: '^(MASTER_URL=)'
line: '\1"{{ openshift_sdn_master_url }}"'
- regex: '^(MINION_IP=)'
line: '\1"{{ openshift.common.ip }}"'
- # TODO lock down the insecure-registry config to a more sane value than
- # 0.0.0.0/0
- - regex: '^(DOCKER_OPTIONS=)'
- line: '\1"--insecure-registry=0.0.0.0/0 -b=lbr0 --mtu=1450 --selinux-enabled"'
notify: restart openshift-sdn-node
+- name: Ensure we aren't setting DOCKER_OPTIONS in /etc/sysconfig/openshift-sdn-node
+ lineinfile:
+ dest: /etc/sysconfig/openshift-sdn-node
+ regexp: '^DOCKER_OPTIONS='
+ state: absent
+ notify: restart openshift-sdn-node
+
+# TODO lock down the insecure-registry config to a more sane value than
+# 0.0.0.0/0
+- name: Configure docker insecure-registry setting
+ lineinfile:
+ dest: /etc/sysconfig/docker
+ regexp: INSECURE_REGISTRY=
+ line: INSECURE_REGISTRY='--insecure-registry=0.0.0.0/0'
+ notify: restart openshift-sdn-node
+
+
- name: Start and enable openshift-sdn-node
service:
name: openshift-sdn-node
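After these tasks run, the registry option lives in /etc/sysconfig/docker instead of the sdn-node sysconfig. A sketch of the resulting lines, with the master URL and node IP shown as placeholders for openshift_sdn_master_url and openshift.common.ip:

    /etc/sysconfig/openshift-sdn-node:
    MASTER_URL="https://master.example.com:4001"
    MINION_IP="10.0.0.1"

    /etc/sysconfig/docker:
    INSECURE_REGISTRY='--insecure-registry=0.0.0.0/0'

All of the lineinfile tasks notify the same restart openshift-sdn-node handler, so multiple notifications coalesce into a single service restart at the end of the play.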
diff --git a/roles/os_firewall/library/os_firewall_manage_iptables.py b/roles/os_firewall/library/os_firewall_manage_iptables.py
index 90588d2ae..1cb539a8c 100755
--- a/roles/os_firewall/library/os_firewall_manage_iptables.py
+++ b/roles/os_firewall/library/os_firewall_manage_iptables.py
@@ -1,7 +1,7 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# vim: expandtab:tabstop=4:shiftwidth=4
-
+# pylint: disable=fixme, missing-docstring
from subprocess import call, check_output
DOCUMENTATION = '''
@@ -17,6 +17,7 @@ EXAMPLES = '''
class IpTablesError(Exception):
def __init__(self, msg, cmd, exit_code, output):
+ super(IpTablesError, self).__init__(msg)
self.msg = msg
self.cmd = cmd
self.exit_code = exit_code
@@ -36,13 +37,14 @@ class IpTablesSaveError(IpTablesError):
class IpTablesCreateChainError(IpTablesError):
- def __init__(self, chain, msg, cmd, exit_code, output):
- super(IpTablesCreateChainError, self).__init__(msg, cmd, exit_code, output)
+ def __init__(self, chain, msg, cmd, exit_code, output): # pylint: disable=too-many-arguments, line-too-long
+ super(IpTablesCreateChainError, self).__init__(msg, cmd, exit_code,
+ output)
self.chain = chain
class IpTablesCreateJumpRuleError(IpTablesError):
- def __init__(self, chain, msg, cmd, exit_code, output):
+ def __init__(self, chain, msg, cmd, exit_code, output): # pylint: disable=too-many-arguments, line-too-long
super(IpTablesCreateJumpRuleError, self).__init__(msg, cmd, exit_code,
output)
self.chain = chain
@@ -51,7 +53,7 @@ class IpTablesCreateJumpRuleError(IpTablesError):
# TODO: implement rollbacks for any events that were successful and an
# exception was thrown later. for example, when the chain is created
# successfully, but the add/remove rule fails.
-class IpTablesManager:
+class IpTablesManager(object): # pylint: disable=too-many-instance-attributes
def __init__(self, module):
self.module = module
self.ip_version = module.params['ip_version']
@@ -68,10 +70,10 @@ class IpTablesManager:
try:
self.output.append(check_output(self.save_cmd,
stderr=subprocess.STDOUT))
- except subprocess.CalledProcessError as e:
+ except subprocess.CalledProcessError as ex:
raise IpTablesSaveError(
msg="Failed to save iptables rules",
- cmd=e.cmd, exit_code=e.returncode, output=e.output)
+ cmd=ex.cmd, exit_code=ex.returncode, output=ex.output)
def verify_chain(self):
if not self.chain_exists():
@@ -93,13 +95,13 @@ class IpTablesManager:
self.output.append(check_output(cmd))
self.changed = True
self.save()
- except subprocess.CalledProcessError as e:
+ except subprocess.CalledProcessError as ex:
raise IpTablesCreateChainError(
chain=self.chain,
msg="Failed to create rule for "
- "%s %s" % (self.proto, self.port),
- cmd=e.cmd, exit_code=e.returncode,
- output=e.output)
+ "%s %s" % (proto, port),
+ cmd=ex.cmd, exit_code=ex.returncode,
+ output=ex.output)
def remove_rule(self, port, proto):
rule = self.gen_rule(port, proto)
@@ -113,15 +115,15 @@ class IpTablesManager:
self.output.append(check_output(cmd))
self.changed = True
self.save()
- except subprocess.CalledProcessError as e:
- raise IpTablesRemoveChainError(
+ except subprocess.CalledProcessError as ex:
+ raise IpTablesRemoveRuleError(
chain=self.chain,
msg="Failed to remove rule for %s %s" % (proto, port),
- cmd=e.cmd, exit_code=e.returncode, output=e.output)
+ cmd=ex.cmd, exit_code=ex.returncode, output=ex.output)
def rule_exists(self, rule):
check_cmd = self.cmd + ['-C'] + rule
- return True if subprocess.call(check_cmd) == 0 else False
+ return True if call(check_cmd) == 0 else False
def gen_rule(self, port, proto):
return [self.chain, '-p', proto, '-m', 'state', '--state', 'NEW',
@@ -137,7 +139,7 @@ class IpTablesManager:
output = check_output(cmd, stderr=subprocess.STDOUT)
# break the input rules into rows and columns
- input_rules = map(lambda s: s.split(), output.split('\n'))
+ input_rules = [s.split() for s in output.split('\n')]
# Find the last numbered rule
last_rule_num = None
@@ -150,42 +152,38 @@ class IpTablesManager:
continue
last_rule_target = rule[1]
- # Raise an exception if we do not find a valid rule
- if not last_rule_num or not last_rule_target:
- raise IpTablesCreateJumpRuleError(
- chain=self.chain,
- msg="Failed to find existing %s rules" % self.jump_rule_chain,
- cmd=None, exit_code=None, output=None)
-
# Naively assume that if the last row is a REJECT rule, then
# we can insert our rule right before it, otherwise we
# assume that we can just append the rule.
- if last_rule_target == 'REJECT':
+ if (last_rule_num and last_rule_target
+ and last_rule_target == 'REJECT'):
# insert rule
- cmd = self.cmd + ['-I', self.jump_rule_chain, str(last_rule_num)]
+ cmd = self.cmd + ['-I', self.jump_rule_chain,
+ str(last_rule_num)]
else:
# append rule
cmd = self.cmd + ['-A', self.jump_rule_chain]
cmd += ['-j', self.chain]
output = check_output(cmd, stderr=subprocess.STDOUT)
- changed = True
+ self.changed = True
self.output.append(output)
self.save()
- except subprocess.CalledProcessError as e:
- if '--line-numbers' in e.cmd:
+ except subprocess.CalledProcessError as ex:
+ if '--line-numbers' in ex.cmd:
raise IpTablesCreateJumpRuleError(
chain=self.chain,
- msg="Failed to query existing %s rules to " % self.jump_rule_chain +
- "determine jump rule location",
- cmd=e.cmd, exit_code=e.returncode,
- output=e.output)
+ msg=("Failed to query existing " +
+ self.jump_rule_chain +
+ " rules to determine jump rule location"),
+ cmd=ex.cmd, exit_code=ex.returncode,
+ output=ex.output)
else:
raise IpTablesCreateJumpRuleError(
chain=self.chain,
- msg="Failed to create jump rule for chain %s" %
- self.chain,
- cmd=e.cmd, exit_code=e.returncode,
- output=e.output)
+ msg=("Failed to create jump rule for chain " +
+ self.chain),
+ cmd=ex.cmd, exit_code=ex.returncode,
+ output=ex.output)
def create_chain(self):
if self.check_mode:
@@ -200,27 +198,26 @@ class IpTablesManager:
self.output.append("Successfully created chain %s" %
self.chain)
self.save()
- except subprocess.CalledProcessError as e:
+ except subprocess.CalledProcessError as ex:
raise IpTablesCreateChainError(
chain=self.chain,
msg="Failed to create chain: %s" % self.chain,
- cmd=e.cmd, exit_code=e.returncode, output=e.output
+ cmd=ex.cmd, exit_code=ex.returncode, output=ex.output
)
def jump_rule_exists(self):
cmd = self.cmd + ['-C', self.jump_rule_chain, '-j', self.chain]
- return True if subprocess.call(cmd) == 0 else False
+ return True if call(cmd) == 0 else False
def chain_exists(self):
cmd = self.cmd + ['-L', self.chain]
- return True if subprocess.call(cmd) == 0 else False
+ return True if call(cmd) == 0 else False
def gen_cmd(self):
cmd = 'iptables' if self.ip_version == 'ipv4' else 'ip6tables'
return ["/usr/sbin/%s" % cmd]
- def gen_save_cmd(self):
- cmd = 'iptables' if self.ip_version == 'ipv4' else 'ip6tables'
+ def gen_save_cmd(self): # pylint: disable=no-self-use
return ['/usr/libexec/iptables/iptables.init', 'save']
@@ -228,7 +225,8 @@ def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True),
- action=dict(required=True, choices=['add', 'remove', 'verify_chain']),
+ action=dict(required=True, choices=['add', 'remove',
+ 'verify_chain']),
chain=dict(required=False, default='OS_FIREWALL_ALLOW'),
create_jump_rule=dict(required=False, type='bool', default=True),
jump_rule_chain=dict(required=False, default='INPUT'),
@@ -261,13 +259,15 @@ def main():
iptables_manager.remove_rule(port, protocol)
elif action == 'verify_chain':
iptables_manager.verify_chain()
- except IpTablesError as e:
- module.fail_json(msg=e.msg)
+ except IpTablesError as ex:
+ module.fail_json(msg=ex.msg)
return module.exit_json(changed=iptables_manager.changed,
output=iptables_manager.output)
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
# import module snippets
from ansible.module_utils.basic import *
-main()
+if __name__ == '__main__':
+ main()
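For context, the module is driven from tasks like the "Add iptables allow rules" task at the end of this diff. A minimal invocation consistent with the argument spec above would look roughly like the sketch below; the rule name and port are hypothetical, and the port/protocol parameter names are inferred from the module body rather than shown in these hunks:

    - name: Add iptables allow rules
      os_firewall_manage_iptables:
        name: openshift-api
        action: add
        protocol: tcp
        port: 8443

With action: add the module works against the OS_FIREWALL_ALLOW chain by default, can create the jump rule from INPUT into that chain (create_jump_rule defaults to true), and persists changes with /usr/libexec/iptables/iptables.init save.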
diff --git a/roles/os_firewall/tasks/firewall/firewalld.yml b/roles/os_firewall/tasks/firewall/firewalld.yml
index b6bddd5c5..5089eb3e0 100644
--- a/roles/os_firewall/tasks/firewall/firewalld.yml
+++ b/roles/os_firewall/tasks/firewall/firewalld.yml
@@ -44,6 +44,7 @@
- iptables
- ip6tables
when: pkg_check.rc == 0
+ ignore_errors: yes
# TODO: Ansible 1.9 will eliminate the need for separate firewalld tasks for
# enabling rules and making them permanent with the immediate flag
diff --git a/roles/os_firewall/tasks/firewall/iptables.yml b/roles/os_firewall/tasks/firewall/iptables.yml
index 7b5c00a9b..9af9d8d29 100644
--- a/roles/os_firewall/tasks/firewall/iptables.yml
+++ b/roles/os_firewall/tasks/firewall/iptables.yml
@@ -42,6 +42,7 @@
register: result
changed_when: "'firewalld' in result.stdout"
when: pkg_check.rc == 0
+ ignore_errors: yes
- name: Add iptables allow rules
os_firewall_manage_iptables: