-rw-r--r--  filter_plugins/oo_filters.py | 11
-rw-r--r--  openshift-ansible.spec | 1
-rw-r--r--  playbooks/aws/openshift-cluster/config.yml | 1
-rw-r--r--  playbooks/common/openshift-cluster/evaluate_groups.yml | 10
-rw-r--r--  playbooks/common/openshift-cluster/scaleup.yml | 9
-rw-r--r--  playbooks/gce/openshift-cluster/config.yml | 1
-rw-r--r--  playbooks/libvirt/openshift-cluster/config.yml | 1
-rw-r--r--  playbooks/openstack/openshift-cluster/config.yml | 1
-rw-r--r--  roles/kube_nfs_volumes/README.md | 3
-rw-r--r--  roles/kube_nfs_volumes/defaults/main.yml | 6
-rw-r--r--  roles/kube_nfs_volumes/tasks/main.yml | 13
l---------  roles/kube_nfs_volumes/templates/v1/nfs.json.j2 | 1
-rw-r--r--  roles/kube_nfs_volumes/templates/v1beta3/nfs.json.j2 (renamed from roles/kube_nfs_volumes/templates/nfs.json.j2) | 0
-rw-r--r--  roles/lib_zabbix/library/zbx_itemprototype.py | 39
-rw-r--r--  roles/lib_zabbix/tasks/create_template.yml | 4
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py | 4
-rw-r--r--  roles/openshift_master/templates/master.yaml.v1.j2 | 6
-rw-r--r--  roles/openshift_master_ca/tasks/main.yml | 2
-rw-r--r--  roles/openshift_master_certificates/tasks/main.yml | 23
-rw-r--r--  roles/openshift_node/tasks/main.yml | 9
-rw-r--r--  roles/os_zabbix/vars/template_os_linux.yml | 25
-rw-r--r--  utils/docs/config.md | 3
-rw-r--r--  utils/src/ooinstall/cli_installer.py | 38
-rw-r--r--  utils/src/ooinstall/oo_config.py | 22
-rw-r--r--  utils/src/ooinstall/openshift_ansible.py | 4
25 files changed, 156 insertions(+), 81 deletions(-)
diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py
index f494c0ae5..f4643270d 100644
--- a/filter_plugins/oo_filters.py
+++ b/filter_plugins/oo_filters.py
@@ -377,14 +377,9 @@ class FilterModule(object):
else:
certificate['names'] = []
- if not os.path.isfile(certificate['certfile']) and not os.path.isfile(certificate['keyfile']):
- # Unable to find cert/key, try to prepend data_dir to paths
- certificate['certfile'] = os.path.join(data_dir, certificate['certfile'])
- certificate['keyfile'] = os.path.join(data_dir, certificate['keyfile'])
- if not os.path.isfile(certificate['certfile']) and not os.path.isfile(certificate['keyfile']):
- # Unable to find cert/key in data_dir
- raise errors.AnsibleFilterError("|certificate and/or key does not exist '%s', '%s'" %
- (certificate['certfile'], certificate['keyfile']))
+ if not os.path.isfile(certificate['certfile']) or not os.path.isfile(certificate['keyfile']):
+ raise errors.AnsibleFilterError("|certificate and/or key does not exist '%s', '%s'" %
+ (certificate['certfile'], certificate['keyfile']))
try:
st_cert = open(certificate['certfile'], 'rt').read()
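The hunk above drops the data_dir fallback and changes the check from 'and' to 'or', so the filter now fails immediately when either the certfile or the keyfile is missing at the supplied path. A minimal standalone sketch of the new validation, assuming the same error text (the function name is illustrative):

    import os
    from ansible import errors  # same exception type the filter raises

    def validate_cert_pair(certfile, keyfile):
        # Either file missing -> fail fast; no data_dir prefixing is attempted.
        if not os.path.isfile(certfile) or not os.path.isfile(keyfile):
            raise errors.AnsibleFilterError(
                "|certificate and/or key does not exist '%s', '%s'"
                % (certfile, keyfile))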
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index df3418278..10a53d921 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -104,6 +104,7 @@ Scripts to make it nicer when working with hosts that are defined only by metada
%files bin
%{_bindir}/*
+%exclude %{_bindir}/atomic-openshift-installer
%{python_sitelib}/openshift_ansible/
/etc/bash_completion.d/*
%config(noreplace) /etc/openshift_ansible/
diff --git a/playbooks/aws/openshift-cluster/config.yml b/playbooks/aws/openshift-cluster/config.yml
index a8e3e27bb..5aa6b0f9b 100644
--- a/playbooks/aws/openshift-cluster/config.yml
+++ b/playbooks/aws/openshift-cluster/config.yml
@@ -11,6 +11,7 @@
- include: ../../common/openshift-cluster/config.yml
vars:
g_etcd_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-etcd' }}"
+ g_lb_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-lb' }}"
g_masters_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-master' }}"
g_nodes_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-node' }}"
g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}"
diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml
index a787ba0d3..2bb69614f 100644
--- a/playbooks/common/openshift-cluster/evaluate_groups.yml
+++ b/playbooks/common/openshift-cluster/evaluate_groups.yml
@@ -4,17 +4,21 @@
gather_facts: no
tasks:
- fail:
- msg: This playbook rquires g_etcd_group to be set
+ msg: This playbook requires g_etcd_group to be set
when: g_etcd_group is not defined
- fail:
- msg: This playbook rquires g_masters_group to be set
+ msg: This playbook requires g_masters_group to be set
when: g_masters_group is not defined
- fail:
- msg: This playbook rquires g_nodes_group to be set
+ msg: This playbook requires g_nodes_group to be set
when: g_nodes_group is not defined
+ - fail:
+ msg: This playbook requires g_lb_group to be set
+ when: g_lb_group is not defined
+
- name: Evaluate oo_etcd_to_config
add_host:
name: "{{ item }}"
diff --git a/playbooks/common/openshift-cluster/scaleup.yml b/playbooks/common/openshift-cluster/scaleup.yml
index 201320de8..6d2777732 100644
--- a/playbooks/common/openshift-cluster/scaleup.yml
+++ b/playbooks/common/openshift-cluster/scaleup.yml
@@ -1,7 +1,16 @@
---
- include: evaluate_groups.yml
+ vars:
+ g_etcd_group: "{{ 'etcd' }}"
+ g_masters_group: "{{ 'masters' }}"
+ g_nodes_group: "{{ 'nodes' }}"
+ g_lb_group: "{{ 'lb' }}"
+ openshift_cluster_id: "{{ cluster_id | default('default') }}"
+ openshift_debug_level: 2
+ openshift_deployment_type: "{{ deployment_type }}"
- include: ../openshift-node/config.yml
vars:
osn_cluster_dns_domain: "{{ hostvars[groups.oo_first_master.0].openshift.dns.domain }}"
osn_cluster_dns_ip: "{{ hostvars[groups.oo_first_master.0].openshift.dns.ip }}"
+ openshift_deployment_type: "{{ deployment_type }}"
diff --git a/playbooks/gce/openshift-cluster/config.yml b/playbooks/gce/openshift-cluster/config.yml
index 6ca4f7395..745161bcb 100644
--- a/playbooks/gce/openshift-cluster/config.yml
+++ b/playbooks/gce/openshift-cluster/config.yml
@@ -16,6 +16,7 @@
- include: ../../common/openshift-cluster/config.yml
vars:
g_etcd_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-etcd' }}"
+ g_lb_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-lb' }}"
g_masters_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-master' }}"
g_nodes_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-node' }}"
g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}"
diff --git a/playbooks/libvirt/openshift-cluster/config.yml b/playbooks/libvirt/openshift-cluster/config.yml
index c208eee81..4d1ae22ff 100644
--- a/playbooks/libvirt/openshift-cluster/config.yml
+++ b/playbooks/libvirt/openshift-cluster/config.yml
@@ -15,6 +15,7 @@
- include: ../../common/openshift-cluster/config.yml
vars:
g_etcd_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-etcd' }}"
+ g_lb_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-lb' }}"
g_masters_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-master' }}"
g_nodes_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-node' }}"
g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}"
diff --git a/playbooks/openstack/openshift-cluster/config.yml b/playbooks/openstack/openshift-cluster/config.yml
index a5ee2d6a5..888804e28 100644
--- a/playbooks/openstack/openshift-cluster/config.yml
+++ b/playbooks/openstack/openshift-cluster/config.yml
@@ -10,6 +10,7 @@
- include: ../../common/openshift-cluster/config.yml
vars:
g_etcd_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-etcd' }}"
+ g_lb_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-lb' }}"
g_masters_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-master' }}"
g_nodes_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-node' }}"
g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}"
diff --git a/roles/kube_nfs_volumes/README.md b/roles/kube_nfs_volumes/README.md
index 56c69c286..1520f79b2 100644
--- a/roles/kube_nfs_volumes/README.md
+++ b/roles/kube_nfs_volumes/README.md
@@ -44,6 +44,9 @@ kubernetes_url: https://10.245.1.2:6443
# Token to use for authentication to the API server
kubernetes_token: tJdce6Fn3cL1112YoIJ5m2exzAbzcPZX
+
+# API Version to use for kubernetes
+kube_api_version: v1
```
## Dependencies
diff --git a/roles/kube_nfs_volumes/defaults/main.yml b/roles/kube_nfs_volumes/defaults/main.yml
index e296492f9..bdd994d07 100644
--- a/roles/kube_nfs_volumes/defaults/main.yml
+++ b/roles/kube_nfs_volumes/defaults/main.yml
@@ -1,4 +1,10 @@
---
+kubernetes_url: https://172.30.0.1:443
+
+kube_api_version: v1
+
+kube_req_template: "../templates/{{ kube_api_version }}/nfs.json.j2"
+
# Options of NFS exports.
nfs_export_options: "*(rw,no_root_squash,insecure,no_subtree_check)"
diff --git a/roles/kube_nfs_volumes/tasks/main.yml b/roles/kube_nfs_volumes/tasks/main.yml
index f4a506234..d1dcf261a 100644
--- a/roles/kube_nfs_volumes/tasks/main.yml
+++ b/roles/kube_nfs_volumes/tasks/main.yml
@@ -16,10 +16,11 @@
- include: nfs.yml
- name: export physical volumes
- uri: url={{ kubernetes_url }}/api/v1beta3/persistentvolumes
- method=POST
- body='{{ lookup("template", "../templates/nfs.json.j2") }}'
- body_format=json
- status_code=201
- HEADER_Authorization="Bearer {{ kubernetes_token }}"
+ uri:
+ url: "{{ kubernetes_url }}/api/{{ kube_api_version }}/persistentvolumes"
+ method: POST
+ body: "{{ lookup('template', kube_req_template) }}"
+ body_format: json
+ status_code: 201
+ HEADER_Authorization: "Bearer {{ kubernetes_token }}"
with_items: partition_pool
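The task above moves from key=value to YAML dict syntax and parameterizes the API version, so the role can post to /api/v1 or /api/v1beta3 depending on kube_api_version. A rough Python illustration of the HTTP call the uri module performs (not part of the role; it uses the third-party requests package, and all names below are placeholders):

    import json
    import requests

    def create_persistent_volume(kubernetes_url, kubernetes_token,
                                 kube_api_version, pv_definition):
        # POST the rendered nfs.json body; the task treats HTTP 201 as success.
        resp = requests.post(
            "%s/api/%s/persistentvolumes" % (kubernetes_url, kube_api_version),
            headers={"Authorization": "Bearer %s" % kubernetes_token,
                     "Content-Type": "application/json"},
            data=json.dumps(pv_definition))  # pv_definition: the PV spec as a dict
        if resp.status_code != 201:
            raise RuntimeError("unexpected status %s" % resp.status_code)
        return resp.json()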
diff --git a/roles/kube_nfs_volumes/templates/v1/nfs.json.j2 b/roles/kube_nfs_volumes/templates/v1/nfs.json.j2
new file mode 120000
index 000000000..49c1191bc
--- /dev/null
+++ b/roles/kube_nfs_volumes/templates/v1/nfs.json.j2
@@ -0,0 +1 @@
+../v1beta3/nfs.json.j2 \ No newline at end of file
diff --git a/roles/kube_nfs_volumes/templates/nfs.json.j2 b/roles/kube_nfs_volumes/templates/v1beta3/nfs.json.j2
index b42886ef1..b42886ef1 100644
--- a/roles/kube_nfs_volumes/templates/nfs.json.j2
+++ b/roles/kube_nfs_volumes/templates/v1beta3/nfs.json.j2
diff --git a/roles/lib_zabbix/library/zbx_itemprototype.py b/roles/lib_zabbix/library/zbx_itemprototype.py
index e7fd6fa21..43498c015 100644
--- a/roles/lib_zabbix/library/zbx_itemprototype.py
+++ b/roles/lib_zabbix/library/zbx_itemprototype.py
@@ -67,7 +67,24 @@ def get_template(zapi, template_name):
return None
return content['result'][0]
-def get_type(ztype):
+def get_multiplier(inval):
+ ''' Determine the multiplier
+ '''
+ if inval == None or inval == '':
+ return None, 0
+
+ rval = None
+ try:
+ rval = int(inval)
+ except ValueError:
+ pass
+
+ if rval:
+ return rval, 1
+
+ return rval, 0
+
+def get_zabbix_type(ztype):
'''
Determine which type of discoverrule this is
'''
@@ -87,6 +104,7 @@ def get_type(ztype):
'telnet': 14,
'calculated': 15,
'JMX': 16,
+ 'SNMP trap': 17,
}
for typ in _types.keys():
@@ -153,16 +171,21 @@ def main():
name=dict(default=None, type='str'),
key=dict(default=None, type='str'),
description=dict(default=None, type='str'),
+ template_name=dict(default=None, type='str'),
interfaceid=dict(default=None, type='int'),
- ztype=dict(default='trapper', type='str'),
+ zabbix_type=dict(default='trapper', type='str'),
value_type=dict(default='float', type='str'),
delay=dict(default=60, type='int'),
lifetime=dict(default=30, type='int'),
state=dict(default='present', type='str'),
status=dict(default='enabled', type='str'),
applications=dict(default=[], type='list'),
- template_name=dict(default=None, type='str'),
discoveryrule_key=dict(default=None, type='str'),
+ interval=dict(default=60, type='int'),
+ delta=dict(default=0, type='int'),
+ multiplier=dict(default=None, type='str'),
+ units=dict(default=None, type='str'),
+
),
#supports_check_mode=True
)
@@ -205,15 +228,23 @@ def main():
# Create and Update
if state == 'present':
+
+ formula, use_multiplier = get_multiplier(module.params['multiplier'])
+
params = {'name': module.params['name'],
'key_': module.params['key'],
'hostid': template['templateid'],
'interfaceid': module.params['interfaceid'],
'ruleid': get_rule_id(zapi, module.params['discoveryrule_key'], template['templateid']),
- 'type': get_type(module.params['ztype']),
+ 'type': get_zabbix_type(module.params['zabbix_type']),
'value_type': get_value_type(module.params['value_type']),
'applications': get_app_ids(zapi, module.params['applications'], template['templateid']),
+ 'formula': formula,
+ 'multiplier': use_multiplier,
'description': module.params['description'],
+ 'units': module.params['units'],
+ 'delay': module.params['interval'],
+ 'delta': module.params['delta'],
}
if params['type'] in [2, 5, 7, 8, 11, 15]:
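For reference, the new get_multiplier helper above returns a (formula, use_multiplier) pair that feeds the 'formula' and 'multiplier' fields of the item prototype. A self-contained restatement with a few illustrative checks:

    def get_multiplier(inval):
        '''Return (formula, use_multiplier) as in the module above.'''
        if inval is None or inval == '':
            return None, 0
        rval = None
        try:
            rval = int(inval)
        except ValueError:
            pass
        if rval:
            return rval, 1
        return rval, 0

    # Illustrative checks:
    assert get_multiplier('1024') == (1024, 1)  # numeric string enables the multiplier
    assert get_multiplier('') == (None, 0)      # empty string leaves it disabled
    assert get_multiplier('foo') == (None, 0)   # non-numeric input leaves it disabled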
diff --git a/roles/lib_zabbix/tasks/create_template.yml b/roles/lib_zabbix/tasks/create_template.yml
index ac9cf756b..44c4e6766 100644
--- a/roles/lib_zabbix/tasks/create_template.yml
+++ b/roles/lib_zabbix/tasks/create_template.yml
@@ -84,6 +84,10 @@
template_name: "{{ template.name }}"
applications: "{{ item.applications }}"
description: "{{ item.description | default('', True) }}"
+ multiplier: "{{ item.multiplier | default('', True) }}"
+ units: "{{ item.units | default('', True) }}"
+ interval: "{{ item.interval | default(60, True) }}"
+ delta: "{{ item.delta | default(0, True) }}"
with_items: template.zitemprototypes
when: template.zitemprototypes is defined
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 134734a65..33aca987d 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -517,7 +517,7 @@ def set_aggregate_facts(facts):
facts['master']['etcd_data_dir'] = '/var/lib/etcd'
facts['common']['all_hostnames'] = list(all_hostnames)
- facts['common']['internal_hostnames'] = list(all_hostnames)
+ facts['common']['internal_hostnames'] = list(internal_hostnames)
return facts
@@ -551,6 +551,8 @@ def set_deployment_facts_if_unset(facts):
facts['common']['config_base'] = config_base
if 'data_dir' not in facts['common']:
data_dir = '/var/lib/origin'
+ if deployment_type in ['enterprise', 'online']:
+ data_dir = '/var/lib/openshift'
facts['common']['data_dir'] = data_dir
for role in ('master', 'node'):
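Two fixes above: internal_hostnames is now populated from the internal_hostnames set instead of duplicating all_hostnames, and data_dir gets a deployment-specific default. A minimal sketch of the data_dir selection (deployment types exactly as listed in the hunk):

    def default_data_dir(deployment_type):
        # Origin and other types keep /var/lib/origin; enterprise and online
        # deployments use /var/lib/openshift.
        data_dir = '/var/lib/origin'
        if deployment_type in ['enterprise', 'online']:
            data_dir = '/var/lib/openshift'
        return data_dir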
diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2
index d4a6590ea..faf625e3c 100644
--- a/roles/openshift_master/templates/master.yaml.v1.j2
+++ b/roles/openshift_master/templates/master.yaml.v1.j2
@@ -1,5 +1,5 @@
apiLevels:
-{% if openshift.common.deployment_type == "enterprise" %}
+{% if not openshift.common.version_greater_than_3_1_or_1_1 | bool %}
- v1beta3
{% endif %}
- v1
@@ -80,11 +80,11 @@ kubeletClientInfo:
port: 10250
{% if openshift.master.embedded_kube | bool %}
kubernetesMasterConfig:
+{% if not openshift.common.version_greater_than_3_1_or_1_1 | bool %}
apiLevels:
-{% if openshift.common.deployment_type == "enterprise" %}
- v1beta3
-{% endif %}
- v1
+{% endif %}
apiServerArguments: {{ api_server_args if api_server_args is defined else 'null' }}
controllerArguments: {{ controller_args if controller_args is defined else 'null' }}
masterCount: {{ openshift.master.master_count }}
diff --git a/roles/openshift_master_ca/tasks/main.yml b/roles/openshift_master_ca/tasks/main.yml
index 0738048d3..314f068e7 100644
--- a/roles/openshift_master_ca/tasks/main.yml
+++ b/roles/openshift_master_ca/tasks/main.yml
@@ -1,6 +1,6 @@
---
- name: Install the base package for admin tooling
- yum: pkg={{ openshift.common.service_type }} state=present
+ yum: pkg={{ openshift.common.service_type }}{{ openshift_version }} state=present
register: install_result
- name: Reload generated facts
diff --git a/roles/openshift_master_certificates/tasks/main.yml b/roles/openshift_master_certificates/tasks/main.yml
index e966e793e..13e5d7a4b 100644
--- a/roles/openshift_master_certificates/tasks/main.yml
+++ b/roles/openshift_master_certificates/tasks/main.yml
@@ -6,13 +6,9 @@
mode: 0700
with_items: masters_needing_certs
-- file:
- src: "{{ openshift_master_config_dir }}/{{ item.1 }}"
- dest: "{{ openshift_generated_configs_dir }}/{{ item.0.master_cert_subdir }}/{{ item.1 }}"
- state: hard
- with_nested:
- - masters_needing_certs
- - - ca.crt
+- set_fact:
+ master_certificates:
+ - ca.crt
- ca.key
- ca.serial.txt
- admin.crt
@@ -20,8 +16,6 @@
- admin.kubeconfig
- master.kubelet-client.crt
- master.kubelet-client.key
- - "{{ 'master.proxy-client.crt' if openshift.common.version_greater_than_3_1_or_1_1 else omit }}"
- - "{{ 'master.proxy-client.key' if openshift.common.version_greater_than_3_1_or_1_1 else omit }}"
- openshift-master.crt
- openshift-master.key
- openshift-master.kubeconfig
@@ -33,6 +27,17 @@
- openshift-router.kubeconfig
- serviceaccounts.private.key
- serviceaccounts.public.key
+ master_31_certificates:
+ - master.proxy-client.crt
+ - master.proxy-client.key
+
+- file:
+ src: "{{ openshift_master_config_dir }}/{{ item.1 }}"
+ dest: "{{ openshift_generated_configs_dir }}/{{ item.0.master_cert_subdir }}/{{ item.1 }}"
+ state: hard
+ with_nested:
+ - masters_needing_certs
+ - "{{ master_certificates | union(master_31_certificates) if openshift.common.version_greater_than_3_1_or_1_1 | bool else master_certificates }}"
- name: Create the master certificates if they do not already exist
command: >
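The rewrite above replaces the nested literal list with two facts (master_certificates and master_31_certificates) and hard-links the proxy-client files only on 3.1/1.1 or later. A pure-Python sketch of the same selection logic (function and argument names are illustrative):

    def certs_to_link(master_certificates, master_31_certificates,
                      is_3_1_or_later):
        # Mirrors the with_nested expression: union in the proxy-client
        # certificates only when targeting >= 3.1 / 1.1.
        if is_3_1_or_later:
            return list(set(master_certificates) | set(master_31_certificates))
        return list(master_certificates)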
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index c455a09f1..7525c12f6 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -45,15 +45,6 @@
register: sdn_install_result
when: openshift.common.use_openshift_sdn
-- name: Install Node package
- yum: pkg={{ openshift.common.service_type }}-node state=present
- register: node_install_result
-
-- name: Install sdn-ovs package
- yum: pkg={{ openshift.common.service_type }}-sdn-ovs state=present
- register: sdn_install_result
- when: openshift.common.use_openshift_sdn
-
# TODO: add the validate parameter when there is a validation command to run
- name: Create the Node config
template:
diff --git a/roles/os_zabbix/vars/template_os_linux.yml b/roles/os_zabbix/vars/template_os_linux.yml
index fbc20cd63..04665be62 100644
--- a/roles/os_zabbix/vars/template_os_linux.yml
+++ b/roles/os_zabbix/vars/template_os_linux.yml
@@ -199,6 +199,11 @@ g_template_os_linux:
lifetime: 1
description: "Dynamically register disks on a node"
+ - name: disc.network
+ key: disc.network
+ lifetime: 1
+ description: "Dynamically register network interfaces on a node"
+
zitemprototypes:
- discoveryrule_key: disc.filesys
name: "disc.filesys.full.{#OSO_FILESYS}"
@@ -232,6 +237,26 @@ g_template_os_linux:
applications:
- Disk
+ - discoveryrule_key: disc.network
+ name: "Bytes per second IN on network interface {#OSO_NET_INTERFACE}"
+ key: "disc.network.in.bytes[{#OSO_NET_INTERFACE}]"
+ value_type: int
+ units: B
+ delta: 1
+ description: "PCP network.interface.in.bytes metric. This is setup as a delta in Zabbix to measure the speed per second"
+ applications:
+ - Network
+
+ - discoveryrule_key: disc.network
+ name: "Bytes per second OUT on network interface {#OSO_NET_INTERFACE}"
+ key: "disc.network.out.bytes[{#OSO_NET_INTERFACE}]"
+ value_type: int
+ units: B
+ delta: 1
+ description: "PCP network.interface.out.bytes metric. This is setup as a delta in Zabbix to measure the speed per second"
+ applications:
+ - Network
+
ztriggerprototypes:
- name: 'Filesystem: {#OSO_FILESYS} has less than 15% free disk space on {HOST.NAME}'
expression: '{Template OS Linux:disc.filesys.full[{#OSO_FILESYS}].last()}>85'
diff --git a/utils/docs/config.md b/utils/docs/config.md
index ee4b157c9..2729f8d37 100644
--- a/utils/docs/config.md
+++ b/utils/docs/config.md
@@ -19,16 +19,19 @@ hosts:
master: true
node: true
containerized: true
+ connect_to: 24.222.0.1
- ip: 10.0.0.2
hostname: node1-private.example.com
public_ip: 24.222.0.2
public_hostname: node1.example.com
node: true
+ connect_to: 10.0.0.2
- ip: 10.0.0.3
hostname: node2-private.example.com
public_ip: 24.222.0.3
public_hostname: node2.example.com
node: true
+ connect_to: 10.0.0.3
```
## Primary Settings
diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py
index 0d65f2053..a40ff5cfc 100644
--- a/utils/src/ooinstall/cli_installer.py
+++ b/utils/src/ooinstall/cli_installer.py
@@ -101,18 +101,13 @@ http://docs.openshift.com/enterprise/latest/architecture/infrastructure_componen
hosts = []
more_hosts = True
- ip_regex = re.compile(r'^\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}$')
-
while more_hosts:
host_props = {}
hostname_or_ip = click.prompt('Enter hostname or IP address:',
default='',
value_proc=validate_prompt_hostname)
- if ip_regex.match(hostname_or_ip):
- host_props['ip'] = hostname_or_ip
- else:
- host_props['hostname'] = hostname_or_ip
+ host_props['connect_to'] = hostname_or_ip
host_props['master'] = click.confirm('Will this host be an OpenShift Master?')
host_props['node'] = True
@@ -150,7 +145,7 @@ Plese confirm that they are correct before moving forward.
notes = """
Format:
-IP,public IP,hostname,public hostname
+connect_to,IP,public IP,hostname,public hostname
Notes:
* The installation host is the hostname from the installer's perspective.
@@ -168,16 +163,15 @@ Notes:
default_facts_lines = []
default_facts = {}
- validated_facts = {}
for h in hosts:
- default_facts[h] = {}
- h.ip = callback_facts[str(h)]["common"]["ip"]
- h.public_ip = callback_facts[str(h)]["common"]["public_ip"]
- h.hostname = callback_facts[str(h)]["common"]["hostname"]
- h.public_hostname = callback_facts[str(h)]["common"]["public_hostname"]
-
- validated_facts[h] = {}
- default_facts_lines.append(",".join([h.ip,
+ default_facts[h.connect_to] = {}
+ h.ip = callback_facts[h.connect_to]["common"]["ip"]
+ h.public_ip = callback_facts[h.connect_to]["common"]["public_ip"]
+ h.hostname = callback_facts[h.connect_to]["common"]["hostname"]
+ h.public_hostname = callback_facts[h.connect_to]["common"]["public_hostname"]
+
+ default_facts_lines.append(",".join([h.connect_to,
+ h.ip,
h.public_ip,
h.hostname,
h.public_hostname]))
@@ -316,10 +310,10 @@ Add new nodes here
def get_installed_hosts(hosts, callback_facts):
installed_hosts = []
for host in hosts:
- if(host.name in callback_facts.keys()
- and 'common' in callback_facts[host.name].keys()
- and callback_facts[host.name]['common'].get('version', '')
- and callback_facts[host.name]['common'].get('version', '') != 'None'):
+ if(host.connect_to in callback_facts.keys()
+ and 'common' in callback_facts[host.connect_to].keys()
+ and callback_facts[host.connect_to]['common'].get('version', '')
+ and callback_facts[host.connect_to]['common'].get('version', '') != 'None'):
installed_hosts.append(host)
return installed_hosts
@@ -481,7 +475,7 @@ def uninstall(ctx):
if not ctx.obj['unattended']:
# Prompt interactively to confirm:
for host in oo_cfg.hosts:
- click.echo(" * %s" % host.name)
+ click.echo(" * %s" % host.connect_to)
proceed = click.confirm("\nDo you wish to proceed?")
if not proceed:
click.echo("Uninstall cancelled.")
@@ -512,7 +506,7 @@ def upgrade(ctx):
old_variant, old_version, oo_cfg.settings['variant'],
oo_cfg.settings['variant_version']))
for host in oo_cfg.hosts:
- click.echo(" * %s" % host.name)
+ click.echo(" * %s" % host.connect_to)
if not ctx.obj['unattended']:
# Prompt interactively to confirm:
diff --git a/utils/src/ooinstall/oo_config.py b/utils/src/ooinstall/oo_config.py
index 4281947f1..f35a8f51b 100644
--- a/utils/src/ooinstall/oo_config.py
+++ b/utils/src/ooinstall/oo_config.py
@@ -35,6 +35,7 @@ class Host(object):
self.hostname = kwargs.get('hostname', None)
self.public_ip = kwargs.get('public_ip', None)
self.public_hostname = kwargs.get('public_hostname', None)
+ self.connect_to = kwargs.get('connect_to', None)
# Should this host run as an OpenShift master:
self.master = kwargs.get('master', False)
@@ -43,30 +44,25 @@ class Host(object):
self.node = kwargs.get('node', False)
self.containerized = kwargs.get('containerized', False)
- if self.ip is None and self.hostname is None:
- raise OOConfigInvalidHostError("You must specify either 'ip' or 'hostname'")
+ if self.connect_to is None:
+ raise OOConfigInvalidHostError("You must specify either and 'ip' " \
+ "or 'hostname' to connect to.")
if self.master is False and self.node is False:
raise OOConfigInvalidHostError(
"You must specify each host as either a master or a node.")
- # Hosts can be specified with an ip, hostname, or both. However we need
- # something authoritative we can connect to and refer to the host by.
- # Preference given to the IP if specified as this is more specific.
- # We know one must be set by this point.
- self.name = self.ip if self.ip is not None else self.hostname
-
def __str__(self):
- return self.name
+ return self.connect_to
def __repr__(self):
- return self.name
+ return self.connect_to
def to_dict(self):
""" Used when exporting to yaml. """
d = {}
for prop in ['ip', 'hostname', 'public_ip', 'public_hostname',
- 'master', 'node', 'containerized']:
+ 'master', 'node', 'containerized', 'connect_to']:
# If the property is defined (not None or False), export it:
if getattr(self, prop):
d[prop] = getattr(self, prop)
@@ -182,7 +178,7 @@ class OOConfig(object):
if not getattr(host, required_fact):
missing_facts.append(required_fact)
if len(missing_facts) > 0:
- result[host.name] = missing_facts
+ result[host.connect_to] = missing_facts
return result
def save_to_disk(self):
@@ -214,6 +210,6 @@ class OOConfig(object):
def get_host(self, name):
for host in self.hosts:
- if host.name == name:
+ if host.connect_to == name:
return host
return None
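With the change above, every host is keyed by an explicit connect_to value rather than a .name attribute derived from ip/hostname. A trimmed-down sketch of the new lookup behavior (the class is simplified for illustration; the real Host carries many more fields):

    class Host(object):
        def __init__(self, **kwargs):
            self.connect_to = kwargs.get('connect_to', None)
            if self.connect_to is None:
                raise ValueError("You must specify an 'ip' or 'hostname' to connect to.")

        def __str__(self):
            return self.connect_to

    def get_host(hosts, name):
        # Lookups now match on connect_to instead of preferring ip over hostname.
        for host in hosts:
            if host.connect_to == name:
                return host
        return None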
diff --git a/utils/src/ooinstall/openshift_ansible.py b/utils/src/ooinstall/openshift_ansible.py
index 153415e8c..bac4951d5 100644
--- a/utils/src/ooinstall/openshift_ansible.py
+++ b/utils/src/ooinstall/openshift_ansible.py
@@ -79,7 +79,7 @@ def write_host(host, inventory, scheduleable=True):
if not scheduleable:
facts += ' openshift_scheduleable=False'
installer_host = socket.gethostname()
- if host.hostname == installer_host or host.public_hostname == installer_host:
+ if installer_host in [host.connect_to, host.hostname, host.public_hostname]:
facts += ' ansible_connection=local'
if os.geteuid() != 0:
no_pwd_sudo = subprocess.call(['sudo', '-v', '-n'])
@@ -88,7 +88,7 @@ def write_host(host, inventory, scheduleable=True):
sys.exit(1)
facts += ' ansible_become=true'
- inventory.write('{} {}\n'.format(host, facts))
+ inventory.write('{} {}\n'.format(host.connect_to, facts))
def load_system_facts(inventory_file, os_facts_path, env_vars, verbose=False):