-rw-r--r--  .tito/packages/openshift-ansible                    2
-rw-r--r--  openshift-ansible.spec                              23
-rw-r--r--  playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml    4
-rw-r--r--  playbooks/common/openshift-master/restart.yml       10
-rw-r--r--  playbooks/gce/openshift-cluster/config.yml           1
-rw-r--r--  playbooks/gce/openshift-cluster/join_node.yml        2
-rw-r--r--  playbooks/gce/openshift-cluster/vars.yml             3
-rw-r--r--  roles/chrony/README.md                              31
-rw-r--r--  roles/chrony/defaults/main.yml                       2
-rw-r--r--  roles/chrony/handlers/main.yml                       5
-rw-r--r--  roles/chrony/meta/main.yml                          18
-rw-r--r--  roles/chrony/tasks/main.yml                         30
-rw-r--r--  roles/chrony/templates/chrony.conf.j2               45
-rw-r--r--  roles/chrony/vars/main.yml                           2
-rw-r--r--  roles/lib_timedatectl/library/timedatectl.py        74
-rw-r--r--  roles/lib_zabbix/tasks/create_template.yml           1
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py     9
-rw-r--r--  roles/os_zabbix/vars/template_docker.yml             2
-rw-r--r--  roles/oso_monitoring_tools/README.md                54
-rw-r--r--  roles/oso_monitoring_tools/defaults/main.yml         2
-rw-r--r--  roles/oso_monitoring_tools/handlers/main.yml         2
-rw-r--r--  roles/oso_monitoring_tools/meta/main.yml             8
-rw-r--r--  roles/oso_monitoring_tools/tasks/main.yml           18
-rw-r--r--  roles/oso_monitoring_tools/vars/main.yml            12
-rw-r--r--  utils/src/ooinstall/cli_installer.py                15
-rw-r--r--  utils/src/ooinstall/openshift_ansible.py             2
26 files changed, 350 insertions, 27 deletions
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index b94e78aea..4ec54c846 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.0.33-1 ./
+3.0.35-1 ./
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index 1cc350b81..4d00c655b 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -5,7 +5,7 @@
}
Name: openshift-ansible
-Version: 3.0.33
+Version: 3.0.35
Release: 1%{?dist}
Summary: Openshift and Atomic Enterprise Ansible
License: ASL 2.0
@@ -259,6 +259,27 @@ Atomic OpenShift Utilities includes
%changelog
+* Mon Jan 18 2016 Brenton Leanhardt <bleanhar@redhat.com> 3.0.35-1
+- added the lib_timedatectl role (mwoodson@redhat.com)
+- added chrony (mwoodson@redhat.com)
+- added oso_monitoring tools role (mwoodson@redhat.com)
+- Improve pacemaker 'is-active' check. (abutcher@redhat.com)
+
+* Mon Jan 18 2016 Brenton Leanhardt <bleanhar@redhat.com> 3.0.34-1
+- clean up too-many-branches / logic (jdiaz@redhat.com)
+- atomic-openshift-installer: add containerized to inventory
+ (smunilla@redhat.com)
+- Add 'unknown' to possible output for the is-active check.
+ (abutcher@redhat.com)
+- Fix cluster_method conditional in master restart playbook.
+ (abutcher@redhat.com)
+- Use IdentityFile instead of PrivateKey (donovan.muller@gmail.com)
+- atomic-openshift-installer: Remove containerized install for 3.0
+ (smunilla@redhat.com)
+- Host group should be OSEv3 not OSv3 (donovan.muller@gmail.com)
+- Remove pause after haproxy start (abutcher@redhat.com)
+- Ensure nfs-utils installed for non-atomic hosts. (abutcher@redhat.com)
+
* Fri Jan 15 2016 Brenton Leanhardt <bleanhar@redhat.com> 3.0.33-1
- Configure nodes which are also masters prior to nodes in containerized
install. (abutcher@redhat.com)
diff --git a/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml b/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml
index 174cea460..d24e9cafa 100644
--- a/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml
+++ b/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml
@@ -20,7 +20,7 @@
# ansible-playbook -e 'cli_tag_name=ops-compute-12345' grow_docker_vg.yml
#
# Notes:
-# * By default this will do a 55GB GP2 volume. This can be overridden with the "-e 'cli_volume_size=100'" variable
+# * By default this will do a 200GB GP2 volume. This can be overridden with the "-e 'cli_volume_size=100'" variable
# * This does a GP2 by default. Support for Provisioned IOPS has not been added
# * This will assign the new volume to /dev/xvdc. This is not variablized, yet.
# * This can be done with NO downtime on the host
@@ -36,7 +36,7 @@
  vars:
    cli_volume_type: gp2
-    cli_volume_size: 55
+    cli_volume_size: 200
#    cli_volume_iops: "{{ 30 * cli_volume_size }}"
  pre_tasks:
diff --git a/playbooks/common/openshift-master/restart.yml b/playbooks/common/openshift-master/restart.yml
index d9d857b1a..052892863 100644
--- a/playbooks/common/openshift-master/restart.yml
+++ b/playbooks/common/openshift-master/restart.yml
@@ -68,14 +68,22 @@
- name: Determine which masters are currently active
  hosts: oo_masters_to_config
+  any_errors_fatal: true
  tasks:
  - name: Check master service status
    command: >
      systemctl is-active {{ openshift.common.service_type }}-master
    register: active_check_output
    when: openshift.master.cluster_method | default(None) == 'pacemaker'
-    failed_when: active_check_output.stdout not in ['active', 'inactive', 'unknown']
+    failed_when: false
    changed_when: false
+  # Any master which did not report 'active' or 'inactive' is likely
+  # unhealthy. Other possible states are 'unknown' or 'failed'.
+  - fail:
+      msg: >
+        Got invalid service state from {{ openshift.common.service_type }}-master
+        on {{ inventory_hostname }}. Please verify pacemaker cluster.
+    when: openshift.master.cluster_method | default(None) == 'pacemaker' and active_check_output.stdout not in ['active', 'inactive']
  - set_fact:
      is_active: "{{ active_check_output.stdout == 'active' }}"
    when: openshift.master.cluster_method | default(None) == 'pacemaker'
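
The pattern above — register the probe with failed_when: false, then fail in a separate task — generalizes to any service health check. A minimal standalone sketch (the host pattern and unit name are illustrative, not from this commit):

    - name: Probe and verify a service
      hosts: all
      any_errors_fatal: true
      tasks:
      - name: Check service state without failing the probe itself
        command: systemctl is-active some-service    # hypothetical unit name
        register: state_check
        failed_when: false
        changed_when: false
      # Fail in a second task so the message can name the host and the state.
      - fail:
          msg: "{{ inventory_hostname }} reported service state '{{ state_check.stdout }}'"
        when: state_check.stdout not in ['active', 'inactive']

Splitting probe and verdict keeps the probe idempotent, and any_errors_fatal stops the play on the first unhealthy host.
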
diff --git a/playbooks/gce/openshift-cluster/config.yml b/playbooks/gce/openshift-cluster/config.yml
index 3231ecc8e..84a3f84d4 100644
--- a/playbooks/gce/openshift-cluster/config.yml
+++ b/playbooks/gce/openshift-cluster/config.yml
@@ -13,4 +13,3 @@
    openshift_debug_level: "{{ debug_level }}"
    openshift_deployment_type: "{{ deployment_type }}"
    openshift_hostname: "{{ gce_private_ip }}"
-    openshift_use_openshift_sdn: "{{ do_we_use_openshift_sdn }}"
diff --git a/playbooks/gce/openshift-cluster/join_node.yml b/playbooks/gce/openshift-cluster/join_node.yml
index acf5e5110..75343dffa 100644
--- a/playbooks/gce/openshift-cluster/join_node.yml
+++ b/playbooks/gce/openshift-cluster/join_node.yml
@@ -48,6 +48,4 @@
    openshift_debug_level: 4
    openshift_deployment_type: "{{ deployment_type }}"
    openshift_hostname: "{{ ansible_default_ipv4.address }}"
-    openshift_use_openshift_sdn: true
    openshift_node_labels: "{{ lookup('oo_option', 'openshift_node_labels') }} "
-    os_sdn_network_plugin_name: "redhat/openshift-ovs-subnet"
diff --git a/playbooks/gce/openshift-cluster/vars.yml b/playbooks/gce/openshift-cluster/vars.yml
index 7fb13c7a6..f004a9e6b 100644
--- a/playbooks/gce/openshift-cluster/vars.yml
+++ b/playbooks/gce/openshift-cluster/vars.yml
@@ -1,8 +1,5 @@
---
-do_we_use_openshift_sdn: true
-sdn_network_plugin: redhat/openshift-ovs-subnet
debug_level: 2
-# os_sdn_network_plugin_name can be ovssubnet or multitenant, see https://docs.openshift.org/latest/architecture/additional_concepts/sdn.html#ovssubnet-plugin-operation
deployment_rhel7_ent_base:
  image: rhel-7
diff --git a/roles/chrony/README.md b/roles/chrony/README.md
new file mode 100644
index 000000000..bf15d9669
--- /dev/null
+++ b/roles/chrony/README.md
@@ -0,0 +1,31 @@
+chrony
+======
+
+A role to configure chrony as the ntp client
+
+Requirements
+------------
+
+
+Role Variables
+--------------
+
+chrony_ntp_servers: a list of ntp servers to use in the chrony.conf file
+
+Dependencies
+------------
+
+roles/lib_timedatectl
+
+Example Playbook
+----------------
+
+License
+-------
+
+Apache 2.0
+
+Author Information
+------------------
+
+Openshift Operations
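
The Example Playbook section above is left empty; a minimal invocation of the role might look like the following sketch (server names are illustrative):

    - hosts: all
      roles:
      - role: chrony
        chrony_ntp_servers:
        - 0.rhel.pool.ntp.org
        - 1.rhel.pool.ntp.org
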
diff --git a/roles/chrony/defaults/main.yml b/roles/chrony/defaults/main.yml
new file mode 100644
index 000000000..95576e666
--- /dev/null
+++ b/roles/chrony/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+# defaults file for chrony
diff --git a/roles/chrony/handlers/main.yml b/roles/chrony/handlers/main.yml
new file mode 100644
index 000000000..1973c79e2
--- /dev/null
+++ b/roles/chrony/handlers/main.yml
@@ -0,0 +1,5 @@
+---
+- name: Restart chronyd
+  service:
+    name: chronyd
+    state: restarted
diff --git a/roles/chrony/meta/main.yml b/roles/chrony/meta/main.yml
new file mode 100644
index 000000000..85595d7c3
--- /dev/null
+++ b/roles/chrony/meta/main.yml
@@ -0,0 +1,18 @@
+---
+galaxy_info:
+  author: Openshift Operations
+  description: Configure chrony as an ntp client
+  company: Red Hat
+  license: Apache 2.0
+  min_ansible_version: 1.9.2
+  platforms:
+  - name: EL
+    versions:
+    - 7
+  - name: Fedora
+    versions:
+    - all
+  categories:
+  - system
+dependencies:
+- roles/lib_timedatectl
diff --git a/roles/chrony/tasks/main.yml b/roles/chrony/tasks/main.yml
new file mode 100644
index 000000000..fae6d8e4c
--- /dev/null
+++ b/roles/chrony/tasks/main.yml
@@ -0,0 +1,30 @@
+---
+- name: remove ntp package
+  yum:
+    name: ntp
+    state: absent
+
+- name: ensure chrony package is installed
+  yum:
+    name: chrony
+    state: installed
+
+- name: Install /etc/chrony.conf
+  template:
+    src: chrony.conf.j2
+    dest: /etc/chrony.conf
+    owner: root
+    group: root
+    mode: 0644
+  notify:
+  - Restart chronyd
+
+- name: Enable ntp with timedatectl set-ntp yes
+  timedatectl:
+    ntp: True
+
+- name: Start and enable chronyd
+  service:
+    name: chronyd
+    state: started
+    enabled: yes
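
Once the role has run, chronyd should be the active time client. A quick ad-hoc verification play (a sketch, not part of the role):

    - hosts: all
      tasks:
      - name: Confirm chronyd is tracking a time source
        command: chronyc tracking
        changed_when: false
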
diff --git a/roles/chrony/templates/chrony.conf.j2 b/roles/chrony/templates/chrony.conf.j2
new file mode 100644
index 000000000..de43b6364
--- /dev/null
+++ b/roles/chrony/templates/chrony.conf.j2
@@ -0,0 +1,45 @@
+# Use public servers from the pool.ntp.org project.
+# Please consider joining the pool (http://www.pool.ntp.org/join.html).
+{% for server in chrony_ntp_servers %}
+server {{ server }} iburst
+{% endfor %}
+
+# Ignore stratum in source selection.
+stratumweight 0
+
+# Record the rate at which the system clock gains/loses time.
+driftfile /var/lib/chrony/drift
+
+# Enable kernel RTC synchronization.
+rtcsync
+
+# In the first three updates, step the system clock instead of slewing
+# if the adjustment is larger than 10 seconds.
+makestep 10 3
+
+# Allow NTP client access from local network.
+#allow 192.168/16
+
+# Listen for commands only on localhost.
+bindcmdaddress 127.0.0.1
+bindcmdaddress ::1
+
+# Serve time even if not synchronized to any NTP server.
+#local stratum 10
+
+keyfile /etc/chrony.keys
+
+# Specify the key used as password for chronyc.
+commandkey 1
+
+# Generate command key if missing.
+generatecommandkey
+
+# Disable logging of client accesses.
+noclientlog
+
+# Send a message to syslog if a clock adjustment is larger than 0.5 seconds.
+logchange 0.5
+
+logdir /var/log/chrony
+#log measurements statistics tracking
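
Only the server loop at the top of this template is dynamic; the rest is static configuration. With an illustrative value for chrony_ntp_servers, the loop renders one server line per entry:

    chrony_ntp_servers:    # illustrative values
    - 0.rhel.pool.ntp.org
    - 1.rhel.pool.ntp.org
    # the {% for %} loop then renders:
    #   server 0.rhel.pool.ntp.org iburst
    #   server 1.rhel.pool.ntp.org iburst
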
diff --git a/roles/chrony/vars/main.yml b/roles/chrony/vars/main.yml
new file mode 100644
index 000000000..061a21547
--- /dev/null
+++ b/roles/chrony/vars/main.yml
@@ -0,0 +1,2 @@
+---
+# vars file for chrony
diff --git a/roles/lib_timedatectl/library/timedatectl.py b/roles/lib_timedatectl/library/timedatectl.py
new file mode 100644
index 000000000..b6eab5918
--- /dev/null
+++ b/roles/lib_timedatectl/library/timedatectl.py
@@ -0,0 +1,74 @@
+#!/usr/bin/env python
+'''
+  timedatectl ansible module
+
+  This module supports setting ntp enabled
+'''
+import subprocess
+
+
+
+
+def do_timedatectl(options=None):
+    ''' subprocess timedatectl '''
+
+    cmd = ['/usr/bin/timedatectl']
+    if options:
+        cmd += options.split()
+
+    proc = subprocess.Popen(cmd, stdin=None, stdout=subprocess.PIPE)
+    proc.wait()
+    return proc.stdout.read()
+
+def main():
+    ''' Ansible module for timedatectl
+    '''
+
+    module = AnsibleModule(
+        argument_spec=dict(
+            #state=dict(default='enabled', type='str'),
+            ntp=dict(default=True, type='bool'),
+        ),
+        #supports_check_mode=True
+    )
+
+    # determine the current ntp state from timedatectl output
+    ntp_enabled = False
+
+    results = do_timedatectl()
+
+    for line in results.split('\n'):
+        if 'NTP enabled' in line:
+            if 'yes' in line:
+                ntp_enabled = True
+
+    ########
+    # Enable NTP
+    ########
+    if module.params['ntp']:
+        if ntp_enabled:
+            module.exit_json(changed=False, results="enabled", state="enabled")
+
+        # Enable it
+        # Commands to enable ntp
+        else:
+            results = do_timedatectl('set-ntp yes')
+            module.exit_json(changed=True, results="enabled", state="enabled", cmdout=results)
+
+    #########
+    # Disable NTP
+    #########
+    else:
+        if not ntp_enabled:
+            module.exit_json(changed=False, results="disabled", state="disabled")
+
+        results = do_timedatectl('set-ntp no')
+        module.exit_json(changed=True, results="disabled", state="disabled")
+
+    module.exit_json(failed=True, changed=False, results="Something went wrong", state="unknown")
+
+# Pylint is getting in the way of basic Ansible
+# pylint: disable=redefined-builtin,wildcard-import,unused-wildcard-import
+from ansible.module_utils.basic import *
+
+main()
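
The module exposes a single boolean option, ntp (default true), and reports changed only when it actually flips the setting. A sketch of a play consuming it, assuming the lib_timedatectl role is applied so its library/ directory is on the module path:

    - hosts: all
      roles:
      - lib_timedatectl
      tasks:
      - name: Ensure ntp synchronization is enabled
        timedatectl:
          ntp: yes
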
diff --git a/roles/lib_zabbix/tasks/create_template.yml b/roles/lib_zabbix/tasks/create_template.yml
index 47749389e..61344357a 100644
--- a/roles/lib_zabbix/tasks/create_template.yml
+++ b/roles/lib_zabbix/tasks/create_template.yml
@@ -57,6 +57,7 @@
        expression: "{{ item.expression }}"
        priority: "{{ item.priority }}"
        url: "{{ item.url | default(None, True) }}"
+        status: "{{ item.status | default('', True) }}"
    with_items: template.ztriggers
    when: template.ztriggers is defined
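
With the added status field, a trigger definition can now be created disabled, defaulting to '' when the key is absent. An illustrative ztriggers entry (template and trigger names are made up):

    ztriggers:
    - name: 'example.check failed on {HOST.NAME}'
      expression: '{Template Example:example.check.min(#3)}>0'
      url: 'https://example.com/sop/check_example.asciidoc'
      priority: average
      status: disabled
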
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 2a3d4acbd..831d78b83 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -188,9 +188,6 @@ def normalize_gce_facts(metadata, facts):
    _, _, zone = metadata['instance']['zone'].rpartition('/')
    facts['zone'] = zone
-    # Default to no sdn for GCE deployments
-    facts['use_openshift_sdn'] = False
-
    # GCE currently only supports a single interface
    facts['network']['ip'] = facts['network']['interfaces'][0]['ips'][0]
    pub_ip = facts['network']['interfaces'][0]['public_ips'][0]
@@ -884,10 +881,6 @@ def apply_provider_facts(facts, provider_facts):
    if not provider_facts:
        return facts
-    use_openshift_sdn = provider_facts.get('use_openshift_sdn')
-    if isinstance(use_openshift_sdn, bool):
-        facts['common']['use_openshift_sdn'] = use_openshift_sdn
-
    common_vars = [('hostname', 'ip'), ('public_hostname', 'public_ip')]
    for h_var, ip_var in common_vars:
        ip_value = provider_facts['network'].get(ip_var)
@@ -1078,7 +1071,7 @@ class OpenShiftFacts(object):
        Raises:
            OpenShiftFactsUnsupportedRoleError:
    """
-    known_roles = ['common', 'master', 'node', 'master_sdn', 'node_sdn', 'etcd', 'nfs']
+    known_roles = ['common', 'master', 'node', 'etcd', 'nfs']

    def __init__(self, role, filename, local_facts, additive_facts_to_overwrite=False):
        self.changed = False
diff --git a/roles/os_zabbix/vars/template_docker.yml b/roles/os_zabbix/vars/template_docker.yml
index a05e552e3..dd13e76f7 100644
--- a/roles/os_zabbix/vars/template_docker.yml
+++ b/roles/os_zabbix/vars/template_docker.yml
@@ -72,10 +72,12 @@ g_template_docker:
    url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_docker_ping.asciidoc'
    priority: high

+  # Re-enable for OpenShift 3.1.1 (https://bugzilla.redhat.com/show_bug.cgi?id=1292971#c6)
  - name: 'docker.container.dns.resolution failed on {HOST.NAME}'
    expression: '{Template Docker:docker.container.dns.resolution.min(#3)}>0'
    url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_docker_dns.asciidoc'
    priority: average
+    status: disabled

  - name: 'docker.container.existing.dns.resolution.failed on {HOST.NAME}'
    expression: '{Template Docker:docker.container.existing.dns.resolution.failed.min(#3)}>0'
diff --git a/roles/oso_monitoring_tools/README.md b/roles/oso_monitoring_tools/README.md
new file mode 100644
index 000000000..4215f9eeb
--- /dev/null
+++ b/roles/oso_monitoring_tools/README.md
@@ -0,0 +1,54 @@
+oso_monitoring_tools
+====================
+
+This role will install the OpenShift Monitoring Utilities
+
+Requirements
+------------
+
+Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
+
+Role Variables
+--------------
+
+osomt_zagg_client_config
+
+from vars/main.yml:
+
+osomt_zagg_client_config:
+  host:
+    name: "{{ osomt_host_name }}"
+  zagg:
+    url: "{{ osomt_zagg_url }}"
+    user: "{{ osomt_zagg_user }}"
+    pass: "{{ osomt_zagg_password }}"
+    ssl_verify: "{{ osomt_zagg_ssl_verify }}"
+    verbose: "{{ osomt_zagg_verbose }}"
+    debug: "{{ osomt_zagg_debug }}"
+
+Dependencies
+------------
+
+None
+
+Example Playbook
+----------------
+
+- role: "oso_monitoring_tools"
+  osomt_host_name: hostname
+  osomt_zagg_url: http://path.to/zagg_web
+  osomt_zagg_user: admin
+  osomt_zagg_password: password
+  osomt_zagg_ssl_verify: True
+  osomt_zagg_verbose: False
+  osomt_zagg_debug: False
+
+License
+-------
+
+BSD
+
+Author Information
+------------------
+
+Openshift Operations
diff --git a/roles/oso_monitoring_tools/defaults/main.yml b/roles/oso_monitoring_tools/defaults/main.yml
new file mode 100644
index 000000000..a17424f25
--- /dev/null
+++ b/roles/oso_monitoring_tools/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+# defaults file for oso_monitoring_tools
diff --git a/roles/oso_monitoring_tools/handlers/main.yml b/roles/oso_monitoring_tools/handlers/main.yml
new file mode 100644
index 000000000..cefa780ab
--- /dev/null
+++ b/roles/oso_monitoring_tools/handlers/main.yml
@@ -0,0 +1,2 @@
+---
+# handlers file for oso_monitoring_tools
diff --git a/roles/oso_monitoring_tools/meta/main.yml b/roles/oso_monitoring_tools/meta/main.yml
new file mode 100644
index 000000000..9c42b68dc
--- /dev/null
+++ b/roles/oso_monitoring_tools/meta/main.yml
@@ -0,0 +1,8 @@
+---
+galaxy_info:
+  author: OpenShift Operations
+  description: Install OpenShift Monitoring tools
+  company: Red Hat, Inc
+  license: ASL 2.0
+  min_ansible_version: 1.2
+dependencies: []
diff --git a/roles/oso_monitoring_tools/tasks/main.yml b/roles/oso_monitoring_tools/tasks/main.yml
new file mode 100644
index 000000000..c90fc56e2
--- /dev/null
+++ b/roles/oso_monitoring_tools/tasks/main.yml
@@ -0,0 +1,18 @@
+---
+# tasks file for oso_monitoring_tools
+- name: Install the Openshift Tools RPMS
+  yum:
+    name: "{{ item }}"
+    state: latest
+  with_items:
+  - openshift-tools-scripts-monitoring-zagg-client
+  - python-openshift-tools-monitoring-zagg
+  - python-openshift-tools-monitoring-zabbix
+
+- debug: var=osomt_zagg_client_config
+
+- name: Generate the /etc/openshift_tools/zagg_client.yaml config file
+  copy:
+    content: "{{ osomt_zagg_client_config | to_nice_yaml }}"
+    dest: /etc/openshift_tools/zagg_client.yaml
+    mode: "644"
diff --git a/roles/oso_monitoring_tools/vars/main.yml b/roles/oso_monitoring_tools/vars/main.yml
new file mode 100644
index 000000000..3538ba30b
--- /dev/null
+++ b/roles/oso_monitoring_tools/vars/main.yml
@@ -0,0 +1,12 @@
+---
+# vars file for oso_monitoring_tools
+osomt_zagg_client_config:
+  host:
+    name: "{{ osomt_host_name }}"
+  zagg:
+    url: "{{ osomt_zagg_url }}"
+    user: "{{ osomt_zagg_user }}"
+    pass: "{{ osomt_zagg_password }}"
+    ssl_verify: "{{ osomt_zagg_ssl_verify }}"
+    verbose: "{{ osomt_zagg_verbose }}"
+    debug: "{{ osomt_zagg_debug }}"
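
Since tasks/main.yml serializes osomt_zagg_client_config with to_nice_yaml, concrete values for the osomt_* variables map directly into /etc/openshift_tools/zagg_client.yaml. With illustrative inputs the rendered file would look roughly like this (to_nice_yaml emits keys alphabetically):

    host:
      name: some-host.example.com
    zagg:
      debug: false
      pass: secret
      ssl_verify: true
      url: http://path.to/zagg_web
      user: admin
      verbose: false
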
diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py
index 4e30929da..1aacf3a4b 100644
--- a/utils/src/ooinstall/cli_installer.py
+++ b/utils/src/ooinstall/cli_installer.py
@@ -127,14 +127,13 @@ http://docs.openshift.com/enterprise/latest/architecture/infrastructure_componen
            masters_set = True
        host_props['node'] = True

-        #TODO: Reenable this option once container installs are out of tech preview
-        rpm_or_container = click.prompt('Will this host be RPM or Container based (rpm/container)?',
-                                        type=click.Choice(['rpm', 'container']),
-                                        default='rpm')
-        if rpm_or_container == 'container':
-            host_props['containerized'] = True
-        else:
-            host_props['containerized'] = False
+        host_props['containerized'] = False
+        if oo_cfg.settings['variant_version'] != '3.0':
+            rpm_or_container = click.prompt('Will this host be RPM or Container based (rpm/container)?',
+                                            type=click.Choice(['rpm', 'container']),
+                                            default='rpm')
+            if rpm_or_container == 'container':
+                host_props['containerized'] = True

        if existing_env:
            host_props['new_host'] = True
diff --git a/utils/src/ooinstall/openshift_ansible.py b/utils/src/ooinstall/openshift_ansible.py
index 20401f812..c0d115fdc 100644
--- a/utils/src/ooinstall/openshift_ansible.py
+++ b/utils/src/ooinstall/openshift_ansible.py
@@ -128,6 +128,8 @@ def write_host(host, inventory, schedulable=None):
        facts += ' openshift_hostname={}'.format(host.hostname)
    if host.public_hostname:
        facts += ' openshift_public_hostname={}'.format(host.public_hostname)
+    if host.containerized:
+        facts += ' containerized={}'.format(host.containerized)

    # TODO: For now write_host handles both masters and nodes.
    # Technically only nodes will ever need this.
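
The new fact is appended to the host line the installer writes into its generated inventory, so a containerized host would gain a trailing variable, e.g. (hostname and IP are made up):

    master1.example.com openshift_ip=10.0.0.1 containerized=True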