Diffstat (limited to 'roles/openshift_node')
-rw-r--r--  roles/openshift_node/defaults/main.yml | 22
-rw-r--r--  roles/openshift_node/files/bootstrap.yml | 8
-rwxr-xr-x  roles/openshift_node/files/networkmanager/99-origin-dns.sh | 128
-rw-r--r--  roles/openshift_node/handlers/main.yml | 11
-rw-r--r--  roles/openshift_node/meta/main.yml | 8
-rw-r--r--  roles/openshift_node/tasks/bootstrap.yml | 6
-rw-r--r--  roles/openshift_node/tasks/config.yml | 6
-rw-r--r--  roles/openshift_node/tasks/config/install-node-docker-service-file.yml | 8
-rw-r--r--  roles/openshift_node/tasks/dnsmasq.yml | 69
-rw-r--r--  roles/openshift_node/tasks/dnsmasq/network-manager.yml | 10
-rw-r--r--  roles/openshift_node/tasks/dnsmasq/no-network-manager.yml | 13
-rw-r--r--  roles/openshift_node/tasks/docker/upgrade.yml | 42
-rw-r--r--  roles/openshift_node/tasks/install.yml | 8
-rw-r--r--  roles/openshift_node/tasks/main.yml | 27
-rw-r--r--  roles/openshift_node/tasks/node_system_container.yml | 6
-rw-r--r--  roles/openshift_node/tasks/openvswitch_system_container.yml | 13
-rw-r--r--  roles/openshift_node/tasks/registry_auth.yml | 25
-rw-r--r--  roles/openshift_node/tasks/storage_plugins/ceph.yml | 2
-rw-r--r--  roles/openshift_node/tasks/storage_plugins/glusterfs.yml | 2
-rw-r--r--  roles/openshift_node/tasks/storage_plugins/iscsi.yml | 2
-rw-r--r--  roles/openshift_node/tasks/storage_plugins/nfs.yml | 2
-rw-r--r--  roles/openshift_node/tasks/systemd_units.yml | 22
-rw-r--r--  roles/openshift_node/tasks/upgrade.yml | 184
-rw-r--r--  roles/openshift_node/tasks/upgrade/containerized_node_upgrade.yml | 14
-rw-r--r--  roles/openshift_node/tasks/upgrade/restart.yml | 42
-rw-r--r--  roles/openshift_node/tasks/upgrade/rpm_upgrade.yml | 33
-rw-r--r--  roles/openshift_node/templates/node-dnsmasq.conf.j2 | 2
-rw-r--r--  roles/openshift_node/templates/node.service.j2 | 6
-rw-r--r--  roles/openshift_node/templates/node.yaml.v1.j2 | 4
-rw-r--r--  roles/openshift_node/templates/openshift.docker.node.dep.service | 6
-rw-r--r--  roles/openshift_node/templates/openshift.docker.node.service | 10
-rw-r--r--  roles/openshift_node/templates/openvswitch.docker.service | 8
-rw-r--r--  roles/openshift_node/templates/origin-dns.conf.j2 | 12
33 files changed, 698 insertions(+), 63 deletions(-)
diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml
index 0c6d8db38..f3867fe4a 100644
--- a/roles/openshift_node/defaults/main.yml
+++ b/roles/openshift_node/defaults/main.yml
@@ -1,12 +1,28 @@
---
openshift_node_debug_level: "{{ debug_level | default(2) }}"
+openshift_node_dnsmasq_install_network_manager_hook: true
+
+# lo must always be present in this list or dnsmasq will conflict with
+# the node's DNS service.
+openshift_node_dnsmasq_except_interfaces:
+- lo
+
r_openshift_node_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
r_openshift_node_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
+l_is_node_system_container: "{{ (openshift_use_node_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
+
openshift_deployment_type: "{{ openshift_deployment_type | default('origin') }}"
openshift_service_type: "{{ 'origin' if openshift_deployment_type == 'origin' else 'atomic-openshift' }}"
+system_images_registry_dict:
+ openshift-enterprise: "registry.access.redhat.com"
+ origin: "docker.io"
+
+system_images_registry: "{{ system_images_registry_dict[openshift_deployment_type | default('origin')] }}"
+l_is_openvswitch_system_container: "{{ (openshift_use_openvswitch_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
+
openshift_image_tag: ''
default_r_openshift_node_image_prep_packages:
@@ -85,6 +101,10 @@ oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' in oreg_ur
oreg_auth_credentials_path: "{{ openshift_node_data_dir }}/.docker"
oreg_auth_credentials_replace: False
l_bind_docker_reg_auth: False
+openshift_use_crio: False
+openshift_docker_alternative_creds: "{{ (openshift_docker_use_system_container | default(False)) or (openshift_use_crio_only | default(False)) }}"
+
+openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False)) else 'docker' }}"
# NOTE
# r_openshift_node_*_default may be defined external to this role.
@@ -115,3 +135,5 @@ openshift_node_config_dir: "{{ openshift_node_config_dir_default }}"
openshift_node_image_config_latest_default: "{{ openshift_image_config_latest | default(False) }}"
openshift_node_image_config_latest: "{{ openshift_node_image_config_latest_default }}"
+
+openshift_node_use_instance_profiles: False
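
For context, a minimal sketch of how an inventory might override the new defaults introduced above; the variable names come from this file, while the file name and values are illustrative assumptions, not recommendations:

# group_vars/nodes.yml (illustrative only)
openshift_node_dnsmasq_install_network_manager_hook: true
# Skip the aws.yml credential tasks when nodes use IAM instance profiles:
openshift_node_use_instance_profiles: true
# Drives openshift_docker_service_name ('container-engine' vs 'docker'):
openshift_docker_use_system_container: false
openshift_use_crio: false
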
diff --git a/roles/openshift_node/files/bootstrap.yml b/roles/openshift_node/files/bootstrap.yml
index ea280640f..a5545c81b 100644
--- a/roles/openshift_node/files/bootstrap.yml
+++ b/roles/openshift_node/files/bootstrap.yml
@@ -61,3 +61,11 @@
with_items:
- line: "BOOTSTRAP_CONFIG_NAME=node-config-{{ openshift_group_type }}"
regexp: "^BOOTSTRAP_CONFIG_NAME=.*"
+
+ - name: "Start the {{ openshift_service_type }}-node service"
+ systemd:
+ daemon_reload: yes
+ state: restarted
+ enabled: True
+ name: "{{ openshift_service_type }}-node"
+ no_block: true
diff --git a/roles/openshift_node/files/networkmanager/99-origin-dns.sh b/roles/openshift_node/files/networkmanager/99-origin-dns.sh
new file mode 100755
index 000000000..f4e48b5b7
--- /dev/null
+++ b/roles/openshift_node/files/networkmanager/99-origin-dns.sh
@@ -0,0 +1,128 @@
+#!/bin/bash -x
+# -*- mode: sh; sh-indentation: 2 -*-
+
+# This NetworkManager dispatcher script replicates the functionality of
+# NetworkManager's dns=dnsmasq; however, rather than hardcoding the listening
+# address and /etc/resolv.conf to 127.0.0.1, it pulls the IP address from the
+# interface that owns the default route. This enables us to configure pods
+# to use this IP address as their only resolver, whereas using 127.0.0.1 inside
+# a pod would fail.
+#
+# To use this,
+# - If this host is also a master, reconfigure master dnsConfig to listen on
+# 8053 to avoid conflicts on port 53 and open port 8053 in the firewall
+# - Drop this script in /etc/NetworkManager/dispatcher.d/
+# - systemctl restart NetworkManager
+# - Configure node-config.yaml to set dnsIP: to the IP address of this
+#   node
+#
+# Test it:
+# host kubernetes.default.svc.cluster.local
+# host google.com
+#
+# TODO: I think this would be easy to add as a config option in NetworkManager
+# natively, look at hacking that up
+
+cd /etc/sysconfig/network-scripts
+. ./network-functions
+
+[ -f ../network ] && . ../network
+
+if [[ $2 =~ ^(up|dhcp4-change|dhcp6-change)$ ]]; then
+ # If the origin-upstream-dns config file changed we need to restart
+ NEEDS_RESTART=0
+ UPSTREAM_DNS='/etc/dnsmasq.d/origin-upstream-dns.conf'
+ # We'll regenerate the dnsmasq origin config in a temp file first
+ UPSTREAM_DNS_TMP=`mktemp`
+ UPSTREAM_DNS_TMP_SORTED=`mktemp`
+ CURRENT_UPSTREAM_DNS_SORTED=`mktemp`
+ NEW_RESOLV_CONF=`mktemp`
+ NEW_NODE_RESOLV_CONF=`mktemp`
+
+
+ ######################################################################
+ # couldn't find an existing method to determine if the interface owns the
+ # default route
+ def_route=$(/sbin/ip route list match 0.0.0.0/0 | awk '{print $3 }')
+ def_route_int=$(/sbin/ip route get to ${def_route} | awk '{print $3}')
+ def_route_ip=$(/sbin/ip route get to ${def_route} | awk '{print $5}')
+ if [[ ${DEVICE_IFACE} == ${def_route_int} ]]; then
+ if [ ! -f /etc/dnsmasq.d/origin-dns.conf ]; then
+ cat << EOF > /etc/dnsmasq.d/origin-dns.conf
+no-resolv
+domain-needed
+server=/cluster.local/172.30.0.1
+server=/30.172.in-addr.arpa/172.30.0.1
+enable-dbus
+dns-forward-max=5000
+cache-size=5000
+EOF
+ # New config file, must restart
+ NEEDS_RESTART=1
+ fi
+
+  # If NetworkManager doesn't know about the nameservers, the best we can do
+  # is grab them from /etc/resolv.conf, but only if we haven't already left
+  # our watermark there
+ if ! grep -q '99-origin-dns.sh' /etc/resolv.conf; then
+ if [[ -z "${IP4_NAMESERVERS}" || "${IP4_NAMESERVERS}" == "${def_route_ip}" ]]; then
+ IP4_NAMESERVERS=`grep '^nameserver ' /etc/resolv.conf | awk '{ print $2 }'`
+ fi
+ ######################################################################
+ # Write out default nameservers for /etc/dnsmasq.d/origin-upstream-dns.conf
+ # and /etc/origin/node/resolv.conf in their respective formats
+ for ns in ${IP4_NAMESERVERS}; do
+ if [[ ! -z $ns ]]; then
+ echo "server=${ns}" >> $UPSTREAM_DNS_TMP
+ echo "nameserver ${ns}" >> $NEW_NODE_RESOLV_CONF
+ fi
+ done
+ # Sort it in case DNS servers arrived in a different order
+ sort $UPSTREAM_DNS_TMP > $UPSTREAM_DNS_TMP_SORTED
+ sort $UPSTREAM_DNS > $CURRENT_UPSTREAM_DNS_SORTED
+ # Compare to the current config file (sorted)
+ NEW_DNS_SUM=`md5sum ${UPSTREAM_DNS_TMP_SORTED} | awk '{print $1}'`
+ CURRENT_DNS_SUM=`md5sum ${CURRENT_UPSTREAM_DNS_SORTED} | awk '{print $1}'`
+ if [ "${NEW_DNS_SUM}" != "${CURRENT_DNS_SUM}" ]; then
+ # DNS has changed, copy the temp file to the proper location (-Z
+ # sets default selinux context) and set the restart flag
+ cp -Z $UPSTREAM_DNS_TMP $UPSTREAM_DNS
+ NEEDS_RESTART=1
+ fi
+ # compare /etc/origin/node/resolv.conf checksum and replace it if different
+ NEW_NODE_RESOLV_CONF_MD5=`md5sum ${NEW_NODE_RESOLV_CONF}`
+ OLD_NODE_RESOLV_CONF_MD5=`md5sum /etc/origin/node/resolv.conf`
+ if [ "${NEW_NODE_RESOLV_CONF_MD5}" != "${OLD_NODE_RESOLV_CONF_MD5}" ]; then
+ cp -Z $NEW_NODE_RESOLV_CONF /etc/origin/node/resolv.conf
+ fi
+ fi
+
+ if ! `systemctl -q is-active dnsmasq.service`; then
+ NEEDS_RESTART=1
+ fi
+
+ ######################################################################
+ if [ "${NEEDS_RESTART}" -eq "1" ]; then
+ systemctl restart dnsmasq
+ fi
+
+ # Only if dnsmasq is running properly make it our only nameserver and place
+ # a watermark on /etc/resolv.conf
+ if `systemctl -q is-active dnsmasq.service`; then
+ if ! grep -q '99-origin-dns.sh' /etc/resolv.conf; then
+ echo "# nameserver updated by /etc/NetworkManager/dispatcher.d/99-origin-dns.sh" >> ${NEW_RESOLV_CONF}
+ fi
+ sed -e '/^nameserver.*$/d' /etc/resolv.conf >> ${NEW_RESOLV_CONF}
+ echo "nameserver "${def_route_ip}"" >> ${NEW_RESOLV_CONF}
+ if ! grep -qw search ${NEW_RESOLV_CONF}; then
+ echo 'search cluster.local' >> ${NEW_RESOLV_CONF}
+ elif ! grep -q 'search.*cluster.local' ${NEW_RESOLV_CONF}; then
+ sed -i '/^search/ s/$/ cluster.local/' ${NEW_RESOLV_CONF}
+ fi
+ cp -Z ${NEW_RESOLV_CONF} /etc/resolv.conf
+ fi
+ fi
+
+ # Clean up after yourself
+ rm -f $UPSTREAM_DNS_TMP $UPSTREAM_DNS_TMP_SORTED $CURRENT_UPSTREAM_DNS_SORTED $NEW_RESOLV_CONF
+fi
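
Once the dispatcher script above has run, /etc/resolv.conf carries the '99-origin-dns.sh' watermark and points at the default-route IP served by dnsmasq. A minimal verification sketch, assuming an Ansible 'nodes' group and the bind-utils host binary; this play is not part of the change:

- hosts: nodes
  gather_facts: false
  tasks:
  - name: Confirm the dispatcher watermark is present in resolv.conf
    command: grep -q '99-origin-dns.sh' /etc/resolv.conf
    changed_when: false

  - name: Resolve a cluster-local service through the node resolver
    command: host kubernetes.default.svc.cluster.local
    changed_when: false
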
diff --git a/roles/openshift_node/handlers/main.yml b/roles/openshift_node/handlers/main.yml
index b102c1b18..229c6bbed 100644
--- a/roles/openshift_node/handlers/main.yml
+++ b/roles/openshift_node/handlers/main.yml
@@ -1,4 +1,15 @@
---
+- name: restart NetworkManager
+ systemd:
+ name: NetworkManager
+ state: restarted
+ enabled: True
+
+- name: restart dnsmasq
+ systemd:
+ name: dnsmasq
+ state: restarted
+
- name: restart openvswitch
systemd:
name: openvswitch
diff --git a/roles/openshift_node/meta/main.yml b/roles/openshift_node/meta/main.yml
index 5bc7b9869..70057c7f3 100644
--- a/roles/openshift_node/meta/main.yml
+++ b/roles/openshift_node/meta/main.yml
@@ -13,9 +13,11 @@ galaxy_info:
- cloud
dependencies:
- role: openshift_node_facts
+ when: not (openshift_node_upgrade_in_progress | default(False))
- role: lib_openshift
- role: lib_os_firewall
-- role: openshift_clock
-- role: openshift_docker
+ when: not (openshift_node_upgrade_in_progress | default(False))
- role: openshift_cloud_provider
-- role: openshift_node_dnsmasq
+ when: not (openshift_node_upgrade_in_progress | default(False))
+- role: lib_utils
+ when: openshift_node_upgrade_in_progress | default(False)
diff --git a/roles/openshift_node/tasks/bootstrap.yml b/roles/openshift_node/tasks/bootstrap.yml
index 8cf41ab4c..a042bc01b 100644
--- a/roles/openshift_node/tasks/bootstrap.yml
+++ b/roles/openshift_node/tasks/bootstrap.yml
@@ -4,6 +4,8 @@
name: "{{ item }}"
state: present
with_items: "{{ r_openshift_node_image_prep_packages }}"
+ register: result
+ until: result | success
- name: create the directory for node
file:
@@ -32,8 +34,8 @@
regexp: "^CONFIG_FILE=.*"
- name: include aws sysconfig credentials
- include: aws.yml
- static: yes
+ import_tasks: aws.yml
+ when: not (openshift_node_use_instance_profiles | default(False))
#- name: update the ExecStart to have bootstrap
# lineinfile:
diff --git a/roles/openshift_node/tasks/config.yml b/roles/openshift_node/tasks/config.yml
index c08f43118..741a2234f 100644
--- a/roles/openshift_node/tasks/config.yml
+++ b/roles/openshift_node/tasks/config.yml
@@ -1,6 +1,6 @@
---
- name: Install the systemd units
- include: systemd_units.yml
+ include_tasks: systemd_units.yml
- name: Start and enable openvswitch service
systemd:
@@ -47,8 +47,8 @@
- restart node
- name: include aws provider credentials
- include: aws.yml
- static: yes
+ import_tasks: aws.yml
+ when: not (openshift_node_use_instance_profiles | default(False))
# Necessary because when you're on a node that's also a master the master will be
# restarted after the node restarts docker and it will take up to 60 seconds for
diff --git a/roles/openshift_node/tasks/config/install-node-docker-service-file.yml b/roles/openshift_node/tasks/config/install-node-docker-service-file.yml
new file mode 100644
index 000000000..f92ff79b5
--- /dev/null
+++ b/roles/openshift_node/tasks/config/install-node-docker-service-file.yml
@@ -0,0 +1,8 @@
+---
+- name: Install Node docker service file
+ template:
+ dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service"
+ src: openshift.docker.node.service
+ notify:
+ - reload systemd units
+ - restart node
diff --git a/roles/openshift_node/tasks/dnsmasq.yml b/roles/openshift_node/tasks/dnsmasq.yml
new file mode 100644
index 000000000..f210a3a21
--- /dev/null
+++ b/roles/openshift_node/tasks/dnsmasq.yml
@@ -0,0 +1,69 @@
+---
+- name: Check for NetworkManager service
+ command: >
+ systemctl show NetworkManager
+ register: nm_show
+ changed_when: false
+ ignore_errors: True
+
+- name: Set fact using_network_manager
+ set_fact:
+ network_manager_active: "{{ True if 'ActiveState=active' in nm_show.stdout else False }}"
+
+- name: Install dnsmasq
+ package: name=dnsmasq state=installed
+ when: not openshift.common.is_atomic | bool
+ register: result
+ until: result | success
+
+- name: ensure origin/node directory exists
+ file:
+ state: directory
+ path: "{{ item }}"
+ owner: root
+ group: root
+ mode: '0700'
+ with_items:
+ - /etc/origin
+ - /etc/origin/node
+
+# This file is copied to /etc/dnsmasq.d/ when the node starts and is removed
+# when the node stops. A D-Bus message is sent to dnsmasq to add the same entries
+# so that dnsmasq doesn't need to be restarted. Once we can rely on dnsmasq 2.77 or
+# newer, we can use the --server-file option to update the servers dynamically and
+# reload them by sending dnsmasq a SIGHUP. We write the file in case something else
+# triggers a restart of dnsmasq without a node restart.
+- name: Install node-dnsmasq.conf
+ template:
+ src: node-dnsmasq.conf.j2
+ dest: /etc/origin/node/node-dnsmasq.conf
+
+- name: Install dnsmasq configuration
+ template:
+ src: origin-dns.conf.j2
+ dest: /etc/dnsmasq.d/origin-dns.conf
+ notify: restart dnsmasq
+
+- name: Deploy additional dnsmasq.conf
+ template:
+ src: "{{ openshift_node_dnsmasq_additional_config_file }}"
+ dest: /etc/dnsmasq.d/openshift-ansible.conf
+ owner: root
+ group: root
+ mode: 0644
+ when: openshift_node_dnsmasq_additional_config_file is defined
+ notify: restart dnsmasq
+
+- name: Enable dnsmasq
+ systemd:
+ name: dnsmasq
+ enabled: yes
+ state: started
+
+# Dynamic NetworkManager-based dispatcher
+- include_tasks: dnsmasq/network-manager.yml
+ when: network_manager_active | bool
+
+# Relies on Ansible to configure the static config
+- include_tasks: dnsmasq/no-network-manager.yml
+ when: not network_manager_active | bool
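
The openshift_node_dnsmasq_additional_config_file hook above lets an operator ship an extra dnsmasq fragment, deployed as /etc/dnsmasq.d/openshift-ansible.conf. A hedged sketch of wiring it up from the inventory; the file name and path are assumptions:

# group_vars/nodes.yml (illustrative only)
openshift_node_dnsmasq_additional_config_file: "{{ inventory_dir }}/files/extra-dnsmasq.conf"
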
diff --git a/roles/openshift_node/tasks/dnsmasq/network-manager.yml b/roles/openshift_node/tasks/dnsmasq/network-manager.yml
new file mode 100644
index 000000000..e5a92a630
--- /dev/null
+++ b/roles/openshift_node/tasks/dnsmasq/network-manager.yml
@@ -0,0 +1,10 @@
+---
+- name: Install network manager dispatch script
+ copy:
+ src: networkmanager/99-origin-dns.sh
+ dest: /etc/NetworkManager/dispatcher.d/
+ mode: 0755
+ notify: restart NetworkManager
+ when: openshift_node_dnsmasq_install_network_manager_hook | default(true) | bool
+
+- meta: flush_handlers
diff --git a/roles/openshift_node/tasks/dnsmasq/no-network-manager.yml b/roles/openshift_node/tasks/dnsmasq/no-network-manager.yml
new file mode 100644
index 000000000..541c8115a
--- /dev/null
+++ b/roles/openshift_node/tasks/dnsmasq/no-network-manager.yml
@@ -0,0 +1,13 @@
+---
+- fail: msg="Currently, NetworkManager must be installed and enabled prior to installation."
+ when: not openshift_node_bootstrap | bool
+
+- name: Install NetworkManager during node_bootstrap provisioning
+ package:
+ name: NetworkManager
+ state: present
+ notify: restart NetworkManager
+ register: result
+ until: result | success
+
+- include_tasks: network-manager.yml
diff --git a/roles/openshift_node/tasks/docker/upgrade.yml b/roles/openshift_node/tasks/docker/upgrade.yml
new file mode 100644
index 000000000..d743d2188
--- /dev/null
+++ b/roles/openshift_node/tasks/docker/upgrade.yml
@@ -0,0 +1,42 @@
+---
+# input variables:
+# - openshift.common.service_type
+# - openshift.common.is_containerized
+# - docker_upgrade_nuke_images
+# - docker_version
+# - skip_docker_restart
+
+- name: Check Docker image count
+ shell: "docker images -aq | wc -l"
+ register: docker_image_count
+
+- debug: var=docker_image_count.stdout
+
+# TODO(jchaloup): put all docker_upgrade_nuke_images into a block with only one condition
+- name: Remove all containers and images
+ script: nuke_images.sh
+ register: nuke_images_result
+ when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
+
+- name: Check Docker image count
+ shell: "docker images -aq | wc -l"
+ register: docker_image_count
+ when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
+
+- debug: var=docker_image_count.stdout
+ when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
+
+- service:
+ name: docker
+ state: stopped
+ register: l_openshift_node_upgrade_docker_stop_result
+ until: not l_openshift_node_upgrade_docker_stop_result | failed
+ retries: 3
+ delay: 30
+
+- name: Upgrade Docker
+ package: name=docker{{ '-' + docker_version }} state=present
+ register: result
+ until: result | success
+
+# starting docker happens back in ../main.yml where it calls ../restart.yml
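
These tasks are driven by variables the upgrade playbooks pass in; a hedged sketch of the values that would exercise the image-nuking and version-pinning paths (the Docker version shown is an assumption):

# illustrative only, e.g. supplied as extra-vars by an upgrade play
l_docker_upgrade: true
docker_version: "1.12.6"
docker_upgrade_nuke_images: true
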
diff --git a/roles/openshift_node/tasks/install.yml b/roles/openshift_node/tasks/install.yml
index 6b7e40491..1ed4a05c1 100644
--- a/roles/openshift_node/tasks/install.yml
+++ b/roles/openshift_node/tasks/install.yml
@@ -5,6 +5,8 @@
package:
name: "{{ openshift.common.service_type }}-node{{ (openshift_pkg_version | default('')) | oo_image_tag_to_rpm_version(include_dash=True) }}"
state: present
+ register: result
+ until: result | success
- name: Install sdn-ovs package
package:
@@ -12,15 +14,19 @@
state: present
when:
- openshift_node_use_openshift_sdn | bool
+ register: result
+ until: result | success
- name: Install conntrack-tools package
package:
name: "conntrack-tools"
state: present
+ register: result
+ until: result | success
- when:
- openshift.common.is_containerized | bool
- - not openshift.common.is_node_system_container | bool
+ - not l_is_node_system_container | bool
block:
- name: Pre-pull node image when containerized
command: >
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index eae9ca7bc..e60d96760 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -4,11 +4,12 @@
when:
- (not ansible_selinux or ansible_selinux.status != 'enabled')
- deployment_type == 'openshift-enterprise'
- - not openshift_use_crio | default(false)
+ - not openshift_use_crio
+
+- include: dnsmasq.yml
- name: setup firewall
- include: firewall.yml
- static: yes
+ import_tasks: firewall.yml
#### Disable SWAP #####
# https://docs.openshift.com/container-platform/3.4/admin_guide/overcommit.html#disabling-swap-memory
@@ -41,14 +42,14 @@
#### End Disable Swap Block ####
- name: include node installer
- include: install.yml
+ include_tasks: install.yml
- name: Restart cri-o
systemd:
name: cri-o
enabled: yes
state: restarted
- when: openshift_use_crio | default(false)
+ when: openshift_use_crio
- name: restart NetworkManager to ensure resolv.conf is present
systemd:
@@ -66,34 +67,34 @@
sysctl_file: "/etc/sysctl.d/99-openshift.conf"
reload: yes
-- include: registry_auth.yml
+- include_tasks: registry_auth.yml
- name: include standard node config
- include: config.yml
+ include_tasks: config.yml
#### Storage class plugins here ####
- name: NFS storage plugin configuration
- include: storage_plugins/nfs.yml
+ include_tasks: storage_plugins/nfs.yml
tags:
- nfs
- name: GlusterFS storage plugin configuration
- include: storage_plugins/glusterfs.yml
+ include_tasks: storage_plugins/glusterfs.yml
when: "'glusterfs' in openshift.node.storage_plugin_deps"
- name: Ceph storage plugin configuration
- include: storage_plugins/ceph.yml
+ include_tasks: storage_plugins/ceph.yml
when: "'ceph' in openshift.node.storage_plugin_deps"
- name: iSCSI storage plugin configuration
- include: storage_plugins/iscsi.yml
+ include_tasks: storage_plugins/iscsi.yml
when: "'iscsi' in openshift.node.storage_plugin_deps"
##### END Storage #####
-- include: config/workaround-bz1331590-ovs-oom-fix.yml
+- include_tasks: config/workaround-bz1331590-ovs-oom-fix.yml
when: openshift_node_use_openshift_sdn | default(true) | bool
- name: include bootstrap node config
- include: bootstrap.yml
+ include_tasks: bootstrap.yml
when: openshift_node_bootstrap
diff --git a/roles/openshift_node/tasks/node_system_container.yml b/roles/openshift_node/tasks/node_system_container.yml
index 164a79b39..eb8d9a6a5 100644
--- a/roles/openshift_node/tasks/node_system_container.yml
+++ b/roles/openshift_node/tasks/node_system_container.yml
@@ -6,16 +6,16 @@
- name: Pre-pull node system container image
command: >
- atomic pull --storage=ostree {{ 'docker:' if openshift.common.system_images_registry == 'docker' else openshift.common.system_images_registry + '/' }}{{ openshift.node.node_system_image }}:{{ openshift_image_tag }}
+ atomic pull --storage=ostree {{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.node.node_system_image }}:{{ openshift_image_tag }}
register: pull_result
changed_when: "'Pulling layer' in pull_result.stdout"
- name: Install or Update node system container
oc_atomic_container:
name: "{{ openshift.common.service_type }}-node"
- image: "{{ 'docker:' if openshift.common.system_images_registry == 'docker' else openshift.common.system_images_registry + '/' }}{{ openshift.node.node_system_image }}:{{ openshift_image_tag }}"
+ image: "{{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.node.node_system_image }}:{{ openshift_image_tag }}"
values:
- "DNS_DOMAIN={{ openshift.common.dns_domain }}"
- - "DOCKER_SERVICE={{ openshift.docker.service_name }}.service"
+ - "DOCKER_SERVICE={{ openshift_docker_service_name }}.service"
- "MASTER_SERVICE={{ openshift.common.service_type }}.service"
state: latest
diff --git a/roles/openshift_node/tasks/openvswitch_system_container.yml b/roles/openshift_node/tasks/openvswitch_system_container.yml
index 0f73ce454..d33e172c1 100644
--- a/roles/openshift_node/tasks/openvswitch_system_container.yml
+++ b/roles/openshift_node/tasks/openvswitch_system_container.yml
@@ -1,14 +1,11 @@
---
- set_fact:
- l_use_crio: "{{ openshift_use_crio | default(false) }}"
-
-- set_fact:
l_service_name: "cri-o"
- when: l_use_crio
+ when: openshift_use_crio
- set_fact:
- l_service_name: "{{ openshift.docker.service_name }}"
- when: not l_use_crio
+ l_service_name: "{{ openshift_docker_service_name }}"
+ when: not openshift_use_crio
- name: Ensure proxies are in the atomic.conf
include_role:
@@ -17,14 +14,14 @@
- name: Pre-pull OpenVSwitch system container image
command: >
- atomic pull --storage=ostree {{ 'docker:' if openshift.common.system_images_registry == 'docker' else openshift.common.system_images_registry + '/' }}{{ openshift.node.ovs_system_image }}:{{ openshift_image_tag }}
+ atomic pull --storage=ostree {{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.node.ovs_system_image }}:{{ openshift_image_tag }}
register: pull_result
changed_when: "'Pulling layer' in pull_result.stdout"
- name: Install or Update OpenVSwitch system container
oc_atomic_container:
name: openvswitch
- image: "{{ 'docker:' if openshift.common.system_images_registry == 'docker' else openshift.common.system_images_registry + '/' }}{{ openshift.node.ovs_system_image }}:{{ openshift_image_tag }}"
+ image: "{{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.node.ovs_system_image }}:{{ openshift_image_tag }}"
state: latest
values:
- "DOCKER_SERVICE={{ l_service_name }}"
diff --git a/roles/openshift_node/tasks/registry_auth.yml b/roles/openshift_node/tasks/registry_auth.yml
index 5e5e4f94a..ab43ec049 100644
--- a/roles/openshift_node/tasks/registry_auth.yml
+++ b/roles/openshift_node/tasks/registry_auth.yml
@@ -8,6 +8,7 @@
- name: Create credentials for registry auth
command: "docker --config={{ oreg_auth_credentials_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}"
when:
+ - not (openshift_docker_alternative_creds | default(False))
- oreg_auth_user is defined
- (not node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
register: node_oreg_auth_credentials_create
@@ -17,6 +18,24 @@
notify:
- restart node
+# docker_creds is a custom module from lib_utils.
+# 'docker login' requires a running docker.service on the local host; this is an
+# alternative implementation for non-docker hosts. This implementation does not
+# contact the registry to verify that the credentials actually work.
+- name: Create credentials for registry auth (alternative)
+ docker_creds:
+ path: "{{ oreg_auth_credentials_path }}"
+ registry: "{{ oreg_host }}"
+ username: "{{ oreg_auth_user }}"
+ password: "{{ oreg_auth_password }}"
+ when:
+ - openshift_docker_alternative_creds | bool
+ - oreg_auth_user is defined
+ - (not node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
+ register: node_oreg_auth_credentials_create_alt
+ notify:
+ - restart node
+
# Container images may need the registry credentials
- name: Setup ro mount of /root/.docker for containerized hosts
set_fact:
@@ -24,4 +43,8 @@
when:
- openshift.common.is_containerized | bool
- oreg_auth_user is defined
- - (node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace or node_oreg_auth_credentials_create.changed) | bool
+ - >
+ (node_oreg_auth_credentials_stat.stat.exists
+ or oreg_auth_credentials_replace
+ or node_oreg_auth_credentials_create.changed
+ or node_oreg_auth_credentials_create_alt.changed) | bool
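
Both credential paths above are selected by inventory variables from this role's defaults. A minimal sketch with placeholder values; openshift_docker_alternative_creds is normally derived from the two flags shown at the end rather than set directly:

# illustrative only
oreg_url: registry.example.com/openshift3/ose-${component}:${version}
oreg_auth_user: registry-user
oreg_auth_password: "{{ vault_oreg_auth_password }}"
# Either of these flips registry_auth.yml to the docker_creds module:
openshift_docker_use_system_container: true
# openshift_use_crio_only: true
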
diff --git a/roles/openshift_node/tasks/storage_plugins/ceph.yml b/roles/openshift_node/tasks/storage_plugins/ceph.yml
index 037efe81a..72a3b837f 100644
--- a/roles/openshift_node/tasks/storage_plugins/ceph.yml
+++ b/roles/openshift_node/tasks/storage_plugins/ceph.yml
@@ -2,3 +2,5 @@
- name: Install Ceph storage plugin dependencies
package: name=ceph-common state=present
when: not openshift.common.is_atomic | bool
+ register: result
+ until: result | success
diff --git a/roles/openshift_node/tasks/storage_plugins/glusterfs.yml b/roles/openshift_node/tasks/storage_plugins/glusterfs.yml
index 1b8a7ad50..08ea71a0c 100644
--- a/roles/openshift_node/tasks/storage_plugins/glusterfs.yml
+++ b/roles/openshift_node/tasks/storage_plugins/glusterfs.yml
@@ -2,6 +2,8 @@
- name: Install GlusterFS storage plugin dependencies
package: name=glusterfs-fuse state=present
when: not openshift.common.is_atomic | bool
+ register: result
+ until: result | success
- name: Check for existence of fusefs sebooleans
command: getsebool {{ item }}
diff --git a/roles/openshift_node/tasks/storage_plugins/iscsi.yml b/roles/openshift_node/tasks/storage_plugins/iscsi.yml
index 1c5478c55..ece68dc71 100644
--- a/roles/openshift_node/tasks/storage_plugins/iscsi.yml
+++ b/roles/openshift_node/tasks/storage_plugins/iscsi.yml
@@ -2,3 +2,5 @@
- name: Install iSCSI storage plugin dependencies
package: name=iscsi-initiator-utils state=present
when: not openshift.common.is_atomic | bool
+ register: result
+ until: result | success
diff --git a/roles/openshift_node/tasks/storage_plugins/nfs.yml b/roles/openshift_node/tasks/storage_plugins/nfs.yml
index 7e1035893..5eacf42e8 100644
--- a/roles/openshift_node/tasks/storage_plugins/nfs.yml
+++ b/roles/openshift_node/tasks/storage_plugins/nfs.yml
@@ -2,6 +2,8 @@
- name: Install NFS storage plugin dependencies
package: name=nfs-utils state=present
when: not openshift.common.is_atomic | bool
+ register: result
+ until: result | success
- name: Check for existence of nfs sebooleans
command: getsebool {{ item }}
diff --git a/roles/openshift_node/tasks/systemd_units.yml b/roles/openshift_node/tasks/systemd_units.yml
index 9c182ade6..397e1ba18 100644
--- a/roles/openshift_node/tasks/systemd_units.yml
+++ b/roles/openshift_node/tasks/systemd_units.yml
@@ -3,7 +3,7 @@
template:
dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service"
src: "{{ openshift.common.is_containerized | bool | ternary('openshift.docker.node.service', 'node.service.j2') }}"
- when: not openshift.common.is_node_system_container | bool
+ when: not l_is_node_system_container | bool
notify:
- reload systemd units
- restart node
@@ -11,21 +11,21 @@
- when: openshift.common.is_containerized | bool
block:
- name: include node deps docker service file
- include: config/install-node-deps-docker-service-file.yml
+ include_tasks: config/install-node-deps-docker-service-file.yml
- name: include ovs service environment file
- include: config/install-ovs-service-env-file.yml
+ include_tasks: config/install-ovs-service-env-file.yml
- name: Install Node system container
- include: node_system_container.yml
+ include_tasks: node_system_container.yml
when:
- - openshift.common.is_node_system_container | bool
+ - l_is_node_system_container | bool
- name: Install OpenvSwitch system containers
- include: openvswitch_system_container.yml
+ include_tasks: openvswitch_system_container.yml
when:
- openshift_node_use_openshift_sdn | bool
- - openshift.common.is_openvswitch_system_container | bool
+ - l_is_openvswitch_system_container | bool
- block:
- name: Pre-pull openvswitch image
@@ -34,11 +34,11 @@
register: pull_result
changed_when: "'Downloaded newer image' in pull_result.stdout"
- - include: config/install-ovs-docker-service-file.yml
+ - include_tasks: config/install-ovs-docker-service-file.yml
when:
- openshift.common.is_containerized | bool
- openshift_node_use_openshift_sdn | bool
- - not openshift.common.is_openvswitch_system_container | bool
+ - not l_is_openvswitch_system_container | bool
-- include: config/configure-node-settings.yml
-- include: config/configure-proxy-settings.yml
+- include_tasks: config/configure-node-settings.yml
+- include_tasks: config/configure-proxy-settings.yml
diff --git a/roles/openshift_node/tasks/upgrade.yml b/roles/openshift_node/tasks/upgrade.yml
new file mode 100644
index 000000000..561b56918
--- /dev/null
+++ b/roles/openshift_node/tasks/upgrade.yml
@@ -0,0 +1,184 @@
+---
+# input variables:
+# - l_docker_upgrade
+# - openshift.common.is_atomic
+# - node_config_hook
+# - openshift_pkg_version
+# - openshift.common.is_containerized
+# - deployment_type
+# - openshift_release
+
+# tasks file for openshift_node_upgrade
+
+- include_tasks: registry_auth.yml
+
+- name: Stop node and openvswitch services
+ service:
+ name: "{{ item }}"
+ state: stopped
+ with_items:
+ - "{{ openshift.common.service_type }}-node"
+ - openvswitch
+ failed_when: false
+
+- name: Stop additional containerized services
+ service:
+ name: "{{ item }}"
+ state: stopped
+ with_items:
+ - "{{ openshift.common.service_type }}-master-controllers"
+ - "{{ openshift.common.service_type }}-master-api"
+ - etcd_container
+ failed_when: false
+ when: openshift.common.is_containerized | bool
+
+- name: Pre-pull node image
+ command: >
+ docker pull {{ openshift.node.node_image }}:{{ openshift_image_tag }}
+ register: pull_result
+ changed_when: "'Downloaded newer image' in pull_result.stdout"
+ when: openshift.common.is_containerized | bool
+
+- name: Pre-pull openvswitch image
+ command: >
+ docker pull {{ openshift.node.ovs_image }}:{{ openshift_image_tag }}
+ register: pull_result
+ changed_when: "'Downloaded newer image' in pull_result.stdout"
+ when:
+ - openshift.common.is_containerized | bool
+ - openshift_use_openshift_sdn | bool
+
+- include_tasks: docker/upgrade.yml
+ vars:
+ # We will restart Docker ourselves after everything is ready:
+ skip_docker_restart: True
+ when:
+ - l_docker_upgrade is defined
+ - l_docker_upgrade | bool
+
+- include_tasks: "{{ node_config_hook }}"
+ when: node_config_hook is defined
+
+- include_tasks: upgrade/rpm_upgrade.yml
+ vars:
+ component: "node"
+ openshift_version: "{{ openshift_pkg_version | default('') }}"
+ when: not openshift.common.is_containerized | bool
+
+- name: Remove obsolete docker-sdn-ovs.conf
+ file:
+ path: "/etc/systemd/system/docker.service.d/docker-sdn-ovs.conf"
+ state: absent
+
+- include_tasks: upgrade/containerized_node_upgrade.yml
+ when: openshift.common.is_containerized | bool
+
+- name: Ensure containerized services stopped before Docker restart
+ service:
+ name: "{{ item }}"
+ state: stopped
+ with_items:
+ - etcd_container
+ - openvswitch
+ - "{{ openshift.common.service_type }}-master-api"
+ - "{{ openshift.common.service_type }}-master-controllers"
+ - "{{ openshift.common.service_type }}-node"
+ failed_when: false
+ when: openshift.common.is_containerized | bool
+
+- name: Stop rpm based services
+ service:
+ name: "{{ item }}"
+ state: stopped
+ with_items:
+ - "{{ openshift.common.service_type }}-node"
+ - openvswitch
+ failed_when: false
+ when: not openshift.common.is_containerized | bool
+
+# https://bugzilla.redhat.com/show_bug.cgi?id=1513054
+- name: Clean up dockershim data
+ file:
+ path: "/var/lib/dockershim/sandbox/"
+ state: absent
+
+- name: Upgrade openvswitch
+ package:
+ name: openvswitch
+ state: latest
+ when: not openshift.common.is_containerized | bool
+ register: result
+ until: result | success
+
+- name: Update oreg value
+ yedit:
+ src: "{{ openshift.common.config_base }}/node/node-config.yaml"
+ key: 'imageConfig.format'
+ value: "{{ oreg_url | default(oreg_url_node) }}"
+ when: oreg_url is defined or oreg_url_node is defined
+
+# https://docs.openshift.com/container-platform/3.4/admin_guide/overcommit.html#disabling-swap-memory
+- name: Check for swap usage
+ command: grep "^[^#].*swap" /etc/fstab
+ # grep: match any lines which don't begin with '#' and contain 'swap'
+ changed_when: false
+ failed_when: false
+ register: swap_result
+
+ # Disable Swap Block
+- block:
+
+ - name: Disable swap
+ command: swapoff --all
+
+ - name: Remove swap entries from /etc/fstab
+ replace:
+ dest: /etc/fstab
+ regexp: '(^[^#].*swap.*)'
+ replace: '# \1'
+ backup: yes
+
+ - name: Add notice about disabling swap
+ lineinfile:
+ dest: /etc/fstab
+ line: '# OpenShift-Ansible Installer disabled swap per overcommit guidelines'
+ state: present
+
+ when:
+ - swap_result.stdout_lines | length > 0
+ - openshift_disable_swap | default(true) | bool
+ # End Disable Swap Block
+
+- name: Reset selinux context
+ command: restorecon -RF {{ openshift_node_data_dir }}/openshift.local.volumes
+ when:
+ - ansible_selinux is defined
+ - ansible_selinux.status == 'enabled'
+
+- name: Apply 3.6 dns config changes
+ yedit:
+ src: /etc/origin/node/node-config.yaml
+ key: "{{ item.key }}"
+ value: "{{ item.value }}"
+ with_items:
+ - key: "dnsBindAddress"
+ value: "127.0.0.1:53"
+ - key: "dnsRecursiveResolvConf"
+ value: "/etc/origin/node/resolv.conf"
+
+# Restart all services
+- include_tasks: upgrade/restart.yml
+
+- name: Wait for node to be ready
+ oc_obj:
+ state: list
+ kind: node
+ name: "{{ openshift.common.hostname | lower }}"
+ register: node_output
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+ until: node_output.results.returncode == 0 and node_output.results.results[0].status.conditions | selectattr('type', 'match', '^Ready$') | map(attribute='status') | join | bool == True
+ # Give the node two minutes to come back online.
+ retries: 24
+ delay: 5
+
+- include_tasks: dnsmasq.yml
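
A hedged sketch of how an upgrade play might consume this task file; the play layout, group name, and serial value are assumptions and not part of this change:

- hosts: oo_nodes_to_upgrade
  serial: 1
  tasks:
  - include_role:
      name: openshift_node
      tasks_from: upgrade.yml
    vars:
      openshift_node_upgrade_in_progress: True
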
diff --git a/roles/openshift_node/tasks/upgrade/containerized_node_upgrade.yml b/roles/openshift_node/tasks/upgrade/containerized_node_upgrade.yml
new file mode 100644
index 000000000..245de60a7
--- /dev/null
+++ b/roles/openshift_node/tasks/upgrade/containerized_node_upgrade.yml
@@ -0,0 +1,14 @@
+---
+# This is a hack to allow us to use systemd_units.yml, but skip the handlers which
+# restart services. We will unconditionally restart all containerized services
+# because we have to unconditionally restart Docker:
+- set_fact:
+ skip_node_svc_handlers: True
+
+- name: Update systemd units
+ include_tasks: ../systemd_units.yml
+
+# This is a no-op because of skip_node_svc_handlers, but it lets us flush the
+# handlers before the end of the play, when the node has already been marked
+# schedulable again (otherwise this would look strange in the logs).
+- meta: flush_handlers
diff --git a/roles/openshift_node/tasks/upgrade/restart.yml b/roles/openshift_node/tasks/upgrade/restart.yml
new file mode 100644
index 000000000..3f1abceab
--- /dev/null
+++ b/roles/openshift_node/tasks/upgrade/restart.yml
@@ -0,0 +1,42 @@
+---
+# input variables:
+# - openshift.common.service_type
+# - openshift.common.is_containerized
+# - openshift.common.hostname
+# - openshift.master.api_port
+
+# NOTE: This is needed to make sure we are using the correct set
+# of systemd unit files. The RPMs lay down defaults but
+# the install/upgrade may override them in /etc/systemd/system/.
+# NOTE: We don't use the systemd module as some versions of the module
+# require a service to be part of the call.
+- name: Reload systemd to ensure latest unit files
+ command: systemctl daemon-reload
+
+- name: Restart container runtime
+ service:
+ name: "{{ openshift_docker_service_name }}"
+ state: started
+ register: docker_start_result
+ until: not docker_start_result | failed
+ retries: 3
+ delay: 30
+
+- name: Start services
+ service: name={{ item }} state=started
+ with_items:
+ - etcd_container
+ - openvswitch
+ - "{{ openshift.common.service_type }}-master-api"
+ - "{{ openshift.common.service_type }}-master-controllers"
+ - "{{ openshift.common.service_type }}-node"
+ failed_when: false
+
+- name: Wait for master API to come back online
+ wait_for:
+ host: "{{ openshift.common.hostname }}"
+ state: started
+ delay: 10
+ port: "{{ openshift.master.api_port }}"
+ timeout: 600
+ when: inventory_hostname in groups.oo_masters_to_config
diff --git a/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml b/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml
new file mode 100644
index 000000000..fcbe1a598
--- /dev/null
+++ b/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml
@@ -0,0 +1,33 @@
+---
+# input variables:
+# - openshift.common.service_type
+# - component
+# - openshift_pkg_version
+# - openshift.common.is_atomic
+
+# We already verified that the latest available RPM is suitable, so just yum update.
+- name: Upgrade packages
+ package: "name={{ openshift.common.service_type }}-{{ component }}{{ openshift_pkg_version }} state=present"
+ register: result
+ until: result | success
+
+- name: Ensure python-yaml present for config upgrade
+ package: name=PyYAML state=present
+ when: not openshift.common.is_atomic | bool
+ register: result
+ until: result | success
+
+- name: Install Node service file
+ template:
+ dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service"
+ src: "node.service.j2"
+ register: l_node_unit
+
+# NOTE: This is needed to make sure we are using the correct set
+# of systemd unit files. The RPMs lay down defaults but
+# the install/upgrade may override them in /etc/systemd/system/.
+# NOTE: We don't use the systemd module as some versions of the module
+# require a service to be part of the call.
+- name: Reload systemd units
+ command: systemctl daemon-reload
+ when: l_node_unit | changed
diff --git a/roles/openshift_node/templates/node-dnsmasq.conf.j2 b/roles/openshift_node/templates/node-dnsmasq.conf.j2
new file mode 100644
index 000000000..3caa3bd4a
--- /dev/null
+++ b/roles/openshift_node/templates/node-dnsmasq.conf.j2
@@ -0,0 +1,2 @@
+server=/in-addr.arpa/127.0.0.1
+server=/{{ openshift.common.dns_domain }}/127.0.0.1
diff --git a/roles/openshift_node/templates/node.service.j2 b/roles/openshift_node/templates/node.service.j2
index 7602d8ee6..da751bd65 100644
--- a/roles/openshift_node/templates/node.service.j2
+++ b/roles/openshift_node/templates/node.service.j2
@@ -1,14 +1,14 @@
[Unit]
Description=OpenShift Node
-After={{ openshift.docker.service_name }}.service
+After={{ openshift_docker_service_name }}.service
Wants=openvswitch.service
After=ovsdb-server.service
After=ovs-vswitchd.service
-Wants={{ openshift.docker.service_name }}.service
+Wants={{ openshift_docker_service_name }}.service
Documentation=https://github.com/openshift/origin
Requires=dnsmasq.service
After=dnsmasq.service
-{% if openshift_use_crio|default(false) %}Wants=cri-o.service{% endif %}
+{% if openshift_use_crio %}Wants=cri-o.service{% endif %}
[Service]
Type=notify
diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2
index 718d35dca..16fdde02e 100644
--- a/roles/openshift_node/templates/node.yaml.v1.j2
+++ b/roles/openshift_node/templates/node.yaml.v1.j2
@@ -16,7 +16,7 @@ imageConfig:
latest: {{ openshift_node_image_config_latest }}
kind: NodeConfig
kubeletArguments: {{ openshift.node.kubelet_args | default(None) | to_padded_yaml(level=1) }}
-{% if openshift_use_crio | default(False) %}
+{% if openshift_use_crio %}
container-runtime:
- remote
container-runtime-endpoint:
@@ -29,13 +29,11 @@ kubeletArguments: {{ openshift.node.kubelet_args | default(None) | to_padded_yam
runtime-request-timeout:
- 10m
{% endif %}
-{% if openshift.common.version_gte_3_3_or_1_3 | bool %}
masterClientConnectionOverrides:
acceptContentTypes: application/vnd.kubernetes.protobuf,application/json
contentType: application/vnd.kubernetes.protobuf
burst: 200
qps: 100
-{% endif %}
masterKubeConfig: system:node:{{ openshift.common.hostname }}.kubeconfig
{% if openshift_node_use_openshift_sdn | bool %}
networkPluginName: {{ openshift_node_sdn_network_plugin_name }}
diff --git a/roles/openshift_node/templates/openshift.docker.node.dep.service b/roles/openshift_node/templates/openshift.docker.node.dep.service
index fa7238849..5964ac095 100644
--- a/roles/openshift_node/templates/openshift.docker.node.dep.service
+++ b/roles/openshift_node/templates/openshift.docker.node.dep.service
@@ -1,9 +1,9 @@
[Unit]
-Requires={{ openshift.docker.service_name }}.service
-After={{ openshift.docker.service_name }}.service
+Requires={{ openshift_docker_service_name }}.service
+After={{ openshift_docker_service_name }}.service
PartOf={{ openshift.common.service_type }}-node.service
Before={{ openshift.common.service_type }}-node.service
-{% if openshift_use_crio|default(false) %}Wants=cri-o.service{% endif %}
+{% if openshift_use_crio %}Wants=cri-o.service{% endif %}
[Service]
ExecStart=/bin/bash -c "if [[ -f /usr/bin/docker-current ]]; then echo \"DOCKER_ADDTL_BIND_MOUNTS=--volume=/usr/bin/docker-current:/usr/bin/docker-current:ro --volume=/etc/sysconfig/docker:/etc/sysconfig/docker:ro --volume=/etc/containers/registries:/etc/containers/registries:ro\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; else echo \"#DOCKER_ADDTL_BIND_MOUNTS=\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; fi"
diff --git a/roles/openshift_node/templates/openshift.docker.node.service b/roles/openshift_node/templates/openshift.docker.node.service
index 561aa01f4..3b33ca542 100644
--- a/roles/openshift_node/templates/openshift.docker.node.service
+++ b/roles/openshift_node/templates/openshift.docker.node.service
@@ -1,9 +1,9 @@
[Unit]
After={{ openshift.common.service_type }}-master.service
-After={{ openshift.docker.service_name }}.service
+After={{ openshift_docker_service_name }}.service
After=openvswitch.service
-PartOf={{ openshift.docker.service_name }}.service
-Requires={{ openshift.docker.service_name }}.service
+PartOf={{ openshift_docker_service_name }}.service
+Requires={{ openshift_docker_service_name }}.service
{% if openshift_node_use_openshift_sdn %}
Wants=openvswitch.service
PartOf=openvswitch.service
@@ -26,7 +26,7 @@ ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node \
--rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node \
-v /:/rootfs:ro,rslave -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} \
-e HOST=/rootfs -e HOST_ETC=/host-etc \
- -v {{ openshift_node_data_dir }}:{{ openshift_node_data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} \
+ -v {{ openshift_node_data_dir }}:{{ openshift_node_data_dir }}:rslave \
-v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node \
{% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} \
-v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro \
@@ -48,4 +48,4 @@ Restart=always
RestartSec=5s
[Install]
-WantedBy={{ openshift.docker.service_name }}.service
+WantedBy={{ openshift_docker_service_name }}.service
diff --git a/roles/openshift_node/templates/openvswitch.docker.service b/roles/openshift_node/templates/openvswitch.docker.service
index 34aaaabd6..37f091c76 100644
--- a/roles/openshift_node/templates/openvswitch.docker.service
+++ b/roles/openshift_node/templates/openvswitch.docker.service
@@ -1,7 +1,7 @@
[Unit]
-After={{ openshift.docker.service_name }}.service
-Requires={{ openshift.docker.service_name }}.service
-PartOf={{ openshift.docker.service_name }}.service
+After={{ openshift_docker_service_name }}.service
+Requires={{ openshift_docker_service_name }}.service
+PartOf={{ openshift_docker_service_name }}.service
[Service]
EnvironmentFile=/etc/sysconfig/openvswitch
@@ -14,4 +14,4 @@ Restart=always
RestartSec=5s
[Install]
-WantedBy={{ openshift.docker.service_name }}.service
+WantedBy={{ openshift_docker_service_name }}.service
diff --git a/roles/openshift_node/templates/origin-dns.conf.j2 b/roles/openshift_node/templates/origin-dns.conf.j2
new file mode 100644
index 000000000..6543c7c3e
--- /dev/null
+++ b/roles/openshift_node/templates/origin-dns.conf.j2
@@ -0,0 +1,12 @@
+no-resolv
+domain-needed
+no-negcache
+max-cache-ttl=1
+enable-dbus
+dns-forward-max=5000
+cache-size=5000
+bind-dynamic
+{% for interface in openshift_node_dnsmasq_except_interfaces %}
+except-interface={{ interface }}
+{% endfor %}
+# End of config
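
With the role defaults, the loop above renders a single 'except-interface=lo' line. A sketch of extending the exclusion list from the inventory; the extra interface name is an assumption, and lo must remain in the list per the comment in defaults/main.yml:

# group_vars/nodes.yml (illustrative only)
openshift_node_dnsmasq_except_interfaces:
- lo
- docker0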