-rw-r--r--  docs/proposals/role_decomposition.md | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml | 2
-rw-r--r--  playbooks/container-runtime/config.yml | 6
-rw-r--r--  playbooks/container-runtime/private/config.yml | 28
l---------  playbooks/container-runtime/private/roles | 1
-rw-r--r--  playbooks/prerequisites.yml | 8
-rw-r--r--  roles/container_runtime/README.md | 25
-rw-r--r--  roles/container_runtime/defaults/main.yml | 53
-rw-r--r--  roles/container_runtime/tasks/common/atomic_proxy.yml (renamed from roles/openshift_atomic/tasks/proxy.yml) | 0
-rw-r--r--  roles/container_runtime/tasks/common/post.yml | 26
-rw-r--r--  roles/container_runtime/tasks/common/pre.yml | 12
-rw-r--r--  roles/container_runtime/tasks/common/setup_docker_symlink.yml | 38
-rw-r--r--  roles/container_runtime/tasks/common/syscontainer_packages.yml | 28
-rw-r--r--  roles/container_runtime/tasks/common/udev_workaround.yml (renamed from roles/container_runtime/tasks/udev_workaround.yml) | 0
-rw-r--r--  roles/container_runtime/tasks/docker_sanity.yml | 27
-rw-r--r--  roles/container_runtime/tasks/main.yml | 85
-rw-r--r--  roles/container_runtime/tasks/package_docker.yml | 36
-rw-r--r--  roles/container_runtime/tasks/systemcontainer_crio.yml | 83
-rw-r--r--  roles/container_runtime/tasks/systemcontainer_docker.yml | 78
-rw-r--r--  roles/etcd/tasks/migration/add_ttls.yml | 2
-rw-r--r--  roles/etcd/tasks/system_container.yml | 5
-rw-r--r--  roles/openshift_atomic/README.md | 28
-rw-r--r--  roles/openshift_atomic/meta/main.yml | 13
-rw-r--r--  roles/openshift_cli/library/openshift_container_binary_sync.py | 7
-rw-r--r--  roles/openshift_docker_gc/templates/dockergc-ds.yaml.j2 | 2
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py | 75
-rw-r--r--  roles/openshift_master/tasks/registry_auth.yml | 2
-rw-r--r--  roles/openshift_master/tasks/system_container.yml | 4
-rw-r--r--  roles/openshift_node/README.md | 6
-rw-r--r--  roles/openshift_node/tasks/main.yml | 7
-rw-r--r--  roles/openshift_node/tasks/node_system_container.yml | 4
-rw-r--r--  roles/openshift_node/tasks/openvswitch_system_container.yml | 5
-rw-r--r--  roles/openshift_provisioners/tasks/install_efs.yaml | 2
36 files changed, 292 insertions(+), 414 deletions(-)
diff --git a/docs/proposals/role_decomposition.md b/docs/proposals/role_decomposition.md
index 6434e24e7..37d080d5c 100644
--- a/docs/proposals/role_decomposition.md
+++ b/docs/proposals/role_decomposition.md
@@ -262,7 +262,7 @@ dependencies:
- name: "Create logging project"
command: >
- {{ openshift.common.admin_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-project {{openshift_logging_namespace}}
+ {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig new-project {{openshift_logging_namespace}}
when: not ansible_check_mode and "not found" in logging_project_result.stderr
- name: Create logging cert directory
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
index 86aa117c3..5c6def484 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
@@ -51,7 +51,7 @@
- name: Drain Node for Kubelet upgrade
command: >
- {{ openshift.common.admin_binary }} drain {{ openshift.node.nodename }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+ {{ openshift.common.client_binary }} adm drain {{ openshift.node.nodename }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
delegate_to: "{{ groups.oo_first_master.0 }}"
when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade
register: l_docker_upgrade_drain_result
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index 9af8edbfa..37fc8a0f6 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -305,7 +305,7 @@
- name: Drain Node for Kubelet upgrade
command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+ {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
delegate_to: "{{ groups.oo_first_master.0 }}"
register: l_upgrade_control_plane_drain_result
until: not l_upgrade_control_plane_drain_result | failed
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
index 75ffd3fe9..18730a540 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -26,7 +26,7 @@
- name: Drain Node for Kubelet upgrade
command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+ {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
delegate_to: "{{ groups.oo_first_master.0 }}"
register: l_upgrade_nodes_drain_result
until: not l_upgrade_nodes_drain_result | failed
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
index a142ce56d..47410dff3 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
@@ -42,7 +42,7 @@
- name: Drain Node for Kubelet upgrade
command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+ {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
delegate_to: "{{ groups.oo_first_master.0 }}"
register: l_upgrade_nodes_drain_result
until: not l_upgrade_nodes_drain_result | failed
diff --git a/playbooks/container-runtime/config.yml b/playbooks/container-runtime/config.yml
new file mode 100644
index 000000000..f15aa771f
--- /dev/null
+++ b/playbooks/container-runtime/config.yml
@@ -0,0 +1,6 @@
+---
+- import_playbook: ../init/main.yml
+ vars:
+ skip_verison: True
+
+- import_playbook: private/config.yml
diff --git a/playbooks/container-runtime/private/config.yml b/playbooks/container-runtime/private/config.yml
new file mode 100644
index 000000000..67445edeb
--- /dev/null
+++ b/playbooks/container-runtime/private/config.yml
@@ -0,0 +1,28 @@
+---
+- hosts: "{{ l_containerized_host_groups }}"
+ vars:
+ l_chg_temp: "{{ openshift_containerized_host_groups | default([]) }}"
+ l_containerized_host_groups: "{{ (['oo_nodes_to_config'] | union(l_chg_temp)) | join(':') }}"
+ # role: container_runtime is necessary here to bring role default variables
+ # into the play scope.
+ roles:
+ - role: container_runtime
+ tasks:
+ - include_role:
+ name: container_runtime
+ tasks_from: package_docker.yml
+ when:
+ - not openshift_docker_use_system_container | bool
+ - not openshift_use_crio_only | bool
+ - include_role:
+ name: container_runtime
+ tasks_from: systemcontainer_docker.yml
+ when:
+ - openshift_docker_use_system_container | bool
+ - not openshift_use_crio_only | bool
+ - include_role:
+ name: container_runtime
+ tasks_from: systemcontainer_crio.yml
+ when:
+ - openshift_use_crio | bool
+ - openshift_docker_is_node_or_master | bool
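Runtime selection in the playbook above is driven entirely by inventory booleans. A minimal inventory sketch (values illustrative; the CRI-O defaults match roles/container_runtime/defaults/main.yml further down):

```
# Pick exactly one runtime path for the containerized host groups.
# Package docker is the default (both toggles false).
openshift_docker_use_system_container: False
openshift_use_crio: False
openshift_use_crio_only: False

# Docker as a system container instead:
#openshift_docker_use_system_container: True

# CRI-O (also gated on openshift_docker_is_node_or_master):
#openshift_use_crio: True
```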
diff --git a/playbooks/container-runtime/private/roles b/playbooks/container-runtime/private/roles
new file mode 120000
index 000000000..148b13206
--- /dev/null
+++ b/playbooks/container-runtime/private/roles
@@ -0,0 +1 @@
+../../roles/
\ No newline at end of file
diff --git a/playbooks/prerequisites.yml b/playbooks/prerequisites.yml
index 5fd2b4d7f..0cc5fcef8 100644
--- a/playbooks/prerequisites.yml
+++ b/playbooks/prerequisites.yml
@@ -3,10 +3,4 @@
vars:
skip_verison: True
-- hosts: "{{ l_containerized_host_groups }}"
- vars:
- l_chg_temp: "{{ openshift_containerized_host_groups | default([]) }}"
- l_containerized_host_groups: "{{ (['oo_nodes_to_config'] | union(l_chg_temp)) | join(':') }}"
- tasks:
- - include_role:
- name: container_runtime
+- import_playbook: container-runtime/private/config.yml
diff --git a/roles/container_runtime/README.md b/roles/container_runtime/README.md
index e363c1714..51f469aaf 100644
--- a/roles/container_runtime/README.md
+++ b/roles/container_runtime/README.md
@@ -1,18 +1,23 @@
-Docker
+Container Runtime
=========
Ensures docker package or system container is installed, and optionally raises timeout for systemd-udevd.service to 5 minutes.
container-daemon.json items may be found at https://docs.docker.com/engine/reference/commandline/dockerd/#daemon-configuration-file
-Requirements
-------------
+This role is designed to be used with include_role and tasks_from.
-Ansible 2.2
+Entry points
+------------
+* package_docker.yml - install and set up the docker container runtime.
+* systemcontainer_docker.yml - utilize docker + systemcontainer
+* systemcontainer_crio.yml - utilize crio + systemcontainer
+* registry_auth.yml - place docker login credentials.
-Mandator Role Variables
---------------
+Requirements
+------------
+Ansible 2.4
Dependencies
@@ -24,9 +29,9 @@ Example Playbook
----------------
- hosts: servers
- roles:
- - role: container_runtime
- docker_udev_workaround: "true"
+  tasks:
+  - include_role:
+      name: container_runtime
+      tasks_from: package_docker.yml
License
-------
@@ -36,4 +41,4 @@ ASL 2.0
Author Information
------------------
-OpenShift operations, Red Hat, Inc
+Red Hat, Inc
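The registry_auth.yml entry point listed above is invoked the same way as the example playbook; a minimal sketch, assuming oreg_auth_user and oreg_auth_password are already set in the inventory:

```
- hosts: nodes
  tasks:
  - include_role:
      name: container_runtime
      tasks_from: registry_auth.yml
```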
diff --git a/roles/container_runtime/defaults/main.yml b/roles/container_runtime/defaults/main.yml
index 62b3e141a..bd96965ac 100644
--- a/roles/container_runtime/defaults/main.yml
+++ b/roles/container_runtime/defaults/main.yml
@@ -71,10 +71,59 @@ docker_no_proxy: "{{ openshift.common.no_proxy | default('') }}"
openshift_use_crio: False
openshift_use_crio_only: False
+l_openshift_image_tag_default: "{{ openshift_release | default('latest') }}"
+l_openshift_image_tag: "{{ openshift_image_tag | default(l_openshift_image_tag_default) | string}}"
+# --------------------- #
+# systemcontainers_crio #
+# --------------------- #
l_insecure_crio_registries: "{{ '\"{}\"'.format('\", \"'.join(l2_docker_insecure_registries)) }}"
l_crio_registries: "{{ l2_docker_additional_registries + ['docker.io'] }}"
l_additional_crio_registries: "{{ '\"{}\"'.format('\", \"'.join(l_crio_registries)) }}"
-l_openshift_image_tag_default: "{{ openshift_release }}"
-l_openshift_image_tag: "{{ openshift_image_tag | default(l_openshift_image_tag_default) | string}}"
+openshift_crio_image_tag_default: "latest"
+
+l_crt_crio_image_tag_dict:
+ openshift-enterprise: "{{ l_openshift_image_tag }}"
+ origin: "{{ openshift_crio_image_tag | default(openshift_crio_image_tag_default) }}"
+
+l_crt_crio_image_prepend_dict:
+ openshift-enterprise: "registry.access.redhat.com/openshift3"
+ origin: "docker.io/gscrivano"
+
+l_crt_crio_image_dict:
+ Fedora:
+ crio_image_name: "cri-o-fedora"
+ crio_image_tag: "latest"
+ CentOS:
+ crio_image_name: "cri-o-centos"
+ crio_image_tag: "latest"
+ RedHat:
+ crio_image_name: "cri-o"
+ crio_image_tag: "{{ openshift_crio_image_tag | default(l_crt_crio_image_tag_dict[openshift_deployment_type]) }}"
+
+l_crio_image_prepend: "{{ l_crt_crio_image_prepend_dict[openshift_deployment_type] }}"
+l_crio_image_name: "{{ l_crt_crio_image_dict[ansible_distribution]['crio_image_name'] }}"
+l_crio_image_tag: "{{ l_crt_crio_image_dict[ansible_distribution]['crio_image_tag'] }}"
+
+l_crio_image_default: "{{ l_crio_image_prepend }}/{{ l_crio_image_name }}:{{ l_crio_image_tag }}"
+l_crio_image: "{{ openshift_crio_systemcontainer_image_override | default(l_crio_image_default) }}"
+
+# ----------------------- #
+# systemcontainers_docker #
+# ----------------------- #
+l_crt_docker_image_prepend_dict:
+ Fedora: "registry.fedoraproject.org/f25"
+ CentOS: "docker.io/gscrivano"
+ RedHat: "registry.access.redhat.com/openshift3"
+
+openshift_docker_image_tag_default: "latest"
+l_crt_docker_image_tag_dict:
+ openshift-enterprise: "{{ l_openshift_image_tag }}"
+ origin: "{{ openshift_docker_image_tag | default(openshift_docker_image_tag_default) }}"
+
+l_docker_image_prepend: "{{ l_crt_docker_image_prepend_dict[ansible_distribution] }}"
+l_docker_image_tag: "{{ l_crt_docker_image_tag_dict[openshift_deployment_type] }}"
+
+l_docker_image_default: "{{ l_docker_image_prepend }}/{{ openshift_docker_service_name }}:{{ l_docker_image_tag }}"
+l_docker_image: "{{ openshift_docker_systemcontainer_image_override | default(l_docker_image_default) }}"
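To make the dict indirection above concrete, a hand-resolved example (assumption: openshift_deployment_type=origin on CentOS, no overrides set):

```
# l_crio_image_prepend -> l_crt_crio_image_prepend_dict['origin']
#                      -> "docker.io/gscrivano"
# l_crio_image_name    -> l_crt_crio_image_dict['CentOS']['crio_image_name']
#                      -> "cri-o-centos"
# l_crio_image_tag     -> l_crt_crio_image_dict['CentOS']['crio_image_tag']
#                      -> "latest"
# l_crio_image         -> "docker.io/gscrivano/cri-o-centos:latest"
```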
diff --git a/roles/openshift_atomic/tasks/proxy.yml b/roles/container_runtime/tasks/common/atomic_proxy.yml
index dde099984..dde099984 100644
--- a/roles/openshift_atomic/tasks/proxy.yml
+++ b/roles/container_runtime/tasks/common/atomic_proxy.yml
diff --git a/roles/container_runtime/tasks/common/post.yml b/roles/container_runtime/tasks/common/post.yml
new file mode 100644
index 000000000..d790eb2c0
--- /dev/null
+++ b/roles/container_runtime/tasks/common/post.yml
@@ -0,0 +1,26 @@
+---
+- name: Ensure /var/lib/containers exists
+ file:
+ path: /var/lib/containers
+ state: directory
+
+- name: Fix SELinux Permissions on /var/lib/containers
+ command: "restorecon -R /var/lib/containers/"
+ changed_when: false
+
+- meta: flush_handlers
+
+# This needs to run after docker is restarted to account for proxy settings.
+# registry_auth is called directly with include_role in some places, so we
+# have to put it in the root of the tasks/ directory.
+- include_tasks: ../registry_auth.yml
+
+- name: stat the docker data dir
+ stat:
+ path: "{{ docker_default_storage_path }}"
+ register: dockerstat
+
+- include_tasks: setup_docker_symlink.yml
+ when:
+ - openshift_use_crio
+ - dockerstat.stat.islnk is defined and not (dockerstat.stat.islnk | bool)
diff --git a/roles/container_runtime/tasks/common/pre.yml b/roles/container_runtime/tasks/common/pre.yml
new file mode 100644
index 000000000..990fe66da
--- /dev/null
+++ b/roles/container_runtime/tasks/common/pre.yml
@@ -0,0 +1,12 @@
+---
+- include_tasks: udev_workaround.yml
+ when: docker_udev_workaround | default(False) | bool
+
+- name: Add enterprise registry, if necessary
+ set_fact:
+ l2_docker_additional_registries: "{{ l2_docker_additional_registries + [openshift_docker_ent_reg] }}"
+ when:
+ - openshift.common.deployment_type == 'openshift-enterprise'
+ - openshift_docker_ent_reg != ''
+ - openshift_docker_ent_reg not in l2_docker_additional_registries
+ - not openshift_use_crio_only | bool
diff --git a/roles/container_runtime/tasks/common/setup_docker_symlink.yml b/roles/container_runtime/tasks/common/setup_docker_symlink.yml
new file mode 100644
index 000000000..d7aeb192e
--- /dev/null
+++ b/roles/container_runtime/tasks/common/setup_docker_symlink.yml
@@ -0,0 +1,38 @@
+---
+- block:
+ - name: stop the current running docker
+ systemd:
+ state: stopped
+ name: "{{ openshift_docker_service_name }}"
+
+ - name: copy "{{ docker_default_storage_path }}" to "{{ docker_alt_storage_path }}"
+ command: "cp -r {{ docker_default_storage_path }} {{ docker_alt_storage_path }}"
+ register: results
+ failed_when:
+ - results.rc != 0
+
+ - name: "Set the selinux context on {{ docker_alt_storage_path }}"
+ command: "semanage fcontext -a -e {{ docker_default_storage_path }} {{ docker_alt_storage_path }}"
+ register: results
+ failed_when:
+ - results.rc == 1
+ - "'already exists' not in results.stderr"
+
+ - name: "restorecon the {{ docker_alt_storage_path }}"
+ command: "restorecon -r {{ docker_alt_storage_path }}"
+
+ - name: Remove the old docker location
+ file:
+ state: absent
+ path: "{{ docker_default_storage_path }}"
+
+ - name: Setup the link
+ file:
+ state: link
+ src: "{{ docker_alt_storage_path }}"
+ path: "{{ docker_default_storage_path }}"
+
+ - name: start docker
+ systemd:
+ state: started
+ name: "{{ openshift_docker_service_name }}"
diff --git a/roles/container_runtime/tasks/common/syscontainer_packages.yml b/roles/container_runtime/tasks/common/syscontainer_packages.yml
new file mode 100644
index 000000000..715ed492d
--- /dev/null
+++ b/roles/container_runtime/tasks/common/syscontainer_packages.yml
@@ -0,0 +1,28 @@
+---
+
+- name: Ensure container-selinux is installed
+ package:
+ name: container-selinux
+ state: present
+ when: not openshift.common.is_atomic | bool
+ register: result
+ until: result | success
+
+# Used to pull and install the system container
+- name: Ensure atomic is installed
+ package:
+ name: atomic
+ state: present
+ when: not openshift.common.is_atomic | bool
+ register: result
+ until: result | success
+
+# At the time of writing the atomic command requires runc for its own use. This
+# task is here in the event that the atomic package ever removes the dependency.
+- name: Ensure runc is installed
+ package:
+ name: runc
+ state: present
+ when: not openshift.common.is_atomic | bool
+ register: result
+ until: result | success
diff --git a/roles/container_runtime/tasks/udev_workaround.yml b/roles/container_runtime/tasks/common/udev_workaround.yml
index 257c3123d..257c3123d 100644
--- a/roles/container_runtime/tasks/udev_workaround.yml
+++ b/roles/container_runtime/tasks/common/udev_workaround.yml
diff --git a/roles/container_runtime/tasks/docker_sanity.yml b/roles/container_runtime/tasks/docker_sanity.yml
new file mode 100644
index 000000000..e62cf5505
--- /dev/null
+++ b/roles/container_runtime/tasks/docker_sanity.yml
@@ -0,0 +1,27 @@
+---
+# Sanity checks to ensure the role will complete and provide helpful error
+# messages for common problems.
+
+- name: Error out if Docker pre-installed but too old
+ fail:
+ msg: "Docker {{ curr_docker_version.stdout }} is installed, but >= 1.9.1 is required."
+ when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and curr_docker_version.stdout | version_compare('1.9.1', '<') and not docker_version is defined
+
+- name: Error out if requested Docker is too old
+ fail:
+ msg: "Docker {{ docker_version }} requested, but >= 1.9.1 is required."
+ when: docker_version is defined and docker_version | version_compare('1.9.1', '<')
+
+# If a docker_version was requested, sanity check that we can install or upgrade to it, and
+# no downgrade is required.
+- name: Fail if Docker version requested but downgrade is required
+ fail:
+ msg: "Docker {{ curr_docker_version.stdout }} is installed, but version {{ docker_version }} was requested."
+ when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and docker_version is defined and curr_docker_version.stdout | version_compare(docker_version, '>')
+
+# This involves an extremely slow migration process, users should instead run the
+# Docker 1.10 upgrade playbook to accomplish this.
+- name: Error out if attempting to upgrade Docker across the 1.10 boundary
+ fail:
+ msg: "Cannot upgrade Docker to >= 1.10, please upgrade or remove Docker manually, or use the Docker upgrade playbook if OpenShift is already installed."
+ when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and curr_docker_version.stdout | version_compare('1.10', '<') and docker_version is defined and docker_version | version_compare('1.10', '>=')
diff --git a/roles/container_runtime/tasks/main.yml b/roles/container_runtime/tasks/main.yml
index 6d68082b1..96d8606c6 100644
--- a/roles/container_runtime/tasks/main.yml
+++ b/roles/container_runtime/tasks/main.yml
@@ -1,85 +1,2 @@
---
-- include_tasks: udev_workaround.yml
- when: docker_udev_workaround | default(False) | bool
-
-- name: Add enterprise registry, if necessary
- set_fact:
- l2_docker_additional_registries: "{{ l2_docker_additional_registries + [openshift_docker_ent_reg] }}"
- when:
- - openshift.common.deployment_type == 'openshift-enterprise'
- - openshift_docker_ent_reg != ''
- - openshift_docker_ent_reg not in l2_docker_additional_registries
- - not openshift_use_crio_only | bool
-
-- name: Use Package Docker if Requested
- include_tasks: package_docker.yml
- when:
- - not openshift_docker_use_system_container
- - not openshift_use_crio_only
-
-- name: Ensure /var/lib/containers exists
- file:
- path: /var/lib/containers
- state: directory
-
-- name: Fix SELinux Permissions on /var/lib/containers
- command: "restorecon -R /var/lib/containers/"
- changed_when: false
-
-- name: Use System Container Docker if Requested
- include_tasks: systemcontainer_docker.yml
- when:
- - openshift_docker_use_system_container
- - not openshift_use_crio_only
-
-- name: Add CRI-O usage Requested
- include_tasks: systemcontainer_crio.yml
- when:
- - openshift_use_crio
- - openshift_docker_is_node_or_master | bool
-
-- name: stat the docker data dir
- stat:
- path: "{{ docker_default_storage_path }}"
- register: dockerstat
-
-- when:
- - openshift_use_crio
- - dockerstat.stat.islnk is defined and not (dockerstat.stat.islnk | bool)
- block:
- - name: stop the current running docker
- systemd:
- state: stopped
- name: "{{ openshift_docker_service_name }}"
-
- - name: copy "{{ docker_default_storage_path }}" to "{{ docker_alt_storage_path }}"
- command: "cp -r {{ docker_default_storage_path }} {{ docker_alt_storage_path }}"
- register: results
- failed_when:
- - results.rc != 0
-
- - name: "Set the selinux context on {{ docker_alt_storage_path }}"
- command: "semanage fcontext -a -e {{ docker_default_storage_path }} {{ docker_alt_storage_path }}"
- register: results
- failed_when:
- - results.rc == 1
- - "'already exists' not in results.stderr"
-
- - name: "restorecon the {{ docker_alt_storage_path }}"
- command: "restorecon -r {{ docker_alt_storage_path }}"
-
- - name: Remove the old docker location
- file:
- state: absent
- path: "{{ docker_default_storage_path }}"
-
- - name: Setup the link
- file:
- state: link
- src: "{{ docker_alt_storage_path }}"
- path: "{{ docker_default_storage_path }}"
-
- - name: start docker
- systemd:
- state: started
- name: "{{ openshift_docker_service_name }}"
+# This role is meant to be used with include_role and tasks_from.
diff --git a/roles/container_runtime/tasks/package_docker.yml b/roles/container_runtime/tasks/package_docker.yml
index 40ab75a25..89899c9cf 100644
--- a/roles/container_runtime/tasks/package_docker.yml
+++ b/roles/container_runtime/tasks/package_docker.yml
@@ -1,4 +1,6 @@
---
+- include_tasks: common/pre.yml
+
- name: Get current installed Docker version
command: "{{ repoquery_installed }} --qf '%{version}' docker"
when: not openshift.common.is_atomic | bool
@@ -7,35 +9,16 @@
until: curr_docker_version | succeeded
changed_when: false
-- name: Error out if Docker pre-installed but too old
- fail:
- msg: "Docker {{ curr_docker_version.stdout }} is installed, but >= 1.9.1 is required."
- when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and curr_docker_version.stdout | version_compare('1.9.1', '<') and not docker_version is defined
-
-- name: Error out if requested Docker is too old
- fail:
- msg: "Docker {{ docker_version }} requested, but >= 1.9.1 is required."
- when: docker_version is defined and docker_version | version_compare('1.9.1', '<')
-
-# If a docker_version was requested, sanity check that we can install or upgrade to it, and
-# no downgrade is required.
-- name: Fail if Docker version requested but downgrade is required
- fail:
- msg: "Docker {{ curr_docker_version.stdout }} is installed, but version {{ docker_version }} was requested."
- when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and docker_version is defined and curr_docker_version.stdout | version_compare(docker_version, '>')
-
-# This involves an extremely slow migration process, users should instead run the
-# Docker 1.10 upgrade playbook to accomplish this.
-- name: Error out if attempting to upgrade Docker across the 1.10 boundary
- fail:
- msg: "Cannot upgrade Docker to >= 1.10, please upgrade or remove Docker manually, or use the Docker upgrade playbook if OpenShift is already installed."
- when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and curr_docker_version.stdout | version_compare('1.10', '<') and docker_version is defined and docker_version | version_compare('1.10', '>=')
+# Some basic checks to ensure the role will complete
+- include_tasks: docker_sanity.yml
# Make sure Docker is installed, but does not update a running version.
# Docker upgrades are handled by a separate playbook.
# Note: The curr_docker_version.stdout check can be removed when https://github.com/ansible/ansible/issues/33187 gets fixed.
- name: Install Docker
- package: name=docker{{ '-' + docker_version if docker_version is defined else '' }} state=present
+ package:
+ name: "docker{{ '-' + docker_version if docker_version is defined else '' }}"
+ state: present
when: not openshift.common.is_atomic | bool and not curr_docker_version | skipped and not curr_docker_version.stdout != ''
register: result
until: result | success
@@ -161,7 +144,4 @@
- set_fact:
docker_service_status_changed: "{{ (r_docker_package_docker_start_result | changed) and (r_docker_already_running_result.stdout != 'ActiveState=active' ) }}"
-- meta: flush_handlers
-
-# This needs to run after docker is restarted to account for proxy settings.
-- include_tasks: registry_auth.yml
+- include_tasks: common/post.yml
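For orientation, the effective task order of package_docker.yml after this refactor, reconstructed from the hunks above:

```
# 1. common/pre.yml           - udev workaround, enterprise registry setup
# 2. repoquery                - detect the currently installed docker version
# 3. docker_sanity.yml        - the version gates moved out of this file
# 4. install/configure docker - unchanged middle of the file
# 5. common/post.yml          - flush handlers, registry auth, crio symlink check
```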
diff --git a/roles/container_runtime/tasks/systemcontainer_crio.yml b/roles/container_runtime/tasks/systemcontainer_crio.yml
index 48e33f0aa..5ea7df650 100644
--- a/roles/container_runtime/tasks/systemcontainer_crio.yml
+++ b/roles/container_runtime/tasks/systemcontainer_crio.yml
@@ -1,39 +1,14 @@
---
# TODO: Much of this file is shared with container engine tasks
-
-- name: Ensure container-selinux is installed
- package:
- name: container-selinux
- state: present
- when: not openshift.common.is_atomic | bool
- register: result
- until: result | success
-
- name: Check we are not using node as a Docker container with CRI-O
fail: msg='Cannot use CRI-O with node configured as a Docker container'
when:
- openshift.common.is_containerized | bool
- not openshift.common.is_node_system_container | bool
-# Used to pull and install the system container
-- name: Ensure atomic is installed
- package:
- name: atomic
- state: present
- when: not openshift.common.is_atomic | bool
- register: result
- until: result | success
-
-# At the time of writing the atomic command requires runc for it's own use. This
-# task is here in the even that the atomic package ever removes the dependency.
-- name: Ensure runc is installed
- package:
- name: runc
- state: present
- when: not openshift.common.is_atomic | bool
- register: result
- until: result | success
+- include_tasks: common/pre.yml
+- include_tasks: common/syscontainer_packages.yml
- name: Check that overlay is in the kernel
shell: lsmod | grep overlay
@@ -60,50 +35,11 @@
state: restarted
- name: Ensure proxies are in the atomic.conf
- include_role:
- name: openshift_atomic
- tasks_from: proxy
-
-- block:
-
- - name: Set CRI-O image defaults
- set_fact:
- l_crio_image_prepend: "docker.io/gscrivano"
- l_crio_image_name: "cri-o-fedora"
- l_crio_image_tag: "latest"
-
- - name: Use Centos based image when distribution is CentOS
- set_fact:
- l_crio_image_name: "cri-o-centos"
- when: ansible_distribution == "CentOS"
-
- - name: Set CRI-O image tag
- set_fact:
- l_crio_image_tag: "{{ l_openshift_image_tag }}"
- when:
- - openshift_deployment_type == 'openshift-enterprise'
-
- - name: Use RHEL based image when distribution is Red Hat
- set_fact:
- l_crio_image_prepend: "registry.access.redhat.com/openshift3"
- l_crio_image_name: "cri-o"
- when: ansible_distribution == "RedHat"
-
- - name: Set the full image name
- set_fact:
- l_crio_image: "{{ l_crio_image_prepend }}/{{ l_crio_image_name }}:{{ l_crio_image_tag }}"
-
- # For https://github.com/openshift/aos-cd-jobs/pull/624#pullrequestreview-61816548
- - name: Use a specific image if requested
- set_fact:
- l_crio_image: "{{ openshift_crio_systemcontainer_image_override }}"
- when:
- - openshift_crio_systemcontainer_image_override is defined
- - openshift_crio_systemcontainer_image_override != ""
-
- # Be nice and let the user see the variable result
- - debug:
- var: l_crio_image
+ include_tasks: common/atomic_proxy.yml
+
+# Be nice and let the user see the variable result
+- debug:
+ var: l_crio_image
# NOTE: no_proxy added as a workaround until https://github.com/projectatomic/atomic/pull/999 is released
- name: Pre-pull CRI-O System Container image
@@ -112,7 +48,6 @@
environment:
NO_PROXY: "{{ openshift.common.no_proxy | default('') }}"
-
- name: Install CRI-O System Container
oc_atomic_container:
name: "cri-o"
@@ -154,10 +89,8 @@
daemon_reload: yes
register: start_result
-- meta: flush_handlers
-
# If we are using crio only, docker.service might not be available for
# 'docker login'
-- include_tasks: registry_auth.yml
+- include_tasks: common/post.yml
vars:
openshift_docker_alternative_creds: "{{ openshift_use_crio_only }}"
diff --git a/roles/container_runtime/tasks/systemcontainer_docker.yml b/roles/container_runtime/tasks/systemcontainer_docker.yml
index 84217e50c..10570fe34 100644
--- a/roles/container_runtime/tasks/systemcontainer_docker.yml
+++ b/roles/container_runtime/tasks/systemcontainer_docker.yml
@@ -11,32 +11,9 @@
traditional docker package install. Otherwise, comment out openshift_docker_options
in your inventory file.
-- name: Ensure container-selinux is installed
- package:
- name: container-selinux
- state: present
- when: not openshift.common.is_atomic | bool
- register: result
- until: result | success
-
-# Used to pull and install the system container
-- name: Ensure atomic is installed
- package:
- name: atomic
- state: present
- when: not openshift.common.is_atomic | bool
- register: result
- until: result | success
+- include_tasks: common/pre.yml
-# At the time of writing the atomic command requires runc for it's own use. This
-# task is here in the even that the atomic package ever removes the dependency.
-- name: Ensure runc is installed
- package:
- name: runc
- state: present
- when: not openshift.common.is_atomic | bool
- register: result
- until: result | success
+- include_tasks: common/syscontainer_packages.yml
# Make sure Docker is installed so we are able to use the client
- name: Install Docker so we can use the client
@@ -59,48 +36,11 @@
delay: 30
- name: Ensure proxies are in the atomic.conf
- include_role:
- name: openshift_atomic
- tasks_from: proxy
-
-- block:
-
- - name: Set to default prepend
- set_fact:
- l_docker_image_prepend: "gscrivano"
- l_docker_image_tag: "latest"
-
- - name: Set container engine image tag
- set_fact:
- l_docker_image_tag: "{{ l_openshift_image_tag }}"
- when:
- - openshift_deployment_type == 'openshift-enterprise'
-
- - name: Use Red Hat Registry for image when distribution is Red Hat
- set_fact:
- l_docker_image_prepend: "registry.access.redhat.com/openshift3"
- when: ansible_distribution == 'RedHat'
-
- - name: Use Fedora Registry for image when distribution is Fedora
- set_fact:
- l_docker_image_prepend: "registry.fedoraproject.org/f25"
- when: ansible_distribution == 'Fedora'
-
- - name: Set the full image name
- set_fact:
- l_docker_image: "{{ l_docker_image_prepend }}/{{ openshift_docker_service_name }}:{{ l_docker_image_tag }}"
-
- # For https://github.com/openshift/openshift-ansible/pull/5354#issuecomment-328552959
- - name: Use a specific image if requested
- set_fact:
- l_docker_image: "{{ openshift_docker_systemcontainer_image_override }}"
- when:
- - openshift_docker_systemcontainer_image_override is defined
- - openshift_docker_systemcontainer_image_override != ""
-
- # Be nice and let the user see the variable result
- - debug:
- var: l_docker_image
+ include_tasks: common/atomic_proxy.yml
+
+# Be nice and let the user see the variable result
+- debug:
+ var: l_docker_image
# NOTE: no_proxy added as a workaround until https://github.com/projectatomic/atomic/pull/999 is released
- name: Pre-pull Container Engine System Container image
@@ -154,10 +94,8 @@
- set_fact:
docker_service_status_changed: "{{ r_docker_systemcontainer_docker_start_result | changed }}"
-- meta: flush_handlers
-
# Since docker is running as a system container, docker login will fail to create
# credentials. Use alternate method if requiring authenticated registries.
-- include_tasks: registry_auth.yml
+- include_tasks: common/post.yml
vars:
openshift_docker_alternative_creds: True
diff --git a/roles/etcd/tasks/migration/add_ttls.yml b/roles/etcd/tasks/migration/add_ttls.yml
index 4bdc6bcc3..a4b0ff31d 100644
--- a/roles/etcd/tasks/migration/add_ttls.yml
+++ b/roles/etcd/tasks/migration/add_ttls.yml
@@ -11,7 +11,7 @@
- name: Re-introduce leases (as a replacement for key TTLs)
command: >
- oadm migrate etcd-ttl \
+ {{ openshift.common.client_binary }} adm migrate etcd-ttl \
--cert {{ r_etcd_common_master_peer_cert_file }} \
--key {{ r_etcd_common_master_peer_key_file }} \
--cacert {{ r_etcd_common_master_peer_ca_file }} \
diff --git a/roles/etcd/tasks/system_container.yml b/roles/etcd/tasks/system_container.yml
index 82ac4fc84..ca8b6a707 100644
--- a/roles/etcd/tasks/system_container.yml
+++ b/roles/etcd/tasks/system_container.yml
@@ -1,9 +1,4 @@
---
-- name: Ensure proxies are in the atomic.conf
- include_role:
- name: openshift_atomic
- tasks_from: proxy
-
- name: Pull etcd system container
command: atomic pull --storage=ostree {{ etcd_image }}
register: pull_result
diff --git a/roles/openshift_atomic/README.md b/roles/openshift_atomic/README.md
deleted file mode 100644
index 8c10c9991..000000000
--- a/roles/openshift_atomic/README.md
+++ /dev/null
@@ -1,28 +0,0 @@
-OpenShift Atomic
-================
-
-This role houses atomic specific tasks.
-
-Requirements
-------------
-
-Role Variables
---------------
-
-Dependencies
-------------
-
-Example Playbook
-----------------
-
-```
-- name: Ensure atomic proxies are defined
- hosts: localhost
- roles:
- - role: openshift_atomic
-```
-
-License
--------
-
-Apache License Version 2.0
diff --git a/roles/openshift_atomic/meta/main.yml b/roles/openshift_atomic/meta/main.yml
deleted file mode 100644
index ea129f514..000000000
--- a/roles/openshift_atomic/meta/main.yml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-galaxy_info:
- author: OpenShift
- description: Atomic related tasks
- company: Red Hat, Inc
- license: ASL 2.0
- min_ansible_version: 2.2
- platforms:
- - name: EL
- versions:
- - 7
-dependencies:
-- role: lib_openshift
diff --git a/roles/openshift_cli/library/openshift_container_binary_sync.py b/roles/openshift_cli/library/openshift_container_binary_sync.py
index 08045794a..440b8ec28 100644
--- a/roles/openshift_cli/library/openshift_container_binary_sync.py
+++ b/roles/openshift_cli/library/openshift_container_binary_sync.py
@@ -27,7 +27,7 @@ class BinarySyncError(Exception):
# pylint: disable=too-few-public-methods,too-many-instance-attributes
class BinarySyncer(object):
"""
- Syncs the openshift, oc, oadm, and kubectl binaries/symlinks out of
+ Syncs the openshift, oc, and kubectl binaries/symlinks out of
a container onto the host system.
"""
@@ -108,7 +108,10 @@ class BinarySyncer(object):
# Ensure correct symlinks created:
self._sync_symlink('kubectl', 'openshift')
- self._sync_symlink('oadm', 'openshift')
+
+ # Remove old oadm binary
+ if os.path.exists(os.path.join(self.bin_dir, 'oadm')):
+ os.remove(os.path.join(self.bin_dir, 'oadm'))
def _sync_symlink(self, binary_name, link_to):
""" Ensure the given binary name exists and links to the expected binary. """
diff --git a/roles/openshift_docker_gc/templates/dockergc-ds.yaml.j2 b/roles/openshift_docker_gc/templates/dockergc-ds.yaml.j2
index 53e8b448b..3d51abc52 100644
--- a/roles/openshift_docker_gc/templates/dockergc-ds.yaml.j2
+++ b/roles/openshift_docker_gc/templates/dockergc-ds.yaml.j2
@@ -5,7 +5,7 @@ items:
kind: ServiceAccount
metadata:
name: dockergc
- # You must grant privileged via: oadm policy add-scc-to-user -z dockergc privileged
+ # You must grant privileged via: oc adm policy add-scc-to-user -z dockergc privileged
# in order for the dockergc to access the docker socket and root directory
- apiVersion: extensions/v1beta1
kind: DaemonSet
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index b371d347c..d2bd7357a 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -94,8 +94,7 @@ def migrate_admission_plugin_facts(facts):
# Merge existing kube_admission_plugin_config with admission_plugin_config.
facts['master']['admission_plugin_config'] = merge_facts(facts['master']['admission_plugin_config'],
facts['master']['kube_admission_plugin_config'],
- additive_facts_to_overwrite=[],
- protected_facts_to_overwrite=[])
+ additive_facts_to_overwrite=[])
# Remove kube_admission_plugin_config fact
facts['master'].pop('kube_admission_plugin_config', None)
return facts
@@ -854,7 +853,7 @@ values provided as a list. Hence the gratuitous use of ['foo'] below.
# If we've added items to the kubelet_args dict then we need
# to merge the new items back into the main facts object.
if kubelet_args != {}:
- facts = merge_facts({'node': {'kubelet_args': kubelet_args}}, facts, [], [])
+ facts = merge_facts({'node': {'kubelet_args': kubelet_args}}, facts, [])
return facts
@@ -876,7 +875,7 @@ def build_controller_args(facts):
controller_args['cloud-provider'] = ['gce']
controller_args['cloud-config'] = [cloud_cfg_path + '/gce.conf']
if controller_args != {}:
- facts = merge_facts({'master': {'controller_args': controller_args}}, facts, [], [])
+ facts = merge_facts({'master': {'controller_args': controller_args}}, facts, [])
return facts
@@ -898,7 +897,7 @@ def build_api_server_args(facts):
api_server_args['cloud-provider'] = ['gce']
api_server_args['cloud-config'] = [cloud_cfg_path + '/gce.conf']
if api_server_args != {}:
- facts = merge_facts({'master': {'api_server_args': api_server_args}}, facts, [], [])
+ facts = merge_facts({'master': {'api_server_args': api_server_args}}, facts, [])
return facts
@@ -1085,7 +1084,7 @@ def apply_provider_facts(facts, provider_facts):
# Disabling pylint too many branches. This function needs refactored
# but is a very core part of openshift_facts.
# pylint: disable=too-many-branches, too-many-nested-blocks
-def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overwrite):
+def merge_facts(orig, new, additive_facts_to_overwrite):
""" Recursively merge facts dicts
Args:
@@ -1093,14 +1092,11 @@ def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overw
new (dict): facts to update
additive_facts_to_overwrite (list): additive facts to overwrite in jinja
'.' notation ex: ['master.named_certificates']
- protected_facts_to_overwrite (list): protected facts to overwrite in jinja
- '.' notation ex: ['master.master_count']
Returns:
dict: the merged facts
"""
additive_facts = ['named_certificates']
- protected_facts = ['ha']
# Facts we do not ever want to merge. These originate in inventory variables
# and contain JSON dicts. We don't ever want to trigger a merge
@@ -1132,14 +1128,7 @@ def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overw
if '.' in item and item.startswith(key + '.'):
relevant_additive_facts.append(item)
- # Collect the subset of protected facts to overwrite
- # if key matches. These will be passed to the
- # subsequent merge_facts call.
- relevant_protected_facts = []
- for item in protected_facts_to_overwrite:
- if '.' in item and item.startswith(key + '.'):
- relevant_protected_facts.append(item)
- facts[key] = merge_facts(value, new[key], relevant_additive_facts, relevant_protected_facts)
+ facts[key] = merge_facts(value, new[key], relevant_additive_facts)
# Key matches an additive fact and we are not overwriting
# it so we will append the new value to the existing value.
elif key in additive_facts and key not in [x.split('.')[-1] for x in additive_facts_to_overwrite]:
@@ -1149,18 +1138,6 @@ def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overw
if item not in new_fact:
new_fact.append(item)
facts[key] = new_fact
- # Key matches a protected fact and we are not overwriting
- # it so we will determine if it is okay to change this
- # fact.
- elif key in protected_facts and key not in [x.split('.')[-1] for x in protected_facts_to_overwrite]:
- # ha (bool) can not change unless it has been passed
- # as a protected fact to overwrite.
- if key == 'ha':
- if safe_get_bool(value) != safe_get_bool(new[key]):
- # pylint: disable=line-too-long
- module.fail_json(msg='openshift_facts received a different value for openshift.master.ha') # noqa: F405
- else:
- facts[key] = value
# No other condition has been met. Overwrite the old fact
# with the new value.
else:
@@ -1433,7 +1410,6 @@ def set_container_facts_if_unset(facts):
facts['node']['ovs_system_image'] = ovs_image
if safe_get_bool(facts['common']['is_containerized']):
- facts['common']['admin_binary'] = '/usr/local/bin/oadm'
facts['common']['client_binary'] = '/usr/local/bin/oc'
return facts
@@ -1494,8 +1470,6 @@ class OpenShiftFacts(object):
local_facts (dict): local facts to set
additive_facts_to_overwrite (list): additive facts to overwrite in jinja
'.' notation ex: ['master.named_certificates']
- protected_facts_to_overwrite (list): protected facts to overwrite in jinja
- '.' notation ex: ['master.master_count']
Raises:
OpenShiftFactsUnsupportedRoleError:
@@ -1513,8 +1487,7 @@ class OpenShiftFacts(object):
def __init__(self, role, filename, local_facts,
additive_facts_to_overwrite=None,
openshift_env=None,
- openshift_env_structures=None,
- protected_facts_to_overwrite=None):
+ openshift_env_structures=None):
self.changed = False
self.filename = filename
if role not in self.known_roles:
@@ -1538,15 +1511,13 @@ class OpenShiftFacts(object):
self.facts = self.generate_facts(local_facts,
additive_facts_to_overwrite,
openshift_env,
- openshift_env_structures,
- protected_facts_to_overwrite)
+ openshift_env_structures)
def generate_facts(self,
local_facts,
additive_facts_to_overwrite,
openshift_env,
- openshift_env_structures,
- protected_facts_to_overwrite):
+ openshift_env_structures):
""" Generate facts
Args:
@@ -1554,16 +1525,13 @@ class OpenShiftFacts(object):
additive_facts_to_overwrite (list): additive facts to overwrite in jinja
'.' notation ex: ['master.named_certificates']
openshift_env (dict): openshift_env facts for overriding generated defaults
- protected_facts_to_overwrite (list): protected facts to overwrite in jinja
- '.' notation ex: ['master.master_count']
Returns:
dict: The generated facts
"""
local_facts = self.init_local_facts(local_facts,
additive_facts_to_overwrite,
openshift_env,
- openshift_env_structures,
- protected_facts_to_overwrite)
+ openshift_env_structures)
roles = local_facts.keys()
if 'common' in local_facts and 'deployment_type' in local_facts['common']:
@@ -1581,8 +1549,7 @@ class OpenShiftFacts(object):
facts = apply_provider_facts(defaults, provider_facts)
facts = merge_facts(facts,
local_facts,
- additive_facts_to_overwrite,
- protected_facts_to_overwrite)
+ additive_facts_to_overwrite)
facts = migrate_oauth_template_facts(facts)
facts['current_config'] = get_current_config(facts)
facts = set_url_facts_if_unset(facts)
@@ -1627,7 +1594,7 @@ class OpenShiftFacts(object):
hostname=hostname,
public_hostname=hostname,
portal_net='172.30.0.0/16',
- client_binary='oc', admin_binary='oadm',
+ client_binary='oc',
dns_domain='cluster.local',
config_base='/etc/origin')
@@ -1778,8 +1745,7 @@ class OpenShiftFacts(object):
def init_local_facts(self, facts=None,
additive_facts_to_overwrite=None,
openshift_env=None,
- openshift_env_structures=None,
- protected_facts_to_overwrite=None):
+ openshift_env_structures=None):
""" Initialize the local facts
Args:
@@ -1787,8 +1753,6 @@ class OpenShiftFacts(object):
additive_facts_to_overwrite (list): additive facts to overwrite in jinja
'.' notation ex: ['master.named_certificates']
openshift_env (dict): openshift env facts to set
- protected_facts_to_overwrite (list): protected facts to overwrite in jinja
- '.' notation ex: ['master.master_count']
Returns:
@@ -1830,8 +1794,7 @@ class OpenShiftFacts(object):
facts_to_set = merge_facts(orig=facts_to_set,
new=oo_env_facts,
- additive_facts_to_overwrite=[],
- protected_facts_to_overwrite=[])
+ additive_facts_to_overwrite=[])
local_facts = get_local_facts_from_file(self.filename)
@@ -1839,8 +1802,7 @@ class OpenShiftFacts(object):
new_local_facts = merge_facts(migrated_facts,
facts_to_set,
- additive_facts_to_overwrite,
- protected_facts_to_overwrite)
+ additive_facts_to_overwrite)
new_local_facts = self.remove_empty_facts(new_local_facts)
@@ -1949,8 +1911,7 @@ def main():
local_facts=dict(default=None, type='dict', required=False),
additive_facts_to_overwrite=dict(default=[], type='list', required=False),
openshift_env=dict(default={}, type='dict', required=False),
- openshift_env_structures=dict(default=[], type='list', required=False),
- protected_facts_to_overwrite=dict(default=[], type='list', required=False)
+ openshift_env_structures=dict(default=[], type='list', required=False)
),
supports_check_mode=True,
add_file_common_args=True,
@@ -1968,7 +1929,6 @@ def main():
additive_facts_to_overwrite = module.params['additive_facts_to_overwrite'] # noqa: F405
openshift_env = module.params['openshift_env'] # noqa: F405
openshift_env_structures = module.params['openshift_env_structures'] # noqa: F405
- protected_facts_to_overwrite = module.params['protected_facts_to_overwrite'] # noqa: F405
fact_file = '/etc/ansible/facts.d/openshift.fact'
@@ -1977,8 +1937,7 @@ def main():
local_facts,
additive_facts_to_overwrite,
openshift_env,
- openshift_env_structures,
- protected_facts_to_overwrite)
+ openshift_env_structures)
file_params = module.params.copy() # noqa: F405
file_params['path'] = fact_file
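Callers of the openshift_facts module now omit the removed parameter; a minimal sketch (role and fact values illustrative, parameter names per the argument spec above):

```
- name: Merge local facts without protected_facts_to_overwrite
  openshift_facts:
    role: common                  # illustrative role
    local_facts:
      deployment_type: origin     # illustrative fact
    additive_facts_to_overwrite: []
    openshift_env: {}
```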
diff --git a/roles/openshift_master/tasks/registry_auth.yml b/roles/openshift_master/tasks/registry_auth.yml
index ca04d2243..8b342a5b4 100644
--- a/roles/openshift_master/tasks/registry_auth.yml
+++ b/roles/openshift_master/tasks/registry_auth.yml
@@ -32,7 +32,7 @@
when:
- openshift_docker_alternative_creds | default(False) | bool
- oreg_auth_user is defined
- - (not docker_cli_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
+ - (not master_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
register: master_oreg_auth_credentials_create_alt
notify:
- restart master api
diff --git a/roles/openshift_master/tasks/system_container.yml b/roles/openshift_master/tasks/system_container.yml
index 23386f11b..450f6d803 100644
--- a/roles/openshift_master/tasks/system_container.yml
+++ b/roles/openshift_master/tasks/system_container.yml
@@ -1,8 +1,4 @@
---
-- name: Ensure proxies are in the atomic.conf
- include_role:
- name: openshift_atomic
- tasks_from: proxy
- name: Pre-pull master system container image
command: >
diff --git a/roles/openshift_node/README.md b/roles/openshift_node/README.md
index 67f697924..87ceb8103 100644
--- a/roles/openshift_node/README.md
+++ b/roles/openshift_node/README.md
@@ -33,9 +33,9 @@ Notes
Currently we support re-labeling nodes but we don't re-schedule running pods nor remove existing labels. That means you will have to trigger the re-schedulling manually. To re-schedule your pods, just follow the steps below:
```
-oadm manage-node --schedulable=false ${NODE}
-oadm manage-node --drain ${NODE}
-oadm manage-node --schedulable=true ${NODE}
+oc adm manage-node --schedulable=false ${NODE}
+oc adm manage-node --drain ${NODE}
+oc adm manage-node --schedulable=true ${NODE}
````
> If you are using version less than 1.5/3.5 you must replace `--drain` with `--evacuate`.
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index ae4916761..d9f3e920d 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -44,13 +44,6 @@
- name: include node installer
include_tasks: install.yml
-- name: Restart cri-o
- systemd:
- name: cri-o
- enabled: yes
- state: restarted
- when: openshift_use_crio
-
- name: restart NetworkManager to ensure resolv.conf is present
systemd:
name: NetworkManager
diff --git a/roles/openshift_node/tasks/node_system_container.yml b/roles/openshift_node/tasks/node_system_container.yml
index eb8d9a6a5..98a391890 100644
--- a/roles/openshift_node/tasks/node_system_container.yml
+++ b/roles/openshift_node/tasks/node_system_container.yml
@@ -1,8 +1,4 @@
---
-- name: Ensure proxies are in the atomic.conf
- include_role:
- name: openshift_atomic
- tasks_from: proxy
- name: Pre-pull node system container image
command: >
diff --git a/roles/openshift_node/tasks/openvswitch_system_container.yml b/roles/openshift_node/tasks/openvswitch_system_container.yml
index d33e172c1..b61bc84c1 100644
--- a/roles/openshift_node/tasks/openvswitch_system_container.yml
+++ b/roles/openshift_node/tasks/openvswitch_system_container.yml
@@ -7,11 +7,6 @@
l_service_name: "{{ openshift_docker_service_name }}"
when: not openshift_use_crio
-- name: Ensure proxies are in the atomic.conf
- include_role:
- name: openshift_atomic
- tasks_from: proxy
-
- name: Pre-pull OpenVSwitch system container image
command: >
atomic pull --storage=ostree {{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.node.ovs_system_image }}:{{ openshift_image_tag }}
diff --git a/roles/openshift_provisioners/tasks/install_efs.yaml b/roles/openshift_provisioners/tasks/install_efs.yaml
index 6e8792446..e543d753c 100644
--- a/roles/openshift_provisioners/tasks/install_efs.yaml
+++ b/roles/openshift_provisioners/tasks/install_efs.yaml
@@ -66,7 +66,7 @@
- name: "Set anyuid permissions for efs"
command: >
- {{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy
+ {{ openshift.common.client_binary}} adm --config={{ mktemp.stdout }}/admin.kubeconfig policy
add-scc-to-user anyuid system:serviceaccount:{{openshift_provisioners_project}}:provisioners-efs
register: efs_output
failed_when: efs_output.rc == 1 and 'exists' not in efs_output.stderr