Diffstat (limited to 'roles')
197 files changed, 2397 insertions, 1312 deletions
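The changes below largely restructure the container_runtime role so it is consumed with include_role and tasks_from instead of being listed as a classic role. As a minimal sketch only (not part of the diff), a playbook driving the entry points named in the new README might look like the following; the host group name is hypothetical, and the openshift_use_crio / openshift_use_crio_only variables are the ones defined in the role defaults shown further down:

# Illustrative playbook, assuming the container_runtime entry points from the new README.
- hosts: nodes
  tasks:
    - name: Install and configure the packaged docker runtime
      include_role:
        name: container_runtime
        tasks_from: package_docker.yml
      # skip when the host is meant to run CRI-O only
      when: not openshift_use_crio_only | default(False) | bool

    - name: Set up the CRI-O system container instead
      include_role:
        name: container_runtime
        tasks_from: systemcontainer_crio.yml
      when: openshift_use_crio | default(False) | bool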
diff --git a/roles/calico/tasks/main.yml b/roles/calico/tasks/main.yml index 0e3863304..bbc6edd48 100644 --- a/roles/calico/tasks/main.yml +++ b/roles/calico/tasks/main.yml @@ -14,7 +14,6 @@ vars: etcd_cert_prefix: calico.etcd- etcd_cert_config_dir: "{{ openshift.common.config_base }}/calico" - embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}" etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" etcd_cert_subdir: "openshift-calico-{{ openshift.common.hostname }}" diff --git a/roles/container_runtime/README.md b/roles/container_runtime/README.md index e363c1714..51f469aaf 100644 --- a/roles/container_runtime/README.md +++ b/roles/container_runtime/README.md @@ -1,18 +1,23 @@ -Docker +Container Runtime ========= Ensures docker package or system container is installed, and optionally raises timeout for systemd-udevd.service to 5 minutes. container-daemon.json items may be found at https://docs.docker.com/engine/reference/commandline/dockerd/#daemon-configuration-file -Requirements ------------- +This role is designed to be used with include_role and tasks_from. -Ansible 2.2 +Entry points +------------ +* package_docker.yml - install and setup docker container runtime. +* systemcontainer_docker.yml - utilize docker + systemcontainer +* systemcontainer_crio.yml - utilize crio + systemcontainer +* registry_auth.yml - place docker login credentials. -Mandator Role Variables --------------- +Requirements +------------ +Ansible 2.4 Dependencies @@ -24,9 +29,9 @@ Example Playbook ---------------- - hosts: servers - roles: - - role: container_runtime - docker_udev_workaround: "true" + tasks: + - include_role: container_runtime + tasks_from: package_docker.yml License ------- @@ -36,4 +41,4 @@ ASL 2.0 Author Information ------------------ -OpenShift operations, Red Hat, Inc +Red Hat, Inc diff --git a/roles/container_runtime/defaults/main.yml b/roles/container_runtime/defaults/main.yml index 62b3e141a..dd185cb38 100644 --- a/roles/container_runtime/defaults/main.yml +++ b/roles/container_runtime/defaults/main.yml @@ -59,6 +59,7 @@ docker_default_storage_path: /var/lib/docker # Set local versions of facts that must be in json format for container-daemon.json # NOTE: When jinja2.9+ is used the container-daemon.json file can move to using tojson l_docker_log_options: "{{ l2_docker_log_options | to_json }}" +l_docker_log_options_dict: "{{ l2_docker_log_options | oo_list_to_dict | to_json }}" l_docker_additional_registries: "{{ l2_docker_additional_registries | to_json }}" l_docker_blocked_registries: "{{ l2_docker_blocked_registries | to_json }}" l_docker_insecure_registries: "{{ l2_docker_insecure_registries | to_json }}" @@ -71,10 +72,62 @@ docker_no_proxy: "{{ openshift.common.no_proxy | default('') }}" openshift_use_crio: False openshift_use_crio_only: False +l_openshift_image_tag_default: "{{ openshift_release | default('latest') }}" +l_openshift_image_tag: "{{ openshift_image_tag | default(l_openshift_image_tag_default) | string}}" +# --------------------- # +# systemcontainers_crio # +# --------------------- # l_insecure_crio_registries: "{{ '\"{}\"'.format('\", \"'.join(l2_docker_insecure_registries)) }}" l_crio_registries: "{{ l2_docker_additional_registries + ['docker.io'] }}" l_additional_crio_registries: "{{ '\"{}\"'.format('\", \"'.join(l_crio_registries)) }}" -l_openshift_image_tag_default: "{{ openshift_release }}" -l_openshift_image_tag: "{{ openshift_image_tag | default(l_openshift_image_tag_default) | string}}" + +openshift_crio_image_tag_default: 
"latest" + +l_crt_crio_image_tag_dict: + openshift-enterprise: "{{ l_openshift_image_tag }}" + origin: "{{ openshift_crio_image_tag | default(openshift_crio_image_tag_default) }}" + +l_crt_crio_image_prepend_dict: + openshift-enterprise: "registry.access.redhat.com/openshift3" + origin: "docker.io/gscrivano" + +l_crt_crio_image_dict: + Fedora: + crio_image_name: "cri-o-fedora" + crio_image_tag: "latest" + CentOS: + crio_image_name: "cri-o-centos" + crio_image_tag: "latest" + RedHat: + crio_image_name: "cri-o" + crio_image_tag: "{{ openshift_crio_image_tag | default(l_crt_crio_image_tag_dict[openshift_deployment_type]) }}" + +l_crio_image_prepend: "{{ l_crt_crio_image_prepend_dict[openshift_deployment_type] }}" +l_crio_image_name: "{{ l_crt_crio_image_dict[ansible_distribution]['crio_image_name'] }}" +l_crio_image_tag: "{{ l_crt_crio_image_dict[ansible_distribution] }}" + +l_crio_image_default: "{{ l_crio_image_prepend }}/{{ l_crio_image_name }}:{{ l_crio_image_tag }}" +l_crio_image: "{{ openshift_crio_systemcontainer_image_override | default(l_crio_image_default) }}" + +# ----------------------- # +# systemcontainers_docker # +# ----------------------- # +l_crt_docker_image_prepend_dict: + Fedora: "registry.fedoraproject.org/latest" + Centos: "docker.io/gscrivano" + RedHat: "registry.access.redhat.com/openshift3" + +openshift_docker_image_tag_default: "latest" +l_crt_docker_image_tag_dict: + openshift-enterprise: "{{ l_openshift_image_tag }}" + origin: "{{ openshift_docker_image_tag | default(openshift_docker_image_tag_default) }}" + +l_docker_image_prepend: "{{ l_crt_docker_image_prepend_dict[ansible_distribution] }}" +l_docker_image_tag: "{{ l_crt_docker_image_tag_dict[openshift_deployment_type] }}" + +l_docker_image_default: "{{ l_docker_image_prepend }}/{{ openshift_docker_service_name }}:{{ l_docker_image_tag }}" +l_docker_image: "{{ openshift_docker_systemcontainer_image_override | default(l_docker_image_default) }}" + +l_is_node_system_container: "{{ (openshift_use_node_system_container | default(openshift_use_system_containers | default(false)) | bool) }}" diff --git a/roles/openshift_atomic/tasks/proxy.yml b/roles/container_runtime/tasks/common/atomic_proxy.yml index dde099984..dde099984 100644 --- a/roles/openshift_atomic/tasks/proxy.yml +++ b/roles/container_runtime/tasks/common/atomic_proxy.yml diff --git a/roles/container_runtime/tasks/common/post.yml b/roles/container_runtime/tasks/common/post.yml new file mode 100644 index 000000000..d790eb2c0 --- /dev/null +++ b/roles/container_runtime/tasks/common/post.yml @@ -0,0 +1,26 @@ +--- +- name: Ensure /var/lib/containers exists + file: + path: /var/lib/containers + state: directory + +- name: Fix SELinux Permissions on /var/lib/containers + command: "restorecon -R /var/lib/containers/" + changed_when: false + +- meta: flush_handlers + +# This needs to run after docker is restarted to account for proxy settings. +# registry_auth is called directly with include_role in some places, so we +# have to put it in the root of the tasks/ directory. 
+- include_tasks: ../registry_auth.yml + +- name: stat the docker data dir + stat: + path: "{{ docker_default_storage_path }}" + register: dockerstat + +- include_tasks: setup_docker_symlink.yml + when: + - openshift_use_crio + - dockerstat.stat.islnk is defined and not (dockerstat.stat.islnk | bool) diff --git a/roles/container_runtime/tasks/common/pre.yml b/roles/container_runtime/tasks/common/pre.yml new file mode 100644 index 000000000..990fe66da --- /dev/null +++ b/roles/container_runtime/tasks/common/pre.yml @@ -0,0 +1,12 @@ +--- +- include_tasks: udev_workaround.yml + when: docker_udev_workaround | default(False) | bool + +- name: Add enterprise registry, if necessary + set_fact: + l2_docker_additional_registries: "{{ l2_docker_additional_registries + [openshift_docker_ent_reg] }}" + when: + - openshift.common.deployment_type == 'openshift-enterprise' + - openshift_docker_ent_reg != '' + - openshift_docker_ent_reg not in l2_docker_additional_registries + - not openshift_use_crio_only | bool diff --git a/roles/container_runtime/tasks/common/setup_docker_symlink.yml b/roles/container_runtime/tasks/common/setup_docker_symlink.yml new file mode 100644 index 000000000..d7aeb192e --- /dev/null +++ b/roles/container_runtime/tasks/common/setup_docker_symlink.yml @@ -0,0 +1,38 @@ +--- +- block: + - name: stop the current running docker + systemd: + state: stopped + name: "{{ openshift_docker_service_name }}" + + - name: copy "{{ docker_default_storage_path }}" to "{{ docker_alt_storage_path }}" + command: "cp -r {{ docker_default_storage_path }} {{ docker_alt_storage_path }}" + register: results + failed_when: + - results.rc != 0 + + - name: "Set the selinux context on {{ docker_alt_storage_path }}" + command: "semanage fcontext -a -e {{ docker_default_storage_path }} {{ docker_alt_storage_path }}" + register: results + failed_when: + - results.rc == 1 + - "'already exists' not in results.stderr" + + - name: "restorecon the {{ docker_alt_storage_path }}" + command: "restorecon -r {{ docker_alt_storage_path }}" + + - name: Remove the old docker location + file: + state: absent + path: "{{ docker_default_storage_path }}" + + - name: Setup the link + file: + state: link + src: "{{ docker_alt_storage_path }}" + path: "{{ docker_default_storage_path }}" + + - name: start docker + systemd: + state: started + name: "{{ openshift_docker_service_name }}" diff --git a/roles/container_runtime/tasks/common/syscontainer_packages.yml b/roles/container_runtime/tasks/common/syscontainer_packages.yml new file mode 100644 index 000000000..715ed492d --- /dev/null +++ b/roles/container_runtime/tasks/common/syscontainer_packages.yml @@ -0,0 +1,28 @@ +--- + +- name: Ensure container-selinux is installed + package: + name: container-selinux + state: present + when: not openshift.common.is_atomic | bool + register: result + until: result | success + +# Used to pull and install the system container +- name: Ensure atomic is installed + package: + name: atomic + state: present + when: not openshift.common.is_atomic | bool + register: result + until: result | success + +# At the time of writing the atomic command requires runc for it's own use. This +# task is here in the even that the atomic package ever removes the dependency. 
+- name: Ensure runc is installed + package: + name: runc + state: present + when: not openshift.common.is_atomic | bool + register: result + until: result | success diff --git a/roles/container_runtime/tasks/udev_workaround.yml b/roles/container_runtime/tasks/common/udev_workaround.yml index 257c3123d..257c3123d 100644 --- a/roles/container_runtime/tasks/udev_workaround.yml +++ b/roles/container_runtime/tasks/common/udev_workaround.yml diff --git a/roles/container_runtime/tasks/docker_sanity.yml b/roles/container_runtime/tasks/docker_sanity.yml new file mode 100644 index 000000000..e62cf5505 --- /dev/null +++ b/roles/container_runtime/tasks/docker_sanity.yml @@ -0,0 +1,27 @@ +--- +# Sanity checks to ensure the role will complete and provide helpful error +# messages for common problems. + +- name: Error out if Docker pre-installed but too old + fail: + msg: "Docker {{ curr_docker_version.stdout }} is installed, but >= 1.9.1 is required." + when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and curr_docker_version.stdout | version_compare('1.9.1', '<') and not docker_version is defined + +- name: Error out if requested Docker is too old + fail: + msg: "Docker {{ docker_version }} requested, but >= 1.9.1 is required." + when: docker_version is defined and docker_version | version_compare('1.9.1', '<') + +# If a docker_version was requested, sanity check that we can install or upgrade to it, and +# no downgrade is required. +- name: Fail if Docker version requested but downgrade is required + fail: + msg: "Docker {{ curr_docker_version.stdout }} is installed, but version {{ docker_version }} was requested." + when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and docker_version is defined and curr_docker_version.stdout | version_compare(docker_version, '>') + +# This involves an extremely slow migration process, users should instead run the +# Docker 1.10 upgrade playbook to accomplish this. +- name: Error out if attempting to upgrade Docker across the 1.10 boundary + fail: + msg: "Cannot upgrade Docker to >= 1.10, please upgrade or remove Docker manually, or use the Docker upgrade playbook if OpenShift is already installed." + when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and curr_docker_version.stdout | version_compare('1.10', '<') and docker_version is defined and docker_version | version_compare('1.10', '>=') diff --git a/roles/container_runtime/tasks/docker_upgrade_check.yml b/roles/container_runtime/tasks/docker_upgrade_check.yml new file mode 100644 index 000000000..f29619f42 --- /dev/null +++ b/roles/container_runtime/tasks/docker_upgrade_check.yml @@ -0,0 +1,67 @@ +--- + +# This snippet determines if a Docker upgrade is required by checking the inventory +# variables, the available packages, and sets l_docker_upgrade to True if so. 
+ +- set_fact: + docker_upgrade: True + when: docker_upgrade is not defined + +- name: Check if Docker is installed + command: rpm -q docker + args: + warn: no + register: pkg_check + failed_when: pkg_check.rc > 1 + changed_when: no + +- name: Get current version of Docker + command: "{{ repoquery_installed }} --qf '%{version}' docker" + register: curr_docker_version + retries: 4 + until: curr_docker_version | succeeded + changed_when: false + +- name: Get latest available version of Docker + command: > + {{ repoquery_cmd }} --qf '%{version}' "docker" + register: avail_docker_version + retries: 4 + until: avail_docker_version | succeeded + # Don't expect docker rpm to be available on hosts that don't already have it installed: + when: pkg_check.rc == 0 + failed_when: false + changed_when: false + +- fail: + msg: This playbook requires access to Docker 1.12 or later + # Disable the 1.12 requirement if the user set a specific Docker version + when: docker_version is not defined and (docker_upgrade is not defined or docker_upgrade | bool == True) and (pkg_check.rc == 0 and (avail_docker_version.stdout == "" or avail_docker_version.stdout | version_compare('1.12','<'))) + +# Default l_docker_upgrade to False, we'll set to True if an upgrade is required: +- set_fact: + l_docker_upgrade: False + +# Make sure a docker_version is set if none was requested: +- set_fact: + docker_version: "{{ avail_docker_version.stdout }}" + when: pkg_check.rc == 0 and docker_version is not defined + +- name: Flag for Docker upgrade if necessary + set_fact: + l_docker_upgrade: True + when: pkg_check.rc == 0 and curr_docker_version.stdout | version_compare(docker_version,'<') + +# Additional checks for Atomic hosts: +- name: Determine available Docker + shell: "rpm -q --queryformat '---\ncurr_version: %{VERSION}\navail_version: \n' docker" + register: g_atomic_docker_version_result + when: openshift.common.is_atomic | bool + +- set_fact: + l_docker_version: "{{ g_atomic_docker_version_result.stdout | from_yaml }}" + when: openshift.common.is_atomic | bool + +- fail: + msg: This playbook requires access to Docker 1.12 or later + when: openshift.common.is_atomic | bool and l_docker_version.avail_version | default(l_docker_version.curr_version, true) | version_compare('1.12','<') diff --git a/roles/container_runtime/tasks/main.yml b/roles/container_runtime/tasks/main.yml index 6d68082b1..96d8606c6 100644 --- a/roles/container_runtime/tasks/main.yml +++ b/roles/container_runtime/tasks/main.yml @@ -1,85 +1,2 @@ --- -- include_tasks: udev_workaround.yml - when: docker_udev_workaround | default(False) | bool - -- name: Add enterprise registry, if necessary - set_fact: - l2_docker_additional_registries: "{{ l2_docker_additional_registries + [openshift_docker_ent_reg] }}" - when: - - openshift.common.deployment_type == 'openshift-enterprise' - - openshift_docker_ent_reg != '' - - openshift_docker_ent_reg not in l2_docker_additional_registries - - not openshift_use_crio_only | bool - -- name: Use Package Docker if Requested - include_tasks: package_docker.yml - when: - - not openshift_docker_use_system_container - - not openshift_use_crio_only - -- name: Ensure /var/lib/containers exists - file: - path: /var/lib/containers - state: directory - -- name: Fix SELinux Permissions on /var/lib/containers - command: "restorecon -R /var/lib/containers/" - changed_when: false - -- name: Use System Container Docker if Requested - include_tasks: systemcontainer_docker.yml - when: - - openshift_docker_use_system_container - - not 
openshift_use_crio_only - -- name: Add CRI-O usage Requested - include_tasks: systemcontainer_crio.yml - when: - - openshift_use_crio - - openshift_docker_is_node_or_master | bool - -- name: stat the docker data dir - stat: - path: "{{ docker_default_storage_path }}" - register: dockerstat - -- when: - - openshift_use_crio - - dockerstat.stat.islnk is defined and not (dockerstat.stat.islnk | bool) - block: - - name: stop the current running docker - systemd: - state: stopped - name: "{{ openshift_docker_service_name }}" - - - name: copy "{{ docker_default_storage_path }}" to "{{ docker_alt_storage_path }}" - command: "cp -r {{ docker_default_storage_path }} {{ docker_alt_storage_path }}" - register: results - failed_when: - - results.rc != 0 - - - name: "Set the selinux context on {{ docker_alt_storage_path }}" - command: "semanage fcontext -a -e {{ docker_default_storage_path }} {{ docker_alt_storage_path }}" - register: results - failed_when: - - results.rc == 1 - - "'already exists' not in results.stderr" - - - name: "restorecon the {{ docker_alt_storage_path }}" - command: "restorecon -r {{ docker_alt_storage_path }}" - - - name: Remove the old docker location - file: - state: absent - path: "{{ docker_default_storage_path }}" - - - name: Setup the link - file: - state: link - src: "{{ docker_alt_storage_path }}" - path: "{{ docker_default_storage_path }}" - - - name: start docker - systemd: - state: started - name: "{{ openshift_docker_service_name }}" +# This role is meant to be used with include_role and tasks_from. diff --git a/roles/container_runtime/tasks/package_docker.yml b/roles/container_runtime/tasks/package_docker.yml index 40ab75a25..89899c9cf 100644 --- a/roles/container_runtime/tasks/package_docker.yml +++ b/roles/container_runtime/tasks/package_docker.yml @@ -1,4 +1,6 @@ --- +- include_tasks: common/pre.yml + - name: Get current installed Docker version command: "{{ repoquery_installed }} --qf '%{version}' docker" when: not openshift.common.is_atomic | bool @@ -7,35 +9,16 @@ until: curr_docker_version | succeeded changed_when: false -- name: Error out if Docker pre-installed but too old - fail: - msg: "Docker {{ curr_docker_version.stdout }} is installed, but >= 1.9.1 is required." - when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and curr_docker_version.stdout | version_compare('1.9.1', '<') and not docker_version is defined - -- name: Error out if requested Docker is too old - fail: - msg: "Docker {{ docker_version }} requested, but >= 1.9.1 is required." - when: docker_version is defined and docker_version | version_compare('1.9.1', '<') - -# If a docker_version was requested, sanity check that we can install or upgrade to it, and -# no downgrade is required. -- name: Fail if Docker version requested but downgrade is required - fail: - msg: "Docker {{ curr_docker_version.stdout }} is installed, but version {{ docker_version }} was requested." - when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and docker_version is defined and curr_docker_version.stdout | version_compare(docker_version, '>') - -# This involves an extremely slow migration process, users should instead run the -# Docker 1.10 upgrade playbook to accomplish this. -- name: Error out if attempting to upgrade Docker across the 1.10 boundary - fail: - msg: "Cannot upgrade Docker to >= 1.10, please upgrade or remove Docker manually, or use the Docker upgrade playbook if OpenShift is already installed." 
- when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and curr_docker_version.stdout | version_compare('1.10', '<') and docker_version is defined and docker_version | version_compare('1.10', '>=') +# Some basic checks to ensure the role will complete +- include_tasks: docker_sanity.yml # Make sure Docker is installed, but does not update a running version. # Docker upgrades are handled by a separate playbook. # Note: The curr_docker_version.stdout check can be removed when https://github.com/ansible/ansible/issues/33187 gets fixed. - name: Install Docker - package: name=docker{{ '-' + docker_version if docker_version is defined else '' }} state=present + package: + name: "docker{{ '-' + docker_version if docker_version is defined else '' }}" + state: present when: not openshift.common.is_atomic | bool and not curr_docker_version | skipped and not curr_docker_version.stdout != '' register: result until: result | success @@ -161,7 +144,4 @@ - set_fact: docker_service_status_changed: "{{ (r_docker_package_docker_start_result | changed) and (r_docker_already_running_result.stdout != 'ActiveState=active' ) }}" -- meta: flush_handlers - -# This needs to run after docker is restarted to account for proxy settings. -- include_tasks: registry_auth.yml +- include_tasks: common/post.yml diff --git a/roles/container_runtime/tasks/systemcontainer_crio.yml b/roles/container_runtime/tasks/systemcontainer_crio.yml index 48e33f0aa..61f122f3c 100644 --- a/roles/container_runtime/tasks/systemcontainer_crio.yml +++ b/roles/container_runtime/tasks/systemcontainer_crio.yml @@ -1,39 +1,14 @@ --- # TODO: Much of this file is shared with container engine tasks - -- name: Ensure container-selinux is installed - package: - name: container-selinux - state: present - when: not openshift.common.is_atomic | bool - register: result - until: result | success - - name: Check we are not using node as a Docker container with CRI-O fail: msg='Cannot use CRI-O with node configured as a Docker container' when: - openshift.common.is_containerized | bool - - not openshift.common.is_node_system_container | bool - -# Used to pull and install the system container -- name: Ensure atomic is installed - package: - name: atomic - state: present - when: not openshift.common.is_atomic | bool - register: result - until: result | success - -# At the time of writing the atomic command requires runc for it's own use. This -# task is here in the even that the atomic package ever removes the dependency. 
-- name: Ensure runc is installed - package: - name: runc - state: present - when: not openshift.common.is_atomic | bool - register: result - until: result | success + - not l_is_node_system_container | bool + +- include_tasks: common/pre.yml +- include_tasks: common/syscontainer_packages.yml - name: Check that overlay is in the kernel shell: lsmod | grep overlay @@ -60,50 +35,11 @@ state: restarted - name: Ensure proxies are in the atomic.conf - include_role: - name: openshift_atomic - tasks_from: proxy - -- block: - - - name: Set CRI-O image defaults - set_fact: - l_crio_image_prepend: "docker.io/gscrivano" - l_crio_image_name: "cri-o-fedora" - l_crio_image_tag: "latest" - - - name: Use Centos based image when distribution is CentOS - set_fact: - l_crio_image_name: "cri-o-centos" - when: ansible_distribution == "CentOS" - - - name: Set CRI-O image tag - set_fact: - l_crio_image_tag: "{{ l_openshift_image_tag }}" - when: - - openshift_deployment_type == 'openshift-enterprise' - - - name: Use RHEL based image when distribution is Red Hat - set_fact: - l_crio_image_prepend: "registry.access.redhat.com/openshift3" - l_crio_image_name: "cri-o" - when: ansible_distribution == "RedHat" - - - name: Set the full image name - set_fact: - l_crio_image: "{{ l_crio_image_prepend }}/{{ l_crio_image_name }}:{{ l_crio_image_tag }}" - - # For https://github.com/openshift/aos-cd-jobs/pull/624#pullrequestreview-61816548 - - name: Use a specific image if requested - set_fact: - l_crio_image: "{{ openshift_crio_systemcontainer_image_override }}" - when: - - openshift_crio_systemcontainer_image_override is defined - - openshift_crio_systemcontainer_image_override != "" - - # Be nice and let the user see the variable result - - debug: - var: l_crio_image + include_tasks: common/atomic_proxy.yml + +# Be nice and let the user see the variable result +- debug: + var: l_crio_image # NOTE: no_proxy added as a workaround until https://github.com/projectatomic/atomic/pull/999 is released - name: Pre-pull CRI-O System Container image @@ -112,7 +48,6 @@ environment: NO_PROXY: "{{ openshift.common.no_proxy | default('') }}" - - name: Install CRI-O System Container oc_atomic_container: name: "cri-o" @@ -154,10 +89,8 @@ daemon_reload: yes register: start_result -- meta: flush_handlers - # If we are using crio only, docker.service might not be available for # 'docker login' -- include_tasks: registry_auth.yml +- include_tasks: common/post.yml vars: openshift_docker_alternative_creds: "{{ openshift_use_crio_only }}" diff --git a/roles/container_runtime/tasks/systemcontainer_docker.yml b/roles/container_runtime/tasks/systemcontainer_docker.yml index 84217e50c..10570fe34 100644 --- a/roles/container_runtime/tasks/systemcontainer_docker.yml +++ b/roles/container_runtime/tasks/systemcontainer_docker.yml @@ -11,32 +11,9 @@ traditional docker package install. Otherwise, comment out openshift_docker_options in your inventory file. -- name: Ensure container-selinux is installed - package: - name: container-selinux - state: present - when: not openshift.common.is_atomic | bool - register: result - until: result | success - -# Used to pull and install the system container -- name: Ensure atomic is installed - package: - name: atomic - state: present - when: not openshift.common.is_atomic | bool - register: result - until: result | success +- include_tasks: common/pre.yml -# At the time of writing the atomic command requires runc for it's own use. This -# task is here in the even that the atomic package ever removes the dependency. 
-- name: Ensure runc is installed - package: - name: runc - state: present - when: not openshift.common.is_atomic | bool - register: result - until: result | success +- include_tasks: common/syscontainer_packages.yml # Make sure Docker is installed so we are able to use the client - name: Install Docker so we can use the client @@ -59,48 +36,11 @@ delay: 30 - name: Ensure proxies are in the atomic.conf - include_role: - name: openshift_atomic - tasks_from: proxy - -- block: - - - name: Set to default prepend - set_fact: - l_docker_image_prepend: "gscrivano" - l_docker_image_tag: "latest" - - - name: Set container engine image tag - set_fact: - l_docker_image_tag: "{{ l_openshift_image_tag }}" - when: - - openshift_deployment_type == 'openshift-enterprise' - - - name: Use Red Hat Registry for image when distribution is Red Hat - set_fact: - l_docker_image_prepend: "registry.access.redhat.com/openshift3" - when: ansible_distribution == 'RedHat' - - - name: Use Fedora Registry for image when distribution is Fedora - set_fact: - l_docker_image_prepend: "registry.fedoraproject.org/f25" - when: ansible_distribution == 'Fedora' - - - name: Set the full image name - set_fact: - l_docker_image: "{{ l_docker_image_prepend }}/{{ openshift_docker_service_name }}:{{ l_docker_image_tag }}" - - # For https://github.com/openshift/openshift-ansible/pull/5354#issuecomment-328552959 - - name: Use a specific image if requested - set_fact: - l_docker_image: "{{ openshift_docker_systemcontainer_image_override }}" - when: - - openshift_docker_systemcontainer_image_override is defined - - openshift_docker_systemcontainer_image_override != "" - - # Be nice and let the user see the variable result - - debug: - var: l_docker_image + include_tasks: common/atomic_proxy.yml + +# Be nice and let the user see the variable result +- debug: + var: l_docker_image # NOTE: no_proxy added as a workaround until https://github.com/projectatomic/atomic/pull/999 is released - name: Pre-pull Container Engine System Container image @@ -154,10 +94,8 @@ - set_fact: docker_service_status_changed: "{{ r_docker_systemcontainer_docker_start_result | changed }}" -- meta: flush_handlers - # Since docker is running as a system container, docker login will fail to create # credentials. Use alternate method if requiring authenticated registries. -- include_tasks: registry_auth.yml +- include_tasks: common/post.yml vars: openshift_docker_alternative_creds: True diff --git a/roles/container_runtime/templates/crio.conf.j2 b/roles/container_runtime/templates/crio.conf.j2 index 3f066a17f..0a1ff2e0a 100644 --- a/roles/container_runtime/templates/crio.conf.j2 +++ b/roles/container_runtime/templates/crio.conf.j2 @@ -27,7 +27,7 @@ storage_option = [ [crio.api] # listen is the path to the AF_LOCAL socket on which crio will listen. 
-listen = "/var/run/crio.sock" +listen = "/var/run/crio/crio.sock" # stream_address is the IP address on which the stream server will listen stream_address = "" diff --git a/roles/container_runtime/templates/daemon.json b/roles/container_runtime/templates/daemon.json index 383963bd3..1a72d812a 100644 --- a/roles/container_runtime/templates/daemon.json +++ b/roles/container_runtime/templates/daemon.json @@ -5,10 +5,10 @@ "disable-legacy-registry": false, "exec-opts": ["native.cgroupdriver=systemd"], "insecure-registries": {{ l_docker_insecure_registries }}, -{% if openshift_docker_log_driver is defined %} +{% if openshift_docker_log_driver %} "log-driver": "{{ openshift_docker_log_driver }}", {%- endif %} - "log-opts": {{ l_docker_log_options }}, + "log-opts": {{ l_docker_log_options_dict }}, "runtimes": { "oci": { "path": "/usr/libexec/docker/docker-runc-current" diff --git a/roles/contiv/meta/main.yml b/roles/contiv/meta/main.yml index a2c2f98a7..52b9d09dd 100644 --- a/roles/contiv/meta/main.yml +++ b/roles/contiv/meta/main.yml @@ -21,7 +21,7 @@ dependencies: etcd_client_port: 22379 etcd_conf_dir: /etc/contiv-etcd/ etcd_data_dir: /var/lib/contiv-etcd/ - etcd_ca_host: "{{ inventory_hostname }}" + etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" etcd_cert_config_dir: /etc/contiv-etcd/ etcd_url_scheme: http etcd_peer_url_scheme: http diff --git a/roles/contiv/tasks/default_network.yml b/roles/contiv/tasks/default_network.yml index f679443e0..8a928ea54 100644 --- a/roles/contiv/tasks/default_network.yml +++ b/roles/contiv/tasks/default_network.yml @@ -8,51 +8,64 @@ - name: Contiv | Set globals command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" global set --fabric-mode {{ contiv_fabric_mode }} --vlan-range {{ contiv_vlan_range }} --fwd-mode {{ netplugin_fwd_mode }} --private-subnet {{ contiv_private_ext_subnet }}' + run_once: true - name: Contiv | Set arp mode to flood if ACI command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" global set --arp-mode flood' when: contiv_fabric_mode == "aci" + run_once: true - name: Contiv | Check if default-net exists command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" net ls' register: net_result + run_once: true - name: Contiv | Create default-net command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" net create --subnet={{ contiv_default_subnet }} -e {{ contiv_encap_mode }} -p {{ contiv_default_network_tag }} --gateway {{ contiv_default_gw }} default-net' when: net_result.stdout.find("default-net") == -1 + run_once: true - name: Contiv | Create host access infra network for VxLan routing case command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" net create --subnet={{ contiv_h1_subnet_default }} --gateway={{ contiv_h1_gw_default }} --nw-type="infra" contivh1' when: (contiv_encap_mode == "vxlan") and (netplugin_fwd_mode == "routing") + run_once: true #- name: Contiv | Create an allow-all policy for the default-group # command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" policy create ose-allow-all-policy' # when: contiv_fabric_mode == "aci" +# run_once: true - name: Contiv | Set up aci external contract to consume default external contract command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" external-contracts create -c -a {{ apic_default_external_contract }} oseExtToConsume' when: (contiv_fabric_mode == "aci") and (apic_configure_default_policy 
== true) + run_once: true - name: Contiv | Set up aci external contract to provide default external contract command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" external-contracts create -p -a {{ apic_default_external_contract }} oseExtToProvide' when: (contiv_fabric_mode == "aci") and (apic_configure_default_policy == true) + run_once: true - name: Contiv | Create aci default-group command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" group create default-net default-group' when: contiv_fabric_mode == "aci" + run_once: true - name: Contiv | Add external contracts to the default-group command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" group create -e oseExtToConsume -e oseExtToProvide default-net default-group' when: (contiv_fabric_mode == "aci") and (apic_configure_default_policy == true) + run_once: true #- name: Contiv | Add policy rule 1 for allow-all policy # command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" policy rule-add -d in --action allow ose-allow-all-policy 1' # when: contiv_fabric_mode == "aci" +# run_once: true #- name: Contiv | Add policy rule 2 for allow-all policy # command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" policy rule-add -d out --action allow ose-allow-all-policy 2' # when: contiv_fabric_mode == "aci" +# run_once: true - name: Contiv | Create default aci app profile command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" app-profile create -g default-group {{ apic_default_app_profile }}' when: contiv_fabric_mode == "aci" + run_once: true diff --git a/roles/contiv/tasks/netmaster_iptables.yml b/roles/contiv/tasks/netmaster_iptables.yml index 07bb16ea7..c98e7b6a5 100644 --- a/roles/contiv/tasks/netmaster_iptables.yml +++ b/roles/contiv/tasks/netmaster_iptables.yml @@ -13,9 +13,15 @@ - name: Netmaster IPtables | Open Netmaster with iptables command: /sbin/iptables -I INPUT 1 -p tcp --dport {{ item }} -j ACCEPT -m comment --comment "contiv" with_items: - - "{{ netmaster_port }}" - "{{ contiv_rpc_port1 }}" - "{{ contiv_rpc_port2 }}" - "{{ contiv_rpc_port3 }}" when: iptablesrules.stdout.find("contiv") == -1 notify: Save iptables rules + +- name: Netmaster IPtables | Open netmaster main port + command: /sbin/iptables -I INPUT 1 -p tcp -s {{ item }} --dport {{ netmaster_port }} -j ACCEPT -m comment --comment "contiv" + with_items: + - "{{ groups.oo_nodes_to_config|difference(hostvars[inventory_hostname]['ansible_' + netmaster_interface].ipv4.address)|list }}" + when: iptablesrules.stdout.find("contiv") == -1 + notify: Save iptables rules diff --git a/roles/contiv/tasks/pkgMgrInstallers/centos-install.yml b/roles/contiv/tasks/pkgMgrInstallers/centos-install.yml index 62b4716a3..a4d260279 100644 --- a/roles/contiv/tasks/pkgMgrInstallers/centos-install.yml +++ b/roles/contiv/tasks/pkgMgrInstallers/centos-install.yml @@ -27,7 +27,7 @@ - name: PkgMgr RHEL/CentOS | Install ovs yum: - pkg=openvswitch-2.5.0-2.el7.x86_64 + pkg=openvswitch state=present environment: http_proxy: "{{ http_proxy|default('') }}" diff --git a/roles/contiv/templates/aci-gw.service b/roles/contiv/templates/aci-gw.service index 90bb98001..9b3f12567 100644 --- a/roles/contiv/templates/aci-gw.service +++ b/roles/contiv/templates/aci-gw.service @@ -6,5 +6,8 @@ After=auditd.service systemd-user-sessions.service time-sync.target {{ openshift ExecStart={{ bin_dir }}/aci_gw.sh start ExecStop={{ bin_dir }}/aci_gw.sh stop 
KillMode=control-group -Restart=on-failure +Restart=always RestartSec=10 + +[Install] +WantedBy=multi-user.target diff --git a/roles/contiv/templates/netmaster.service b/roles/contiv/templates/netmaster.service index a602c955e..ce7d0c75e 100644 --- a/roles/contiv/templates/netmaster.service +++ b/roles/contiv/templates/netmaster.service @@ -6,5 +6,8 @@ After=auditd.service systemd-user-sessions.service contiv-etcd.service EnvironmentFile=/etc/default/netmaster ExecStart={{ bin_dir }}/netmaster $NETMASTER_ARGS KillMode=control-group -Restart=on-failure +Restart=always RestartSec=10 + +[Install] +WantedBy=multi-user.target diff --git a/roles/contiv/templates/netplugin.service b/roles/contiv/templates/netplugin.service index dc7b95bb5..6358d89ec 100644 --- a/roles/contiv/templates/netplugin.service +++ b/roles/contiv/templates/netplugin.service @@ -6,3 +6,8 @@ After=auditd.service systemd-user-sessions.service contiv-etcd.service EnvironmentFile=/etc/default/netplugin ExecStart={{ bin_dir }}/netplugin $NETPLUGIN_ARGS KillMode=control-group +Restart=always +RestartSec=10 + +[Install] +WantedBy=multi-user.target diff --git a/roles/contiv_facts/tasks/rpm.yml b/roles/contiv_facts/tasks/rpm.yml index 07401a6dd..d12436f96 100644 --- a/roles/contiv_facts/tasks/rpm.yml +++ b/roles/contiv_facts/tasks/rpm.yml @@ -6,10 +6,17 @@ failed_when: false check_mode: no +- name: RPM | Determine if firewalld enabled + command: "systemctl status firewalld.service" + register: ss + changed_when: false + failed_when: false + check_mode: no + - name: Set the has_firewalld fact set_fact: has_firewalld: true - when: s.rc == 0 + when: s.rc == 0 and ss.rc == 0 - name: Determine if iptables-services installed command: "rpm -q iptables-services" diff --git a/roles/etcd/defaults/main.yaml b/roles/etcd/defaults/main.yaml index 3038ed9f6..86cea5c46 100644 --- a/roles/etcd/defaults/main.yaml +++ b/roles/etcd/defaults/main.yaml @@ -10,7 +10,7 @@ r_etcd_common_embedded_etcd: false osm_etcd_image: 'registry.access.redhat.com/rhel7/etcd' etcd_image_dict: - origin: "registry.fedoraproject.org/f26/etcd" + origin: "registry.fedoraproject.org/latest/etcd" openshift-enterprise: "{{ osm_etcd_image }}" etcd_image: "{{ etcd_image_dict[openshift_deployment_type | default('origin')] }}" diff --git a/roles/etcd/meta/main.yml b/roles/etcd/meta/main.yml index 879ca4f4e..f2e1fc310 100644 --- a/roles/etcd/meta/main.yml +++ b/roles/etcd/meta/main.yml @@ -19,3 +19,4 @@ dependencies: - role: lib_openshift - role: lib_os_firewall - role: lib_utils +- role: openshift_facts diff --git a/roles/etcd/tasks/migration/add_ttls.yml b/roles/etcd/tasks/migration/add_ttls.yml index 4bdc6bcc3..a4b0ff31d 100644 --- a/roles/etcd/tasks/migration/add_ttls.yml +++ b/roles/etcd/tasks/migration/add_ttls.yml @@ -11,7 +11,7 @@ - name: Re-introduce leases (as a replacement for key TTLs) command: > - oadm migrate etcd-ttl \ + {{ openshift.common.client_binary }} adm migrate etcd-ttl \ --cert {{ r_etcd_common_master_peer_cert_file }} \ --key {{ r_etcd_common_master_peer_key_file }} \ --cacert {{ r_etcd_common_master_peer_ca_file }} \ diff --git a/roles/etcd/tasks/system_container.yml b/roles/etcd/tasks/system_container.yml index 82ac4fc84..ca8b6a707 100644 --- a/roles/etcd/tasks/system_container.yml +++ b/roles/etcd/tasks/system_container.yml @@ -1,9 +1,4 @@ --- -- name: Ensure proxies are in the atomic.conf - include_role: - name: openshift_atomic - tasks_from: proxy - - name: Pull etcd system container command: atomic pull --storage=ostree {{ etcd_image }} register: 
pull_result diff --git a/roles/flannel/defaults/main.yaml b/roles/flannel/defaults/main.yaml index 488b6b0bc..2e4a0dc39 100644 --- a/roles/flannel/defaults/main.yaml +++ b/roles/flannel/defaults/main.yaml @@ -2,8 +2,8 @@ flannel_interface: "{{ ansible_default_ipv4.interface }}" flannel_etcd_key: /openshift.com/network etcd_hosts: "{{ etcd_urls }}" -etcd_peer_ca_file: "{{ openshift.common.config_base }}/node/{{ 'ca' if (embedded_etcd | bool) else 'flannel.etcd-ca' }}.crt" -etcd_peer_cert_file: "{{ openshift.common.config_base }}/node/{{ 'system:node:' + openshift.common.hostname if (embedded_etcd | bool) else 'flannel.etcd-client' }}.crt" -etcd_peer_key_file: "{{ openshift.common.config_base }}/node/{{ 'system:node:' + openshift.common.hostname if (embedded_etcd | bool) else 'flannel.etcd-client' }}.key" +etcd_peer_ca_file: "{{ openshift.common.config_base }}/node/flannel.etcd-ca.crt" +etcd_peer_cert_file: "{{ openshift.common.config_base }}/node/flannel.etcd-client.crt" +etcd_peer_key_file: "{{ openshift.common.config_base }}/node/flannel.etcd-client.key" openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False)) else 'docker' }}" diff --git a/roles/flannel/handlers/main.yml b/roles/flannel/handlers/main.yml index 80e4d391d..705d39f9a 100644 --- a/roles/flannel/handlers/main.yml +++ b/roles/flannel/handlers/main.yml @@ -15,7 +15,7 @@ - name: restart node systemd: - name: "{{ openshift.common.service_type }}-node" + name: "{{ openshift_service_type }}-node" state: restarted register: l_restart_node_result until: not l_restart_node_result | failed diff --git a/roles/flannel_register/defaults/main.yaml b/roles/flannel_register/defaults/main.yaml index 1d0f5df6a..cd11fd9ff 100644 --- a/roles/flannel_register/defaults/main.yaml +++ b/roles/flannel_register/defaults/main.yaml @@ -4,6 +4,6 @@ flannel_subnet_len: "{{ 32 - (openshift.master.sdn_host_subnet_length | int) }}" flannel_etcd_key: /openshift.com/network etcd_hosts: "{{ etcd_urls }}" etcd_conf_dir: "{{ openshift.common.config_base }}/master" -etcd_peer_ca_file: "{{ etcd_conf_dir + '/ca.crt' if (openshift.master.embedded_etcd | bool) else etcd_conf_dir + '/master.etcd-ca.crt' }}" +etcd_peer_ca_file: "{{ etcd_conf_dir + '/master.etcd-ca.crt' }}" etcd_peer_cert_file: "{{ etcd_conf_dir }}/master.etcd-client.crt" etcd_peer_key_file: "{{ etcd_conf_dir }}/master.etcd-client.key" diff --git a/roles/kuryr/tasks/node.yaml b/roles/kuryr/tasks/node.yaml index ffe814713..08f2d5adc 100644 --- a/roles/kuryr/tasks/node.yaml +++ b/roles/kuryr/tasks/node.yaml @@ -36,7 +36,7 @@ - name: Configure OpenShift node with disabled service proxy lineinfile: - dest: "/etc/sysconfig/{{ openshift.common.service_type }}-node" + dest: "/etc/sysconfig/{{ openshift_service_type }}-node" regexp: '^OPTIONS="?(.*?)"?$' backrefs: yes backup: yes @@ -44,5 +44,5 @@ - name: force node restart to disable the proxy service: - name: "{{ openshift.common.service_type }}-node" + name: "{{ openshift_service_type }}-node" state: restarted diff --git a/roles/lib_utils/library/oo_ec2_group.py b/roles/lib_utils/library/oo_ec2_group.py new file mode 100644 index 000000000..615021ac5 --- /dev/null +++ b/roles/lib_utils/library/oo_ec2_group.py @@ -0,0 +1,903 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# pylint: skip-file +# flake8: noqa + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software 
Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['stableinterface'], + 'supported_by': 'core'} + +DOCUMENTATION = ''' +--- +module: ec2_group +author: "Andrew de Quincey (@adq)" +version_added: "1.3" +requirements: [ boto3 ] +short_description: maintain an ec2 VPC security group. +description: + - maintains ec2 security groups. This module has a dependency on python-boto >= 2.5 +options: + name: + description: + - Name of the security group. + - One of and only one of I(name) or I(group_id) is required. + - Required if I(state=present). + required: false + group_id: + description: + - Id of group to delete (works only with absent). + - One of and only one of I(name) or I(group_id) is required. + required: false + version_added: "2.4" + description: + description: + - Description of the security group. Required when C(state) is C(present). + required: false + vpc_id: + description: + - ID of the VPC to create the group in. + required: false + rules: + description: + - List of firewall inbound rules to enforce in this group (see example). If none are supplied, + no inbound rules will be enabled. Rules list may include its own name in `group_name`. + This allows idempotent loopback additions (e.g. allow group to access itself). + Rule sources list support was added in version 2.4. This allows to define multiple sources per + source type as well as multiple source types per rule. Prior to 2.4 an individual source is allowed. + required: false + rules_egress: + description: + - List of firewall outbound rules to enforce in this group (see example). If none are supplied, + a default all-out rule is assumed. If an empty list is supplied, no outbound rules will be enabled. + Rule Egress sources list support was added in version 2.4. + required: false + version_added: "1.6" + state: + version_added: "1.4" + description: + - Create or delete a security group + required: false + default: 'present' + choices: [ "present", "absent" ] + aliases: [] + purge_rules: + version_added: "1.8" + description: + - Purge existing rules on security group that are not found in rules + required: false + default: 'true' + aliases: [] + purge_rules_egress: + version_added: "1.8" + description: + - Purge existing rules_egress on security group that are not found in rules_egress + required: false + default: 'true' + aliases: [] + tags: + version_added: "2.4" + description: + - A dictionary of one or more tags to assign to the security group. + required: false + purge_tags: + version_added: "2.4" + description: + - If yes, existing tags will be purged from the resource to match exactly what is defined by I(tags) parameter. If the I(tags) parameter is not set then + tags will not be modified. + required: false + default: yes + choices: [ 'yes', 'no' ] + +extends_documentation_fragment: + - aws + - ec2 + +notes: + - If a rule declares a group_name and that group doesn't exist, it will be + automatically created. In that case, group_desc should be provided as well. + The module will refuse to create a depended-on group without a description. 
+''' + +EXAMPLES = ''' +- name: example ec2 group + ec2_group: + name: example + description: an example EC2 group + vpc_id: 12345 + region: eu-west-1 + aws_secret_key: SECRET + aws_access_key: ACCESS + rules: + - proto: tcp + from_port: 80 + to_port: 80 + cidr_ip: 0.0.0.0/0 + - proto: tcp + from_port: 22 + to_port: 22 + cidr_ip: 10.0.0.0/8 + - proto: tcp + from_port: 443 + to_port: 443 + group_id: amazon-elb/sg-87654321/amazon-elb-sg + - proto: tcp + from_port: 3306 + to_port: 3306 + group_id: 123412341234/sg-87654321/exact-name-of-sg + - proto: udp + from_port: 10050 + to_port: 10050 + cidr_ip: 10.0.0.0/8 + - proto: udp + from_port: 10051 + to_port: 10051 + group_id: sg-12345678 + - proto: icmp + from_port: 8 # icmp type, -1 = any type + to_port: -1 # icmp subtype, -1 = any subtype + cidr_ip: 10.0.0.0/8 + - proto: all + # the containing group name may be specified here + group_name: example + rules_egress: + - proto: tcp + from_port: 80 + to_port: 80 + cidr_ip: 0.0.0.0/0 + cidr_ipv6: 64:ff9b::/96 + group_name: example-other + # description to use if example-other needs to be created + group_desc: other example EC2 group + +- name: example2 ec2 group + ec2_group: + name: example2 + description: an example2 EC2 group + vpc_id: 12345 + region: eu-west-1 + rules: + # 'ports' rule keyword was introduced in version 2.4. It accepts a single port value or a list of values including ranges (from_port-to_port). + - proto: tcp + ports: 22 + group_name: example-vpn + - proto: tcp + ports: + - 80 + - 443 + - 8080-8099 + cidr_ip: 0.0.0.0/0 + # Rule sources list support was added in version 2.4. This allows to define multiple sources per source type as well as multiple source types per rule. + - proto: tcp + ports: + - 6379 + - 26379 + group_name: + - example-vpn + - example-redis + - proto: tcp + ports: 5665 + group_name: example-vpn + cidr_ip: + - 172.16.1.0/24 + - 172.16.17.0/24 + cidr_ipv6: + - 2607:F8B0::/32 + - 64:ff9b::/96 + group_id: + - sg-edcd9784 + +- name: "Delete group by its id" + ec2_group: + group_id: sg-33b4ee5b + state: absent +''' + +RETURN = ''' +group_name: + description: Security group name + sample: My Security Group + type: string + returned: on create/update +group_id: + description: Security group id + sample: sg-abcd1234 + type: string + returned: on create/update +description: + description: Description of security group + sample: My Security Group + type: string + returned: on create/update +tags: + description: Tags associated with the security group + sample: + Name: My Security Group + Purpose: protecting stuff + type: dict + returned: on create/update +vpc_id: + description: ID of VPC to which the security group belongs + sample: vpc-abcd1234 + type: string + returned: on create/update +ip_permissions: + description: Inbound rules associated with the security group. + sample: + - from_port: 8182 + ip_protocol: tcp + ip_ranges: + - cidr_ip: "1.1.1.1/32" + ipv6_ranges: [] + prefix_list_ids: [] + to_port: 8182 + user_id_group_pairs: [] + type: list + returned: on create/update +ip_permissions_egress: + description: Outbound rules associated with the security group. 
+ sample: + - ip_protocol: -1 + ip_ranges: + - cidr_ip: "0.0.0.0/0" + ipv6_ranges: [] + prefix_list_ids: [] + user_id_group_pairs: [] + type: list + returned: on create/update +owner_id: + description: AWS Account ID of the security group + sample: 123456789012 + type: int + returned: on create/update +''' + +import json +import re +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.ec2 import boto3_conn +from ansible.module_utils.ec2 import get_aws_connection_info +from ansible.module_utils.ec2 import ec2_argument_spec +from ansible.module_utils.ec2 import camel_dict_to_snake_dict +from ansible.module_utils.ec2 import HAS_BOTO3 +from ansible.module_utils.ec2 import boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_tag_list, compare_aws_tags +from ansible.module_utils.ec2 import AWSRetry +import traceback + +try: + import botocore +except ImportError: + pass # caught by imported HAS_BOTO3 + + +@AWSRetry.backoff(tries=5, delay=5, backoff=2.0) +def get_security_groups_with_backoff(connection, **kwargs): + return connection.describe_security_groups(**kwargs) + + +def deduplicate_rules_args(rules): + """Returns unique rules""" + if rules is None: + return None + return list(dict(zip((json.dumps(r, sort_keys=True) for r in rules), rules)).values()) + + +def make_rule_key(prefix, rule, group_id, cidr_ip): + if 'proto' in rule: + proto, from_port, to_port = [rule.get(x, None) for x in ('proto', 'from_port', 'to_port')] + elif 'IpProtocol' in rule: + proto, from_port, to_port = [rule.get(x, None) for x in ('IpProtocol', 'FromPort', 'ToPort')] + if proto not in ['icmp', 'tcp', 'udp'] and from_port == -1 and to_port == -1: + from_port = 'none' + to_port = 'none' + key = "%s-%s-%s-%s-%s-%s" % (prefix, proto, from_port, to_port, group_id, cidr_ip) + return key.lower().replace('-none', '-None') + + +def add_rules_to_lookup(ipPermissions, group_id, prefix, dict): + for rule in ipPermissions: + for groupGrant in rule.get('UserIdGroupPairs', []): + dict[make_rule_key(prefix, rule, group_id, groupGrant.get('GroupId'))] = (rule, groupGrant) + for ipv4Grants in rule.get('IpRanges', []): + dict[make_rule_key(prefix, rule, group_id, ipv4Grants.get('CidrIp'))] = (rule, ipv4Grants) + for ipv6Grants in rule.get('Ipv6Ranges', []): + dict[make_rule_key(prefix, rule, group_id, ipv6Grants.get('CidrIpv6'))] = (rule, ipv6Grants) + + +def validate_rule(module, rule): + VALID_PARAMS = ('cidr_ip', 'cidr_ipv6', + 'group_id', 'group_name', 'group_desc', + 'proto', 'from_port', 'to_port') + if not isinstance(rule, dict): + module.fail_json(msg='Invalid rule parameter type [%s].' 
% type(rule)) + for k in rule: + if k not in VALID_PARAMS: + module.fail_json(msg='Invalid rule parameter \'{}\''.format(k)) + + if 'group_id' in rule and 'cidr_ip' in rule: + module.fail_json(msg='Specify group_id OR cidr_ip, not both') + elif 'group_name' in rule and 'cidr_ip' in rule: + module.fail_json(msg='Specify group_name OR cidr_ip, not both') + elif 'group_id' in rule and 'cidr_ipv6' in rule: + module.fail_json(msg="Specify group_id OR cidr_ipv6, not both") + elif 'group_name' in rule and 'cidr_ipv6' in rule: + module.fail_json(msg="Specify group_name OR cidr_ipv6, not both") + elif 'cidr_ip' in rule and 'cidr_ipv6' in rule: + module.fail_json(msg="Specify cidr_ip OR cidr_ipv6, not both") + elif 'group_id' in rule and 'group_name' in rule: + module.fail_json(msg='Specify group_id OR group_name, not both') + + +def get_target_from_rule(module, client, rule, name, group, groups, vpc_id): + """ + Returns tuple of (group_id, ip) after validating rule params. + + rule: Dict describing a rule. + name: Name of the security group being managed. + groups: Dict of all available security groups. + + AWS accepts an ip range or a security group as target of a rule. This + function validate the rule specification and return either a non-None + group_id or a non-None ip range. + """ + + FOREIGN_SECURITY_GROUP_REGEX = '^(\S+)/(sg-\S+)/(\S+)' + group_id = None + group_name = None + ip = None + ipv6 = None + target_group_created = False + + if 'group_id' in rule and 'cidr_ip' in rule: + module.fail_json(msg="Specify group_id OR cidr_ip, not both") + elif 'group_name' in rule and 'cidr_ip' in rule: + module.fail_json(msg="Specify group_name OR cidr_ip, not both") + elif 'group_id' in rule and 'cidr_ipv6' in rule: + module.fail_json(msg="Specify group_id OR cidr_ipv6, not both") + elif 'group_name' in rule and 'cidr_ipv6' in rule: + module.fail_json(msg="Specify group_name OR cidr_ipv6, not both") + elif 'group_id' in rule and 'group_name' in rule: + module.fail_json(msg="Specify group_id OR group_name, not both") + elif 'cidr_ip' in rule and 'cidr_ipv6' in rule: + module.fail_json(msg="Specify cidr_ip OR cidr_ipv6, not both") + elif rule.get('group_id') and re.match(FOREIGN_SECURITY_GROUP_REGEX, rule['group_id']): + # this is a foreign Security Group. Since you can't fetch it you must create an instance of it + owner_id, group_id, group_name = re.match(FOREIGN_SECURITY_GROUP_REGEX, rule['group_id']).groups() + group_instance = dict(GroupId=group_id, GroupName=group_name) + groups[group_id] = group_instance + groups[group_name] = group_instance + elif 'group_id' in rule: + group_id = rule['group_id'] + elif 'group_name' in rule: + group_name = rule['group_name'] + if group_name == name: + group_id = group['GroupId'] + groups[group_id] = group + groups[group_name] = group + elif group_name in groups and group.get('VpcId') and groups[group_name].get('VpcId'): + # both are VPC groups, this is ok + group_id = groups[group_name]['GroupId'] + elif group_name in groups and not (group.get('VpcId') or groups[group_name].get('VpcId')): + # both are EC2 classic, this is ok + group_id = groups[group_name]['GroupId'] + else: + # if we got here, either the target group does not exist, or there + # is a mix of EC2 classic + VPC groups. 
Mixing of EC2 classic + VPC + # is bad, so we have to create a new SG because no compatible group + # exists + if not rule.get('group_desc', '').strip(): + module.fail_json(msg="group %s will be automatically created by rule %s and " + "no description was provided" % (group_name, rule)) + if not module.check_mode: + params = dict(GroupName=group_name, Description=rule['group_desc']) + if vpc_id: + params['VpcId'] = vpc_id + auto_group = client.create_security_group(**params) + group_id = auto_group['GroupId'] + groups[group_id] = auto_group + groups[group_name] = auto_group + target_group_created = True + elif 'cidr_ip' in rule: + ip = rule['cidr_ip'] + elif 'cidr_ipv6' in rule: + ipv6 = rule['cidr_ipv6'] + + return group_id, ip, ipv6, target_group_created + + +def ports_expand(ports): + # takes a list of ports and returns a list of (port_from, port_to) + ports_expanded = [] + for port in ports: + if not isinstance(port, str): + ports_expanded.append((port,) * 2) + elif '-' in port: + ports_expanded.append(tuple(p.strip() for p in port.split('-', 1))) + else: + ports_expanded.append((port.strip(),) * 2) + + return ports_expanded + + +def rule_expand_ports(rule): + # takes a rule dict and returns a list of expanded rule dicts + if 'ports' not in rule: + return [rule] + + ports = rule['ports'] if isinstance(rule['ports'], list) else [rule['ports']] + + rule_expanded = [] + for from_to in ports_expand(ports): + temp_rule = rule.copy() + del temp_rule['ports'] + temp_rule['from_port'], temp_rule['to_port'] = from_to + rule_expanded.append(temp_rule) + + return rule_expanded + + +def rules_expand_ports(rules): + # takes a list of rules and expands it based on 'ports' + if not rules: + return rules + + return [rule for rule_complex in rules + for rule in rule_expand_ports(rule_complex)] + + +def rule_expand_source(rule, source_type): + # takes a rule dict and returns a list of expanded rule dicts for specified source_type + sources = rule[source_type] if isinstance(rule[source_type], list) else [rule[source_type]] + source_types_all = ('cidr_ip', 'cidr_ipv6', 'group_id', 'group_name') + + rule_expanded = [] + for source in sources: + temp_rule = rule.copy() + for s in source_types_all: + temp_rule.pop(s, None) + temp_rule[source_type] = source + rule_expanded.append(temp_rule) + + return rule_expanded + + +def rule_expand_sources(rule): + # takes a rule dict and returns a list of expanded rule discts + source_types = (stype for stype in ('cidr_ip', 'cidr_ipv6', 'group_id', 'group_name') if stype in rule) + + return [r for stype in source_types + for r in rule_expand_source(rule, stype)] + + +def rules_expand_sources(rules): + # takes a list of rules and expands it based on 'cidr_ip', 'group_id', 'group_name' + if not rules: + return rules + + return [rule for rule_complex in rules + for rule in rule_expand_sources(rule_complex)] + + +def authorize_ip(type, changed, client, group, groupRules, + ip, ip_permission, module, rule, ethertype): + # If rule already exists, don't later delete it + for thisip in ip: + rule_id = make_rule_key(type, rule, group['GroupId'], thisip) + if rule_id in groupRules: + del groupRules[rule_id] + else: + if not module.check_mode: + ip_permission = serialize_ip_grant(rule, thisip, ethertype) + if ip_permission: + try: + if type == "in": + client.authorize_security_group_ingress(GroupId=group['GroupId'], + IpPermissions=[ip_permission]) + elif type == "out": + client.authorize_security_group_egress(GroupId=group['GroupId'], + IpPermissions=[ip_permission]) + except 
botocore.exceptions.ClientError as e: + module.fail_json(msg="Unable to authorize %s for ip %s security group '%s' - %s" % + (type, thisip, group['GroupName'], e), + exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) + changed = True + return changed, ip_permission + + +def serialize_group_grant(group_id, rule): + permission = {'IpProtocol': rule['proto'], + 'FromPort': rule['from_port'], + 'ToPort': rule['to_port'], + 'UserIdGroupPairs': [{'GroupId': group_id}]} + + return fix_port_and_protocol(permission) + + +def serialize_revoke(grant, rule): + permission = dict() + fromPort = rule['FromPort'] if 'FromPort' in rule else None + toPort = rule['ToPort'] if 'ToPort' in rule else None + if 'GroupId' in grant: + permission = {'IpProtocol': rule['IpProtocol'], + 'FromPort': fromPort, + 'ToPort': toPort, + 'UserIdGroupPairs': [{'GroupId': grant['GroupId']}] + } + elif 'CidrIp' in grant: + permission = {'IpProtocol': rule['IpProtocol'], + 'FromPort': fromPort, + 'ToPort': toPort, + 'IpRanges': [grant] + } + elif 'CidrIpv6' in grant: + permission = {'IpProtocol': rule['IpProtocol'], + 'FromPort': fromPort, + 'ToPort': toPort, + 'Ipv6Ranges': [grant] + } + return fix_port_and_protocol(permission) + + +def serialize_ip_grant(rule, thisip, ethertype): + permission = {'IpProtocol': rule['proto'], + 'FromPort': rule['from_port'], + 'ToPort': rule['to_port']} + if ethertype == "ipv4": + permission['IpRanges'] = [{'CidrIp': thisip}] + elif ethertype == "ipv6": + permission['Ipv6Ranges'] = [{'CidrIpv6': thisip}] + + return fix_port_and_protocol(permission) + + +def fix_port_and_protocol(permission): + for key in ['FromPort', 'ToPort']: + if key in permission: + if permission[key] is None: + del permission[key] + else: + permission[key] = int(permission[key]) + + permission['IpProtocol'] = str(permission['IpProtocol']) + + return permission + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update(dict( + name=dict(), + group_id=dict(), + description=dict(), + vpc_id=dict(), + rules=dict(type='list'), + rules_egress=dict(type='list'), + state=dict(default='present', type='str', choices=['present', 'absent']), + purge_rules=dict(default=True, required=False, type='bool'), + purge_rules_egress=dict(default=True, required=False, type='bool'), + tags=dict(required=False, type='dict', aliases=['resource_tags']), + purge_tags=dict(default=True, required=False, type='bool') + ) + ) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_one_of=[['name', 'group_id']], + required_if=[['state', 'present', ['name']]], + ) + + if not HAS_BOTO3: + module.fail_json(msg='boto3 required for this module') + + name = module.params['name'] + group_id = module.params['group_id'] + description = module.params['description'] + vpc_id = module.params['vpc_id'] + rules = deduplicate_rules_args(rules_expand_sources(rules_expand_ports(module.params['rules']))) + rules_egress = deduplicate_rules_args(rules_expand_sources(rules_expand_ports(module.params['rules_egress']))) + state = module.params.get('state') + purge_rules = module.params['purge_rules'] + purge_rules_egress = module.params['purge_rules_egress'] + tags = module.params['tags'] + purge_tags = module.params['purge_tags'] + + if state == 'present' and not description: + module.fail_json(msg='Must provide description when state is present.') + + changed = False + region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True) + if not region: + module.fail_json(msg="The 
AWS region must be specified as an " + "environment variable or in the AWS credentials " + "profile.") + client = boto3_conn(module, conn_type='client', resource='ec2', endpoint=ec2_url, region=region, **aws_connect_params) + group = None + groups = dict() + security_groups = [] + # do get all security groups + # find if the group is present + try: + response = get_security_groups_with_backoff(client) + security_groups = response.get('SecurityGroups', []) + except botocore.exceptions.NoCredentialsError as e: + module.fail_json(msg="Error in describe_security_groups: %s" % "Unable to locate credentials", exception=traceback.format_exc()) + except botocore.exceptions.ClientError as e: + module.fail_json(msg="Error in describe_security_groups: %s" % e, exception=traceback.format_exc(), + **camel_dict_to_snake_dict(e.response)) + + for sg in security_groups: + groups[sg['GroupId']] = sg + groupName = sg['GroupName'] + if groupName in groups: + # Prioritise groups from the current VPC + # even if current VPC is EC2-Classic + if groups[groupName].get('VpcId') == vpc_id: + # Group saved already matches current VPC, change nothing + pass + elif vpc_id is None and groups[groupName].get('VpcId') is None: + # We're in EC2 classic, and the group already saved is as well + # No VPC groups can be used alongside EC2 classic groups + pass + else: + # the current SG stored has no direct match, so we can replace it + groups[groupName] = sg + else: + groups[groupName] = sg + + if group_id and sg['GroupId'] == group_id: + group = sg + elif groupName == name and (vpc_id is None or sg.get('VpcId') == vpc_id): + group = sg + + # Ensure requested group is absent + if state == 'absent': + if group: + # found a match, delete it + try: + if not module.check_mode: + client.delete_security_group(GroupId=group['GroupId']) + except botocore.exceptions.ClientError as e: + module.fail_json(msg="Unable to delete security group '%s' - %s" % (group, e), + exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) + else: + group = None + changed = True + else: + # no match found, no changes required + pass + + # Ensure requested group is present + elif state == 'present': + if group: + # existing group + if group['Description'] != description: + module.warn("Group description does not match existing group. Descriptions cannot be changed without deleting " + "and re-creating the security group. Try using state=absent to delete, then rerunning this task.") + + # if the group doesn't exist, create it now + else: + # no match found, create it + if not module.check_mode: + params = dict(GroupName=name, Description=description) + if vpc_id: + params['VpcId'] = vpc_id + group = client.create_security_group(**params) + # When a group is created, an egress_rule ALLOW ALL + # to 0.0.0.0/0 is added automatically but it's not + # reflected in the object returned by the AWS API + # call. 
We re-read the group for getting an updated object + # amazon sometimes takes a couple seconds to update the security group so wait till it exists + while True: + group = get_security_groups_with_backoff(client, GroupIds=[group['GroupId']])['SecurityGroups'][0] + if group.get('VpcId') and not group.get('IpPermissionsEgress'): + pass + else: + break + + changed = True + + if tags is not None: + current_tags = boto3_tag_list_to_ansible_dict(group.get('Tags', [])) + tags_need_modify, tags_to_delete = compare_aws_tags(current_tags, tags, purge_tags) + if tags_to_delete: + try: + client.delete_tags(Resources=[group['GroupId']], Tags=[{'Key': tag} for tag in tags_to_delete]) + except botocore.exceptions.ClientError as e: + module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) + changed = True + + # Add/update tags + if tags_need_modify: + try: + client.create_tags(Resources=[group['GroupId']], Tags=ansible_dict_to_boto3_tag_list(tags_need_modify)) + except botocore.exceptions.ClientError as e: + module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) + changed = True + + else: + module.fail_json(msg="Unsupported state requested: %s" % state) + + # create a lookup for all existing rules on the group + ip_permission = [] + if group: + # Manage ingress rules + groupRules = {} + add_rules_to_lookup(group['IpPermissions'], group['GroupId'], 'in', groupRules) + # Now, go through all provided rules and ensure they are there. + if rules is not None: + for rule in rules: + validate_rule(module, rule) + group_id, ip, ipv6, target_group_created = get_target_from_rule(module, client, rule, name, + group, groups, vpc_id) + if target_group_created: + changed = True + + if rule['proto'] in ('all', '-1', -1): + rule['proto'] = -1 + rule['from_port'] = None + rule['to_port'] = None + + if group_id: + rule_id = make_rule_key('in', rule, group['GroupId'], group_id) + if rule_id in groupRules: + del groupRules[rule_id] + else: + if not module.check_mode: + ip_permission = serialize_group_grant(group_id, rule) + if ip_permission: + ips = ip_permission + if vpc_id: + [useridpair.update({'VpcId': vpc_id}) for useridpair in + ip_permission.get('UserIdGroupPairs', [])] + try: + client.authorize_security_group_ingress(GroupId=group['GroupId'], IpPermissions=[ips]) + except botocore.exceptions.ClientError as e: + module.fail_json( + msg="Unable to authorize ingress for group %s security group '%s' - %s" % + (group_id, group['GroupName'], e), + exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) + changed = True + elif ip: + # Convert ip to list we can iterate over + if ip and not isinstance(ip, list): + ip = [ip] + + changed, ip_permission = authorize_ip("in", changed, client, group, groupRules, ip, ip_permission, + module, rule, "ipv4") + elif ipv6: + # Convert ip to list we can iterate over + if not isinstance(ipv6, list): + ipv6 = [ipv6] + # If rule already exists, don't later delete it + changed, ip_permission = authorize_ip("in", changed, client, group, groupRules, ipv6, ip_permission, + module, rule, "ipv6") + # Finally, remove anything left in the groupRules -- these will be defunct rules + if purge_rules: + for (rule, grant) in groupRules.values(): + ip_permission = serialize_revoke(grant, rule) + if not module.check_mode: + try: + client.revoke_security_group_ingress(GroupId=group['GroupId'], IpPermissions=[ip_permission]) + except botocore.exceptions.ClientError as e: + module.fail_json( + 
msg="Unable to revoke ingress for security group '%s' - %s" % + (group['GroupName'], e), + exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) + changed = True + + # Manage egress rules + groupRules = {} + add_rules_to_lookup(group['IpPermissionsEgress'], group['GroupId'], 'out', groupRules) + # Now, go through all provided rules and ensure they are there. + if rules_egress is not None: + for rule in rules_egress: + validate_rule(module, rule) + group_id, ip, ipv6, target_group_created = get_target_from_rule(module, client, rule, name, + group, groups, vpc_id) + if target_group_created: + changed = True + + if rule['proto'] in ('all', '-1', -1): + rule['proto'] = -1 + rule['from_port'] = None + rule['to_port'] = None + + if group_id: + rule_id = make_rule_key('out', rule, group['GroupId'], group_id) + if rule_id in groupRules: + del groupRules[rule_id] + else: + if not module.check_mode: + ip_permission = serialize_group_grant(group_id, rule) + if ip_permission: + ips = ip_permission + if vpc_id: + [useridpair.update({'VpcId': vpc_id}) for useridpair in + ip_permission.get('UserIdGroupPairs', [])] + try: + client.authorize_security_group_egress(GroupId=group['GroupId'], IpPermissions=[ips]) + except botocore.exceptions.ClientError as e: + module.fail_json( + msg="Unable to authorize egress for group %s security group '%s' - %s" % + (group_id, group['GroupName'], e), + exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) + changed = True + elif ip: + # Convert ip to list we can iterate over + if not isinstance(ip, list): + ip = [ip] + changed, ip_permission = authorize_ip("out", changed, client, group, groupRules, ip, + ip_permission, module, rule, "ipv4") + elif ipv6: + # Convert ip to list we can iterate over + if not isinstance(ipv6, list): + ipv6 = [ipv6] + # If rule already exists, don't later delete it + changed, ip_permission = authorize_ip("out", changed, client, group, groupRules, ipv6, + ip_permission, module, rule, "ipv6") + elif vpc_id is not None: + # when no egress rules are specified and we're in a VPC, + # we add in a default allow all out rule, which was the + # default behavior before egress rules were added + default_egress_rule = 'out--1-None-None-' + group['GroupId'] + '-0.0.0.0/0' + if default_egress_rule not in groupRules: + if not module.check_mode: + ip_permission = [{'IpProtocol': '-1', + 'IpRanges': [{'CidrIp': '0.0.0.0/0'}] + } + ] + try: + client.authorize_security_group_egress(GroupId=group['GroupId'], IpPermissions=ip_permission) + except botocore.exceptions.ClientError as e: + module.fail_json(msg="Unable to authorize egress for ip %s security group '%s' - %s" % + ('0.0.0.0/0', + group['GroupName'], + e), + exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) + changed = True + else: + # make sure the default egress rule is not removed + del groupRules[default_egress_rule] + + # Finally, remove anything left in the groupRules -- these will be defunct rules + if purge_rules_egress and vpc_id is not None: + for (rule, grant) in groupRules.values(): + # we shouldn't be revoking 0.0.0.0 egress + if grant != '0.0.0.0/0': + ip_permission = serialize_revoke(grant, rule) + if not module.check_mode: + try: + client.revoke_security_group_egress(GroupId=group['GroupId'], IpPermissions=[ip_permission]) + except botocore.exceptions.ClientError as e: + module.fail_json(msg="Unable to revoke egress for ip %s security group '%s' - %s" % + (grant, group['GroupName'], e), + exception=traceback.format_exc(), 
**camel_dict_to_snake_dict(e.response)) + changed = True + + if group: + security_group = get_security_groups_with_backoff(client, GroupIds=[group['GroupId']])['SecurityGroups'][0] + security_group = camel_dict_to_snake_dict(security_group) + security_group['tags'] = boto3_tag_list_to_ansible_dict(security_group.get('tags', []), + tag_name_key_name='key', tag_value_key_name='value') + module.exit_json(changed=changed, **security_group) + else: + module.exit_json(changed=changed, group_id=None) + + +if __name__ == '__main__': + main() diff --git a/roles/nuage_master/handlers/main.yaml b/roles/nuage_master/handlers/main.yaml index cb83c8ead..7b55dda56 100644 --- a/roles/nuage_master/handlers/main.yaml +++ b/roles/nuage_master/handlers/main.yaml @@ -1,6 +1,6 @@ --- - name: restart master api - systemd: name={{ openshift.common.service_type }}-master-api state=restarted + systemd: name={{ openshift_service_type }}-master-api state=restarted when: > (openshift_master_ha | bool) and (not master_api_service_status_changed | default(false)) @@ -8,7 +8,7 @@ # TODO: need to fix up ignore_errors here # We retry the controllers because the API may not be 100% initialized yet. - name: restart master controllers - command: "systemctl restart {{ openshift.common.service_type }}-master-controllers" + command: "systemctl restart {{ openshift_service_type }}-master-controllers" retries: 3 delay: 5 register: result diff --git a/roles/nuage_node/handlers/main.yaml b/roles/nuage_node/handlers/main.yaml index e68ae74bd..ede6f2125 100644 --- a/roles/nuage_node/handlers/main.yaml +++ b/roles/nuage_node/handlers/main.yaml @@ -1,7 +1,7 @@ --- - name: restart node become: yes - systemd: name={{ openshift.common.service_type }}-node daemon-reload=yes state=restarted + systemd: name={{ openshift_service_type }}-node daemon-reload=yes state=restarted - name: save iptable rules become: yes diff --git a/roles/nuage_node/vars/main.yaml b/roles/nuage_node/vars/main.yaml index fdf01b7c2..88d62de49 100644 --- a/roles/nuage_node/vars/main.yaml +++ b/roles/nuage_node/vars/main.yaml @@ -23,5 +23,5 @@ cni_conf_dir: "/etc/cni/net.d/" cni_bin_dir: "/opt/cni/bin/" nuage_plugin_crt_dir: /usr/share/vsp-openshift -openshift_atomic_node_config_file: /etc/sysconfig/{{ openshift.common.service_type }}-node +openshift_atomic_node_config_file: /etc/sysconfig/{{ openshift_service_type }}-node nuage_atomic_docker_additional_mounts: "NUAGE_ADDTL_BIND_MOUNTS=-v /var/usr/share/vsp-openshift:/var/usr/share/vsp-openshift -v /etc/default:/etc/default -v /var/run:/var/run -v /opt/cni/bin:/opt/cni/bin -v /etc/cni/net.d:/etc/cni/net.d" diff --git a/roles/openshift_atomic/README.md b/roles/openshift_atomic/README.md deleted file mode 100644 index 8c10c9991..000000000 --- a/roles/openshift_atomic/README.md +++ /dev/null @@ -1,28 +0,0 @@ -OpenShift Atomic -================ - -This role houses atomic specific tasks. 
- -Requirements ------------- - -Role Variables --------------- - -Dependencies ------------- - -Example Playbook ----------------- - -``` -- name: Ensure atomic proxies are defined - hosts: localhost - roles: - - role: openshift_atomic -``` - -License -------- - -Apache License Version 2.0 diff --git a/roles/openshift_atomic/meta/main.yml b/roles/openshift_atomic/meta/main.yml deleted file mode 100644 index ea129f514..000000000 --- a/roles/openshift_atomic/meta/main.yml +++ /dev/null @@ -1,13 +0,0 @@ ---- -galaxy_info: - author: OpenShift - description: Atomic related tasks - company: Red Hat, Inc - license: ASL 2.0 - min_ansible_version: 2.2 - platforms: - - name: EL - versions: - - 7 -dependencies: -- role: lib_openshift diff --git a/roles/openshift_aws/defaults/main.yml b/roles/openshift_aws/defaults/main.yml index 42ef22846..74e5d1dde 100644 --- a/roles/openshift_aws/defaults/main.yml +++ b/roles/openshift_aws/defaults/main.yml @@ -6,9 +6,7 @@ openshift_aws_create_security_groups: True openshift_aws_create_launch_config: True openshift_aws_create_scale_group: True -openshift_aws_current_version: '' -openshift_aws_new_version: '' - +openshift_aws_node_group_upgrade: False openshift_aws_wait_for_ssh: True openshift_aws_clusterid: default @@ -19,7 +17,6 @@ openshift_aws_build_ami_group: "{{ openshift_aws_clusterid }}" openshift_aws_iam_cert_name: "{{ openshift_aws_clusterid }}-master-external" openshift_aws_iam_cert_path: '' openshift_aws_iam_cert_key_path: '' -openshift_aws_scale_group_basename: "{{ openshift_aws_clusterid }} openshift" openshift_aws_iam_role_name: openshift_node_describe_instances openshift_aws_iam_role_policy_json: "{{ lookup('file', 'describeinstances.json') }}" @@ -34,14 +31,12 @@ openshift_aws_ami_name: openshift-gi openshift_aws_base_ami_name: ami_base openshift_aws_launch_config_bootstrap_token: '' -openshift_aws_launch_config_basename: "{{ openshift_aws_clusterid }}" openshift_aws_users: [] openshift_aws_ami_tags: bootstrap: "true" openshift-created: "true" - clusterid: "{{ openshift_aws_clusterid }}" parent: "{{ openshift_aws_base_ami | default('unknown') }}" openshift_aws_s3_mode: create @@ -124,6 +119,20 @@ openshift_aws_ami_map: infra: "{{ openshift_aws_ami }}" compute: "{{ openshift_aws_ami }}" +openshift_aws_master_group: +- name: "{{ openshift_aws_clusterid }} master group" + group: master + +openshift_aws_node_groups: +- name: "{{ openshift_aws_clusterid }} compute group" + group: compute +- name: "{{ openshift_aws_clusterid }} infra group" + group: infra + +openshift_aws_created_asgs: [] +openshift_aws_current_asgs: [] + +# these will be used during upgrade openshift_aws_master_group_config: # The 'master' key is always required here. 
master: @@ -139,7 +148,6 @@ openshift_aws_master_group_config: host-type: master sub-host-type: default runtime: docker - version: "{{ openshift_aws_new_version }}" wait_for_instances: True termination_policy: "{{ openshift_aws_node_group_termination_policy }}" replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}" @@ -163,7 +171,6 @@ openshift_aws_node_group_config: host-type: node sub-host-type: compute runtime: docker - version: "{{ openshift_aws_new_version }}" termination_policy: "{{ openshift_aws_node_group_termination_policy }}" replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}" iam_role: "{{ openshift_aws_iam_role_name }}" @@ -183,7 +190,6 @@ openshift_aws_node_group_config: host-type: node sub-host-type: infra runtime: docker - version: "{{ openshift_aws_new_version }}" termination_policy: "{{ openshift_aws_node_group_termination_policy }}" replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}" iam_role: "{{ openshift_aws_iam_role_name }}" @@ -283,21 +289,4 @@ openshift_aws_node_run_bootstrap_startup: True openshift_aws_node_user_data: '' openshift_aws_node_config_namespace: openshift-node -openshift_aws_node_groups: nodes - openshift_aws_masters_groups: masters,etcd,nodes - -# If creating extra node groups, you'll need to define all of the following - -# The format is the same as openshift_aws_node_group_config, but the top-level -# key names should be different (ie, not == master or infra). -# openshift_aws_node_group_config_extra: {} - -# This variable should look like openshift_aws_launch_config_security_groups -# and contain a one-to-one mapping of top level keys that are defined in -# openshift_aws_node_group_config_extra. -# openshift_aws_launch_config_security_groups_extra: {} - -# openshift_aws_node_security_groups_extra: {} - -# openshift_aws_ami_map_extra: {} diff --git a/roles/openshift_aws/filter_plugins/openshift_aws_filters.py b/roles/openshift_aws/filter_plugins/openshift_aws_filters.py index e707abd3f..dfcb11da3 100644 --- a/roles/openshift_aws/filter_plugins/openshift_aws_filters.py +++ b/roles/openshift_aws/filter_plugins/openshift_aws_filters.py @@ -4,11 +4,43 @@ Custom filters for use in openshift_aws ''' +from ansible import errors + class FilterModule(object): ''' Custom ansible filters for use by openshift_aws role''' @staticmethod + def scale_groups_serial(scale_group_info, upgrade=False): + ''' This function will determine what the deployment serial should be and return it + + Search through the tags and find the deployment_serial tag. Once found, + determine if an increment is needed during an upgrade. 
+ if upgrade is true then increment the serial and return it + else return the serial + ''' + if scale_group_info == []: + return 1 + + scale_group_info = scale_group_info[0] + + if not isinstance(scale_group_info, dict): + raise errors.AnsibleFilterError("|filter plugin failed: Expected scale_group_info to be a dict") + + serial = None + + for tag in scale_group_info['tags']: + if tag['key'] == 'deployment_serial': + serial = int(tag['value']) + if upgrade: + serial += 1 + break + else: + raise errors.AnsibleFilterError("|filter plugin failed: deployment_serial tag was not found") + + return serial + + @staticmethod def scale_groups_match_capacity(scale_group_info): ''' This function will verify that the scale group instance count matches the scale group desired capacity @@ -38,4 +70,5 @@ class FilterModule(object): def filters(self): ''' returns a mapping of filters to methods ''' return {'build_instance_tags': self.build_instance_tags, - 'scale_groups_match_capacity': self.scale_groups_match_capacity} + 'scale_groups_match_capacity': self.scale_groups_match_capacity, + 'scale_groups_serial': self.scale_groups_serial} diff --git a/roles/openshift_aws/tasks/accept_nodes.yml b/roles/openshift_aws/tasks/accept_nodes.yml index ae320962f..c2a2cea30 100644 --- a/roles/openshift_aws/tasks/accept_nodes.yml +++ b/roles/openshift_aws/tasks/accept_nodes.yml @@ -1,6 +1,6 @@ --- - name: fetch masters - ec2_remote_facts: + ec2_instance_facts: region: "{{ openshift_aws_region | default('us-east-1') }}" filters: "{{ {'tag:kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid, @@ -11,7 +11,7 @@ until: "'instances' in mastersout and mastersout.instances|length > 0" - name: fetch new node instances - ec2_remote_facts: + ec2_instance_facts: region: "{{ openshift_aws_region }}" filters: "{{ {'tag:kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid, @@ -22,9 +22,14 @@ delay: 3 until: "'instances' in instancesout and instancesout.instances|length > 0" -- debug: +- name: Dump the private dns names + debug: msg: "{{ instancesout.instances|map(attribute='private_dns_name') | list }}" +- name: Dump the master public ip address + debug: + msg: "{{ mastersout.instances[0].public_ip_address }}" + - name: approve nodes oc_adm_csr: #approve_all: True diff --git a/roles/openshift_aws/tasks/build_node_group.yml b/roles/openshift_aws/tasks/build_node_group.yml index 2c1e88cfb..7fb617dd5 100644 --- a/roles/openshift_aws/tasks/build_node_group.yml +++ b/roles/openshift_aws/tasks/build_node_group.yml @@ -1,6 +1,4 @@ --- -# This task file expects l_nodes_to_build to be passed in. - # When openshift_aws_use_custom_ami is '' then # we retrieve the latest build AMI. # Then set openshift_aws_ami to the ami. @@ -26,6 +24,35 @@ # Need to set epoch time in one place to use for launch_config and scale_group - set_fact: l_epoch_time: "{{ ansible_date_time.epoch }}" +# +# query asg's and determine if we need to create the others. 
+# if we find more than 1 for each type, then exit +- name: query all asg's for this cluster + ec2_asg_facts: + region: "{{ openshift_aws_region }}" + tags: "{{ {'kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid} | combine(l_node_group_config[openshift_aws_node_group.group].tags) }}" + register: asgs + +- fail: + msg: "Found more than 1 auto scaling group that matches the query for group: {{ openshift_aws_node_group }}" + when: + - asgs.results|length > 1 + +- debug: + msg: "{{ asgs }}" + +- name: set the value for the deployment_serial and the current asgs + set_fact: + l_deployment_serial: "{{ openshift_aws_node_group_deployment_serial if openshift_aws_node_group_deployment_serial is defined else asgs.results | scale_groups_serial(openshift_aws_node_group_upgrade) }}" + openshift_aws_current_asgs: "{{ asgs.results | map(attribute='auto_scaling_group_name') | list | union(openshift_aws_current_asgs) }}" + +- name: dump deployment serial + debug: + msg: "Deployment serial: {{ l_deployment_serial }}" + +- name: dump current_asgs + debug: + msg: "openshift_aws_current_asgs: {{ openshift_aws_current_asgs }}" - when: openshift_aws_create_iam_role include_tasks: iam_role.yml diff --git a/roles/openshift_aws/tasks/iam_role.yml b/roles/openshift_aws/tasks/iam_role.yml index d9910d938..cf3bb28fb 100644 --- a/roles/openshift_aws/tasks/iam_role.yml +++ b/roles/openshift_aws/tasks/iam_role.yml @@ -13,11 +13,10 @@ ##### - name: Create an iam role iam_role: - name: "{{ item.value.iam_role }}" + name: "{{ l_node_group_config[openshift_aws_node_group.group].iam_role }}" assume_role_policy_document: "{{ lookup('file','trustpolicy.json') }}" state: "{{ openshift_aws_iam_role_state | default('present') }}" - when: item.value.iam_role is defined - with_dict: "{{ l_nodes_to_build }}" + when: l_node_group_config[openshift_aws_node_group.group].iam_role is defined ##### # The second part of this task file is linking the role to a policy @@ -28,9 +27,8 @@ - name: create an iam policy iam_policy: iam_type: role - iam_name: "{{ item.value.iam_role }}" - policy_json: "{{ item.value.policy_json }}" - policy_name: "{{ item.value.policy_name }}" + iam_name: "{{ l_node_group_config[openshift_aws_node_group.group].iam_role }}" + policy_json: "{{ l_node_group_config[openshift_aws_node_group.group].policy_json }}" + policy_name: "{{ l_node_group_config[openshift_aws_node_group.group].policy_name }}" state: "{{ openshift_aws_iam_role_state | default('present') }}" - when: item.value.iam_role is defined - with_dict: "{{ l_nodes_to_build }}" + when: "'iam_role' in l_node_group_config[openshift_aws_node_group.group]" diff --git a/roles/openshift_aws/tasks/launch_config.yml b/roles/openshift_aws/tasks/launch_config.yml index fed80b7eb..6f78ee00a 100644 --- a/roles/openshift_aws/tasks/launch_config.yml +++ b/roles/openshift_aws/tasks/launch_config.yml @@ -1,15 +1,26 @@ --- -- fail: - msg: "Ensure that an AMI value is defined for openshift_aws_ami or openshift_aws_launch_config_custom_image." - when: - - openshift_aws_ami is undefined +- name: fetch the security groups for launch config + ec2_group_facts: + filters: + group-name: "{{ openshift_aws_launch_config_security_groups[openshift_aws_node_group.group] }}" + vpc-id: "{{ vpcout.vpcs[0].id }}" + region: "{{ openshift_aws_region }}" + register: ec2sgs -- fail: - msg: "Ensure that openshift_deployment_type is defined." 
- when: - - openshift_deployment_type is undefined - -- include_tasks: launch_config_create.yml - with_dict: "{{ l_nodes_to_build }}" - loop_control: - loop_var: launch_config_item +# Create the scale group config +- name: Create the node scale group launch config + ec2_lc: + name: "{{ openshift_aws_node_group.name }}-{{ openshift_aws_ami_map[openshift_aws_node_group.group] | default(openshift_aws_ami) }}-{{ l_epoch_time }}" + region: "{{ openshift_aws_region }}" + image_id: "{{ openshift_aws_ami_map[openshift_aws_node_group.group] | default(openshift_aws_ami) }}" + instance_type: "{{ l_node_group_config[openshift_aws_node_group.group].instance_type }}" + security_groups: "{{ openshift_aws_launch_config_security_group_id | default(ec2sgs.security_groups | map(attribute='group_id')| list) }}" + instance_profile_name: "{{ l_node_group_config[openshift_aws_node_group.group].iam_role if l_node_group_config[openshift_aws_node_group.group].iam_role is defined and + l_node_group_config[openshift_aws_node_group.group].iam_role != '' and + openshift_aws_create_iam_role + else omit }}" + user_data: "{{ lookup('template', 'user_data.j2') }}" + key_name: "{{ openshift_aws_ssh_key_name }}" + ebs_optimized: False + volumes: "{{ l_node_group_config[openshift_aws_node_group.group].volumes }}" + assign_public_ip: True diff --git a/roles/openshift_aws/tasks/launch_config_create.yml b/roles/openshift_aws/tasks/launch_config_create.yml deleted file mode 100644 index f7f0f0953..000000000 --- a/roles/openshift_aws/tasks/launch_config_create.yml +++ /dev/null @@ -1,26 +0,0 @@ ---- -- name: fetch the security groups for launch config - ec2_group_facts: - filters: - group-name: "{{ l_launch_config_security_groups[launch_config_item.key] }}" - vpc-id: "{{ vpcout.vpcs[0].id }}" - region: "{{ openshift_aws_region }}" - register: ec2sgs - -# Create the scale group config -- name: Create the node scale group launch config - ec2_lc: - name: "{{ openshift_aws_launch_config_basename }}-{{ launch_config_item.key }}{{'-' ~ openshift_aws_new_version if openshift_aws_new_version != '' else '' }}" - region: "{{ openshift_aws_region }}" - image_id: "{{ l_aws_ami_map[launch_config_item.key] | default(openshift_aws_ami) }}" - instance_type: "{{ launch_config_item.value.instance_type }}" - security_groups: "{{ openshift_aws_launch_config_security_group_id | default(ec2sgs.security_groups | map(attribute='group_id')| list) }}" - instance_profile_name: "{{ launch_config_item.value.iam_role if launch_config_item.value.iam_role is defined and - launch_config_item.value.iam_role != '' and - openshift_aws_create_iam_role - else omit }}" - user_data: "{{ lookup('template', 'user_data.j2') }}" - key_name: "{{ openshift_aws_ssh_key_name }}" - ebs_optimized: False - volumes: "{{ launch_config_item.value.volumes }}" - assign_public_ip: True diff --git a/roles/openshift_aws/tasks/provision.yml b/roles/openshift_aws/tasks/provision.yml index 06f649343..786a2e4cf 100644 --- a/roles/openshift_aws/tasks/provision.yml +++ b/roles/openshift_aws/tasks/provision.yml @@ -20,13 +20,14 @@ - name: include scale group creation for master include_tasks: build_node_group.yml + with_items: "{{ openshift_aws_master_group }}" vars: - l_nodes_to_build: "{{ openshift_aws_master_group_config }}" - l_launch_config_security_groups: "{{ openshift_aws_launch_config_security_groups }}" - l_aws_ami_map: "{{ openshift_aws_ami_map }}" + l_node_group_config: "{{ openshift_aws_master_group_config }}" + loop_control: + loop_var: openshift_aws_node_group - name: fetch newly 
created instances - ec2_remote_facts: + ec2_instance_facts: region: "{{ openshift_aws_region }}" filters: "tag:clusterid": "{{ openshift_aws_clusterid }}" diff --git a/roles/openshift_aws/tasks/provision_instance.yml b/roles/openshift_aws/tasks/provision_instance.yml index 8cc75cd0c..696b323c0 100644 --- a/roles/openshift_aws/tasks/provision_instance.yml +++ b/roles/openshift_aws/tasks/provision_instance.yml @@ -27,7 +27,7 @@ Name: "{{ openshift_aws_base_ami_name }}" - name: fetch newly created instances - ec2_remote_facts: + ec2_instance_facts: region: "{{ openshift_aws_region }}" filters: "tag:Name": "{{ openshift_aws_base_ami_name }}" diff --git a/roles/openshift_aws/tasks/provision_nodes.yml b/roles/openshift_aws/tasks/provision_nodes.yml index 041ed0791..d82f18574 100644 --- a/roles/openshift_aws/tasks/provision_nodes.yml +++ b/roles/openshift_aws/tasks/provision_nodes.yml @@ -3,7 +3,7 @@ # bootstrap should be created on first master # need to fetch it and shove it into cloud data - name: fetch master instances - ec2_remote_facts: + ec2_instance_facts: region: "{{ openshift_aws_region }}" filters: "tag:clusterid": "{{ openshift_aws_clusterid }}" @@ -31,20 +31,15 @@ - name: include build compute and infra node groups include_tasks: build_node_group.yml + with_items: "{{ openshift_aws_node_groups }}" vars: - l_nodes_to_build: "{{ openshift_aws_node_group_config }}" - l_launch_config_security_groups: "{{ openshift_aws_launch_config_security_groups }}" - l_aws_ami_map: "{{ openshift_aws_ami_map }}" - -- name: include build node group for extra nodes - include_tasks: build_node_group.yml - when: openshift_aws_node_group_config_extra is defined - vars: - l_nodes_to_build: "{{ openshift_aws_node_group_config_extra | default({}) }}" - l_launch_config_security_groups: "{{ openshift_aws_launch_config_security_groups_extra }}" - l_aws_ami_map: "{{ openshift_aws_ami_map_extra }}" + l_node_group_config: "{{ openshift_aws_node_group_config }}" + loop_control: + loop_var: openshift_aws_node_group # instances aren't scaling fast enough here, we need to wait for them - when: openshift_aws_wait_for_ssh | bool name: wait for our new nodes to come up include_tasks: wait_for_groups.yml + vars: + created_asgs: "{{ openshift_aws_created_asgs }}" diff --git a/roles/openshift_aws/tasks/remove_scale_group.yml b/roles/openshift_aws/tasks/remove_scale_group.yml index 55d1af2b5..a01cde294 100644 --- a/roles/openshift_aws/tasks/remove_scale_group.yml +++ b/roles/openshift_aws/tasks/remove_scale_group.yml @@ -1,10 +1,13 @@ --- +# FIGURE OUT HOW TO REMOVE SCALE GROUPS +# use openshift_aws_current_asgs?? 
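(Editor's note, not part of the patch: the task below registers a looped `ec2_asg_facts` call, so `qasg` ends up as a list of per-item results, each carrying its own `results` list of ASG dicts. A minimal plain-Python sketch, using made-up data, of what the Jinja chain `qasg | json_query('results[*]') | sum(attribute='results', start=[])` produces:)

```python
# Illustrative only: hypothetical shape of a looped-register result.
qasg = {
    "results": [  # one entry per with_items item
        {"results": [{"auto_scaling_group_name": "default master group 1"}]},
        {"results": [{"auto_scaling_group_name": "default compute group 1"},
                     {"auto_scaling_group_name": "default infra group 1"}]},
    ]
}

# json_query('results[*]') selects the per-item result dicts ...
per_item = qasg["results"]
# ... and sum(attribute='results', start=[]) concatenates their inner ASG lists.
flat = sum((item["results"] for item in per_item), [])

print([asg["auto_scaling_group_name"] for asg in flat])
# ['default master group 1', 'default compute group 1', 'default infra group 1']
```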
- name: fetch the scale groups ec2_asg_facts: region: "{{ openshift_aws_region }}" + name: "^{{ item }}$" tags: - "{{ {'kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid, - 'version': openshift_aws_current_version} }}" + "{{ {'kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid} }}" + with_items: "{{ openshift_aws_current_asgs if openshift_aws_current_asgs != [] else openshift_aws_asgs_to_remove }}" register: qasg - name: remove non-master scale groups @@ -14,7 +17,7 @@ name: "{{ item.auto_scaling_group_name }}" when: "'master' not in item.auto_scaling_group_name" register: asg_results - with_items: "{{ qasg.results }}" + with_items: "{{ qasg | json_query('results[*]') | sum(attribute='results', start=[]) }}" async: 600 poll: 0 diff --git a/roles/openshift_aws/tasks/s3.yml b/roles/openshift_aws/tasks/s3.yml index 9cf37c840..ba70bcff6 100644 --- a/roles/openshift_aws/tasks/s3.yml +++ b/roles/openshift_aws/tasks/s3.yml @@ -1,6 +1,6 @@ --- - name: Create an s3 bucket - s3: + aws_s3: bucket: "{{ openshift_aws_s3_bucket_name }}" mode: "{{ openshift_aws_s3_mode }}" region: "{{ openshift_aws_region }}" diff --git a/roles/openshift_aws/tasks/scale_group.yml b/roles/openshift_aws/tasks/scale_group.yml index 30df7545d..3632f7ce9 100644 --- a/roles/openshift_aws/tasks/scale_group.yml +++ b/roles/openshift_aws/tasks/scale_group.yml @@ -1,20 +1,30 @@ --- +- name: set node group name + set_fact: + l_node_group_name: "{{ openshift_aws_node_group.name }} {{ l_deployment_serial }}" + - name: Create the scale group ec2_asg: - name: "{{ openshift_aws_scale_group_basename }} {{ item.key }}" - launch_config_name: "{{ openshift_aws_launch_config_basename }}-{{ item.key }}{{ '-' ~ openshift_aws_new_version if openshift_aws_new_version != '' else '' }}" - health_check_period: "{{ item.value.health_check.period }}" - health_check_type: "{{ item.value.health_check.type }}" - min_size: "{{ item.value.min_size }}" - max_size: "{{ item.value.max_size }}" - desired_capacity: "{{ item.value.desired_size }}" + name: "{{ l_node_group_name }}" + launch_config_name: "{{ openshift_aws_node_group.name }}-{{ openshift_aws_ami_map[openshift_aws_node_group.group] | default(openshift_aws_ami) }}-{{ l_epoch_time }}" + health_check_period: "{{ l_node_group_config[openshift_aws_node_group.group].health_check.period }}" + health_check_type: "{{ l_node_group_config[openshift_aws_node_group.group].health_check.type }}" + min_size: "{{ l_node_group_config[openshift_aws_node_group.group].min_size }}" + max_size: "{{ l_node_group_config[openshift_aws_node_group.group].max_size }}" + desired_capacity: "{{ l_node_group_config[openshift_aws_node_group.group].desired_size }}" region: "{{ openshift_aws_region }}" - termination_policies: "{{ item.value.termination_policy if 'termination_policy' in item.value else omit }}" - load_balancers: "{{ item.value.elbs if 'elbs' in item.value else omit }}" - wait_for_instances: "{{ item.value.wait_for_instances | default(False)}}" + termination_policies: "{{ l_node_group_config[openshift_aws_node_group.group].termination_policy if 'termination_policy' in l_node_group_config[openshift_aws_node_group.group] else omit }}" + load_balancers: "{{ l_node_group_config[openshift_aws_node_group.group].elbs if 'elbs' in l_node_group_config[openshift_aws_node_group.group] else omit }}" + wait_for_instances: "{{ l_node_group_config[openshift_aws_node_group.group].wait_for_instances | default(False)}}" vpc_zone_identifier: "{{ subnetout.subnets[0].id }}" 
replace_instances: "{{ openshift_aws_node_group_replace_instances if openshift_aws_node_group_replace_instances != [] else omit }}" - replace_all_instances: "{{ omit if openshift_aws_node_group_replace_instances != [] else (item.value.replace_all_instances | default(omit)) }}" + replace_all_instances: "{{ omit if openshift_aws_node_group_replace_instances != [] + else (l_node_group_config[openshift_aws_node_group.group].replace_all_instances | default(omit)) }}" tags: - - "{{ openshift_aws_node_group_config_tags | combine(item.value.tags) }}" - with_dict: "{{ l_nodes_to_build }}" + - "{{ openshift_aws_node_group_config_tags + | combine(l_node_group_config[openshift_aws_node_group.group].tags) + | combine({'deployment_serial': l_deployment_serial, 'ami': openshift_aws_ami_map[openshift_aws_node_group.group] | default(openshift_aws_ami)}) }}" + +- name: append the asg name to the openshift_aws_created_asgs fact + set_fact: + openshift_aws_created_asgs: "{{ [l_node_group_name] | union(openshift_aws_created_asgs) | list }}" diff --git a/roles/openshift_aws/tasks/seal_ami.yml b/roles/openshift_aws/tasks/seal_ami.yml index 7a3d0fb68..74877d5c7 100644 --- a/roles/openshift_aws/tasks/seal_ami.yml +++ b/roles/openshift_aws/tasks/seal_ami.yml @@ -1,6 +1,6 @@ --- - name: fetch newly created instances - ec2_remote_facts: + ec2_instance_facts: region: "{{ openshift_aws_region }}" filters: "tag:Name": "{{ openshift_aws_base_ami_name }}" @@ -12,7 +12,7 @@ - name: bundle ami ec2_ami: - instance_id: "{{ instancesout.instances.0.id }}" + instance_id: "{{ instancesout.instances.0.instance_id }}" region: "{{ openshift_aws_region }}" state: present description: "This was provisioned {{ ansible_date_time.iso8601 }}" @@ -46,4 +46,4 @@ ec2: state: absent region: "{{ openshift_aws_region }}" - instance_ids: "{{ instancesout.instances.0.id }}" + instance_ids: "{{ instancesout.instances.0.instance_id }}" diff --git a/roles/openshift_aws/tasks/security_group.yml b/roles/openshift_aws/tasks/security_group.yml index 43834079e..0568a0190 100644 --- a/roles/openshift_aws/tasks/security_group.yml +++ b/roles/openshift_aws/tasks/security_group.yml @@ -6,11 +6,27 @@ "tag:Name": "{{ openshift_aws_clusterid }}" register: vpcout -- include_tasks: security_group_create.yml - vars: - l_security_groups: "{{ openshift_aws_node_security_groups }}" +- name: create the node group sgs + oo_ec2_group: + name: "{{ item.value.name}}" + description: "{{ item.value.desc }}" + rules: "{{ item.value.rules if 'rules' in item.value else [] }}" + region: "{{ openshift_aws_region }}" + vpc_id: "{{ vpcout.vpcs[0].id }}" + with_dict: "{{ openshift_aws_node_security_groups }}" + +- name: create the k8s sgs for the node group + oo_ec2_group: + name: "{{ item.value.name }}_k8s" + description: "{{ item.value.desc }} for k8s" + region: "{{ openshift_aws_region }}" + vpc_id: "{{ vpcout.vpcs[0].id }}" + with_dict: "{{ openshift_aws_node_security_groups }}" + register: k8s_sg_create -- include_tasks: security_group_create.yml - when: openshift_aws_node_security_groups_extra is defined - vars: - l_security_groups: "{{ openshift_aws_node_security_groups_extra | default({}) }}" +- name: tag sg groups with proper tags + ec2_tag: + tags: "{{ openshift_aws_security_groups_tags }}" + resource: "{{ item.group_id }}" + region: "{{ openshift_aws_region }}" + with_items: "{{ k8s_sg_create.results }}" diff --git a/roles/openshift_aws/tasks/security_group_create.yml b/roles/openshift_aws/tasks/security_group_create.yml deleted file mode 100644 index 
ef6060555..000000000 --- a/roles/openshift_aws/tasks/security_group_create.yml +++ /dev/null @@ -1,25 +0,0 @@ ---- -- name: create the node group sgs - ec2_group: - name: "{{ item.value.name}}" - description: "{{ item.value.desc }}" - rules: "{{ item.value.rules if 'rules' in item.value else [] }}" - region: "{{ openshift_aws_region }}" - vpc_id: "{{ vpcout.vpcs[0].id }}" - with_dict: "{{ l_security_groups }}" - -- name: create the k8s sgs for the node group - ec2_group: - name: "{{ item.value.name }}_k8s" - description: "{{ item.value.desc }} for k8s" - region: "{{ openshift_aws_region }}" - vpc_id: "{{ vpcout.vpcs[0].id }}" - with_dict: "{{ l_security_groups }}" - register: k8s_sg_create - -- name: tag sg groups with proper tags - ec2_tag: - tags: "{{ openshift_aws_security_groups_tags }}" - resource: "{{ item.group_id }}" - region: "{{ openshift_aws_region }}" - with_items: "{{ k8s_sg_create.results }}" diff --git a/roles/openshift_aws/tasks/setup_master_group.yml b/roles/openshift_aws/tasks/setup_master_group.yml index 05b68f460..700917ef4 100644 --- a/roles/openshift_aws/tasks/setup_master_group.yml +++ b/roles/openshift_aws/tasks/setup_master_group.yml @@ -8,7 +8,7 @@ msg: "openshift_aws_region={{ openshift_aws_region }}" - name: fetch newly created instances - ec2_remote_facts: + ec2_instance_facts: region: "{{ openshift_aws_region }}" filters: "tag:clusterid": "{{ openshift_aws_clusterid }}" @@ -19,11 +19,13 @@ delay: 3 until: instancesout.instances|length > 0 +- debug: var=instancesout + - name: add new master to masters group add_host: groups: "{{ openshift_aws_masters_groups }}" name: "{{ item.public_dns_name }}" - hostname: "{{ openshift_aws_clusterid }}-master-{{ item.id[:-5] }}" + hostname: "{{ openshift_aws_clusterid }}-master-{{ item.instance_id[:-5] }}" with_items: "{{ instancesout.instances }}" - name: wait for ssh to become available diff --git a/roles/openshift_aws/tasks/setup_scale_group_facts.yml b/roles/openshift_aws/tasks/setup_scale_group_facts.yml index d65fdc2de..14c5246c9 100644 --- a/roles/openshift_aws/tasks/setup_scale_group_facts.yml +++ b/roles/openshift_aws/tasks/setup_scale_group_facts.yml @@ -1,11 +1,15 @@ --- -- name: group scale group nodes - ec2_remote_facts: +- name: fetch all created instances + ec2_instance_facts: region: "{{ openshift_aws_region }}" filters: - "{{ {'tag:kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid }}}" + "{{ {'tag:kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid, + 'instance-state-name': 'running'} }}" register: qinstances +# The building of new and current groups is dependent of having a list of the current asgs and the created ones +# that can be found in the variables: openshift_aws_created_asgs, openshift_aws_current_asgs. If these do not exist, we cannot determine which hosts are +# new and which hosts are current. 
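(Editor's note, not part of the patch: the two `add_host` tasks that follow split instances into new and current node groups by comparing the `aws:autoscaling:groupName` tag against `openshift_aws_created_asgs` and `openshift_aws_current_asgs`. A minimal sketch of that selection logic in plain Python, with hypothetical instance data:)

```python
# Made-up example data; the real values come from ec2_instance_facts and the
# openshift_aws_created_asgs / openshift_aws_current_asgs facts.
created_asgs = ["default compute group 2"]
current_asgs = ["default compute group 1"]

instances = [
    {"public_dns_name": "ec2-a.example.com",
     "tags": {"aws:autoscaling:groupName": "default compute group 2", "host-type": "node"}},
    {"public_dns_name": "ec2-b.example.com",
     "tags": {"aws:autoscaling:groupName": "default compute group 1", "host-type": "node"}},
]

def in_group(instance, asg_names):
    # Mirrors the task's when clause: the ASG tag must exist, match one of the
    # given group names, and the host-type tag must contain 'node'.
    tags = instance["tags"]
    return ("aws:autoscaling:groupName" in tags
            and tags["aws:autoscaling:groupName"] in asg_names
            and "node" in tags["host-type"])

new_nodes = [i["public_dns_name"] for i in instances if created_asgs and in_group(i, created_asgs)]
current_nodes = [i["public_dns_name"] for i in instances if current_asgs and in_group(i, current_asgs)]
# new_nodes     -> added to oo_sg_new_nodes
# current_nodes -> added to oo_sg_current_nodes
```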
- name: Build new node group add_host: groups: oo_sg_new_nodes @@ -13,10 +17,16 @@ name: "{{ item.public_dns_name }}" hostname: "{{ item.public_dns_name }}" when: - - (item.tags.version | default(False)) == openshift_aws_new_version + - openshift_aws_created_asgs != [] + - "'aws:autoscaling:groupName' in item.tags" + - item.tags['aws:autoscaling:groupName'] in openshift_aws_created_asgs - "'node' in item.tags['host-type']" with_items: "{{ qinstances.instances }}" +- name: dump openshift_aws_current_asgs + debug: + msg: "{{ openshift_aws_current_asgs }}" + - name: Build current node group add_host: groups: oo_sg_current_nodes @@ -24,7 +34,9 @@ name: "{{ item.public_dns_name }}" hostname: "{{ item.public_dns_name }}" when: - - (item.tags.version | default('')) == openshift_aws_current_version + - openshift_aws_current_asgs != [] + - "'aws:autoscaling:groupName' in item.tags" + - item.tags['aws:autoscaling:groupName'] in openshift_aws_current_asgs - "'node' in item.tags['host-type']" with_items: "{{ qinstances.instances }}" diff --git a/roles/openshift_aws/tasks/upgrade_node_group.yml b/roles/openshift_aws/tasks/upgrade_node_group.yml index c3f86f523..4f4730dd6 100644 --- a/roles/openshift_aws/tasks/upgrade_node_group.yml +++ b/roles/openshift_aws/tasks/upgrade_node_group.yml @@ -1,12 +1,22 @@ --- -- fail: - msg: 'Please ensure the current_version and new_version variables are not the same.' +- include_tasks: provision_nodes.yml + vars: + openshift_aws_node_group_upgrade: True when: - - openshift_aws_current_version == openshift_aws_new_version + - openshift_aws_upgrade_provision_nodes | default(True) -- include_tasks: provision_nodes.yml +- debug: var=openshift_aws_current_asgs +- debug: var=openshift_aws_created_asgs + +- name: fail if asg variables aren't set + fail: + msg: "Please ensure that openshift_aws_created_asgs and openshift_aws_current_asgs are defined." + when: + - openshift_aws_created_asgs == [] + - openshift_aws_current_asgs == [] - include_tasks: accept_nodes.yml + when: openshift_aws_upgrade_accept_nodes | default(True) - include_tasks: setup_scale_group_facts.yml diff --git a/roles/openshift_aws/tasks/wait_for_groups.yml b/roles/openshift_aws/tasks/wait_for_groups.yml index 9f1a68a2a..1f4ef3e1c 100644 --- a/roles/openshift_aws/tasks/wait_for_groups.yml +++ b/roles/openshift_aws/tasks/wait_for_groups.yml @@ -1,31 +1,41 @@ --- # The idea here is to wait until all scale groups are at # their desired capacity before continuing. -- name: fetch the scale groups +# This is accomplished with a custom filter_plugin and until clause +- name: "fetch the scale groups" ec2_asg_facts: region: "{{ openshift_aws_region }}" tags: - "{{ {'kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid} }}" + "{{ {'kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid } }}" register: qasg - until: qasg.results | scale_groups_match_capacity | bool + until: qasg | json_query('results[*]') | scale_groups_match_capacity | bool delay: 10 retries: 60 +- debug: var=openshift_aws_created_asgs + +# how do we gaurantee the instances are up? 
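(Editor's note, not part of the patch: the next task answers that question by polling `ec2_instance_facts` for running instances in the created ASGs with `until`/`retries`/`delay`. A small Python sketch of the same wait pattern, assuming a hypothetical `fetch_running_instances()` helper:)

```python
import time

def wait_for_instances(fetch_running_instances, retries=60, delay=5):
    """Poll until at least one running instance is returned, mirroring
    `until: instancesout.instances|length > 0` with retries=60, delay=5."""
    for _ in range(retries):
        instances = fetch_running_instances()
        if len(instances) > 0:
            return instances
        time.sleep(delay)
    raise RuntimeError("instances never reached the running state")
```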
- name: fetch newly created instances - ec2_remote_facts: + ec2_instance_facts: region: "{{ openshift_aws_region }}" filters: "{{ {'tag:kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid, - 'tag:version': openshift_aws_new_version} }}" + 'tag:aws:autoscaling:groupName': item, + 'instance-state-name': 'running'} }}" + with_items: "{{ openshift_aws_created_asgs if openshift_aws_created_asgs != [] else qasg | sum(attribute='results', start=[]) }}" register: instancesout until: instancesout.instances|length > 0 delay: 5 retries: 60 +- name: dump instances + debug: + msg: "{{ instancesout.results | sum(attribute='instances', start=[]) }}" + - name: wait for ssh to become available wait_for: port: 22 host: "{{ item.public_ip_address }}" timeout: 300 search_regex: OpenSSH - with_items: "{{ instancesout.instances }}" + with_items: "{{ instancesout.results | sum(attribute='instances', start=[]) }}" diff --git a/roles/openshift_aws/templates/user_data.j2 b/roles/openshift_aws/templates/user_data.j2 index fe0fe83d4..bda1334cd 100644 --- a/roles/openshift_aws/templates/user_data.j2 +++ b/roles/openshift_aws/templates/user_data.j2 @@ -7,8 +7,8 @@ write_files: owner: 'root:root' permissions: '0640' content: | - openshift_group_type: {{ launch_config_item.key }} -{% if launch_config_item.key != 'master' %} + openshift_group_type: {{ openshift_aws_node_group.group }} +{% if openshift_aws_node_group.group != 'master' %} - path: /etc/origin/node/bootstrap.kubeconfig owner: 'root:root' permissions: '0640' @@ -19,7 +19,7 @@ runcmd: {% if openshift_aws_node_run_bootstrap_startup %} - [ ansible-playbook, /root/openshift_bootstrap/bootstrap.yml] {% endif %} -{% if launch_config_item.key != 'master' %} +{% if openshift_aws_node_group.group != 'master' %} - [ systemctl, restart, NetworkManager] - [ systemctl, enable, {% if openshift_deployment_type == 'openshift-enterprise' %}atomic-openshift{% else %}origin{% endif %}-node] - [ systemctl, start, {% if openshift_deployment_type == 'openshift-enterprise' %}atomic-openshift{% else %}origin{% endif %}-node] diff --git a/roles/openshift_builddefaults/tasks/main.yml b/roles/openshift_builddefaults/tasks/main.yml index e0b51eee0..612b6522d 100644 --- a/roles/openshift_builddefaults/tasks/main.yml +++ b/roles/openshift_builddefaults/tasks/main.yml @@ -4,11 +4,6 @@ role: builddefaults # TODO: add ability to define builddefaults env vars sort of like this # may need to move the config generation to a filter however. 
- # openshift_env: "{{ hostvars - # | oo_merge_hostvars(vars, inventory_hostname) - # | oo_openshift_env }}" - # openshift_env_structures: - # - 'openshift.builddefaults.env.*' local_facts: http_proxy: "{{ openshift_builddefaults_http_proxy | default(None) }}" https_proxy: "{{ openshift_builddefaults_https_proxy | default(None) }}" diff --git a/roles/openshift_ca/meta/main.yml b/roles/openshift_ca/meta/main.yml index f8b784a63..81b49ce60 100644 --- a/roles/openshift_ca/meta/main.yml +++ b/roles/openshift_ca/meta/main.yml @@ -14,3 +14,4 @@ galaxy_info: - system dependencies: - role: openshift_cli +- role: openshift_facts diff --git a/roles/openshift_ca/tasks/main.yml b/roles/openshift_ca/tasks/main.yml index 05e0a1352..eb00f13db 100644 --- a/roles/openshift_ca/tasks/main.yml +++ b/roles/openshift_ca/tasks/main.yml @@ -9,7 +9,7 @@ - name: Install the base package for admin tooling package: - name: "{{ openshift.common.service_type }}{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}" + name: "{{ openshift_service_type }}{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}" state: present when: not openshift.common.is_containerized | bool register: install_result diff --git a/roles/openshift_cli/library/openshift_container_binary_sync.py b/roles/openshift_cli/library/openshift_container_binary_sync.py index 08045794a..440b8ec28 100644 --- a/roles/openshift_cli/library/openshift_container_binary_sync.py +++ b/roles/openshift_cli/library/openshift_container_binary_sync.py @@ -27,7 +27,7 @@ class BinarySyncError(Exception): # pylint: disable=too-few-public-methods,too-many-instance-attributes class BinarySyncer(object): """ - Syncs the openshift, oc, oadm, and kubectl binaries/symlinks out of + Syncs the openshift, oc, and kubectl binaries/symlinks out of a container onto the host system. """ @@ -108,7 +108,10 @@ class BinarySyncer(object): # Ensure correct symlinks created: self._sync_symlink('kubectl', 'openshift') - self._sync_symlink('oadm', 'openshift') + + # Remove old oadm binary + if os.path.exists(os.path.join(self.bin_dir, 'oadm')): + os.remove(os.path.join(self.bin_dir, 'oadm')) def _sync_symlink(self, binary_name, link_to): """ Ensure the given binary name exists and links to the expected binary. 
""" diff --git a/roles/openshift_cli/tasks/main.yml b/roles/openshift_cli/tasks/main.yml index 140c6ea26..888aa8f0c 100644 --- a/roles/openshift_cli/tasks/main.yml +++ b/roles/openshift_cli/tasks/main.yml @@ -1,6 +1,6 @@ --- - name: Install clients - package: name={{ openshift.common.service_type }}-clients state=present + package: name={{ openshift_service_type }}-clients{{ openshift_pkg_version | default('') }} state=present when: not openshift.common.is_containerized | bool register: result until: result | success diff --git a/roles/openshift_docker_gc/templates/dockergc-ds.yaml.j2 b/roles/openshift_docker_gc/templates/dockergc-ds.yaml.j2 index 53e8b448b..3d51abc52 100644 --- a/roles/openshift_docker_gc/templates/dockergc-ds.yaml.j2 +++ b/roles/openshift_docker_gc/templates/dockergc-ds.yaml.j2 @@ -5,7 +5,7 @@ items: kind: ServiceAccount metadata: name: dockergc - # You must grant privileged via: oadm policy add-scc-to-user -z dockergc privileged + # You must grant privileged via: oc adm policy add-scc-to-user -z dockergc privileged # in order for the dockergc to access the docker socket and root directory - apiVersion: extensions/v1beta1 kind: DaemonSet diff --git a/roles/openshift_examples/examples-sync.sh b/roles/openshift_examples/examples-sync.sh index 1d5fba990..68a0e8857 100755 --- a/roles/openshift_examples/examples-sync.sh +++ b/roles/openshift_examples/examples-sync.sh @@ -5,7 +5,7 @@ # # This script should be run from openshift-ansible/roles/openshift_examples -XPAAS_VERSION=ose-v1.4.6 +XPAAS_VERSION=ose-v1.4.7 ORIGIN_VERSION=${1:-v3.7} RHAMP_TAG=2.0.0.GA EXAMPLES_BASE=$(pwd)/files/examples/${ORIGIN_VERSION} diff --git a/roles/openshift_examples/files/examples/latest b/roles/openshift_examples/files/examples/latest new file mode 120000 index 000000000..8cad94b63 --- /dev/null +++ b/roles/openshift_examples/files/examples/latest @@ -0,0 +1 @@ +v3.9
\ No newline at end of file diff --git a/roles/openshift_examples/files/examples/v3.7/v3.8 b/roles/openshift_examples/files/examples/v3.7/v3.8 deleted file mode 120000 index 8ddcf661c..000000000 --- a/roles/openshift_examples/files/examples/v3.7/v3.8 +++ /dev/null @@ -1 +0,0 @@ -v3.8
\ No newline at end of file diff --git a/roles/openshift_examples/files/examples/v3.8/db-templates/mariadb-persistent-template.json b/roles/openshift_examples/files/examples/v3.8/db-templates/mariadb-persistent-template.json index 217ef11dd..92be8f42e 100644 --- a/roles/openshift_examples/files/examples/v3.8/db-templates/mariadb-persistent-template.json +++ b/roles/openshift_examples/files/examples/v3.8/db-templates/mariadb-persistent-template.json @@ -4,7 +4,7 @@ "metadata": { "name": "mariadb-persistent", "annotations": { - "openshift.io/display-name": "MariaDB (Persistent)", + "openshift.io/display-name": "MariaDB", "description": "MariaDB database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/blob/master/10.1/README.md.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.", "iconClass": "icon-mariadb", "tags": "database,mariadb", diff --git a/roles/openshift_examples/files/examples/v3.8/db-templates/mongodb-persistent-template.json b/roles/openshift_examples/files/examples/v3.8/db-templates/mongodb-persistent-template.json index 97e4128a4..4e3e64d48 100644 --- a/roles/openshift_examples/files/examples/v3.8/db-templates/mongodb-persistent-template.json +++ b/roles/openshift_examples/files/examples/v3.8/db-templates/mongodb-persistent-template.json @@ -4,7 +4,7 @@ "metadata": { "name": "mongodb-persistent", "annotations": { - "openshift.io/display-name": "MongoDB (Persistent)", + "openshift.io/display-name": "MongoDB", "description": "MongoDB database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/blob/master/3.2/README.md.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.", "iconClass": "icon-mongodb", "tags": "database,mongodb", diff --git a/roles/openshift_examples/files/examples/v3.8/db-templates/mysql-persistent-template.json b/roles/openshift_examples/files/examples/v3.8/db-templates/mysql-persistent-template.json index 48ac114fd..6ac80f3a0 100644 --- a/roles/openshift_examples/files/examples/v3.8/db-templates/mysql-persistent-template.json +++ b/roles/openshift_examples/files/examples/v3.8/db-templates/mysql-persistent-template.json @@ -4,7 +4,7 @@ "metadata": { "name": "mysql-persistent", "annotations": { - "openshift.io/display-name": "MySQL (Persistent)", + "openshift.io/display-name": "MySQL", "description": "MySQL database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mysql-container/blob/master/5.7/README.md.\n\nNOTE: Scaling to more than one replica is not supported. 
You must have persistent volumes available in your cluster to use this template.", "iconClass": "icon-mysql-database", "tags": "database,mysql", diff --git a/roles/openshift_examples/files/examples/v3.8/db-templates/postgresql-persistent-template.json b/roles/openshift_examples/files/examples/v3.8/db-templates/postgresql-persistent-template.json index 8a2d23907..190509112 100644 --- a/roles/openshift_examples/files/examples/v3.8/db-templates/postgresql-persistent-template.json +++ b/roles/openshift_examples/files/examples/v3.8/db-templates/postgresql-persistent-template.json @@ -4,7 +4,7 @@ "metadata": { "name": "postgresql-persistent", "annotations": { - "openshift.io/display-name": "PostgreSQL (Persistent)", + "openshift.io/display-name": "PostgreSQL", "description": "PostgreSQL database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/blob/master/9.5.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.", "iconClass": "icon-postgresql", "tags": "database,postgresql", diff --git a/roles/openshift_examples/files/examples/v3.8/db-templates/redis-persistent-template.json b/roles/openshift_examples/files/examples/v3.8/db-templates/redis-persistent-template.json index e0e0a88d5..d1103d3af 100644 --- a/roles/openshift_examples/files/examples/v3.8/db-templates/redis-persistent-template.json +++ b/roles/openshift_examples/files/examples/v3.8/db-templates/redis-persistent-template.json @@ -4,7 +4,7 @@ "metadata": { "name": "redis-persistent", "annotations": { - "openshift.io/display-name": "Redis (Persistent)", + "openshift.io/display-name": "Redis", "description": "Redis in-memory data structure store, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/redis-container/blob/master/3.2.\n\nNOTE: You must have persistent volumes available in your cluster to use this template.", "iconClass": "icon-redis", "tags": "database,redis", diff --git a/roles/openshift_examples/files/examples/v3.8/image-streams/image-streams-centos7.json b/roles/openshift_examples/files/examples/v3.8/image-streams/image-streams-centos7.json index e7af160d9..0aa586061 100644 --- a/roles/openshift_examples/files/examples/v3.8/image-streams/image-streams-centos7.json +++ b/roles/openshift_examples/files/examples/v3.8/image-streams/image-streams-centos7.json @@ -407,7 +407,7 @@ "annotations": { "openshift.io/display-name": "Python (Latest)", "openshift.io/provider-display-name": "Red Hat, Inc.", - "description": "Build and run Python applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.5/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Python available on OpenShift, including major versions updates.", + "description": "Build and run Python applications on CentOS 7. 
For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.6/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Python available on OpenShift, including major versions updates.", "iconClass": "icon-python", "tags": "builder,python", "supports":"python", @@ -415,7 +415,7 @@ }, "from": { "kind": "ImageStreamTag", - "name": "3.5" + "name": "3.6" } }, { @@ -485,6 +485,23 @@ "kind": "DockerImage", "name": "centos/python-35-centos7:latest" } + }, + { + "name": "3.6", + "annotations": { + "openshift.io/display-name": "Python 3.6", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and run Python 3.6 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.6/README.md.", + "iconClass": "icon-python", + "tags": "builder,python", + "supports":"python:3.6,python", + "version": "3.6", + "sampleRepo": "https://github.com/openshift/django-ex.git" + }, + "from": { + "kind": "DockerImage", + "name": "centos/python-36-centos7:latest" + } } ] } diff --git a/roles/openshift_examples/files/examples/v3.8/image-streams/image-streams-rhel7.json b/roles/openshift_examples/files/examples/v3.8/image-streams/image-streams-rhel7.json index 2b082fc75..7ab9f4e59 100644 --- a/roles/openshift_examples/files/examples/v3.8/image-streams/image-streams-rhel7.json +++ b/roles/openshift_examples/files/examples/v3.8/image-streams/image-streams-rhel7.json @@ -407,7 +407,7 @@ "annotations": { "openshift.io/display-name": "Python (Latest)", "openshift.io/provider-display-name": "Red Hat, Inc.", - "description": "Build and run Python applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.5/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Python available on OpenShift, including major versions updates.", + "description": "Build and run Python applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.6/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Python available on OpenShift, including major versions updates.", "iconClass": "icon-python", "tags": "builder,python", "supports":"python", @@ -415,7 +415,7 @@ }, "from": { "kind": "ImageStreamTag", - "name": "3.5" + "name": "3.6" } }, { @@ -485,6 +485,23 @@ "kind": "DockerImage", "name": "registry.access.redhat.com/rhscl/python-35-rhel7:latest" } + }, + { + "name": "3.6", + "annotations": { + "openshift.io/display-name": "Python 3.6", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and run Python 3.6 applications on RHEL 7. 
For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.6/README.md.", + "iconClass": "icon-python", + "tags": "builder,python", + "supports":"python:3.6,python", + "version": "3.6", + "sampleRepo": "https://github.com/openshift/django-ex.git" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/rhscl/python-36-rhel7:latest" + } } ] } diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/cakephp-mysql-persistent.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/cakephp-mysql-persistent.json index 86ddc184a..40b4eaa81 100644 --- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/cakephp-mysql-persistent.json +++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/cakephp-mysql-persistent.json @@ -4,7 +4,7 @@ "metadata": { "name": "cakephp-mysql-persistent", "annotations": { - "openshift.io/display-name": "CakePHP + MySQL (Persistent)", + "openshift.io/display-name": "CakePHP + MySQL", "description": "An example CakePHP application with a MySQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/cakephp-ex/blob/master/README.md.", "tags": "quickstart,php,cakephp", "iconClass": "icon-php", @@ -17,7 +17,8 @@ }, "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/cake-ex/blob/master/README.md.", "labels": { - "template": "cakephp-mysql-persistent" + "template": "cakephp-mysql-persistent", + "app": "cakephp-mysql-persistent" }, "objects": [ { diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/cakephp-mysql.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/cakephp-mysql.json index 3c964bd6a..ecd90e495 100644 --- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/cakephp-mysql.json +++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/cakephp-mysql.json @@ -17,7 +17,8 @@ }, "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/cake-ex/blob/master/README.md.", "labels": { - "template": "cakephp-mysql-example" + "template": "cakephp-mysql-example", + "app": "cakephp-mysql-example" }, "objects": [ { diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/dancer-mysql-persistent.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/dancer-mysql-persistent.json index 0a10c5fbc..17a155600 100644 --- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/dancer-mysql-persistent.json +++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/dancer-mysql-persistent.json @@ -4,7 +4,7 @@ "metadata": { "name": "dancer-mysql-persistent", "annotations": { - "openshift.io/display-name": "Dancer + MySQL (Persistent)", + "openshift.io/display-name": "Dancer + MySQL", "description": "An example Dancer application with a MySQL database. 
For more information about using this template, including OpenShift considerations, see https://github.com/openshift/dancer-ex/blob/master/README.md.", "tags": "quickstart,perl,dancer", "iconClass": "icon-perl", @@ -17,7 +17,8 @@ }, "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/dancer-ex/blob/master/README.md.", "labels": { - "template": "dancer-mysql-persistent" + "template": "dancer-mysql-persistent", + "app": "dancer-mysql-persistent" }, "objects": [ { diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/dancer-mysql.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/dancer-mysql.json index 6122d5436..abf711535 100644 --- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/dancer-mysql.json +++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/dancer-mysql.json @@ -17,7 +17,8 @@ }, "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/dancer-ex/blob/master/README.md.", "labels": { - "template": "dancer-mysql-example" + "template": "dancer-mysql-example", + "app": "dancer-mysql-example" }, "objects": [ { diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/django-postgresql-persistent.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/django-postgresql-persistent.json index f3b5838fa..c8dab0b53 100644 --- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/django-postgresql-persistent.json +++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/django-postgresql-persistent.json @@ -4,7 +4,7 @@ "metadata": { "name": "django-psql-persistent", "annotations": { - "openshift.io/display-name": "Django + PostgreSQL (Persistent)", + "openshift.io/display-name": "Django + PostgreSQL", "description": "An example Django application with a PostgreSQL database. 
For more information about using this template, including OpenShift considerations, see https://github.com/openshift/django-ex/blob/master/README.md.", "tags": "quickstart,python,django", "iconClass": "icon-python", @@ -17,7 +17,8 @@ }, "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/django-ex/blob/master/README.md.", "labels": { - "template": "django-psql-persistent" + "template": "django-psql-persistent", + "app": "django-psql-persistent" }, "objects": [ { diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/django-postgresql.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/django-postgresql.json index b21295df2..6395defda 100644 --- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/django-postgresql.json +++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/django-postgresql.json @@ -17,7 +17,8 @@ }, "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/django-ex/blob/master/README.md.", "labels": { - "template": "django-psql-example" + "template": "django-psql-example", + "app": "django-psql-example" }, "objects": [ { diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/httpd.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/httpd.json index 3771280bf..d7cab4889 100644 --- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/httpd.json +++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/httpd.json @@ -17,7 +17,8 @@ }, "message": "The following service(s) have been created in your project: ${NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/httpd-ex/blob/master/README.md.", "labels": { - "template": "httpd-example" + "template": "httpd-example", + "app": "httpd-example" }, "objects": [ { diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/jenkins-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/jenkins-ephemeral-template.json index 28b4b9d81..6186bba10 100644 --- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/jenkins-ephemeral-template.json +++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/jenkins-ephemeral-template.json @@ -15,6 +15,9 @@ } }, "message": "A Jenkins service has been created in your project. Log into Jenkins with your OpenShift account. 
The tutorial at https://github.com/openshift/origin/blob/master/examples/jenkins/README.md contains more information about using this template.", + "labels": { + "app": "jenkins-ephemeral" + }, "objects": [ { "kind": "Route", diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/jenkins-persistent-template.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/jenkins-persistent-template.json index 4915bb12c..59dc0d22b 100644 --- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/jenkins-persistent-template.json +++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/jenkins-persistent-template.json @@ -4,7 +4,7 @@ "metadata": { "name": "jenkins-persistent", "annotations": { - "openshift.io/display-name": "Jenkins (Persistent)", + "openshift.io/display-name": "Jenkins", "description": "Jenkins service, with persistent storage.\n\nNOTE: You must have persistent volumes available in your cluster to use this template.", "iconClass": "icon-jenkins", "tags": "instant-app,jenkins", @@ -15,6 +15,9 @@ } }, "message": "A Jenkins service has been created in your project. Log into Jenkins with your OpenShift account. The tutorial at https://github.com/openshift/origin/blob/master/examples/jenkins/README.md contains more information about using this template.", + "labels": { + "app": "jenkins-persistent" + }, "objects": [ { "kind": "Route", diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/nodejs-mongodb-persistent.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/nodejs-mongodb-persistent.json index 7f2a5d804..f04adaa67 100644 --- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/nodejs-mongodb-persistent.json +++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/nodejs-mongodb-persistent.json @@ -4,7 +4,7 @@ "metadata": { "name": "nodejs-mongo-persistent", "annotations": { - "openshift.io/display-name": "Node.js + MongoDB (Persistent)", + "openshift.io/display-name": "Node.js + MongoDB", "description": "An example Node.js application with a MongoDB database. 
For more information about using this template, including OpenShift considerations, see https://github.com/openshift/nodejs-ex/blob/master/README.md.", "tags": "quickstart,nodejs", "iconClass": "icon-nodejs", @@ -17,7 +17,8 @@ }, "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/nodejs-ex/blob/master/README.md.", "labels": { - "template": "nodejs-mongo-persistent" + "template": "nodejs-mongo-persistent", + "app": "nodejs-mongo-persistent" }, "objects": [ { diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/nodejs-mongodb.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/nodejs-mongodb.json index b3afae46e..0ce36dba5 100644 --- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/nodejs-mongodb.json +++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/nodejs-mongodb.json @@ -17,7 +17,8 @@ }, "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/nodejs-ex/blob/master/README.md.", "labels": { - "template": "nodejs-mongodb-example" + "template": "nodejs-mongodb-example", + "app": "nodejs-mongodb-example" }, "objects": [ { diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/rails-postgresql-persistent.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/rails-postgresql-persistent.json index 1c03be28a..10e9382cc 100644 --- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/rails-postgresql-persistent.json +++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/rails-postgresql-persistent.json @@ -4,7 +4,7 @@ "metadata": { "name": "rails-pgsql-persistent", "annotations": { - "openshift.io/display-name": "Rails + PostgreSQL (Persistent)", + "openshift.io/display-name": "Rails + PostgreSQL", "description": "An example Rails application with a PostgreSQL database. 
For more information about using this template, including OpenShift considerations, see https://github.com/openshift/rails-ex/blob/master/README.md.", "tags": "quickstart,ruby,rails", "iconClass": "icon-ruby", @@ -17,7 +17,8 @@ }, "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/rails-ex/blob/master/README.md.", "labels": { - "template": "rails-pgsql-persistent" + "template": "rails-pgsql-persistent", + "app": "rails-pgsql-persistent" }, "objects": [ { diff --git a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/rails-postgresql.json b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/rails-postgresql.json index 240289d33..8ec2c8ea6 100644 --- a/roles/openshift_examples/files/examples/v3.8/quickstart-templates/rails-postgresql.json +++ b/roles/openshift_examples/files/examples/v3.8/quickstart-templates/rails-postgresql.json @@ -17,7 +17,8 @@ }, "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/rails-ex/blob/master/README.md.", "labels": { - "template": "rails-postgresql-example" + "template": "rails-postgresql-example", + "app": "rails-postgresql-example" }, "objects": [ { diff --git a/roles/openshift_examples/files/examples/v3.9/latest b/roles/openshift_examples/files/examples/v3.9/latest deleted file mode 120000 index b9bc2fdcb..000000000 --- a/roles/openshift_examples/files/examples/v3.9/latest +++ /dev/null @@ -1 +0,0 @@ -latest
\ No newline at end of file diff --git a/roles/openshift_excluder/README.md b/roles/openshift_excluder/README.md index 80cb88d45..7b43d5adf 100644 --- a/roles/openshift_excluder/README.md +++ b/roles/openshift_excluder/README.md @@ -28,7 +28,7 @@ Role Variables | r_openshift_excluder_verify_upgrade | false | true, false | When upgrading, this variable should be set to true when calling the role | | r_openshift_excluder_package_state | present | present, latest | Use 'latest' to upgrade openshift_excluder package | | r_openshift_excluder_docker_package_state | present | present, latest | Use 'latest' to upgrade docker_excluder package | -| r_openshift_excluder_service_type | None | | (Required) Defined as openshift.common.service_type e.g. atomic-openshift | +| r_openshift_excluder_service_type | None | | (Required) Defined as openshift_service_type e.g. atomic-openshift | | r_openshift_excluder_upgrade_target | None | | Required when r_openshift_excluder_verify_upgrade is true, defined as openshift_upgrade_target by Upgrade playbooks e.g. '3.6'| Dependencies @@ -46,15 +46,12 @@ Example Playbook # Disable all excluders - role: openshift_excluder r_openshift_excluder_action: disable - r_openshift_excluder_service_type: "{{ openshift.common.service_type }}" # Enable all excluders - role: openshift_excluder r_openshift_excluder_action: enable - r_openshift_excluder_service_type: "{{ openshift.common.service_type }}" # Disable all excluders and verify appropriate excluder packages are available for upgrade - role: openshift_excluder r_openshift_excluder_action: disable - r_openshift_excluder_service_type: "{{ openshift.common.service_type }}" r_openshift_excluder_verify_upgrade: true r_openshift_excluder_upgrade_target: "{{ openshift_upgrade_target }}" r_openshift_excluder_package_state: latest diff --git a/roles/openshift_excluder/defaults/main.yml b/roles/openshift_excluder/defaults/main.yml index d4f151142..3a910e490 100644 --- a/roles/openshift_excluder/defaults/main.yml +++ b/roles/openshift_excluder/defaults/main.yml @@ -2,7 +2,7 @@ # keep the 'current' package or update to 'latest' if available? 
r_openshift_excluder_package_state: present r_openshift_excluder_docker_package_state: present - +r_openshift_excluder_service_type: "{{ openshift_service_type }}" # Legacy variables are included for backwards compatibility with v3.5 # Inventory variables Legacy # openshift_enable_excluders enable_excluders diff --git a/roles/openshift_excluder/meta/main.yml b/roles/openshift_excluder/meta/main.yml index 871081c19..a9653edda 100644 --- a/roles/openshift_excluder/meta/main.yml +++ b/roles/openshift_excluder/meta/main.yml @@ -12,4 +12,5 @@ galaxy_info: categories: - cloud dependencies: +- role: openshift_facts - role: lib_utils diff --git a/roles/openshift_excluder/tasks/main.yml b/roles/openshift_excluder/tasks/main.yml index 93d6ef149..f0e87ba25 100644 --- a/roles/openshift_excluder/tasks/main.yml +++ b/roles/openshift_excluder/tasks/main.yml @@ -19,11 +19,6 @@ msg: "openshift_excluder role can only be called with 'enable' or 'disable'" when: r_openshift_excluder_action not in ['enable', 'disable'] - - name: Fail if r_openshift_excluder_service_type is not defined - fail: - msg: "r_openshift_excluder_service_type must be specified for this role" - when: r_openshift_excluder_service_type is not defined - - name: Fail if r_openshift_excluder_upgrade_target is not defined fail: msg: "r_openshift_excluder_upgrade_target must be provided when using this role for upgrades" diff --git a/roles/openshift_facts/defaults/main.yml b/roles/openshift_facts/defaults/main.yml index a182d23c5..53a3bc87e 100644 --- a/roles/openshift_facts/defaults/main.yml +++ b/roles/openshift_facts/defaults/main.yml @@ -98,3 +98,9 @@ openshift_prometheus_alertbuffer_storage_create_pvc: False openshift_router_selector: "region=infra" openshift_hosted_router_selector: "{{ openshift_router_selector }}" openshift_hosted_registry_selector: "{{ openshift_router_selector }}" + +openshift_service_type_dict: + origin: origin + openshift-enterprise: atomic-openshift + +openshift_service_type: "{{ openshift_service_type_dict[openshift_deployment_type] }}" diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py index b371d347c..a10ba9310 100755 --- a/roles/openshift_facts/library/openshift_facts.py +++ b/roles/openshift_facts/library/openshift_facts.py @@ -94,8 +94,7 @@ def migrate_admission_plugin_facts(facts): # Merge existing kube_admission_plugin_config with admission_plugin_config. facts['master']['admission_plugin_config'] = merge_facts(facts['master']['admission_plugin_config'], facts['master']['kube_admission_plugin_config'], - additive_facts_to_overwrite=[], - protected_facts_to_overwrite=[]) + additive_facts_to_overwrite=[]) # Remove kube_admission_plugin_config fact facts['master'].pop('kube_admission_plugin_config', None) return facts @@ -459,7 +458,6 @@ def set_url_facts_if_unset(facts): etcd_urls = [] if etcd_hosts != '': facts['master']['etcd_port'] = ports['etcd'] - facts['master']['embedded_etcd'] = False for host in etcd_hosts: etcd_urls.append(format_url(use_ssl['etcd'], host, ports['etcd'])) @@ -538,7 +536,7 @@ def set_aggregate_facts(facts): def set_deployment_facts_if_unset(facts): """ Set Facts that vary based on deployment_type. This currently - includes common.service_type, master.registry_url, node.registry_url, + includes master.registry_url, node.registry_url, node.storage_plugin_deps Args: @@ -550,14 +548,6 @@ def set_deployment_facts_if_unset(facts): # disabled to avoid breaking up facts related to deployment type into # multiple methods for now. 
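The openshift_service_type default added to roles/openshift_facts/defaults/main.yml above replaces the old openshift.common.service_type fact with a plain dictionary lookup keyed on openshift_deployment_type, and the same value now feeds the clients package name in the openshift_cli change earlier in this diff (the health checks also read it directly). A minimal sketch of that resolution, assuming the two deployment types shown in the dict and an invented "-3.7.0" version suffix purely for illustration:

from jinja2 import Template

# Mirrors openshift_service_type_dict from roles/openshift_facts/defaults/main.yml
service_type_dict = {
    "origin": "origin",
    "openshift-enterprise": "atomic-openshift",
}

# The package name expression used by roles/openshift_cli/tasks/main.yml
clients_pkg = Template(
    "{{ openshift_service_type }}-clients{{ openshift_pkg_version | default('') }}"
)

for deployment_type in ("origin", "openshift-enterprise"):
    service_type = service_type_dict[deployment_type]
    # Without openshift_pkg_version, default('') leaves the name bare.
    print(clients_pkg.render(openshift_service_type=service_type))
    # With an assumed version suffix it is appended verbatim.
    print(clients_pkg.render(openshift_service_type=service_type,
                             openshift_pkg_version="-3.7.0"))

For origin this prints origin-clients and origin-clients-3.7.0; for openshift-enterprise, atomic-openshift-clients and atomic-openshift-clients-3.7.0.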
# pylint: disable=too-many-statements, too-many-branches - if 'common' in facts: - deployment_type = facts['common']['deployment_type'] - if 'service_type' not in facts['common']: - service_type = 'atomic-openshift' - if deployment_type == 'origin': - service_type = 'origin' - facts['common']['service_type'] = service_type - for role in ('master', 'node'): if role in facts: deployment_type = facts['common']['deployment_type'] @@ -854,7 +844,7 @@ values provided as a list. Hence the gratuitous use of ['foo'] below. # If we've added items to the kubelet_args dict then we need # to merge the new items back into the main facts object. if kubelet_args != {}: - facts = merge_facts({'node': {'kubelet_args': kubelet_args}}, facts, [], []) + facts = merge_facts({'node': {'kubelet_args': kubelet_args}}, facts, []) return facts @@ -876,7 +866,7 @@ def build_controller_args(facts): controller_args['cloud-provider'] = ['gce'] controller_args['cloud-config'] = [cloud_cfg_path + '/gce.conf'] if controller_args != {}: - facts = merge_facts({'master': {'controller_args': controller_args}}, facts, [], []) + facts = merge_facts({'master': {'controller_args': controller_args}}, facts, []) return facts @@ -898,7 +888,7 @@ def build_api_server_args(facts): api_server_args['cloud-provider'] = ['gce'] api_server_args['cloud-config'] = [cloud_cfg_path + '/gce.conf'] if api_server_args != {}: - facts = merge_facts({'master': {'api_server_args': api_server_args}}, facts, [], []) + facts = merge_facts({'master': {'api_server_args': api_server_args}}, facts, []) return facts @@ -1021,8 +1011,13 @@ def get_container_openshift_version(facts): If containerized, see if we can determine the installed version via the systemd environment files. """ + deployment_type = facts['common']['deployment_type'] + service_type_dict = {'origin': 'origin', + 'openshift-enterprise': 'atomic-openshift'} + service_type = service_type_dict[deployment_type] + for filename in ['/etc/sysconfig/%s-master-controllers', '/etc/sysconfig/%s-node']: - env_path = filename % facts['common']['service_type'] + env_path = filename % service_type if not os.path.exists(env_path): continue @@ -1085,7 +1080,7 @@ def apply_provider_facts(facts, provider_facts): # Disabling pylint too many branches. This function needs refactored # but is a very core part of openshift_facts. # pylint: disable=too-many-branches, too-many-nested-blocks -def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overwrite): +def merge_facts(orig, new, additive_facts_to_overwrite): """ Recursively merge facts dicts Args: @@ -1093,14 +1088,11 @@ def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overw new (dict): facts to update additive_facts_to_overwrite (list): additive facts to overwrite in jinja '.' notation ex: ['master.named_certificates'] - protected_facts_to_overwrite (list): protected facts to overwrite in jinja - '.' notation ex: ['master.master_count'] Returns: dict: the merged facts """ additive_facts = ['named_certificates'] - protected_facts = ['ha'] # Facts we do not ever want to merge. These originate in inventory variables # and contain JSON dicts. We don't ever want to trigger a merge @@ -1132,14 +1124,7 @@ def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overw if '.' in item and item.startswith(key + '.'): relevant_additive_facts.append(item) - # Collect the subset of protected facts to overwrite - # if key matches. These will be passed to the - # subsequent merge_facts call. 
- relevant_protected_facts = [] - for item in protected_facts_to_overwrite: - if '.' in item and item.startswith(key + '.'): - relevant_protected_facts.append(item) - facts[key] = merge_facts(value, new[key], relevant_additive_facts, relevant_protected_facts) + facts[key] = merge_facts(value, new[key], relevant_additive_facts) # Key matches an additive fact and we are not overwriting # it so we will append the new value to the existing value. elif key in additive_facts and key not in [x.split('.')[-1] for x in additive_facts_to_overwrite]: @@ -1149,18 +1134,6 @@ def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overw if item not in new_fact: new_fact.append(item) facts[key] = new_fact - # Key matches a protected fact and we are not overwriting - # it so we will determine if it is okay to change this - # fact. - elif key in protected_facts and key not in [x.split('.')[-1] for x in protected_facts_to_overwrite]: - # ha (bool) can not change unless it has been passed - # as a protected fact to overwrite. - if key == 'ha': - if safe_get_bool(value) != safe_get_bool(new[key]): - # pylint: disable=line-too-long - module.fail_json(msg='openshift_facts received a different value for openshift.master.ha') # noqa: F405 - else: - facts[key] = value # No other condition has been met. Overwrite the old fact # with the new value. else: @@ -1433,7 +1406,6 @@ def set_container_facts_if_unset(facts): facts['node']['ovs_system_image'] = ovs_image if safe_get_bool(facts['common']['is_containerized']): - facts['common']['admin_binary'] = '/usr/local/bin/oadm' facts['common']['client_binary'] = '/usr/local/bin/oc' return facts @@ -1494,8 +1466,6 @@ class OpenShiftFacts(object): local_facts (dict): local facts to set additive_facts_to_overwrite (list): additive facts to overwrite in jinja '.' notation ex: ['master.named_certificates'] - protected_facts_to_overwrite (list): protected facts to overwrite in jinja - '.' notation ex: ['master.master_count'] Raises: OpenShiftFactsUnsupportedRoleError: @@ -1511,10 +1481,7 @@ class OpenShiftFacts(object): # Disabling too-many-arguments, this should be cleaned up as a TODO item. # pylint: disable=too-many-arguments,no-value-for-parameter def __init__(self, role, filename, local_facts, - additive_facts_to_overwrite=None, - openshift_env=None, - openshift_env_structures=None, - protected_facts_to_overwrite=None): + additive_facts_to_overwrite=None): self.changed = False self.filename = filename if role not in self.known_roles: @@ -1536,34 +1503,23 @@ class OpenShiftFacts(object): self.system_facts = get_all_facts(module)['ansible_facts'] # noqa: F405 self.facts = self.generate_facts(local_facts, - additive_facts_to_overwrite, - openshift_env, - openshift_env_structures, - protected_facts_to_overwrite) + additive_facts_to_overwrite) def generate_facts(self, local_facts, - additive_facts_to_overwrite, - openshift_env, - openshift_env_structures, - protected_facts_to_overwrite): + additive_facts_to_overwrite): """ Generate facts Args: local_facts (dict): local_facts for overriding generated defaults additive_facts_to_overwrite (list): additive facts to overwrite in jinja '.' notation ex: ['master.named_certificates'] - openshift_env (dict): openshift_env facts for overriding generated defaults - protected_facts_to_overwrite (list): protected facts to overwrite in jinja - '.' 
notation ex: ['master.master_count'] Returns: dict: The generated facts """ + local_facts = self.init_local_facts(local_facts, - additive_facts_to_overwrite, - openshift_env, - openshift_env_structures, - protected_facts_to_overwrite) + additive_facts_to_overwrite) roles = local_facts.keys() if 'common' in local_facts and 'deployment_type' in local_facts['common']: @@ -1581,8 +1537,7 @@ class OpenShiftFacts(object): facts = apply_provider_facts(defaults, provider_facts) facts = merge_facts(facts, local_facts, - additive_facts_to_overwrite, - protected_facts_to_overwrite) + additive_facts_to_overwrite) facts = migrate_oauth_template_facts(facts) facts['current_config'] = get_current_config(facts) facts = set_url_facts_if_unset(facts) @@ -1627,7 +1582,7 @@ class OpenShiftFacts(object): hostname=hostname, public_hostname=hostname, portal_net='172.30.0.0/16', - client_binary='oc', admin_binary='oadm', + client_binary='oc', dns_domain='cluster.local', config_base='/etc/origin') @@ -1639,7 +1594,7 @@ class OpenShiftFacts(object): console_port='8443', etcd_use_ssl=True, etcd_hosts='', etcd_port='4001', portal_net='172.30.0.0/16', - embedded_etcd=True, embedded_kube=True, + embedded_kube=True, embedded_dns=True, bind_addr='0.0.0.0', session_max_seconds=3600, @@ -1732,65 +1687,17 @@ class OpenShiftFacts(object): ) return provider_facts - @staticmethod - def split_openshift_env_fact_keys(openshift_env_fact, openshift_env_structures): - """ Split openshift_env facts based on openshift_env structures. - - Args: - openshift_env_fact (string): the openshift_env fact to split - ex: 'openshift_cloudprovider_openstack_auth_url' - openshift_env_structures (list): a list of structures to determine fact keys - ex: ['openshift.cloudprovider.openstack.*'] - Returns: - list: a list of keys that represent the fact - ex: ['openshift', 'cloudprovider', 'openstack', 'auth_url'] - """ - # By default, we'll split an openshift_env fact by underscores. - fact_keys = openshift_env_fact.split('_') - - # Determine if any of the provided variable structures match the fact. - matching_structure = None - if openshift_env_structures is not None: - for structure in openshift_env_structures: - if re.match(structure, openshift_env_fact): - matching_structure = structure - # Fact didn't match any variable structures so return the default fact keys. - if matching_structure is None: - return fact_keys - - final_keys = [] - structure_keys = matching_structure.split('.') - for structure_key in structure_keys: - # Matched current key. Add to final keys. - if structure_key == fact_keys[structure_keys.index(structure_key)]: - final_keys.append(structure_key) - # Wildcard means we will be taking everything from here to the end of the fact. - elif structure_key == '*': - final_keys.append('_'.join(fact_keys[structure_keys.index(structure_key):])) - # Shouldn't have gotten here, return the fact keys. - else: - return fact_keys - return final_keys - # Disabling too-many-branches and too-many-locals. # This should be cleaned up as a TODO item. # pylint: disable=too-many-branches, too-many-locals def init_local_facts(self, facts=None, - additive_facts_to_overwrite=None, - openshift_env=None, - openshift_env_structures=None, - protected_facts_to_overwrite=None): + additive_facts_to_overwrite=None): """ Initialize the local facts Args: facts (dict): local facts to set additive_facts_to_overwrite (list): additive facts to overwrite in jinja '.' 
notation ex: ['master.named_certificates'] - openshift_env (dict): openshift env facts to set - protected_facts_to_overwrite (list): protected facts to overwrite in jinja - '.' notation ex: ['master.master_count'] - - Returns: dict: The result of merging the provided facts with existing local facts @@ -1802,45 +1709,13 @@ class OpenShiftFacts(object): if facts is not None: facts_to_set[self.role] = facts - if openshift_env != {} and openshift_env is not None: - for fact, value in iteritems(openshift_env): - oo_env_facts = dict() - current_level = oo_env_facts - keys = self.split_openshift_env_fact_keys(fact, openshift_env_structures)[1:] - - if len(keys) > 0 and keys[0] != self.role: - continue - - # Build a dictionary from the split fact keys. - # After this loop oo_env_facts is the resultant dictionary. - # For example: - # fact = "openshift_metrics_install_metrics" - # value = 'true' - # keys = ['metrics', 'install', 'metrics'] - # result = {'metrics': {'install': {'metrics': 'true'}}} - for i, _ in enumerate(keys): - # This is the last key. Set the value. - if i == (len(keys) - 1): - current_level[keys[i]] = value - # This is a key other than the last key. Set as - # dictionary and continue. - else: - current_level[keys[i]] = dict() - current_level = current_level[keys[i]] - - facts_to_set = merge_facts(orig=facts_to_set, - new=oo_env_facts, - additive_facts_to_overwrite=[], - protected_facts_to_overwrite=[]) - local_facts = get_local_facts_from_file(self.filename) migrated_facts = migrate_local_facts(local_facts) new_local_facts = merge_facts(migrated_facts, facts_to_set, - additive_facts_to_overwrite, - protected_facts_to_overwrite) + additive_facts_to_overwrite) new_local_facts = self.remove_empty_facts(new_local_facts) @@ -1948,9 +1823,6 @@ def main(): choices=OpenShiftFacts.known_roles), local_facts=dict(default=None, type='dict', required=False), additive_facts_to_overwrite=dict(default=[], type='list', required=False), - openshift_env=dict(default={}, type='dict', required=False), - openshift_env_structures=dict(default=[], type='list', required=False), - protected_facts_to_overwrite=dict(default=[], type='list', required=False) ), supports_check_mode=True, add_file_common_args=True, @@ -1966,19 +1838,13 @@ def main(): role = module.params['role'] # noqa: F405 local_facts = module.params['local_facts'] # noqa: F405 additive_facts_to_overwrite = module.params['additive_facts_to_overwrite'] # noqa: F405 - openshift_env = module.params['openshift_env'] # noqa: F405 - openshift_env_structures = module.params['openshift_env_structures'] # noqa: F405 - protected_facts_to_overwrite = module.params['protected_facts_to_overwrite'] # noqa: F405 fact_file = '/etc/ansible/facts.d/openshift.fact' openshift_facts = OpenShiftFacts(role, fact_file, local_facts, - additive_facts_to_overwrite, - openshift_env, - openshift_env_structures, - protected_facts_to_overwrite) + additive_facts_to_overwrite) file_params = module.params.copy() # noqa: F405 file_params['path'] = fact_file diff --git a/roles/openshift_health_checker/HOWTO_CHECKS.md b/roles/openshift_health_checker/HOWTO_CHECKS.md index 6c5662a4e..94961f2d4 100644 --- a/roles/openshift_health_checker/HOWTO_CHECKS.md +++ b/roles/openshift_health_checker/HOWTO_CHECKS.md @@ -12,7 +12,7 @@ Checks are typically implemented as two parts: The checks are called from Ansible playbooks via the `openshift_health_check` action plugin. 
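With the protected_facts_to_overwrite and openshift_env machinery removed above, merge_facts now only distinguishes additive facts (named_certificates) from everything else. The following is a condensed, illustrative sketch of that surviving behaviour, not the module's actual code; the certificate values are invented:

def merge_facts(orig, new, additive_facts_to_overwrite):
    """Sketch: recurse into dicts, append to additive facts unless they are
    explicitly listed for overwrite, otherwise take the new value."""
    additive_facts = ['named_certificates']
    facts = {}
    for key, value in orig.items():
        if key in new:
            if isinstance(value, dict) and isinstance(new[key], dict):
                relevant = [i for i in additive_facts_to_overwrite
                            if '.' in i and i.startswith(key + '.')]
                facts[key] = merge_facts(value, new[key], relevant)
            elif key in additive_facts and key not in [
                    x.split('.')[-1] for x in additive_facts_to_overwrite]:
                facts[key] = list(value) + [i for i in new[key] if i not in value]
            else:
                facts[key] = new[key]
        else:
            facts[key] = value
    for key in new:
        if key not in orig:
            facts[key] = new[key]
    return facts

orig = {'master': {'named_certificates': [{'certfile': 'a.crt'}]}}
new = {'master': {'named_certificates': [{'certfile': 'b.crt'}]}}
print(merge_facts(orig, new, []))                             # both certs kept (additive)
print(merge_facts(orig, new, ['master.named_certificates']))  # overwrite: only b.crt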
See -[playbooks/byo/openshift-preflight/check.yml](../../playbooks/byo/openshift-preflight/check.yml) +[playbooks/openshift-checks/pre-install.yml](../../playbooks/openshift-checks/pre-install.yml) for an example. The action plugin dynamically discovers all checks and executes only those diff --git a/roles/openshift_health_checker/defaults/main.yml b/roles/openshift_health_checker/defaults/main.yml new file mode 100644 index 000000000..f25a0dc79 --- /dev/null +++ b/roles/openshift_health_checker/defaults/main.yml @@ -0,0 +1,6 @@ +--- +openshift_service_type_dict: + origin: origin + openshift-enterprise: atomic-openshift + +openshift_service_type: "{{ openshift_service_type_dict[openshift_deployment_type] }}" diff --git a/roles/openshift_health_checker/openshift_checks/package_availability.py b/roles/openshift_health_checker/openshift_checks/package_availability.py index 090e438ff..980e23f27 100644 --- a/roles/openshift_health_checker/openshift_checks/package_availability.py +++ b/roles/openshift_health_checker/openshift_checks/package_availability.py @@ -15,7 +15,9 @@ class PackageAvailability(NotContainerizedMixin, OpenShiftCheck): return super(PackageAvailability, self).is_active() and self.get_var("ansible_pkg_mgr") == "yum" def run(self): - rpm_prefix = self.get_var("openshift", "common", "service_type") + rpm_prefix = self.get_var("openshift_service_type") + if self._templar is not None: + rpm_prefix = self._templar.template(rpm_prefix) group_names = self.get_var("group_names", default=[]) packages = set() diff --git a/roles/openshift_health_checker/openshift_checks/package_version.py b/roles/openshift_health_checker/openshift_checks/package_version.py index 13a91dadf..f3a628e28 100644 --- a/roles/openshift_health_checker/openshift_checks/package_version.py +++ b/roles/openshift_health_checker/openshift_checks/package_version.py @@ -41,7 +41,9 @@ class PackageVersion(NotContainerizedMixin, OpenShiftCheck): return super(PackageVersion, self).is_active() and master_or_node def run(self): - rpm_prefix = self.get_var("openshift", "common", "service_type") + rpm_prefix = self.get_var("openshift_service_type") + if self._templar is not None: + rpm_prefix = self._templar.template(rpm_prefix) openshift_release = self.get_var("openshift_release", default='') deployment_type = self.get_var("openshift_deployment_type") check_multi_minor_release = deployment_type in ['openshift-enterprise'] diff --git a/roles/openshift_health_checker/test/docker_image_availability_test.py b/roles/openshift_health_checker/test/docker_image_availability_test.py index ec46c3b4b..fc333dfd4 100644 --- a/roles/openshift_health_checker/test/docker_image_availability_test.py +++ b/roles/openshift_health_checker/test/docker_image_availability_test.py @@ -8,12 +8,12 @@ def task_vars(): return dict( openshift=dict( common=dict( - service_type='origin', is_containerized=False, is_atomic=False, ), docker=dict(), ), + openshift_service_type='origin', openshift_deployment_type='origin', openshift_image_tag='', group_names=['oo_nodes_to_config', 'oo_masters_to_config'], diff --git a/roles/openshift_health_checker/test/etcd_traffic_test.py b/roles/openshift_health_checker/test/etcd_traffic_test.py index dd6f4ad81..a29dc166b 100644 --- a/roles/openshift_health_checker/test/etcd_traffic_test.py +++ b/roles/openshift_health_checker/test/etcd_traffic_test.py @@ -37,8 +37,9 @@ def test_log_matches_high_traffic_msg(group_names, matched, failed, extra_words) task_vars = dict( group_names=group_names, openshift=dict( - 
common=dict(service_type="origin", is_containerized=False), - ) + common=dict(is_containerized=False), + ), + openshift_service_type="origin" ) result = EtcdTraffic(execute_module, task_vars).run() diff --git a/roles/openshift_health_checker/test/ovs_version_test.py b/roles/openshift_health_checker/test/ovs_version_test.py index 6f0457549..dd98ff4d8 100644 --- a/roles/openshift_health_checker/test/ovs_version_test.py +++ b/roles/openshift_health_checker/test/ovs_version_test.py @@ -10,10 +10,11 @@ def test_openshift_version_not_supported(): openshift_release = '111.7.0' task_vars = dict( - openshift=dict(common=dict(service_type='origin')), + openshift=dict(common=dict()), openshift_release=openshift_release, openshift_image_tag='v' + openshift_release, openshift_deployment_type='origin', + openshift_service_type='origin' ) with pytest.raises(OpenShiftCheckException) as excinfo: @@ -27,9 +28,10 @@ def test_invalid_openshift_release_format(): return {} task_vars = dict( - openshift=dict(common=dict(service_type='origin')), + openshift=dict(common=dict()), openshift_image_tag='v0', openshift_deployment_type='origin', + openshift_service_type='origin' ) with pytest.raises(OpenShiftCheckException) as excinfo: @@ -47,9 +49,10 @@ def test_invalid_openshift_release_format(): ]) def test_ovs_package_version(openshift_release, expected_ovs_version): task_vars = dict( - openshift=dict(common=dict(service_type='origin')), + openshift=dict(common=dict()), openshift_release=openshift_release, openshift_image_tag='v' + openshift_release, + openshift_service_type='origin' ) return_value = {} # note: check.execute_module modifies return hash contents diff --git a/roles/openshift_health_checker/test/package_availability_test.py b/roles/openshift_health_checker/test/package_availability_test.py index 9815acb38..a1e6e0879 100644 --- a/roles/openshift_health_checker/test/package_availability_test.py +++ b/roles/openshift_health_checker/test/package_availability_test.py @@ -19,13 +19,13 @@ def test_is_active(pkg_mgr, is_containerized, is_active): @pytest.mark.parametrize('task_vars,must_have_packages,must_not_have_packages', [ ( - dict(openshift=dict(common=dict(service_type='openshift'))), + dict(openshift_service_type='origin'), set(), set(['openshift-master', 'openshift-node']), ), ( dict( - openshift=dict(common=dict(service_type='origin')), + openshift_service_type='origin', group_names=['oo_masters_to_config'], ), set(['origin-master']), @@ -33,7 +33,7 @@ def test_is_active(pkg_mgr, is_containerized, is_active): ), ( dict( - openshift=dict(common=dict(service_type='atomic-openshift')), + openshift_service_type='atomic-openshift', group_names=['oo_nodes_to_config'], ), set(['atomic-openshift-node']), @@ -41,7 +41,7 @@ def test_is_active(pkg_mgr, is_containerized, is_active): ), ( dict( - openshift=dict(common=dict(service_type='atomic-openshift')), + openshift_service_type='atomic-openshift', group_names=['oo_masters_to_config', 'oo_nodes_to_config'], ), set(['atomic-openshift-master', 'atomic-openshift-node']), diff --git a/roles/openshift_health_checker/test/package_version_test.py b/roles/openshift_health_checker/test/package_version_test.py index 3cf4ce033..ea8e02b97 100644 --- a/roles/openshift_health_checker/test/package_version_test.py +++ b/roles/openshift_health_checker/test/package_version_test.py @@ -4,9 +4,12 @@ from openshift_checks.package_version import PackageVersion, OpenShiftCheckExcep def task_vars_for(openshift_release, deployment_type): + service_type_dict = {'origin': 'origin', + 
'openshift-enterprise': 'atomic-openshift'} + service_type = service_type_dict[deployment_type] return dict( ansible_pkg_mgr='yum', - openshift=dict(common=dict(service_type=deployment_type)), + openshift_service_type=service_type, openshift_release=openshift_release, openshift_image_tag='v' + openshift_release, openshift_deployment_type=deployment_type, @@ -29,7 +32,7 @@ def test_openshift_version_not_supported(): def test_invalid_openshift_release_format(): task_vars = dict( ansible_pkg_mgr='yum', - openshift=dict(common=dict(service_type='origin')), + openshift_service_type='origin', openshift_image_tag='v0', openshift_deployment_type='origin', ) diff --git a/roles/openshift_hosted/README.md b/roles/openshift_hosted/README.md index a1c2c3956..3cf5d97c5 100644 --- a/roles/openshift_hosted/README.md +++ b/roles/openshift_hosted/README.md @@ -43,9 +43,9 @@ variables also control configuration behavior: **NOTE:** Configuring a value for `openshift_hosted_registry_storage_glusterfs_ips` with a `glusterfs_registry` -host group is not allowed. Specifying a `glusterfs_registry` host group -indicates that a new GlusterFS cluster should be configured, whereas -specifying `openshift_hosted_registry_storage_glusterfs_ips` indicates wanting +host group is not allowed. Specifying a `glusterfs_registry` host group +indicates that a new GlusterFS cluster should be configured, whereas +specifying `openshift_hosted_registry_storage_glusterfs_ips` indicates wanting to use a pre-configured GlusterFS cluster for the registry storage. _ @@ -53,7 +53,6 @@ _ Dependencies ------------ -* openshift_hosted_facts * openshift_persistent_volumes Example Playbook diff --git a/roles/openshift_hosted/meta/main.yml b/roles/openshift_hosted/meta/main.yml index 1d70ef7eb..ac9e241a5 100644 --- a/roles/openshift_hosted/meta/main.yml +++ b/roles/openshift_hosted/meta/main.yml @@ -12,6 +12,6 @@ galaxy_info: categories: - cloud dependencies: -- role: openshift_hosted_facts +- role: openshift_facts - role: lib_openshift - role: lib_os_firewall diff --git a/roles/openshift_hosted/tasks/registry.yml b/roles/openshift_hosted/tasks/registry.yml index de302c740..429f0c514 100644 --- a/roles/openshift_hosted/tasks/registry.yml +++ b/roles/openshift_hosted/tasks/registry.yml @@ -126,7 +126,7 @@ selector: "{{ openshift_hosted_registry_selector }}" replicas: "{{ openshift_hosted_registry_replicas | default(l_default_replicas) }}" service_account: "{{ openshift_hosted_registry_serviceaccount }}" - images: "{{ penshift_hosted_registry_registryurl }}" + images: "{{ openshift_hosted_registry_registryurl }}" env_vars: "{{ openshift_hosted_registry_env_vars }}" volume_mounts: "{{ openshift_hosted_registry_volumes }}" edits: "{{ openshift_hosted_registry_edits }}" diff --git a/roles/openshift_hosted/templates/v3.8/glusterfs-registry-endpoints.yml.j2 b/roles/openshift_hosted/templates/v3.8/glusterfs-registry-endpoints.yml.j2 new file mode 100644 index 000000000..3c874d910 --- /dev/null +++ b/roles/openshift_hosted/templates/v3.8/glusterfs-registry-endpoints.yml.j2 @@ -0,0 +1,12 @@ +--- +apiVersion: v1 +kind: Endpoints +metadata: + name: {{ openshift_hosted_registry_storage_glusterfs_endpoints }} +subsets: +- addresses: +{% for ip in openshift_hosted_registry_storage_glusterfs_ips %} + - ip: {{ ip }} +{% endfor %} + ports: + - port: 1 diff --git a/roles/openshift_hosted/templates/v3.8/glusterfs-registry-service.yml.j2 b/roles/openshift_hosted/templates/v3.8/glusterfs-registry-service.yml.j2 new file mode 100644 index 000000000..f18c94a4f --- 
/dev/null +++ b/roles/openshift_hosted/templates/v3.8/glusterfs-registry-service.yml.j2 @@ -0,0 +1,10 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ openshift_hosted_registry_storage_glusterfs_endpoints }} +spec: + ports: + - port: 1 +status: + loadBalancer: {} diff --git a/roles/openshift_hosted/templates/v3.9/glusterfs-registry-endpoints.yml.j2 b/roles/openshift_hosted/templates/v3.9/glusterfs-registry-endpoints.yml.j2 new file mode 100644 index 000000000..3c874d910 --- /dev/null +++ b/roles/openshift_hosted/templates/v3.9/glusterfs-registry-endpoints.yml.j2 @@ -0,0 +1,12 @@ +--- +apiVersion: v1 +kind: Endpoints +metadata: + name: {{ openshift_hosted_registry_storage_glusterfs_endpoints }} +subsets: +- addresses: +{% for ip in openshift_hosted_registry_storage_glusterfs_ips %} + - ip: {{ ip }} +{% endfor %} + ports: + - port: 1 diff --git a/roles/openshift_hosted/templates/v3.9/glusterfs-registry-service.yml.j2 b/roles/openshift_hosted/templates/v3.9/glusterfs-registry-service.yml.j2 new file mode 100644 index 000000000..f18c94a4f --- /dev/null +++ b/roles/openshift_hosted/templates/v3.9/glusterfs-registry-service.yml.j2 @@ -0,0 +1,10 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ openshift_hosted_registry_storage_glusterfs_endpoints }} +spec: + ports: + - port: 1 +status: + loadBalancer: {} diff --git a/roles/openshift_hosted_facts/meta/main.yml b/roles/openshift_hosted_facts/meta/main.yml deleted file mode 100644 index dd2de07bc..000000000 --- a/roles/openshift_hosted_facts/meta/main.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- -galaxy_info: - author: Andrew Butcher - description: OpenShift Hosted Facts - company: Red Hat, Inc. - license: Apache License, Version 2.0 - min_ansible_version: 1.9 - platforms: - - name: EL - versions: - - 7 - categories: - - cloud -dependencies: -- role: openshift_facts diff --git a/roles/openshift_hosted_facts/tasks/main.yml b/roles/openshift_hosted_facts/tasks/main.yml deleted file mode 100644 index ed97d539c..000000000 --- a/roles/openshift_hosted_facts/tasks/main.yml +++ /dev/null @@ -1 +0,0 @@ ---- diff --git a/roles/openshift_logging/handlers/main.yml b/roles/openshift_logging/handlers/main.yml index e0329ee7c..1f4b5a116 100644 --- a/roles/openshift_logging/handlers/main.yml +++ b/roles/openshift_logging/handlers/main.yml @@ -1,12 +1,12 @@ --- - name: restart master api - systemd: name={{ openshift.common.service_type }}-master-api state=restarted + systemd: name={{ openshift_service_type }}-master-api state=restarted when: (not (master_api_service_status_changed | default(false) | bool)) notify: Verify API Server # We retry the controllers because the API may not be 100% initialized yet. 
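The glusterfs-registry-endpoints templates added above for v3.8 and v3.9 are plain Jinja2, so a quick way to sanity-check what they produce is to render one directly. The endpoints name and the IP addresses below are made-up sample values, not defaults from this change; trim_blocks is set to approximate Ansible's template defaults so the {% for %} tags do not leave blank lines:

from jinja2 import Template

endpoints_j2 = """\
---
apiVersion: v1
kind: Endpoints
metadata:
  name: {{ openshift_hosted_registry_storage_glusterfs_endpoints }}
subsets:
- addresses:
{% for ip in openshift_hosted_registry_storage_glusterfs_ips %}
  - ip: {{ ip }}
{% endfor %}
  ports:
  - port: 1
"""

print(Template(endpoints_j2, trim_blocks=True).render(
    openshift_hosted_registry_storage_glusterfs_endpoints="glusterfs-registry",  # assumed name
    openshift_hosted_registry_storage_glusterfs_ips=["192.0.2.10", "192.0.2.11"],  # sample IPs
))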
- name: restart master controllers - command: "systemctl restart {{ openshift.common.service_type }}-master-controllers" + command: "systemctl restart {{ openshift_service_type }}-master-controllers" retries: 3 delay: 5 register: result diff --git a/roles/openshift_logging/tasks/annotate_ops_projects.yaml b/roles/openshift_logging/tasks/annotate_ops_projects.yaml index fcb4c94d3..59d6098d4 100644 --- a/roles/openshift_logging/tasks/annotate_ops_projects.yaml +++ b/roles/openshift_logging/tasks/annotate_ops_projects.yaml @@ -1,17 +1,20 @@ --- -- oc_obj: - state: list - kind: project - name: "{{ item }}" - with_items: "{{ __default_logging_ops_projects }}" +- command: > + {{ openshift.common.client_binary }} + --config={{ openshift.common.config_base }}/master/admin.kubeconfig + get namespaces -o jsonpath={.items[*].metadata.name} {{ __default_logging_ops_projects | join(' ') }} register: __logging_ops_projects - name: Annotate Operations Projects oc_edit: kind: ns - name: "{{ item.item }}" + name: "{{ project }}" separator: '#' content: metadata#annotations#openshift.io/logging.ui.hostname: "{{ openshift_logging_kibana_ops_hostname }}" - with_items: "{{ __logging_ops_projects.results }}" - when: item.results.stderr is not defined + with_items: "{{ __logging_ops_projects.stdout.split(' ') }}" + loop_control: + loop_var: project + when: + - __logging_ops_projects.stderr | length == 0 + - openshift_logging_use_ops | default(false) | bool diff --git a/roles/openshift_logging/tasks/delete_logging.yaml b/roles/openshift_logging/tasks/delete_logging.yaml index ffed956a4..af36d67c6 100644 --- a/roles/openshift_logging/tasks/delete_logging.yaml +++ b/roles/openshift_logging/tasks/delete_logging.yaml @@ -107,6 +107,24 @@ - logging-fluentd - logging-mux +# remove annotations added by logging +- command: > + {{ openshift.common.client_binary }} + --config={{ openshift.common.config_base }}/master/admin.kubeconfig + get namespaces -o name {{ __default_logging_ops_projects | join(' ') }} + register: __logging_ops_projects + +- name: Remove Annotation of Operations Projects + command: > + {{ openshift.common.client_binary }} + --config={{ openshift.common.config_base }}/master/admin.kubeconfig + annotate {{ project }} openshift.io/logging.ui.hostname- + with_items: "{{ __logging_ops_projects.stdout_lines }}" + loop_control: + loop_var: project + when: + - __logging_ops_projects.stderr | length == 0 + ## EventRouter - include_role: name: openshift_logging_eventrouter diff --git a/roles/openshift_logging/tasks/main.yaml b/roles/openshift_logging/tasks/main.yaml index 91db457d1..9949bb95d 100644 --- a/roles/openshift_logging/tasks/main.yaml +++ b/roles/openshift_logging/tasks/main.yaml @@ -31,3 +31,4 @@ local_action: file path="{{local_tmp.stdout}}" state=absent tags: logging_cleanup changed_when: False + become: no diff --git a/roles/openshift_logging_elasticsearch/templates/es.j2 b/roles/openshift_logging_elasticsearch/templates/es.j2 index 0bfa9e85b..bf04094a3 100644 --- a/roles/openshift_logging_elasticsearch/templates/es.j2 +++ b/roles/openshift_logging_elasticsearch/templates/es.j2 @@ -9,6 +9,7 @@ metadata: logging-infra: "{{logging_component}}" spec: replicas: {{es_replicas|default(1)}} + revisionHistoryLimit: 0 selector: provider: openshift component: "{{component}}" diff --git a/roles/openshift_logging_fluentd/tasks/label_and_wait.yaml b/roles/openshift_logging_fluentd/tasks/label_and_wait.yaml index e92a35f27..12b4f5bfd 100644 --- a/roles/openshift_logging_fluentd/tasks/label_and_wait.yaml +++ 
b/roles/openshift_logging_fluentd/tasks/label_and_wait.yaml @@ -8,3 +8,4 @@ # wait half a second between labels - local_action: command sleep {{ openshift_logging_fluentd_label_delay | default('.5') }} + become: no diff --git a/roles/openshift_management/README.md b/roles/openshift_management/README.md index 96de82669..974d9781a 100644 --- a/roles/openshift_management/README.md +++ b/roles/openshift_management/README.md @@ -164,14 +164,14 @@ away. If you want to install CFME/MIQ at the same time you install your OCP/Origin cluster, ensure that `openshift_management_install_management` is set to `true` in your inventory. Call the standard -`playbooks/byo/config.yml` playbook to begin the cluster and CFME/MIQ +`playbooks/deploy_cluster.yml` playbook to begin the cluster and CFME/MIQ installation. If you are installing CFME/MIQ on an *already provisioned cluster* then you can call the CFME/MIQ playbook directly: ``` -$ ansible-playbook -v -i <YOUR_INVENTORY> playbooks/byo/openshift-management/config.yml +$ ansible-playbook -v -i <YOUR_INVENTORY> playbooks/openshift-management/config.yml ``` *Note: Use `miq-template` in the following examples for ManageIQ installs* @@ -489,7 +489,7 @@ This playbook will: ``` -$ ansible-playbook -v -i <YOUR_INVENTORY> playbooks/byo/openshift-management/add_container_provider.yml +$ ansible-playbook -v -i <YOUR_INVENTORY> playbooks/openshift-management/add_container_provider.yml ``` ## Multiple Providers @@ -567,7 +567,7 @@ the config file path. ``` $ ansible-playbook -v -e container_providers_config=/tmp/cp.yml \ - playbooks/byo/openshift-management/add_many_container_providers.yml + playbooks/openshift-management/add_many_container_providers.yml ``` Afterwards you will find two new container providers in your @@ -579,7 +579,7 @@ to see an overview. This role includes a playbook to uninstall and erase the CFME/MIQ installation: -* `playbooks/byo/openshift-management/uninstall.yml` +* `playbooks/openshift-management/uninstall.yml` NFS export definitions and data stored on NFS exports are not automatically removed. You are urged to manually erase any data from diff --git a/roles/openshift_management/defaults/main.yml b/roles/openshift_management/defaults/main.yml index e768961ce..b5e234b7f 100644 --- a/roles/openshift_management/defaults/main.yml +++ b/roles/openshift_management/defaults/main.yml @@ -88,7 +88,7 @@ openshift_management_storage_nfs_local_hostname: false # name and password AND are trying to use integration scripts. 
# # For example, adding this cluster as a container provider, -# playbooks/byo/openshift-management/add_container_provider.yml +# playbooks/openshift-management/add_container_provider.yml openshift_management_username: admin openshift_management_password: smartvm diff --git a/roles/openshift_management/meta/main.yml b/roles/openshift_management/meta/main.yml index 07ad51126..9f19704a8 100644 --- a/roles/openshift_management/meta/main.yml +++ b/roles/openshift_management/meta/main.yml @@ -16,3 +16,4 @@ galaxy_info: dependencies: - role: lib_openshift - role: lib_utils +- role: openshift_facts diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml index 38b2fd8b8..efd119299 100644 --- a/roles/openshift_master/defaults/main.yml +++ b/roles/openshift_master/defaults/main.yml @@ -54,6 +54,48 @@ ha_svc_template_path: "native-cluster" openshift_docker_service_name: "{{ 'container-engine' if (openshift_docker_use_system_container | default(False)) else 'docker' }}" +openshift_master_loopback_config: "{{ openshift_master_config_dir }}/openshift-master.kubeconfig" +loopback_context_string: "current-context: {{ openshift.master.loopback_context_name }}" +openshift_master_session_secrets_file: "{{ openshift_master_config_dir }}/session-secrets.yaml" +openshift_master_policy: "{{ openshift_master_config_dir }}/policy.json" + +scheduler_config: + kind: Policy + apiVersion: v1 + predicates: "{{ openshift_master_scheduler_predicates + | default(openshift_master_scheduler_current_predicates + | default(openshift_master_scheduler_default_predicates)) }}" + priorities: "{{ openshift_master_scheduler_priorities + | default(openshift_master_scheduler_current_priorities + | default(openshift_master_scheduler_default_priorities)) }}" + +openshift_master_valid_grant_methods: +- auto +- prompt +- deny + +openshift_master_is_scaleup_host: False + +# These defaults assume forcing journald persistence, fsync to disk once +# a second, rate-limiting to 10,000 logs a second, no forwarding to +# syslog or wall, using 8GB of disk space maximum, using 10MB journal +# files, keeping only a days worth of logs per journal file, and +# retaining journal files no longer than a month. +journald_vars_to_replace: +- { var: Storage, val: persistent } +- { var: Compress, val: yes } +- { var: SyncIntervalSec, val: 1s } +- { var: RateLimitInterval, val: 1s } +- { var: RateLimitBurst, val: 10000 } +- { var: SystemMaxUse, val: 8G } +- { var: SystemKeepFree, val: 20% } +- { var: SystemMaxFileSize, val: 10M } +- { var: MaxRetentionSec, val: 1month } +- { var: MaxFileSec, val: 1day } +- { var: ForwardToSyslog, val: no } +- { var: ForwardToWall, val: no } + + # NOTE # r_openshift_master_*_default may be defined external to this role. # openshift_use_*, if defined, may affect other roles or play behavior. diff --git a/roles/openshift_master/handlers/main.yml b/roles/openshift_master/handlers/main.yml index e6b8b8ac8..557bfe022 100644 --- a/roles/openshift_master/handlers/main.yml +++ b/roles/openshift_master/handlers/main.yml @@ -1,7 +1,7 @@ --- - name: restart master api systemd: - name: "{{ openshift.common.service_type }}-master-api" + name: "{{ openshift_service_type }}-master-api" state: restarted when: - not (master_api_service_status_changed | default(false) | bool) @@ -10,7 +10,7 @@ # We retry the controllers because the API may not be 100% initialized yet. 
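(Editorial aside on the `journald_vars_to_replace` list added to `roles/openshift_master/defaults/main.yml` above: each `{ var, val }` pair is meant to be applied to the journald configuration. A hypothetical consumer, not part of this diff, could look like the following; the file path, regexp, and handler name are assumptions:)

```
# Hypothetical consumer of journald_vars_to_replace; not part of this change.
- name: Update journald configuration
  lineinfile:
    dest: /etc/systemd/journald.conf
    regexp: '^(#\s*)?{{ item.var }}='
    line: "{{ item.var }}={{ item.val }}"
  with_items: "{{ journald_vars_to_replace }}"
  notify: restart journald   # handler name assumed
```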
- name: restart master controllers - command: "systemctl restart {{ openshift.common.service_type }}-master-controllers" + command: "systemctl restart {{ openshift_service_type }}-master-controllers" retries: 3 delay: 5 register: result diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml index 5f4e6df71..9be5508aa 100644 --- a/roles/openshift_master/tasks/main.yml +++ b/roles/openshift_master/tasks/main.yml @@ -16,7 +16,7 @@ - name: Install Master package package: - name: "{{ openshift.common.service_type }}-master{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}" + name: "{{ openshift_service_type }}-master{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}" state: present when: - not openshift.common.is_containerized | bool @@ -141,7 +141,7 @@ # The template file will stomp any other settings made. - block: - name: check whether our docker-registry setting exists in the env file - command: "awk '/^OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000/' /etc/sysconfig/{{ openshift.common.service_type }}-master" + command: "awk '/^OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000/' /etc/sysconfig/{{ openshift_service_type }}-master" failed_when: false changed_when: false register: l_already_set @@ -203,7 +203,7 @@ - name: Start and enable master api on first master systemd: - name: "{{ openshift.common.service_type }}-master-api" + name: "{{ openshift_service_type }}-master-api" enabled: yes state: started when: @@ -214,7 +214,7 @@ delay: 60 - name: Dump logs from master-api if it failed - command: journalctl --no-pager -n 100 -u {{ openshift.common.service_type }}-master-api + command: journalctl --no-pager -n 100 -u {{ openshift_service_type }}-master-api when: - l_start_result | failed @@ -230,7 +230,7 @@ - name: Start and enable master api all masters systemd: - name: "{{ openshift.common.service_type }}-master-api" + name: "{{ openshift_service_type }}-master-api" enabled: yes state: started when: @@ -241,7 +241,7 @@ delay: 60 - name: Dump logs from master-api if it failed - command: journalctl --no-pager -n 100 -u {{ openshift.common.service_type }}-master-api + command: journalctl --no-pager -n 100 -u {{ openshift_service_type }}-master-api when: - l_start_result | failed @@ -258,7 +258,7 @@ - name: Start and enable master controller service systemd: - name: "{{ openshift.common.service_type }}-master-controllers" + name: "{{ openshift_service_type }}-master-controllers" enabled: yes state: started register: l_start_result @@ -267,7 +267,7 @@ delay: 60 - name: Dump logs from master-controllers if it failed - command: journalctl --no-pager -n 100 -u {{ openshift.common.service_type }}-master-controllers + command: journalctl --no-pager -n 100 -u {{ openshift_service_type }}-master-controllers when: - l_start_result | failed diff --git a/roles/openshift_master/tasks/registry_auth.yml b/roles/openshift_master/tasks/registry_auth.yml index ca04d2243..8b342a5b4 100644 --- a/roles/openshift_master/tasks/registry_auth.yml +++ b/roles/openshift_master/tasks/registry_auth.yml @@ -32,7 +32,7 @@ when: - openshift_docker_alternative_creds | default(False) | bool - oreg_auth_user is defined - - (not docker_cli_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool + - (not master_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool register: master_oreg_auth_credentials_create_alt notify: - restart master api diff 
--git a/roles/openshift_master/tasks/restart.yml b/roles/openshift_master/tasks/restart.yml index 4f8b758fd..715347101 100644 --- a/roles/openshift_master/tasks/restart.yml +++ b/roles/openshift_master/tasks/restart.yml @@ -1,7 +1,7 @@ --- - name: Restart master API service: - name: "{{ openshift.common.service_type }}-master-api" + name: "{{ openshift_service_type }}-master-api" state: restarted when: openshift_master_ha | bool - name: Wait for master API to come back online @@ -14,7 +14,7 @@ when: openshift_master_ha | bool - name: Restart master controllers service: - name: "{{ openshift.common.service_type }}-master-controllers" + name: "{{ openshift_service_type }}-master-controllers" state: restarted # Ignore errrors since it is possible that type != simple for # pre-3.1.1 installations. diff --git a/roles/openshift_master/tasks/system_container.yml b/roles/openshift_master/tasks/system_container.yml index 23386f11b..f6c5ce0dd 100644 --- a/roles/openshift_master/tasks/system_container.yml +++ b/roles/openshift_master/tasks/system_container.yml @@ -1,8 +1,4 @@ --- -- name: Ensure proxies are in the atomic.conf - include_role: - name: openshift_atomic - tasks_from: proxy - name: Pre-pull master system container image command: > @@ -12,12 +8,12 @@ - name: Check Master system container package command: > - atomic containers list --no-trunc -a -f container={{ openshift.common.service_type }}-master + atomic containers list --no-trunc -a -f container={{ openshift_service_type }}-master # HA - name: Install or Update HA api master system container oc_atomic_container: - name: "{{ openshift.common.service_type }}-master-api" + name: "{{ openshift_service_type }}-master-api" image: "{{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.master.master_system_image }}:{{ openshift_image_tag }}" state: latest values: @@ -25,7 +21,7 @@ - name: Install or Update HA controller master system container oc_atomic_container: - name: "{{ openshift.common.service_type }}-master-controllers" + name: "{{ openshift_service_type }}-master-controllers" image: "{{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.master.master_system_image }}:{{ openshift_image_tag }}" state: latest values: diff --git a/roles/openshift_master/tasks/systemd_units.yml b/roles/openshift_master/tasks/systemd_units.yml index ee76413e3..76b6f46aa 100644 --- a/roles/openshift_master/tasks/systemd_units.yml +++ b/roles/openshift_master/tasks/systemd_units.yml @@ -13,7 +13,7 @@ - name: Disable the legacy master service if it exists systemd: - name: "{{ openshift.common.service_type }}-master" + name: "{{ openshift_service_type }}-master" state: stopped enabled: no masked: yes @@ -21,7 +21,7 @@ - name: Remove the legacy master service if it exists file: - path: "{{ containerized_svc_dir }}/{{ openshift.common.service_type }}-master.service" + path: "{{ containerized_svc_dir }}/{{ openshift_service_type }}-master.service" state: absent ignore_errors: true when: @@ -40,7 +40,7 @@ - name: Create the ha systemd unit files template: src: "{{ ha_svc_template_path }}/atomic-openshift-master-{{ item }}.service.j2" - dest: "{{ containerized_svc_dir }}/{{ openshift.common.service_type }}-master-{{ item }}.service" + dest: "{{ containerized_svc_dir }}/{{ openshift_service_type }}-master-{{ item }}.service" when: - not l_is_master_system_container | bool with_items: @@ -55,7 +55,7 @@ - name: enable master services systemd: - name: "{{ 
openshift.common.service_type }}-master-{{ item }}" + name: "{{ openshift_service_type }}-master-{{ item }}" enabled: yes with_items: - api @@ -64,13 +64,13 @@ - not l_is_master_system_container | bool - name: Preserve Master API Proxy Config options - command: grep PROXY /etc/sysconfig/{{ openshift.common.service_type }}-master-api + command: grep PROXY /etc/sysconfig/{{ openshift_service_type }}-master-api register: l_master_api_proxy failed_when: false changed_when: false - name: Preserve Master API AWS options - command: grep AWS_ /etc/sysconfig/{{ openshift.common.service_type }}-master-api + command: grep AWS_ /etc/sysconfig/{{ openshift_service_type }}-master-api register: master_api_aws failed_when: false changed_when: false @@ -78,7 +78,7 @@ - name: Create the master api service env file template: src: "{{ ha_svc_template_path }}/atomic-openshift-master-api.j2" - dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-api + dest: /etc/sysconfig/{{ openshift_service_type }}-master-api backup: true notify: - restart master api @@ -89,7 +89,7 @@ - "'http_proxy' not in openshift.common" - "'https_proxy' not in openshift.common" lineinfile: - dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-api + dest: /etc/sysconfig/{{ openshift_service_type }}-master-api line: "{{ item }}" with_items: "{{ l_master_api_proxy.stdout_lines | default([]) }}" @@ -98,19 +98,19 @@ - master_api_aws.rc == 0 - not (openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined) lineinfile: - dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-api + dest: /etc/sysconfig/{{ openshift_service_type }}-master-api line: "{{ item }}" with_items: "{{ master_api_aws.stdout_lines | default([]) }}" no_log: True - name: Preserve Master Controllers Proxy Config options - command: grep PROXY /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers + command: grep PROXY /etc/sysconfig/{{ openshift_service_type }}-master-controllers register: master_controllers_proxy failed_when: false changed_when: false - name: Preserve Master Controllers AWS options - command: grep AWS_ /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers + command: grep AWS_ /etc/sysconfig/{{ openshift_service_type }}-master-controllers register: master_controllers_aws failed_when: false changed_when: false @@ -118,14 +118,14 @@ - name: Create the master controllers service env file template: src: "{{ ha_svc_template_path }}/atomic-openshift-master-controllers.j2" - dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers + dest: /etc/sysconfig/{{ openshift_service_type }}-master-controllers backup: true notify: - restart master controllers - name: Restore Master Controllers Proxy Config Options lineinfile: - dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers + dest: /etc/sysconfig/{{ openshift_service_type }}-master-controllers line: "{{ item }}" with_items: "{{ master_controllers_proxy.stdout_lines | default([]) }}" when: @@ -135,7 +135,7 @@ - name: Restore Master Controllers AWS Options lineinfile: - dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers + dest: /etc/sysconfig/{{ openshift_service_type }}-master-controllers line: "{{ item }}" with_items: "{{ master_controllers_aws.stdout_lines | default([]) }}" when: diff --git a/roles/openshift_master/tasks/upgrade/rpm_upgrade.yml 
b/roles/openshift_master/tasks/upgrade/rpm_upgrade.yml index caab3045a..f50b91ff5 100644 --- a/roles/openshift_master/tasks/upgrade/rpm_upgrade.yml +++ b/roles/openshift_master/tasks/upgrade/rpm_upgrade.yml @@ -12,11 +12,11 @@ package: name={{ master_pkgs | join(',') }} state=present vars: master_pkgs: - - "{{ openshift.common.service_type }}{{ openshift_pkg_version }}" - - "{{ openshift.common.service_type }}-master{{ openshift_pkg_version }}" - - "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}" - - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version }}" - - "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}" - - "tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}" + - "{{ openshift_service_type }}{{ openshift_pkg_version }}" + - "{{ openshift_service_type }}-master{{ openshift_pkg_version }}" + - "{{ openshift_service_type }}-node{{ openshift_pkg_version }}" + - "{{ openshift_service_type }}-sdn-ovs{{ openshift_pkg_version }}" + - "{{ openshift_service_type }}-clients{{ openshift_pkg_version }}" + - "tuned-profiles-{{ openshift_service_type }}-node{{ openshift_pkg_version }}" register: result until: result | success diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 index cec3d3fb1..5e46d9121 100644 --- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 +++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 @@ -3,18 +3,18 @@ Description=Atomic OpenShift Master API Documentation=https://github.com/openshift/origin After=etcd_container.service Wants=etcd_container.service -Before={{ openshift.common.service_type }}-node.service +Before={{ openshift_service_type }}-node.service After={{ openshift_docker_service_name }}.service PartOf={{ openshift_docker_service_name }}.service Requires={{ openshift_docker_service_name }}.service [Service] -EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-api +EnvironmentFile=/etc/sysconfig/{{ openshift_service_type }}-master-api Environment=GOTRACEBACK=crash -ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type}}-master-api +ExecStartPre=-/usr/bin/docker rm -f {{ openshift_service_type}}-master-api ExecStart=/usr/bin/docker run --rm --privileged --net=host \ - --name {{ openshift.common.service_type }}-master-api \ - --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-api \ + --name {{ openshift_service_type }}-master-api \ + --env-file=/etc/sysconfig/{{ openshift_service_type }}-master-api \ -v {{ r_openshift_master_data_dir }}:{{ r_openshift_master_data_dir }} \ -v /var/log:/var/log -v /var/run/docker.sock:/var/run/docker.sock \ -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} \ @@ -24,14 +24,14 @@ ExecStart=/usr/bin/docker run --rm --privileged --net=host \ {{ openshift.master.master_image }}:${IMAGE_VERSION} start master api \ --config=${CONFIG_FILE} $OPTIONS ExecStartPost=/usr/bin/sleep 10 -ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-master-api +ExecStop=/usr/bin/docker stop {{ openshift_service_type }}-master-api LimitNOFILE=131072 LimitCORE=infinity WorkingDirectory={{ r_openshift_master_data_dir }} -SyslogIdentifier={{ openshift.common.service_type }}-master-api +SyslogIdentifier={{ openshift_service_type }}-master-api Restart=always RestartSec=5s 
[Install] WantedBy={{ openshift_docker_service_name }}.service -WantedBy={{ openshift.common.service_type }}-node.service +WantedBy={{ openshift_service_type }}-node.service diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 index a0248151d..899575f1a 100644 --- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 +++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 @@ -1,19 +1,19 @@ [Unit] Description=Atomic OpenShift Master Controllers Documentation=https://github.com/openshift/origin -Wants={{ openshift.common.service_type }}-master-api.service -After={{ openshift.common.service_type }}-master-api.service +Wants={{ openshift_service_type }}-master-api.service +After={{ openshift_service_type }}-master-api.service After={{ openshift_docker_service_name }}.service Requires={{ openshift_docker_service_name }}.service PartOf={{ openshift_docker_service_name }}.service [Service] -EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers +EnvironmentFile=/etc/sysconfig/{{ openshift_service_type }}-master-controllers Environment=GOTRACEBACK=crash -ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type}}-master-controllers +ExecStartPre=-/usr/bin/docker rm -f {{ openshift_service_type}}-master-controllers ExecStart=/usr/bin/docker run --rm --privileged --net=host \ - --name {{ openshift.common.service_type }}-master-controllers \ - --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers \ + --name {{ openshift_service_type }}-master-controllers \ + --env-file=/etc/sysconfig/{{ openshift_service_type }}-master-controllers \ -v {{ r_openshift_master_data_dir }}:{{ r_openshift_master_data_dir }} \ -v /var/run/docker.sock:/var/run/docker.sock \ -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} \ @@ -23,11 +23,11 @@ ExecStart=/usr/bin/docker run --rm --privileged --net=host \ {{ openshift.master.master_image }}:${IMAGE_VERSION} start master controllers \ --config=${CONFIG_FILE} $OPTIONS ExecStartPost=/usr/bin/sleep 10 -ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-master-controllers +ExecStop=/usr/bin/docker stop {{ openshift_service_type }}-master-controllers LimitNOFILE=131072 LimitCORE=infinity WorkingDirectory={{ r_openshift_master_data_dir }} -SyslogIdentifier={{ openshift.common.service_type }}-master-controllers +SyslogIdentifier={{ openshift_service_type }}-master-controllers Restart=always RestartSec=5s diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2 index 92668b227..f1a76e5f5 100644 --- a/roles/openshift_master/templates/master.yaml.v1.j2 +++ b/roles/openshift_master/templates/master.yaml.v1.j2 @@ -69,29 +69,13 @@ dnsConfig: bindNetwork: tcp4 {% endif %} etcdClientInfo: - ca: {{ "ca-bundle.crt" if (openshift.master.embedded_etcd | bool) else "master.etcd-ca.crt" }} + ca: master.etcd-ca.crt certFile: master.etcd-client.crt keyFile: master.etcd-client.key urls: {% for etcd_url in openshift.master.etcd_urls %} - {{ etcd_url }} {% endfor %} -{% if openshift.master.embedded_etcd | bool %} -etcdConfig: - address: {{ openshift.common.hostname }}:{{ openshift.master.etcd_port }} - peerAddress: {{ openshift.common.hostname }}:7001 - peerServingInfo: - bindAddress: {{ 
openshift.master.bind_addr }}:7001 - certFile: etcd.server.crt - clientCA: ca-bundle.crt - keyFile: etcd.server.key - servingInfo: - bindAddress: {{ openshift.master.bind_addr }}:{{ openshift.master.etcd_port }} - certFile: etcd.server.crt - clientCA: ca-bundle.crt - keyFile: etcd.server.key - storageDirectory: {{ r_openshift_master_data_dir }}/openshift.local.etcd -{% endif %} etcdStorageConfig: kubernetesStoragePrefix: kubernetes.io kubernetesStorageVersion: v1 diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2 index 02bfd6f62..ed8a47df8 100644 --- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2 +++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2 @@ -3,12 +3,12 @@ Description=Atomic OpenShift Master API Documentation=https://github.com/openshift/origin After=network-online.target After=etcd.service -Before={{ openshift.common.service_type }}-node.service +Before={{ openshift_service_type }}-node.service Requires=network-online.target [Service] Type=notify -EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-api +EnvironmentFile=/etc/sysconfig/{{ openshift_service_type }}-master-api Environment=GOTRACEBACK=crash ExecStart=/usr/bin/openshift start master api --config=${CONFIG_FILE} $OPTIONS LimitNOFILE=131072 @@ -20,4 +20,4 @@ RestartSec=5s [Install] WantedBy=multi-user.target -WantedBy={{ openshift.common.service_type }}-node.service +WantedBy={{ openshift_service_type }}-node.service diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2 index fae021845..b36963f73 100644 --- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2 +++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2 @@ -2,19 +2,19 @@ Description=Atomic OpenShift Master Controllers Documentation=https://github.com/openshift/origin After=network-online.target -After={{ openshift.common.service_type }}-master-api.service -Wants={{ openshift.common.service_type }}-master-api.service +After={{ openshift_service_type }}-master-api.service +Wants={{ openshift_service_type }}-master-api.service Requires=network-online.target [Service] Type=notify -EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers +EnvironmentFile=/etc/sysconfig/{{ openshift_service_type }}-master-controllers Environment=GOTRACEBACK=crash ExecStart=/usr/bin/openshift start master controllers --config=${CONFIG_FILE} $OPTIONS LimitNOFILE=131072 LimitCORE=infinity WorkingDirectory={{ r_openshift_master_data_dir }} -SyslogIdentifier={{ openshift.common.service_type }}-master-controllers +SyslogIdentifier={{ openshift_service_type }}-master-controllers Restart=always RestartSec=5s diff --git a/roles/openshift_master/vars/main.yml b/roles/openshift_master/vars/main.yml deleted file mode 100644 index 0c681c764..000000000 --- a/roles/openshift_master/vars/main.yml +++ /dev/null @@ -1,41 +0,0 @@ ---- -openshift_master_loopback_config: "{{ openshift_master_config_dir }}/openshift-master.kubeconfig" -loopback_context_string: "current-context: {{ openshift.master.loopback_context_name }}" -openshift_master_session_secrets_file: "{{ openshift_master_config_dir 
}}/session-secrets.yaml" -openshift_master_policy: "{{ openshift_master_config_dir }}/policy.json" - -scheduler_config: - kind: Policy - apiVersion: v1 - predicates: "{{ openshift_master_scheduler_predicates - | default(openshift_master_scheduler_current_predicates - | default(openshift_master_scheduler_default_predicates)) }}" - priorities: "{{ openshift_master_scheduler_priorities - | default(openshift_master_scheduler_current_priorities - | default(openshift_master_scheduler_default_priorities)) }}" - -openshift_master_valid_grant_methods: -- auto -- prompt -- deny - -openshift_master_is_scaleup_host: False - -# These defaults assume forcing journald persistence, fsync to disk once -# a second, rate-limiting to 10,000 logs a second, no forwarding to -# syslog or wall, using 8GB of disk space maximum, using 10MB journal -# files, keeping only a days worth of logs per journal file, and -# retaining journal files no longer than a month. -journald_vars_to_replace: -- { var: Storage, val: persistent } -- { var: Compress, val: yes } -- { var: SyncIntervalSec, val: 1s } -- { var: RateLimitInterval, val: 1s } -- { var: RateLimitBurst, val: 10000 } -- { var: SystemMaxUse, val: 8G } -- { var: SystemKeepFree, val: 20% } -- { var: SystemMaxFileSize, val: 10M } -- { var: MaxRetentionSec, val: 1month } -- { var: MaxFileSec, val: 1day } -- { var: ForwardToSyslog, val: no } -- { var: ForwardToWall, val: no } diff --git a/roles/openshift_master_facts/tasks/main.yml b/roles/openshift_master_facts/tasks/main.yml index 0cb87dcaa..418dcba67 100644 --- a/roles/openshift_master_facts/tasks/main.yml +++ b/roles/openshift_master_facts/tasks/main.yml @@ -45,7 +45,6 @@ etcd_port: "{{ openshift_master_etcd_port | default(None) }}" etcd_use_ssl: "{{ openshift_master_etcd_use_ssl | default(None) }}" etcd_urls: "{{ openshift_master_etcd_urls | default(None) }}" - embedded_etcd: "{{ openshift_master_embedded_etcd | default(None) }}" embedded_kube: "{{ openshift_master_embedded_kube | default(None) }}" embedded_dns: "{{ openshift_master_embedded_dns | default(None) }}" bind_addr: "{{ openshift_master_bind_addr | default(None) }}" diff --git a/roles/openshift_metrics/handlers/main.yml b/roles/openshift_metrics/handlers/main.yml index e0329ee7c..1f4b5a116 100644 --- a/roles/openshift_metrics/handlers/main.yml +++ b/roles/openshift_metrics/handlers/main.yml @@ -1,12 +1,12 @@ --- - name: restart master api - systemd: name={{ openshift.common.service_type }}-master-api state=restarted + systemd: name={{ openshift_service_type }}-master-api state=restarted when: (not (master_api_service_status_changed | default(false) | bool)) notify: Verify API Server # We retry the controllers because the API may not be 100% initialized yet. - name: restart master controllers - command: "systemctl restart {{ openshift.common.service_type }}-master-controllers" + command: "systemctl restart {{ openshift_service_type }}-master-controllers" retries: 3 delay: 5 register: result diff --git a/roles/openshift_node/README.md b/roles/openshift_node/README.md index 67f697924..87ceb8103 100644 --- a/roles/openshift_node/README.md +++ b/roles/openshift_node/README.md @@ -33,9 +33,9 @@ Notes Currently we support re-labeling nodes but we don't re-schedule running pods nor remove existing labels. That means you will have to trigger the re-schedulling manually. 
To re-schedule your pods, just follow the steps below: ``` -oadm manage-node --schedulable=false ${NODE} -oadm manage-node --drain ${NODE} -oadm manage-node --schedulable=true ${NODE} +oc adm manage-node --schedulable=false ${NODE} +oc adm manage-node --drain ${NODE} +oc adm manage-node --schedulable=true ${NODE} ```` > If you are using version less than 1.5/3.5 you must replace `--drain` with `--evacuate`. diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml index f3867fe4a..fff927944 100644 --- a/roles/openshift_node/defaults/main.yml +++ b/roles/openshift_node/defaults/main.yml @@ -14,7 +14,11 @@ r_openshift_node_use_firewalld: "{{ os_firewall_use_firewalld | default(False) } l_is_node_system_container: "{{ (openshift_use_node_system_container | default(openshift_use_system_containers | default(false)) | bool) }}" openshift_deployment_type: "{{ openshift_deployment_type | default('origin') }}" -openshift_service_type: "{{ 'origin' if openshift_deployment_type == 'origin' else 'atomic-openshift' }}" +openshift_service_type_dict: + origin: origin + openshift-enterprise: atomic-openshift + +openshift_service_type: "{{ openshift_service_type_dict[openshift_deployment_type] }}" system_images_registry_dict: openshift-enterprise: "registry.access.redhat.com" diff --git a/roles/openshift_node/handlers/main.yml b/roles/openshift_node/handlers/main.yml index 229c6bbed..1d9797f84 100644 --- a/roles/openshift_node/handlers/main.yml +++ b/roles/openshift_node/handlers/main.yml @@ -4,11 +4,15 @@ name: NetworkManager state: restarted enabled: True + when: + - (not skip_node_svc_handlers | default(False) | bool) - name: restart dnsmasq systemd: name: dnsmasq state: restarted + when: + - (not skip_node_svc_handlers | default(False) | bool) - name: restart openvswitch systemd: @@ -34,7 +38,7 @@ - name: restart node systemd: - name: "{{ openshift.common.service_type }}-node" + name: "{{ openshift_service_type }}-node" state: restarted register: l_openshift_node_restart_node_result until: not l_openshift_node_restart_node_result | failed @@ -47,3 +51,5 @@ - name: reload systemd units command: systemctl daemon-reload + when: + - (not skip_node_svc_handlers | default(False) | bool) diff --git a/roles/openshift_node/tasks/aws.yml b/roles/openshift_node/tasks/aws.yml index 38c2b794d..a7f1fc116 100644 --- a/roles/openshift_node/tasks/aws.yml +++ b/roles/openshift_node/tasks/aws.yml @@ -1,7 +1,7 @@ --- - name: Configure AWS Cloud Provider Settings lineinfile: - dest: /etc/sysconfig/{{ openshift.common.service_type }}-node + dest: /etc/sysconfig/{{ openshift_service_type }}-node regexp: "{{ item.regex }}" line: "{{ item.line }}" create: true diff --git a/roles/openshift_node/tasks/config.yml b/roles/openshift_node/tasks/config.yml index 741a2234f..5d66de0a3 100644 --- a/roles/openshift_node/tasks/config.yml +++ b/roles/openshift_node/tasks/config.yml @@ -2,6 +2,10 @@ - name: Install the systemd units include_tasks: systemd_units.yml +- name: Pull container images + include_tasks: container_images.yml + when: openshift.common.is_containerized | bool + - name: Start and enable openvswitch service systemd: name: openvswitch.service @@ -38,7 +42,7 @@ - name: Configure Node Environment Variables lineinfile: - dest: /etc/sysconfig/{{ openshift.common.service_type }}-node + dest: /etc/sysconfig/{{ openshift_service_type }}-node regexp: "^{{ item.key }}=" line: "{{ item.key }}={{ item.value }}" create: true @@ -76,7 +80,7 @@ - name: Start and enable node dep systemd: daemon_reload: 
yes - name: "{{ openshift.common.service_type }}-node-dep" + name: "{{ openshift_service_type }}-node-dep" enabled: yes state: started @@ -84,7 +88,7 @@ block: - name: Start and enable node systemd: - name: "{{ openshift.common.service_type }}-node" + name: "{{ openshift_service_type }}-node" enabled: yes state: started daemon_reload: yes @@ -95,7 +99,7 @@ ignore_errors: true - name: Dump logs from node service if it failed - command: journalctl --no-pager -n 100 -u {{ openshift.common.service_type }}-node + command: journalctl --no-pager -n 100 -u {{ openshift_service_type }}-node when: node_start_result | failed - name: Abort if node failed to start diff --git a/roles/openshift_node/tasks/config/configure-node-settings.yml b/roles/openshift_node/tasks/config/configure-node-settings.yml index 527580481..ebc1426d3 100644 --- a/roles/openshift_node/tasks/config/configure-node-settings.yml +++ b/roles/openshift_node/tasks/config/configure-node-settings.yml @@ -1,7 +1,7 @@ --- - name: Configure Node settings lineinfile: - dest: /etc/sysconfig/{{ openshift.common.service_type }}-node + dest: /etc/sysconfig/{{ openshift_service_type }}-node regexp: "{{ item.regex }}" line: "{{ item.line }}" create: true diff --git a/roles/openshift_node/tasks/config/configure-proxy-settings.yml b/roles/openshift_node/tasks/config/configure-proxy-settings.yml index d60794305..7ddd319d2 100644 --- a/roles/openshift_node/tasks/config/configure-proxy-settings.yml +++ b/roles/openshift_node/tasks/config/configure-proxy-settings.yml @@ -1,7 +1,7 @@ --- - name: Configure Proxy Settings lineinfile: - dest: /etc/sysconfig/{{ openshift.common.service_type }}-node + dest: /etc/sysconfig/{{ openshift_service_type }}-node regexp: "{{ item.regex }}" line: "{{ item.line }}" create: true diff --git a/roles/openshift_node/tasks/config/install-node-deps-docker-service-file.yml b/roles/openshift_node/tasks/config/install-node-deps-docker-service-file.yml index ee91a88ab..9f1145d12 100644 --- a/roles/openshift_node/tasks/config/install-node-deps-docker-service-file.yml +++ b/roles/openshift_node/tasks/config/install-node-deps-docker-service-file.yml @@ -1,7 +1,7 @@ --- - name: Install Node dependencies docker service file template: - dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node-dep.service" + dest: "/etc/systemd/system/{{ openshift_service_type }}-node-dep.service" src: openshift.docker.node.dep.service notify: - reload systemd units diff --git a/roles/openshift_node/tasks/config/install-node-docker-service-file.yml b/roles/openshift_node/tasks/config/install-node-docker-service-file.yml index f92ff79b5..649fc5f6b 100644 --- a/roles/openshift_node/tasks/config/install-node-docker-service-file.yml +++ b/roles/openshift_node/tasks/config/install-node-docker-service-file.yml @@ -1,7 +1,7 @@ --- - name: Install Node docker service file template: - dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service" + dest: "/etc/systemd/system/{{ openshift_service_type }}-node.service" src: openshift.docker.node.service notify: - reload systemd units diff --git a/roles/openshift_node/tasks/container_images.yml b/roles/openshift_node/tasks/container_images.yml new file mode 100644 index 000000000..0b8c806ae --- /dev/null +++ b/roles/openshift_node/tasks/container_images.yml @@ -0,0 +1,20 @@ +--- +- name: Install Node system container + include_tasks: node_system_container.yml + when: + - l_is_node_system_container | bool + +- name: Install OpenvSwitch system containers + include_tasks: 
openvswitch_system_container.yml + when: + - openshift_node_use_openshift_sdn | bool + - l_is_openvswitch_system_container | bool + +- name: Pre-pull openvswitch image + command: > + docker pull {{ openshift.node.ovs_image }}:{{ openshift_image_tag }} + register: pull_result + changed_when: "'Downloaded newer image' in pull_result.stdout" + when: + - openshift_node_use_openshift_sdn | bool + - not l_is_openvswitch_system_container | bool diff --git a/roles/openshift_node/tasks/dnsmasq.yml b/roles/openshift_node/tasks/dnsmasq.yml index f210a3a21..31ca46ec0 100644 --- a/roles/openshift_node/tasks/dnsmasq.yml +++ b/roles/openshift_node/tasks/dnsmasq.yml @@ -1,43 +1,4 @@ --- -- name: Check for NetworkManager service - command: > - systemctl show NetworkManager - register: nm_show - changed_when: false - ignore_errors: True - -- name: Set fact using_network_manager - set_fact: - network_manager_active: "{{ True if 'ActiveState=active' in nm_show.stdout else False }}" - -- name: Install dnsmasq - package: name=dnsmasq state=installed - when: not openshift.common.is_atomic | bool - register: result - until: result | success - -- name: ensure origin/node directory exists - file: - state: directory - path: "{{ item }}" - owner: root - group: root - mode: '0700' - with_items: - - /etc/origin - - /etc/origin/node - -# this file is copied to /etc/dnsmasq.d/ when the node starts and is removed -# when the node stops. A dbus-message is sent to dnsmasq to add the same entries -# so that dnsmasq doesn't need to be restarted. Once we can use dnsmasq 2.77 or -# newer we can use --server-file option to update the servers dynamically and -# reload them by sending dnsmasq a SIGHUP. We write the file in case someone else -# triggers a restart of dnsmasq but not a node restart. -- name: Install node-dnsmasq.conf - template: - src: node-dnsmasq.conf.j2 - dest: /etc/origin/node/node-dnsmasq.conf - - name: Install dnsmasq configuration template: src: origin-dns.conf.j2 @@ -63,7 +24,3 @@ # Dynamic NetworkManager based dispatcher - include_tasks: dnsmasq/network-manager.yml when: network_manager_active | bool - -# Relies on ansible in order to configure static config -- include_tasks: dnsmasq/no-network-manager.yml - when: not network_manager_active | bool diff --git a/roles/openshift_node/tasks/dnsmasq_install.yml b/roles/openshift_node/tasks/dnsmasq_install.yml new file mode 100644 index 000000000..9f66bf12d --- /dev/null +++ b/roles/openshift_node/tasks/dnsmasq_install.yml @@ -0,0 +1,43 @@ +--- +- name: Check for NetworkManager service + command: > + systemctl show NetworkManager + register: nm_show + changed_when: false + ignore_errors: True + +- name: Set fact using_network_manager + set_fact: + network_manager_active: "{{ True if 'ActiveState=active' in nm_show.stdout else False }}" + +- name: Install dnsmasq + package: name=dnsmasq state=installed + when: not openshift.common.is_atomic | bool + register: result + until: result | success + +- name: ensure origin/node directory exists + file: + state: directory + path: "{{ item }}" + owner: root + group: root + mode: '0700' + with_items: + - /etc/origin + - /etc/origin/node + +# this file is copied to /etc/dnsmasq.d/ when the node starts and is removed +# when the node stops. A dbus-message is sent to dnsmasq to add the same entries +# so that dnsmasq doesn't need to be restarted. Once we can use dnsmasq 2.77 or +# newer we can use --server-file option to update the servers dynamically and +# reload them by sending dnsmasq a SIGHUP. 
We write the file in case someone else +# triggers a restart of dnsmasq but not a node restart. +- name: Install node-dnsmasq.conf + template: + src: node-dnsmasq.conf.j2 + dest: /etc/origin/node/node-dnsmasq.conf + +# Relies on ansible in order to configure static config +- include_tasks: dnsmasq/no-network-manager.yml + when: not network_manager_active | bool diff --git a/roles/openshift_node/tasks/docker/upgrade.yml b/roles/openshift_node/tasks/docker/upgrade.yml deleted file mode 100644 index d743d2188..000000000 --- a/roles/openshift_node/tasks/docker/upgrade.yml +++ /dev/null @@ -1,42 +0,0 @@ ---- -# input variables: -# - openshift.common.service_type -# - openshift.common.is_containerized -# - docker_upgrade_nuke_images -# - docker_version -# - skip_docker_restart - -- name: Check Docker image count - shell: "docker images -aq | wc -l" - register: docker_image_count - -- debug: var=docker_image_count.stdout - -# TODO(jchaloup): put all docker_upgrade_nuke_images into a block with only one condition -- name: Remove all containers and images - script: nuke_images.sh - register: nuke_images_result - when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool - -- name: Check Docker image count - shell: "docker images -aq | wc -l" - register: docker_image_count - when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool - -- debug: var=docker_image_count.stdout - when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool - -- service: - name: docker - state: stopped - register: l_openshift_node_upgrade_docker_stop_result - until: not l_openshift_node_upgrade_docker_stop_result | failed - retries: 3 - delay: 30 - -- name: Upgrade Docker - package: name=docker{{ '-' + docker_version }} state=present - register: result - until: result | success - -# starting docker happens back in ../main.yml where it calls ../restart.yml diff --git a/roles/openshift_node/tasks/install.yml b/roles/openshift_node/tasks/install.yml index 1ed4a05c1..f93aed246 100644 --- a/roles/openshift_node/tasks/install.yml +++ b/roles/openshift_node/tasks/install.yml @@ -3,14 +3,14 @@ block: - name: Install Node package package: - name: "{{ openshift.common.service_type }}-node{{ (openshift_pkg_version | default('')) | oo_image_tag_to_rpm_version(include_dash=True) }}" + name: "{{ openshift_service_type }}-node{{ (openshift_pkg_version | default('')) | oo_image_tag_to_rpm_version(include_dash=True) }}" state: present register: result until: result | success - name: Install sdn-ovs package package: - name: "{{ openshift.common.service_type }}-sdn-ovs{{ (openshift_pkg_version | default('')) | oo_image_tag_to_rpm_version(include_dash=True) }}" + name: "{{ openshift_service_type }}-sdn-ovs{{ (openshift_pkg_version | default('')) | oo_image_tag_to_rpm_version(include_dash=True) }}" state: present when: - openshift_node_use_openshift_sdn | bool diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml index ae4916761..946deb4d3 100644 --- a/roles/openshift_node/tasks/main.yml +++ b/roles/openshift_node/tasks/main.yml @@ -6,6 +6,7 @@ - deployment_type == 'openshift-enterprise' - not openshift_use_crio +- include_tasks: dnsmasq_install.yml - include_tasks: dnsmasq.yml - name: setup firewall @@ -50,6 +51,8 @@ enabled: yes state: restarted when: openshift_use_crio + register: task_result + failed_when: task_result|failed and 'could not find the requested service' not in task_result.msg|lower - name: restart NetworkManager to ensure 
resolv.conf is present systemd: diff --git a/roles/openshift_node/tasks/node_system_container.yml b/roles/openshift_node/tasks/node_system_container.yml index eb8d9a6a5..98978ec6f 100644 --- a/roles/openshift_node/tasks/node_system_container.yml +++ b/roles/openshift_node/tasks/node_system_container.yml @@ -1,8 +1,4 @@ --- -- name: Ensure proxies are in the atomic.conf - include_role: - name: openshift_atomic - tasks_from: proxy - name: Pre-pull node system container image command: > @@ -12,10 +8,10 @@ - name: Install or Update node system container oc_atomic_container: - name: "{{ openshift.common.service_type }}-node" + name: "{{ openshift_service_type }}-node" image: "{{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.node.node_system_image }}:{{ openshift_image_tag }}" values: - "DNS_DOMAIN={{ openshift.common.dns_domain }}" - "DOCKER_SERVICE={{ openshift_docker_service_name }}.service" - - "MASTER_SERVICE={{ openshift.common.service_type }}.service" + - "MASTER_SERVICE={{ openshift_service_type }}.service" state: latest diff --git a/roles/openshift_node/tasks/openvswitch_system_container.yml b/roles/openshift_node/tasks/openvswitch_system_container.yml index d33e172c1..b61bc84c1 100644 --- a/roles/openshift_node/tasks/openvswitch_system_container.yml +++ b/roles/openshift_node/tasks/openvswitch_system_container.yml @@ -7,11 +7,6 @@ l_service_name: "{{ openshift_docker_service_name }}" when: not openshift_use_crio -- name: Ensure proxies are in the atomic.conf - include_role: - name: openshift_atomic - tasks_from: proxy - - name: Pre-pull OpenVSwitch system container image command: > atomic pull --storage=ostree {{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.node.ovs_system_image }}:{{ openshift_image_tag }} diff --git a/roles/openshift_node/tasks/systemd_units.yml b/roles/openshift_node/tasks/systemd_units.yml index 397e1ba18..262ee698b 100644 --- a/roles/openshift_node/tasks/systemd_units.yml +++ b/roles/openshift_node/tasks/systemd_units.yml @@ -1,7 +1,7 @@ --- - name: Install Node service file template: - dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service" + dest: "/etc/systemd/system/{{ openshift_service_type }}-node.service" src: "{{ openshift.common.is_containerized | bool | ternary('openshift.docker.node.service', 'node.service.j2') }}" when: not l_is_node_system_container | bool notify: @@ -16,29 +16,10 @@ - name: include ovs service environment file include_tasks: config/install-ovs-service-env-file.yml - - name: Install Node system container - include_tasks: node_system_container.yml - when: - - l_is_node_system_container | bool - - - name: Install OpenvSwitch system containers - include_tasks: openvswitch_system_container.yml + - include_tasks: config/install-ovs-docker-service-file.yml when: - openshift_node_use_openshift_sdn | bool - - l_is_openvswitch_system_container | bool - -- block: - - name: Pre-pull openvswitch image - command: > - docker pull {{ openshift.node.ovs_image }}:{{ openshift_image_tag }} - register: pull_result - changed_when: "'Downloaded newer image' in pull_result.stdout" - - - include_tasks: config/install-ovs-docker-service-file.yml - when: - - openshift.common.is_containerized | bool - - openshift_node_use_openshift_sdn | bool - - not l_is_openvswitch_system_container | bool + - not l_is_openvswitch_system_container | bool - include_tasks: config/configure-node-settings.yml - include_tasks: 
config/configure-proxy-settings.yml diff --git a/roles/openshift_node/tasks/upgrade.yml b/roles/openshift_node/tasks/upgrade.yml index 561b56918..87556533a 100644 --- a/roles/openshift_node/tasks/upgrade.yml +++ b/roles/openshift_node/tasks/upgrade.yml @@ -10,161 +10,29 @@ # tasks file for openshift_node_upgrade -- include_tasks: registry_auth.yml +- name: stop services for upgrade + include_tasks: upgrade/stop_services.yml -- name: Stop node and openvswitch services - service: - name: "{{ item }}" - state: stopped - with_items: - - "{{ openshift.common.service_type }}-node" - - openvswitch - failed_when: false - -- name: Stop additional containerized services - service: - name: "{{ item }}" - state: stopped - with_items: - - "{{ openshift.common.service_type }}-master-controllers" - - "{{ openshift.common.service_type }}-master-api" - - etcd_container - failed_when: false - when: openshift.common.is_containerized | bool - -- name: Pre-pull node image - command: > - docker pull {{ openshift.node.node_image }}:{{ openshift_image_tag }} - register: pull_result - changed_when: "'Downloaded newer image' in pull_result.stdout" - when: openshift.common.is_containerized | bool - -- name: Pre-pull openvswitch image - command: > - docker pull {{ openshift.node.ovs_image }}:{{ openshift_image_tag }} - register: pull_result - changed_when: "'Downloaded newer image' in pull_result.stdout" - when: - - openshift.common.is_containerized | bool - - openshift_use_openshift_sdn | bool - -- include_tasks: docker/upgrade.yml - vars: - # We will restart Docker ourselves after everything is ready: - skip_docker_restart: True +# Ensure actually install latest package. +- name: download docker upgrade rpm + command: "{{ ansible_pkg_mgr }} install -C -y docker{{ '-' + docker_version }}" + register: result + until: result | success when: - l_docker_upgrade is defined - l_docker_upgrade | bool -- include_tasks: "{{ node_config_hook }}" - when: node_config_hook is defined - -- include_tasks: upgrade/rpm_upgrade.yml +- name: install pre-pulled rpms. 
+ include_tasks: upgrade/rpm_upgrade_install.yml vars: - component: "node" openshift_version: "{{ openshift_pkg_version | default('') }}" when: not openshift.common.is_containerized | bool -- name: Remove obsolete docker-sdn-ovs.conf - file: - path: "/etc/systemd/system/docker.service.d/docker-sdn-ovs.conf" - state: absent - -- include_tasks: upgrade/containerized_node_upgrade.yml - when: openshift.common.is_containerized | bool - -- name: Ensure containerized services stopped before Docker restart - service: - name: "{{ item }}" - state: stopped - with_items: - - etcd_container - - openvswitch - - "{{ openshift.common.service_type }}-master-api" - - "{{ openshift.common.service_type }}-master-controllers" - - "{{ openshift.common.service_type }}-node" - failed_when: false - when: openshift.common.is_containerized | bool - -- name: Stop rpm based services - service: - name: "{{ item }}" - state: stopped - with_items: - - "{{ openshift.common.service_type }}-node" - - openvswitch - failed_when: false - when: not openshift.common.is_containerized | bool - -# https://bugzilla.redhat.com/show_bug.cgi?id=1513054 -- name: Clean up dockershim data - file: - path: "/var/lib/dockershim/sandbox/" - state: absent -- name: Upgrade openvswitch - package: - name: openvswitch - state: latest - when: not openshift.common.is_containerized | bool - register: result - until: result | success - -- name: Update oreg value - yedit: - src: "{{ openshift.common.config_base }}/node/node-config.yaml" - key: 'imageConfig.format' - value: "{{ oreg_url | default(oreg_url_node) }}" - when: oreg_url is defined or oreg_url_node is defined - -# https://docs.openshift.com/container-platform/3.4/admin_guide/overcommit.html#disabling-swap-memory -- name: Check for swap usage - command: grep "^[^#].*swap" /etc/fstab - # grep: match any lines which don't begin with '#' and contain 'swap' - changed_when: false - failed_when: false - register: swap_result - - # Disable Swap Block -- block: - - - name: Disable swap - command: swapoff --all - - - name: Remove swap entries from /etc/fstab - replace: - dest: /etc/fstab - regexp: '(^[^#].*swap.*)' - replace: '# \1' - backup: yes - - - name: Add notice about disabling swap - lineinfile: - dest: /etc/fstab - line: '# OpenShift-Ansible Installer disabled swap per overcommit guidelines' - state: present - - when: - - swap_result.stdout_lines | length > 0 - - openshift_disable_swap | default(true) | bool - # End Disable Swap Block - -- name: Reset selinux context - command: restorecon -RF {{ openshift_node_data_dir }}/openshift.local.volumes - when: - - ansible_selinux is defined - - ansible_selinux.status == 'enabled' +- include_tasks: "{{ node_config_hook }}" + when: node_config_hook is defined -- name: Apply 3.6 dns config changes - yedit: - src: /etc/origin/node/node-config.yaml - key: "{{ item.key }}" - value: "{{ item.value }}" - with_items: - - key: "dnsBindAddress" - value: "127.0.0.1:53" - - key: "dnsRecursiveResolvConf" - value: "/etc/origin/node/resolv.conf" +- include_tasks: upgrade/config_changes.yml # Restart all services - include_tasks: upgrade/restart.yml @@ -181,4 +49,7 @@ retries: 24 delay: 5 +- include_tasks: dnsmasq_install.yml - include_tasks: dnsmasq.yml + +- meta: flush_handlers diff --git a/roles/openshift_node/tasks/upgrade/config_changes.yml b/roles/openshift_node/tasks/upgrade/config_changes.yml new file mode 100644 index 000000000..e22018e6d --- /dev/null +++ b/roles/openshift_node/tasks/upgrade/config_changes.yml @@ -0,0 +1,77 @@ +--- +- name: Update systemd 
units + include_tasks: ../systemd_units.yml + when: openshift.common.is_containerized + +- name: Update oreg value + yedit: + src: "{{ openshift.common.config_base }}/node/node-config.yaml" + key: 'imageConfig.format' + value: "{{ oreg_url | default(oreg_url_node) }}" + when: oreg_url is defined or oreg_url_node is defined + +- name: Remove obsolete docker-sdn-ovs.conf + file: + path: "/etc/systemd/system/docker.service.d/docker-sdn-ovs.conf" + state: absent + +# https://bugzilla.redhat.com/show_bug.cgi?id=1513054 +- name: Clean up dockershim data + file: + path: "/var/lib/dockershim/sandbox/" + state: absent + +# Disable Swap Block (pre) +- block: + - name: Remove swap entries from /etc/fstab + replace: + dest: /etc/fstab + regexp: '(^[^#].*swap.*)' + replace: '# \1' + backup: yes + + - name: Add notice about disabling swap + lineinfile: + dest: /etc/fstab + line: '# OpenShift-Ansible Installer disabled swap per overcommit guidelines' + state: present + + - name: Disable swap + command: swapoff --all + + when: + - openshift_node_upgrade_swap_result | default(False) | bool + - openshift_disable_swap | default(true) | bool +# End Disable Swap Block + +- name: Apply 3.6 dns config changes + yedit: + src: /etc/origin/node/node-config.yaml + key: "{{ item.key }}" + value: "{{ item.value }}" + with_items: + - key: "dnsBindAddress" + value: "127.0.0.1:53" + - key: "dnsRecursiveResolvConf" + value: "/etc/origin/node/resolv.conf" + +- name: Install Node service file + template: + dest: "/etc/systemd/system/{{ openshift_service_type }}-node.service" + src: "node.service.j2" + register: l_node_unit + +- name: Reset selinux context + command: restorecon -RF {{ openshift_node_data_dir }}/openshift.local.volumes + when: + - ansible_selinux is defined + - ansible_selinux.status == 'enabled' + +# NOTE: This is needed to make sure we are using the correct set +# of systemd unit files. The RPMs lay down defaults but +# the install/upgrade may override them in /etc/systemd/system/. +# NOTE: We don't use the systemd module as some versions of the module +# require a service to be part of the call. +- name: Reload systemd units + command: systemctl daemon-reload + when: l_node_unit | changed diff --git a/roles/openshift_node/tasks/upgrade/containerized_node_upgrade.yml b/roles/openshift_node/tasks/upgrade/containerized_node_upgrade.yml deleted file mode 100644 index 245de60a7..000000000 --- a/roles/openshift_node/tasks/upgrade/containerized_node_upgrade.yml +++ /dev/null @@ -1,14 +0,0 @@ ---- -# This is a hack to allow us to use systemd_units.yml, but skip the handlers which -# restart services. We will unconditionally restart all containerized services -# because we have to unconditionally restart Docker: -- set_fact: - skip_node_svc_handlers: True - -- name: Update systemd units - include_tasks: ../systemd_units.yml - -# This is a no-op because of skip_node_svc_handlers, but lets us trigger it before end of -# play when the node has already been marked schedulable again. 
(this would look strange -# in logs otherwise) -- meta: flush_handlers diff --git a/roles/openshift_node/tasks/upgrade/containerized_upgrade_pull.yml b/roles/openshift_node/tasks/upgrade/containerized_upgrade_pull.yml new file mode 100644 index 000000000..71f00dcd2 --- /dev/null +++ b/roles/openshift_node/tasks/upgrade/containerized_upgrade_pull.yml @@ -0,0 +1,15 @@ +--- +- name: Pre-pull node image + command: > + docker pull {{ openshift.node.node_image }}:{{ openshift_image_tag }} + register: pull_result + changed_when: "'Downloaded newer image' in pull_result.stdout" + +- name: Pre-pull openvswitch image + command: > + docker pull {{ openshift.node.ovs_image }}:{{ openshift_image_tag }} + register: pull_result + changed_when: "'Downloaded newer image' in pull_result.stdout" + when: openshift_use_openshift_sdn | bool + +- include_tasks: ../container_images.yml diff --git a/roles/openshift_node/tasks/upgrade/restart.yml b/roles/openshift_node/tasks/upgrade/restart.yml index 3f1abceab..717cfa712 100644 --- a/roles/openshift_node/tasks/upgrade/restart.yml +++ b/roles/openshift_node/tasks/upgrade/restart.yml @@ -1,6 +1,6 @@ --- # input variables: -# - openshift.common.service_type +# - openshift_service_type # - openshift.common.is_containerized # - openshift.common.hostname # - openshift.master.api_port @@ -13,6 +13,15 @@ - name: Reload systemd to ensure latest unit files command: systemctl daemon-reload +- name: Restart support services + service: + name: "{{ item }}" + state: restarted + enabled: True + with_items: + - NetworkManager + - dnsmasq + - name: Restart container runtime service: name: "{{ openshift_docker_service_name }}" @@ -27,9 +36,9 @@ with_items: - etcd_container - openvswitch - - "{{ openshift.common.service_type }}-master-api" - - "{{ openshift.common.service_type }}-master-controllers" - - "{{ openshift.common.service_type }}-node" + - "{{ openshift_service_type }}-master-api" + - "{{ openshift_service_type }}-master-controllers" + - "{{ openshift_service_type }}-node" failed_when: false - name: Wait for master API to come back online diff --git a/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml b/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml index fcbe1a598..d2864e6b8 100644 --- a/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml +++ b/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml @@ -1,33 +1,24 @@ --- # input variables: -# - openshift.common.service_type +# - openshift_service_type # - component # - openshift_pkg_version # - openshift.common.is_atomic -# We verified latest rpm available is suitable, so just yum update. -- name: Upgrade packages - package: "name={{ openshift.common.service_type }}-{{ component }}{{ openshift_pkg_version }} state=present" +# Pre-pull new node rpm, but don't install +- name: download new node packages + command: "{{ ansible_pkg_mgr }} install -y --downloadonly {{ openshift_node_upgrade_rpm_list | join(' ')}}" register: result until: result | success + vars: + openshift_node_upgrade_rpm_list: + - "{{ openshift_service_type }}-node{{ openshift_pkg_version }}" + - "PyYAML" + - "dnsmasq" -- name: Ensure python-yaml present for config upgrade - package: name=PyYAML state=present - when: not openshift.common.is_atomic | bool +# Pre-pull the rpms for openvswitch, but don't install +# openvswitch requires the latest version to be installed. 
+- name: download openvswitch upgrade rpm + command: "{{ ansible_pkg_mgr }} update -y --downloadonly openvswitch" register: result until: result | success - -- name: Install Node service file - template: - dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service" - src: "node.service.j2" - register: l_node_unit - -# NOTE: This is needed to make sure we are using the correct set -# of systemd unit files. The RPMs lay down defaults but -# the install/upgrade may override them in /etc/systemd/system/. -# NOTE: We don't use the systemd module as some versions of the module -# require a service to be part of the call. -- name: Reload systemd units - command: systemctl daemon-reload - when: l_node_unit | changed diff --git a/roles/openshift_node/tasks/upgrade/rpm_upgrade_install.yml b/roles/openshift_node/tasks/upgrade/rpm_upgrade_install.yml new file mode 100644 index 000000000..6390be558 --- /dev/null +++ b/roles/openshift_node/tasks/upgrade/rpm_upgrade_install.yml @@ -0,0 +1,19 @@ +--- +# input variables: +# - openshift_service_type +# - component +# - openshift_pkg_version +# - openshift.common.is_atomic + +# Install the pre-pulled RPMs +# Note: dnsmasq is covered in its own play. openvswitch is included here +# because once we have the latest rpm downloaded, it will happily be installed. +- name: install pre-pulled node packages + command: "{{ ansible_pkg_mgr }} install -C -y {{ openshift_node_upgrade_rpm_list | join(' ')}}" + register: result + until: result | success + vars: + openshift_node_upgrade_rpm_list: + - "{{ openshift_service_type }}-node{{ openshift_pkg_version }}" + - "PyYAML" + - "openvswitch" diff --git a/roles/openshift_node/tasks/upgrade/stop_services.yml b/roles/openshift_node/tasks/upgrade/stop_services.yml new file mode 100644 index 000000000..bbf1c5f25 --- /dev/null +++ b/roles/openshift_node/tasks/upgrade/stop_services.yml @@ -0,0 +1,43 @@ +--- +- name: Stop node and openvswitch services + service: + name: "{{ item }}" + state: stopped + with_items: + - "{{ openshift_service_type }}-node" + - openvswitch + failed_when: false + +- name: Ensure containerized services stopped before Docker restart + service: + name: "{{ item }}" + state: stopped + with_items: + - etcd_container + - openvswitch + - "{{ openshift_service_type }}-master-api" + - "{{ openshift_service_type }}-master-controllers" + - "{{ openshift_service_type }}-node" + failed_when: false + when: openshift.common.is_containerized | bool + +- service: + name: docker + state: stopped + register: l_openshift_node_upgrade_docker_stop_result + until: not l_openshift_node_upgrade_docker_stop_result | failed + retries: 3 + delay: 30 + when: + - l_docker_upgrade is defined + - l_docker_upgrade | bool + +- name: Stop rpm based services + service: + name: "{{ item }}" + state: stopped + with_items: + - "{{ openshift_service_type }}-node" + - openvswitch + failed_when: false + when: not openshift.common.is_containerized | bool diff --git a/roles/openshift_node/tasks/upgrade_pre.yml b/roles/openshift_node/tasks/upgrade_pre.yml new file mode 100644 index 000000000..3346b7c65 --- /dev/null +++ b/roles/openshift_node/tasks/upgrade_pre.yml @@ -0,0 +1,56 @@ +--- +# This is a hack to allow us to update various components without restarting +# services. This will persist into the upgrade play as well, so everything +# needs to be restarted by hand. +- set_fact: + skip_node_svc_handlers: True + +- include_tasks: registry_auth.yml + +- name: update package metadata to speed install later
+ command: "{{ ansible_pkg_mgr }} makecache" + register: result + until: result | success + when: not openshift.common.is_containerized | bool + +- name: Check Docker image count + shell: "docker images -aq | wc -l" + register: docker_image_count + when: + - l_docker_upgrade is defined + - l_docker_upgrade | bool + +- debug: var=docker_image_count.stdout + when: + - l_docker_upgrade is defined + - l_docker_upgrade | bool + +- include_tasks: upgrade/containerized_upgrade_pull.yml + when: openshift.common.is_containerized | bool + +# Prepull the rpms for docker upgrade, but don't install +- name: download docker upgrade rpm + command: "{{ ansible_pkg_mgr }} install -y --downloadonly docker{{ '-' + docker_version }}" + register: result + until: result | success + when: + - l_docker_upgrade is defined + - l_docker_upgrade | bool + +- include_tasks: upgrade/rpm_upgrade.yml + vars: + openshift_version: "{{ openshift_pkg_version | default('') }}" + when: not openshift.common.is_containerized | bool + +# https://docs.openshift.com/container-platform/3.4/admin_guide/overcommit.html#disabling-swap-memory +- name: Check for swap usage + command: grep "^[^#].*swap" /etc/fstab + # grep: match any lines which don't begin with '#' and contain 'swap' + changed_when: false + failed_when: false + register: swap_result + +# Set this fact here so we can use it during the next play, which is serial. +- name: set_fact swap_result + set_fact: + openshift_node_upgrade_swap_result: "{{ swap_result.stdout_lines | length > 0 | bool }}" diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2 index 16fdde02e..261cac6f1 100644 --- a/roles/openshift_node/templates/node.yaml.v1.j2 +++ b/roles/openshift_node/templates/node.yaml.v1.j2 @@ -20,9 +20,9 @@ kubeletArguments: {{ openshift.node.kubelet_args | default(None) | to_padded_yam container-runtime: - remote container-runtime-endpoint: - - /var/run/crio.sock + - /var/run/crio/crio.sock image-service-endpoint: - - /var/run/crio.sock + - /var/run/crio/crio.sock node-labels: - router=true - registry=true diff --git a/roles/openshift_node/templates/openshift.docker.node.dep.service b/roles/openshift_node/templates/openshift.docker.node.dep.service index 5964ac095..8b43beb07 100644 --- a/roles/openshift_node/templates/openshift.docker.node.dep.service +++ b/roles/openshift_node/templates/openshift.docker.node.dep.service @@ -1,11 +1,11 @@ [Unit] Requires={{ openshift_docker_service_name }}.service After={{ openshift_docker_service_name }}.service -PartOf={{ openshift.common.service_type }}-node.service -Before={{ openshift.common.service_type }}-node.service +PartOf={{ openshift_service_type }}-node.service +Before={{ openshift_service_type }}-node.service {% if openshift_use_crio %}Wants=cri-o.service{% endif %} [Service] -ExecStart=/bin/bash -c "if [[ -f /usr/bin/docker-current ]]; then echo \"DOCKER_ADDTL_BIND_MOUNTS=--volume=/usr/bin/docker-current:/usr/bin/docker-current:ro --volume=/etc/sysconfig/docker:/etc/sysconfig/docker:ro --volume=/etc/containers/registries:/etc/containers/registries:ro\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; else echo \"#DOCKER_ADDTL_BIND_MOUNTS=\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; fi" +ExecStart=/bin/bash -c "if [[ -f /usr/bin/docker-current ]]; then echo \"DOCKER_ADDTL_BIND_MOUNTS=--volume=/usr/bin/docker-current:/usr/bin/docker-current:ro --volume=/etc/sysconfig/docker:/etc/sysconfig/docker:ro 
--volume=/etc/containers/registries:/etc/containers/registries:ro\" > /etc/sysconfig/{{ openshift_service_type }}-node-dep; else echo \"#DOCKER_ADDTL_BIND_MOUNTS=\" > /etc/sysconfig/{{ openshift_service_type }}-node-dep; fi" ExecStop= -SyslogIdentifier={{ openshift.common.service_type }}-node-dep +SyslogIdentifier={{ openshift_service_type }}-node-dep diff --git a/roles/openshift_node/templates/openshift.docker.node.service b/roles/openshift_node/templates/openshift.docker.node.service index 3b33ca542..b174c7023 100644 --- a/roles/openshift_node/templates/openshift.docker.node.service +++ b/roles/openshift_node/templates/openshift.docker.node.service @@ -1,5 +1,5 @@ [Unit] -After={{ openshift.common.service_type }}-master.service +After={{ openshift_service_type }}-master.service After={{ openshift_docker_service_name }}.service After=openvswitch.service PartOf={{ openshift_docker_service_name }}.service @@ -10,20 +10,20 @@ PartOf=openvswitch.service After=ovsdb-server.service After=ovs-vswitchd.service {% endif %} -Wants={{ openshift.common.service_type }}-master.service -Requires={{ openshift.common.service_type }}-node-dep.service -After={{ openshift.common.service_type }}-node-dep.service +Wants={{ openshift_service_type }}-master.service +Requires={{ openshift_service_type }}-node-dep.service +After={{ openshift_service_type }}-node-dep.service Requires=dnsmasq.service After=dnsmasq.service [Service] -EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node -EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node-dep -ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type }}-node +EnvironmentFile=/etc/sysconfig/{{ openshift_service_type }}-node +EnvironmentFile=/etc/sysconfig/{{ openshift_service_type }}-node-dep +ExecStartPre=-/usr/bin/docker rm -f {{ openshift_service_type }}-node ExecStartPre=/usr/bin/cp /etc/origin/node/node-dnsmasq.conf /etc/dnsmasq.d/ ExecStartPre=/usr/bin/dbus-send --system --dest=uk.org.thekelleys.dnsmasq /uk/org/thekelleys/dnsmasq uk.org.thekelleys.SetDomainServers array:string:/in-addr.arpa/127.0.0.1,/{{ openshift.common.dns_domain }}/127.0.0.1 -ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node \ - --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node \ +ExecStart=/usr/bin/docker run --name {{ openshift_service_type }}-node \ + --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift_service_type }}-node \ -v /:/rootfs:ro,rslave -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} \ -e HOST=/rootfs -e HOST_ETC=/host-etc \ -v {{ openshift_node_data_dir }}:{{ openshift_node_data_dir }}:rslave \ @@ -40,10 +40,10 @@ ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node \ {% if l_bind_docker_reg_auth %} -v {{ oreg_auth_credentials_path }}:/root/.docker:ro{% endif %}\ {{ openshift.node.node_image }}:${IMAGE_VERSION} ExecStartPost=/usr/bin/sleep 10 -ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-node +ExecStop=/usr/bin/docker stop {{ openshift_service_type }}-node ExecStopPost=/usr/bin/rm /etc/dnsmasq.d/node-dnsmasq.conf ExecStopPost=/usr/bin/dbus-send --system --dest=uk.org.thekelleys.dnsmasq /uk/org/thekelleys/dnsmasq uk.org.thekelleys.SetDomainServers array:string: -SyslogIdentifier={{ openshift.common.service_type }}-node +SyslogIdentifier={{ openshift_service_type }}-node Restart=always RestartSec=5s diff --git a/roles/openshift_openstack/defaults/main.yml 
b/roles/openshift_openstack/defaults/main.yml index 929b76f54..65a647b8f 100644 --- a/roles/openshift_openstack/defaults/main.yml +++ b/roles/openshift_openstack/defaults/main.yml @@ -44,6 +44,9 @@ openshift_openstack_container_storage_setup: # populate-dns openshift_openstack_dns_records_add: [] +openshift_openstack_public_hostname_suffix: "" +openshift_openstack_private_hostname_suffix: "" +openshift_openstack_public_dns_domain: "example.com" openshift_openstack_full_dns_domain: "{{ (openshift_openstack_clusterid|trim == '') | ternary(openshift_openstack_public_dns_domain, openshift_openstack_clusterid + '.' + openshift_openstack_public_dns_domain) }}" openshift_openstack_app_subdomain: "apps" diff --git a/roles/openshift_openstack/tasks/populate-dns.yml b/roles/openshift_openstack/tasks/populate-dns.yml index eae4967f7..cf2ead5c3 100644 --- a/roles/openshift_openstack/tasks/populate-dns.yml +++ b/roles/openshift_openstack/tasks/populate-dns.yml @@ -1,7 +1,7 @@ --- - name: "Generate list of private A records" set_fact: - private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': hostvars[item]['ansible_hostname'], 'ip': hostvars[item]['private_v4'] } ] }}" + private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'hostname': hostvars[item]['ansible_hostname'] + openshift_openstack_private_hostname_suffix, 'ip': hostvars[item]['private_v4'] } ] }}" with_items: "{{ groups['cluster_hosts'] }}" - name: "Add wildcard records to the private A records for infrahosts" @@ -48,7 +48,7 @@ - name: "Generate list of public A records" set_fact: - public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': hostvars[item]['ansible_hostname'], 'ip': hostvars[item]['public_v4'] } ] }}" + public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'hostname': hostvars[item]['ansible_hostname'] + openshift_openstack_public_hostname_suffix, 'ip': hostvars[item]['public_v4'] } ] }}" with_items: "{{ groups['cluster_hosts'] }}" when: hostvars[item]['public_v4'] is defined diff --git a/roles/openshift_prometheus/README.md b/roles/openshift_prometheus/README.md index f1eca1da6..1ebeacabf 100644 --- a/roles/openshift_prometheus/README.md +++ b/roles/openshift_prometheus/README.md @@ -38,11 +38,13 @@ openshift_prometheus_args=['--storage.tsdb.retention=6h', '--storage.tsdb.min-bl Each prometheus component (prometheus, alertmanager, alertbuffer) can set pv claim by setting corresponding role variable: ``` openshift_prometheus_<COMPONENT>_storage_type: <VALUE> (pvc, emptydir) +openshift_prometheus_<COMPONENT>_storage_class: <VALUE> openshift_prometheus_<COMPONENT>_pvc_(name|size|access_modes|pv_selector): <VALUE> ``` e.g ``` openshift_prometheus_storage_type: pvc +openshift_prometheus_storage_class: glusterfs-storage openshift_prometheus_alertmanager_pvc_name: alertmanager openshift_prometheus_alertbuffer_pvc_size: 10G openshift_prometheus_pvc_access_modes: [ReadWriteOnce] diff --git a/roles/openshift_prometheus/defaults/main.yaml b/roles/openshift_prometheus/defaults/main.yaml index df331a4bb..e30108d2c 100644 --- a/roles/openshift_prometheus/defaults/main.yaml +++ b/roles/openshift_prometheus/defaults/main.yaml @@ -23,6 +23,7 @@ openshift_prometheus_pvc_name: prometheus openshift_prometheus_pvc_size: "{{ openshift_prometheus_storage_volume_size | default('10Gi') }}" openshift_prometheus_pvc_access_modes: [ReadWriteOnce] openshift_prometheus_pvc_pv_selector: "{{ openshift_prometheus_storage_labels | default({}) }}" +openshift_prometheus_sc_name: "{{ 
openshift_prometheus_storage_class | default(None) }}" # One of ['emptydir', 'pvc'] openshift_prometheus_alertmanager_storage_type: "emptydir" @@ -30,6 +31,7 @@ openshift_prometheus_alertmanager_pvc_name: prometheus-alertmanager openshift_prometheus_alertmanager_pvc_size: "{{ openshift_prometheus_alertmanager_storage_volume_size | default('10Gi') }}" openshift_prometheus_alertmanager_pvc_access_modes: [ReadWriteOnce] openshift_prometheus_alertmanager_pvc_pv_selector: "{{ openshift_prometheus_alertmanager_storage_labels | default({}) }}" +openshift_prometheus_alertmanager_sc_name: "{{ openshift_prometheus_alertmanager_storage_class | default(None) }}" # One of ['emptydir', 'pvc'] openshift_prometheus_alertbuffer_storage_type: "emptydir" @@ -37,6 +39,7 @@ openshift_prometheus_alertbuffer_pvc_name: prometheus-alertbuffer openshift_prometheus_alertbuffer_pvc_size: "{{ openshift_prometheus_alertbuffer_storage_volume_size | default('10Gi') }}" openshift_prometheus_alertbuffer_pvc_access_modes: [ReadWriteOnce] openshift_prometheus_alertbuffer_pvc_pv_selector: "{{ openshift_prometheus_alertbuffer_storage_labels | default({}) }}" +openshift_prometheus_alertbuffer_sc_name: "{{ openshift_prometheus_alertbuffer_storage_class | default(None) }}" # container resources openshift_prometheus_cpu_limit: null diff --git a/roles/openshift_prometheus/tasks/install_prometheus.yaml b/roles/openshift_prometheus/tasks/install_prometheus.yaml index ad15dc65f..abc5dd476 100644 --- a/roles/openshift_prometheus/tasks/install_prometheus.yaml +++ b/roles/openshift_prometheus/tasks/install_prometheus.yaml @@ -131,6 +131,7 @@ access_modes: "{{ openshift_prometheus_pvc_access_modes }}" volume_capacity: "{{ openshift_prometheus_pvc_size }}" selector: "{{ openshift_prometheus_pvc_pv_selector }}" + storage_class_name: "{{ openshift_prometheus_sc_name }}" when: openshift_prometheus_storage_type == 'pvc' - name: create alertmanager pvc @@ -140,6 +141,7 @@ access_modes: "{{ openshift_prometheus_alertmanager_pvc_access_modes }}" volume_capacity: "{{ openshift_prometheus_alertmanager_pvc_size }}" selector: "{{ openshift_prometheus_alertmanager_pvc_pv_selector }}" + storage_class_name: "{{ openshift_prometheus_alertmanager_sc_name }}" when: openshift_prometheus_alertmanager_storage_type == 'pvc' - name: create alertbuffer pvc @@ -149,6 +151,7 @@ access_modes: "{{ openshift_prometheus_alertbuffer_pvc_access_modes }}" volume_capacity: "{{ openshift_prometheus_alertbuffer_pvc_size }}" selector: "{{ openshift_prometheus_alertbuffer_pvc_pv_selector }}" + storage_class_name: "{{ openshift_prometheus_alertbuffer_sc_name }}" when: openshift_prometheus_alertbuffer_storage_type == 'pvc' # prometheus configmap diff --git a/roles/openshift_provisioners/tasks/install_efs.yaml b/roles/openshift_provisioners/tasks/install_efs.yaml index 6e8792446..e543d753c 100644 --- a/roles/openshift_provisioners/tasks/install_efs.yaml +++ b/roles/openshift_provisioners/tasks/install_efs.yaml @@ -66,7 +66,7 @@ - name: "Set anyuid permissions for efs" command: > - {{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy + {{ openshift.common.client_binary}} adm --config={{ mktemp.stdout }}/admin.kubeconfig policy add-scc-to-user anyuid system:serviceaccount:{{openshift_provisioners_project}}:provisioners-efs register: efs_output failed_when: efs_output.rc == 1 and 'exists' not in efs_output.stderr diff --git a/roles/openshift_repos/tasks/main.yaml b/roles/openshift_repos/tasks/main.yaml index 5e7bde1e1..83954eaf8 100644 --- 
a/roles/openshift_repos/tasks/main.yaml +++ b/roles/openshift_repos/tasks/main.yaml @@ -37,6 +37,13 @@ - when: r_openshift_repos_has_run is not defined block: + - include_tasks: rhel_repos.yml + when: + - ansible_distribution == 'RedHat' + - deployment_type == 'openshift-enterprise' + - (rhel_subscription_user or rhsub_user) is defined + - (rhel_subscription_password or rhsub_pass) is defined + - include_tasks: centos_repos.yml when: - ansible_os_family == "RedHat" diff --git a/roles/openshift_repos/tasks/rhel_repos.yml b/roles/openshift_repos/tasks/rhel_repos.yml new file mode 100644 index 000000000..c384cbe9a --- /dev/null +++ b/roles/openshift_repos/tasks/rhel_repos.yml @@ -0,0 +1,34 @@ +--- +- name: Ensure RHEL rhui repositories are disabled + command: bash -c "yum -q --noplugins repolist | grep -v 'repo id' | grep 'rhui'" + register: repo_rhui + changed_when: "repo_rhui.rc != 1" + failed_when: repo_rhui.rc == 11 + +- name: Disable RHEL rhui repositories + command: bash -c "yum-config-manager \ + --disable 'rhui-REGION-client-config-server-7' \ + --disable 'rhui-REGION-rhel-server-rh-common' \ + --disable 'rhui-REGION-rhel-server-releases' \ + --disable 'rhui-REGION-client-config-server-7'" + when: repo_rhui.changed + +- name: Ensure RHEL repositories are enabled + command: bash -c "yum -q --noplugins repolist | grep -v 'repo id' | grep 'Red Hat' | wc -l" + register: repo_rhel + changed_when: "'4' not in repo_rhel.stdout" + failed_when: repo_rhel.rc == 11 + +- name: Disable all repositories + command: bash -c "subscription-manager repos --disable='*'" + when: repo_rhel.changed + +- name: Enable RHEL repositories + command: subscription-manager repos \ + --enable="rhel-7-server-rpms" \ + --enable="rhel-7-server-extras-rpms" \ + --enable="rhel-7-server-ose-{{ (openshift_release | default('')).split('.')[0:2] | join('.') }}-rpms" \ + --enable="rhel-7-fast-datapath-rpms" + register: subscribe_repos + until: subscribe_repos | succeeded + when: repo_rhel.changed diff --git a/roles/openshift_storage_glusterfs/README.md b/roles/openshift_storage_glusterfs/README.md index be749a2e1..f7bd58db3 100644 --- a/roles/openshift_storage_glusterfs/README.md +++ b/roles/openshift_storage_glusterfs/README.md @@ -82,6 +82,7 @@ GlusterFS cluster into a new or existing OpenShift cluster: | openshift_storage_glusterfs_nodeselector | 'glusterfs=storage-host'| Selector to determine which nodes will host GlusterFS pods in native mode. **NOTE:** The label value is taken from the cluster name | openshift_storage_glusterfs_use_default_selector | False | Whether to use a default node selector for the GlusterFS namespace/project. If False, the namespace/project will have no restricting node selector. If True, uses pre-existing or default (e.g. osm_default_node_selector) node selectors. **NOTE:** If True, nodes which will host GlusterFS pods must already have the additional labels. | openshift_storage_glusterfs_storageclass | True | Automatically create a StorageClass for each GlusterFS cluster +| openshift_storage_glusterfs_storageclass_default | False | Sets the StorageClass for each GlusterFS cluster as default | openshift_storage_glusterfs_image | 'gluster/gluster-centos'| Container image to use for GlusterFS pods, enterprise default is 'rhgs3/rhgs-server-rhel7' | openshift_storage_glusterfs_version | 'latest' | Container image version to use for GlusterFS pods | openshift_storage_glusterfs_block_deploy | True | Deploy glusterblock provisioner service @@ -125,13 +126,14 @@ registry. 
These variables start with the prefix values in their corresponding non-registry variables. The following variables are an exception: -| Name | Default value | Description | -|-------------------------------------------------------|-----------------------|-----------------------------------------| -| openshift_storage_glusterfs_registry_namespace | registry namespace | Default is to use the hosted registry's namespace, otherwise 'glusterfs' -| openshift_storage_glusterfs_registry_name | 'registry' | This allows for the logical separation of the registry GlusterFS cluster from other GlusterFS clusters -| openshift_storage_glusterfs_registry_storageclass | False | It is recommended to not create a StorageClass for GlusterFS clusters serving registry storage, so as to avoid performance penalties -| openshift_storage_glusterfs_registry_heketi_admin_key | auto-generated | Separate from the above -| openshift_storage_glusterfs_registry_heketi_user_key | auto-generated | Separate from the above +| Name | Default value | Description | +|-----------------------------------------------------------|-----------------------|-----------------------------------------| +| openshift_storage_glusterfs_registry_namespace | registry namespace | Default is to use the hosted registry's namespace, otherwise 'glusterfs' +| openshift_storage_glusterfs_registry_name | 'registry' | This allows for the logical separation of the registry GlusterFS cluster from other GlusterFS clusters +| openshift_storage_glusterfs_registry_storageclass | False | It is recommended to not create a StorageClass for GlusterFS clusters serving registry storage, so as to avoid performance penalties +| openshift_storage_glusterfs_registry_storageclass_default | False | Sets the StorageClass for each GlusterFS cluster as default +| openshift_storage_glusterfs_registry_heketi_admin_key | auto-generated | Separate from the above +| openshift_storage_glusterfs_registry_heketi_user_key | auto-generated | Separate from the above Additionally, this role's behavior responds to several registry-specific variables in the [openshift_hosted role](../openshift_hosted/README.md): @@ -147,7 +149,6 @@ Dependencies ------------ * os_firewall -* openshift_hosted_facts * openshift_repos * lib_openshift diff --git a/roles/openshift_storage_glusterfs/defaults/main.yml b/roles/openshift_storage_glusterfs/defaults/main.yml index b7b3c0db2..da34fab2a 100644 --- a/roles/openshift_storage_glusterfs/defaults/main.yml +++ b/roles/openshift_storage_glusterfs/defaults/main.yml @@ -5,6 +5,7 @@ openshift_storage_glusterfs_name: 'storage' openshift_storage_glusterfs_nodeselector: "glusterfs={{ openshift_storage_glusterfs_name }}-host" openshift_storage_glusterfs_use_default_selector: False openshift_storage_glusterfs_storageclass: True +openshift_storage_glusterfs_storageclass_default: False openshift_storage_glusterfs_image: "{{ 'rhgs3/rhgs-server-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'gluster/gluster-centos' | quote }}" openshift_storage_glusterfs_version: 'latest' openshift_storage_glusterfs_block_deploy: True @@ -51,6 +52,7 @@ openshift_storage_glusterfs_registry_name: 'registry' openshift_storage_glusterfs_registry_nodeselector: "glusterfs={{ openshift_storage_glusterfs_registry_name }}-host" openshift_storage_glusterfs_registry_use_default_selector: "{{ openshift_storage_glusterfs_use_default_selector }}" openshift_storage_glusterfs_registry_storageclass: False +openshift_storage_glusterfs_registry_storageclass_default: False 
openshift_storage_glusterfs_registry_image: "{{ openshift_storage_glusterfs_image }}" openshift_storage_glusterfs_registry_version: "{{ openshift_storage_glusterfs_version }}" openshift_storage_glusterfs_registry_block_deploy: "{{ openshift_storage_glusterfs_block_deploy }}" diff --git a/roles/openshift_storage_glusterfs/meta/main.yml b/roles/openshift_storage_glusterfs/meta/main.yml index 0cdd33880..6a4ef942b 100644 --- a/roles/openshift_storage_glusterfs/meta/main.yml +++ b/roles/openshift_storage_glusterfs/meta/main.yml @@ -10,6 +10,6 @@ galaxy_info: versions: - 7 dependencies: -- role: openshift_hosted_facts +- role: openshift_facts - role: lib_openshift - role: lib_os_firewall diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml index 73b9791eb..2ea7286f3 100644 --- a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml +++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml @@ -7,6 +7,7 @@ glusterfs_nodeselector: "{{ openshift_storage_glusterfs_nodeselector | default(['storagenode', openshift_storage_glusterfs_name] | join('=')) | map_from_pairs }}" glusterfs_use_default_selector: "{{ openshift_storage_glusterfs_use_default_selector }}" glusterfs_storageclass: "{{ openshift_storage_glusterfs_storageclass }}" + glusterfs_storageclass_default: "{{ openshift_storage_glusterfs_storageclass_default | bool }}" glusterfs_image: "{{ openshift_storage_glusterfs_image }}" glusterfs_version: "{{ openshift_storage_glusterfs_version }}" glusterfs_block_deploy: "{{ openshift_storage_glusterfs_block_deploy | bool }}" diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml index 30e83e79b..0c2fcb2c5 100644 --- a/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml +++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml @@ -23,7 +23,7 @@ state: absent labels: "{{ glusterfs_nodeselector | oo_dict_to_list_of_dict }}" with_items: "{{ groups.all }}" - when: glusterfs_wipe + when: "'openshift' in hostvars[item] and glusterfs_wipe" - name: Delete pre-existing GlusterFS config file: diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml index 7466702b8..b7cff6514 100644 --- a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml +++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml @@ -7,6 +7,7 @@ glusterfs_nodeselector: "{{ openshift_storage_glusterfs_registry_nodeselector | default(['storagenode', openshift_storage_glusterfs_registry_name] | join('=')) | map_from_pairs }}" glusterfs_use_default_selector: "{{ openshift_storage_glusterfs_registry_use_default_selector }}" glusterfs_storageclass: "{{ openshift_storage_glusterfs_registry_storageclass }}" + glusterfs_storageclass_default: "{{ openshift_storage_glusterfs_registry_storageclass_default | bool }}" glusterfs_image: "{{ openshift_storage_glusterfs_registry_image }}" glusterfs_version: "{{ openshift_storage_glusterfs_registry_version }}" glusterfs_block_deploy: "{{ openshift_storage_glusterfs_registry_block_deploy | bool }}" diff --git a/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-storageclass.yml.j2 index 454e84aaf..5ea17428f 100644 --- a/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-storageclass.yml.j2 +++ 
b/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-storageclass.yml.j2 @@ -3,6 +3,10 @@ apiVersion: storage.k8s.io/v1beta1 kind: StorageClass metadata: name: glusterfs-{{ glusterfs_name }} +{% if glusterfs_storageclass_default is defined and glusterfs_storageclass_default %} + annotations: + storageclass.kubernetes.io/is-default-class: "true" +{% endif %} provisioner: kubernetes.io/glusterfs parameters: resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}" diff --git a/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2 index 095fb780f..ca87807fe 100644 --- a/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2 +++ b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2 @@ -3,6 +3,10 @@ apiVersion: storage.k8s.io/v1 kind: StorageClass metadata: name: glusterfs-{{ glusterfs_name }} +{% if glusterfs_storageclass_default is defined and glusterfs_storageclass_default %} + annotations: + storageclass.kubernetes.io/is-default-class: "true" +{% endif %} provisioner: kubernetes.io/glusterfs parameters: resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}" diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-storageclass.yml.j2 index 095fb780f..ca87807fe 100644 --- a/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-storageclass.yml.j2 +++ b/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-storageclass.yml.j2 @@ -3,6 +3,10 @@ apiVersion: storage.k8s.io/v1 kind: StorageClass metadata: name: glusterfs-{{ glusterfs_name }} +{% if glusterfs_storageclass_default is defined and glusterfs_storageclass_default %} + annotations: + storageclass.kubernetes.io/is-default-class: "true" +{% endif %} provisioner: kubernetes.io/glusterfs parameters: resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}" diff --git a/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-storageclass.yml.j2 index 095fb780f..ca87807fe 100644 --- a/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-storageclass.yml.j2 +++ b/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-storageclass.yml.j2 @@ -3,6 +3,10 @@ apiVersion: storage.k8s.io/v1 kind: StorageClass metadata: name: glusterfs-{{ glusterfs_name }} +{% if glusterfs_storageclass_default is defined and glusterfs_storageclass_default %} + annotations: + storageclass.kubernetes.io/is-default-class: "true" +{% endif %} provisioner: kubernetes.io/glusterfs parameters: resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}" diff --git a/roles/openshift_storage_nfs/meta/main.yml b/roles/openshift_storage_nfs/meta/main.yml index 98f7c317e..d61e6873a 100644 --- a/roles/openshift_storage_nfs/meta/main.yml +++ b/roles/openshift_storage_nfs/meta/main.yml @@ -11,4 +11,4 @@ galaxy_info: - 7 dependencies: - role: lib_os_firewall -- role: openshift_hosted_facts +- role: openshift_facts diff --git 
a/roles/openshift_version/defaults/main.yml b/roles/openshift_version/defaults/main.yml index 01a1a7472..354699637 100644 --- a/roles/openshift_version/defaults/main.yml +++ b/roles/openshift_version/defaults/main.yml @@ -1,2 +1,10 @@ --- openshift_protect_installed_version: True + +openshift_service_type_dict: + origin: origin + openshift-enterprise: atomic-openshift + +openshift_service_type: "{{ openshift_service_type_dict[openshift_deployment_type] }}" + +openshift_use_crio_only: False diff --git a/roles/openshift_version/meta/main.yml b/roles/openshift_version/meta/main.yml index 5d7683120..d0ad4b7d2 100644 --- a/roles/openshift_version/meta/main.yml +++ b/roles/openshift_version/meta/main.yml @@ -13,3 +13,4 @@ galaxy_info: - cloud dependencies: - role: lib_utils +- role: openshift_facts diff --git a/roles/openshift_version/tasks/main.yml b/roles/openshift_version/tasks/main.yml index 4f9158ade..ae0f68a5b 100644 --- a/roles/openshift_version/tasks/main.yml +++ b/roles/openshift_version/tasks/main.yml @@ -101,13 +101,13 @@ when: is_containerized | bool - block: - - name: Get available {{ openshift.common.service_type}} version + - name: Get available {{ openshift_service_type}} version repoquery: - name: "{{ openshift.common.service_type}}" + name: "{{ openshift_service_type}}" ignore_excluders: true register: rpm_results - fail: - msg: "Package {{ openshift.common.service_type}} not found" + msg: "Package {{ openshift_service_type}} not found" when: not rpm_results.results.package_found - set_fact: openshift_rpm_version: "{{ rpm_results.results.versions.available_versions.0 | default('0.0', True) }}" @@ -196,7 +196,7 @@ - openshift_version.startswith(openshift_release) | bool msg: |- You requested openshift_release {{ openshift_release }}, which is not matched by - the latest OpenShift RPM we detected as {{ openshift.common.service_type }}-{{ openshift_version }} + the latest OpenShift RPM we detected as {{ openshift_service_type }}-{{ openshift_version }} on host {{ inventory_hostname }}. We will only install the latest RPMs, so please ensure you are getting the release you expect. You may need to adjust your Ansible inventory, modify the repositories diff --git a/roles/openshift_version/tasks/set_version_rpm.yml b/roles/openshift_version/tasks/set_version_rpm.yml index c40777bf1..c7ca5ceae 100644 --- a/roles/openshift_version/tasks/set_version_rpm.yml +++ b/roles/openshift_version/tasks/set_version_rpm.yml @@ -8,14 +8,14 @@ - openshift_version is not defined - block: - - name: Get available {{ openshift.common.service_type}} version + - name: Get available {{ openshift_service_type}} version repoquery: - name: "{{ openshift.common.service_type}}" + name: "{{ openshift_service_type}}" ignore_excluders: true register: rpm_results - fail: - msg: "Package {{ openshift.common.service_type}} not found" + msg: "Package {{ openshift_service_type}} not found" when: not rpm_results.results.package_found - set_fact: diff --git a/roles/rhel_subscribe/README.md b/roles/rhel_subscribe/README.md new file mode 100644 index 000000000..15eaf4f30 --- /dev/null +++ b/roles/rhel_subscribe/README.md @@ -0,0 +1,29 @@ +RHEL Subscribe +============== + +Subscribes the RHEL servers and adds the OpenShift Enterprise repos. + +Role variables +-------------- + +### `rhsub_user` + +Username for the subscription-manager. + +### `rhsub_pass` + +Password for the subscription-manager. + +### `rhsub_pool` + +Name of the pool to attach (optional). 
+ +### `rhsub_server` + +Custom hostname for the Satellite server (optional). + +### `openshift_release` + +Version for the OpenShift Enterprise repositories. + +Example: `3.6` diff --git a/roles/rhel_subscribe/defaults/main.yml b/roles/rhel_subscribe/defaults/main.yml new file mode 100644 index 000000000..80b2ab919 --- /dev/null +++ b/roles/rhel_subscribe/defaults/main.yml @@ -0,0 +1,2 @@ +--- +rhsub_pool: 'Red Hat OpenShift Container Platform, Premium*' diff --git a/roles/rhel_subscribe/tasks/enterprise.yml b/roles/rhel_subscribe/tasks/enterprise.yml deleted file mode 100644 index fa74c9953..000000000 --- a/roles/rhel_subscribe/tasks/enterprise.yml +++ /dev/null @@ -1,25 +0,0 @@ ---- -- name: Disable all repositories - command: subscription-manager repos --disable="*" - -- set_fact: - default_ose_version: '3.6' - when: deployment_type == 'openshift-enterprise' - -- set_fact: - ose_version: "{{ lookup('env', 'ose_version') | default(default_ose_version, True) }}" - -- fail: - msg: "{{ ose_version }} is not a valid version for {{ deployment_type }} deployment type" - when: - - deployment_type == 'openshift-enterprise' - - ose_version not in ['3.1', '3.2', '3.3', '3.4', '3.5', '3.6'] ) - -- name: Enable RHEL repositories - command: subscription-manager repos \ - --enable="rhel-7-server-rpms" \ - --enable="rhel-7-server-extras-rpms" \ - --enable="rhel-7-server-ose-{{ ose_version }}-rpms" \ - --enable="rhel-7-fast-datapath-rpms" - register: subscribe_repos - until: subscribe_repos | succeeded diff --git a/roles/rhel_subscribe/tasks/main.yml b/roles/rhel_subscribe/tasks/main.yml index f83cf9157..74ee8bbfe 100644 --- a/roles/rhel_subscribe/tasks/main.yml +++ b/roles/rhel_subscribe/tasks/main.yml @@ -1,12 +1,8 @@ --- -# TODO: Enhance redhat_subscription module -# to make it able to attach to a pool -# to make it able to enable repositories - - set_fact: + rhel_subscription_pass: "{{ lookup('env', 'rhel_subscription_pass') | default(rhsub_pass | default(omit, True)) }}" rhel_subscription_pool: "{{ lookup('env', 'rhel_subscription_pool') | default(rhsub_pool | default('Red Hat OpenShift Container Platform, Premium*')) }}" rhel_subscription_user: "{{ lookup('env', 'rhel_subscription_user') | default(rhsub_user | default(omit, True)) }}" - rhel_subscription_pass: "{{ lookup('env', 'rhel_subscription_pass') | default(rhsub_pass | default(omit, True)) }}" rhel_subscription_server: "{{ lookup('env', 'rhel_subscription_server') | default(rhsub_server | default(omit, True)) }}" - fail: @@ -14,23 +10,12 @@ when: ansible_distribution != 'RedHat' - fail: - msg: Either rhsub_user or the rhel_subscription_user env variable are required for this role. - when: rhel_subscription_user is not defined + msg: The rhel_subscription_user variable is required for this role. + when: rhel_subscription_user is not defined or not rhsub_user is not defined - fail: - msg: Either rhsub_pass or the rhel_subscription_pass env variable are required for this role. - when: rhel_subscription_pass is not defined - -- name: Detecting Atomic Host Operating System - stat: - path: /run/ostree-booted - register: ostree_booted - -- name: Satellite preparation - command: "rpm -Uvh http://{{ rhel_subscription_server }}/pub/katello-ca-consumer-latest.noarch.rpm" - args: - creates: /etc/rhsm/ca/katello-server-ca.pem - when: rhel_subscription_server is defined and rhel_subscription_server + msg: The rhel_subscription_pass variable is required for this role. 
+ when: rhel_subscription_pass is not defined or not rhsub_pass is not defined - name: Install Red Hat Subscription manager yum: @@ -39,37 +24,58 @@ register: result until: result | success -- name: RedHat subscriptions +- name: Is host already registered? + command: bash -c "subscription-manager version" + register: rh_subscribed + changed_when: "'not registered' in rh_subscribed.stdout" + ignore_errors: yes + +- name: Register host redhat_subscription: username: "{{ rhel_subscription_user }}" password: "{{ rhel_subscription_pass }}" register: rh_subscription until: rh_subscription | succeeded + when: + - "'not registered' in rh_subscribed.stdout" + - rhel_subscription_user is defined + - rhel_subscription_pass is defined -- name: Retrieve the OpenShift Pool ID - command: subscription-manager list --available --matches="{{ rhel_subscription_pool }}" --pool-only - register: openshift_pool_id - until: openshift_pool_id | succeeded - changed_when: False +- fail: + msg: 'Unable to register host with Red Hat Subscription Manager' + when: + - "'not registered' in rh_subscribed.stdout" + - rh_subscription.failed - name: Determine if OpenShift Pool Already Attached - command: subscription-manager list --consumed --matches="{{ rhel_subscription_pool }}" --pool-only + command: bash -c "subscription-manager list --consumed --pool-only --matches '*OpenShift*' | grep {{ rhel_subscription_pool }}" register: openshift_pool_attached - until: openshift_pool_attached | succeeded - changed_when: False - when: openshift_pool_id.stdout == '' + changed_when: rhel_subscription_pool not in openshift_pool_attached.stdout + failed_when: openshift_pool_attached.rc == 2 + ignore_errors: yes + +- name: Retrieve the OpenShift Pool ID + command: bash -c "subscription-manager list --available --pool-only --matches '*OpenShift*' | grep {{ rhel_subscription_pool }}" + register: openshift_pool_retrieve + changed_when: rhel_subscription_pool in openshift_pool_retrieve.stdout + when: rhel_subscription_pool not in openshift_pool_attached.stdout + ignore_errors: yes - fail: - msg: "Unable to find pool matching {{ rhel_subscription_pool }} in available or consumed pools" - when: openshift_pool_id.stdout == '' and openshift_pool_attached is defined and openshift_pool_attached.stdout == '' + msg: "Unable to find pool matching {{ rhel_subscription_pool }} in available pools" + when: + - rhel_subscription_pool not in openshift_pool_attached.stdout + - rhel_subscription_pool not in openshift_pool_retrieve.stdout - name: Attach to OpenShift Pool - command: subscription-manager attach --pool {{ openshift_pool_id.stdout_lines[0] }} - register: subscribe_pool - until: subscribe_pool | succeeded - when: openshift_pool_id.stdout != '' + command: bash -c "subscription-manager attach --pool {{ rhel_subscription_pool }}" + register: openshift_pool_attached + changed_when: "'Successfully attached a subscription' in openshift_pool_attached.stdout" + when: rhel_subscription_pool not in openshift_pool_attached.stdout -- include_tasks: enterprise.yml +- include_role: + role: rhel_subscribe + tasks_from: satellite when: - - deployment_type == 'openshift-enterprise' - - not ostree_booted.stat.exists | bool + - (rhel_subscription_server or rhsub_server) is defined + - (rhel_subscription_server or rhsub_server) diff --git a/roles/rhel_subscribe/tasks/satellite.yml b/roles/rhel_subscribe/tasks/satellite.yml new file mode 100644 index 000000000..b2b2a621d --- /dev/null +++ b/roles/rhel_subscribe/tasks/satellite.yml @@ -0,0 +1,5 @@ +--- +- name: 
Satellite preparation + command: "rpm -Uvh http://{{ rhel_subscription_server }}/pub/katello-ca-consumer-latest.noarch.rpm" + args: + creates: /etc/rhsm/ca/katello-server-ca.pem
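For reference, the reworked `rhel_subscribe` role and the new `roles/openshift_repos/tasks/rhel_repos.yml` above are driven by the `rhsub_*` and `openshift_release` inventory variables documented in the README added in this diff. A minimal sketch of how an inventory might set them (all values below are illustrative placeholders, not taken from this diff):

```
# Hypothetical inventory variables for the rhel_subscribe role; placeholder values only.
rhsub_user: rhn-example-user              # subscription-manager username (required)
rhsub_pass: rhn-example-password          # subscription-manager password (required)
rhsub_pool: 'Red Hat OpenShift Container Platform, Premium*'  # optional; matches the role default
rhsub_server: satellite.example.com       # optional; custom Satellite hostname
openshift_release: '3.7'                  # selects rhel-7-server-ose-3.7-rpms in rhel_repos.yml
```

When `rhsub_server` is set, registration also runs the new `tasks/satellite.yml` entry point, which installs the Satellite's katello-ca-consumer RPM.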