Diffstat (limited to 'playbooks')
74 files changed, 437 insertions, 1472 deletions
diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml
index 07f10d48c..5ed55a817 100644
--- a/playbooks/adhoc/uninstall.yml
+++ b/playbooks/adhoc/uninstall.yml
@@ -151,6 +151,14 @@
       - lbr0
       - vlinuxbr
       - vovsbr
+
+    - name: Remove virtual devices
+      command: nmcli delete device "{{ item }}"
+      failed_when: False
+      with_items:
+      - tun0
+      - docker0
+      when: openshift_remove_all | default(true) | bool

   - shell: atomic uninstall "{{ item }}"-master-api
@@ -264,12 +272,30 @@
     - "{{ directories.results | default([]) }}"
     - files

+  - shell: systemctl daemon-reload
+    changed_when: False
+
+  - name: restart container-engine
+    service: name=container-engine state=stopped enabled=no
+    failed_when: false
+    register: container_engine
+
+  - name: restart docker
+    service: name=docker state=stopped enabled=no
+    failed_when: false
+    when: not (container_engine | changed)
+    register: l_docker_restart_docker_in_pb_result
+    until: not l_docker_restart_docker_in_pb_result | failed
+    retries: 3
+    delay: 30
+
   - name: Remove remaining files
     file: path={{ item }} state=absent
     with_items:
     - /etc/ansible/facts.d/openshift.fact
     - /etc/openshift
     - /etc/openshift-sdn
+    - /etc/pki/ca-trust/source/anchors/openshift-ca.crt
     - /etc/sysconfig/atomic-openshift-node
     - /etc/sysconfig/atomic-openshift-node-dep
     - /etc/sysconfig/openshift-node-dep
@@ -284,23 +310,38 @@
     - /etc/systemd/system/origin-node-dep.service
     - /etc/systemd/system/origin-node.service
     - /etc/systemd/system/origin-node.service.wants
+    - /var/lib/docker
+
+  - name: Rebuild ca-trust
+    command: update-ca-trust
+
+  - name: Reset Docker proxy configuration
+    lineinfile:
+      state=absent
+      dest=/etc/sysconfig/docker
+      regexp='(NO_PROXY|HTTP_PROXY|HTTPS_PROXY)=.*'
+
+  - name: Reset Docker registry configuration
+    lineinfile:
+      state=absent
+      dest=/etc/sysconfig/docker
+      regexp='(ADD_REGISTRY|BLOCK_REGISTRY|INSECURE_REGISTRY)=.*'
+
+  - name: Detect Docker storage configuration
+    shell: vgs -o name | grep docker
+    register: docker_vg_name
+    failed_when: false
+    changed_when: false

-  - shell: systemctl daemon-reload
-    changed_when: False
+  - name: Wipe out Docker storage contents
+    command: vgremove -f {{ item }}
+    with_items: "{{ docker_vg_name.stdout_lines }}"
+    when: docker_vg_name.rc == 0

-  - name: restart container-engine
-    service: name=container-engine state=restarted
-    failed_when: false
-    register: container_engine
+  - name: Wipe out Docker storage configuration
+    file: path=/etc/sysconfig/docker-storage state=absent
+    when: docker_vg_name.rc == 0

-  - name: restart docker
-    service: name=docker state=restarted
-    failed_when: false
-    when: not (container_engine | changed)
-    register: l_docker_restart_docker_in_pb_result
-    until: not l_docker_restart_docker_in_pb_result | failed
-    retries: 3
-    delay: 30

 - hosts: masters
   become: yes
@@ -525,3 +566,7 @@
     with_items:
     - /etc/ansible/facts.d/openshift.fact
     - /var/lib/haproxy/stats
+    # Here we remove only limits.conf rather than directory, as users may put their files.
+    # - /etc/systemd/system/haproxy.service.d
+    - /etc/systemd/system/haproxy.service.d/limits.conf
+    - /etc/systemd/system/haproxy.service
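The stop-with-retries change above is the pattern to note: `until` re-evaluates the registered result and re-runs the task until the stop succeeds. A minimal standalone sketch, with the play target and delay values illustrative rather than taken from the patch:

```yaml
---
- hosts: nodes
  become: yes
  tasks:
  - name: stop and disable docker, retrying if the daemon is busy
    service: name=docker state=stopped enabled=no
    failed_when: false                       # never abort the uninstall here
    register: l_docker_stop_result
    until: not l_docker_stop_result | failed # re-run until the stop succeeds
    retries: 3
    delay: 30
```
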
diff --git a/playbooks/aws/openshift-cluster/hosted.yml b/playbooks/aws/openshift-cluster/hosted.yml
new file mode 100644
index 000000000..db6e3b8e1
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/hosted.yml
@@ -0,0 +1,22 @@
+---
+- include: ../../common/openshift-cluster/openshift_hosted.yml
+
+- include: ../../common/openshift-cluster/openshift_metrics.yml
+  when: openshift_metrics_install_metrics | default(false) | bool
+
+- include: ../../common/openshift-cluster/openshift_logging.yml
+  when: openshift_logging_install_logging | default(false) | bool
+
+- include: ../../common/openshift-cluster/service_catalog.yml
+  when: openshift_enable_service_catalog | default(false) | bool
+
+- include: ../../common/openshift-management/config.yml
+  when: openshift_management_install_management | default(false) | bool
+
+- name: Print deprecated variable warning message if necessary
+  hosts: oo_first_master
+  gather_facts: no
+  tasks:
+  - debug: msg="{{__deprecation_message}}"
+    when:
+    - __deprecation_message | default ('') | length > 0
diff --git a/playbooks/aws/openshift-cluster/install.yml b/playbooks/aws/openshift-cluster/install.yml
index 4d0bf9531..1e8118490 100644
--- a/playbooks/aws/openshift-cluster/install.yml
+++ b/playbooks/aws/openshift-cluster/install.yml
@@ -21,5 +21,29 @@
 - name: run the std_include
   include: ../../common/openshift-cluster/std_include.yml

-- name: run the config
-  include: ../../common/openshift-cluster/config.yml
+- name: perform the installer openshift-checks
+  include: ../../common/openshift-checks/install.yml
+
+- name: etcd install
+  include: ../../common/openshift-etcd/config.yml
+
+- name: include nfs
+  include: ../../common/openshift-nfs/config.yml
+  when: groups.oo_nfs_to_config | default([]) | count > 0
+
+- name: include loadbalancer
+  include: ../../common/openshift-loadbalancer/config.yml
+  when: groups.oo_lb_to_config | default([]) | count > 0
+
+- name: include openshift-master config
+  include: ../../common/openshift-master/config.yml
+
+- name: include master additional config
+  include: ../../common/openshift-master/additional_config.yml
+
+- name: include master additional config
+  include: ../../common/openshift-node/config.yml
+
+- name: include openshift-glusterfs
+  include: ../../common/openshift-glusterfs/config.yml
+  when: groups.oo_glusterfs_to_config | default([]) | count > 0
diff --git a/playbooks/aws/openshift-cluster/prerequisites.yml b/playbooks/aws/openshift-cluster/prerequisites.yml
index df77fe3bc..f5eb01b14 100644
--- a/playbooks/aws/openshift-cluster/prerequisites.yml
+++ b/playbooks/aws/openshift-cluster/prerequisites.yml
@@ -4,5 +4,3 @@
 - include: provision_ssh_keypair.yml

 - include: provision_sec_group.yml
-  vars:
-    openshift_aws_node_group_type: compute
diff --git a/playbooks/aws/openshift-cluster/provision_install.yml b/playbooks/aws/openshift-cluster/provision_install.yml
index e787deced..78dd6a49b 100644
--- a/playbooks/aws/openshift-cluster/provision_install.yml
+++ b/playbooks/aws/openshift-cluster/provision_install.yml
@@ -6,11 +6,14 @@
 - name: Include the provision.yml playbook to create cluster
   include: provision.yml

-- name: Include the install.yml playbook to install cluster
+- name: Include the install.yml playbook to install cluster on masters
   include: install.yml

-- name: Include the install.yml playbook to install cluster
+- name: provision the infra/compute playbook to install node resources
   include: provision_nodes.yml

 - name: Include the accept.yml playbook to accept nodes into the cluster
   include: accept.yml
+
+- name: Include the hosted.yml playbook to finish the hosted configuration
+  include: hosted.yml
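install.yml above replaces the monolithic config.yml include with per-component phases, each guarded so that a phase is skipped outright when the inventory maps no hosts to it. A sketch of that guard, with the group name and path as placeholders:

```yaml
---
# Run a phase only when its host group is non-empty; `default([])` keeps the
# check safe when the group is entirely undefined in the inventory.
- include: ../../common/some-component/config.yml
  when: groups.some_group | default([]) | count > 0
```
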
diff --git a/playbooks/aws/openshift-cluster/provision_sec_group.yml b/playbooks/aws/openshift-cluster/provision_sec_group.yml
index 039357adb..7d74a691a 100644
--- a/playbooks/aws/openshift-cluster/provision_sec_group.yml
+++ b/playbooks/aws/openshift-cluster/provision_sec_group.yml
@@ -6,7 +6,7 @@
   connection: local
   gather_facts: no
   tasks:
-  - name: create an instance and prepare for ami
+  - name: create security groups
     include_role:
       name: openshift_aws
       tasks_from: security_group.yml
diff --git a/playbooks/byo/openshift-cluster/redeploy-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-certificates.yml
index 255b0dbf7..f53d34145 100644
--- a/playbooks/byo/openshift-cluster/redeploy-certificates.yml
+++ b/playbooks/byo/openshift-cluster/redeploy-certificates.yml
@@ -42,3 +42,7 @@

 - include: ../../common/openshift-cluster/redeploy-certificates/registry.yml
   when: openshift_hosted_manage_registry | default(true) | bool
+
+- include: ../../common/openshift-master/revert-client-ca.yml
+
+- include: ../../common/openshift-master/restart.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/README.md b/playbooks/byo/openshift-cluster/upgrades/README.md
index 0f64f40f3..d9b1fc2ca 100644
--- a/playbooks/byo/openshift-cluster/upgrades/README.md
+++ b/playbooks/byo/openshift-cluster/upgrades/README.md
@@ -4,6 +4,5 @@ cluster. Additional notes for the associated upgrade playbooks are
 provided in their respective directories.

 # Upgrades available
+- [OpenShift Container Platform 3.6 to 3.7](v3_7/README.md) (works also to upgrade OpenShift Origin from 3.6.x to 3.7.x)
 - [OpenShift Container Platform 3.5 to 3.6](v3_6/README.md) (works also to upgrade OpenShift Origin from 1.5.x to 3.6.x)
-- [OpenShift Container Platform 3.4 to 3.5](v3_5/README.md) (works also to upgrade OpenShift Origin from 1.4.x to 1.5.x)
-- [OpenShift Container Platform 3.3 to 3.4](v3_4/README.md) (works also to upgrade OpenShift Origin from 1.3.x to 1.4.x)
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
deleted file mode 100644
index 697a18c4d..000000000
--- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-#
-# Full Control Plane + Nodes Upgrade
-#
-- include: ../../initialize_groups.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/v3_3/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
deleted file mode 100644
index 4d284c279..000000000
--- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-#
-# Control Plane Upgrade Playbook
-#
-# Upgrades masters and Docker (only on standalone etcd hosts)
-#
-# This upgrade does not include:
-# - node service running on masters
-# - docker running on masters
-# - node service running on dedicated nodes
-#
-# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
-#
-- include: ../../initialize_groups.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
deleted file mode 100644
index 180a2821f..000000000
--- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-#
-# Node Upgrade Playbook
-#
-# Upgrades nodes only, but requires the control plane to have already been upgraded.
-#
-- include: ../../initialize_groups.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_4/README.md
deleted file mode 100644
index 85b807dc6..000000000
--- a/playbooks/byo/openshift-cluster/upgrades/v3_4/README.md
+++ /dev/null
@@ -1,18 +0,0 @@
-# v3.4 Major and Minor Upgrade Playbook
-
-## Overview
-This playbook currently performs the
-following steps.
-
- * Upgrade and restart master services
- * Unschedule node.
- * Upgrade and restart docker
- * Upgrade and restart node services
- * Modifies the subset of the configuration necessary
- * Applies the latest cluster policies
- * Updates the default router if one exists
- * Updates the default registry if one exists
- * Updates image streams and quickstarts
-
-## Usage
-ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
deleted file mode 100644
index d5329b858..000000000
--- a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-#
-# Node Upgrade Playbook
-#
-# Upgrades nodes only, but requires the control plane to have already been upgraded.
-#
-- include: ../../initialize_groups.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
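Every removed byo entry point above had the same two-include shape: resolve the inventory groups, then hand off to the shared upgrade logic for that version. A sketch of the pattern, with vX_Y standing in for a concrete version directory:

```yaml
---
# Shape of the (now removed) byo wrapper entry points; deleting the v3_3-v3_5
# wrappers removes those upgrade paths from this release.
- include: ../../initialize_groups.yml

- include: ../../../../common/openshift-cluster/upgrades/vX_Y/upgrade.yml
```
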
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_5/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_5/README.md
deleted file mode 100644
index 53eebe65e..000000000
--- a/playbooks/byo/openshift-cluster/upgrades/v3_5/README.md
+++ /dev/null
@@ -1,18 +0,0 @@
-# v3.5 Major and Minor Upgrade Playbook
-
-## Overview
-This playbook currently performs the
-following steps.
-
- * Upgrade and restart master services
- * Unschedule node.
- * Upgrade and restart docker
- * Upgrade and restart node services
- * Modifies the subset of the configuration necessary
- * Applies the latest cluster policies
- * Updates the default router if one exists
- * Updates the default registry if one exists
- * Updates image streams and quickstarts
-
-## Usage
-ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml
deleted file mode 100644
index f44d55ad2..000000000
--- a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-#
-# Full Control Plane + Nodes Upgrade
-#
-- include: ../../initialize_groups.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/v3_5/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
deleted file mode 100644
index 2377713fa..000000000
--- a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-#
-# Control Plane Upgrade Playbook
-#
-# Upgrades masters and Docker (only on standalone etcd hosts)
-#
-# This upgrade does not include:
-# - node service running on masters
-# - docker running on masters
-# - node service running on dedicated nodes
-#
-# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
-#
-- include: ../../initialize_groups.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_7/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_7/README.md
index 4bf53be81..914e0f5b2 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_7/README.md
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_7/README.md
@@ -1,4 +1,4 @@
-# v3.6 Major and Minor Upgrade Playbook
+# v3.7 Major and Minor Upgrade Playbook

 ## Overview
 This playbook currently performs the following steps.
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_8/README.md
index 6892f6324..d9be6ae3b 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_3/README.md
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_8/README.md
@@ -1,11 +1,10 @@
-# v3.3 Major and Minor Upgrade Playbook
+# v3.6 Major and Minor Upgrade Playbook

 ## Overview
-This playbook currently performs the
-following steps.
+This playbook currently performs the following steps.

  * Upgrade and restart master services
- * Unschedule node.
+ * Unschedule node
  * Upgrade and restart docker
  * Upgrade and restart node services
  * Modifies the subset of the configuration necessary
@@ -15,4 +14,7 @@ following steps.
  * Updates image streams and quickstarts

 ## Usage
-ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
+
+```
+ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade.yml
+```
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade.yml
index 8cce91b3f..3d4e6a790 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade.yml
@@ -4,4 +4,4 @@
 #
 - include: ../../initialize_groups.yml

-- include: ../../../../common/openshift-cluster/upgrades/v3_4/upgrade.yml
+- include: ../../../../common/openshift-cluster/upgrades/v3_8/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
index 8e5d0f5f9..d83305119 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
@@ -13,4 +13,4 @@
 #
 - include: ../../initialize_groups.yml

-- include: ../../../../common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
+- include: ../../../../common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml
index 5b3f6ab06..a972bb7a6 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml
@@ -6,4 +6,4 @@
 #
 - include: ../../initialize_groups.yml

-- include: ../../../../common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
+- include: ../../../../common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml
diff --git a/playbooks/common/openshift-checks/install.yml b/playbooks/common/openshift-checks/install.yml
index 6701a2e15..93cf6c359 100644
--- a/playbooks/common/openshift-checks/install.yml
+++ b/playbooks/common/openshift-checks/install.yml
@@ -1,13 +1,15 @@
 ---
 - name: Health Check Checkpoint Start
-  hosts: oo_all_hosts
+  hosts: all
   gather_facts: false
   tasks:
   - name: Set Health Check 'In Progress'
+    run_once: true
     set_stats:
       data:
-        installer_phase_health: "In Progress"
-      aggregate: false
+        installer_phase_health:
+          status: "In Progress"
+          start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"

 - name: OpenShift Health Checks
   hosts: oo_all_hosts
@@ -37,11 +39,13 @@
       - docker_image_availability

 - name: Health Check Checkpoint End
-  hosts: oo_all_hosts
+  hosts: all
   gather_facts: false
   tasks:
   - name: Set Health Check 'Complete'
+    run_once: true
     set_stats:
       data:
-        installer_phase_health: "Complete"
-      aggregate: false
+        installer_phase_health:
+          status: "Complete"
+          end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
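The checkpoint edits above recur through the rest of this patch: each installer phase now records a dict carrying a status plus a start or end timestamp via `set_stats`, emitted once rather than per host. A self-contained sketch of the whole pattern, with the phase name illustrative:

```yaml
---
- name: Example Phase Checkpoint Start
  hosts: all
  gather_facts: false
  tasks:
  - name: Set example phase 'In Progress'
    run_once: true                       # record the stat once, not per host
    set_stats:
      data:
        installer_phase_example:
          status: "In Progress"
          start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"

# ... the phase's real plays run here ...

- name: Example Phase Checkpoint End
  hosts: all
  gather_facts: false
  tasks:
  - name: Set example phase 'Complete'
    run_once: true
    set_stats:
      data:
        installer_phase_example:
          status: "Complete"
          end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
```
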
diff --git a/playbooks/common/openshift-cluster/cockpit-ui.yml b/playbooks/common/openshift-cluster/cockpit-ui.yml
index 5ddafdb07..359132dd0 100644
--- a/playbooks/common/openshift-cluster/cockpit-ui.yml
+++ b/playbooks/common/openshift-cluster/cockpit-ui.yml
@@ -3,4 +3,6 @@
   hosts: oo_first_master
   roles:
   - role: cockpit-ui
-    when: ( openshift.common.version_gte_3_3_or_1_3  | bool ) and ( openshift_hosted_manage_registry | default(true) | bool ) and not (openshift.docker.hosted_registry_insecure | default(false) | bool)
+    when:
+    - openshift_hosted_manage_registry | default(true) | bool
+    - not openshift.docker.hosted_registry_insecure | default(false) | bool
diff --git a/playbooks/common/openshift-cluster/create_persistent_volumes.yml b/playbooks/common/openshift-cluster/create_persistent_volumes.yml
index ec6f2c52c..8a60a30b8 100644
--- a/playbooks/common/openshift-cluster/create_persistent_volumes.yml
+++ b/playbooks/common/openshift-cluster/create_persistent_volumes.yml
@@ -1,13 +1,4 @@
 ---
-- name: Create persistent volumes
-  hosts: oo_first_master
-  vars:
-    persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups) }}"
-    persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims }}"
-  tasks:
-  - debug: var=persistent_volumes
-  - debug: var=persistent_volume_claims
-
 - name: Create Hosted Resources - persistent volumes
   hosts: oo_first_master
   vars:
diff --git a/playbooks/common/openshift-cluster/enable_dnsmasq.yml b/playbooks/common/openshift-cluster/enable_dnsmasq.yml
index be14b06f0..fe765aa5d 100644
--- a/playbooks/common/openshift-cluster/enable_dnsmasq.yml
+++ b/playbooks/common/openshift-cluster/enable_dnsmasq.yml
@@ -5,9 +5,6 @@
   hosts: oo_masters_to_config:oo_nodes_to_config
   roles:
   - openshift_facts
-  post_tasks:
-  - fail: msg="This playbook requires a master version of at least Origin 1.1 or OSE 3.1"
-    when: not openshift.common.version_gte_3_1_1_or_1_1_1 | bool

 - name: Reconfigure masters to listen on our new dns_port
   hosts: oo_masters_to_config
diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml
index 281ccce2e..15ee60dc0 100644
--- a/playbooks/common/openshift-cluster/openshift_hosted.yml
+++ b/playbooks/common/openshift-cluster/openshift_hosted.yml
@@ -1,13 +1,15 @@
 ---
 - name: Hosted Install Checkpoint Start
-  hosts: oo_all_hosts
+  hosts: all
   gather_facts: false
   tasks:
   - name: Set Hosted install 'In Progress'
+    run_once: true
     set_stats:
       data:
-        installer_phase_hosted: "In Progress"
-      aggregate: false
+        installer_phase_hosted:
+          status: "In Progress"
+          start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"

 - include: create_persistent_volumes.yml

@@ -30,11 +32,13 @@
   - openshift_crio_enable_docker_gc | default(False) | bool

 - name: Hosted Install Checkpoint End
-  hosts: oo_all_hosts
+  hosts: all
   gather_facts: false
   tasks:
   - name: Set Hosted install 'Complete'
+    run_once: true
     set_stats:
       data:
-        installer_phase_hosted: "Complete"
-      aggregate: false
+        installer_phase_hosted:
+          status: "Complete"
+          end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
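The cockpit-ui hunk also shows the conditional style this patch standardizes on: a `when:` given as a list, which Ansible ANDs together, instead of one long parenthesized boolean. Both spellings side by side, with the condition names as placeholders:

```yaml
# One long expression...
- role: example_role
  when: (cond_a | default(true) | bool) and (not cond_b | default(false) | bool)

# ...reads better as a list; Ansible ANDs the entries together.
- role: example_role
  when:
  - cond_a | default(true) | bool
  - not cond_b | default(false) | bool
```
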
start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"  - name: OpenShift Aggregated Logging    hosts: oo_first_master @@ -23,11 +25,13 @@          tasks_from: update_master_config  - name: Logging Install Checkpoint End -  hosts: oo_all_hosts +  hosts: all    gather_facts: false    tasks:    - name: Set Logging install 'Complete' +    run_once: true      set_stats:        data: -        installer_phase_logging: "Complete" -      aggregate: false +        installer_phase_logging: +          status: "Complete" +          end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" diff --git a/playbooks/common/openshift-cluster/openshift_metrics.yml b/playbooks/common/openshift-cluster/openshift_metrics.yml index 9c0bd489b..80cd93e5f 100644 --- a/playbooks/common/openshift-cluster/openshift_metrics.yml +++ b/playbooks/common/openshift-cluster/openshift_metrics.yml @@ -1,13 +1,15 @@  ---  - name: Metrics Install Checkpoint Start -  hosts: oo_all_hosts +  hosts: all    gather_facts: false    tasks:    - name: Set Metrics install 'In Progress' +    run_once: true      set_stats:        data: -        installer_phase_metrics: "In Progress" -      aggregate: false +        installer_phase_metrics: +          status: "In Progress" +          start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"  - name: OpenShift Metrics    hosts: oo_first_master @@ -24,11 +26,13 @@        tasks_from: update_master_config.yaml  - name: Metrics Install Checkpoint End -  hosts: oo_all_hosts +  hosts: all    gather_facts: false    tasks:    - name: Set Metrics install 'Complete' +    run_once: true      set_stats:        data: -        installer_phase_metrics: "Complete" -      aggregate: false +        installer_phase_metrics: +          status: "Complete" +          end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" diff --git a/playbooks/common/openshift-cluster/openshift_prometheus.yml b/playbooks/common/openshift-cluster/openshift_prometheus.yml index a73b294a5..7aa9a16e6 100644 --- a/playbooks/common/openshift-cluster/openshift_prometheus.yml +++ b/playbooks/common/openshift-cluster/openshift_prometheus.yml @@ -1,13 +1,15 @@  ---  - name: Prometheus Install Checkpoint Start -  hosts: oo_all_hosts +  hosts: all    gather_facts: false    tasks:    - name: Set Prometheus install 'In Progress' +    run_once: true      set_stats:        data: -        installer_phase_prometheus: "In Progress" -      aggregate: false +        installer_phase_prometheus: +          status: "In Progress" +          start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"  - name: Create Hosted Resources - openshift_prometheus    hosts: oo_first_master @@ -15,11 +17,13 @@    - role: openshift_prometheus  - name: Prometheus Install Checkpoint End -  hosts: oo_all_hosts +  hosts: all    gather_facts: false    tasks:    - name: Set Prometheus install 'Complete' +    run_once: true      set_stats:        data: -        installer_phase_prometheus: "Complete" -      aggregate: false +        installer_phase_prometheus: +          status: "Complete" +          end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml b/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml index 2068ed199..eb225dfb5 100644 --- a/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml +++ b/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml @@ -1,11 +1,4 @@  --- -- name: Verify OpenShift version is greater than or equal to 1.2 or 3.2 -  
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml b/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml
index 2068ed199..eb225dfb5 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml
@@ -1,11 +1,4 @@
 ---
-- name: Verify OpenShift version is greater than or equal to 1.2 or 3.2
-  hosts: oo_first_master
-  tasks:
-  - fail:
-      msg: "The current OpenShift version is less than 1.2/3.2 and does not support CA bundles."
-    when: not openshift.common.version_gte_3_2_or_1_2 | bool
-
 - name: Check cert expirys
   hosts: oo_nodes_to_config:oo_masters_to_config:oo_etcd_to_config
   vars:
@@ -43,11 +36,6 @@
     when: (g_master_config_output.content|b64decode|from_yaml).oauthConfig.masterCA != 'ca-bundle.crt'
   - modify_yaml:
       dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
-      yaml_key: servingInfo.clientCA
-      yaml_value: ca.crt
-    when: (g_master_config_output.content|b64decode|from_yaml).servingInfo.clientCA != 'ca.crt'
-  - modify_yaml:
-      dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
       yaml_key: etcdClientInfo.ca
       yaml_value: ca-bundle.crt
     when:
@@ -67,6 +55,13 @@
     when:
     - groups.oo_etcd_to_config | default([]) | length == 0
     - (g_master_config_output.content|b64decode|from_yaml).etcdConfig.servingInfo.clientCA != 'ca-bundle.crt'
+  # Set servingInfo.clientCA to client-ca-bundle.crt in order to roll the CA certificate.
+  # This change will be reverted in playbooks/byo/openshift-cluster/redeploy-certificates.yml
+  - modify_yaml:
+      dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
+      yaml_key: servingInfo.clientCA
+      yaml_value: client-ca-bundle.crt
+    when: (g_master_config_output.content|b64decode|from_yaml).servingInfo.clientCA != 'client-ca-bundle.crt'

 - name: Copy current OpenShift CA to legacy directory
   hosts: oo_masters_to_config
@@ -155,6 +150,7 @@
     - ca.key
     - ca-bundle.crt
     - ca.serial.txt
+    - client-ca-bundle.crt
     delegate_to: "{{ openshift_ca_host }}"
     run_once: true
     changed_when: false
@@ -173,6 +169,7 @@
     - ca.key
     - ca-bundle.crt
     - ca.serial.txt
+    - client-ca-bundle.crt
   - name: Update master client kubeconfig CA data
     kubeclient_ca:
       client_path: "{{ openshift.common.config_base }}/master/openshift-master.kubeconfig"
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml b/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml
index afd5463b2..7e9363c5f 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml
@@ -70,9 +70,7 @@
         --hostnames="{{ docker_registry_service_ip.results.clusterip }},docker-registry.default.svc,docker-registry.default.svc.cluster.local,{{ docker_registry_route_hostname }}"
         --cert={{ openshift.common.config_base }}/master/registry.crt
         --key={{ openshift.common.config_base }}/master/registry.key
-        {% if openshift_version | oo_version_gte_3_5_or_1_5(openshift.common.deployment_type) | bool %}
         --expire-days={{ openshift_hosted_registry_cert_expire_days | default(730) }}
-        {% endif %}

     - name: Update registry certificates secret
       oc_secret:
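openshift-ca.yml leans on this repo's `modify_yaml` module to flip single keys of master-config.yaml in place. A sketch of the temporary-then-revert flip the comment above describes; the transitional value mirrors the hunk, while the revert value (ca.crt) is an assumption based on the key's previous default, not something this patch shows:

```yaml
---
# Roll the client CA: point servingInfo.clientCA at the transitional bundle...
- modify_yaml:
    dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
    yaml_key: servingInfo.clientCA
    yaml_value: client-ca-bundle.crt

# ...and later (from the byo redeploy-certificates entry point) revert it.
# The value below is an assumed pre-roll default, for illustration only.
- modify_yaml:
    dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
    yaml_key: servingInfo.clientCA
    yaml_value: ca.crt
```
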
diff --git a/playbooks/common/openshift-cluster/service_catalog.yml b/playbooks/common/openshift-cluster/service_catalog.yml
index bd964b2ce..7bb8511f6 100644
--- a/playbooks/common/openshift-cluster/service_catalog.yml
+++ b/playbooks/common/openshift-cluster/service_catalog.yml
@@ -1,13 +1,15 @@
 ---
 - name: Service Catalog Install Checkpoint Start
-  hosts: oo_all_hosts
+  hosts: all
   gather_facts: false
   tasks:
   - name: Set Service Catalog install 'In Progress'
+    run_once: true
     set_stats:
       data:
-        installer_phase_servicecatalog: "In Progress"
-      aggregate: false
+        installer_phase_servicecatalog:
+          status: "In Progress"
+          start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"

 - name: Service Catalog
   hosts: oo_first_master
@@ -19,11 +21,13 @@
     first_master: "{{ groups.oo_first_master[0] }}"

 - name: Service Catalog Install Checkpoint End
-  hosts: oo_all_hosts
+  hosts: all
   gather_facts: false
   tasks:
   - name: Set Service Catalog install 'Complete'
+    run_once: true
     set_stats:
       data:
-        installer_phase_servicecatalog: "Complete"
-      aggregate: false
+        installer_phase_servicecatalog:
+          status: "Complete"
+          end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
diff --git a/playbooks/common/openshift-cluster/std_include.yml b/playbooks/common/openshift-cluster/std_include.yml
index 45b34c8bd..fe376fe31 100644
--- a/playbooks/common/openshift-cluster/std_include.yml
+++ b/playbooks/common/openshift-cluster/std_include.yml
@@ -1,15 +1,17 @@
 ---
 - name: Initialization Checkpoint Start
-  hosts: oo_all_hosts
+  hosts: all
   gather_facts: false
   roles:
   - installer_checkpoint
   tasks:
   - name: Set install initialization 'In Progress'
+    run_once: true
     set_stats:
       data:
-        installer_phase_initialize: "In Progress"
-      aggregate: false
+        installer_phase_initialize:
+          status: "In Progress"
+          start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"

 - include: evaluate_groups.yml
   tags:
@@ -36,11 +38,13 @@
   - always

 - name: Initialization Checkpoint End
-  hosts: oo_all_hosts
+  hosts: all
   gather_facts: false
   tasks:
   - name: Set install initialization 'Complete'
+    run_once: true
     set_stats:
       data:
-        installer_phase_initialize: "Complete"
-      aggregate: false
+        installer_phase_initialize:
+          status: "Complete"
+          end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/init.yml b/playbooks/common/openshift-cluster/upgrades/init.yml
index 2826951e6..6ad0b6b86 100644
--- a/playbooks/common/openshift-cluster/upgrades/init.yml
+++ b/playbooks/common/openshift-cluster/upgrades/init.yml
@@ -9,7 +9,12 @@

 - name: Ensure firewall is not switched during upgrade
   hosts: oo_all_hosts
+  vars:
+    openshift_master_installed_version: "{{ hostvars[groups.oo_first_master.0].openshift.common.version }}"
   tasks:
+  - name: set currently installed version
+    set_fact:
+      openshift_currently_installed_version: "{{ openshift_master_installed_version }}"
   - name: Check if iptables is running
     command: systemctl status iptables
     changed_when: false
diff --git a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
index 122066955..9f93777b4 100644
--- a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
@@ -117,7 +117,6 @@
   - name: grep pluginOrderOverride
     command: grep pluginOrderOverride {{ openshift.common.config_base }}/master/master-config.yaml
     register: grep_plugin_order_override
-    when: openshift.common.version_gte_3_3_or_1_3 | bool
     changed_when: false
     failed_when: false
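init.yml above captures the first master's installed version into a play var and then pins it as a fact, so later plays on any host can test it without reaching into hostvars themselves. The hop, sketched on its own (group and fact names follow the patch; the play itself is illustrative):

```yaml
---
- hosts: oo_all_hosts
  vars:
    # read a fact only the first master has gathered
    openshift_master_installed_version: "{{ hostvars[groups.oo_first_master.0].openshift.common.version }}"
  tasks:
  - name: pin it as a per-host fact for the rest of the run
    set_fact:
      openshift_currently_installed_version: "{{ openshift_master_installed_version }}"
```
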
diff --git a/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml
deleted file mode 100644
index 8cc46ab68..000000000
--- a/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml
+++ /dev/null
@@ -1,38 +0,0 @@
----
-# When we update package "a-${version}" and a requires b >= ${version} if we
-# don't specify the version of b yum will choose the latest version of b
-# available and the whole set of dependencies end up at the latest version.
-# Since the package module, unlike the yum module, doesn't flatten a list
-# of packages into one transaction we need to do that explicitly. The ansible
-# core team tells us not to rely on yum module transaction flattening anyway.
-
-# TODO: If the sdn package isn't already installed this will install it, we
-# should fix that
-- name: Upgrade master packages
-  package: name={{ master_pkgs | join(',') }} state=present
-  vars:
-    master_pkgs:
-      - "{{ openshift.common.service_type }}{{ openshift_pkg_version }}"
-      - "{{ openshift.common.service_type }}-master{{ openshift_pkg_version }}"
-      - "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}"
-      - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version }}"
-      - "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}"
-      - "tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}"
-      - PyYAML
-  when:
-    - component == "master"
-    - not openshift.common.is_atomic | bool
-
-- name: Upgrade node packages
-  package: name={{ node_pkgs | join(',') }} state=present
-  vars:
-    node_pkgs:
-      - "{{ openshift.common.service_type }}{{ openshift_pkg_version }}"
-      - "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}"
-      - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version }}"
-      - "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}"
-      - "tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}"
-      - PyYAML
-  when:
-    - component == "node"
-    - not openshift.common.is_atomic | bool
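The comment in the deleted file carries the trick worth keeping: the `package` module issues one transaction per item, so a version-pinned set is joined into a single `name=` string to force one yum transaction and keep dependencies from floating. A minimal sketch, with the package names and versions as placeholders:

```yaml
---
# Pin every package in the set and feed them to one `package` call so yum
# resolves them together; looping over them individually would let yum pull
# each item's dependencies to the latest available version.
- name: upgrade a pinned set of packages in one transaction
  package: name={{ pkgs | join(',') }} state=present
  vars:
    pkgs:
    - origin-3.7.0
    - origin-node-3.7.0
    - origin-sdn-ovs-3.7.0
```
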
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index a5e2f7940..399b818a7 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -3,22 +3,6 @@
 # Upgrade Masters
 ###############################################################################

-# oc adm migrate storage should be run prior to etcd v3 upgrade
-# See: https://github.com/openshift/origin/pull/14625#issuecomment-308467060
-- name: Pre master upgrade - Upgrade all storage
-  hosts: oo_first_master
-  tasks:
-  - name: Upgrade all storage
-    command: >
-      {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
-      migrate storage --include=* --confirm
-    register: l_pb_upgrade_control_plane_pre_upgrade_storage
-    when: openshift_upgrade_pre_storage_migration_enabled | default(true) | bool
-    failed_when:
-    - openshift_upgrade_pre_storage_migration_enabled | default(true) | bool
-    - l_pb_upgrade_control_plane_pre_upgrade_storage.rc != 0
-    - openshift_upgrade_pre_storage_migration_fatal | default(true) | bool
-
 # If facts cache were for some reason deleted, this fact may not be set, and if not set
 # it will always default to true. This causes problems for the etcd data dir fact detection
 # so we must first make sure this is set correctly before attempting the backup.
@@ -48,6 +32,22 @@

 - include: create_service_signer_cert.yml

+# oc adm migrate storage should be run prior to etcd v3 upgrade
+# See: https://github.com/openshift/origin/pull/14625#issuecomment-308467060
+- name: Pre master upgrade - Upgrade all storage
+  hosts: oo_first_master
+  tasks:
+  - name: Upgrade all storage
+    command: >
+      {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+      migrate storage --include=* --confirm
+    register: l_pb_upgrade_control_plane_pre_upgrade_storage
+    when: openshift_upgrade_pre_storage_migration_enabled | default(true) | bool
+    failed_when:
+    - openshift_upgrade_pre_storage_migration_enabled | default(true) | bool
+    - l_pb_upgrade_control_plane_pre_upgrade_storage.rc != 0
+    - openshift_upgrade_pre_storage_migration_fatal | default(true) | bool
+
 # Set openshift_master_facts separately. In order to reconcile
 # admission_config's, we currently must run openshift_master_facts and
 # then run openshift_facts.
@@ -63,13 +63,9 @@
   vars:
     openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
   serial: 1
-  handlers:
-  - include: ../../../../roles/openshift_master/handlers/main.yml
-    static: yes
-  roles:
-  - openshift_facts
-  - lib_utils
-  post_tasks:
+  tasks:
+  - include_role:
+      name: openshift_facts

   # Run the pre-upgrade hook if defined:
   - debug: msg="Running master pre-upgrade hook {{ openshift_master_upgrade_pre_hook }}"
@@ -78,55 +74,9 @@
   - include: "{{ openshift_master_upgrade_pre_hook }}"
     when: openshift_master_upgrade_pre_hook is defined

-  - include: rpm_upgrade.yml component=master
-    when: not openshift.common.is_containerized | bool
-
-  - include_vars: ../../../../roles/openshift_master_facts/vars/main.yml
-
-  - include: upgrade_scheduler.yml
-
-  - include: "{{ master_config_hook }}"
-    when: master_config_hook is defined
-
-  - include_vars: ../../../../roles/openshift_master/vars/main.yml
-
-  - name: Update journald config
-    include: ../../../../roles/openshift_master/tasks/journald.yml
-
-  - name: Remove any legacy systemd units and update systemd units
-    include: ../../../../roles/openshift_master/tasks/systemd_units.yml
-
-  - name: Check for ca-bundle.crt
-    stat:
-      path: "{{ openshift.common.config_base }}/master/ca-bundle.crt"
-    register: ca_bundle_stat
-    failed_when: false
-
-  - name: Check for ca.crt
-    stat:
-      path: "{{ openshift.common.config_base }}/master/ca.crt"
-    register: ca_crt_stat
-    failed_when: false
-
-  - name: Migrate ca.crt to ca-bundle.crt
-    command: mv ca.crt ca-bundle.crt
-    args:
-      chdir: "{{ openshift.common.config_base }}/master"
-    when: ca_crt_stat.stat.isreg and not ca_bundle_stat.stat.exists
-
-  - name: Link ca.crt to ca-bundle.crt
-    file:
-      src: "{{ openshift.common.config_base }}/master/ca-bundle.crt"
-      path: "{{ openshift.common.config_base }}/master/ca.crt"
-      state: link
-    when: ca_crt_stat.stat.isreg and not ca_bundle_stat.stat.exists
-
-  - name: Update oreg value
-    yedit:
-      src: "{{ openshift.common.config_base }}/master/master-config.yaml"
-      key: 'imageConfig.format'
-      value: "{{ oreg_url | default(oreg_url_master) }}"
-    when: oreg_url is defined or oreg_url_master is defined
+  - include_role:
+      name: openshift_master
+      tasks_from: upgrade.yml

   # Run the upgrade hook prior to restarting services/system if defined:
   - debug: msg="Running master upgrade hook {{ openshift_master_upgrade_hook }}"
@@ -153,7 +103,9 @@
       {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
       migrate storage --include=clusterpolicies --confirm
     register: l_pb_upgrade_control_plane_post_upgrade_storage
-    when: openshift_upgrade_post_storage_migration_enabled | default(true) | bool
+    when:
+    - openshift_upgrade_post_storage_migration_enabled | default(true) | bool
+    - openshift_version | version_compare('3.7','<')
     failed_when:
     - openshift_upgrade_post_storage_migration_enabled | default(true) | bool
     - l_pb_upgrade_control_plane_post_upgrade_storage.rc != 0
@@ -234,7 +186,6 @@
     - reconcile_jenkins_role_binding_result.rc == 0
     when:
     - openshift_version | version_compare('3.7','<')
-    - openshift_version | version_compare('3.4','>=')

   - when: openshift_upgrade_target | version_compare('3.7','<')
     block:
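The large rewrite above collapses dozens of inlined tasks into a single `include_role` with `tasks_from`, so the master upgrade steps live with the openshift_master role rather than the playbook. The resulting play shape, sketched minimally (role and file names follow the patch; the surrounding play is trimmed for illustration):

```yaml
---
- hosts: oo_masters_to_config
  serial: 1
  tasks:
  - include_role:
      name: openshift_facts          # refresh facts first
  - include_role:
      name: openshift_master
      tasks_from: upgrade.yml        # role-owned upgrade steps
```
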
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml
deleted file mode 100644
index 8558bf3e9..000000000
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml
+++ /dev/null
@@ -1,173 +0,0 @@
----
-# Upgrade predicates
-- vars:
-    prev_predicates: "{{ lookup('openshift_master_facts_default_predicates', short_version=openshift_upgrade_min, deployment_type=openshift_deployment_type) }}"
-    prev_predicates_no_region: "{{ lookup('openshift_master_facts_default_predicates', short_version=openshift_upgrade_min, deployment_type=openshift_deployment_type, regions_enabled=False) }}"
-    default_predicates_no_region: "{{ lookup('openshift_master_facts_default_predicates', regions_enabled=False) }}"
-    # older_predicates are the set of predicates that have previously been
-    # hard-coded into openshift_facts
-    older_predicates:
-    - - name: MatchNodeSelector
-      - name: PodFitsResources
-      - name: PodFitsPorts
-      - name: NoDiskConflict
-      - name: NoVolumeZoneConflict
-      - name: MaxEBSVolumeCount
-      - name: MaxGCEPDVolumeCount
-      - name: Region
-        argument:
-          serviceAffinity:
-            labels:
-            - region
-    - - name: MatchNodeSelector
-      - name: PodFitsResources
-      - name: PodFitsPorts
-      - name: NoDiskConflict
-      - name: NoVolumeZoneConflict
-      - name: Region
-        argument:
-          serviceAffinity:
-            labels:
-            - region
-    - - name: MatchNodeSelector
-      - name: PodFitsResources
-      - name: PodFitsPorts
-      - name: NoDiskConflict
-      - name: Region
-        argument:
-          serviceAffinity:
-            labels:
-            - region
-    # older_predicates_no_region are the set of predicates that have previously
-    # been hard-coded into openshift_facts, with the Region predicate removed
-    older_predicates_no_region:
-    - - name: MatchNodeSelector
-      - name: PodFitsResources
-      - name: PodFitsPorts
-      - name: NoDiskConflict
-      - name: NoVolumeZoneConflict
-      - name: MaxEBSVolumeCount
-      - name: MaxGCEPDVolumeCount
-    - - name: MatchNodeSelector
-      - name: PodFitsResources
-      - name: PodFitsPorts
-      - name: NoDiskConflict
-      - name: NoVolumeZoneConflict
-    - - name: MatchNodeSelector
-      - name: PodFitsResources
-      - name: PodFitsPorts
-      - name: NoDiskConflict
-  block:
-
-  # Handle case where openshift_master_predicates is defined
-  - block:
-    - debug:
-        msg: "WARNING: openshift_master_scheduler_predicates is set to defaults from an earlier release of OpenShift current defaults are: {{ openshift_master_scheduler_default_predicates }}"
-      when: openshift_master_scheduler_predicates in older_predicates + older_predicates_no_region + [prev_predicates] + [prev_predicates_no_region]
-
-    - debug:
-        msg: "WARNING: openshift_master_scheduler_predicates does not match current defaults of: {{ openshift_master_scheduler_default_predicates }}"
-      when: openshift_master_scheduler_predicates != openshift_master_scheduler_default_predicates
-    when: openshift_master_scheduler_predicates | default(none) is not none
-
-  # Handle cases where openshift_master_predicates is not defined
-  - block:
-    - debug:
-        msg: "WARNING: existing scheduler config does not match previous known defaults automated upgrade of scheduler config is disabled.\nexisting scheduler predicates: {{ openshift_master_scheduler_current_predicates }}\ncurrent scheduler default predicates are: {{ openshift_master_scheduler_default_predicates }}"
-      when:
-      - openshift_master_scheduler_current_predicates != openshift_master_scheduler_default_predicates
-      - openshift_master_scheduler_current_predicates not in older_predicates + [prev_predicates]
-
-    - set_fact:
-        openshift_upgrade_scheduler_predicates: "{{ openshift_master_scheduler_default_predicates }}"
-      when:
-      - openshift_master_scheduler_current_predicates != openshift_master_scheduler_default_predicates
-      - openshift_master_scheduler_current_predicates in older_predicates + [prev_predicates]
-
-    - set_fact:
-        openshift_upgrade_scheduler_predicates: "{{ default_predicates_no_region }}"
-      when:
-      - openshift_master_scheduler_current_predicates != default_predicates_no_region
-      - openshift_master_scheduler_current_predicates in older_predicates_no_region + [prev_predicates_no_region]
-
-    when: openshift_master_scheduler_predicates | default(none) is none
-
-
-# Upgrade priorities
-- vars:
-    prev_priorities: "{{ lookup('openshift_master_facts_default_priorities', short_version=openshift_upgrade_min, deployment_type=openshift_deployment_type) }}"
-    prev_priorities_no_zone: "{{ lookup('openshift_master_facts_default_priorities', short_version=openshift_upgrade_min, deployment_type=openshift_deployment_type, zones_enabled=False) }}"
-    default_priorities_no_zone: "{{ lookup('openshift_master_facts_default_priorities', zones_enabled=False) }}"
-    # older_priorities are the set of priorities that have previously been
-    # hard-coded into openshift_facts
-    older_priorities:
-    - - name: LeastRequestedPriority
-        weight: 1
-      - name: SelectorSpreadPriority
-        weight: 1
-      - name: Zone
-        weight: 2
-        argument:
-          serviceAntiAffinity:
-            label: zone
-    # older_priorities_no_region are the set of priorities that have previously
-    # been hard-coded into openshift_facts, with the Zone priority removed
-    older_priorities_no_zone:
-    - - name: LeastRequestedPriority
-        weight: 1
-      - name: SelectorSpreadPriority
-        weight: 1
-  block:
-
-  # Handle case where openshift_master_priorities is defined
-  - block:
-    - debug:
-        msg: "WARNING: openshift_master_scheduler_priorities is set to defaults from an earlier release of OpenShift current defaults are: {{ openshift_master_scheduler_default_priorities }}"
-      when: openshift_master_scheduler_priorities in older_priorities + older_priorities_no_zone + [prev_priorities] + [prev_priorities_no_zone]
-
-    - debug:
-        msg: "WARNING: openshift_master_scheduler_priorities does not match current defaults of: {{ openshift_master_scheduler_default_priorities }}"
-      when: openshift_master_scheduler_priorities != openshift_master_scheduler_default_priorities
-    when: openshift_master_scheduler_priorities | default(none) is not none
-
-  # Handle cases where openshift_master_priorities is not defined
-  - block:
-    - debug:
-        msg: "WARNING: existing scheduler config does not match previous known defaults automated upgrade of scheduler config is disabled.\nexisting scheduler priorities: {{ openshift_master_scheduler_current_priorities }}\ncurrent scheduler default priorities are: {{ openshift_master_scheduler_default_priorities }}"
-      when:
-      - openshift_master_scheduler_current_priorities != openshift_master_scheduler_default_priorities
-      - openshift_master_scheduler_current_priorities not in older_priorities + [prev_priorities]
-
-    - set_fact:
-        openshift_upgrade_scheduler_priorities: "{{ openshift_master_scheduler_default_priorities }}"
-      when:
-      - openshift_master_scheduler_current_priorities != openshift_master_scheduler_default_priorities
-      - openshift_master_scheduler_current_priorities in older_priorities + [prev_priorities]
-
-    - set_fact:
-        openshift_upgrade_scheduler_priorities: "{{ default_priorities_no_zone }}"
-      when:
-      - openshift_master_scheduler_current_priorities != default_priorities_no_zone
-      - openshift_master_scheduler_current_priorities in older_priorities_no_zone + [prev_priorities_no_zone]
-
-    when: openshift_master_scheduler_priorities | default(none) is none
-
-
-# Update scheduler
-- vars:
-    scheduler_config:
-      kind: Policy
-      apiVersion: v1
-      predicates: "{{ openshift_upgrade_scheduler_predicates
-                      | default(openshift_master_scheduler_current_predicates) }}"
-      priorities: "{{ openshift_upgrade_scheduler_priorities
-                      | default(openshift_master_scheduler_current_priorities) }}"
-  block:
-  - name: Update scheduler config
-    copy:
-      content: "{{ scheduler_config | to_nice_json }}"
-      dest: "{{ openshift_master_scheduler_conf }}"
-      backup: true
-  when: >
-    openshift_upgrade_scheduler_predicates is defined or
-    openshift_upgrade_scheduler_priorities is defined
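The deleted upgrade_scheduler.yml ultimately wrote a scheduler Policy file from the chosen predicate and priority lists. For reference, a document of that shape (the file serialized JSON via to_nice_json; YAML is shown for readability, and the entries are an illustrative subset of the defaults listed above):

```yaml
kind: Policy
apiVersion: v1
predicates:
- name: MatchNodeSelector
- name: PodFitsResources
- name: NoDiskConflict
priorities:
- name: LeastRequestedPriority
  weight: 1
- name: SelectorSpreadPriority
  weight: 1
```
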
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml
deleted file mode 100644
index 5e7a66171..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml
+++ /dev/null
@@ -1,66 +0,0 @@
----
-- modify_yaml:
-    dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
-    yaml_key: 'masterClients.externalKubernetesClientConnectionOverrides.acceptContentTypes'
-    yaml_value: 'application/vnd.kubernetes.protobuf,application/json'
-
-- modify_yaml:
-    dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
-    yaml_key: 'masterClients.externalKubernetesClientConnectionOverrides.contentType'
-    yaml_value: 'application/vnd.kubernetes.protobuf'
-
-- modify_yaml:
-    dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
-    yaml_key: 'masterClients.externalKubernetesClientConnectionOverrides.burst'
-    yaml_value: 400
-
-- modify_yaml:
-    dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
-    yaml_key: 'masterClients.externalKubernetesClientConnectionOverrides.qps'
-    yaml_value: 200
-
-- modify_yaml:
-    dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
-    yaml_key: 'masterClients.openshiftLoopbackClientConnectionOverrides.acceptContentTypes'
-    yaml_value: 'application/vnd.kubernetes.protobuf,application/json'
-
-- modify_yaml:
-    dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
-    yaml_key: 'masterClients.openshiftLoopbackClientConnectionOverrides.contentType'
-    yaml_value: 'application/vnd.kubernetes.protobuf'
-
-- modify_yaml:
-    dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
-    yaml_key: 'masterClients.openshiftLoopbackClientConnectionOverrides.burst'
-    yaml_value: 600
-
-- modify_yaml:
-    dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
-    yaml_key: 'masterClients.openshiftLoopbackClientConnectionOverrides.qps'
-    yaml_value: 300
-
-- modify_yaml:
-    dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
-    yaml_key: 'controllerConfig.serviceServingCert.signer.certFile'
-    yaml_value: service-signer.crt
-
-- modify_yaml:
-    dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
-    yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile'
-    yaml_value: service-signer.key
-
-- modify_yaml:
-    dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
-    yaml_key: 'admissionConfig.pluginConfig'
-    yaml_value: "{{ openshift.master.admission_plugin_config }}"
-  when: "'admission_plugin_config' in openshift.master"
-
-- modify_yaml:
-    dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
-    yaml_key: 'admissionConfig.pluginOrderOverride'
-    yaml_value:
-
-- modify_yaml:
-    dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
-    yaml_key: 'kubernetesMasterConfig.admissionConfig'
-    yaml_value:
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/node_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/node_config_upgrade.yml
deleted file mode 100644
index 89b524f14..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/node_config_upgrade.yml
+++ /dev/null
@@ -1,20 +0,0 @@
----
-- modify_yaml:
-    dest: "{{ openshift.common.config_base}}/node/node-config.yaml"
-    yaml_key: 'masterClientConnectionOverrides.acceptContentTypes'
-    yaml_value: 'application/vnd.kubernetes.protobuf,application/json'
-
-- modify_yaml:
-    dest: "{{ openshift.common.config_base}}/node/node-config.yaml"
-    yaml_key: 'masterClientConnectionOverrides.contentType'
-    yaml_value: 'application/vnd.kubernetes.protobuf'
-
-- modify_yaml:
-    dest: "{{ openshift.common.config_base}}/node/node-config.yaml"
-    yaml_key: 'masterClientConnectionOverrides.burst'
-    yaml_value: 40
-
-- modify_yaml:
-    dest: "{{ openshift.common.config_base}}/node/node-config.yaml"
-    yaml_key: 'masterClientConnectionOverrides.qps'
-    yaml_value: 20
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/roles b/playbooks/common/openshift-cluster/upgrades/v3_3/roles
deleted file mode 120000
index 6bc1a7aef..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../roles
\ No newline at end of file
- -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. -- name: Cleanup unused Docker images -  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config -  tasks: -  - include: ../cleanup_unused_images.yml - -- include: ../upgrade_control_plane.yml -  vars: -    master_config_hook: "v3_3/master_config_upgrade.yml" - -- include: ../upgrade_nodes.yml -  vars: -    node_config_hook: "v3_3/node_config_upgrade.yml" - -- include: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml deleted file mode 100644 index 54c85f0fb..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml +++ /dev/null @@ -1,118 +0,0 @@ ---- -# -# Control Plane Upgrade Playbook -# -# Upgrades masters and Docker (only on standalone etcd hosts) -# -# This upgrade does not include: -# - node service running on masters -# - docker running on masters -# - node service running on dedicated nodes -# -# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately. -# -- include: ../init.yml -  tags: -  - pre_upgrade - -- name: Configure the upgrade target for the common upgrade tasks -  hosts: oo_all_hosts -  tags: -  - pre_upgrade -  tasks: -  - set_fact: -      openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" -      openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}" - -# Pre-upgrade -- include: ../initialize_nodes_to_upgrade.yml -  tags: -  - pre_upgrade - -- name: Update repos on control plane hosts -  hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config -  tags: -  - pre_upgrade -  roles: -  - openshift_repos - -- name: Set openshift_no_proxy_internal_hostnames -  hosts: oo_masters_to_config:oo_nodes_to_upgrade -  tags: -  - pre_upgrade -  tasks: -  - set_fact: -      openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] -                                                    | union(groups['oo_masters_to_config']) -                                                    | union(groups['oo_etcd_to_config'] | default([]))) -                                                | oo_collect('openshift.common.hostname') | default([]) | join (',') -                                                }}" -    when: -    - openshift_http_proxy is defined or openshift_https_proxy is defined -    - openshift_generate_no_proxy_hosts | default(True) | bool - -- include: ../pre/verify_inventory_vars.yml -  tags: -  - pre_upgrade - -- include: ../disable_master_excluders.yml -  tags: -  - pre_upgrade - -- include: ../../initialize_openshift_version.yml -  tags: -  - pre_upgrade -  vars: -    # Request specific openshift_release and let the openshift_version role handle converting this -    # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if -    # defined, and overriding the normal behavior of protecting the installed version -    openshift_release: "{{ openshift_upgrade_target }}" -    openshift_protect_installed_version: False - -    # We skip the docker role at this point in upgrade to prevent -    # unintended package, container, or config upgrades which trigger -    # docker restarts. 
At this early stage of upgrade we can assume -    # docker is configured and running. -    skip_docker_role: True - -- include: ../pre/verify_control_plane_running.yml -  tags: -  - pre_upgrade - -- include: ../../../openshift-master/validate_restart.yml -  tags: -  - pre_upgrade - -- name: Verify upgrade targets -  hosts: oo_masters_to_config -  tasks: -  - include: ../pre/verify_upgrade_targets.yml -  tags: -  - pre_upgrade - -- name: Verify docker upgrade targets -  hosts: oo_masters_to_config:oo_etcd_to_config -  tasks: -  - include: ../pre/tasks/verify_docker_upgrade_targets.yml -  tags: -  - pre_upgrade - -- include: ../pre/gate_checks.yml -  tags: -  - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. - -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. -- name: Cleanup unused Docker images -  hosts: oo_masters_to_config:oo_etcd_to_config -  tasks: -  - include: ../cleanup_unused_images.yml - -- include: ../upgrade_control_plane.yml -  vars: -    master_config_hook: "v3_3/master_config_upgrade.yml" - -- include: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml deleted file mode 100644 index cee4e9087..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml +++ /dev/null @@ -1,113 +0,0 @@ ---- -# -# Node Upgrade Playbook -# -# Upgrades nodes only, but requires the control plane to have already been upgraded. -# -- include: ../init.yml -  tags: -  - pre_upgrade - -- name: Configure the upgrade target for the common upgrade tasks -  hosts: oo_all_hosts -  tags: -  - pre_upgrade -  tasks: -  - set_fact: -      openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" -      openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}" - -# Pre-upgrade -- include: ../initialize_nodes_to_upgrade.yml -  tags: -  - pre_upgrade - -- name: Update repos on nodes -  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config -  roles: -  - openshift_repos -  tags: -  - pre_upgrade - -- name: Set openshift_no_proxy_internal_hostnames -  hosts: oo_masters_to_config:oo_nodes_to_upgrade -  tags: -  - pre_upgrade -  tasks: -  - set_fact: -      openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade'] -                                                    | union(groups['oo_masters_to_config']) -                                                    | union(groups['oo_etcd_to_config'] | default([]))) -                                                | oo_collect('openshift.common.hostname') | default([]) | join (',') -                                                }}" -    when: -    - openshift_http_proxy is defined or openshift_https_proxy is defined -    - openshift_generate_no_proxy_hosts | default(True) | bool - -- include: ../pre/verify_inventory_vars.yml -  tags: -  - pre_upgrade - -- include: ../disable_node_excluders.yml -  tags: -  - pre_upgrade - -- include: ../../initialize_openshift_version.yml -  tags: -  - pre_upgrade -  vars: -    # Request specific openshift_release and let the openshift_version role handle converting this -    # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if -    # defined, and overriding the normal 
behavior of protecting the installed version -    openshift_release: "{{ openshift_upgrade_target }}" -    openshift_protect_installed_version: False - -    # We skip the docker role at this point in upgrade to prevent -    # unintended package, container, or config upgrades which trigger -    # docker restarts. At this early stage of upgrade we can assume -    # docker is configured and running. -    skip_docker_role: True - -- name: Verify masters are already upgraded -  hosts: oo_masters_to_config -  tags: -  - pre_upgrade -  tasks: -  - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run." -    when: openshift.common.version != openshift_version - -- include: ../pre/verify_control_plane_running.yml -  tags: -  - pre_upgrade - -- name: Verify upgrade targets -  hosts: oo_nodes_to_upgrade -  tasks: -  - include: ../pre/verify_upgrade_targets.yml -  tags: -  - pre_upgrade - -- name: Verify docker upgrade targets -  hosts: oo_nodes_to_upgrade -  tasks: -  - include: ../pre/tasks/verify_docker_upgrade_targets.yml -  tags: -  - pre_upgrade - -- include: ../pre/gate_checks.yml -  tags: -  - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. - -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. -- name: Cleanup unused Docker images -  hosts: oo_nodes_to_upgrade -  tasks: -  - include: ../cleanup_unused_images.yml - -- include: ../upgrade_nodes.yml -  vars: -    node_config_hook: "v3_3/node_config_upgrade.yml" diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml deleted file mode 100644 index 52458e03c..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml +++ /dev/null @@ -1,10 +0,0 @@ ---- -- modify_yaml: -    dest: "{{ openshift.common.config_base}}/master/master-config.yaml" -    yaml_key: 'controllerConfig.serviceServingCert.signer.certFile' -    yaml_value: service-signer.crt - -- modify_yaml: -    dest: "{{ openshift.common.config_base}}/master/master-config.yaml" -    yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile' -    yaml_value: service-signer.key diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/roles b/playbooks/common/openshift-cluster/upgrades/v3_4/roles deleted file mode 120000 index 6bc1a7aef..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_4/roles +++ /dev/null @@ -1 +0,0 @@ -../../../../../roles
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml deleted file mode 100644 index ae217ba2e..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml +++ /dev/null @@ -1,116 +0,0 @@ ---- -# -# Full Control Plane + Nodes Upgrade -# -- include: ../init.yml -  tags: -  - pre_upgrade - -- name: Configure the upgrade target for the common upgrade tasks -  hosts: oo_all_hosts -  tags: -  - pre_upgrade -  tasks: -  - set_fact: -      openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" -      openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" - -# Pre-upgrade - -- include: ../initialize_nodes_to_upgrade.yml -  tags: -  - pre_upgrade - -- name: Update repos and initialize facts on all hosts -  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config -  tags: -  - pre_upgrade -  roles: -  - openshift_repos - -- name: Set openshift_no_proxy_internal_hostnames -  hosts: oo_masters_to_config:oo_nodes_to_upgrade -  tags: -  - pre_upgrade -  tasks: -  - set_fact: -      openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] -                                                    | union(groups['oo_masters_to_config']) -                                                    | union(groups['oo_etcd_to_config'] | default([]))) -                                                | oo_collect('openshift.common.hostname') | default([]) | join (',') -                                                }}" -    when: -    - openshift_http_proxy is defined or openshift_https_proxy is defined -    - openshift_generate_no_proxy_hosts | default(True) | bool - -- include: ../pre/verify_inventory_vars.yml -  tags: -  - pre_upgrade - -- include: ../disable_master_excluders.yml -  tags: -  - pre_upgrade - -- include: ../disable_node_excluders.yml -  tags: -  - pre_upgrade - -- include: ../../initialize_openshift_version.yml -  tags: -  - pre_upgrade -  vars: -    # Request specific openshift_release and let the openshift_version role handle converting this -    # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if -    # defined, and overriding the normal behavior of protecting the installed version -    openshift_release: "{{ openshift_upgrade_target }}" -    openshift_protect_installed_version: False - -    # We skip the docker role at this point in upgrade to prevent -    # unintended package, container, or config upgrades which trigger -    # docker restarts. At this early stage of upgrade we can assume -    # docker is configured and running. -    skip_docker_role: True - -- include: ../pre/verify_control_plane_running.yml -  tags: -  - pre_upgrade - -- include: ../../../openshift-master/validate_restart.yml -  tags: -  - pre_upgrade - -- name: Verify upgrade targets -  hosts: oo_masters_to_config:oo_nodes_to_upgrade -  tasks: -  - include: ../pre/verify_upgrade_targets.yml -  tags: -  - pre_upgrade - -- name: Verify docker upgrade targets -  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config -  tasks: -  - include: ../pre/tasks/verify_docker_upgrade_targets.yml -  tags: -  - pre_upgrade - -- include: ../pre/gate_checks.yml -  tags: -  - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. 
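Note: each per-version playbook deleted in this commit rebuilds openshift_no_proxy_internal_hostnames the same way: union the node, master, and etcd groups, collect every host's openshift.common.hostname fact, and join the list into one comma-separated string. A rough equivalent using only stock Jinja2 filters, with hypothetical simplified group names standing in for the repo's oo_select_keys/oo_collect plugins:

- set_fact:
    # groups['nodes'] etc. are illustrative names; the real plays use
    # oo_nodes_to_config / oo_masters_to_config / oo_etcd_to_config.
    openshift_no_proxy_internal_hostnames: >-
      {{ (groups['nodes'] | union(groups['masters']) | union(groups['etcd'] | default([])))
         | map('extract', hostvars, ['openshift', 'common', 'hostname'])
         | list | join(',') }}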
- -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. -- name: Cleanup unused Docker images -  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config -  tasks: -  - include: ../cleanup_unused_images.yml - -- include: ../upgrade_control_plane.yml -  vars: -    master_config_hook: "v3_4/master_config_upgrade.yml" - -- include: ../upgrade_nodes.yml - -- include: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml deleted file mode 100644 index d7cb38d03..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml +++ /dev/null @@ -1,118 +0,0 @@ ---- -# -# Control Plane Upgrade Playbook -# -# Upgrades masters and Docker (only on standalone etcd hosts) -# -# This upgrade does not include: -# - node service running on masters -# - docker running on masters -# - node service running on dedicated nodes -# -# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately. -# -- include: ../init.yml -  tags: -  - pre_upgrade - -- name: Configure the upgrade target for the common upgrade tasks -  hosts: oo_all_hosts -  tags: -  - pre_upgrade -  tasks: -  - set_fact: -      openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" -      openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" - -# Pre-upgrade -- include: ../initialize_nodes_to_upgrade.yml -  tags: -  - pre_upgrade - -- name: Update repos on control plane hosts -  hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config -  tags: -  - pre_upgrade -  roles: -  - openshift_repos - -- name: Set openshift_no_proxy_internal_hostnames -  hosts: oo_masters_to_config:oo_nodes_to_upgrade -  tags: -  - pre_upgrade -  tasks: -  - set_fact: -      openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] -                                                    | union(groups['oo_masters_to_config']) -                                                    | union(groups['oo_etcd_to_config'] | default([]))) -                                                | oo_collect('openshift.common.hostname') | default([]) | join (',') -                                                }}" -    when: -    - openshift_http_proxy is defined or openshift_https_proxy is defined -    - openshift_generate_no_proxy_hosts | default(True) | bool - -- include: ../pre/verify_inventory_vars.yml -  tags: -  - pre_upgrade - -- include: ../disable_master_excluders.yml -  tags: -  - pre_upgrade - -- include: ../../initialize_openshift_version.yml -  tags: -  - pre_upgrade -  vars: -    # Request specific openshift_release and let the openshift_version role handle converting this -    # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if -    # defined, and overriding the normal behavior of protecting the installed version -    openshift_release: "{{ openshift_upgrade_target }}" -    openshift_protect_installed_version: False - -    # We skip the docker role at this point in upgrade to prevent -    # unintended package, container, or config upgrades which trigger -    # docker restarts. At this early stage of upgrade we can assume -    # docker is configured and running. 
-    skip_docker_role: True - -- include: ../pre/verify_control_plane_running.yml -  tags: -  - pre_upgrade - -- include: ../../../openshift-master/validate_restart.yml -  tags: -  - pre_upgrade - -- name: Verify upgrade targets -  hosts: oo_masters_to_config -  tasks: -  - include: ../pre/verify_upgrade_targets.yml -  tags: -  - pre_upgrade - -- name: Verify docker upgrade targets -  hosts: oo_masters_to_config:oo_etcd_to_config -  tasks: -  - include: ../pre/tasks/verify_docker_upgrade_targets.yml -  tags: -  - pre_upgrade - -- include: ../pre/gate_checks.yml -  tags: -  - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. - -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. -- name: Cleanup unused Docker images -  hosts: oo_masters_to_config:oo_etcd_to_config -  tasks: -  - include: ../cleanup_unused_images.yml - -- include: ../upgrade_control_plane.yml -  vars: -    master_config_hook: "v3_4/master_config_upgrade.yml" - -- include: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml deleted file mode 100644 index 8531e6045..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml +++ /dev/null @@ -1,111 +0,0 @@ ---- -# -# Node Upgrade Playbook -# -# Upgrades nodes only, but requires the control plane to have already been upgraded. -# -- include: ../init.yml -  tags: -  - pre_upgrade - -- name: Configure the upgrade target for the common upgrade tasks -  hosts: oo_all_hosts -  tags: -  - pre_upgrade -  tasks: -  - set_fact: -      openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" -      openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" - -# Pre-upgrade -- include: ../initialize_nodes_to_upgrade.yml -  tags: -  - pre_upgrade - -- name: Update repos on nodes -  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config -  roles: -  - openshift_repos -  tags: -  - pre_upgrade - -- name: Set openshift_no_proxy_internal_hostnames -  hosts: oo_masters_to_config:oo_nodes_to_upgrade -  tags: -  - pre_upgrade -  tasks: -  - set_fact: -      openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade'] -                                                    | union(groups['oo_masters_to_config']) -                                                    | union(groups['oo_etcd_to_config'] | default([]))) -                                                | oo_collect('openshift.common.hostname') | default([]) | join (',') -                                                }}" -    when: -    - openshift_http_proxy is defined or openshift_https_proxy is defined -    - openshift_generate_no_proxy_hosts | default(True) | bool - -- include: ../pre/verify_inventory_vars.yml -  tags: -  - pre_upgrade - -- include: ../disable_node_excluders.yml -  tags: -  - pre_upgrade - -- include: ../../initialize_openshift_version.yml -  tags: -  - pre_upgrade -  vars: -    # Request specific openshift_release and let the openshift_version role handle converting this -    # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if -    # defined, and overriding the normal behavior of protecting the installed version -    openshift_release: "{{ 
openshift_upgrade_target }}" -    openshift_protect_installed_version: False - -    # We skip the docker role at this point in upgrade to prevent -    # unintended package, container, or config upgrades which trigger -    # docker restarts. At this early stage of upgrade we can assume -    # docker is configured and running. -    skip_docker_role: True - -- name: Verify masters are already upgraded -  hosts: oo_masters_to_config -  tags: -  - pre_upgrade -  tasks: -  - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run." -    when: openshift.common.version != openshift_version - -- include: ../pre/verify_control_plane_running.yml -  tags: -  - pre_upgrade - -- name: Verify upgrade targets -  hosts: oo_nodes_to_upgrade -  tasks: -  - include: ../pre/verify_upgrade_targets.yml -  tags: -  - pre_upgrade - -- name: Verify docker upgrade targets -  hosts: oo_nodes_to_upgrade -  tasks: -  - include: ../pre/tasks/verify_docker_upgrade_targets.yml -  tags: -  - pre_upgrade - -- include: ../pre/gate_checks.yml -  tags: -  - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. - -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. -- name: Cleanup unused Docker images -  hosts: oo_nodes_to_upgrade -  tasks: -  - include: ../cleanup_unused_images.yml - -- include: ../upgrade_nodes.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml deleted file mode 100644 index 52458e03c..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml +++ /dev/null @@ -1,10 +0,0 @@ ---- -- modify_yaml: -    dest: "{{ openshift.common.config_base}}/master/master-config.yaml" -    yaml_key: 'controllerConfig.serviceServingCert.signer.certFile' -    yaml_value: service-signer.crt - -- modify_yaml: -    dest: "{{ openshift.common.config_base}}/master/master-config.yaml" -    yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile' -    yaml_value: service-signer.key diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/validator.yml deleted file mode 100644 index ae63c9ca9..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_5/validator.yml +++ /dev/null @@ -1,67 +0,0 @@ ---- -############################################################################### -# Pre upgrade checks for known data problems, if this playbook fails you should -# contact support. If you're not supported contact users@lists.openshift.com -# -# oc_objectvalidator provides these two checks -# 1 - SDN Data issues, never seen in the wild but known possible due to code audits -#     https://github.com/openshift/origin/issues/12697 -# 2 - Namespace protections, https://bugzilla.redhat.com/show_bug.cgi?id=1428934 -# -############################################################################### -- name: Verify 3.5 specific upgrade checks -  hosts: oo_first_master -  roles: -  - { role: lib_openshift } -  tasks: -  - name: Check for invalid namespaces and SDN errors -    oc_objectvalidator: - -  # What's all this PetSet business about? -  # -  # 'PetSets' were ALPHA resources in Kube <= 3.4. In >= 3.5 they are -  # no longer supported. 
The BETA resource 'StatefulSets' replaces -  # them. We can't migrate clients PetSets to -  # StatefulSets. Additionally, Red Hat has never officially supported -  # these resource types. Sorry users, but if you were using -  # unsupported resources from the Kube documentation then we can't -  # help you at this time. -  # -  # Reference: https://bugzilla.redhat.com/show_bug.cgi?id=1428229 -  - name: Check if legacy PetSets exist -    oc_obj: -      state: list -      all_namespaces: true -      kind: petsets -    register: l_do_petsets_exist - -  - name: Fail on unsupported resource migration 'PetSets' -    fail: -      msg: > -        PetSet objects were detected in your cluster. These are an -        Alpha feature in upstream Kubernetes 1.4 and are not supported -        by Red Hat. In Kubernetes 1.5, they are replaced by the Beta -        feature StatefulSets. Red Hat currently does not offer support -        for either PetSets or StatefulSets. - -        Automatically migrating PetSets to StatefulSets in OpenShift -        Container Platform (OCP) 3.5 is not supported. See the -        Kubernetes "Upgrading from PetSets to StatefulSets" -        documentation for additional information: - -        https://kubernetes.io/docs/tasks/manage-stateful-set/upgrade-pet-set-to-stateful-set/ - -        PetSets MUST be removed before upgrading to OCP 3.5. Red Hat -        strongly recommends reading the above referenced documentation -        in its entirety before taking any destructive actions. - -        If you want to simply remove all PetSets without manually -        migrating to StatefulSets, run this command as a user with -        cluster-admin privileges: - -        $ oc get petsets --all-namespaces -o yaml | oc delete -f - --cascade=false -    when: -    # Search did not fail, valid resource type found -    - l_do_petsets_exist.results.returncode == 0 -    # Items do exist in the search results -    - l_do_petsets_exist.results.results.0['items'] | length > 0 diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml deleted file mode 100644 index db0c8f886..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- -- modify_yaml: -    dest: "{{ openshift.common.config_base}}/master/master-config.yaml" -    yaml_key: 'controllerConfig.serviceServingCert.signer.certFile' -    yaml_value: service-signer.crt - -- modify_yaml: -    dest: "{{ openshift.common.config_base}}/master/master-config.yaml" -    yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile' -    yaml_value: service-signer.key - -- modify_yaml: -    dest: "{{ openshift.common.config_base }}/master/master-config.yaml" -    yaml_key: servingInfo.clientCA -    yaml_value: ca.crt diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml index 8e4f99c91..74d0cd8ad 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml @@ -11,13 +11,15 @@    tasks:    - name: Check for invalid namespaces and SDN errors      oc_objectvalidator: - +  # DO NOT DISABLE THIS, YOUR UPGRADE WILL FAIL IF YOU DO SO    - name: Confirm OpenShift authorization objects are in sync      command: >        {{ openshift.common.client_binary }} adm migrate authorization -    when: openshift_version | 
version_compare('3.7','<') +    when: +    - openshift_currently_installed_version | version_compare('3.7','<') +    - openshift_upgrade_pre_authorization_migration_enabled | default(true) | bool      changed_when: false      register: l_oc_result      until: l_oc_result.rc == 0 -    retries: 4 +    retries: 2      delay: 15 diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_8/filter_plugins index 7de3c1dd7..7de3c1dd7 120000 --- a/playbooks/common/openshift-cluster/upgrades/v3_5/filter_plugins +++ b/playbooks/common/openshift-cluster/upgrades/v3_8/filter_plugins diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/master_config_upgrade.yml index 1d4d1919c..1d4d1919c 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_8/master_config_upgrade.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/roles b/playbooks/common/openshift-cluster/upgrades/v3_8/roles index 415645be6..415645be6 120000 --- a/playbooks/common/openshift-cluster/upgrades/v3_5/roles +++ b/playbooks/common/openshift-cluster/upgrades/v3_8/roles diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml index bda245fe1..b3162bd5f 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml @@ -12,8 +12,8 @@    - pre_upgrade    tasks:    - set_fact: -      openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" -      openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" +      openshift_upgrade_target: '3.8' +      openshift_upgrade_min: '3.7'  # Pre-upgrade @@ -21,6 +21,10 @@    tags:    - pre_upgrade +- include: ../pre/verify_etcd3_backend.yml +  tags: +  - pre_upgrade +  - name: Update repos and initialize facts on all hosts    hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config    tags: @@ -47,6 +51,10 @@    tags:    - pre_upgrade +- include: ../pre/verify_health_checks.yml +  tags: +  - pre_upgrade +  - include: ../pre/verify_control_plane_running.yml    tags:    - pre_upgrade @@ -113,7 +121,21 @@  - include: ../upgrade_control_plane.yml    vars: -    master_config_hook: "v3_5/master_config_upgrade.yml" +    master_config_hook: "v3_7/master_config_upgrade.yml" + +# All controllers must be stopped at the same time then restarted +- name: Cycle all controller services to force new leader election mode +  hosts: oo_masters_to_config +  gather_facts: no +  tasks: +  - name: Stop {{ openshift.common.service_type }}-master-controllers +    systemd: +      name: "{{ openshift.common.service_type }}-master-controllers" +      state: stopped +  - name: Start {{ openshift.common.service_type }}-master-controllers +    systemd: +      name: "{{ openshift.common.service_type }}-master-controllers" +      state: started  - include: ../upgrade_nodes.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml index 6cdea7b84..3df5b17b5 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml @@ -21,14 +21,18 @@    - pre_upgrade  
  tasks:    - set_fact: -      openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" -      openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" +      openshift_upgrade_target: '3.8' +      openshift_upgrade_min: '3.7'  # Pre-upgrade  - include: ../initialize_nodes_to_upgrade.yml    tags:    - pre_upgrade +- include: ../pre/verify_etcd3_backend.yml +  tags: +  - pre_upgrade +  - name: Update repos on control plane hosts    hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config    tags: @@ -55,6 +59,10 @@    tags:    - pre_upgrade +- include: ../pre/verify_health_checks.yml +  tags: +  - pre_upgrade +  - include: ../pre/verify_control_plane_running.yml    tags:    - pre_upgrade @@ -117,6 +125,20 @@  - include: ../upgrade_control_plane.yml    vars: -    master_config_hook: "v3_5/master_config_upgrade.yml" +    master_config_hook: "v3_7/master_config_upgrade.yml" + +# All controllers must be stopped at the same time then restarted +- name: Cycle all controller services to force new leader election mode +  hosts: oo_masters_to_config +  gather_facts: no +  tasks: +  - name: Stop {{ openshift.common.service_type }}-master-controllers +    systemd: +      name: "{{ openshift.common.service_type }}-master-controllers" +      state: stopped +  - name: Start {{ openshift.common.service_type }}-master-controllers +    systemd: +      name: "{{ openshift.common.service_type }}-master-controllers" +      state: started  - include: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml index e29d0f8e6..f3d192ba7 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml @@ -14,8 +14,8 @@    - pre_upgrade    tasks:    - set_fact: -      openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" -      openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" +      openshift_upgrade_target: '3.8' +      openshift_upgrade_min: '3.7'  # Pre-upgrade  - include: ../initialize_nodes_to_upgrade.yml @@ -48,6 +48,10 @@    tags:    - pre_upgrade +- include: ../pre/verify_health_checks.yml +  tags: +  - pre_upgrade +  - include: ../disable_node_excluders.yml    tags:    - pre_upgrade diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/validator.yml new file mode 100644 index 000000000..d8540abfb --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_8/validator.yml @@ -0,0 +1,7 @@ +--- +- name: Verify 3.8 specific upgrade checks +  hosts: oo_first_master +  roles: +  - { role: lib_openshift } +  tasks: +  - debug: msg="noop" diff --git a/playbooks/common/openshift-etcd/config.yml b/playbooks/common/openshift-etcd/config.yml index 48d46bbb0..3fe483785 100644 --- a/playbooks/common/openshift-etcd/config.yml +++ b/playbooks/common/openshift-etcd/config.yml @@ -1,13 +1,15 @@  ---  - name: etcd Install Checkpoint Start -  hosts: oo_all_hosts +  hosts: all    gather_facts: false    tasks:    - name: Set etcd install 'In Progress' +    run_once: true      set_stats:        data: -        installer_phase_etcd: "In Progress" -      aggregate: false +        installer_phase_etcd: +          status: "In Progress" +          start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"  - include: ca.yml @@ -26,11 +28,13 @@    - role: 
nickhammond.logrotate  - name: etcd Install Checkpoint End -  hosts: oo_all_hosts +  hosts: all    gather_facts: false    tasks:    - name: Set etcd install 'Complete' +    run_once: true      set_stats:        data: -        installer_phase_etcd: "Complete" -      aggregate: false +        installer_phase_etcd: +          status: "Complete" +          end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" diff --git a/playbooks/common/openshift-glusterfs/config.yml b/playbooks/common/openshift-glusterfs/config.yml index c2ae5f313..19e14ab3e 100644 --- a/playbooks/common/openshift-glusterfs/config.yml +++ b/playbooks/common/openshift-glusterfs/config.yml @@ -1,13 +1,15 @@  ---  - name: GlusterFS Install Checkpoint Start -  hosts: oo_all_hosts +  hosts: all    gather_facts: false    tasks:    - name: Set GlusterFS install 'In Progress' +    run_once: true      set_stats:        data: -        installer_phase_glusterfs: "In Progress" -      aggregate: false +        installer_phase_glusterfs: +          status: "In Progress" +          start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"  - name: Open firewall ports for GlusterFS nodes    hosts: glusterfs @@ -46,11 +48,13 @@      when: groups.oo_glusterfs_to_config | default([]) | count > 0  - name: GlusterFS Install Checkpoint End -  hosts: oo_all_hosts +  hosts: all    gather_facts: false    tasks:    - name: Set GlusterFS install 'Complete' +    run_once: true      set_stats:        data: -        installer_phase_glusterfs: "Complete" -      aggregate: false +        installer_phase_glusterfs: +          status: "Complete" +          end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" diff --git a/playbooks/common/openshift-loadbalancer/config.yml b/playbooks/common/openshift-loadbalancer/config.yml index 2a703cb61..d737b836b 100644 --- a/playbooks/common/openshift-loadbalancer/config.yml +++ b/playbooks/common/openshift-loadbalancer/config.yml @@ -1,13 +1,15 @@  ---  - name: Load Balancer Install Checkpoint Start -  hosts: oo_all_hosts +  hosts: all    gather_facts: false    tasks:    - name: Set load balancer install 'In Progress' +    run_once: true      set_stats:        data: -        installer_phase_loadbalancer: "In Progress" -      aggregate: false +        installer_phase_loadbalancer: +          status: "In Progress" +          start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"  - name: Configure firewall and docker for load balancers    hosts: oo_lb_to_config:!oo_masters_to_config:!oo_nodes_to_config @@ -37,11 +39,13 @@    - role: tuned  - name: Load Balancer Install Checkpoint End -  hosts: oo_all_hosts +  hosts: all    gather_facts: false    tasks:    - name: Set load balancer install 'Complete' +    run_once: true      set_stats:        data: -        installer_phase_loadbalancer: "Complete" -      aggregate: false +        installer_phase_loadbalancer: +          status: "Complete" +          end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" diff --git a/playbooks/common/openshift-management/config.yml b/playbooks/common/openshift-management/config.yml index 908679e81..3f1cdf713 100644 --- a/playbooks/common/openshift-management/config.yml +++ b/playbooks/common/openshift-management/config.yml @@ -1,13 +1,15 @@  ---  - name: Management Install Checkpoint Start -  hosts: oo_all_hosts +  hosts: all    gather_facts: false    tasks:    - name: Set Management install 'In Progress' +    run_once: true      set_stats:        data: -        installer_phase_management: "In Progress" -      aggregate: false +        
installer_phase_management: +          status: "In Progress" +          start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"  - name: Setup CFME    hosts: oo_first_master @@ -25,11 +27,13 @@        template_dir: "{{ hostvars[groups.masters.0].r_openshift_management_mktemp.stdout }}"  - name: Management Install Checkpoint End -  hosts: oo_all_hosts +  hosts: all    gather_facts: false    tasks:    - name: Set Management install 'Complete' +    run_once: true      set_stats:        data: -        installer_phase_management: "Complete" -      aggregate: false +        installer_phase_management: +          status: "Complete" +          end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" diff --git a/playbooks/common/openshift-master/additional_config.yml b/playbooks/common/openshift-master/additional_config.yml index 350557f19..4fef5b923 100644 --- a/playbooks/common/openshift-master/additional_config.yml +++ b/playbooks/common/openshift-master/additional_config.yml @@ -1,13 +1,15 @@  ---  - name: Master Additional Install Checkpoint Start -  hosts: oo_all_hosts +  hosts: all    gather_facts: false    tasks:    - name: Set Master Additional install 'In Progress' +    run_once: true      set_stats:        data: -        installer_phase_master_additional: "In Progress" -      aggregate: false +        installer_phase_master_additional: +          status: "In Progress" +          start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"  - name: Additional master configuration    hosts: oo_first_master @@ -36,11 +38,13 @@      when: openshift_use_flannel | default(false) | bool  - name: Master Additional Install Checkpoint End -  hosts: oo_all_hosts +  hosts: all    gather_facts: false    tasks:    - name: Set Master Additional install 'Complete' +    run_once: true      set_stats:        data: -        installer_phase_master_additional: "Complete" -      aggregate: false +        installer_phase_master_additional: +          status: "Complete" +          end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml index b359919ba..6b0fd6b7c 100644 --- a/playbooks/common/openshift-master/config.yml +++ b/playbooks/common/openshift-master/config.yml @@ -1,13 +1,15 @@  ---  - name: Master Install Checkpoint Start -  hosts: oo_all_hosts +  hosts: all    gather_facts: false    tasks:    - name: Set Master install 'In Progress' +    run_once: true      set_stats:        data: -        installer_phase_master: "In Progress" -      aggregate: false +        installer_phase_master: +          status: "In Progress" +          start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"  - include: certificates.yml @@ -212,6 +214,12 @@        tasks_from: master      when: openshift_use_kuryr | default(false) | bool +  - name: Setup the node group config maps +    include_role: +      name: openshift_node_group +    when: openshift_master_bootstrap_enabled | default(false) | bool +    run_once: True +    post_tasks:    - name: Create group for deployment type      group_by: key=oo_masters_deployment_type_{{ openshift.common.deployment_type }} @@ -232,11 +240,13 @@      r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"  - name: Master Install Checkpoint End -  hosts: oo_all_hosts +  hosts: all    gather_facts: false    tasks:    - name: Set Master install 'Complete' +    run_once: true      set_stats:        data: -        installer_phase_master: "Complete" -      aggregate: false +        
installer_phase_master: +          status: "Complete" +          end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" diff --git a/playbooks/common/openshift-master/files/openshift-ansible-catalog-console.js b/playbooks/common/openshift-master/files/openshift-ansible-catalog-console.js deleted file mode 100644 index d0a9f11dc..000000000 --- a/playbooks/common/openshift-master/files/openshift-ansible-catalog-console.js +++ /dev/null @@ -1,2 +0,0 @@ -// empty file so that the master-config can still point to a file that exists -// this file will be replaced by the template service broker role if enabled diff --git a/playbooks/common/openshift-master/restart_services.yml b/playbooks/common/openshift-master/restart_services.yml index 4f8b758fd..4e1b3a3be 100644 --- a/playbooks/common/openshift-master/restart_services.yml +++ b/playbooks/common/openshift-master/restart_services.yml @@ -1,22 +1,4 @@  --- -- name: Restart master API -  service: -    name: "{{ openshift.common.service_type }}-master-api" -    state: restarted -  when: openshift_master_ha | bool -- name: Wait for master API to come back online -  wait_for: -    host: "{{ openshift.common.hostname }}" -    state: started -    delay: 10 -    port: "{{ openshift.master.api_port }}" -    timeout: 600 -  when: openshift_master_ha | bool -- name: Restart master controllers -  service: -    name: "{{ openshift.common.service_type }}-master-controllers" -    state: restarted -  # Ignore errrors since it is possible that type != simple for -  # pre-3.1.1 installations. -  ignore_errors: true -  when: openshift_master_ha | bool +- include_role: +    name: openshift_master +    tasks_from: restart.yml diff --git a/playbooks/common/openshift-master/revert-client-ca.yml b/playbooks/common/openshift-master/revert-client-ca.yml new file mode 100644 index 000000000..9ae23bf5b --- /dev/null +++ b/playbooks/common/openshift-master/revert-client-ca.yml @@ -0,0 +1,17 @@ +--- +- name: Set servingInfo.clientCA = ca.crt in master config +  hosts: oo_masters_to_config +  tasks: +  - name: Read master config +    slurp: +      src: "{{ openshift.common.config_base }}/master/master-config.yaml" +    register: g_master_config_output + +  # servingInfo.clientCA may be set as the client-ca-bundle.crt from +  # CA redeployment and this task reverts that change. +  - name: Set servingInfo.clientCA = ca.crt in master config +    modify_yaml: +      dest: "{{ openshift.common.config_base }}/master/master-config.yaml" +      yaml_key: servingInfo.clientCA +      yaml_value: ca.crt +    when: (g_master_config_output.content|b64decode|from_yaml).servingInfo.clientCA != 'ca.crt' diff --git a/playbooks/common/openshift-master/scaleup.yml b/playbooks/common/openshift-master/scaleup.yml index f4dc9df8a..4c415ebce 100644 --- a/playbooks/common/openshift-master/scaleup.yml +++ b/playbooks/common/openshift-master/scaleup.yml @@ -22,16 +22,17 @@    - name: restart master api      service: name={{ openshift.common.service_type }}-master-controllers state=restarted      notify: verify api server +  # We retry the controllers because the API may not be 100% initialized yet.    
- name: restart master controllers -    service: name={{ openshift.common.service_type }}-master-controllers state=restarted +    command: "systemctl restart {{ openshift.common.service_type }}-master-controllers" +    retries: 3 +    delay: 5 +    register: result +    until: result.rc == 0    - name: verify api server      command: >        curl --silent --tlsv1.2 -      {% if openshift.common.version_gte_3_2_or_1_2 | bool %}        --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt -      {% else %} -      --cacert {{ openshift.common.config_base }}/master/ca.crt -      {% endif %}        {{ openshift.master.api_url }}/healthz/ready      args:        # Disables the following warning: diff --git a/playbooks/common/openshift-master/tasks/wire_aggregator.yml b/playbooks/common/openshift-master/tasks/wire_aggregator.yml index 560eea785..97acc5d5d 100644 --- a/playbooks/common/openshift-master/tasks/wire_aggregator.yml +++ b/playbooks/common/openshift-master/tasks/wire_aggregator.yml @@ -136,9 +136,15 @@    when:    - not front_proxy_kubeconfig.stat.exists -- name: copy tech preview extension file for service console UI -  copy: -    src: openshift-ansible-catalog-console.js +- name: Delete temp directory +  file: +    name: "{{ certtemp.stdout }}" +    state: absent +  changed_when: False + +- name: Setup extension file for service console UI +  template: +    src: ../templates/openshift-ansible-catalog-console.js      dest: /etc/origin/master/openshift-ansible-catalog-console.js  - name: Update master config @@ -179,8 +185,13 @@    - yedit_output.changed    - openshift.master.cluster_method == 'native' +# We retry the controllers because the API may not be 100% initialized yet.  - name: restart master controllers -  systemd: name={{ openshift.common.service_type }}-master-controllers state=restarted +  command: "systemctl restart {{ openshift.common.service_type }}-master-controllers" +  retries: 3 +  delay: 5 +  register: result +  until: result.rc == 0    when:    - yedit_output.changed    - openshift.master.cluster_method == 'native' @@ -190,11 +201,7 @@    # wait_for port doesn't provide health information.    
command: >      curl --silent --tlsv1.2 -    {% if openshift.common.version_gte_3_2_or_1_2 | bool %}      --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt -    {% else %} -    --cacert {{ openshift.common.config_base }}/master/ca.crt -    {% endif %}      {{ openshift.master.api_url }}/healthz/ready    args:      # Disables the following warning: @@ -207,9 +214,3 @@    changed_when: false    when:    - yedit_output.changed - -- name: Delete temp directory -  file: -    name: "{{ certtemp.stdout }}" -    state: absent -  changed_when: False diff --git a/playbooks/common/openshift-master/templates/openshift-ansible-catalog-console.js b/playbooks/common/openshift-master/templates/openshift-ansible-catalog-console.js new file mode 100644 index 000000000..fd02325ba --- /dev/null +++ b/playbooks/common/openshift-master/templates/openshift-ansible-catalog-console.js @@ -0,0 +1 @@ +window.OPENSHIFT_CONSTANTS.TEMPLATE_SERVICE_BROKER_ENABLED = {{ 'true' if (template_service_broker_install | default(True)) else 'false' }}; diff --git a/playbooks/common/openshift-nfs/config.yml b/playbooks/common/openshift-nfs/config.yml index ce672daf5..6ea77e00b 100644 --- a/playbooks/common/openshift-nfs/config.yml +++ b/playbooks/common/openshift-nfs/config.yml @@ -1,13 +1,15 @@  ---  - name: NFS Install Checkpoint Start -  hosts: oo_all_hosts +  hosts: all    gather_facts: false    tasks:    - name: Set NFS install 'In Progress' +    run_once: true      set_stats:        data: -        installer_phase_nfs: "In Progress" -      aggregate: false +        installer_phase_nfs: +          status: "In Progress" +          start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"  - name: Configure nfs    hosts: oo_nfs_to_config @@ -16,11 +18,13 @@    - role: openshift_storage_nfs  - name: NFS Install Checkpoint End -  hosts: oo_all_hosts +  hosts: all    gather_facts: false    tasks:    - name: Set NFS install 'Complete' +    run_once: true      set_stats:        data: -        installer_phase_nfs: "Complete" -      aggregate: false +        installer_phase_nfs: +          status: "Complete" +          end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml index 4f8f98aef..28e3c1b1b 100644 --- a/playbooks/common/openshift-node/config.yml +++ b/playbooks/common/openshift-node/config.yml @@ -1,13 +1,15 @@  ---  - name: Node Install Checkpoint Start -  hosts: oo_all_hosts +  hosts: all    gather_facts: false    tasks:    - name: Set Node install 'In Progress' +    run_once: true      set_stats:        data: -        installer_phase_node: "In Progress" -      aggregate: false +        installer_phase_node: +          status: "In Progress" +          start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"  - include: certificates.yml @@ -24,11 +26,13 @@  - include: enable_excluders.yml  - name: Node Install Checkpoint End -  hosts: oo_all_hosts +  hosts: all    gather_facts: false    tasks:    - name: Set Node install 'Complete' +    run_once: true      set_stats:        data: -        installer_phase_node: "Complete" -      aggregate: false +        installer_phase_node: +          status: "Complete" +          end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
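Note: the checkpoint change repeated across the etcd, GlusterFS, load balancer, management, master, NFS, and node plays above is one pattern applied seven times: target hosts: all, make the set_stats call run_once, and replace the bare string value (plus aggregate: false) with a structure carrying a status and a start or end timestamp. Per phase, the stats a callback plugin would see end up shaped roughly like this (timestamp values are illustrative, in the date +%Y%m%d%H%M%SZ format used by the lookup):

installer_phase_node:
  status: "Complete"
  start: "20171107221036Z"
  end: "20171107221255Z"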

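Note: two service-handling idioms recur in the master and v3_8 hunks above. Controllers are cycled with an explicit stop task on all masters followed by a separate start task, so every controller is down before any comes back and the whole cluster re-elects under the new leader election mode at once; a per-host state: restarted would let old-mode and new-mode controllers overlap. And where a restart can race API initialization, the service call is swapped for a command wrapped in a retry loop, presumably because the command module exposes an rc for the until test:

- name: restart master controllers
  # Retry because the API may not be fully initialized when this runs.
  command: "systemctl restart {{ openshift.common.service_type }}-master-controllers"
  register: result
  until: result.rc == 0
  retries: 3
  delay: 5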