| field | value | date |
|---|---|---|
| author | Devan Goodwin <dgoodwin@redhat.com> | 2016-09-12 15:50:32 -0300 |
| committer | Devan Goodwin <dgoodwin@redhat.com> | 2016-09-29 10:25:58 -0300 |
| commit | 9dcc8fc7123e1f13e945a658ffe7331730b0105f | |
| tree | 3bd75059ff30727706a82c43ac6378a34a03e114 | |
| parent | 6f056fd9673428c00b5e496a9a084cf09ad777cf | |
Split upgrade for control plane/nodes.
14 files changed, 173 insertions, 125 deletions
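In practice, the split means `upgrade.yml` is now just the two phases chained together, while `upgrade_control_plane.yml` and `upgrade_nodes.yml` can each be run on their own. A sketch of the relationship (the include targets and hook values are taken from the diff below; the invocation lines are illustrative):

```yaml
# Run the phases separately:
#   ansible-playbook -i <inventory> playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
#   ansible-playbook -i <inventory> playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
# or chained, as the updated upgrade.yml below does via the common playbooks:
- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml
  vars:
    master_config_hook: "v3_3/master_config_upgrade.yml"

- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml
  vars:
    node_config_hook: "v3_3/node_config_upgrade.yml"
```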
```diff
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/roles b/playbooks/byo/openshift-cluster/upgrades/v3_3/roles
new file mode 120000
index 000000000..6bc1a7aef
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/roles
@@ -0,0 +1 @@
+../../../../../roles
\ No newline at end of file
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
index 5a95b5fdb..87a8ef66c 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
@@ -12,7 +12,7 @@
       openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
 
 # Pre-upgrade
-- include: ../initialize_facts.yml
+- include: ../../../../common/openshift-cluster/initialize_facts.yml
 
 - name: Update repos and initialize facts on all hosts
   hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config:oo_lb_to_config
@@ -33,7 +33,7 @@
 
 - include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
 
-- include: ../initialize_openshift_version.yml
+- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
   vars:
     # Request specific openshift_release and let the openshift_version role handle converting this
     # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
@@ -45,7 +45,7 @@
     # docker_version defined, we don't want to actually do it until later)
     docker_protect_installed_version: True
 
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
 
 - include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
@@ -55,15 +55,29 @@
 
 - include: ../../../../common/openshift-cluster/upgrades/pre/backup_etcd.yml
-  #  vars:
-  #    openshift_deployment_type: "{{ deployment_type }}"
+
+- name: Exit upgrade if dry-run specified
+  hosts: oo_all_hosts
+  tasks:
+  - fail:
+      msg: "Pre-upgrade checks completed, exiting due to openshift_upgrade_dry_run variable."
+    when: openshift_upgrade_dry_run is defined and openshift_upgrade_dry_run | bool
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+  hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config
+  tasks:
+  - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
 
-- include: ../../../../common/openshift-cluster/upgrades/upgrade.yml
+- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml
   vars:
-    openshift_deployment_type: "{{ deployment_type }}"
     master_config_hook: "v3_3/master_config_upgrade.yml"
+
+- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml
+  vars:
     node_config_hook: "v3_3/node_config_upgrade.yml"
 
 - include: ../../../openshift-master/restart.yml
 
-- include: ../../../../common/openshift-cluster/upgrades/post.yml
+- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml
```
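The new "Exit upgrade if dry-run specified" play (moved here from `backup_etcd.yml`, and broadened from `oo_first_master` to `oo_all_hosts`) turns the whole pre-upgrade phase into a rehearsal. Enabling it is a one-variable affair; a sketch, with the file location being illustrative:

```yaml
# group_vars or inventory (illustrative location); equivalent to passing
# -e openshift_upgrade_dry_run=true on the ansible-playbook command line.
# All pre-upgrade checks and the etcd backup still run; the play above
# then fails intentionally before anything is upgraded.
openshift_upgrade_dry_run: true
```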
```diff
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_masters.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
index 94339dd63..bcc304141 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_masters.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
@@ -2,7 +2,14 @@
 #
 # Control Plane Upgrade Playbook
 #
-# Upgrades masters and etcd.
+# Upgrades masters and Docker (only on standalone etcd hosts)
+#
+# This upgrade does not include:
+# - node service running on masters
+# - docker running on masters
+# - node service running on dedicated nodes
+#
+# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
 #
 
 - include: ../../../../common/openshift-cluster/upgrades/init.yml
@@ -14,10 +21,10 @@
       openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
 
 # Pre-upgrade
-- include: ../initialize_facts.yml
+- include: ../../../../common/openshift-cluster/initialize_facts.yml
 
-- name: Update repos and initialize facts on all hosts
-  hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config:oo_lb_to_config
+- name: Update repos on control plane hosts
+  hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
   roles:
   - openshift_repos
@@ -35,7 +42,7 @@
 
 - include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
 
-- include: ../initialize_openshift_version.yml
+- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
   vars:
     # Request specific openshift_release and let the openshift_version role handle converting this
     # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
@@ -47,7 +54,7 @@
     # docker_version defined, we don't want to actually do it until later)
     docker_protect_installed_version: True
 
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
 
 - include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
@@ -56,3 +63,24 @@
 - include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
 
 - include: ../../../../common/openshift-cluster/upgrades/pre/backup_etcd.yml
+
+- name: Exit upgrade if dry-run specified
+  hosts: oo_all_hosts
+  tasks:
+  - fail:
+      msg: "Pre-upgrade checks completed, exiting due to openshift_upgrade_dry_run variable."
+    when: openshift_upgrade_dry_run is defined and openshift_upgrade_dry_run | bool
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+  hosts: oo_masters_to_config:oo_etcd_to_config
+  tasks:
+  - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
+
+- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml
+  vars:
+    master_config_hook: "v3_3/master_config_upgrade.yml"
+
+- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml
```
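`master_config_hook` is how version-specific config edits stay out of the shared control-plane playbook: the common upgrade includes whatever task file the variable names at the appropriate point. A hypothetical hook of the same shape (`modify_yaml` is openshift-ansible's bundled module for in-place YAML edits; the key and value here are invented for illustration, not the real contents of `v3_3/master_config_upgrade.yml`):

```yaml
---
# Hypothetical master_config_hook task file (illustrative key/value only).
- modify_yaml:
    dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
    yaml_key: 'kubernetesMasterConfig.someVersionSpecificSetting'  # illustrative
    yaml_value: 'new-value'                                        # illustrative
```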
```diff
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
index 9d29ba1ab..e79df1a02 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
@@ -14,9 +14,9 @@
       openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
 
 # Pre-upgrade
-- include: ../initialize_facts.yml
+- include: ../../../../common/openshift-cluster/initialize_facts.yml
 
-- name: Update repos and initialize facts on all hosts
+- name: Update repos on nodes
   hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config:oo_lb_to_config
   roles:
   - openshift_repos
@@ -35,7 +35,7 @@
 
 - include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
 
-- include: ../initialize_openshift_version.yml
+- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
   vars:
     # Request specific openshift_release and let the openshift_version role handle converting this
     # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
@@ -47,7 +47,20 @@
     # docker_version defined, we don't want to actually do it until later)
     docker_protect_installed_version: True
 
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running
+- name: Verify masters are already upgraded
+  hosts: oo_masters_to_config
+  tasks:
+  - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
+    when: openshift.common.version != openshift_version
+
+- name: Exit upgrade if dry-run specified
+  hosts: oo_all_hosts
+  tasks:
+  - fail:
+      msg: "Pre-upgrade checks completed, exiting due to openshift_upgrade_dry_run variable."
+    when: openshift_upgrade_dry_run is defined and openshift_upgrade_dry_run | bool
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
 
 - include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
@@ -55,8 +68,14 @@
 
 - include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
 
-- name: Verify masters are already upgraded
-  hosts: oo_masters_to_config
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+  hosts: oo_nodes_to_config
   tasks:
-  - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
-    when: openshift.common.version != openshift_version
+  - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
+
+- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml
+  vars:
+    node_config_hook: "v3_3/node_config_upgrade.yml"
diff --git a/playbooks/common/openshift-cluster/initialize_facts.yml b/playbooks/common/openshift-cluster/initialize_facts.yml
index 04dde632b..6d83d2527 100644
--- a/playbooks/common/openshift-cluster/initialize_facts.yml
+++ b/playbooks/common/openshift-cluster/initialize_facts.yml
@@ -11,3 +11,5 @@
         hostname: "{{ openshift_hostname | default(None) }}"
   - set_fact:
       openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
+  - set_fact:
+      openshift_deployment_type: "{{ deployment_type }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/cleanup_unused_images.yml b/playbooks/common/openshift-cluster/upgrades/cleanup_unused_images.yml
new file mode 100644
index 000000000..6e953be69
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/cleanup_unused_images.yml
@@ -0,0 +1,22 @@
+---
+- name: Check Docker image count
+  shell: "docker images -aq | wc -l"
+  register: docker_image_count
+  when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
+
+- debug: var=docker_image_count.stdout
+  when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
+
+- name: Remove unused Docker images for Docker 1.10+ migration
+  shell: "docker rmi `docker images -aq`"
+  # Will fail on images still in use:
+  failed_when: false
+  when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
+
+- name: Check Docker image count
+  shell: "docker images -aq | wc -l"
+  register: docker_image_count
+  when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
+
+- debug: var=docker_image_count.stdout
+  when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
```
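The new `cleanup_unused_images.yml` is a bare task list (no play header), so each caller above attaches it to its own host group. Every task is gated on `docker_upgrade_nuke_images`, making the default behavior a no-op. A sketch of opting in:

```yaml
# Illustrative inventory/group_vars entry. With this set, the tasks above
# attempt `docker rmi` on every image ID; images still in use survive
# because the removal task sets failed_when: false.
docker_upgrade_nuke_images: true
```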
```diff
diff --git a/playbooks/common/openshift-cluster/upgrades/init.yml b/playbooks/common/openshift-cluster/upgrades/init.yml
index f3bc70a72..03c4a3112 100644
--- a/playbooks/common/openshift-cluster/upgrades/init.yml
+++ b/playbooks/common/openshift-cluster/upgrades/init.yml
@@ -6,7 +6,7 @@
   become: no
   gather_facts: no
   tasks:
-  - include_vars: ../../../../byo/openshift-cluster/cluster_hosts.yml
+  - include_vars: ../../../byo/openshift-cluster/cluster_hosts.yml
   - add_host:
       name: "{{ item }}"
       groups: l_oo_all_hosts
diff --git a/playbooks/common/openshift-cluster/upgrades/post.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
index e43954453..e43954453 100644
--- a/playbooks/common/openshift-cluster/upgrades/post.yml
+++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/pre.yml b/playbooks/common/openshift-cluster/upgrades/pre.yml
deleted file mode 100644
index a2d231c59..000000000
--- a/playbooks/common/openshift-cluster/upgrades/pre.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-###############################################################################
-# Backup etcd
-###############################################################################
-
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/backup_etcd.yml b/playbooks/common/openshift-cluster/upgrades/pre/backup_etcd.yml
index 994ac2bb9..3164b43ee 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/backup_etcd.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/backup_etcd.yml
@@ -85,9 +85,3 @@
       msg: "Upgrade cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}"
     when: etcd_backup_failed | length > 0
 
-- name: Exit upgrade if dry-run specified
-  hosts: oo_first_master
-  tasks:
-  - fail:
-      msg: "Pre-upgrade checks completed, exiting due to openshift_upgrade_dry_run variable."
-    when: openshift_upgrade_dry_run is defined and openshift_upgrade_dry_run | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/roles b/playbooks/common/openshift-cluster/upgrades/pre/roles
new file mode 120000
index 000000000..415645be6
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/pre/roles
@@ -0,0 +1 @@
+../../../../../roles/
\ No newline at end of file
```
```diff
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
index 635172de9..d8b282b41 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
@@ -4,7 +4,7 @@
   tasks:
   # Only check if docker upgrade is required if docker_upgrade is not
   # already set to False.
-  - include: docker/upgrade_check.yml
+  - include: ../docker/upgrade_check.yml
     when: docker_upgrade is not defined or docker_upgrade | bool and not openshift.common.is_atomic | bool
 
   # Additional checks for Atomic hosts:
```
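One subtlety in the hunk above: Jinja's `and` binds tighter than `or`, so the `when:` runs the check either when `docker_upgrade` is unset, or when it is true on a non-Atomic host. An equivalent, explicitly parenthesized form (behavior unchanged, under that precedence assumption):

```yaml
- include: ../docker/upgrade_check.yml
  when: (docker_upgrade is not defined) or
        (docker_upgrade | bool and not openshift.common.is_atomic | bool)
```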
```diff
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index c4ce5fef6..5d74e0d10 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -1,37 +1,5 @@
 ---
 ###############################################################################
-# The restart playbook should be run after this playbook completes.
-###############################################################################
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
-  hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config
-  tasks:
-  - name: Check Docker image count
-    shell: "docker images -aq | wc -l"
-    register: docker_image_count
-    when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
-
-  - debug: var=docker_image_count.stdout
-    when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
-
-  - name: Remove unused Docker images for Docker 1.10+ migration
-    shell: "docker rmi `docker images -aq`"
-    # Will fail on images still in use:
-    failed_when: false
-    when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
-
-  - name: Check Docker image count
-    shell: "docker images -aq | wc -l"
-    register: docker_image_count
-    when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
-
-  - debug: var=docker_image_count.stdout
-    when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
-
-###############################################################################
 # Upgrade Masters
 ###############################################################################
 - name: Upgrade master packages
@@ -195,68 +163,12 @@
       msg: "Upgrade cannot continue. The following masters did not finish reconciling: {{ reconcile_failed | join(',') }}"
     when: reconcile_failed | length > 0
 
-###############################################################################
-# Upgrade Nodes
-###############################################################################
-
-# Here we handle all tasks that might require a node evac. (upgrading docker, and the node service)
-- name: Perform upgrades that may require node evacuation
-  hosts: oo_masters_to_config:oo_etcd_to_config:oo_nodes_to_config
+- name: Upgrade Docker on dedicated containerized etcd hosts
+  hosts: oo_etcd_to_config:!oo_nodes_to_config
   serial: 1
   any_errors_fatal: true
   roles:
   - openshift_facts
-  handlers:
-  - include: ../../../../roles/openshift_node/handlers/main.yml
-    static: yes
   tasks:
-  # TODO: To better handle re-trying failed upgrades, it would be nice to check if the node
-  # or docker actually needs an upgrade before proceeding. Perhaps best to save this until
-  # we merge upgrade functionality into the base roles and a normal config.yml playbook run.
-  - name: Determine if node is currently scheduleable
-    command: >
-      {{ openshift.common.client_binary }} get node {{ openshift.node.nodename }} -o json
-    register: node_output
-    delegate_to: "{{ groups.oo_first_master.0 }}"
-    changed_when: false
-    when: inventory_hostname in groups.oo_nodes_to_config
-
-  - set_fact:
-      was_schedulable: "{{ 'unschedulable' not in (node_output.stdout | from_json).spec }}"
-    when: inventory_hostname in groups.oo_nodes_to_config
-
-  - name: Mark unschedulable if host is a node
-    command: >
-      {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename }} --schedulable=false
-    delegate_to: "{{ groups.oo_first_master.0 }}"
-    when: inventory_hostname in groups.oo_nodes_to_config
-
-  - name: Evacuate Node for Kubelet upgrade
-    command: >
-      {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename }} --evacuate --force
-    delegate_to: "{{ groups.oo_first_master.0 }}"
-    when: inventory_hostname in groups.oo_nodes_to_config
-
   - include: docker/upgrade.yml
     when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift.common.is_atomic | bool
 
-  - include: "{{ node_config_hook }}"
-    when: node_config_hook is defined and inventory_hostname in groups.oo_nodes_to_config
-
-  - include: rpm_upgrade.yml
-    vars:
-       component: "node"
-       openshift_version: "{{ openshift_pkg_version | default('') }}"
-    when: inventory_hostname in groups.oo_nodes_to_config and not openshift.common.is_containerized | bool
-
-  - include: containerized_node_upgrade.yml
-    when: inventory_hostname in groups.oo_nodes_to_config and openshift.common.is_containerized | bool
-
-  - meta: flush_handlers
-
-  - name: Set node schedulability
-    command: >
-      {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename }} --schedulable=true
-    delegate_to: "{{ groups.oo_first_master.0 }}"
-    when: inventory_hostname in groups.oo_nodes_to_config and was_schedulable | bool
-
-
```
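The new etcd Docker-upgrade play relies on Ansible host-pattern exclusion: `oo_etcd_to_config:!oo_nodes_to_config` targets etcd hosts that are not also nodes, so colocated etcd/node hosts are left for the node playbook's serialized evacuation path. A minimal sketch of the pattern semantics (group names are openshift-ansible's; the play itself is illustrative):

```yaml
# Ansible host patterns: ":" unions groups, ":!" subtracts one.
- name: Runs only on dedicated etcd hosts
  hosts: oo_etcd_to_config:!oo_nodes_to_config
  gather_facts: no
  tasks:
  - debug:
      msg: "{{ inventory_hostname }} is etcd-only"
```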
```diff
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
new file mode 100644
index 000000000..0ab8ba23c
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -0,0 +1,60 @@
+---
+- name: Evacuate and upgrade nodes
+  hosts: oo_nodes_to_config
+  serial: 1
+  any_errors_fatal: true
+  roles:
+  - openshift_facts
+  handlers:
+  - include: ../../../../roles/openshift_node/handlers/main.yml
+    static: yes
+  tasks:
+  # TODO: To better handle re-trying failed upgrades, it would be nice to check if the node
+  # or docker actually needs an upgrade before proceeding. Perhaps best to save this until
+  # we merge upgrade functionality into the base roles and a normal config.yml playbook run.
+  - name: Determine if node is currently scheduleable
+    command: >
+      {{ openshift.common.client_binary }} get node {{ openshift.node.nodename | lower }} -o json
+    register: node_output
+    delegate_to: "{{ groups.oo_first_master.0 }}"
+    changed_when: false
+    when: inventory_hostname in groups.oo_nodes_to_config
+
+  - set_fact:
+      was_schedulable: "{{ 'unschedulable' not in (node_output.stdout | from_json).spec }}"
+    when: inventory_hostname in groups.oo_nodes_to_config
+
+  - name: Mark unschedulable if host is a node
+    command: >
+      {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename | lower }} --schedulable=false
+    delegate_to: "{{ groups.oo_first_master.0 }}"
+    when: inventory_hostname in groups.oo_nodes_to_config
+
+  - name: Evacuate Node for Kubelet upgrade
+    command: >
+      {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename | lower }} --evacuate --force
+    delegate_to: "{{ groups.oo_first_master.0 }}"
+    when: inventory_hostname in groups.oo_nodes_to_config
+
+  - include: docker/upgrade.yml
+    when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift.common.is_atomic | bool
+
+  - include: "{{ node_config_hook }}"
+    when: node_config_hook is defined and inventory_hostname in groups.oo_nodes_to_config
+
+  - include: rpm_upgrade.yml
+    vars:
+       component: "node"
+       openshift_version: "{{ openshift_pkg_version | default('') }}"
+    when: inventory_hostname in groups.oo_nodes_to_config and not openshift.common.is_containerized | bool
+
+  - include: containerized_node_upgrade.yml
+    when: inventory_hostname in groups.oo_nodes_to_config and openshift.common.is_containerized | bool
+
+  - meta: flush_handlers
+
+  - name: Set node schedulability
+    command: >
+      {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename | lower }} --schedulable=true
+    delegate_to: "{{ groups.oo_first_master.0 }}"
+    when: inventory_hostname in groups.oo_nodes_to_config and was_schedulable | bool
```
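Each node is handled serially (`serial: 1`): detect whether it was schedulable, cordon it, evacuate pods, upgrade, then restore schedulability only for nodes that were schedulable to begin with. The detection step works because Kubernetes omits `spec.unschedulable` from the JSON when it is false, so its absence means the node is schedulable. A sketch of that check in isolation (the hostname and play context are illustrative):

```yaml
- name: Check whether a node is currently schedulable
  hosts: oo_first_master
  gather_facts: no
  tasks:
  - command: "{{ openshift.common.client_binary }} get node node1.example.com -o json"
    register: node_output
    changed_when: false

  # spec.unschedulable is only serialized when true, so a key test suffices.
  - set_fact:
      was_schedulable: "{{ 'unschedulable' not in (node_output.stdout | from_json).spec }}"

  - debug: var=was_schedulable
```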
