# playbooks/adhoc/upgrades/upgrade.yml
---
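# Ad hoc playbook for upgrading an existing OpenShift 3.0 cluster to 3.1. The
# plays below verify that the upgrade can proceed, persist the requested
# deployment type, back up etcd, upgrade master and node packages, reconcile
# cluster policy, and update the default router, registry, and examples.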
- name: Verify upgrade can proceed
  hosts: masters
  tasks:
    # Check the global deployment type rather than host facts; this is about
    # what the user is requesting.
    - fail: msg="Deployment type 'enterprise' is not supported for upgrade"
      when: deployment_type == "enterprise"

- name: Update deployment type
  hosts: OSEv3
  roles:
  - openshift_facts
  post_tasks: # technically tasks are run after roles, but post_tasks is a bit more explicit.
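  # Persist the requested deployment_type into the hosts' local facts so that
  # the remaining plays and roles read the updated value.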
  - openshift_facts:
      role: common
      local_facts:
        deployment_type: "{{ deployment_type }}"

- name: Backup etcd
  hosts: masters
  vars:
    embedded_etcd: "{{ openshift.master.embedded_etcd }}"
    timestamp: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
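    # The timestamp gives each backup run below a uniquely named directory.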
  roles:
  - openshift_facts
  tasks:
  - stat: path=/var/lib/openshift
    register: var_lib_openshift
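  # Newer versions use /var/lib/origin; if the host still has the old
  # /var/lib/openshift data directory, symlink it into place.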
  - name: Create origin symlink if necessary
    file: src=/var/lib/openshift/ dest=/var/lib/origin state=link
    when: var_lib_openshift.stat.exists
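  # df and du below both report in 1 KiB blocks, so the two values are directly
  # comparable; tail strips df's header line.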
  - name: Check available disk space for etcd backup
    # We assume to be using the data dir for all backups.
    shell: >
      df --output=avail -k {{ openshift.common.data_dir }} | tail -n 1
    register: avail_disk

  - name: Check current embedded etcd disk usage
    shell: >
      du -k {{ openshift.master.etcd_data_dir }} | tail -n 1 | cut -f1
    register: etcd_disk_usage
    when: embedded_etcd | bool

  - name: Abort if insufficient disk space for etcd backup
    fail: msg="{{ etcd_disk_usage.stdout }} KB disk space required for etcd backup, {{ avail_disk.stdout }} KB available."
    when: (embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int)
  - name: Install etcd (for etcdctl)
    yum: pkg=etcd state=latest
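  # Note: etcdctl backup copies the keyspace and rewrites the node and cluster
  # IDs, so the resulting directory can be restored independently of this cluster.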
  - name: Generate etcd backup
    command: etcdctl backup --data-dir={{ openshift.master.etcd_data_dir }} --backup-dir={{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}
  - name: Display location of etcd backup
    debug: msg="Etcd backup created in {{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}"

- name: Perform upgrade version checking
  hosts: masters[0]
  tasks:
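    # 'yum list available' prints 'name.arch  version-release  repo'; the cut
    # calls below strip everything but the bare version string.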
    - name: Determine available version
      shell: >
        yum list available {{ openshift.common.service_type }} | tail -n 1 | cut -f 2 -d " " | cut -f 1 -d "-"
      register: _new_version
    - debug: var=_new_version
    # The above check will return nothing if the package is already installed,
    # and we may be re-running upgrade due to a failure.
    - name: Determine installed version
      command: >
        rpm -q --queryformat '%{version}' {{ openshift.common.service_type }}
      register: _installed_version
      when: _new_version.stdout == ""
    # Register to a separate variable and copy it over with set_fact, so that a
    # skip of the task above cannot clobber _new_version with a 'skipped' result.
    - set_fact:
        _new_version: "{{ _installed_version }}"
      when: _new_version.stdout == ""
    # Fail if we still don't know:
    - debug: var=_new_version
    - name: Verify upgrade version
      fail: msg="Unable to determine upgrade version for {{ openshift.common.service_type }}"
      when: _new_version.stdout == ""

- name: Ensure AOS 3.0.2 or Origin 1.0.6
  hosts: masters[0]
  tasks:
    - fail: msg="This playbook requires Origin 1.0.6 or Atomic OpenShift 3.0.2 or later"
      when: _new_version.stdout | version_compare('1.0.6','<') or ( _new_version.stdout | version_compare('3.0','>=') and _new_version.stdout | version_compare('3.0.2','<') )

- name: Verify upgrade can proceed
  hosts: masters[0]
  tasks:
  # Check the global deployment type rather than host facts; this is about
  # what the user is requesting.
  - fail: msg="Deployment type 'enterprise' must be updated to 'openshift-enterprise' for upgrade to proceed"
    when: deployment_type == "enterprise" and (_new_version.stdout | version_compare('1.0.7', '>=') or _new_version.stdout | version_compare('3.1', '>='))

- name: Upgrade masters
  hosts: masters
  vars:
    openshift_version: "{{ openshift_pkg_version | default('') }}"
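    # When set, openshift_pkg_version is expected to include its leading dash
    # (e.g. '-3.0.2.0') so that it concatenates cleanly into the yum command below.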
  tasks:
    - name: Upgrade to latest available kernel
      yum: pkg=kernel state=latest
    - name: Upgrade master packages
      command: yum update -y {{ openshift.common.service_type }}-master{{ openshift_version }}
    - name: Upgrade master configuration
      openshift_upgrade_config: from_version=3.0 to_version=3.1 role=master config_base={{ openshift.common.config_base }}
    - name: Restart master services
      service: name="{{ openshift.common.service_type }}-master" state=restarted

- name: Upgrade nodes
  hosts: nodes
  vars:
    openshift_version: "{{ openshift_pkg_version | default('') }}"
  roles:
  - openshift_facts
  tasks:
    - name: Upgrade node packages
      command: yum update -y {{ openshift.common.service_type }}-node{{ openshift_version }}
    - name: Restart node services
      service: name="{{ openshift.common.service_type }}-node" state=restarted

- name: Update cluster policy
  hosts: masters[0]
  tasks:
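    # reconcile-cluster-roles updates the default cluster roles to match the
    # definitions shipped with the new version.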
    - name: oadm policy reconcile-cluster-roles --confirm
      command: >
        {{ openshift.common.admin_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
        policy reconcile-cluster-roles --confirm

- name: Update cluster policy bindings
  hosts: masters[0]
  tasks:
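    # --additive-only plus the exclusions below reconcile the default role
    # bindings without revoking grants made locally to the authenticated,
    # unauthenticated, and anonymous subjects.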
    - name: oadm policy reconcile-cluster-role-bindings --confirm
      command: >
        {{ openshift.common.admin_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
        policy reconcile-cluster-role-bindings
        --exclude-groups=system:authenticated
        --exclude-groups=system:unauthenticated
        --exclude-users=system:anonymous
        --additive-only=true --confirm
      when: ( _new_version.stdout | version_compare('1.0.6', '>') and _new_version.stdout | version_compare('3.0','<') ) or _new_version.stdout | version_compare('3.0.2','>')

- name: Upgrade default router
  hosts: masters[0]
  vars:
    - router_image: "{{ openshift.master.registry_url | replace('${component}', 'haproxy-router') | replace('${version}', 'v' + _new_version.stdout) }}"
    - oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
  tasks:
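    # The router upgrade: confirm a default router exists, make sure the
    # privileged SCC allows host networking and host ports, then patch the
    # deployment config to the new spec, host networking, and image.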
    - name: Check for default router
      command: >
        {{ oc_cmd }} get -n default dc/router
      register: _default_router
      failed_when: false
      changed_when: false
    - name: Check for allowHostNetwork and allowHostPorts
      when: _default_router.rc == 0
      shell: >
        {{ oc_cmd }} get -o yaml scc/privileged | /usr/bin/grep -e allowHostPorts -e allowHostNetwork
      register: _scc
    - name: Grant allowHostNetwork and allowHostPorts
      when:
        - _default_router.rc == 0
        - "'false' in _scc.stdout"
      command: >
        {{ oc_cmd }} patch scc/privileged -p '{"allowHostPorts":true,"allowHostNetwork":true}' --loglevel=9
    - name: Update deployment config to 1.0.4/3.0.1 spec
      when: _default_router.rc == 0
      command: >
        {{ oc_cmd }} patch dc/router -p
        '{"spec":{"strategy":{"rollingParams":{"updatePercent":-10},"spec":{"serviceAccount":"router","serviceAccountName":"router"}}}}'
    - name: Switch to hostNetwork=true
      when: _default_router.rc == 0
      command: >
        {{ oc_cmd }} patch dc/router -p '{"spec":{"template":{"spec":{"hostNetwork":true}}}}'
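    # With hostNetwork=true the router binds directly to the host's network
    # namespace instead of using the pod network.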
    - name: Update router image to current version
      when: _default_router.rc == 0
      command: >
        {{ oc_cmd }} patch dc/router -p
        '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}"}]}}}}'

- name: Upgrade default registry
  hosts: masters[0]
  vars:
    - registry_image: "{{ openshift.master.registry_url | replace('${component}', 'docker-registry') | replace('${version}', 'v' + _new_version.stdout) }}"
    - oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
  tasks:
    - name: Check for default registry
      command: >
        {{ oc_cmd }} get -n default dc/docker-registry
      register: _default_registry
      failed_when: false
      changed_when: false
    - name: Update registry image to current version
      when: _default_registry.rc == 0
      command: >
        {{ oc_cmd }} patch dc/docker-registry -p
        '{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}'

- name: Update image streams and templates
  hosts: masters[0]
  vars:
    openshift_examples_import_command: "update"
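    # Passing 'update' makes the examples role update existing imagestreams and
    # templates in place instead of only creating missing ones.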
    openshift_deployment_type: "{{ deployment_type }}"
  roles:
    - openshift_examples