path: root/playbooks/adhoc/upgrades/upgrade.yml
---
- name: Verify upgrade can proceed
  hosts: masters
  tasks:
  # We check the global deployment type rather than host facts, as this is
  # about what the user is requesting.
  - fail: msg="Deployment type 'enterprise' is not supported for upgrade"
    when: deployment_type == "enterprise"

- name: Backup etcd
  hosts: masters
  vars:
    embedded_etcd: "{{ openshift.master.embedded_etcd }}"
    timestamp: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
  roles:
  - openshift_facts
  tasks:
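  # Handle the openshift -> origin rename: if the old /var/lib/openshift
  # directory still exists, symlink /var/lib/origin to it so the new paths
  # keep working.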
  - name: Check whether the pre-rename /var/lib/openshift directory exists
    stat: path=/var/lib/openshift
    register: var_lib_openshift
  - name: Create origin symlink if necessary
    file: src=/var/lib/openshift/ dest=/var/lib/origin state=link
    when: var_lib_openshift.stat.exists
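  # df's --output option requires coreutils 8.21 or newer (shipped with
  # RHEL/CentOS 7).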
  - name: Check available disk space for etcd backup
    # We assume the same data dir will be used for all backups.
    shell: >
      df --output=avail -k {{ openshift.common.data_dir }} | tail -n 1
    register: avail_disk

  - name: Check current embedded etcd disk usage
    shell: >
      du -k {{ openshift.master.etcd_data_dir }} | tail -n 1 | cut -f1
    register: etcd_disk_usage
    when: embedded_etcd | bool

  - name: Abort if insufficient disk space for etcd backup
    fail: msg="{{ etcd_disk_usage.stdout }} Kb disk space required for etcd backup, {{ avail_disk.stdout }} Kb available."
    when: (embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int)
  - name: Install etcd (for etcdctl)
    yum: pkg=etcd state=latest
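  # Note: etcdctl backup rewrites the node and cluster IDs in the copy so a
  # restored backup cannot accidentally rejoin the running cluster.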
  - name: Generate etcd backup
    command: etcdctl backup --data-dir={{ openshift.master.etcd_data_dir }} --backup-dir={{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}
  - name: Display location of etcd backup
    debug: msg="Etcd backup created in {{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}"

- name: Upgrade base package on masters
  hosts: masters
  roles:
  - openshift_facts
  vars:
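    # If set, openshift_pkg_version must include the leading '-' (for
    # example, '-3.0.2.0'), since it is appended directly to the package name
    # below.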
    openshift_version: "{{ openshift_pkg_version | default('') }}"
  tasks:
    - name: Upgrade base package
      yum: pkg={{ openshift.common.service_type }}{{ openshift_version }} state=latest

- name: Evaluate oo_first_master
  hosts: localhost
  vars:
    g_masters_group: "{{ 'masters' }}"
  tasks:
    - name: Display all variables set for the current host
      debug:
        var: hostvars[inventory_hostname]
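    # add_host only changes the in-memory inventory; the oo_first_master
    # group exists for the remainder of this playbook run.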
    - name: Evaluate oo_first_master
      add_host:
        name: "{{ groups[g_masters_group][0] }}"
        groups: oo_first_master
        ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
        ansible_sudo: "{{ g_sudo | default(omit) }}"
      when: g_masters_group in groups and (groups[g_masters_group] | length) > 0

# TODO: ideally we would check the new version without installing it (some
# kind of yum repoquery? We would need to handle the openshift ->
# atomic-openshift package rename).
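# One possible sketch (untested; assumes repoquery from yum-utils is
# available):
#
#    - name: Query the candidate package version without installing it
#      command: >
#        repoquery --queryformat '%{version}' {{ openshift.common.service_type }}
#      register: _candidate_version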
- name: Perform upgrade version checking
  hosts: oo_first_master
  tasks:
    - name: Determine new version
      command: >
        rpm -q --queryformat '%{version}' {{ openshift.common.service_type }}
      register: _new_version

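# Origin releases are versioned 1.0.x and Atomic OpenShift releases 3.0.x;
# the check below enforces the minimum for whichever stream was installed.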
- name: Ensure AOS 3.0.2 or Origin 1.0.6
  hosts: oo_first_master
  tasks:
    - fail: msg="This playbook requires Origin 1.0.6 or Atomic OpenShift 3.0.2 or later"
      when: _new_version.stdout | version_compare('1.0.6','<') or ( _new_version.stdout | version_compare('3.0','>=') and _new_version.stdout | version_compare('3.0.2','<') )

- name: Verify upgrade can proceed
  hosts: oo_first_master
  tasks:
  # We check the global deployment type rather than host facts, as this is
  # about what the user is requesting.
  - fail: msg="Deployment type 'enterprise' must be updated to 'openshift-enterprise' for upgrade to proceed"
    when: deployment_type == "enterprise" and (_new_version.stdout | version_compare('1.0.7', '>=') or _new_version.stdout | version_compare('3.1', '>='))

#- name: Re-Run cluster configuration to apply latest configuration changes
#  include: ../../common/openshift-cluster/config.yml
#  vars:
#    g_etcd_group: "{{ 'etcd' }}"
#    g_masters_group: "{{ 'masters' }}"
#    g_nodes_group: "{{ 'nodes' }}"
#    openshift_cluster_id: "{{ cluster_id | default('default') }}"
#    openshift_deployment_type: "{{ deployment_type }}"

- name: Upgrade masters
  hosts: masters
  vars:
    openshift_version: "{{ openshift_pkg_version | default('') }}"
  tasks:
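    # Note: a newly installed kernel only takes effect after a reboot, which
    # this play does not perform.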
    - name: Upgrade to latest available kernel
      yum: pkg=kernel state=latest
    - name: Upgrade master packages
      yum: pkg={{ openshift.common.service_type }}-master{{ openshift_version }} state=latest
    - name: Upgrade master configuration
      openshift_upgrade_config: from_version=3.0 to_version=3.1 role=master
    - name: Restart master services
      service: name="{{ openshift.common.service_type}}-master" state=restarted

- name: Upgrade nodes
  hosts: nodes
  vars:
    openshift_version: "{{ openshift_pkg_version | default('') }}"
  roles:
  - openshift_facts
  tasks:
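    # Note: nodes are upgraded and restarted in place; this play does not
    # evacuate pods first.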
    - name: Upgrade node packages
      yum: pkg={{ openshift.common.service_type }}-node{{ openshift_version }} state=latest
    - name: Restart node services
      service: name="{{ openshift.common.service_type }}-node" state=restarted

- name: Update cluster policy
  hosts: oo_first_master
  tasks:
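    # reconcile-cluster-roles updates the default cluster roles to match the
    # newly installed version's bootstrap policy.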
    - name: oadm policy reconcile-cluster-roles --confirm
      command: >
        {{ openshift.common.admin_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
        policy reconcile-cluster-roles --confirm

- name: Update cluster policy bindings
  hosts: oo_first_master
  tasks:
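    # --additive-only and the exclusions below preserve any subjects that
    # admins have added to the default bindings; only missing entries are
    # reconciled.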
    - name: oadm policy reconcile-cluster-role-bindings --confirm
      command: >
        {{ openshift.common.admin_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
        policy reconcile-cluster-role-bindings
        --exclude-groups=system:authenticated
        --exclude-groups=system:unauthenticated
        --exclude-users=system:anonymous
        --additive-only=true --confirm
      when: ( _new_version.stdout | version_compare('1.0.6', '>') and _new_version.stdout | version_compare('3.0','<') ) or _new_version.stdout | version_compare('3.0.2','>')

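# The default router is patched in place: the privileged SCC is granted
# allowHostNetwork/allowHostPorts if needed, the deployment config is moved
# to the 1.0.4/3.0.1 spec with hostNetwork enabled, and the router image is
# updated to the new version.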
- name: Upgrade default router
  hosts: oo_first_master
  vars:
    - router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', 'v' + _new_version.stdout ) }}"
    - oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
  tasks:
    - name: Check for default router
      command: >
        {{ oc_cmd }} get -n default dc/router
      register: _default_router
      failed_when: false
      changed_when: false
    - name: Check for allowHostNetwork and allowHostPorts
      when: _default_router.rc == 0
      shell: >
        {{ oc_cmd }} get -o yaml scc/privileged | /usr/bin/grep -e allowHostPorts -e allowHostNetwork
      register: _scc
    - name: Grant allowHostNetwork and allowHostPorts
      when:
        - _default_router.rc == 0
        - "'false' in _scc.stdout"
      command: >
        {{ oc_cmd }} patch scc/privileged -p '{"allowHostPorts":true,"allowHostNetwork":true}' --loglevel=9
    - name: Update deployment config to 1.0.4/3.0.1 spec
      when: _default_router.rc == 0
      command: >
        {{ oc_cmd }} patch dc/router -p
        '{"spec":{"strategy":{"rollingParams":{"updatePercent":-10},"spec":{"serviceAccount":"router","serviceAccountName":"router"}}}}'
    - name: Switch to hostNetwork=true
      when: _default_router.rc == 0
      command: >
        {{ oc_cmd }} patch dc/router -p '{"spec":{"template":{"spec":{"hostNetwork":true}}}}'
    - name: Update router image to current version
      when: _default_router.rc == 0
      command: >
        {{ oc_cmd }} patch dc/router -p
        '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}"}]}}}}'

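# Unlike the router, the default registry only needs its image bumped to the
# new version; its deployment config spec is left untouched.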
- name: Upgrade default registry
  hosts: oo_first_master
  vars:
    - registry_image: "{{  openshift.master.registry_url | replace( '${component}', 'docker-registry' )  | replace ( '${version}', 'v' + _new_version.stdout  ) }}"
    - oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
  tasks:
    - name: Check for default registry
      command: >
        {{ oc_cmd }} get -n default dc/docker-registry
      register: _default_registry
      failed_when: false
      changed_when: false
    - name: Update registry image to current version
      when: _default_registry.rc == 0
      command: >
        {{ oc_cmd }} patch dc/docker-registry -p
        '{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}'

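# Re-import the default image streams and templates, passing "update" as the
# import command so that objects which already exist are updated rather than
# re-created.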
- name: Update image streams and templates
  hosts: oo_first_master
  vars:
    openshift_examples_import_command: "update"
    openshift_deployment_type: "{{ deployment_type }}"
  roles:
    - openshift_examples