---
- name: Verify upgrade can proceed
  hosts: masters
  tasks:
  # Check the global deployment type rather than host facts; this reflects
  # what the user is requesting.
    - fail: msg="Deployment type enterprise not supported for upgrade"
      when: deployment_type == "enterprise"

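# etcd is backed up before any packages are touched so cluster state can be
# recovered if the upgrade goes wrong; the backup is written under the
# OpenShift data dir on each master.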
- name: Backup etcd
  hosts: masters
  vars:
    embedded_etcd: "{{ openshift.master.embedded_etcd }}"
    timestamp: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
  roles:
  - openshift_facts
  tasks:
  - name: Check available disk space for etcd backup
    # The backup is written under the data dir, so free space is checked there.
    shell: >
      df --output=avail -k {{ openshift.common.data_dir }} | tail -n 1
    register: avail_disk

  - name: Check current embedded etcd disk usage
    shell: >
      du -k {{ openshift.master.etcd_data_dir }} | tail -n 1 | cut -f1
    register: etcd_disk_usage
    when: embedded_etcd | bool

  - name: Abort if insufficient disk space for etcd backup
    fail: msg="{{ etcd_disk_usage.stdout }} Kb disk space required for etcd backup, {{ avail_disk.stdout }} Kb available."
    when: (embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int)
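  # etcdctl comes from the etcd package; install it so the backup can be taken
  # even when etcd runs embedded in the master process.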
  - name: Install etcd (for etcdctl)
    yum: pkg=etcd state=latest
  - name: Generate etcd backup
    command: etcdctl backup --data-dir={{ openshift.master.etcd_data_dir }} --backup-dir={{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}
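  # Restore is a manual, environment-specific procedure; very roughly, stop the
  # master, point etcd at the etcd-backup-<timestamp> directory, and start it
  # with --force-new-cluster. Treat this as a sketch, not a tested runbook.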
  - fail: msg="All done for now."

- name: Re-run cluster configuration to apply latest configuration changes
  include: ../../common/openshift-cluster/config.yml
  vars:
    g_etcd_group: "{{ 'etcd' }}"
    g_masters_group: "{{ 'masters' }}"
    g_nodes_group: "{{ 'nodes' }}"
    openshift_cluster_id: "{{ cluster_id | default('default') }}"
    openshift_deployment_type: "{{ deployment_type }}"

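# Masters are upgraded and restarted before nodes; nodes should never be newer
# than the masters they connect to.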
- name: Upgrade masters
  hosts: masters
  vars:
    openshift_version: "{{ openshift_pkg_version | default('') }}"
  tasks:
    - name: Upgrade master packages
      yum: pkg={{ openshift.common.service_type }}-master{{ openshift_version }} state=latest
    - name: Restart master services
      service: name="{{ openshift.common.service_type}}-master" state=restarted

- name: Upgrade nodes
  hosts: nodes
  vars:
    openshift_version: "{{ openshift_pkg_version | default('') }}"
  tasks:
    - name: Upgrade node packages
      yum: pkg={{ openshift.common.service_type }}-node{{ openshift_version }} state=latest
    - name: Restart node services
      service: name="{{ openshift.common.service_type }}-node" state=restarted

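# The plays below key off the version that was actually installed, so rpm is
# queried directly on the first master.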
- name: Determine new master version
  hosts: oo_first_master
  tasks:
    - name: Determine new version
      command: >
        rpm -q --queryformat '%{version}' {{ openshift.common.service_type }}-master
      register: _new_version
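      # _new_version.stdout is expected to look like e.g. "1.0.6" for Origin or
      # "3.0.2.0" for Atomic OpenShift, depending on the installed packages.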

- name: Ensure AOS 3.0.2 or Origin 1.0.6
  hosts: oo_first_master
  tasks:
    - fail: msg="This playbook requires Origin 1.0.6 or Atomic OpenShift 3.0.2 or later"
      when: _new_version.stdout | version_compare('1.0.6','<') or ( _new_version.stdout | version_compare('3.0','>=') and _new_version.stdout | version_compare('3.0.2','<') )

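# reconcile-cluster-roles updates the default cluster roles to match the
# definitions shipped with the new release; custom roles are left untouched.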
- name: Update cluster policy
  hosts: oo_first_master
  tasks:
    - name: oadm policy reconcile-cluster-roles --confirm
      command: >
        {{ openshift.common.admin_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
        policy reconcile-cluster-roles --confirm

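# The binding reconciliation runs additively and excludes the
# system:authenticated/system:unauthenticated groups and the system:anonymous
# user, so bindings an admin deliberately removed for those subjects are not
# silently re-added.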
- name: Update cluster policy bindings
  hosts: oo_first_master
  tasks:
    - name: oadm policy reconcile-cluster-role-bindings --confirm
      command: >
        {{ openshift.common.admin_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
        policy reconcile-cluster-role-bindings
        --exclude-groups=system:authenticated
        --exclude-groups=system:unauthenticated
        --exclude-users=system:anonymous
        --additive-only=true --confirm
      when: ( _new_version.stdout | version_compare('1.0.6', '>') and _new_version.stdout | version_compare('3.0','<') ) or _new_version.stdout | version_compare('3.0.2','>')

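# The router image is built from the master's registry_url template by filling
# in the ${component} and ${version} placeholders; for example, an Origin
# registry_url of "openshift/origin-${component}:${version}" would become
# "openshift/origin-haproxy-router:v1.0.6". The dc is only patched if a
# default router exists.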
- name: Upgrade default router
  hosts: oo_first_master
  vars:
    - router_image: "{{ openshift.master.registry_url | replace('${component}', 'haproxy-router') | replace('${version}', 'v' + _new_version.stdout) }}"
    - oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
  tasks:
    - name: Check for default router
      command: >
        {{ oc_cmd }} get -n default dc/router
      register: _default_router
      failed_when: false
      changed_when: false
    - name: Check for allowHostNetwork and allowHostPorts
      when: _default_router.rc == 0
      shell: >
        {{ oc_cmd }} get -o yaml scc/privileged | /usr/bin/grep -e allowHostPorts -e allowHostNetwork
      register: _scc
    - name: Grant allowHostNetwork and allowHostPorts
      when:
        - _default_router.rc == 0
        - "'false' in _scc.stdout"
      command: >
        {{ oc_cmd }} patch scc/privileged -p '{"allowHostPorts":true,"allowHostNetwork":true}' --loglevel=9
    - name: Update deployment config to 1.0.4/3.0.1 spec
      when: _default_router.rc == 0
      command: >
        {{ oc_cmd }} patch dc/router -p
        '{"spec":{"strategy":{"rollingParams":{"updatePercent":-10},"spec":{"serviceAccount":"router","serviceAccountName":"router"}}}}'
    - name: Switch to hostNetwork=true
      when: _default_router.rc == 0
      command: >
        {{ oc_cmd }} patch dc/router -p '{"spec":{"template":{"spec":{"hostNetwork":true}}}}'
    - name: Update router image to current version
      when: _default_router.rc == 0
      command: >
        {{ oc_cmd }} patch dc/router -p
        '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}"}]}}}}'

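# Same pattern as the router: the image is only patched if a default
# docker-registry deployment config exists.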
- name: Upgrade default registry
  hosts: oo_first_master
  vars:
    - registry_image: "{{ openshift.master.registry_url | replace('${component}', 'docker-registry') | replace('${version}', 'v' + _new_version.stdout) }}"
    - oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
  tasks:
    - name: Check for default registry
      command: >
          {{ oc_cmd }} get -n default dc/docker-registry
      register: _default_registry
      failed_when: false
      changed_when: false
    - name: Update registry image to current version
      when: _default_registry.rc == 0
      command: >
        {{ oc_cmd }} patch dc/docker-registry -p
        '{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}'

- name: Update image streams and templates
  hosts: oo_first_master
  vars:
    openshift_examples_import_command: "update"
    openshift_deployment_type: "{{ deployment_type }}"
  roles:
    - openshift_examples