Diffstat (limited to 'roles/openshift_master/tasks/main.yml')
-rw-r--r--  roles/openshift_master/tasks/main.yml | 278
1 file changed, 174 insertions(+), 104 deletions(-)
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index aed5598c0..894fe8e2b 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -7,40 +7,60 @@
- fail:
msg: >
Invalid OAuth grant method: {{ openshift_master_oauth_grant_method }}
- when: openshift_master_oauth_grant_method is defined and openshift_master_oauth_grant_method not in openshift_master_valid_grant_methods
+ when:
+ - openshift_master_oauth_grant_method is defined
+ - openshift_master_oauth_grant_method not in openshift_master_valid_grant_methods
# HA Variable Validation
- fail:
msg: "openshift_master_cluster_method must be set to either 'native' or 'pacemaker' for multi-master installations"
- when: openshift_master_ha | bool and ((openshift_master_cluster_method is not defined) or (openshift_master_cluster_method is defined and openshift_master_cluster_method not in ["native", "pacemaker"]))
+ when:
+ - openshift.master.ha | bool
+ - (openshift.master.cluster_method is not defined) or (openshift.master.cluster_method is defined and openshift.master.cluster_method not in ["native", "pacemaker"])
- fail:
msg: "'native' high availability is not supported for the requested OpenShift version"
- when: openshift_master_ha | bool and openshift_master_cluster_method == "native" and not openshift.common.version_gte_3_1_or_1_1 | bool
+ when:
+ - openshift.master.ha | bool
+ - openshift.master.cluster_method == "native"
+ - not openshift.common.version_gte_3_1_or_1_1 | bool
- fail:
msg: "openshift_master_cluster_password must be set for multi-master installations"
- when: openshift_master_ha | bool and openshift_master_cluster_method == "pacemaker" and (openshift_master_cluster_password is not defined or not openshift_master_cluster_password)
+ when:
+ - openshift.master.ha | bool
+ - openshift.master.cluster_method == "pacemaker"
+ - openshift_master_cluster_password is not defined or not openshift_master_cluster_password
- fail:
msg: "Pacemaker based HA is not supported at this time when used with containerized installs"
- when: openshift_master_ha | bool and openshift_master_cluster_method == "pacemaker" and openshift.common.is_containerized | bool
+ when:
+ - openshift.master.ha | bool
+ - openshift.master.cluster_method == "pacemaker"
+ - openshift.common.is_containerized | bool
+
+- name: Open up firewall ports
+ include: firewall.yml
+ static: yes
- name: Install Master package
package:
name: "{{ openshift.common.service_type }}-master{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}"
state: present
- when: not openshift.common.is_containerized | bool
+ when:
+ - not openshift.common.is_containerized | bool
-- name: Create openshift.common.data_dir
+- name: Create r_openshift_master_data_dir
file:
- path: "{{ openshift.common.data_dir }}"
+ path: "{{ r_openshift_master_data_dir }}"
state: directory
mode: 0755
owner: root
group: root
- when: openshift.common.is_containerized | bool
+ when:
+ - openshift.common.is_containerized | bool
- name: Reload systemd units
command: systemctl daemon-reload
- when: openshift.common.is_containerized | bool and install_result | changed
+ when:
+ - openshift.common.is_containerized | bool
- name: Re-gather package dependent master facts
openshift_facts:
@@ -57,9 +77,8 @@
args:
creates: "{{ openshift_master_policy }}"
notify:
- - restart master
- - restart master api
- - restart master controllers
+ - restart master api
+ - restart master controllers
- name: Create the scheduler config
copy:
@@ -67,21 +86,22 @@
dest: "{{ openshift_master_scheduler_conf }}"
backup: true
notify:
- - restart master
- - restart master api
- - restart master controllers
+ - restart master api
+ - restart master controllers
- name: Install httpd-tools if needed
package: name=httpd-tools state=present
- when: (item.kind == 'HTPasswdPasswordIdentityProvider') and
- not openshift.common.is_atomic | bool
+ when:
+ - item.kind == 'HTPasswdPasswordIdentityProvider'
+ - not openshift.common.is_atomic | bool
with_items: "{{ openshift.master.identity_providers }}"
- name: Ensure htpasswd directory exists
file:
path: "{{ item.filename | dirname }}"
state: directory
- when: item.kind == 'HTPasswdPasswordIdentityProvider'
+ when:
+ - item.kind == 'HTPasswdPasswordIdentityProvider'
with_items: "{{ openshift.master.identity_providers }}"
- name: Create the htpasswd file if needed
@@ -89,7 +109,9 @@
dest: "{{ item.filename }}"
src: htpasswd.j2
backup: yes
- when: item.kind == 'HTPasswdPasswordIdentityProvider' and openshift.master.manage_htpasswd | bool
+ when:
+ - item.kind == 'HTPasswdPasswordIdentityProvider'
+ - openshift.master.manage_htpasswd | bool
with_items: "{{ openshift.master.identity_providers }}"
- name: Ensure htpasswd file exists
@@ -98,7 +120,8 @@
force: no
content: ""
mode: 0600
- when: item.kind == 'HTPasswdPasswordIdentityProvider'
+ when:
+ - item.kind == 'HTPasswdPasswordIdentityProvider'
with_items: "{{ openshift.master.identity_providers }}"
- name: Create the ldap ca file if needed
@@ -107,7 +130,9 @@
content: "{{ openshift.master.ldap_ca }}"
mode: 0600
backup: yes
- when: openshift.master.ldap_ca is defined and item.kind == 'LDAPPasswordIdentityProvider'
+ when:
+ - openshift.master.ldap_ca is defined
+ - item.kind == 'LDAPPasswordIdentityProvider'
with_items: "{{ openshift.master.identity_providers }}"
- name: Create the openid ca file if needed
@@ -116,7 +141,10 @@
content: "{{ openshift.master.openid_ca }}"
mode: 0600
backup: yes
- when: openshift.master.openid_ca is defined and item.kind == 'OpenIDIdentityProvider' and item.ca | default('') != ''
+ when:
+ - openshift.master.openid_ca is defined
+ - item.kind == 'OpenIDIdentityProvider'
+ - item.ca | default('') != ''
with_items: "{{ openshift.master.identity_providers }}"
- name: Create the request header ca file if needed
@@ -125,15 +153,38 @@
content: "{{ openshift.master.request_header_ca }}"
mode: 0600
backup: yes
- when: openshift.master.request_header_ca is defined and item.kind == 'RequestHeaderIdentityProvider' and item.clientCA | default('') != ''
+ when:
+ - openshift.master.request_header_ca is defined
+ - item.kind == 'RequestHeaderIdentityProvider'
+ - item.clientCA | default('') != ''
with_items: "{{ openshift.master.identity_providers }}"
+# This is an ugly hack to verify settings are in a file without modifying them with lineinfile.
+# The template file will stomp any other settings made.
+- block:
+ - name: check whether our docker-registry setting exists in the env file
+ command: "awk '/^OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000/' /etc/sysconfig/{{ openshift.common.service_type }}-master"
+ failed_when: false
+ changed_when: false
+ register: l_already_set
+
+ - set_fact:
+ openshift_push_via_dns: "{{ openshift.common.version_gte_3_6 or (l_already_set.stdout is defined and l_already_set.stdout | match('OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000')) }}"
+
+- name: Set fact of all etcd host IPs
+ openshift_facts:
+ role: common
+ local_facts:
+ no_proxy_etcd_host_ips: "{{ openshift_no_proxy_etcd_host_ips }}"
+
- name: Install the systemd units
include: systemd_units.yml
- name: Install Master system container
include: system_container.yml
- when: openshift.common.is_containerized | bool and openshift.common.is_master_system_container | bool
+ when:
+ - openshift.common.is_containerized | bool
+ - openshift.common.is_master_system_container | bool
- name: Create session secrets file
template:
@@ -142,10 +193,11 @@
owner: root
group: root
mode: 0600
- when: openshift.master.session_auth_secrets is defined and openshift.master.session_encryption_secrets is defined
+ when:
+ - openshift.master.session_auth_secrets is defined
+ - openshift.master.session_encryption_secrets is defined
notify:
- - restart master
- - restart master api
+ - restart master api
- set_fact:
translated_identity_providers: "{{ openshift.master.identity_providers | translate_idps('v1', openshift.common.version, openshift.common.deployment_type) }}"
@@ -160,89 +212,82 @@
group: root
mode: 0600
notify:
- - restart master
- - restart master api
- - restart master controllers
-
-- include: set_loopback_context.yml
- when: openshift.common.version_gte_3_2_or_1_2
-
-# TODO: Master startup can fail when ec2 transparently reallocates the block
-# storage, causing etcd writes to temporarily fail. Retry failures blindly just
-# once to allow time for this transient condition to to resolve and for systemd
-# to restart the master (which will eventually succeed).
-#
-# https://github.com/coreos/etcd/issues/3864
-# https://github.com/openshift/origin/issues/6065
-# https://github.com/openshift/origin/issues/6447
-- name: Start and enable master
- systemd:
- daemon_reload: yes
- name: "{{ openshift.common.service_type }}-master"
- enabled: yes
- state: started
- when: not openshift_master_ha | bool
- register: start_result
- until: not start_result | failed
- retries: 1
- delay: 60
- notify: Verify API Server
-
-- name: Stop and disable non-HA master when running HA
- systemd:
- name: "{{ openshift.common.service_type }}-master"
- enabled: no
- state: stopped
- when: openshift_master_ha | bool
- register: task_result
- failed_when: task_result|failed and 'could not' not in task_result.msg|lower
+ - restart master api
+ - restart master controllers
+
+- name: modify controller args
+ yedit:
+ src: /etc/origin/master/master-config.yaml
+ edits:
+ - key: kubernetesMasterConfig.controllerArguments.cluster-signing-cert-file
+ value:
+ - /etc/origin/master/ca.crt
+ - key: kubernetesMasterConfig.controllerArguments.cluster-signing-key-file
+ value:
+ - /etc/origin/master/ca.key
+ notify:
+ - restart master controllers
+ when: openshift_master_bootstrap_enabled | default(False)
-- set_fact:
- master_service_status_changed: "{{ start_result | changed }}"
- when: not openshift_master_ha | bool
+- include: registry_auth.yml
-- name: Mask master service
- systemd:
- name: "{{ openshift.common.service_type }}-master"
- masked: yes
- when: >
- openshift_master_ha | bool and
- openshift.master.cluster_method == 'native' and
- not openshift.common.is_containerized | bool
+- include: set_loopback_context.yml
+ when:
+ - openshift.common.version_gte_3_2_or_1_2
- name: Start and enable master api on first master
systemd:
name: "{{ openshift.common.service_type }}-master-api"
enabled: yes
state: started
- when: openshift_master_ha | bool and openshift.master.cluster_method == 'native' and inventory_hostname == openshift_master_hosts[0]
- register: start_result
- until: not start_result | failed
+ when:
+ - openshift.master.cluster_method == 'native'
+ - inventory_hostname == openshift_master_hosts[0]
+ register: l_start_result
+ until: not l_start_result | failed
retries: 1
delay: 60
+- name: Dump logs from master-api if it failed
+ command: journalctl --no-pager -n 100 -u {{ openshift.common.service_type }}-master-api
+ when:
+ - l_start_result | failed
+
- set_fact:
- master_api_service_status_changed: "{{ start_result | changed }}"
- when: openshift_master_ha | bool and openshift.master.cluster_method == 'native' and inventory_hostname == openshift_master_hosts[0]
+ master_api_service_status_changed: "{{ l_start_result | changed }}"
+ when:
+ - openshift.master.cluster_method == 'native'
+ - inventory_hostname == openshift_master_hosts[0]
- pause:
seconds: 15
- when: openshift_master_ha | bool and openshift.master.cluster_method == 'native'
+ when:
+ - openshift.master.ha | bool
+ - openshift.master.cluster_method == 'native'
- name: Start and enable master api all masters
systemd:
name: "{{ openshift.common.service_type }}-master-api"
enabled: yes
state: started
- when: openshift_master_ha | bool and openshift.master.cluster_method == 'native' and inventory_hostname != openshift_master_hosts[0]
- register: start_result
- until: not start_result | failed
+ when:
+ - openshift.master.cluster_method == 'native'
+ - inventory_hostname != openshift_master_hosts[0]
+ register: l_start_result
+ until: not l_start_result | failed
retries: 1
delay: 60
+- name: Dump logs from master-api if it failed
+ command: journalctl --no-pager -n 100 -u {{ openshift.common.service_type }}-master-api
+ when:
+ - l_start_result | failed
+
- set_fact:
- master_api_service_status_changed: "{{ start_result | changed }}"
- when: openshift_master_ha | bool and openshift.master.cluster_method == 'native' and inventory_hostname != openshift_master_hosts[0]
+ master_api_service_status_changed: "{{ l_start_result | changed }}"
+ when:
+ - openshift.master.cluster_method == 'native'
+ - inventory_hostname != openshift_master_hosts[0]
# A separate wait is required here for native HA since notifies will
# be resolved after all tasks in the role.
@@ -257,59 +302,84 @@
--cacert {{ openshift.common.config_base }}/master/ca.crt
{% endif %}
{{ openshift.master.api_url }}/healthz/ready
- register: api_available_output
- until: api_available_output.stdout == 'ok'
+ register: l_api_available_output
+ until: l_api_available_output.stdout == 'ok'
retries: 120
delay: 1
run_once: true
changed_when: false
- when: openshift_master_ha | bool and openshift.master.cluster_method == 'native' and master_api_service_status_changed | bool
+ when:
+ - openshift.master.cluster_method == 'native'
+ - master_api_service_status_changed | bool
- name: Start and enable master controller on first master
systemd:
name: "{{ openshift.common.service_type }}-master-controllers"
enabled: yes
state: started
- when: openshift_master_ha | bool and openshift.master.cluster_method == 'native' and inventory_hostname == openshift_master_hosts[0]
- register: start_result
- until: not start_result | failed
+ when:
+ - openshift.master.cluster_method == 'native'
+ - inventory_hostname == openshift_master_hosts[0]
+ register: l_start_result
+ until: not l_start_result | failed
retries: 1
delay: 60
+- name: Dump logs from master-controllers if it failed
+ command: journalctl --no-pager -n 100 -u {{ openshift.common.service_type }}-master-controllers
+ when:
+ - l_start_result | failed
+
- name: Wait for master controller service to start on first master
pause:
seconds: 15
- when: openshift_master_ha | bool and openshift.master.cluster_method == 'native'
+ when:
+ - openshift.master.cluster_method == 'native'
- name: Start and enable master controller on all masters
systemd:
name: "{{ openshift.common.service_type }}-master-controllers"
enabled: yes
state: started
- when: openshift_master_ha | bool and openshift.master.cluster_method == 'native' and inventory_hostname != openshift_master_hosts[0]
- register: start_result
- until: not start_result | failed
+ when:
+ - openshift.master.cluster_method == 'native'
+ - inventory_hostname != openshift_master_hosts[0]
+ register: l_start_result
+ until: not l_start_result | failed
retries: 1
delay: 60
+- name: Dump logs from master-controllers if it failed
+ command: journalctl --no-pager -n 100 -u {{ openshift.common.service_type }}-master-controllers
+ when:
+ - l_start_result | failed
+
- set_fact:
- master_controllers_service_status_changed: "{{ start_result | changed }}"
- when: openshift_master_ha | bool and openshift.master.cluster_method == 'native'
+ master_controllers_service_status_changed: "{{ l_start_result | changed }}"
+ when:
+ - openshift.master.cluster_method == 'native'
- name: Install cluster packages
package: name=pcs state=present
- when: openshift_master_ha | bool and openshift.master.cluster_method == 'pacemaker'
- and not openshift.common.is_containerized | bool
- register: install_result
+ when:
+ - openshift.master.cluster_method == 'pacemaker'
+ - not openshift.common.is_containerized | bool
+ register: l_install_result
- name: Start and enable cluster service
systemd:
name: pcsd
enabled: yes
state: started
- when: openshift_master_ha | bool and openshift.master.cluster_method == 'pacemaker'
- and not openshift.common.is_containerized | bool
+ when:
+ - openshift.master.cluster_method == 'pacemaker'
+ - not openshift.common.is_containerized | bool
- name: Set the cluster user password
shell: echo {{ openshift_master_cluster_password | quote }} | passwd --stdin hacluster
- when: install_result | changed
+ when:
+ - l_install_result | changed
+
+- name: node bootstrap settings
+ include: bootstrap.yml
+ when: openshift_master_bootstrap_enabled | default(False)
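One recurring change in the hunks above is stylistic: compound when: conditions joined with "and" are split into YAML list form (each list entry must evaluate true, an implicit AND), and registered variables gain an l_ prefix to mark them as role-local. A minimal sketch of that pattern is below; the task name, unit name, and register variable are illustrative only and do not come from the role itself.

# Sketch only, not part of the diff: the conditional/register style this change converges on.
- name: Example task using list-form conditionals     # illustrative name
  systemd:
    name: example-service                              # hypothetical unit name
    state: started
  register: l_example_start_result                     # role-local registers prefixed with l_
  until: not l_example_start_result | failed           # same retry idiom the diff uses
  retries: 1
  delay: 60
  when:                                                # entries are AND-ed together
    - openshift.master.ha | bool
    - openshift.master.cluster_method == 'native'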