Diffstat (limited to 'playbooks/openshift-master')
23 files changed, 1155 insertions, 0 deletions
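The public playbooks added here are thin wrappers that import ../init/main.yml and then a private/ counterpart, so each phase can be run on its own. The private playbooks bracket their work with "Install Checkpoint" plays that record phase progress through set_stats, which Ansible aggregates into custom stats at the end of a run. A sketch of the data those plays accumulate, using the keys defined below with hypothetical timestamps:

    installer_phase_master:
      status: "Complete"
      start: "20171201120000Z"  # from lookup('pipe', 'date +%Y%m%d%H%M%SZ'); example value
      end: "20171201121530Z"
    installer_phase_master_additional:
      status: "Complete"
      start: "20171201121530Z"
      end: "20171201122010Z"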
diff --git a/playbooks/openshift-master/additional_config.yml b/playbooks/openshift-master/additional_config.yml new file mode 100644 index 000000000..8105f7f88 --- /dev/null +++ b/playbooks/openshift-master/additional_config.yml @@ -0,0 +1,4 @@ +--- +- import_playbook: ../init/main.yml + +- import_playbook: private/additional_config.yml diff --git a/playbooks/openshift-master/certificates.yml b/playbooks/openshift-master/certificates.yml new file mode 100644 index 000000000..7ae87c09a --- /dev/null +++ b/playbooks/openshift-master/certificates.yml @@ -0,0 +1,4 @@ +--- +- import_playbook: ../init/main.yml + +- import_playbook: private/certificates.yml diff --git a/playbooks/openshift-master/config.yml b/playbooks/openshift-master/config.yml new file mode 100644 index 000000000..c7814207c --- /dev/null +++ b/playbooks/openshift-master/config.yml @@ -0,0 +1,4 @@ +--- +- import_playbook: ../init/main.yml + +- import_playbook: private/config.yml diff --git a/playbooks/openshift-master/private/additional_config.yml b/playbooks/openshift-master/private/additional_config.yml new file mode 100644 index 000000000..a90cd6b22 --- /dev/null +++ b/playbooks/openshift-master/private/additional_config.yml @@ -0,0 +1,50 @@ +--- +- name: Master Additional Install Checkpoint Start + hosts: all + gather_facts: false + tasks: + - name: Set Master Additional install 'In Progress' + run_once: true + set_stats: + data: + installer_phase_master_additional: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + +- name: Additional master configuration + hosts: oo_first_master + vars: + cockpit_plugins: "{{ osm_cockpit_plugins | default(['cockpit-kubernetes']) }}" + etcd_urls: "{{ openshift.master.etcd_urls }}" + openshift_master_ha: "{{ groups.oo_masters | length > 1 }}" + omc_cluster_hosts: "{{ groups.oo_masters | join(' ')}}" + roles: + - role: openshift_project_request_template + when: openshift_project_request_template_manage + - role: openshift_examples + when: openshift_install_examples | default(true) | bool + registry_url: "{{ openshift.master.registry_url }}" + - role: openshift_hosted_templates + registry_url: "{{ openshift.master.registry_url }}" + - role: openshift_manageiq + when: openshift_use_manageiq | default(true) | bool + - role: cockpit + when: + - not openshift.common.is_atomic | bool + - deployment_type == 'openshift-enterprise' + - osm_use_cockpit is undefined or osm_use_cockpit | bool + - openshift.common.deployment_subtype != 'registry' + - role: flannel_register + when: openshift_use_flannel | default(false) | bool + +- name: Master Additional Install Checkpoint End + hosts: all + gather_facts: false + tasks: + - name: Set Master Additional install 'Complete' + run_once: true + set_stats: + data: + installer_phase_master_additional: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" diff --git a/playbooks/openshift-master/private/certificates-backup.yml b/playbooks/openshift-master/private/certificates-backup.yml new file mode 100644 index 000000000..4dbc041b0 --- /dev/null +++ b/playbooks/openshift-master/private/certificates-backup.yml @@ -0,0 +1,38 @@ +--- +- name: Backup and remove master certificates + hosts: oo_masters_to_config + any_errors_fatal: true + vars: + openshift_ca_host: "{{ groups.oo_first_master.0 }}" + openshift_master_count: "{{ openshift.master.master_count | default(groups.oo_masters | length) }}" + pre_tasks: + - stat: + path: "{{ openshift.common.config_base }}/generated-configs" + register:
openshift_generated_configs_dir_stat + - name: Backup generated certificate and config directories + command: > + tar -czvf /etc/origin/master-node-cert-config-backup-{{ ansible_date_time.epoch }}.tgz + {{ openshift.common.config_base }}/generated-configs + {{ openshift.common.config_base }}/master + when: openshift_generated_configs_dir_stat.stat.exists + delegate_to: "{{ openshift_ca_host }}" + run_once: true + - name: Remove generated certificate directories + file: + path: "{{ item }}" + state: absent + with_items: + - "{{ openshift.common.config_base }}/generated-configs" + - name: Remove generated certificates + file: + path: "{{ openshift.common.config_base }}/master/{{ item }}" + state: absent + with_items: + - "{{ hostvars[inventory_hostname] | certificates_to_synchronize(include_keys=false, include_ca=false) }}" + - "etcd.server.crt" + - "etcd.server.key" + - "master.server.crt" + - "master.server.key" + - "openshift-master.crt" + - "openshift-master.key" + - "openshift-master.kubeconfig" diff --git a/playbooks/openshift-master/private/certificates.yml b/playbooks/openshift-master/private/certificates.yml new file mode 100644 index 000000000..d42d4402b --- /dev/null +++ b/playbooks/openshift-master/private/certificates.yml @@ -0,0 +1,14 @@ +--- +- name: Create OpenShift certificates for master hosts + hosts: oo_masters_to_config + vars: + openshift_ca_host: "{{ groups.oo_first_master.0 }}" + roles: + - role: openshift_master_facts + - role: openshift_named_certificates + - role: openshift_ca + - role: openshift_master_certificates + openshift_master_etcd_hosts: "{{ hostvars + | lib_utils_oo_select_keys(groups['oo_etcd_to_config'] | default([])) + | lib_utils_oo_collect('openshift.common.hostname') + | default(none, true) }}" diff --git a/playbooks/openshift-master/private/config.yml b/playbooks/openshift-master/private/config.yml new file mode 100644 index 000000000..3093444b4 --- /dev/null +++ b/playbooks/openshift-master/private/config.yml @@ -0,0 +1,250 @@ +--- +- name: Master Install Checkpoint Start + hosts: all + gather_facts: false + tasks: + - name: Set Master install 'In Progress' + run_once: true + set_stats: + data: + installer_phase_master: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + +- import_playbook: certificates.yml + +- name: Disable excluders + hosts: oo_masters_to_config + gather_facts: no + roles: + - role: openshift_excluder + r_openshift_excluder_action: disable + +- name: Gather and set facts for master hosts + hosts: oo_masters_to_config + pre_tasks: + # Per https://bugzilla.redhat.com/show_bug.cgi?id=1469336 + # + # When scaling up a cluster upgraded from OCP <= 3.5, ensure that + # OPENSHIFT_DEFAULT_REGISTRY is present as defined on the existing + # masters, or absent if such is the case. 
+ - name: Detect if this host is a new master in a scale up + set_fact: + g_openshift_master_is_scaleup: "{{ openshift.common.hostname in ( groups['new_masters'] | default([]) ) }}" + + - name: Scaleup Detection + debug: + var: g_openshift_master_is_scaleup + + - name: Check for RPM generated config marker file .config_managed + stat: + path: /etc/origin/.config_managed + register: rpmgenerated_config + + - name: Remove RPM generated config files if present + file: + path: "/etc/origin/{{ item }}" + state: absent + when: + - rpmgenerated_config.stat.exists == true + - deployment_type == 'openshift-enterprise' + with_items: + - master + - node + - .config_managed + + - set_fact: + openshift_master_etcd_port: "{{ (etcd_client_port | default('2379')) if (groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config) else none }}" + openshift_master_etcd_hosts: "{{ hostvars + | lib_utils_oo_select_keys(groups['oo_etcd_to_config'] + | default([])) + | lib_utils_oo_collect('openshift.common.hostname') + | default(none, true) }}" + roles: + - openshift_facts + post_tasks: + - openshift_facts: + role: master + local_facts: + api_port: "{{ openshift_master_api_port | default(None) }}" + api_url: "{{ openshift_master_api_url | default(None) }}" + api_use_ssl: "{{ openshift_master_api_use_ssl | default(None) }}" + controllers_port: "{{ openshift_master_controllers_port | default(None) }}" + public_api_url: "{{ openshift_master_public_api_url | default(None) }}" + cluster_hostname: "{{ openshift_master_cluster_hostname | default(None) }}" + cluster_public_hostname: "{{ openshift_master_cluster_public_hostname | default(None) }}" + console_path: "{{ openshift_master_console_path | default(None) }}" + console_port: "{{ openshift_master_console_port | default(None) }}" + console_url: "{{ openshift_master_console_url | default(None) }}" + console_use_ssl: "{{ openshift_master_console_use_ssl | default(None) }}" + public_console_url: "{{ openshift_master_public_console_url | default(None) }}" + ha: "{{ openshift_master_ha | default(groups.oo_masters | length > 1) }}" + master_count: "{{ openshift_master_count | default(groups.oo_masters | length) }}" + +- name: Inspect state of first master config settings + hosts: oo_first_master + roles: + - role: openshift_facts + post_tasks: + - openshift_facts: + role: master + local_facts: + session_auth_secrets: "{{ openshift_master_session_auth_secrets | default(openshift.master.session_auth_secrets | default(None)) }}" + session_encryption_secrets: "{{ openshift_master_session_encryption_secrets | default(openshift.master.session_encryption_secrets | default(None)) }}" + - name: Check for existing configuration + stat: + path: /etc/origin/master/master-config.yaml + register: master_config_stat + + - name: Set clean install fact + set_fact: + l_clean_install: "{{ not master_config_stat.stat.exists | bool }}" + + - name: Determine if etcd3 storage is in use + command: grep -Pzo "storage-backend:\n.*etcd3" /etc/origin/master/master-config.yaml -q + register: etcd3_grep + failed_when: false + changed_when: false + + - name: Set etcd3 fact + set_fact: + l_etcd3_enabled: "{{ etcd3_grep.rc == 0 | bool }}" + + - name: Check if atomic-openshift-master sysconfig exists yet + stat: + path: /etc/sysconfig/atomic-openshift-master + register: l_aom_exists + + - name: Preserve OPENSHIFT_DEFAULT_REGISTRY master parameter if present + command: awk '/^OPENSHIFT_DEFAULT_REGISTRY/' /etc/sysconfig/atomic-openshift-master + register: l_default_registry_defined + when: 
l_aom_exists.stat.exists | bool + + - name: Check if atomic-openshift-master-api sysconfig exists yet + stat: + path: /etc/sysconfig/atomic-openshift-master-api + register: l_aom_api_exists + + - name: Preserve OPENSHIFT_DEFAULT_REGISTRY master-api parameter if present + command: awk '/^OPENSHIFT_DEFAULT_REGISTRY/' /etc/sysconfig/atomic-openshift-master-api + register: l_default_registry_defined_api + when: l_aom_api_exists.stat.exists | bool + + - name: Check if atomic-openshift-master-controllers sysconfig exists yet + stat: + path: /etc/sysconfig/atomic-openshift-master-controllers + register: l_aom_controllers_exists + + - name: Preserve OPENSHIFT_DEFAULT_REGISTRY master-controllers parameter if present + command: awk '/^OPENSHIFT_DEFAULT_REGISTRY/' /etc/sysconfig/atomic-openshift-master-controllers + register: l_default_registry_defined_controllers + when: l_aom_controllers_exists.stat.exists | bool + + - name: Update facts with OPENSHIFT_DEFAULT_REGISTRY value + set_fact: + l_default_registry_value: "{{ l_default_registry_defined.stdout | default('') }}" + l_default_registry_value_api: "{{ l_default_registry_defined_api.stdout | default('') }}" + l_default_registry_value_controllers: "{{ l_default_registry_defined_controllers.stdout | default('') }}" + +- name: Generate master session secrets + hosts: oo_first_master + vars: + g_session_secrets_present: "{{ (openshift.master.session_auth_secrets | default([])) | length > 0 and (openshift.master.session_encryption_secrets | default([])) | length > 0 }}" + g_session_auth_secrets: "{{ [ 24 | lib_utils_oo_generate_secret ] }}" + g_session_encryption_secrets: "{{ [ 24 | lib_utils_oo_generate_secret ] }}" + roles: + - role: openshift_facts + tasks: + - openshift_facts: + role: master + local_facts: + session_auth_secrets: "{{ g_session_auth_secrets }}" + session_encryption_secrets: "{{ g_session_encryption_secrets }}" + when: not g_session_secrets_present | bool + +- name: Configure masters + hosts: oo_masters_to_config + any_errors_fatal: true + vars: + openshift_master_ha: "{{ openshift.master.ha }}" + openshift_master_count: "{{ openshift.master.master_count }}" + openshift_master_session_auth_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_auth_secrets }}" + openshift_master_session_encryption_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_encryption_secrets }}" + openshift_ca_host: "{{ groups.oo_first_master.0 }}" + openshift_master_etcd_hosts: "{{ hostvars + | lib_utils_oo_select_keys(groups['oo_etcd_to_config'] | default([])) + | lib_utils_oo_collect('openshift.common.hostname') + | default(none, true) }}" + openshift_no_proxy_etcd_host_ips: "{{ hostvars | lib_utils_oo_select_keys(groups['oo_etcd_to_config'] | default([])) + | lib_utils_oo_collect('openshift.common.ip') | default([]) | join(',') + }}" + roles: + - role: openshift_master_facts + - role: openshift_clock + - role: openshift_cloud_provider + - role: openshift_builddefaults + - role: openshift_buildoverrides + - role: nickhammond.logrotate + - role: contiv + contiv_role: netmaster + when: openshift_use_contiv | default(False) | bool + - role: openshift_master + openshift_master_hosts: "{{ groups.oo_masters_to_config }}" + r_openshift_master_clean_install: "{{ hostvars[groups.oo_first_master.0].l_clean_install }}" + r_openshift_master_etcd3_storage: "{{ hostvars[groups.oo_first_master.0].l_etcd3_enabled }}" + openshift_master_is_scaleup_host: "{{ g_openshift_master_is_scaleup | default(false) }}" + 
openshift_master_default_registry_value: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value }}" + openshift_master_default_registry_value_api: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value_api }}" + openshift_master_default_registry_value_controllers: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value_controllers }}" + - role: tuned + - role: nuage_ca + when: openshift_use_nuage | default(false) | bool + - role: nuage_common + when: openshift_use_nuage | default(false) | bool + - role: nuage_master + when: openshift_use_nuage | default(false) | bool + - role: calico_master + when: openshift_use_calico | default(false) | bool + tasks: + - include_role: + name: kuryr + tasks_from: master + when: openshift_use_kuryr | default(false) | bool + + - name: Setup the node group config maps + include_role: + name: openshift_node_group + when: openshift_master_bootstrap_enabled | default(false) | bool + run_once: True + + post_tasks: + - name: Create group for deployment type + group_by: key=oo_masters_deployment_type_{{ openshift.common.deployment_type }} + changed_when: False + +- name: Configure API Aggregation on masters + hosts: oo_masters + serial: 1 + roles: + - role: openshift_facts + tasks: + - include_tasks: tasks/wire_aggregator.yml + +- name: Re-enable excluder if it was previously enabled + hosts: oo_masters_to_config + gather_facts: no + roles: + - role: openshift_excluder + r_openshift_excluder_action: enable + +- name: Master Install Checkpoint End + hosts: all + gather_facts: false + tasks: + - name: Set Master install 'Complete' + run_once: true + set_stats: + data: + installer_phase_master: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" diff --git a/playbooks/openshift-master/private/redeploy-certificates.yml b/playbooks/openshift-master/private/redeploy-certificates.yml new file mode 100644 index 000000000..c0f75ae80 --- /dev/null +++ b/playbooks/openshift-master/private/redeploy-certificates.yml @@ -0,0 +1,6 @@ +--- +- import_playbook: certificates-backup.yml + +- import_playbook: certificates.yml + vars: + openshift_certificates_redeploy: true diff --git a/playbooks/openshift-master/private/redeploy-openshift-ca.yml b/playbooks/openshift-master/private/redeploy-openshift-ca.yml new file mode 100644 index 000000000..9d3c12ba1 --- /dev/null +++ b/playbooks/openshift-master/private/redeploy-openshift-ca.yml @@ -0,0 +1,300 @@ +--- +- name: Check cert expiries + hosts: oo_nodes_to_config:oo_masters_to_config:oo_etcd_to_config + vars: + openshift_certificate_expiry_show_all: yes + roles: + # Sets 'check_results' per host which contains health status for + # etcd, master and node certificates. We will use 'check_results' + # to determine if any certificates were expired prior to running + # this playbook. Service restarts will be skipped if any + # certificates were previously expired. + - role: openshift_certificate_expiry + +# Update the master config when ca-bundle.crt is not referenced. Services will be +# restarted below after the new CA certificate has been distributed.
+- name: Ensure ca-bundle.crt is referenced in master configuration + hosts: oo_masters_to_config + tasks: + - slurp: + src: "{{ openshift.common.config_base }}/master/master-config.yaml" + register: g_master_config_output + - modify_yaml: + dest: "{{ openshift.common.config_base }}/master/master-config.yaml" + yaml_key: kubeletClientInfo.ca + yaml_value: ca-bundle.crt + when: (g_master_config_output.content|b64decode|from_yaml).kubeletClientInfo.ca != 'ca-bundle.crt' + - modify_yaml: + dest: "{{ openshift.common.config_base }}/master/master-config.yaml" + yaml_key: serviceAccountConfig.masterCA + yaml_value: ca-bundle.crt + when: (g_master_config_output.content|b64decode|from_yaml).serviceAccountConfig.masterCA != 'ca-bundle.crt' + - modify_yaml: + dest: "{{ openshift.common.config_base }}/master/master-config.yaml" + yaml_key: oauthConfig.masterCA + yaml_value: ca-bundle.crt + when: (g_master_config_output.content|b64decode|from_yaml).oauthConfig.masterCA != 'ca-bundle.crt' + - modify_yaml: + dest: "{{ openshift.common.config_base }}/master/master-config.yaml" + yaml_key: etcdClientInfo.ca + yaml_value: ca-bundle.crt + when: + - groups.oo_etcd_to_config | default([]) | length == 0 + - (g_master_config_output.content|b64decode|from_yaml).etcdClientInfo.ca != 'ca-bundle.crt' + - modify_yaml: + dest: "{{ openshift.common.config_base }}/master/master-config.yaml" + yaml_key: etcdConfig.peerServingInfo.clientCA + yaml_value: ca-bundle.crt + when: + - groups.oo_etcd_to_config | default([]) | length == 0 + - (g_master_config_output.content|b64decode|from_yaml).etcdConfig.peerServingInfo.clientCA != 'ca-bundle.crt' + - modify_yaml: + dest: "{{ openshift.common.config_base }}/master/master-config.yaml" + yaml_key: etcdConfig.servingInfo.clientCA + yaml_value: ca-bundle.crt + when: + - groups.oo_etcd_to_config | default([]) | length == 0 + - (g_master_config_output.content|b64decode|from_yaml).etcdConfig.servingInfo.clientCA != 'ca-bundle.crt' + # Set servingInfo.clientCA to client-ca-bundle.crt in order to roll the CA certificate. + # This change will be reverted in playbooks/redeploy-certificates.yml + - modify_yaml: + dest: "{{ openshift.common.config_base }}/master/master-config.yaml" + yaml_key: servingInfo.clientCA + yaml_value: client-ca-bundle.crt + when: (g_master_config_output.content|b64decode|from_yaml).servingInfo.clientCA != 'client-ca-bundle.crt' + +- name: Copy current OpenShift CA to legacy directory + hosts: oo_masters_to_config + pre_tasks: + - name: Create legacy-ca directory + file: + path: "{{ openshift.common.config_base }}/master/legacy-ca" + state: directory + mode: 0700 + owner: root + group: root + - command: mktemp -u XXXXXX + register: g_legacy_ca_mktemp + changed_when: false + # Copy CA certificate, key, serial and bundle to legacy-ca with a + # prefix generated by mktemp, ie. XXXXXX-ca.crt. + # + # The following roles will pick up all CA certificates matching + # /.*-ca.crt/ in the legacy-ca directory and ensure they are present + # in the OpenShift CA bundle. + # - openshift_ca + # - openshift_master_certificates + # - openshift_node_certificates + - name: Copy current OpenShift CA to legacy directory + copy: + src: "{{ openshift.common.config_base }}/master/{{ item }}" + dest: "{{ openshift.common.config_base }}/master/legacy-ca/{{ g_legacy_ca_mktemp.stdout }}-{{ item }}" + remote_src: true + # It is possible that redeploying failed and files may be missing. + # Ignore errors in this case. Files should have been copied to + # legacy-ca directory in previous run. 
+ ignore_errors: true + with_items: + - "ca.crt" + - "ca.key" + - "ca.serial.txt" + - "ca-bundle.crt" + +- name: Create temporary directory for creating new CA certificate + hosts: oo_first_master + tasks: + - name: Create temporary directory for creating new CA certificate + command: > + mktemp -d /tmp/openshift-ansible-XXXXXXX + register: g_new_openshift_ca_mktemp + changed_when: false + +- name: Create OpenShift CA + hosts: oo_first_master + vars: + # Set openshift_ca_config_dir to a temporary directory where CA + # will be created. We'll replace the existing CA with the CA + # created in the temporary directory. + openshift_ca_config_dir: "{{ hostvars[groups.oo_first_master.0].g_new_openshift_ca_mktemp.stdout }}" + roles: + - role: openshift_master_facts + - role: openshift_named_certificates + - role: openshift_ca + openshift_ca_host: "{{ groups.oo_first_master.0 }}" + +- name: Create temp directory for syncing certs + hosts: localhost + connection: local + become: no + gather_facts: no + tasks: + - name: Create local temp directory for syncing certs + local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX + register: g_master_mktemp + changed_when: false + +- name: Retrieve OpenShift CA + hosts: oo_first_master + vars: + openshift_ca_host: "{{ groups.oo_first_master.0 }}" + tasks: + - name: Retrieve CA certificate, key, bundle and serial + fetch: + src: "{{ hostvars[openshift_ca_host].g_new_openshift_ca_mktemp.stdout }}/{{ item }}" + dest: "{{ hostvars['localhost'].g_master_mktemp.stdout }}/" + flat: yes + fail_on_missing: yes + validate_checksum: yes + with_items: + - ca.crt + - ca.key + - ca-bundle.crt + - ca.serial.txt + - client-ca-bundle.crt + delegate_to: "{{ openshift_ca_host }}" + run_once: true + changed_when: false + +- name: Distribute OpenShift CA to masters + hosts: oo_masters_to_config + vars: + openshift_ca_host: "{{ groups.oo_first_master.0 }}" + tasks: + - name: Deploy CA certificate, key, bundle and serial + copy: + src: "{{ hostvars['localhost'].g_master_mktemp.stdout }}/{{ item }}" + dest: "{{ openshift.common.config_base }}/master/" + with_items: + - ca.crt + - ca.key + - ca-bundle.crt + - ca.serial.txt + - client-ca-bundle.crt + - name: Update master client kubeconfig CA data + kubeclient_ca: + client_path: "{{ openshift.common.config_base }}/master/openshift-master.kubeconfig" + ca_path: "{{ openshift.common.config_base }}/master/ca-bundle.crt" + - name: Update admin client kubeconfig CA data + kubeclient_ca: + client_path: "{{ openshift.common.config_base }}/master/admin.kubeconfig" + ca_path: "{{ openshift.common.config_base }}/master/ca-bundle.crt" + - name: Lookup default group for ansible_ssh_user + command: "/usr/bin/id -g {{ ansible_ssh_user | quote }}" + changed_when: false + register: _ansible_ssh_user_gid + - set_fact: + client_users: "{{ [ansible_ssh_user, 'root'] | unique }}" + - name: Create the client config dir(s) + file: + path: "~{{ item }}/.kube" + state: directory + mode: 0700 + owner: "{{ item }}" + group: "{{ 'root' if item == 'root' else _ansible_ssh_user_gid.stdout }}" + with_items: "{{ client_users }}" + - name: Copy the admin client config(s) + copy: + src: "{{ openshift.common.config_base }}/master/admin.kubeconfig" + dest: "~{{ item }}/.kube/config" + remote_src: yes + with_items: "{{ client_users }}" + - name: Update the permissions on the admin client config(s) + file: + path: "~{{ item }}/.kube/config" + state: file + mode: 0700 + owner: "{{ item }}" + group: "{{ 'root' if item == 'root' else _ansible_ssh_user_gid.stdout 
}}" + with_items: "{{ client_users }}" + +- import_playbook: restart.yml + # Do not restart masters when master or etcd certificates were previously expired. + when: + # masters + - ('expired' not in hostvars + | lib_utils_oo_select_keys(groups['oo_masters_to_config']) + | lib_utils_oo_collect('check_results.check_results.ocp_certs') + | lib_utils_oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/master.server.crt"})) + - ('expired' not in hostvars + | lib_utils_oo_select_keys(groups['oo_masters_to_config']) + | lib_utils_oo_collect('check_results.check_results.ocp_certs') + | lib_utils_oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/ca-bundle.crt"})) + # etcd + - ('expired' not in (hostvars + | lib_utils_oo_select_keys(groups['etcd']) + | lib_utils_oo_collect('check_results.check_results.etcd') + | lib_utils_oo_collect('health'))) + +- name: Distribute OpenShift CA certificate to nodes + hosts: oo_nodes_to_config + vars: + openshift_ca_host: "{{ groups.oo_first_master.0 }}" + tasks: + - copy: + src: "{{ hostvars['localhost'].g_master_mktemp.stdout }}/ca-bundle.crt" + dest: "{{ openshift.common.config_base }}/node/ca.crt" + - name: Copy OpenShift CA to system CA trust + copy: + src: "{{ item.cert }}" + dest: "/etc/pki/ca-trust/source/anchors/{{ item.id }}-{{ item.cert | basename }}" + remote_src: yes + with_items: + - id: openshift + cert: "{{ openshift.common.config_base }}/node/ca.crt" + notify: + - update ca trust + - name: Update node client kubeconfig CA data + kubeclient_ca: + client_path: "{{ openshift.common.config_base }}/node/system:node:{{ openshift.common.hostname }}.kubeconfig" + ca_path: "{{ openshift.common.config_base }}/node/ca.crt" + handlers: + # Normally this handler would restart docker after updating ca + # trust. We'll do that when we restart nodes to avoid restarting + # docker on all nodes in parallel. + - name: update ca trust + command: update-ca-trust + +- name: Delete temporary directory on CA host + hosts: oo_first_master + tasks: + - file: + path: "{{ g_new_openshift_ca_mktemp.stdout }}" + state: absent + +- name: Delete temporary directory on localhost + hosts: localhost + connection: local + become: no + gather_facts: no + tasks: + - file: + name: "{{ g_master_mktemp.stdout }}" + state: absent + changed_when: false + +- import_playbook: ../../openshift-node/private/restart.yml + # Do not restart nodes when node, master or etcd certificates were previously expired. 
+ when: + # nodes + - ('expired' not in hostvars + | lib_utils_oo_select_keys(groups['oo_nodes_to_config']) + | lib_utils_oo_collect('check_results.check_results.ocp_certs') + | lib_utils_oo_collect('health', {'path':hostvars[groups.oo_nodes_to_config.0].openshift.common.config_base ~ "/node/server.crt"})) + - ('expired' not in hostvars + | lib_utils_oo_select_keys(groups['oo_nodes_to_config']) + | lib_utils_oo_collect('check_results.check_results.ocp_certs') + | lib_utils_oo_collect('health', {'path':hostvars[groups.oo_nodes_to_config.0].openshift.common.config_base ~ "/node/ca.crt"})) + # masters + - ('expired' not in hostvars + | lib_utils_oo_select_keys(groups['oo_masters_to_config']) + | lib_utils_oo_collect('check_results.check_results.ocp_certs') + | lib_utils_oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/master.server.crt"})) + - ('expired' not in hostvars + | lib_utils_oo_select_keys(groups['oo_masters_to_config']) + | lib_utils_oo_collect('check_results.check_results.ocp_certs') + | lib_utils_oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/ca-bundle.crt"})) + # etcd + - ('expired' not in (hostvars + | lib_utils_oo_select_keys(groups['etcd']) + | lib_utils_oo_collect('check_results.check_results.etcd') + | lib_utils_oo_collect('health'))) diff --git a/playbooks/openshift-master/private/restart.yml b/playbooks/openshift-master/private/restart.yml new file mode 100644 index 000000000..5cb284935 --- /dev/null +++ b/playbooks/openshift-master/private/restart.yml @@ -0,0 +1,18 @@ +--- +- import_playbook: validate_restart.yml + +- name: Restart masters + hosts: oo_masters_to_config + vars: + openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}" + serial: 1 + handlers: + - import_tasks: ../../../roles/openshift_master/handlers/main.yml + roles: + - openshift_facts + post_tasks: + - include_tasks: tasks/restart_hosts.yml + when: openshift_rolling_restart_mode | default('services') == 'system' + + - include_tasks: tasks/restart_services.yml + when: openshift_rolling_restart_mode | default('services') == 'services' diff --git a/playbooks/openshift-master/private/revert-client-ca.yml b/playbooks/openshift-master/private/revert-client-ca.yml new file mode 100644 index 000000000..9ae23bf5b --- /dev/null +++ b/playbooks/openshift-master/private/revert-client-ca.yml @@ -0,0 +1,17 @@ +--- +- name: Set servingInfo.clientCA = ca.crt in master config + hosts: oo_masters_to_config + tasks: + - name: Read master config + slurp: + src: "{{ openshift.common.config_base }}/master/master-config.yaml" + register: g_master_config_output + + # servingInfo.clientCA may be set as the client-ca-bundle.crt from + # CA redeployment and this task reverts that change. + - name: Set servingInfo.clientCA = ca.crt in master config + modify_yaml: + dest: "{{ openshift.common.config_base }}/master/master-config.yaml" + yaml_key: servingInfo.clientCA + yaml_value: ca.crt + when: (g_master_config_output.content|b64decode|from_yaml).servingInfo.clientCA != 'ca.crt' diff --git a/playbooks/openshift-master/private/roles b/playbooks/openshift-master/private/roles new file mode 120000 index 000000000..e2b799b9d --- /dev/null +++ b/playbooks/openshift-master/private/roles @@ -0,0 +1 @@ +../../../roles/
\ No newline at end of file diff --git a/playbooks/openshift-master/private/scaleup.yml b/playbooks/openshift-master/private/scaleup.yml new file mode 100644 index 000000000..007b23ea3 --- /dev/null +++ b/playbooks/openshift-master/private/scaleup.yml @@ -0,0 +1,57 @@ +--- +- name: Update master count + hosts: oo_masters:!oo_masters_to_config + serial: 1 + roles: + - openshift_facts + post_tasks: + - openshift_facts: + role: master + local_facts: + ha: "{{ openshift_master_ha | default(groups.oo_masters | length > 1) }}" + master_count: "{{ openshift_master_count | default(groups.oo_masters | length) }}" + - name: Update master count + modify_yaml: + dest: "{{ openshift.common.config_base }}/master/master-config.yaml" + yaml_key: 'kubernetesMasterConfig.masterCount' + yaml_value: "{{ openshift.master.master_count }}" + notify: + - restart master api + - restart master controllers + handlers: + - name: restart master api + service: name={{ openshift_service_type }}-master-api state=restarted + notify: verify api server + # We retry the controllers because the API may not be 100% initialized yet. + - name: restart master controllers + command: "systemctl restart {{ openshift_service_type }}-master-controllers" + retries: 3 + delay: 5 + register: result + until: result.rc == 0 + - name: verify api server + command: > + curl --silent --tlsv1.2 + --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt + {{ openshift.master.api_url }}/healthz/ready + args: + # Disables the following warning: + # Consider using get_url or uri module rather than running curl + warn: no + register: api_available_output + until: api_available_output.stdout == 'ok' + retries: 120 + delay: 1 + changed_when: false + +- import_playbook: set_network_facts.yml + +- import_playbook: ../../openshift-etcd/private/certificates.yml + +- import_playbook: config.yml + +- import_playbook: ../../openshift-loadbalancer/private/config.yml + +- import_playbook: ../../openshift-node/private/certificates.yml + +- import_playbook: ../../openshift-node/private/config.yml diff --git a/playbooks/openshift-master/private/set_network_facts.yml b/playbooks/openshift-master/private/set_network_facts.yml new file mode 100644 index 000000000..9a6cf26fc --- /dev/null +++ b/playbooks/openshift-master/private/set_network_facts.yml @@ -0,0 +1,34 @@ +--- +- name: Read first master's config + hosts: oo_first_master + gather_facts: no + tasks: + - stat: + path: "{{ openshift.common.config_base }}/master/master-config.yaml" + register: g_master_config_stat + - slurp: + src: "{{ openshift.common.config_base }}/master/master-config.yaml" + register: g_master_config_slurp + +- name: Set network facts for masters + hosts: oo_masters_to_config + gather_facts: no + roles: + - role: openshift_facts + post_tasks: + - block: + - set_fact: + osm_cluster_network_cidr: "{{ (hostvars[groups.oo_first_master.0].g_master_config_slurp.content|b64decode|from_yaml).networkConfig.clusterNetworkCIDR }}" + when: osm_cluster_network_cidr is not defined + - set_fact: + osm_host_subnet_length: "{{ (hostvars[groups.oo_first_master.0].g_master_config_slurp.content|b64decode|from_yaml).networkConfig.hostSubnetLength }}" + when: osm_host_subnet_length is not defined + - set_fact: + openshift_portal_net: "{{ (hostvars[groups.oo_first_master.0].g_master_config_slurp.content|b64decode|from_yaml).networkConfig.serviceNetworkCIDR }}" + when: openshift_portal_net is not defined + - openshift_facts: + role: common + local_facts: + portal_net: "{{ openshift_portal_net | default(openshift_master_portal_net) | default(None) }}" + when: + - hostvars[groups.oo_first_master.0].g_master_config_stat.stat.exists | bool diff --git a/playbooks/openshift-master/private/tasks/restart_hosts.yml b/playbooks/openshift-master/private/tasks/restart_hosts.yml new file mode 100644 index 000000000..a5dbe0590 --- /dev/null +++ b/playbooks/openshift-master/private/tasks/restart_hosts.yml @@ -0,0 +1,40 @@ +--- +- name: Restart master system + # https://github.com/ansible/ansible/issues/10616 + shell: sleep 2 && shutdown -r now "OpenShift Ansible master rolling restart" + async: 1 + poll: 0 + ignore_errors: true + become: yes + +# WARNING: This process is riddled with weird behavior. + +# Workaround for https://github.com/ansible/ansible/issues/21269 +- set_fact: + wait_for_host: "{{ ansible_host }}" + +# Ansible's blog documents this *without* the port, which appears to now +# just wait until the timeout value and then proceed without checking anything. +# The port is therefore required. +# +# However, neither ansible_ssh_port nor ansible_port is reliably defined; they are +# likely set only if overridden. Assume a default of 22. +- name: Wait for master to restart + local_action: + module: wait_for + host="{{ wait_for_host }}" + state=started + delay=10 + timeout=600 + port="{{ ansible_port | default(ansible_ssh_port | default(22,boolean=True),boolean=True) }}" + become: no + +# Now that ssh is back up we can wait for API on the remote system, +# avoiding some potential connection issues from local system: +- name: Wait for master API to come back online + wait_for: + host: "{{ openshift.common.hostname }}" + state: started + delay: 10 + port: "{{ openshift.master.api_port }}" + timeout: 600 diff --git a/playbooks/openshift-master/private/tasks/restart_services.yml b/playbooks/openshift-master/private/tasks/restart_services.yml new file mode 100644 index 000000000..4e1b3a3be --- /dev/null +++ b/playbooks/openshift-master/private/tasks/restart_services.yml @@ -0,0 +1,4 @@ +--- +- include_role: + name: openshift_master + tasks_from: restart.yml diff --git a/playbooks/openshift-master/private/tasks/wire_aggregator.yml b/playbooks/openshift-master/private/tasks/wire_aggregator.yml new file mode 100644 index 000000000..4f55d5c82 --- /dev/null +++ b/playbooks/openshift-master/private/tasks/wire_aggregator.yml @@ -0,0 +1,214 @@ +--- +- name: Make temp cert dir + command: mktemp -d /tmp/openshift-service-catalog-ansible-XXXXXX + register: certtemp + changed_when: False + +- name: Check for First Master Aggregator Signer cert + stat: + path: /etc/origin/master/front-proxy-ca.crt + register: first_proxy_ca_crt + changed_when: false + delegate_to: "{{ groups.oo_first_master.0 }}" + +- name: Check for First Master Aggregator Signer key + stat: + path: /etc/origin/master/front-proxy-ca.key + register: first_proxy_ca_key + changed_when: false + delegate_to: "{{ groups.oo_first_master.0 }}" + +# TODO: this currently has a bug where hostnames are required +- name: Creating First Master Aggregator signer certs + command: > + {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm ca create-signer-cert + --cert=/etc/origin/master/front-proxy-ca.crt + --key=/etc/origin/master/front-proxy-ca.key + --serial=/etc/origin/master/ca.serial.txt + delegate_to: "{{ groups.oo_first_master.0 }}" + when: + - not first_proxy_ca_crt.stat.exists + - not first_proxy_ca_key.stat.exists + +- name: Check for Aggregator Signer cert + stat: + path: /etc/origin/master/front-proxy-ca.crt + register: proxy_ca_crt +
changed_when: false + +- name: Check for Aggregator Signer key + stat: + path: /etc/origin/master/front-proxy-ca.key + register: proxy_ca_key + changed_when: false + +- name: Copy Aggregator Signer certs from first master + fetch: + src: "/etc/origin/master/{{ item }}" + dest: "{{ certtemp.stdout }}/{{ item }}" + flat: yes + with_items: + - front-proxy-ca.crt + - front-proxy-ca.key + delegate_to: "{{ groups.oo_first_master.0 }}" + when: + - not proxy_ca_key.stat.exists + - not proxy_ca_crt.stat.exists + +- name: Copy Aggregator Signer certs to host + copy: + src: "{{ certtemp.stdout }}/{{ item }}" + dest: "/etc/origin/master/{{ item }}" + with_items: + - front-proxy-ca.crt + - front-proxy-ca.key + when: + - not proxy_ca_key.stat.exists + - not proxy_ca_crt.stat.exists + +# oc_adm_ca_server_cert: +# cert: /etc/origin/master/front-proxy-ca.crt +# key: /etc/origin/master/front-proxy-ca.key + +- name: Check for first master api-client config + stat: + path: /etc/origin/master/aggregator-front-proxy.kubeconfig + register: first_front_proxy_kubeconfig + delegate_to: "{{ groups.oo_first_master.0 }}" + run_once: true + +# create-api-client-config generates a ca.crt file which will +# overwrite the OpenShift CA certificate. Generate the aggregator +# kubeconfig in a temporary directory and then copy files into the +# master config dir to avoid overwriting ca.crt. +- block: + - name: Create first master api-client config for Aggregator + command: > + {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm create-api-client-config + --certificate-authority=/etc/origin/master/front-proxy-ca.crt + --signer-cert=/etc/origin/master/front-proxy-ca.crt + --signer-key=/etc/origin/master/front-proxy-ca.key + --user aggregator-front-proxy + --client-dir={{ certtemp.stdout }} + --signer-serial=/etc/origin/master/ca.serial.txt + delegate_to: "{{ groups.oo_first_master.0 }}" + run_once: true + - name: Copy first master api-client config for Aggregator + copy: + src: "{{ certtemp.stdout }}/{{ item }}" + dest: "/etc/origin/master/" + remote_src: true + with_items: + - aggregator-front-proxy.crt + - aggregator-front-proxy.key + - aggregator-front-proxy.kubeconfig + delegate_to: "{{ groups.oo_first_master.0 }}" + run_once: true + when: + - not first_front_proxy_kubeconfig.stat.exists + +- name: Check for api-client config + stat: + path: /etc/origin/master/aggregator-front-proxy.kubeconfig + register: front_proxy_kubeconfig + +- name: Copy api-client config from first master + fetch: + src: "/etc/origin/master/{{ item }}" + dest: "{{ certtemp.stdout }}/{{ item }}" + flat: yes + delegate_to: "{{ groups.oo_first_master.0 }}" + with_items: + - aggregator-front-proxy.crt + - aggregator-front-proxy.key + - aggregator-front-proxy.kubeconfig + when: + - not front_proxy_kubeconfig.stat.exists + +- name: Copy api-client config to host + copy: + src: "{{ certtemp.stdout }}/{{ item }}" + dest: "/etc/origin/master/{{ item }}" + with_items: + - aggregator-front-proxy.crt + - aggregator-front-proxy.key + - aggregator-front-proxy.kubeconfig + when: + - not front_proxy_kubeconfig.stat.exists + +- name: Delete temp directory + file: + name: "{{ certtemp.stdout }}" + state: absent + changed_when: False + +- name: Setup extension file for service console UI + template: + src: ../templates/openshift-ansible-catalog-console.js + dest: /etc/origin/master/openshift-ansible-catalog-console.js + +- name: Update master config + yedit: + state: present + src: /etc/origin/master/master-config.yaml + edits: + - key: aggregatorConfig.proxyClientInfo.certFile + value: aggregator-front-proxy.crt + - key: aggregatorConfig.proxyClientInfo.keyFile + value: aggregator-front-proxy.key + - key: authConfig.requestHeader.clientCA + value: front-proxy-ca.crt + - key: authConfig.requestHeader.clientCommonNames + value: [aggregator-front-proxy] + - key: authConfig.requestHeader.usernameHeaders + value: [X-Remote-User] + - key: authConfig.requestHeader.groupHeaders + value: [X-Remote-Group] + - key: authConfig.requestHeader.extraHeaderPrefixes + value: [X-Remote-Extra-] + - key: assetConfig.extensionScripts + value: [/etc/origin/master/openshift-ansible-catalog-console.js] + - key: kubernetesMasterConfig.apiServerArguments.runtime-config + value: [apis/settings.k8s.io/v1alpha1=true] + - key: admissionConfig.pluginConfig.PodPreset.configuration.kind + value: DefaultAdmissionConfig + - key: admissionConfig.pluginConfig.PodPreset.configuration.apiVersion + value: v1 + - key: admissionConfig.pluginConfig.PodPreset.configuration.disable + value: false + register: yedit_output + +# Restart masters serially here +- name: restart master api + systemd: name={{ openshift_service_type }}-master-api state=restarted + when: + - yedit_output.changed + +# We retry the controllers because the API may not be 100% initialized yet. +- name: restart master controllers + command: "systemctl restart {{ openshift_service_type }}-master-controllers" + retries: 3 + delay: 5 + register: result + until: result.rc == 0 + when: + - yedit_output.changed + +- name: Verify API Server + # Using curl here since the uri module requires python-httplib2 and + # wait_for port doesn't provide health information. + command: > + curl --silent --tlsv1.2 + --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt + {{ openshift.master.api_url }}/healthz/ready + args: + # Disables the following warning: + # Consider using get_url or uri module rather than running curl + warn: no + register: api_available_output + until: api_available_output.stdout == 'ok' + retries: 120 + delay: 1 + changed_when: false + when: + - yedit_output.changed diff --git a/playbooks/openshift-master/private/templates/openshift-ansible-catalog-console.js b/playbooks/openshift-master/private/templates/openshift-ansible-catalog-console.js new file mode 100644 index 000000000..fd02325ba --- /dev/null +++ b/playbooks/openshift-master/private/templates/openshift-ansible-catalog-console.js @@ -0,0 +1 @@ +window.OPENSHIFT_CONSTANTS.TEMPLATE_SERVICE_BROKER_ENABLED = {{ 'true' if (template_service_broker_install | default(True)) else 'false' }}; diff --git a/playbooks/openshift-master/private/validate_restart.yml b/playbooks/openshift-master/private/validate_restart.yml new file mode 100644 index 000000000..1077d0b9c --- /dev/null +++ b/playbooks/openshift-master/private/validate_restart.yml @@ -0,0 +1,62 @@ +--- +- name: Validate configuration for rolling restart + hosts: oo_masters_to_config + roles: + - openshift_facts + tasks: + - fail: + msg: "openshift_rolling_restart_mode must be set to either 'services' or 'system'" + when: openshift_rolling_restart_mode is defined and openshift_rolling_restart_mode not in ["services", "system"] + - openshift_facts: + role: "{{ item.role }}" + local_facts: "{{ item.local_facts }}" + with_items: + - role: common + local_facts: + rolling_restart_mode: "{{ openshift_rolling_restart_mode | default('services') }}" + +# We create a temp file on localhost, then check each system that will +# be rebooted to see if that file exists. If it does, we know we're running +# ansible on a machine that needs a reboot, and we need to error out. +- name: Create temp file on localhost + hosts: localhost + connection: local + become: no + gather_facts: no + tasks: + - local_action: command mktemp + register: mktemp + changed_when: false + +- name: Check if temp file exists on any masters + hosts: oo_masters_to_config + tasks: + - stat: path="{{ hostvars.localhost.mktemp.stdout }}" + register: exists + changed_when: false + +- name: Cleanup temp file on localhost + hosts: localhost + connection: local + become: no + gather_facts: no + tasks: + - file: path="{{ hostvars.localhost.mktemp.stdout }}" state=absent + changed_when: false + +- name: Warn if restarting the system where ansible is running + hosts: oo_masters_to_config + tasks: + - pause: + prompt: > + Warning: Running playbook from a host that will be restarted! + Press CTRL+C and A to abort playbook execution. You may + continue by pressing ENTER but the playbook will stop + executing after this system has been restarted and services + must be verified manually. To only restart services, set + openshift_rolling_restart_mode=services in host + inventory and relaunch the playbook. + when: exists.stat.exists and openshift.common.rolling_restart_mode == 'system' + - set_fact: + current_host: "{{ exists.stat.exists }}" + when: openshift.common.rolling_restart_mode == 'system' diff --git a/playbooks/openshift-master/redeploy-certificates.yml b/playbooks/openshift-master/redeploy-certificates.yml new file mode 100644 index 000000000..8b7272485 --- /dev/null +++ b/playbooks/openshift-master/redeploy-certificates.yml @@ -0,0 +1,6 @@ +--- +- import_playbook: ../init/main.yml + +- import_playbook: private/redeploy-certificates.yml + +- import_playbook: private/restart.yml diff --git a/playbooks/openshift-master/redeploy-openshift-ca.yml b/playbooks/openshift-master/redeploy-openshift-ca.yml new file mode 100644 index 000000000..27f4e6b7d --- /dev/null +++ b/playbooks/openshift-master/redeploy-openshift-ca.yml @@ -0,0 +1,4 @@ +--- +- import_playbook: ../init/main.yml + +- import_playbook: private/redeploy-openshift-ca.yml diff --git a/playbooks/openshift-master/restart.yml b/playbooks/openshift-master/restart.yml new file mode 100644 index 000000000..041c1384d --- /dev/null +++ b/playbooks/openshift-master/restart.yml @@ -0,0 +1,4 @@ +--- +- import_playbook: ../init/main.yml + +- import_playbook: private/restart.yml diff --git a/playbooks/openshift-master/scaleup.yml b/playbooks/openshift-master/scaleup.yml new file mode 100644 index 000000000..f717cd0e9 --- /dev/null +++ b/playbooks/openshift-master/scaleup.yml @@ -0,0 +1,23 @@ +--- +- import_playbook: ../init/evaluate_groups.yml + +- name: Ensure there are new_masters or new_nodes + hosts: localhost + connection: local + become: no + gather_facts: no + tasks: + - fail: + msg: > + Detected neither new_masters nor new_nodes in inventory. Please + add hosts to the new_masters and new_nodes host groups to add + masters. + when: + - g_new_master_hosts | default([]) | length == 0 + - g_new_node_hosts | default([]) | length == 0 + +# Need a better way to do the above check for nodes without +# running evaluate_groups and init/main.yml +- import_playbook: ../init/main.yml + +- import_playbook: private/scaleup.yml
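scaleup.yml fails fast unless the inventory defines hosts under new_masters or new_nodes. A minimal sketch of the relevant groups in YAML inventory form, with hypothetical hostnames (a real openshift-ansible inventory also carries its usual OSEv3 groups and variables; an equivalent INI inventory works the same way):

    all:
      children:
        masters:
          hosts:
            master1.example.com:
        new_masters:
          hosts:
            master2.example.com:    # host being added; hypothetical
        new_nodes:
          hosts:
            master2.example.com: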
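For reference, the yedit edits in wire_aggregator.yml expand their dotted keys into nested mappings, so the aggregator wiring written to master-config.yaml comes out roughly as the following sketch (keys and values taken from the task list above; the nesting is assumed from yedit's dotted-key behavior):

    aggregatorConfig:
      proxyClientInfo:
        certFile: aggregator-front-proxy.crt
        keyFile: aggregator-front-proxy.key
    authConfig:
      requestHeader:
        clientCA: front-proxy-ca.crt
        clientCommonNames: [aggregator-front-proxy]
        usernameHeaders: [X-Remote-User]
        groupHeaders: [X-Remote-Group]
        extraHeaderPrefixes: [X-Remote-Extra-]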