Diffstat (limited to 'playbooks')
37 files changed, 298 insertions, 96 deletions
diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml index a1f541712..58b3a7835 100644 --- a/playbooks/adhoc/uninstall.yml +++ b/playbooks/adhoc/uninstall.yml @@ -325,6 +325,10 @@ service: name=docker state=restarted failed_when: false when: not (container_engine | changed) + register: l_docker_restart_docker_in_pb_result + until: not l_docker_restart_docker_in_pb_result | failed + retries: 3 + delay: 30 - hosts: masters become: yes diff --git a/playbooks/aws/openshift-cluster/cluster_hosts.yml b/playbooks/aws/openshift-cluster/cluster_hosts.yml index 119df9c7d..c2f4dfedc 100644 --- a/playbooks/aws/openshift-cluster/cluster_hosts.yml +++ b/playbooks/aws/openshift-cluster/cluster_hosts.yml @@ -4,6 +4,8 @@ g_all_hosts: "{{ groups['tag_clusterid_' ~ cluster_id] | default([]) g_etcd_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_etcd'] | default([])) }}" +g_new_etcd_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_new_etcd'] | default([])) }}" + g_lb_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_lb'] | default([])) }}" g_nfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_nfs'] | default([])) }}" diff --git a/playbooks/aws/openshift-cluster/config.yml b/playbooks/aws/openshift-cluster/config.yml index 8d64b0521..821a0f30e 100644 --- a/playbooks/aws/openshift-cluster/config.yml +++ b/playbooks/aws/openshift-cluster/config.yml @@ -35,4 +35,3 @@ openshift_use_flannel: "{{ lookup('oo_option', 'use_flannel') }}" openshift_use_calico: "{{ lookup('oo_option', 'use_calico') }}" openshift_use_fluentd: "{{ lookup('oo_option', 'use_fluentd') }}" - openshift_use_dnsmasq: false diff --git a/playbooks/byo/openshift-checks/README.md b/playbooks/byo/openshift-checks/README.md index 4b2ff1f94..f0f14b268 100644 --- a/playbooks/byo/openshift-checks/README.md +++ b/playbooks/byo/openshift-checks/README.md @@ -39,7 +39,9 @@ against your inventory file. Here is the step-by-step: $ cd openshift-ansible ``` -2. Run the appropriate playbook: +2. Install the [dependencies](../../../README.md#setup) + +3. Run the appropriate playbook: ```console $ ansible-playbook -i <inventory file> playbooks/byo/openshift-checks/pre-install.yml @@ -57,9 +59,8 @@ against your inventory file. Here is the step-by-step: $ ansible-playbook -i <inventory file> playbooks/byo/openshift-checks/certificate_expiry/default.yaml -v ``` -## Running via Docker image +## Running in a container This repository is built into a Docker image including Ansible so that it can -be run anywhere Docker is available. Instructions for doing so may be found -[in the README](../../README_CONTAINER_IMAGE.md). - +be run anywhere Docker is available, without the need to manually install dependencies. +Instructions for doing so may be found [in the README](../../../README_CONTAINER_IMAGE.md). 
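The uninstall hunk above introduces the register/until/retries pattern that recurs throughout this commit for docker restarts. As a standalone task it looks roughly like the following (a minimal sketch; the play scope and register variable name are illustrative, not taken from the diff):

```yaml
# Retry a docker restart up to 3 times, 30 seconds apart, instead of failing
# the play on the first transient error. The bare 'failed' filter matches the
# Ansible 2.x style used in the diff.
- hosts: all
  become: yes
  tasks:
    - name: Restart docker with retries
      service:
        name: docker
        state: restarted
      register: l_docker_restart_result
      until: not l_docker_restart_result | failed
      retries: 3
      delay: 30
```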
diff --git a/playbooks/byo/openshift-cluster/cluster_hosts.yml b/playbooks/byo/openshift-cluster/cluster_hosts.yml index 9d086b7b6..e807ac004 100644 --- a/playbooks/byo/openshift-cluster/cluster_hosts.yml +++ b/playbooks/byo/openshift-cluster/cluster_hosts.yml @@ -1,6 +1,8 @@ --- g_etcd_hosts: "{{ groups.etcd | default([]) }}" +g_new_etcd_hosts: "{{ groups.new_etcd | default([]) }}" + g_lb_hosts: "{{ groups.lb | default([]) }}" g_master_hosts: "{{ groups.masters | default([]) }}" @@ -18,6 +20,7 @@ g_glusterfs_hosts: "{{ groups.glusterfs | default([]) }}" g_glusterfs_registry_hosts: "{{ groups.glusterfs_registry | default(g_glusterfs_hosts) }}" g_all_hosts: "{{ g_master_hosts | union(g_node_hosts) | union(g_etcd_hosts) - | union(g_lb_hosts) | union(g_nfs_hosts) + | union(g_new_etcd_hosts) | union(g_lb_hosts) | union(g_nfs_hosts) | union(g_new_node_hosts)| union(g_new_master_hosts) + | union(g_glusterfs_hosts) | union(g_glusterfs_registry_hosts) | default([]) }}" diff --git a/playbooks/byo/openshift-cluster/config.yml b/playbooks/byo/openshift-cluster/config.yml index acf5469bf..c0978c6f6 100644 --- a/playbooks/byo/openshift-cluster/config.yml +++ b/playbooks/byo/openshift-cluster/config.yml @@ -3,10 +3,6 @@ tags: - always -- include: ../../common/openshift-cluster/std_include.yml - tags: - - always - - include: ../../common/openshift-cluster/config.yml vars: openshift_cluster_id: "{{ cluster_id | default('default') }}" diff --git a/playbooks/byo/openshift-etcd/scaleup.yml b/playbooks/byo/openshift-etcd/scaleup.yml new file mode 100644 index 000000000..f03854c2a --- /dev/null +++ b/playbooks/byo/openshift-etcd/scaleup.yml @@ -0,0 +1,22 @@ +--- +- hosts: localhost + connection: local + become: no + gather_facts: no + tasks: + - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml + - add_host: + name: "{{ item }}" + groups: l_oo_all_hosts + with_items: "{{ g_all_hosts }}" + +- hosts: l_oo_all_hosts + gather_facts: no + tasks: + - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml + +- include: ../../common/openshift-cluster/evaluate_groups.yml +- include: ../../common/openshift-etcd/scaleup.yml + vars: + openshift_cluster_id: "{{ cluster_id | default('default') }}" + openshift_deployment_type: "{{ deployment_type }}" diff --git a/playbooks/common/openshift-checks/health.yml b/playbooks/common/openshift-checks/health.yml index c7766ff04..7e83b4aa6 100644 --- a/playbooks/common/openshift-checks/health.yml +++ b/playbooks/common/openshift-checks/health.yml @@ -1,16 +1,13 @@ --- -# openshift_health_checker depends on openshift_version which now requires group eval. - include: ../openshift-cluster/evaluate_groups.yml - tags: - - always - name: Run OpenShift health checks hosts: OSEv3 roles: - openshift_health_checker vars: - - r_openshift_health_checker_playbook_context: "health" + - r_openshift_health_checker_playbook_context: health post_tasks: - - action: openshift_health_check # https://github.com/ansible/ansible/issues/20513 + - action: openshift_health_check args: checks: ['@health'] diff --git a/playbooks/common/openshift-checks/pre-install.yml b/playbooks/common/openshift-checks/pre-install.yml index 7ca9f7e8b..afd4f95e0 100644 --- a/playbooks/common/openshift-checks/pre-install.yml +++ b/playbooks/common/openshift-checks/pre-install.yml @@ -1,16 +1,13 @@ --- -# openshift_health_checker depends on openshift_version which now requires group eval. 
- include: ../openshift-cluster/evaluate_groups.yml - tags: - - always - hosts: OSEv3 name: run OpenShift pre-install checks roles: - openshift_health_checker vars: - - r_openshift_health_checker_playbook_context: "pre-install" + - r_openshift_health_checker_playbook_context: pre-install post_tasks: - - action: openshift_health_check # https://github.com/ansible/ansible/issues/20513 + - action: openshift_health_check args: checks: ['@preflight'] diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml index 7224ae712..7136f1c1f 100644 --- a/playbooks/common/openshift-cluster/config.yml +++ b/playbooks/common/openshift-cluster/config.yml @@ -1,4 +1,8 @@ --- +- include: std_include.yml + tags: + - always + # TODO: refactor this into its own include # and pass a variable for ctx - name: Verify Requirements @@ -6,7 +10,7 @@ roles: - openshift_health_checker vars: - - r_openshift_health_checker_playbook_context: "install" + - r_openshift_health_checker_playbook_context: install post_tasks: - action: openshift_health_check args: diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml index baca72c58..8accda8c7 100644 --- a/playbooks/common/openshift-cluster/evaluate_groups.yml +++ b/playbooks/common/openshift-cluster/evaluate_groups.yml @@ -5,10 +5,10 @@ become: no gather_facts: no tasks: - - name: Evaluate groups - g_etcd_hosts required + - name: Evaluate groups - g_etcd_hosts or g_new_etcd_hosts required fail: - msg: This playbook requires g_etcd_hosts to be set - when: g_etcd_hosts is not defined + msg: This playbook requires g_etcd_hosts or g_new_etcd_hosts to be set + when: "{{ g_etcd_hosts is not defined and g_new_etcd_hosts is not defined}}" - name: Evaluate groups - g_master_hosts or g_new_master_hosts required fail: @@ -67,6 +67,15 @@ when: g_master_hosts|length > 0 changed_when: no + - name: Evaluate oo_new_etcd_to_config + add_host: + name: "{{ item }}" + groups: oo_new_etcd_to_config + ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" + ansible_become: "{{ g_sudo | default(omit) }}" + with_items: "{{ g_new_etcd_hosts | default([]) }}" + changed_when: no + - name: Evaluate oo_masters_to_config add_host: name: "{{ item }}" diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml index 8d94b6509..ce7f981ab 100644 --- a/playbooks/common/openshift-cluster/openshift_hosted.yml +++ b/playbooks/common/openshift-cluster/openshift_hosted.yml @@ -26,6 +26,8 @@ logging_elasticsearch_cluster_size: "{{ openshift_hosted_logging_elasticsearch_cluster_size | default(1) }}" logging_elasticsearch_ops_cluster_size: "{{ openshift_hosted_logging_elasticsearch_ops_cluster_size | default(1) }}" roles: + - role: openshift_default_storage_class + when: openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce') - role: openshift_hosted - role: openshift_metrics when: openshift_hosted_metrics_deploy | default(false) | bool @@ -45,8 +47,6 @@ - role: cockpit-ui when: ( openshift.common.version_gte_3_3_or_1_3 | bool ) and ( openshift_hosted_manage_registry | default(true) | bool ) and not (openshift.docker.hosted_registry_insecure | default(false) | bool) - - role: openshift_default_storage_class - when: openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce') - name: Update master-config for 
publicLoggingURL hosts: oo_masters_to_config:!oo_first_master diff --git a/playbooks/common/openshift-cluster/openshift_logging.yml b/playbooks/common/openshift-cluster/openshift_logging.yml index 57580406c..c5f0c406a 100644 --- a/playbooks/common/openshift-cluster/openshift_logging.yml +++ b/playbooks/common/openshift-cluster/openshift_logging.yml @@ -1,5 +1,5 @@ --- -- include: evaluate_groups.yml +- include: std_include.yml - name: OpenShift Aggregated Logging hosts: oo_first_master diff --git a/playbooks/common/openshift-cluster/openshift_metrics.yml b/playbooks/common/openshift-cluster/openshift_metrics.yml index bcff4a1a1..2c8ad5b75 100644 --- a/playbooks/common/openshift-cluster/openshift_metrics.yml +++ b/playbooks/common/openshift-cluster/openshift_metrics.yml @@ -5,3 +5,12 @@ hosts: oo_first_master roles: - openshift_metrics + +- name: OpenShift Metrics + hosts: oo_masters:!oo_first_master + serial: 1 + tasks: + - name: Setup the non-first masters configs + include_role: + name: openshift_metrics + tasks_from: update_master_config.yaml diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/router.yml b/playbooks/common/openshift-cluster/redeploy-certificates/router.yml index f46553a95..748bbbf91 100644 --- a/playbooks/common/openshift-cluster/redeploy-certificates/router.yml +++ b/playbooks/common/openshift-cluster/redeploy-certificates/router.yml @@ -118,7 +118,7 @@ --type=kubernetes.io/tls --config={{ mktemp.stdout }}/admin.kubeconfig --confirm - -o json | {{ openshift.common.client_binary }} replace -f - + -o json | {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig replace -f - - name: Remove temporary router certificate and key files file: diff --git a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml index 07db071ce..02b8a9d3c 100644 --- a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml @@ -52,9 +52,13 @@ - name: Drain Node for Kubelet upgrade command: > - {{ openshift.common.admin_binary }} drain {{ openshift.node.nodename }} --force --delete-local-data --ignore-daemonsets + {{ openshift.common.admin_binary }} drain {{ openshift.node.nodename }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets delegate_to: "{{ groups.oo_first_master.0 }}" when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade + register: l_docker_upgrade_drain_result + until: not l_docker_upgrade_drain_result | failed + retries: 60 + delay: 60 - include: upgrade.yml when: l_docker_upgrade is defined and l_docker_upgrade | bool diff --git a/playbooks/common/openshift-cluster/upgrades/docker/restart.yml b/playbooks/common/openshift-cluster/upgrades/docker/restart.yml index 1b418920f..13313377e 100644 --- a/playbooks/common/openshift-cluster/upgrades/docker/restart.yml +++ b/playbooks/common/openshift-cluster/upgrades/docker/restart.yml @@ -1,6 +1,10 @@ --- - name: Restart docker service: name=docker state=restarted + register: l_docker_restart_docker_in_upgrade_result + until: not l_docker_restart_docker_in_upgrade_result | failed + retries: 3 + delay: 30 - name: Update docker facts openshift_facts: @@ -24,4 +28,5 @@ state: started delay: 10 port: "{{ openshift.master.api_port }}" + timeout: 600 when: inventory_hostname in groups.oo_masters_to_config 
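Several upgrade plays in this commit wrap the node drain in the same retry loop and pass an explicit admin kubeconfig. In isolation the pattern is (a sketch based on the docker_upgrade hunk above; the register variable name is shortened for illustration):

```yaml
- name: Drain Node for Kubelet upgrade
  command: >
    {{ openshift.common.admin_binary }} drain {{ openshift.node.nodename }}
    --config={{ openshift.common.config_base }}/master/admin.kubeconfig
    --force --delete-local-data --ignore-daemonsets
  delegate_to: "{{ groups.oo_first_master.0 }}"
  register: l_drain_result
  # Retry for up to an hour (60 x 60s) on transient API or eviction errors.
  until: not l_drain_result | failed
  retries: 60
  delay: 60
```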
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml index 17f8fc6e9..35d000e49 100644 --- a/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml @@ -32,7 +32,13 @@ - debug: var=docker_image_count.stdout when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool -- service: name=docker state=stopped +- service: + name: docker + state: stopped + register: l_pb_docker_upgrade_stop_result + until: not l_pb_docker_upgrade_stop_result | failed + retries: 3 + delay: 30 - name: Upgrade Docker package: name=docker{{ '-' + docker_version }} state=present diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml new file mode 100644 index 000000000..497709d25 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml @@ -0,0 +1,13 @@ +--- +- name: Verify Host Requirements + hosts: oo_all_hosts + roles: + - openshift_health_checker + vars: + - r_openshift_health_checker_playbook_context: upgrade + post_tasks: + - action: openshift_health_check + args: + checks: + - disk_availability + - memory_availability diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml index 227fbf60a..6a0471948 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml @@ -12,6 +12,12 @@ command: > {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig migrate storage --include=* --confirm + register: l_pb_upgrade_control_plane_pre_upgrade_storage + when: openshift_upgrade_pre_storage_migration_enabled | default(true,true) | bool + failed_when: + - openshift_upgrade_pre_storage_migration_enabled | default(true,true) | bool + - l_pb_upgrade_control_plane_pre_upgrade_storage.rc != 0 + - openshift_upgrade_pre_storage_migration_fatal | default(true,true) | bool # If facts cache were for some reason deleted, this fact may not be set, and if not set # it will always default to true. 
This causes problems for the etcd data dir fact detection @@ -140,16 +146,21 @@ - include: "{{ openshift_master_upgrade_post_hook }}" when: openshift_master_upgrade_post_hook is defined - - set_fact: - master_update_complete: True - -- name: Post master upgrade - Upgrade clusterpolicies storage - hosts: oo_first_master - tasks: - - name: Upgrade clusterpolicies storage + - name: Post master upgrade - Upgrade clusterpolicies storage command: > {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig migrate storage --include=clusterpolicies --confirm + register: l_pb_upgrade_control_plane_post_upgrade_storage + when: openshift_upgrade_post_storage_migration_enabled | default(true,true) | bool + failed_when: + - openshift_upgrade_post_storage_migration_enabled | default(true,true) | bool + - l_pb_upgrade_control_plane_post_upgrade_storage.rc != 0 + - openshift_upgrade_post_storage_migration_fatal | default(false,true) | bool + run_once: true + delegate_to: "{{ groups.oo_first_master.0 }}" + + - set_fact: + master_update_complete: True ############################################################################## # Gate on master update complete @@ -223,18 +234,24 @@ - name: Reconcile Security Context Constraints command: > - {{ openshift.common.client_binary }} adm policy reconcile-sccs --confirm --additive-only=true -o name + {{ openshift.common.client_binary }} adm policy --config={{ openshift.common.config_base }}/master/admin.kubeconfig reconcile-sccs --confirm --additive-only=true -o name register: reconcile_scc_result changed_when: - reconcile_scc_result.stdout != '' - reconcile_scc_result.rc == 0 run_once: true - - name: Upgrade job storage + - name: Migrate storage post policy reconciliation command: > {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig migrate storage --include=* --confirm run_once: true + register: l_pb_upgrade_control_plane_post_upgrade_storage + when: openshift_upgrade_post_storage_migration_enabled | default(true,true) | bool + failed_when: + - openshift_upgrade_post_storage_migration_enabled | default(true,true) | bool + - l_pb_upgrade_control_plane_post_upgrade_storage.rc != 0 + - openshift_upgrade_post_storage_migration_fatal | default(false,true) | bool - set_fact: reconcile_complete: True @@ -294,8 +311,12 @@ - name: Drain Node for Kubelet upgrade command: > - {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --force --delete-local-data --ignore-daemonsets + {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets delegate_to: "{{ groups.oo_first_master.0 }}" + register: l_upgrade_control_plane_drain_result + until: not l_upgrade_control_plane_drain_result | failed + retries: 60 + delay: 60 roles: - lib_openshift diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml index 1d1e440d4..c93a5d89c 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml @@ -26,8 +26,12 @@ - name: Drain Node for Kubelet upgrade command: > - {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --force 
--delete-local-data --ignore-daemonsets + {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets delegate_to: "{{ groups.oo_first_master.0 }}" + register: l_upgrade_nodes_drain_result + until: not l_upgrade_nodes_drain_result | failed + retries: 60 + delay: 60 roles: - lib_openshift diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml index 5b9ac9e8f..da4444867 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml @@ -70,6 +70,10 @@ # docker is configured and running. skip_docker_role: True +- include: ../pre/verify_health_checks.yml + tags: + - pre_upgrade + - include: ../pre/verify_control_plane_running.yml tags: - pre_upgrade diff --git a/playbooks/common/openshift-etcd/migrate.yml b/playbooks/common/openshift-etcd/migrate.yml index c655449fa..3e7a48669 100644 --- a/playbooks/common/openshift-etcd/migrate.yml +++ b/playbooks/common/openshift-etcd/migrate.yml @@ -17,6 +17,26 @@ tags: - always +- name: Prepare masters for etcd data migration + hosts: oo_masters_to_config + tasks: + - set_fact: + master_services: + - "{{ openshift.common.service_type + '-master' }}" + - set_fact: + master_services: + - "{{ openshift.common.service_type + '-master-controllers' }}" + - "{{ openshift.common.service_type + '-master-api' }}" + when: + - (openshift_master_cluster_method is defined and openshift_master_cluster_method == "native") or openshift.common.is_master_system_container | bool + - debug: + msg: "master service name: {{ master_services }}" + - name: Stop masters + service: + name: "{{ item }}" + state: stopped + with_items: "{{ master_services }}" + - name: Backup v2 data hosts: oo_etcd_to_migrate gather_facts: no @@ -47,26 +67,6 @@ when: - etcd_backup_failed | length > 0 -- name: Prepare masters for etcd data migration - hosts: oo_masters_to_config - tasks: - - set_fact: - master_services: - - "{{ openshift.common.service_type + '-master' }}" - - set_fact: - master_services: - - "{{ openshift.common.service_type + '-master-controllers' }}" - - "{{ openshift.common.service_type + '-master-api' }}" - when: - - (openshift_master_cluster_method is defined and openshift_master_cluster_method == "native") or openshift.common.is_master_system_container | bool - - debug: - msg: "master service name: {{ master_services }}" - - name: Stop masters - service: - name: "{{ item }}" - state: stopped - with_items: "{{ master_services }}" - - name: Migrate etcd data from v2 to v3 hosts: oo_etcd_to_migrate gather_facts: no diff --git a/playbooks/common/openshift-etcd/scaleup.yml b/playbooks/common/openshift-etcd/scaleup.yml new file mode 100644 index 000000000..47fa8cdf5 --- /dev/null +++ b/playbooks/common/openshift-etcd/scaleup.yml @@ -0,0 +1,30 @@ +--- +- name: Configure etcd + hosts: oo_new_etcd_to_config + serial: 1 + any_errors_fatal: true + vars: + etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" + pre_tasks: + - name: Add new etcd members to cluster + command: > + /usr/bin/etcdctl --cert-file {{ etcd_peer_cert_file }} + --key-file {{ etcd_peer_key_file }} + --ca-file {{ etcd_peer_ca_file }} + -C {{ etcd_peer_url_scheme }}://{{ etcd_ca_host }}:{{ etcd_client_port }} + member add {{ inventory_hostname }} {{ etcd_peer_url_scheme }}://{{ 
hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}:{{ etcd_peer_port }} + delegate_to: "{{ etcd_ca_host }}" + register: etcd_add_check + roles: + - role: openshift_etcd + when: etcd_add_check.rc == 0 + etcd_peers: "{{ groups.oo_etcd_to_config | union(groups.oo_new_etcd_to_config)| default([], true) }}" + etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" + etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}" + etcd_initial_cluster_state: "existing" + initial_etcd_cluster: "{{ etcd_add_check.stdout_lines[3] | regex_replace('ETCD_INITIAL_CLUSTER=','') }}" + etcd_hostname: "{{ inventory_hostname }}" + etcd_ca_setup: False + r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" + - role: nickhammond.logrotate + when: etcd_add_check.rc == 0 diff --git a/playbooks/common/openshift-glusterfs/config.yml b/playbooks/common/openshift-glusterfs/config.yml index 1efdfb336..edc15a3f2 100644 --- a/playbooks/common/openshift-glusterfs/config.yml +++ b/playbooks/common/openshift-glusterfs/config.yml @@ -1,6 +1,6 @@ --- -- name: Open firewall ports for GlusterFS - hosts: oo_glusterfs_to_config +- name: Open firewall ports for GlusterFS nodes + hosts: glusterfs vars: os_firewall_allow: - service: glusterfs_sshd @@ -14,7 +14,24 @@ roles: - role: os_firewall when: - - openshift_storage_glusterfs_is_native | default(True) + - openshift_storage_glusterfs_is_native | default(True) | bool + +- name: Open firewall ports for GlusterFS registry nodes + hosts: glusterfs_registry + vars: + os_firewall_allow: + - service: glusterfs_sshd + port: "2222/tcp" + - service: glusterfs_daemon + port: "24007/tcp" + - service: glusterfs_management + port: "24008/tcp" + - service: glusterfs_bricks + port: "49152-49251/tcp" + roles: + - role: os_firewall + when: + - openshift_storage_glusterfs_registry_is_native | default(True) | bool - name: Configure GlusterFS hosts: oo_first_master diff --git a/playbooks/common/openshift-loadbalancer/config.yml b/playbooks/common/openshift-loadbalancer/config.yml index c414913bf..2dacc1218 100644 --- a/playbooks/common/openshift-loadbalancer/config.yml +++ b/playbooks/common/openshift-loadbalancer/config.yml @@ -12,5 +12,6 @@ openshift_use_nuage | default(false), nuage_mon_rest_server_port | default(none))) + openshift_loadbalancer_additional_backends | default([]) }}" + openshift_image_tag: "{{ hostvars[groups.oo_first_master.0].openshift_image_tag }}" roles: - role: openshift_loadbalancer diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml index 7d3a371e3..b30450def 100644 --- a/playbooks/common/openshift-master/config.yml +++ b/playbooks/common/openshift-master/config.yml @@ -5,6 +5,19 @@ t_oo_option_master_debug_level: "{{ lookup('oo_option', 'openshift_master_debug_level') }}" pre_tasks: + # Per https://bugzilla.redhat.com/show_bug.cgi?id=1469336 + # + # When scaling up a cluster upgraded from OCP <= 3.5, ensure that + # OPENSHIFT_DEFAULT_REGISTRY is present as defined on the existing + # masters, or absent if such is the case. 
+ - name: Detect if this host is a new master in a scale up + set_fact: + g_openshift_master_is_scaleup: "{{ openshift.common.hostname in ( groups['new_masters'] | default([]) ) }}" + + - name: Scaleup Detection + debug: + var: g_openshift_master_is_scaleup + - name: Check for RPM generated config marker file .config_managed stat: path: /etc/origin/.config_managed @@ -69,7 +82,7 @@ ha: "{{ openshift_master_ha | default(groups.oo_masters | length > 1) }}" master_count: "{{ openshift_master_count | default(groups.oo_masters | length) }}" -- name: Inspect state of first master session secrets and config +- name: Inspect state of first master config settings hosts: oo_first_master roles: - role: openshift_facts @@ -98,6 +111,42 @@ set_fact: l_etcd3_enabled: "{{ etcd3_grep.rc == 0 | bool }}" + - name: Check if atomic-openshift-master sysconfig exists yet + stat: + path: /etc/sysconfig/atomic-openshift-master + register: l_aom_exists + + - name: Preserve OPENSHIFT_DEFAULT_REGISTRY master parameter if present + command: awk '/^OPENSHIFT_DEFAULT_REGISTRY/' /etc/sysconfig/atomic-openshift-master + register: l_default_registry_defined + when: l_aom_exists.stat.exists | bool + + - name: Check if atomic-openshift-master-api sysconfig exists yet + stat: + path: /etc/sysconfig/atomic-openshift-master-api + register: l_aom_api_exists + + - name: Preserve OPENSHIFT_DEFAULT_REGISTRY master-api parameter if present + command: awk '/^OPENSHIFT_DEFAULT_REGISTRY/' /etc/sysconfig/atomic-openshift-master-api + register: l_default_registry_defined_api + when: l_aom_api_exists.stat.exists | bool + + - name: Check if atomic-openshift-master-controllers sysconfig exists yet + stat: + path: /etc/sysconfig/atomic-openshift-master-controllers + register: l_aom_controllers_exists + + - name: Preserve OPENSHIFT_DEFAULT_REGISTRY master-controllers parameter if present + command: awk '/^OPENSHIFT_DEFAULT_REGISTRY/' /etc/sysconfig/atomic-openshift-master-controllers + register: l_default_registry_defined_controllers + when: l_aom_controllers_exists.stat.exists | bool + + - name: Update facts with OPENSHIFT_DEFAULT_REGISTRY value + set_fact: + l_default_registry_value: "{{ l_default_registry_defined.stdout | default('') }}" + l_default_registry_value_api: "{{ l_default_registry_defined_api.stdout | default('') }}" + l_default_registry_value_controllers: "{{ l_default_registry_defined_controllers.stdout | default('') }}" + - name: Generate master session secrets hosts: oo_first_master vars: @@ -127,6 +176,9 @@ | union(groups['oo_etcd_to_config'] | default([]))) | oo_collect('openshift.common.hostname') | default([]) | join (',') }}" + openshift_no_proxy_etcd_host_ips: "{{ hostvars | oo_select_keys(groups['oo_etcd_to_config'] | default([])) + | oo_collect('openshift.common.ip') | default([]) | join(',') + }}" roles: - role: openshift_master openshift_ca_host: "{{ groups.oo_first_master.0 }}" @@ -142,6 +194,10 @@ etcd_cert_prefix: "master.etcd-" r_openshift_master_clean_install: "{{ hostvars[groups.oo_first_master.0].l_clean_install }}" r_openshift_master_etcd3_storage: "{{ hostvars[groups.oo_first_master.0].l_etcd3_enabled }}" + openshift_master_is_scaleup_host: "{{ g_openshift_master_is_scaleup | default(false) }}" + openshift_master_default_registry_value: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value }}" + openshift_master_default_registry_value_api: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value_api }}" + openshift_master_default_registry_value_controllers: "{{ 
hostvars[groups.oo_first_master.0].l_default_registry_value_controllers }}" - role: nuage_master when: openshift.common.use_nuage | bool - role: calico_master diff --git a/playbooks/common/openshift-master/restart_hosts.yml b/playbooks/common/openshift-master/restart_hosts.yml index 67ba0aa2e..a5dbe0590 100644 --- a/playbooks/common/openshift-master/restart_hosts.yml +++ b/playbooks/common/openshift-master/restart_hosts.yml @@ -37,3 +37,4 @@ state: started delay: 10 port: "{{ openshift.master.api_port }}" + timeout: 600 diff --git a/playbooks/common/openshift-master/restart_services.yml b/playbooks/common/openshift-master/restart_services.yml index 508b5a3ac..a844fb369 100644 --- a/playbooks/common/openshift-master/restart_services.yml +++ b/playbooks/common/openshift-master/restart_services.yml @@ -15,6 +15,7 @@ state: started delay: 10 port: "{{ openshift.master.api_port }}" + timeout: 600 when: openshift_master_ha | bool - name: Restart master controllers service: diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml index acebabc91..ef7d54f9f 100644 --- a/playbooks/common/openshift-node/config.yml +++ b/playbooks/common/openshift-node/config.yml @@ -1,25 +1,4 @@ --- -- name: Gather and set facts for node hosts - hosts: oo_nodes_to_config - vars: - t_oo_option_node_debug_level: "{{ lookup('oo_option', 'openshift_node_debug_level') }}" - pre_tasks: - - set_fact: - openshift_node_debug_level: "{{ t_oo_option_node_debug_level }}" - when: openshift_node_debug_level is not defined and t_oo_option_node_debug_level != "" - roles: - - openshift_facts - tasks: - # Since the master is generating the node certificates before they are - # configured, we need to make sure to set the node properties beforehand if - # we do not want the defaults - - openshift_facts: - role: node - local_facts: - labels: "{{ openshift_node_labels | default(None) }}" - annotations: "{{ openshift_node_annotations | default(None) }}" - schedulable: "{{ openshift_schedulable | default(openshift_scheduleable) | default(None) }}" - - name: Evaluate node groups hosts: localhost become: no @@ -32,7 +11,11 @@ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" ansible_become: "{{ g_sudo | default(omit) }}" with_items: "{{ groups.oo_nodes_to_config | default([]) }}" - when: hostvars[item].openshift is defined and hostvars[item].openshift.common is defined and hostvars[item].openshift.common.is_containerized | bool and (item in groups.oo_nodes_to_config and item in groups.oo_masters_to_config) + when: + - hostvars[item].openshift is defined + - hostvars[item].openshift.common is defined + - hostvars[item].openshift.common.is_containerized | bool + - (item in groups.oo_nodes_to_config and item in groups.oo_masters_to_config) changed_when: False - name: Configure containerized nodes @@ -47,8 +30,7 @@ | union(groups['oo_etcd_to_config'] | default([]))) | oo_collect('openshift.common.hostname') | default([]) | join (',') }}" - when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and - openshift_generate_no_proxy_hosts | default(True) | bool }}" + roles: - role: openshift_node openshift_ca_host: "{{ groups.oo_first_master.0 }}" @@ -64,8 +46,6 @@ | union(groups['oo_etcd_to_config'] | default([]))) | oo_collect('openshift.common.hostname') | default([]) | join (',') }}" - when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and - openshift_generate_no_proxy_hosts | default(True) | bool }}" roles: - role: openshift_node 
openshift_ca_host: "{{ groups.oo_first_master.0 }}" diff --git a/playbooks/common/openshift-node/network_manager.yml b/playbooks/common/openshift-node/network_manager.yml index 0014a5dbd..b3a7399dc 100644 --- a/playbooks/common/openshift-node/network_manager.yml +++ b/playbooks/common/openshift-node/network_manager.yml @@ -1,4 +1,6 @@ --- +- include: ../openshift-cluster/evaluate_groups.yml + - name: Install and configure NetworkManager hosts: oo_all_hosts become: yes diff --git a/playbooks/common/openshift-node/restart.yml b/playbooks/common/openshift-node/restart.yml index 01cf948e0..ed2473a43 100644 --- a/playbooks/common/openshift-node/restart.yml +++ b/playbooks/common/openshift-node/restart.yml @@ -11,6 +11,10 @@ service: name: docker state: restarted + register: l_docker_restart_docker_in_node_result + until: not l_docker_restart_docker_in_node_result | failed + retries: 3 + delay: 30 - name: Update docker facts openshift_facts: @@ -36,6 +40,7 @@ state: started delay: 10 port: "{{ openshift.master.api_port }}" + timeout: 600 when: inventory_hostname in groups.oo_masters_to_config - name: restart node diff --git a/playbooks/gce/openshift-cluster/cluster_hosts.yml b/playbooks/gce/openshift-cluster/cluster_hosts.yml index 05a58db73..e5f41382b 100644 --- a/playbooks/gce/openshift-cluster/cluster_hosts.yml +++ b/playbooks/gce/openshift-cluster/cluster_hosts.yml @@ -4,6 +4,8 @@ g_all_hosts: "{{ groups['tag_clusterid-' ~ cluster_id] | default([]) g_etcd_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-etcd'] | default([])) }}" +g_new_etcd_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-new_etcd'] | default([])) }}" + g_lb_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-lb'] | default([])) }}" g_nfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-nfs'] | default([])) }}" diff --git a/playbooks/libvirt/openshift-cluster/cluster_hosts.yml b/playbooks/libvirt/openshift-cluster/cluster_hosts.yml index 05a58db73..e5f41382b 100644 --- a/playbooks/libvirt/openshift-cluster/cluster_hosts.yml +++ b/playbooks/libvirt/openshift-cluster/cluster_hosts.yml @@ -4,6 +4,8 @@ g_all_hosts: "{{ groups['tag_clusterid-' ~ cluster_id] | default([]) g_etcd_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-etcd'] | default([])) }}" +g_new_etcd_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-new_etcd'] | default([])) }}" + g_lb_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-lb'] | default([])) }}" g_nfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-nfs'] | default([])) }}" diff --git a/playbooks/libvirt/openshift-cluster/config.yml b/playbooks/libvirt/openshift-cluster/config.yml index 477213f4e..569e00da2 100644 --- a/playbooks/libvirt/openshift-cluster/config.yml +++ b/playbooks/libvirt/openshift-cluster/config.yml @@ -37,4 +37,3 @@ openshift_use_flannel: "{{ lookup('oo_option', 'use_flannel') }}" openshift_use_calico: "{{ lookup('oo_option', 'use_calico') }}" openshift_use_fluentd: "{{ lookup('oo_option', 'use_fluentd') }}" - openshift_use_dnsmasq: false diff --git a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml index ccd29be29..4df86effa 100644 --- a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml +++ b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml @@ -49,11 +49,15 @@ - '{{ instances }}' - [ user-data, meta-data ] +- name: Check for genisoimage + command: which genisoimage + register: which_genisoimage + - name: Create the 
cloud-init config drive - command: 'genisoimage -output {{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso -volid cidata -joliet -rock user-data meta-data' + command: "{{ 'genisoimage' if which_genisoimage.rc == 0 else 'mkisofs' }} -output {{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso -volid cidata -joliet -rock user-data meta-data" args: - chdir: '{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/' - creates: '{{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso' + chdir: "{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/" + creates: "{{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso" with_items: '{{ instances }}' - name: Refresh the libvirt storage pool for openshift diff --git a/playbooks/openstack/openshift-cluster/cluster_hosts.yml b/playbooks/openstack/openshift-cluster/cluster_hosts.yml index 505f7b3a8..12c9fd442 100644 --- a/playbooks/openstack/openshift-cluster/cluster_hosts.yml +++ b/playbooks/openstack/openshift-cluster/cluster_hosts.yml @@ -4,6 +4,8 @@ g_all_hosts: "{{ groups['meta-clusterid_' ~ cluster_id] | default([]) g_etcd_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_etcd'] | default([])) }}" +g_new_etcd_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_new_etcd'] | default([])) }}" + g_lb_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_lb'] | default([])) }}" g_nfs_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_nfs'] | default([])) }}" |
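Taken together, the new g_new_etcd_hosts variables, the oo_new_etcd_to_config group evaluation, and the byo/common openshift-etcd/scaleup.yml playbooks add an etcd scale-up flow. A rough illustration of how it is consumed, with placeholder host names (the YAML inventory form below is an assumption; an INI inventory with a [new_etcd] group works the same way):

```yaml
# Hypothetical inventory fragment: existing cluster members under 'etcd',
# the member to be added under 'new_etcd'. Host names are placeholders.
all:
  children:
    OSEv3:
      children:
        etcd:
          hosts:
            etcd1.example.com:
            etcd2.example.com:
        new_etcd:
          hosts:
            etcd3.example.com:
```

The new member is then joined with `ansible-playbook -i <inventory file> playbooks/byo/openshift-etcd/scaleup.yml`, which evaluates the groups, runs `etcdctl member add` against the existing cluster, and configures the new host with `etcd_initial_cluster_state: existing`.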