| field | value | date |
|---|---|---|
| author | Andrew Butcher <abutcher@redhat.com> | 2017-11-16 15:49:17 -0500 |
| committer | Russell Teague <rteague@redhat.com> | 2017-12-05 14:02:23 -0500 |
| commit | 259272fa2359fd4d3bd78291bd0b06a1261c4b01 (patch) | |
| tree | d8ea0b2afe7df6c400160c7b2f3870f9e44bc6a8 | |
| parent | 968f614e984da91a4e883a9642af8e66d49d87a0 (diff) | |
Remove all references to pacemaker (pcs, pcsd) and openshift.master.cluster_method.
With pacemaker removed, there is no longer a need for
openshift.master.cluster_method: native is the only remaining cluster method.
21 files changed, 8 insertions, 243 deletions
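
To illustrate the resulting configuration surface, below is an abridged, hypothetical `inventory/byo/hosts.example`-style inventory for a three-master native HA install; the hostnames are placeholders and unrelated settings are omitted. The point is that `openshift_master_cluster_method` no longer needs to be set, while the cluster hostname variables remain:

    [OSEv3:children]
    masters
    nodes
    etcd
    lb

    [OSEv3:vars]
    # openshift_master_cluster_method=native no longer needs to be set; native is the only method.
    # openshift_master_cluster_hostname must resolve to the load balancer, or to the masters
    # if no lb group is defined.
    openshift_master_cluster_hostname=openshift-cluster.example.com
    openshift_master_cluster_public_hostname=openshift-cluster.example.com

    [masters]
    master1.example.com
    master2.example.com
    master3.example.com

    [etcd]
    master1.example.com
    master2.example.com
    master3.example.com

    [nodes]
    master1.example.com
    master2.example.com
    master3.example.com

    [lb]
    lb.example.com
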
diff --git a/inventory/byo/hosts.example b/inventory/byo/hosts.example
index 3a9944ba4..e3b56d7a1 100644
--- a/inventory/byo/hosts.example
+++ b/inventory/byo/hosts.example
@@ -298,24 +298,12 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
 # Set cockpit plugins
 #osm_cockpit_plugins=['cockpit-kubernetes']
 
-# Native high availability cluster method with optional load balancer.
+# Native high availability (default cluster method)
 # If no lb group is defined, the installer assumes that a load balancer has
 # been preconfigured. For installation the value of
 # openshift_master_cluster_hostname must resolve to the load balancer
 # or to one or all of the masters defined in the inventory if no load
 # balancer is present.
-#openshift_master_cluster_method=native
-#openshift_master_cluster_hostname=openshift-ansible.test.example.com
-#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com
-
-# Pacemaker high availability cluster method.
-# Pacemaker HA environment must be able to self provision the
-# configured VIP. For installation openshift_master_cluster_hostname
-# must resolve to the configured VIP.
-#openshift_master_cluster_method=pacemaker
-#openshift_master_cluster_password=openshift_cluster
-#openshift_master_cluster_vip=192.168.133.25
-#openshift_master_cluster_public_vip=192.168.133.25
 #openshift_master_cluster_hostname=openshift-ansible.test.example.com
 #openshift_master_cluster_public_hostname=openshift-ansible.test.example.com
diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml
index 0c2a2c7e8..ed7a7bd1a 100644
--- a/playbooks/adhoc/uninstall.yml
+++ b/playbooks/adhoc/uninstall.yml
@@ -62,7 +62,6 @@
     - origin-master
     - origin-master-api
     - origin-master-controllers
-    - pcsd
     failed_when: false
 
 - hosts: etcd
@@ -384,8 +383,6 @@
     - origin-excluder
     - origin-docker-excluder
     - origin-master
-    - pacemaker
-    - pcs
     register: result
     until: result | success
 
@@ -456,8 +453,6 @@
     - /etc/sysconfig/origin-master-api
     - /etc/sysconfig/origin-master-controllers
     - /usr/share/openshift/examples
-    - /var/lib/pacemaker
-    - /var/lib/pcsd
     - /usr/lib/systemd/system/atomic-openshift-master-api.service
     - /usr/lib/systemd/system/atomic-openshift-master-controllers.service
     - /usr/lib/systemd/system/origin-master-api.service
diff --git a/playbooks/openshift-master/private/additional_config.yml b/playbooks/openshift-master/private/additional_config.yml
index b7cfbe4e4..a90cd6b22 100644
--- a/playbooks/openshift-master/private/additional_config.yml
+++ b/playbooks/openshift-master/private/additional_config.yml
@@ -19,8 +19,6 @@
     openshift_master_ha: "{{ groups.oo_masters | length > 1 }}"
     omc_cluster_hosts: "{{ groups.oo_masters | join(' ')}}"
   roles:
-  - role: openshift_master_cluster
-    when: openshift_master_ha | bool and openshift.master.cluster_method == "pacemaker"
   - role: openshift_project_request_template
     when: openshift_project_request_template_manage
   - role: openshift_examples
diff --git a/playbooks/openshift-master/private/tasks/wire_aggregator.yml b/playbooks/openshift-master/private/tasks/wire_aggregator.yml
index 97acc5d5d..ecf8f15d9 100644
--- a/playbooks/openshift-master/private/tasks/wire_aggregator.yml
+++ b/playbooks/openshift-master/private/tasks/wire_aggregator.yml
@@ -183,7 +183,6 @@
   systemd: name={{ openshift.common.service_type }}-master-api state=restarted
   when:
   - yedit_output.changed
-  - openshift.master.cluster_method == 'native'
 
 # We retry the controllers because the API may not be 100% initialized yet.
 - name: restart master controllers
@@ -194,7 +193,6 @@
   until: result.rc == 0
   when:
   - yedit_output.changed
-  - openshift.master.cluster_method == 'native'
 
 - name: Verify API Server
   # Using curl here since the uri module requires python-httplib2 and
diff --git a/playbooks/openshift-master/private/validate_restart.yml b/playbooks/openshift-master/private/validate_restart.yml
index 5dbb21502..1077d0b9c 100644
--- a/playbooks/openshift-master/private/validate_restart.yml
+++ b/playbooks/openshift-master/private/validate_restart.yml
@@ -14,9 +14,6 @@
     - role: common
       local_facts:
         rolling_restart_mode: "{{ openshift_rolling_restart_mode | default('services') }}"
-    - role: master
-      local_facts:
-        cluster_method: "{{ openshift_master_cluster_method | default(None) }}"
 
 # Creating a temp file on localhost, we then check each system that will
 # be rebooted to see if that file exists, if so we know we're running
diff --git a/playbooks/openstack/advanced-configuration.md b/playbooks/openstack/advanced-configuration.md
index cb8af4a9e..f22243fbd 100644
--- a/playbooks/openstack/advanced-configuration.md
+++ b/playbooks/openstack/advanced-configuration.md
@@ -337,7 +337,6 @@ variables](https://docs.openshift.com/container-platform/3.6/install_config/inst
 in `inventory/group_vars/OSEv3.yml`. For example, given a load balancer node
 under the ansible group named `ext_lb`:
 
-    openshift_master_cluster_method: native
     openshift_master_cluster_hostname: "{{ groups.ext_lb.0 }}"
     openshift_master_cluster_public_hostname: "{{ groups.ext_lb.0 }}"
 
diff --git a/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml b/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml
index 68d898d9a..933117127 100644
--- a/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml
+++ b/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml
@@ -6,7 +6,6 @@ openshift_deployment_type: origin
 #openshift_release: v3.5
 openshift_master_default_subdomain: "apps.{{ openshift_openstack_clusterid }}.{{ openshift_openstack_public_dns_domain }}"
 
-openshift_master_cluster_method: native
 openshift_master_cluster_public_hostname: "console.{{ openshift_openstack_clusterid }}.{{ openshift_openstack_public_dns_domain }}"
 
 osm_default_node_selector: 'region=primary'
diff --git a/roles/nuage_master/handlers/main.yaml b/roles/nuage_master/handlers/main.yaml
index 410b739e9..cb83c8ead 100644
--- a/roles/nuage_master/handlers/main.yaml
+++ b/roles/nuage_master/handlers/main.yaml
@@ -3,8 +3,7 @@
   systemd: name={{ openshift.common.service_type }}-master-api state=restarted
   when: >
     (openshift_master_ha | bool) and
-    (not master_api_service_status_changed | default(false)) and
-    openshift.master.cluster_method == 'native'
+    (not master_api_service_status_changed | default(false))
 
 # TODO: need to fix up ignore_errors here
 # We retry the controllers because the API may not be 100% initialized yet.
@@ -16,6 +15,5 @@
   until: result.rc == 0
   when: >
     (openshift_master_ha | bool) and
-    (not master_controllers_service_status_changed | default(false)) and
-    openshift.master.cluster_method == 'native'
+    (not master_controllers_service_status_changed | default(false))
   ignore_errors: yes
diff --git a/roles/openshift_logging/handlers/main.yml b/roles/openshift_logging/handlers/main.yml
index 074b72942..e0329ee7c 100644
--- a/roles/openshift_logging/handlers/main.yml
+++ b/roles/openshift_logging/handlers/main.yml
@@ -1,7 +1,7 @@
 ---
 - name: restart master api
   systemd: name={{ openshift.common.service_type }}-master-api state=restarted
-  when: (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
+  when: (not (master_api_service_status_changed | default(false) | bool))
   notify: Verify API Server
 
 # We retry the controllers because the API may not be 100% initialized yet.
@@ -11,7 +11,7 @@
   delay: 5
   register: result
   until: result.rc == 0
-  when: (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
+  when: (not (master_controllers_service_status_changed | default(false) | bool))
 
 - name: Verify API Server
   # Using curl here since the uri module requires python-httplib2 and
diff --git a/roles/openshift_master/handlers/main.yml b/roles/openshift_master/handlers/main.yml
index 359536202..e6b8b8ac8 100644
--- a/roles/openshift_master/handlers/main.yml
+++ b/roles/openshift_master/handlers/main.yml
@@ -5,7 +5,6 @@
     state: restarted
   when:
   - not (master_api_service_status_changed | default(false) | bool)
-  - openshift.master.cluster_method == 'native'
   notify:
   - Verify API Server
 
@@ -18,7 +17,6 @@
   until: result.rc == 0
   when:
   - not (master_controllers_service_status_changed | default(false) | bool)
-  - openshift.master.cluster_method == 'native'
 
 - name: Verify API Server
   # Using curl here since the uri module requires python-httplib2 and
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index e52cd6231..5f4e6df71 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -11,25 +11,6 @@
   - openshift_master_oauth_grant_method is defined
   - openshift_master_oauth_grant_method not in openshift_master_valid_grant_methods
 
-# HA Variable Validation
-- fail:
-    msg: "openshift_master_cluster_method must be set to either 'native' or 'pacemaker' for multi-master installations"
-  when:
-  - openshift.master.ha | bool
-  - (openshift.master.cluster_method is not defined) or (openshift.master.cluster_method is defined and openshift.master.cluster_method not in ["native", "pacemaker"])
-- fail:
-    msg: "openshift_master_cluster_password must be set for multi-master installations"
-  when:
-  - openshift.master.ha | bool
-  - openshift.master.cluster_method == "pacemaker"
-  - openshift_master_cluster_password is not defined or not openshift_master_cluster_password
-- fail:
-    msg: "Pacemaker based HA is not supported at this time when used with containerized installs"
-  when:
-  - openshift.master.ha | bool
-  - openshift.master.cluster_method == "pacemaker"
-  - openshift.common.is_containerized | bool
-
 - name: Open up firewall ports
   import_tasks: firewall.yml
 
@@ -226,7 +207,6 @@
     enabled: yes
     state: started
   when:
-  - openshift.master.cluster_method == 'native'
   - inventory_hostname == openshift_master_hosts[0]
   register: l_start_result
   until: not l_start_result | failed
@@ -241,14 +221,12 @@
 - set_fact:
     master_api_service_status_changed: "{{ l_start_result | changed }}"
   when:
-  - openshift.master.cluster_method == 'native'
   - inventory_hostname == openshift_master_hosts[0]
 
 - pause:
     seconds: 15
   when:
   - openshift.master.ha | bool
-  - openshift.master.cluster_method == 'native'
 
 - name: Start and enable master api all masters
   systemd:
@@ -256,7 +234,6 @@
     enabled: yes
     state: started
   when:
-  - openshift.master.cluster_method == 'native'
   - inventory_hostname != openshift_master_hosts[0]
   register: l_start_result
   until: not l_start_result | failed
@@ -271,14 +248,12 @@
 - set_fact:
     master_api_service_status_changed: "{{ l_start_result | changed }}"
   when:
-  - openshift.master.cluster_method == 'native'
   - inventory_hostname != openshift_master_hosts[0]
 
 # A separate wait is required here for native HA since notifies will
 # be resolved after all tasks in the role.
 - include_tasks: check_master_api_is_ready.yml
   when:
-  - openshift.master.cluster_method == 'native'
   - master_api_service_status_changed | bool
 
 - name: Start and enable master controller service
@@ -286,8 +261,6 @@
     name: "{{ openshift.common.service_type }}-master-controllers"
     enabled: yes
     state: started
-  when:
-  - openshift.master.cluster_method == 'native'
   register: l_start_result
   until: not l_start_result | failed
   retries: 1
@@ -301,30 +274,6 @@
 - name: Set fact master_controllers_service_status_changed
   set_fact:
     master_controllers_service_status_changed: "{{ l_start_result | changed }}"
-  when:
-  - openshift.master.cluster_method == 'native'
-
-- name: Install cluster packages
-  package: name=pcs state=present
-  when:
-  - openshift.master.cluster_method == 'pacemaker'
-  - not openshift.common.is_containerized | bool
-  register: l_install_result
-  until: l_install_result | success
-
-- name: Start and enable cluster service
-  systemd:
-    name: pcsd
-    enabled: yes
-    state: started
-  when:
-  - openshift.master.cluster_method == 'pacemaker'
-  - not openshift.common.is_containerized | bool
-
-- name: Set the cluster user password
-  shell: echo {{ openshift_master_cluster_password | quote }} | passwd --stdin hacluster
-  when:
-  - l_install_result | changed
 
 - name: node bootstrap settings
   include_tasks: bootstrap.yml
diff --git a/roles/openshift_master/tasks/systemd_units.yml b/roles/openshift_master/tasks/systemd_units.yml
index 9d11ed574..ee76413e3 100644
--- a/roles/openshift_master/tasks/systemd_units.yml
+++ b/roles/openshift_master/tasks/systemd_units.yml
@@ -25,7 +25,6 @@
     state: absent
   ignore_errors: true
   when:
-  - openshift.master.cluster_method == "native"
  - not l_is_master_system_container | bool
 
 # This is the image used for both HA and non-HA clusters:
@@ -43,7 +42,6 @@
     src: "{{ ha_svc_template_path }}/atomic-openshift-master-{{ item }}.service.j2"
     dest: "{{ containerized_svc_dir }}/{{ openshift.common.service_type }}-master-{{ item }}.service"
   when:
-  - openshift.master.cluster_method == "native"
   - not l_is_master_system_container | bool
   with_items:
   - api
@@ -63,22 +61,17 @@
   - api
   - controllers
   when:
-  - openshift.master.cluster_method == "native"
   - not l_is_master_system_container | bool
 
 - name: Preserve Master API Proxy Config options
   command: grep PROXY /etc/sysconfig/{{ openshift.common.service_type }}-master-api
   register: l_master_api_proxy
-  when:
-  - openshift.master.cluster_method == "native"
   failed_when: false
   changed_when: false
 
 - name: Preserve Master API AWS options
   command: grep AWS_ /etc/sysconfig/{{ openshift.common.service_type }}-master-api
   register: master_api_aws
-  when:
-  - openshift.master.cluster_method == "native"
   failed_when: false
   changed_when: false
 
@@ -87,14 +80,11 @@
     src: "{{ ha_svc_template_path }}/atomic-openshift-master-api.j2"
     dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-api
     backup: true
-  when:
-  - openshift.master.cluster_method == "native"
   notify:
   - restart master api
 
 - name: Restore Master API Proxy Config Options
   when:
-  - openshift.master.cluster_method == "native"
   - l_master_api_proxy.rc == 0
   - "'http_proxy' not in openshift.common"
   - "'https_proxy' not in openshift.common"
@@ -105,7 +95,6 @@
 
 - name: Restore Master API AWS Options
   when:
-  - openshift.master.cluster_method == "native"
   - master_api_aws.rc == 0
   - not (openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined)
   lineinfile:
@@ -117,16 +106,12 @@
 
 - name: Preserve Master Controllers Proxy Config options
   command: grep PROXY /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
   register: master_controllers_proxy
-  when:
-  - openshift.master.cluster_method == "native"
   failed_when: false
   changed_when: false
 
 - name: Preserve Master Controllers AWS options
   command: grep AWS_ /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
   register: master_controllers_aws
-  when:
-  - openshift.master.cluster_method == "native"
   failed_when: false
   changed_when: false
 
@@ -135,8 +120,6 @@
     src: "{{ ha_svc_template_path }}/atomic-openshift-master-controllers.j2"
     dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
     backup: true
-  when:
-  - openshift.master.cluster_method == "native"
   notify:
   - restart master controllers
 
@@ -146,7 +129,6 @@
     line: "{{ item }}"
   with_items: "{{ master_controllers_proxy.stdout_lines | default([]) }}"
   when:
-  - openshift.master.cluster_method == "native"
   - master_controllers_proxy.rc == 0
   - "'http_proxy' not in openshift.common"
   - "'https_proxy' not in openshift.common"
@@ -157,6 +139,5 @@
     line: "{{ item }}"
   with_items: "{{ master_controllers_aws.stdout_lines | default([]) }}"
   when:
-  - openshift.master.cluster_method == "native"
   - master_controllers_aws.rc == 0
   - not (openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined)
diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2
index f9f57bc72..92668b227 100644
--- a/roles/openshift_master/templates/master.yaml.v1.j2
+++ b/roles/openshift_master/templates/master.yaml.v1.j2
@@ -120,7 +120,7 @@ kubernetesMasterConfig:
     - application/vnd.kubernetes.protobuf
 {% endif %}
   controllerArguments: {{ openshift.master.controller_args | default(None) | to_padded_yaml( level=2 ) }}
-  masterCount: {{ openshift.master.master_count if openshift.master.cluster_method | default(None) == 'native' else 1 }}
+  masterCount: {{ openshift.master.master_count }}
   masterIP: {{ openshift.common.ip }}
   podEvictionTimeout: {{ openshift.master.pod_eviction_timeout | default("") }}
   proxyClientInfo:
diff --git a/roles/openshift_master_cluster/README.md b/roles/openshift_master_cluster/README.md
deleted file mode 100644
index 58dd19ac3..000000000
--- a/roles/openshift_master_cluster/README.md
+++ /dev/null
@@ -1,34 +0,0 @@
-OpenShift Master Cluster
-========================
-
-TODO
-
-Requirements
-------------
-
-* Ansible 2.2
-
-Role Variables
---------------
-
-TODO
-
-Dependencies
-------------
-
-TODO
-
-Example Playbook
-----------------
-
-TODO
-
-License
--------
-
-Apache License Version 2.0
-
-Author Information
-------------------
-
-Jason DeTiberus (jdetiber@redhat.com)
diff --git a/roles/openshift_master_cluster/meta/main.yml b/roles/openshift_master_cluster/meta/main.yml
deleted file mode 100644
index c452b165e..000000000
--- a/roles/openshift_master_cluster/meta/main.yml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-galaxy_info:
-  author: Jason DeTiberus
-  description:
-  company: Red Hat, Inc.
-  license: Apache License, Version 2.0
-  min_ansible_version: 2.2
-  platforms:
-  - name: EL
-    versions:
-    - 7
-  categories:
-  - cloud
-  - system
-dependencies: []
diff --git a/roles/openshift_master_cluster/tasks/configure.yml b/roles/openshift_master_cluster/tasks/configure.yml
deleted file mode 100644
index 1b94598dd..000000000
--- a/roles/openshift_master_cluster/tasks/configure.yml
+++ /dev/null
@@ -1,43 +0,0 @@
----
-- fail:
-    msg: This role requires that openshift_master_cluster_vip is set
-  when: openshift_master_cluster_vip is not defined or not openshift_master_cluster_vip
-- fail:
-    msg: This role requires that openshift_master_cluster_public_vip is set
-  when: openshift_master_cluster_public_vip is not defined or not openshift_master_cluster_public_vip
-
-- name: Authenticate to the cluster
-  command: pcs cluster auth -u hacluster -p {{ openshift_master_cluster_password }} {{ omc_cluster_hosts }}
-
-- name: Create the cluster
-  command: pcs cluster setup --name openshift_master {{ omc_cluster_hosts }}
-
-- name: Start the cluster
-  command: pcs cluster start --all
-
-- name: Enable the cluster on all nodes
-  command: pcs cluster enable --all
-
-- name: Set default resource stickiness
-  command: pcs resource defaults resource-stickiness=100
-
-- name: Add the cluster VIP resource
-  command: pcs resource create virtual-ip IPaddr2 ip={{ openshift_master_cluster_vip }} --group {{ openshift.common.service_type }}-master
-
-- name: Add the cluster public VIP resource
-  command: pcs resource create virtual-ip IPaddr2 ip={{ openshift_master_cluster_public_vip }} --group {{ openshift.common.service_type }}-master
-  when: openshift_master_cluster_public_vip != openshift_master_cluster_vip
-
-- name: Add the cluster master service resource
-  command: pcs resource create master systemd:{{ openshift.common.service_type }}-master op start timeout=90s stop timeout=90s --group {{ openshift.common.service_type }}-master
-
-- name: Disable stonith
-  command: pcs property set stonith-enabled=false
-
-- name: Wait for the clustered master service to be available
-  wait_for:
-    host: "{{ openshift_master_cluster_vip }}"
-    port: "{{ openshift.master.api_port }}"
-    state: started
-    timeout: 180
-    delay: 90
diff --git a/roles/openshift_master_cluster/tasks/main.yml b/roles/openshift_master_cluster/tasks/main.yml
deleted file mode 100644
index 41bfc72cb..000000000
--- a/roles/openshift_master_cluster/tasks/main.yml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-- fail:
-    msg: "Not possible on atomic hosts for now"
-  when: openshift.common.is_containerized | bool
-
-- name: Test if cluster is already configured
-  command: pcs status
-  register: pcs_status
-  changed_when: false
-  failed_when: false
-  when: openshift.master.cluster_method == "pacemaker"
-
-- include_tasks: configure.yml
-  when: "pcs_status | failed and 'Error: cluster is not currently running on this node' in pcs_status.stderr"
diff --git a/roles/openshift_master_facts/filter_plugins/openshift_master.py b/roles/openshift_master_facts/filter_plugins/openshift_master.py
index c827f2d26..ff15f693b 100644
--- a/roles/openshift_master_facts/filter_plugins/openshift_master.py
+++ b/roles/openshift_master_facts/filter_plugins/openshift_master.py
@@ -485,31 +485,6 @@ class FilterModule(object):
                            Dumper=AnsibleDumper))
 
     @staticmethod
-    def validate_pcs_cluster(data, masters=None):
-        ''' Validates output from "pcs status", ensuring that each master
-            provided is online.
-            Ex: data = ('...',
-                        'PCSD Status:',
-                        'master1.example.com: Online',
-                        'master2.example.com: Online',
-                        'master3.example.com: Online',
-                        '...')
-                masters = ['master1.example.com',
-                           'master2.example.com',
-                           'master3.example.com']
-               returns True
-        '''
-        if not issubclass(type(data), string_types):
-            raise errors.AnsibleFilterError("|failed expects data is a string or unicode")
-        if not issubclass(type(masters), list):
-            raise errors.AnsibleFilterError("|failed expects masters is a list")
-        valid = True
-        for master in masters:
-            if "{0}: Online".format(master) not in data:
-                valid = False
-        return valid
-
-    @staticmethod
     def certificates_to_synchronize(hostvars, include_keys=True, include_ca=True):
         ''' Return certificates to synchronize based on facts. '''
         if not issubclass(type(hostvars), dict):
@@ -553,6 +528,5 @@ class FilterModule(object):
     def filters(self):
         ''' returns a mapping of filters to methods '''
         return {"translate_idps": self.translate_idps,
-                "validate_pcs_cluster": self.validate_pcs_cluster,
                 "certificates_to_synchronize": self.certificates_to_synchronize,
                 "oo_htpasswd_users_from_file": self.oo_htpasswd_users_from_file}
diff --git a/roles/openshift_master_facts/tasks/main.yml b/roles/openshift_master_facts/tasks/main.yml
index 1d367eb9f..0cb87dcaa 100644
--- a/roles/openshift_master_facts/tasks/main.yml
+++ b/roles/openshift_master_facts/tasks/main.yml
@@ -25,7 +25,6 @@
   openshift_facts:
     role: master
     local_facts:
-      cluster_method: "{{ openshift_master_cluster_method | default('native') }}"
       cluster_hostname: "{{ openshift_master_cluster_hostname | default(None) }}"
       cluster_public_hostname: "{{ openshift_master_cluster_public_hostname | default(None) }}"
       api_port: "{{ openshift_master_api_port | default(None) }}"
diff --git a/roles/openshift_metrics/handlers/main.yml b/roles/openshift_metrics/handlers/main.yml
index 074b72942..e0329ee7c 100644
--- a/roles/openshift_metrics/handlers/main.yml
+++ b/roles/openshift_metrics/handlers/main.yml
@@ -1,7 +1,7 @@
 ---
 - name: restart master api
   systemd: name={{ openshift.common.service_type }}-master-api state=restarted
-  when: (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
+  when: (not (master_api_service_status_changed | default(false) | bool))
   notify: Verify API Server
 
 # We retry the controllers because the API may not be 100% initialized yet.
@@ -11,7 +11,7 @@
   delay: 5
   register: result
   until: result.rc == 0
-  when: (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
+  when: (not (master_controllers_service_status_changed | default(false) | bool))
 
 - name: Verify API Server
   # Using curl here since the uri module requires python-httplib2 and
diff --git a/utils/src/ooinstall/openshift_ansible.py b/utils/src/ooinstall/openshift_ansible.py
index 1e2af2c61..dda8eb4c6 100644
--- a/utils/src/ooinstall/openshift_ansible.py
+++ b/utils/src/ooinstall/openshift_ansible.py
@@ -125,7 +125,6 @@ def write_inventory_vars(base_inventory, lb):
     base_inventory.write('openshift_override_hostname_check=true\n')
 
     if lb is not None:
-        base_inventory.write('openshift_master_cluster_method=native\n')
         base_inventory.write("openshift_master_cluster_hostname={}\n".format(lb.hostname))
         base_inventory.write(
             "openshift_master_cluster_public_hostname={}\n".format(lb.public_hostname))
@@ -266,7 +265,6 @@ def default_facts(hosts, verbose=False):
     facts_env = os.environ.copy()
     facts_env["OO_INSTALL_CALLBACK_FACTS_YAML"] = CFG.settings['ansible_callback_facts_yaml']
     facts_env["ANSIBLE_CALLBACK_PLUGINS"] = CFG.settings['ansible_plugins_directory']
-    facts_env["OPENSHIFT_MASTER_CLUSTER_METHOD"] = 'native'
     if 'ansible_log_path' in CFG.settings:
         facts_env["ANSIBLE_LOG_PATH"] = CFG.settings['ansible_log_path']
     if 'ansible_config' in CFG.settings:
