Diffstat (limited to 'playbooks')
86 files changed, 1761 insertions, 856 deletions
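Two changes account for most of the churn below. First, playbooks/adhoc/uninstall.yml is split from a single play against all OSEv3 hosts into separate plays per host group (nodes, masters, etcd, lb), each stopping its own services, removing its own packages (skipped on Atomic hosts), and deleting its own files. Second, many byo/ and aws/ playbooks gain the same boilerplate: a localhost play loads cluster_hosts.yml and copies every member of g_all_hosts into a new l_oo_all_hosts group, and a second play targets that group so the included common playbook sees the same variables on every host. A minimal sketch of that pattern as it recurs in the diff (paths shortened; cluster_hosts.yml is assumed to define g_all_hosts, as the byo inventory does):

---
# Play 1: build the l_oo_all_hosts group from the inventory-defined g_all_hosts list
- hosts: localhost
  connection: local
  become: no
  gather_facts: no
  tasks:
  - include_vars: cluster_hosts.yml
  - add_host:
      name: "{{ item }}"
      groups: l_oo_all_hosts
    with_items: g_all_hosts | default([])

# Play 2: load the same vars on every cluster host before the common playbook runs
- hosts: l_oo_all_hosts
  gather_facts: no
  tasks:
  - include_vars: cluster_hosts.yml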
diff --git a/playbooks/adhoc/noc/roles b/playbooks/adhoc/noc/roles deleted file mode 120000 index 20c4c58cf..000000000
--- a/playbooks/adhoc/noc/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../roles
\ No newline at end of file diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml index 0755d8bc5..4edd44fe4 100644 --- a/playbooks/adhoc/uninstall.yml +++ b/playbooks/adhoc/uninstall.yml @@ -7,245 +7,369 @@  #    images  #    RPMs  --- -- hosts: -    - OSEv3:children +- hosts: OSEv3:children +  become: yes +  tasks: +  - name: Detecting Operating System +    shell: ls /run/ostree-booted +    ignore_errors: yes +    failed_when: false +    register: ostree_output + +  # Since we're not calling openshift_facts we'll do this for now +  - set_fact: +      is_atomic: "{{ ostree_output.rc == 0 }}" +  - set_fact: +      is_containerized: "{{ is_atomic or containerized | default(false) | bool }}" +- hosts: nodes    become: yes +  tasks: +  - name: Stop services +    service: name={{ item }} state=stopped +    with_items: +    - atomic-enterprise-node +    - atomic-openshift-node +    - openshift-node +    - openvswitch +    - origin-node +    failed_when: false + +  - name: unmask services +    command: systemctl unmask "{{ item }}" +    changed_when: False +    failed_when: False +    with_items: +    - firewalld + +  - name: Remove packages +    action: "{{ ansible_pkg_mgr }} name={{ item }} state=absent" +    when: not is_atomic | bool +    with_items: +    - atomic-enterprise +    - atomic-enterprise-node +    - atomic-enterprise-sdn-ovs +    - atomic-openshift +    - atomic-openshift-clients +    - atomic-openshift-node +    - atomic-openshift-sdn-ovs +    - cockpit-bridge +    - cockpit-docker +    - cockpit-shell +    - cockpit-ws +    - kubernetes-client +    - openshift +    - openshift-node +    - openshift-sdn +    - openshift-sdn-ovs +    - openvswitch +    - origin +    - origin-clients +    - origin-node +    - origin-sdn-ovs +    - tuned-profiles-atomic-enterprise-node +    - tuned-profiles-atomic-openshift-node +    - tuned-profiles-openshift-node +    - tuned-profiles-origin-node + +  - shell: systemctl reset-failed +    changed_when: False + +  - shell: systemctl daemon-reload +    changed_when: False + +  - name: Remove br0 interface +    shell: ovs-vsctl del-br br0 +    changed_when: False +    failed_when: False + +  - name: Remove linux interfaces +    shell: ip link del "{{ item }}" +    changed_when: False +    failed_when: False +    with_items: +    - lbr0 +    - vlinuxbr +    - vovsbr + +  - name: restart docker +    service: name=docker state=restarted + +  - name: restart NetworkManager +    service: name=NetworkManager state=restarted + +  - shell: find /var/lib/origin/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true +    changed_when: False + +  - shell: find /var/lib/atomic-enterprise/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true +    changed_when: False + +  - shell: find /var/lib/openshift/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true +    changed_when: False + +  - shell: docker rm -f "{{ item }}"-master "{{ item }}"-node +    changed_when: False +    failed_when: False +    with_items: +    - openshift-enterprise +    - atomic-enterprise +    - origin + +  - shell: docker ps -a | grep Exited | egrep "{{ item }}" | awk '{print $1}' +    changed_when: False +    failed_when: False +    register: exited_containers_to_delete +    with_items: +    - aep3.*/aep +    - aep3.*/node +    - aep3.*/openvswitch +    - openshift3/ose +    - openshift3/node +    - openshift3/openvswitch +    - openshift/origin + +  - shell: "docker rm {{ item.stdout_lines | join(' ') }}" +    
changed_when: False +    failed_when: False +    with_items: "{{ exited_containers_to_delete.results }}" + +  - shell: docker images | egrep {{ item }} | awk '{ print $3 }' +    changed_when: False +    failed_when: False +    register: images_to_delete +    with_items: +    - registry\.access\..*redhat\.com/openshift3 +    - registry\.access\..*redhat\.com/aep3 +    - registry\.qe\.openshift\.com/.* +    - registry\.access\..*redhat\.com/rhel7/etcd +    - docker.io/openshift +    when: openshift_uninstall_images | default(True) | bool + +  - shell:  "docker rmi -f {{ item.stdout_lines | join(' ') }}" +    changed_when: False +    failed_when: False +    with_items: "{{ images_to_delete.results }}" +    when: openshift_uninstall_images | default(True) | bool + +  - name: Remove sdn drop files +    file: +      path: /run/openshift-sdn +      state: absent + +  - name: Remove remaining files +    file: path={{ item }} state=absent +    with_items: +    - /etc/ansible/facts.d/openshift.fact +    - /etc/atomic-enterprise +    - /etc/openshift +    - /etc/openshift-sdn +    - /etc/origin +    - /etc/systemd/system/atomic-openshift-node.service +    - /etc/systemd/system/atomic-openshift-node-dep.service +    - /etc/systemd/system/origin-node.service +    - /etc/systemd/system/origin-node-dep.service +    - /etc/systemd/system/openvswitch.service +    - /etc/sysconfig/atomic-enterprise-node +    - /etc/sysconfig/atomic-openshift-node +    - /etc/sysconfig/atomic-openshift-node-dep +    - /etc/sysconfig/origin-node +    - /etc/sysconfig/origin-node-dep +    - /etc/sysconfig/openshift-node +    - /etc/sysconfig/openshift-node-dep +    - /etc/sysconfig/openvswitch +    - /etc/sysconfig/origin-node +    - /etc/systemd/system/atomic-openshift-node.service.wants +    - /run/openshift-sdn +    - /var/lib/atomic-enterprise +    - /var/lib/openshift +    - /var/lib/origin +    - /etc/NetworkManager/dispatcher.d/99-origin-dns.sh +    - /etc/dnsmasq.d/origin-dns.conf +    - /etc/dnsmasq.d/origin-upstream-dns.conf +- hosts: masters +  become: yes    tasks: -    - name: Detecting Operating System -      shell: ls /run/ostree-booted -      ignore_errors: yes -      failed_when: false -      register: ostree_output - -      # Since we're not calling openshift_facts we'll do this for now -    - set_fact: -        is_atomic: "{{ ostree_output.rc == 0 }}" -    - set_fact: -        is_containerized: "{{ is_atomic or containerized | default(false) | bool }}" - -    - name: Remove br0 interface -      shell: ovs-vsctl del-br br0 -      changed_when: False -      failed_when: False - -    - name: Stop services -      service: name={{ item }} state=stopped -      with_items: -        - atomic-enterprise-master -        - atomic-enterprise-node -        - atomic-openshift-master -        - atomic-openshift-master-api -        - atomic-openshift-master-controllers -        - atomic-openshift-node -        - etcd -        - haproxy -        - openshift-master -        - openshift-master-api -        - openshift-master-controllers -        - openshift-node -        - openvswitch -        - origin-master -        - origin-master-api -        - origin-master-controllers -        - origin-node -        - pcsd -      failed_when: false - -    - name: unmask services -      command: systemctl unmask "{{ item }}" -      changed_when: False -      failed_when: False -      with_items: -        - etcd -        - firewalld - -    - name: Stop additional atomic services -      service: name={{ item }} state=stopped -      when: 
is_containerized | bool -      with_items: -        - etcd_container -      failed_when: false - -    - name: Remove packages -      action: "{{ ansible_pkg_mgr }} name={{ item }} state=absent" -      when: not is_atomic | bool -      with_items: -        - atomic-enterprise -        - atomic-enterprise-master -        - atomic-enterprise-node -        - atomic-enterprise-sdn-ovs -        - atomic-openshift -        - atomic-openshift-clients -        - atomic-openshift-master -        - atomic-openshift-node -        - atomic-openshift-sdn-ovs -        - cockpit-bridge -        - cockpit-docker -        - cockpit-shell -        - cockpit-ws -        - corosync -        - etcd -        - haproxy -        - kubernetes-client -        - openshift -        - openshift-master -        - openshift-node -        - openshift-sdn -        - openshift-sdn-ovs -        - openvswitch -        - origin -        - origin-clients -        - origin-master -        - origin-node -        - origin-sdn-ovs -        - pacemaker -        - pcs -        - tuned-profiles-atomic-enterprise-node -        - tuned-profiles-atomic-openshift-node -        - tuned-profiles-openshift-node -        - tuned-profiles-origin-node - -    - name: Remove linux interfaces -      shell: ip link del "{{ item }}" -      changed_when: False -      failed_when: False -      with_items: -        - lbr0 -        - vlinuxbr -        - vovsbr - -    - shell: systemctl reset-failed -      changed_when: False - -    - shell: systemctl daemon-reload -      changed_when: False - -    - shell: find /var/lib/origin/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true -      changed_when: False - -    - shell: find /var/lib/atomic-enterprise/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true -      changed_when: False - -    - shell: find /var/lib/openshift/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true -      changed_when: False - -    - shell: docker rm -f "{{ item }}"-master "{{ item }}"-node -      changed_when: False -      failed_when: False -      with_items: -        - openshift-enterprise -        - atomic-enterprise -        - origin - -    - shell: docker ps -a | grep Exited | egrep "{{ item }}" | awk '{print $1}' -      changed_when: False -      failed_when: False -      register: exited_containers_to_delete -      with_items: -        - aep3.*/aep -        - aep3.*/node -        - aep3.*/openvswitch -        - openshift3/ose -        - openshift3/node -        - openshift3/openvswitch -        - openshift/origin - -    - shell: "docker rm {{ item.stdout_lines | join(' ') }}" -      changed_when: False -      failed_when: False -      with_items: "{{ exited_containers_to_delete.results }}" - -    - shell: docker images | egrep {{ item }} | awk '{ print $3 }' -      changed_when: False -      failed_when: False -      register: images_to_delete -      with_items: -        - registry\.access\..*redhat\.com/openshift3 -        - registry\.access\..*redhat\.com/aep3 -        - registry\.qe\.openshift\.com/.* -        - registry\.access\..*redhat\.com/rhel7/etcd -        - docker.io/openshift - -    - shell:  "docker rmi -f {{ item.stdout_lines | join(' ') }}" -      changed_when: False -      failed_when: False -      with_items: "{{ images_to_delete.results }}" -     -    - name: Remove sdn drop files -      file:  -        path: /run/openshift-sdn -        state: absent -         -    - name: restart docker -      service: -        name: docker -        state: restarted 
- -    - name: Remove remaining files -      file: path={{ item }} state=absent -      with_items: -        - "~{{ ansible_ssh_user }}/.kube" -        - /etc/ansible/facts.d/openshift.fact -        - /etc/atomic-enterprise -        - /etc/corosync -        - /etc/etcd -        - /etc/openshift -        - /etc/openshift-sdn -        - /etc/origin -        - /etc/systemd/system/atomic-openshift-master.service -        - /etc/systemd/system/atomic-openshift-master-api.service -        - /etc/systemd/system/atomic-openshift-master-controllers.service -        - /etc/systemd/system/atomic-openshift-node.service -        - /etc/systemd/system/etcd_container.service -        - /etc/systemd/system/openvswitch.service -        - /etc/sysconfig/atomic-enterprise-master -        - /etc/sysconfig/atomic-enterprise-master-api -        - /etc/sysconfig/atomic-enterprise-master-controllers -        - /etc/sysconfig/atomic-enterprise-node -        - /etc/sysconfig/atomic-openshift-master -        - /etc/sysconfig/atomic-openshift-master-api -        - /etc/sysconfig/atomic-openshift-master-controllers -        - /etc/sysconfig/atomic-openshift-node -        - /etc/sysconfig/openshift-master -        - /etc/sysconfig/openshift-node -        - /etc/sysconfig/openvswitch -        - /etc/sysconfig/origin-master -        - /etc/sysconfig/origin-master-api -        - /etc/sysconfig/origin-master-controllers -        - /etc/sysconfig/origin-node -        - /etc/systemd/system/atomic-openshift-node.service.wants -        - /root/.kube -        - /run/openshift-sdn -        - /usr/share/openshift/examples -        - /var/lib/atomic-enterprise -        - /var/lib/etcd -        - /var/lib/openshift -        - /var/lib/origin -        - /var/lib/pacemaker -        - /usr/lib/systemd/system/atomic-openshift-master-api.service -        - /usr/lib/systemd/system/atomic-openshift-master-controllers.service -        - /usr/lib/systemd/system/origin-master-api.service -        - /usr/lib/systemd/system/origin-master-controllers.service -        - /usr/local/bin/openshift -        - /usr/local/bin/oadm -        - /usr/local/bin/oc -        - /usr/local/bin/kubectl -        - /etc/NetworkManager/dispatcher.d/99-origin-dns.sh -        - /etc/dnsmasq.d/origin-dns.conf -        - /etc/dnsmasq.d/origin-upstream-dns.conf - -    # Since we are potentially removing the systemd unit files for separated -    # master-api and master-controllers services, so we need to reload the -    # systemd configuration manager -    - name: Reload systemd manager configuration -      command: systemctl daemon-reload +  - name: Stop services +    service: name={{ item }} state=stopped +    with_items: +    - atomic-enterprise-master +    - atomic-openshift-master +    - atomic-openshift-master-api +    - atomic-openshift-master-controllers +    - openshift-master +    - openshift-master-api +    - openshift-master-controllers +    - origin-master +    - origin-master-api +    - origin-master-controllers +    - pcsd +    failed_when: false -- hosts: nodes +  - name: unmask services +    command: systemctl unmask "{{ item }}" +    changed_when: False +    failed_when: False +    with_items: +    - firewalld +    - atomic-openshift-master + +  - name: Remove packages +    action: "{{ ansible_pkg_mgr }} name={{ item }} state=absent" +    when: not is_atomic | bool +    with_items: +    - atomic-enterprise +    - atomic-enterprise-master +    - atomic-openshift +    - atomic-openshift-clients +    - atomic-openshift-master +    - cockpit-bridge +    - 
cockpit-docker +    - cockpit-shell +    - cockpit-ws +    - corosync +    - kubernetes-client +    - openshift +    - openshift-master +    - origin +    - origin-clients +    - origin-master +    - pacemaker +    - pcs + +  - shell: systemctl reset-failed +    changed_when: False + +  - shell: systemctl daemon-reload +    changed_when: False + +  - name: Remove remaining files +    file: path={{ item }} state=absent +    with_items: +    - "~{{ ansible_ssh_user }}/.kube" +    - /etc/ansible/facts.d/openshift.fact +    - /etc/atomic-enterprise +    - /etc/corosync +    - /etc/openshift +    - /etc/openshift-sdn +    - /etc/origin +    - /etc/systemd/system/atomic-openshift-master.service +    - /etc/systemd/system/atomic-openshift-master-api.service +    - /etc/systemd/system/atomic-openshift-master-controllers.service +    - /etc/systemd/system/origin-master.service +    - /etc/systemd/system/origin-master-api.service +    - /etc/systemd/system/origin-master-controllers.service +    - /etc/systemd/system/openvswitch.service +    - /etc/sysconfig/atomic-enterprise-master +    - /etc/sysconfig/atomic-enterprise-master-api +    - /etc/sysconfig/atomic-enterprise-master-controllers +    - /etc/sysconfig/atomic-openshift-master +    - /etc/sysconfig/atomic-openshift-master-api +    - /etc/sysconfig/atomic-openshift-master-controllers +    - /etc/sysconfig/origin-master +    - /etc/sysconfig/origin-master-api +    - /etc/sysconfig/origin-master-controllers +    - /etc/sysconfig/openshift-master +    - /etc/sysconfig/openvswitch +    - /etc/sysconfig/origin-master +    - /etc/sysconfig/origin-master-api +    - /etc/sysconfig/origin-master-controllers +    - /root/.kube +    - /usr/share/openshift/examples +    - /var/lib/atomic-enterprise +    - /var/lib/openshift +    - /var/lib/origin +    - /var/lib/pacemaker +    - /var/lib/pcsd +    - /usr/lib/systemd/system/atomic-openshift-master-api.service +    - /usr/lib/systemd/system/atomic-openshift-master-controllers.service +    - /usr/lib/systemd/system/origin-master-api.service +    - /usr/lib/systemd/system/origin-master-controllers.service +    - /usr/local/bin/openshift +    - /usr/local/bin/oadm +    - /usr/local/bin/oc +    - /usr/local/bin/kubectl + +  # Since we are potentially removing the systemd unit files for separated +  # master-api and master-controllers services, so we need to reload the +  # systemd configuration manager +  - name: Reload systemd manager configuration +    command: systemctl daemon-reload + +- hosts: etcd +  become: yes +  tasks: +  - name: Stop services +    service: name={{ item }} state=stopped +    with_items: +    - etcd +    failed_when: false + +  - name: unmask services +    command: systemctl unmask "{{ item }}" +    changed_when: False +    failed_when: False +    with_items: +    - etcd +    - firewalld + +  - name: Stop additional atomic services +    service: name={{ item }} state=stopped +    when: is_containerized | bool +    with_items: +    - etcd_container +    failed_when: false + +  - name: Remove packages +    action: "{{ ansible_pkg_mgr }} name={{ item }} state=absent" +    when: not is_atomic | bool +    with_items: +    - etcd + +  - shell: systemctl reset-failed +    changed_when: False + +  - shell: systemctl daemon-reload +    changed_when: False + +  - name: Remove remaining files +    file: path={{ item }} state=absent +    with_items: +    - /etc/ansible/facts.d/openshift.fact +    - /etc/etcd +    - /etc/systemd/system/etcd_container.service +    - /var/lib/etcd + +- hosts: lb    
become: yes    tasks: -    - name: restart docker -      service: name=docker state=restarted -    - name: restart NetworkManager -      service: name=NetworkManager state=restarted +  - name: Stop services +    service: name={{ item }} state=stopped +    with_items: +    - haproxy +    failed_when: false + +  - name: unmask services +    command: systemctl unmask "{{ item }}" +    changed_when: False +    failed_when: False +    with_items: +    - firewalld + +  - name: Remove packages +    action: "{{ ansible_pkg_mgr }} name={{ item }} state=absent" +    when: not is_atomic | bool +    with_items: +    - haproxy + +  - shell: systemctl reset-failed +    changed_when: False + +  - shell: systemctl daemon-reload +    changed_when: False + +  - name: Remove remaining files +    file: path={{ item }} state=absent +    with_items: +    - /etc/ansible/facts.d/openshift.fact +    - /var/lib/haproxy diff --git a/playbooks/aws/ansible-tower/config.yml b/playbooks/aws/ansible-tower/config.yml deleted file mode 100644 index eb3f1a1da..000000000 --- a/playbooks/aws/ansible-tower/config.yml +++ /dev/null @@ -1,24 +0,0 @@ ---- -- name: "populate oo_hosts_to_config host group if needed" -  hosts: localhost -  gather_facts: no -  connection: local -  become: no -  tasks: -  - name: Evaluate oo_host_group_exp if it's set -    add_host: "name={{ item }} groups=oo_hosts_to_config" -    with_items: "{{ oo_host_group_exp | default(['']) }}" -    when: oo_host_group_exp is defined - -- name: "Configure instances" -  hosts: oo_hosts_to_config -  connection: ssh -  user: root -  vars_files: -    - vars.yml -    - "vars.{{ oo_env }}.yml" -  roles: -    - os_ipv6_disable -    - ansible -    - ansible_tower -    - os_env_extras diff --git a/playbooks/aws/ansible-tower/filter_plugins b/playbooks/aws/ansible-tower/filter_plugins deleted file mode 120000 index 99a95e4ca..000000000 --- a/playbooks/aws/ansible-tower/filter_plugins +++ /dev/null @@ -1 +0,0 @@ -../../../filter_plugins
\ No newline at end of file diff --git a/playbooks/aws/ansible-tower/launch.yml b/playbooks/aws/ansible-tower/launch.yml deleted file mode 100644 index d40529435..000000000 --- a/playbooks/aws/ansible-tower/launch.yml +++ /dev/null @@ -1,79 +0,0 @@ ---- -- name: Launch instance(s) -  hosts: localhost -  connection: local -  become: no -  gather_facts: no - -  vars: -    inst_region: us-east-1 -    rhel7_ami: ami-9101c8fa -    user_data_file: user_data.txt - -  vars_files: -    - vars.yml -    - "vars.{{ oo_env }}.yml" - -  tasks: -    - name: Launch instances in VPC -      ec2: -        state: present -        region: "{{ inst_region }}" -        keypair: mmcgrath_libra -        group_id: "{{ oo_security_group_ids }}" -        instance_type: c4.xlarge -        image: "{{ rhel7_ami }}" -        count: "{{ oo_new_inst_names | length }}" -        user_data: "{{ lookup('file', user_data_file) }}" -        wait: yes -        assign_public_ip: "{{ oo_assign_public_ip }}" -        vpc_subnet_id: "{{ oo_vpc_subnet_id }}" -      register: ec2 - -    - name: Add Name and environment tags to instances -      ec2_tag: "resource={{ item.1.id }} region={{ inst_region }} state=present" -      with_together: -        - oo_new_inst_names -        - ec2.instances -      args: -        tags: -          Name: "{{ item.0 }}" - -    - name: Add other tags to instances -      ec2_tag: "resource={{ item.id }} region={{ inst_region }} state=present" -      with_items: ec2.instances -      args: -        tags: "{{ oo_new_inst_tags }}" - -    - name: Add new instances public IPs to oo_hosts_to_config -      add_host: "hostname={{ item.0 }} ansible_ssh_host={{ item.1.public_ip }} groupname=oo_hosts_to_config" -      with_together: -        - oo_new_inst_names -        - ec2.instances - -    - debug: var=ec2 - -    - name: Wait for ssh -      wait_for: "port=22 host={{ item.public_ip }}" -      with_items: ec2.instances - -    - name: Wait for root user setup -      command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null root@{{ item.public_ip }} echo root user is setup" -      register: result -      until: result.rc == 0 -      retries: 20 -      delay: 10 -      with_items: ec2.instances - -- name: Initial setup -  hosts: oo_hosts_to_config -  user: root -  gather_facts: true - -  tasks: - -    - name: Update All Things -      action: "{{ ansible_pkg_mgr }} name=* state=latest" - -# Apply the configs, seprate so that just the configs can be run by themselves -- include: config.yml diff --git a/playbooks/aws/ansible-tower/roles b/playbooks/aws/ansible-tower/roles deleted file mode 120000 index 20c4c58cf..000000000 --- a/playbooks/aws/ansible-tower/roles +++ /dev/null @@ -1 +0,0 @@ -../../../roles
\ No newline at end of file diff --git a/playbooks/aws/ansible-tower/user_data.txt b/playbooks/aws/ansible-tower/user_data.txt deleted file mode 100644 index 643d17c32..000000000 --- a/playbooks/aws/ansible-tower/user_data.txt +++ /dev/null @@ -1,6 +0,0 @@ -#cloud-config -disable_root: 0 - -system_info: -  default_user: -    name: root diff --git a/playbooks/aws/ansible-tower/vars.ops.yml b/playbooks/aws/ansible-tower/vars.ops.yml deleted file mode 100644 index feb5d786a..000000000 --- a/playbooks/aws/ansible-tower/vars.ops.yml +++ /dev/null @@ -1,9 +0,0 @@ ---- -oo_env_long: operations -oo_zabbix_hostgroups: ['OPS Environment'] -oo_vpc_subnet_id: subnet-4f0bdd38  # USE OPS -oo_assign_public_ip: yes -oo_security_group_ids: -  - sg-02c2f267 # Libra (vpc) -  - sg-7fc4f41a # ops (vpc) -  - sg-4dc26829 # ops_tower (vpc) diff --git a/playbooks/aws/ansible-tower/vars.yml b/playbooks/aws/ansible-tower/vars.yml deleted file mode 100644 index ed97d539c..000000000 --- a/playbooks/aws/ansible-tower/vars.yml +++ /dev/null @@ -1 +0,0 @@ ---- diff --git a/playbooks/aws/openshift-cluster/add_nodes.yml b/playbooks/aws/openshift-cluster/add_nodes.yml index 3d88e6b23..0e8eb90c1 100644 --- a/playbooks/aws/openshift-cluster/add_nodes.yml +++ b/playbooks/aws/openshift-cluster/add_nodes.yml @@ -6,14 +6,9 @@    gather_facts: no    vars_files:    - vars.yml -  - ["vars.{{ deployment_type }}.{{ cluster_id }}.yml", vars.defaults.yml]    vars:      oo_extend_env: True    tasks: -  - fail: -      msg: Deployment type not supported for aws provider yet -    when: deployment_type == 'enterprise' -    - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml      vars:        type: "compute" diff --git a/playbooks/aws/openshift-cluster/config.yml b/playbooks/aws/openshift-cluster/config.yml index f9b367b97..71ce9e787 100644 --- a/playbooks/aws/openshift-cluster/config.yml +++ b/playbooks/aws/openshift-cluster/config.yml @@ -1,7 +1,20 @@ +- hosts: localhost +  gather_facts: no +  tasks: +  - include_vars: vars.yml +  - include_vars: cluster_hosts.yml +  - add_host: +      name: "{{ item }}" +      groups: l_oo_all_hosts +    with_items: g_all_hosts + +- hosts: l_oo_all_hosts +  gather_facts: no +  tasks: +  - include_vars: vars.yml +  - include_vars: cluster_hosts.yml +  - include: ../../common/openshift-cluster/config.yml -  vars_files: -  - ../../aws/openshift-cluster/vars.yml -  - ../../aws/openshift-cluster/cluster_hosts.yml    vars:      g_ssh_user:     "{{ deployment_vars[deployment_type].ssh_user }}"      g_sudo:         "{{ deployment_vars[deployment_type].become }}" @@ -10,12 +23,14 @@      openshift_debug_level: "{{ debug_level }}"      openshift_deployment_type: "{{ deployment_type }}"      openshift_public_hostname: "{{ ec2_ip_address }}" -    openshift_registry_selector: 'type=infra' +    openshift_hosted_registry_selector: 'type=infra'      openshift_hosted_router_selector: 'type=infra' -    openshift_infra_nodes: "{{ g_infra_hosts }}" -    openshift_node_labels: '{"region": "{{ ec2_region }}", "type": "{{ hostvars[inventory_hostname]["ec2_tag_sub-host-type"] if inventory_hostname in groups["tag_host-type_node"] else hostvars[inventory_hostname]["ec2_tag_host-type"] }}"}' +    openshift_node_labels: +      region: "{{ deployment_vars[deployment_type].region }}" +      type: "{{ hostvars[inventory_hostname]['ec2_tag_sub-host-type'] if inventory_hostname in groups['tag_host-type_node'] else hostvars[inventory_hostname]['ec2_tag_host-type'] }}"      openshift_master_cluster_method: 'native'     
 openshift_use_openshift_sdn: "{{ lookup('oo_option', 'use_openshift_sdn') }}"      os_sdn_network_plugin_name: "{{ lookup('oo_option', 'sdn_network_plugin_name') }}"      openshift_use_flannel: "{{ lookup('oo_option', 'use_flannel') }}"      openshift_use_fluentd: "{{ lookup('oo_option', 'use_fluentd') }}" +    openshift_use_dnsmasq: false diff --git a/playbooks/aws/openshift-cluster/launch.yml b/playbooks/aws/openshift-cluster/launch.yml index 15b83dfad..3edace493 100644 --- a/playbooks/aws/openshift-cluster/launch.yml +++ b/playbooks/aws/openshift-cluster/launch.yml @@ -6,12 +6,7 @@    gather_facts: no    vars_files:    - vars.yml -  - ["vars.{{ deployment_type }}.{{ cluster_id }}.yml", vars.defaults.yml]    tasks: -  - fail: -      msg: Deployment type not supported for aws provider yet -    when: deployment_type == 'enterprise' -    - include: ../../common/openshift-cluster/tasks/set_etcd_launch_facts.yml    - include: tasks/launch_instances.yml      vars: diff --git a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml index 323d63443..d22c86cda 100644 --- a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml +++ b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml @@ -8,80 +8,50 @@      sub_host_type: "{{ g_sub_host_type }}"  - set_fact: -    ec2_region: "{{ lookup('env', 'ec2_region') -                    | default(deployment_vars[deployment_type].region, true) }}" -  when: ec2_region is not defined -- set_fact: -    ec2_image_name: "{{ lookup('env', 'ec2_image_name') -                        | default(deployment_vars[deployment_type].image_name, true) }}" -  when: ec2_image_name is not defined and ec2_image is not defined -- set_fact: -    ec2_image: "{{ lookup('env', 'ec2_image') -                   | default(deployment_vars[deployment_type].image, true) }}" -  when: ec2_image is not defined and not ec2_image_name -- set_fact: -    ec2_keypair: "{{ lookup('env', 'ec2_keypair') -                    | default(deployment_vars[deployment_type].keypair, true) }}" -  when: ec2_keypair is not defined -- set_fact: -    ec2_vpc_subnet: "{{ lookup('env', 'ec2_vpc_subnet') -                    | default(deployment_vars[deployment_type].vpc_subnet, true) }}" -  when: ec2_vpc_subnet is not defined -- set_fact: -    ec2_assign_public_ip: "{{ lookup('env', 'ec2_assign_public_ip') -                    | default(deployment_vars[deployment_type].assign_public_ip, true) }}" -  when: ec2_assign_public_ip is not defined - -- set_fact: -    ec2_instance_type: "{{ ec2_master_instance_type | default(lookup('env', 'ec2_master_instance_type') | default(lookup('env', 'ec2_instance_type') | default(deployment_vars[deployment_type].type, true), true), true) }}" -    ec2_security_groups: "{{ ec2_master_security_groups | default(lookup('env', 'ec2_master_security_groups') | default(lookup('env', 'ec2_security_groups') | default(deployment_vars[deployment_type].security_groups, true), true), true) }}" +    ec2_instance_type: "{{ lookup('env', 'ec2_master_instance_type') | default(deployment_vars[deployment_type].type, true) }}" +    ec2_security_groups: "{{ lookup('env', 'ec2_master_security_groups') | default(deployment_vars[deployment_type].security_groups, true) }}"    when: host_type == "master" and sub_host_type == "default"  - set_fact: -    ec2_instance_type: "{{ ec2_etcd_instance_type | default(lookup('env', 'ec2_etcd_instance_type') | default(lookup('env', 'ec2_instance_type') | 
default(deployment_vars[deployment_type].type, true), true), true) }}" -    ec2_security_groups: "{{ ec2_etcd_security_groups | default(lookup('env', 'ec2_etcd_security_groups') | default(lookup('env', 'ec2_security_groups') | default(deployment_vars[deployment_type].security_groups, true), true), true) }}" +    ec2_instance_type: "{{ lookup('env', 'ec2_etcd_instance_type') | default(deployment_vars[deployment_type].type, true) }}" +    ec2_security_groups: "{{ lookup('env', 'ec2_etcd_security_groups') | default(deployment_vars[deployment_type].security_groups, true) }}"    when: host_type == "etcd" and sub_host_type == "default"  - set_fact: -    ec2_instance_type: "{{ ec2_infra_instance_type | default(lookup('env', 'ec2_infra_instance_type') | default(lookup('env', 'ec2_instance_type') | default(deployment_vars[deployment_type].type, true), true), true) }}" -    ec2_security_groups: "{{ ec2_infra_security_groups | default(lookup('env', 'ec2_infra_security_groups') | default(lookup('env', 'ec2_security_groups') | default(deployment_vars[deployment_type].security_groups, true), true), true) }}" +    ec2_instance_type: "{{ lookup('env', 'ec2_infra_instance_type') | default(deployment_vars[deployment_type].type, true) }}" +    ec2_security_groups: "{{ lookup('env', 'ec2_infra_security_groups') | default(deployment_vars[deployment_type].security_groups, true) }}"    when: host_type == "node" and sub_host_type == "infra"  - set_fact: -    ec2_instance_type: "{{ ec2_node_instance_type | default(lookup('env', 'ec2_node_instance_type') | default(lookup('env', 'ec2_instance_type') | default(deployment_vars[deployment_type].type, true), true), true) }}" -    ec2_security_groups: "{{ ec2_node_security_groups | default(lookup('env', 'ec2_node_security_groups') | default(lookup('env', 'ec2_security_groups') | default(deployment_vars[deployment_type].security_groups, true), true), true) }}" +    ec2_instance_type: "{{ lookup('env', 'ec2_node_instance_type') | default(deployment_vars[deployment_type].type, true) }}" +    ec2_security_groups: "{{ lookup('env', 'ec2_node_security_groups') | default(deployment_vars[deployment_type].security_groups, true) }}"    when: host_type == "node" and sub_host_type == "compute"  - set_fact: -    ec2_instance_type: "{{ lookup('env', 'ec2_instance_type') -                          | default(deployment_vars[deployment_type].type, true) }}" +    ec2_instance_type: "{{ deployment_vars[deployment_type].type }}"    when: ec2_instance_type is not defined  - set_fact: -    ec2_security_groups: "{{ lookup('env', 'ec2_security_groups') | default(deployment_vars[deployment_type].security_groups, true) }}" +    ec2_security_groups: "{{ deployment_vars[deployment_type].security_groups }}"    when: ec2_security_groups is not defined  - name: Find amis for deployment_type    ec2_ami_find: -    region: "{{ ec2_region }}" -    ami_id: "{{ ec2_image | default(omit, true) }}" -    name: "{{ ec2_image_name | default(omit, true) }}" +    region: "{{ deployment_vars[deployment_type].region }}" +    ami_id: "{{ deployment_vars[deployment_type].image }}" +    name: "{{ deployment_vars[deployment_type].image_name }}"    register: ami_result  - fail: msg="Could not find requested ami"    when: not ami_result.results  - set_fact: -    latest_ami: "{{ ami_result.results | oo_ami_selector(ec2_image_name) }}" +    latest_ami: "{{ ami_result.results | oo_ami_selector(deployment_vars[deployment_type].image_name) }}"      volume_defs:        etcd:          root:            volume_size: "{{ 
lookup('env', 'os_etcd_root_vol_size') | default(25, true) }}"            device_type: "{{ lookup('env', 'os_etcd_root_vol_type') | default('gp2', true) }}"            iops: "{{ lookup('env', 'os_etcd_root_vol_iops') | default(500, true) }}" -        etcd: -          volume_size: "{{ lookup('env', 'os_etcd_vol_size') | default(32, true) }}" -          device_type: "{{ lookup('env', 'os_etcd_vol_type') | default('gp2', true) }}" -          iops: "{{ lookup('env', 'os_etcd_vol_iops') | default(500, true) }}"        master:          root:            volume_size: "{{ lookup('env', 'os_master_root_vol_size') | default(25, true) }}" @@ -107,14 +77,14 @@  - name: Launch instance(s)    ec2:      state: present -    region: "{{ ec2_region }}" -    keypair: "{{ ec2_keypair }}" -    group: "{{ ec2_security_groups }}" +    region: "{{ deployment_vars[deployment_type].region }}" +    keypair: "{{ deployment_vars[deployment_type].keypair }}" +    group: "{{ deployment_vars[deployment_type].security_groups }}"      instance_type: "{{ ec2_instance_type }}" -    image: "{{ latest_ami }}" +    image: "{{ deployment_vars[deployment_type].image }}"      count: "{{ instances | length }}" -    vpc_subnet_id: "{{ ec2_vpc_subnet | default(omit, true) }}" -    assign_public_ip: "{{ ec2_assign_public_ip | default(omit, true) }}" +    vpc_subnet_id: "{{ deployment_vars[deployment_type].vpc_subnet }}" +    assign_public_ip: "{{ deployment_vars[deployment_type].assign_public_ip }}"      user_data: "{{ lookup('template', '../templates/user_data.j2') }}"      wait: yes      instance_tags: @@ -127,7 +97,7 @@    register: ec2  - name: Add Name tag to instances -  ec2_tag: resource={{ item.1.id }} region={{ ec2_region }} state=present +  ec2_tag: resource={{ item.1.id }} region={{ deployment_vars[deployment_type].region }} state=present    with_together:    - instances    - ec2.instances @@ -136,29 +106,32 @@        Name: "{{ item.0 }}"  - set_fact: -    instance_groups: "tag_created-by_{{ created_by }}, tag_clusterid_{{ cluster }}, tag_environment_{{ cluster_env }}, -                    tag_host-type_{{ host_type }}, tag_sub-host-type_{{ sub_host_type }}" +    instance_groups: > +      tag_created-by_{{ created_by }}, tag_clusterid_{{ cluster }}, +      tag_environment_{{ cluster_env }}, tag_host-type_{{ host_type }}, +      tag_sub-host-type_{{ sub_host_type }}  - set_fact:      node_label: -      region: "{{ec2_region}}" +      region: "{{ deployment_vars[deployment_type].region }}"        type: "{{sub_host_type}}"    when: host_type == "node"  - set_fact:      node_label: -      region: "{{ec2_region}}" +      region: "{{ deployment_vars[deployment_type].region }}"        type: "{{host_type}}"    when: host_type != "node"  - set_fact:      logrotate:          - name: syslog -          path: "/var/log/cron -                 \n/var/log/maillog -                 \n/var/log/messages -                 \n/var/log/secure -                 \n/var/log/spooler \n" +          path: | +            /var/log/cron +            /var/log/maillog +            /var/log/messages +            /var/log/secure +            /var/log/spooler"            options:              - daily              - rotate 7 @@ -177,6 +150,7 @@      groups: "{{ instance_groups }}"      ec2_private_ip_address: "{{ item.1.private_ip }}"      ec2_ip_address: "{{ item.1.public_ip }}" +    ec2_tag_sub-host-type: "{{ sub_host_type }}"      openshift_node_labels: "{{ node_label }}"      logrotate_scripts: "{{ logrotate }}"    with_together: diff --git 
a/playbooks/aws/openshift-cluster/templates/user_data.j2 b/playbooks/aws/openshift-cluster/templates/user_data.j2 index 4b8554c87..b1087f9c4 100644 --- a/playbooks/aws/openshift-cluster/templates/user_data.j2 +++ b/playbooks/aws/openshift-cluster/templates/user_data.j2 @@ -1,30 +1,12 @@  #cloud-config -{% if type == 'etcd' and 'etcd' in volume_defs[type] %} -cloud_config_modules: -- disk_setup -- mounts - -mounts: -- [ xvdb, /var/lib/etcd, xfs, "defaults" ] - -disk_setup: -  xvdb: -    table_type: mbr -    layout: True - -fs_setup: -- label: etcd_storage -  filesystem: xfs -  device: /dev/xvdb -  partition: auto -{% endif %} -  {% if type in ['node', 'master'] and 'docker' in volume_defs[type] %}  mounts:  - [ xvdb ]  - [ ephemeral0 ] +{% endif %}  write_files: +{% if type in ['node', 'master'] and 'docker' in volume_defs[type] %}  - content: |      DEVS=/dev/xvdb      VG=docker_vg @@ -32,19 +14,7 @@ write_files:    owner: root:root    permissions: '0644'  {% endif %} - -{% if deployment_type == 'online' %} -devices: ['/var'] # Workaround for https://bugs.launchpad.net/bugs/1455436 - -disable_root: 0 -growpart: -  mode: auto -  devices: ['/var'] -runcmd: -- xfs_growfs /var -{% endif %} - -{% if deployment_vars[deployment_type].become %} +{% if deployment_vars[deployment_type].become | bool %}  - path: /etc/sudoers.d/99-{{ deployment_vars[deployment_type].ssh_user }}-cloud-init-requiretty    permissions: 440    content: | diff --git a/playbooks/aws/openshift-cluster/update.yml b/playbooks/aws/openshift-cluster/update.yml index bd31c42dd..d762203b2 100644 --- a/playbooks/aws/openshift-cluster/update.yml +++ b/playbooks/aws/openshift-cluster/update.yml @@ -1,12 +1,25 @@  --- +- hosts: localhost +  gather_facts: no +  tasks: +  - include_vars: vars.yml +  - include_vars: cluster_hosts.yml +  - add_host: +      name: "{{ item }}" +      groups: l_oo_all_hosts +    with_items: g_all_hosts + +- hosts: l_oo_all_hosts +  gather_facts: no +  tasks: +  - include_vars: vars.yml +  - include_vars: cluster_hosts.yml +  - name: Update - Populate oo_hosts_to_update group    hosts: localhost    connection: local    become: no    gather_facts: no -  vars_files: -  - vars.yml -  - cluster_hosts.yml    tasks:    - name: Update - Evaluate oo_hosts_to_update      add_host: @@ -14,7 +27,7 @@        groups: oo_hosts_to_update        ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"        ansible_become: "{{ deployment_vars[deployment_type].become }}" -    with_items: "{{ g_all_hosts | default([]) }}" +    with_items: g_all_hosts | default([])  - include: ../../common/openshift-cluster/update_repos_and_packages.yml diff --git a/playbooks/aws/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml b/playbooks/aws/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml index d466b9d30..44d9a3e25 100644 --- a/playbooks/aws/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml +++ b/playbooks/aws/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml @@ -1,7 +1,6 @@  --- -# This playbook upgrades an existing AWS cluster, leaving nodes untouched if used with an 'online' deployment type.  
# Usage: -#  ansible-playbook playbooks/aws/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml -e deployment_type=online -e cluster_id=<cluster_id> +#  ansible-playbook playbooks/aws/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml -e deployment_type=<deployment_type> -e cluster_id=<cluster_id>  - include: ../../../../common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml    vars_files:    - "{{lookup('file', '../../../../aws/openshift-cluster/vars.yml')}}" diff --git a/playbooks/aws/openshift-cluster/vars.defaults.yml b/playbooks/aws/openshift-cluster/vars.defaults.yml deleted file mode 100644 index ed97d539c..000000000 --- a/playbooks/aws/openshift-cluster/vars.defaults.yml +++ /dev/null @@ -1 +0,0 @@ ---- diff --git a/playbooks/aws/openshift-cluster/vars.online.int.yml b/playbooks/aws/openshift-cluster/vars.online.int.yml deleted file mode 100644 index 2e2f25ccd..000000000 --- a/playbooks/aws/openshift-cluster/vars.online.int.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- -ec2_image: ami-9101c8fa -ec2_image_name: libra-ops-rhel7* -ec2_region: us-east-1 -ec2_keypair: mmcgrath_libra -ec2_master_instance_type: t2.medium -ec2_master_security_groups: [ 'integration', 'integration-master' ] -ec2_infra_instance_type: c4.large -ec2_infra_security_groups: [ 'integration', 'integration-infra' ] -ec2_node_instance_type: m4.large -ec2_node_security_groups: [ 'integration', 'integration-node' ] -ec2_etcd_instance_type: m4.large -ec2_etcd_security_groups: [ 'integration', 'integration-etcd' ] -ec2_vpc_subnet: subnet-987c0def -ec2_assign_public_ip: yes diff --git a/playbooks/aws/openshift-cluster/vars.online.prod.yml b/playbooks/aws/openshift-cluster/vars.online.prod.yml deleted file mode 100644 index 18a53e12e..000000000 --- a/playbooks/aws/openshift-cluster/vars.online.prod.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- -ec2_image: ami-9101c8fa -ec2_image_name: libra-ops-rhel7* -ec2_region: us-east-1 -ec2_keypair: mmcgrath_libra -ec2_master_instance_type: t2.medium -ec2_master_security_groups: [ 'production', 'production-master' ] -ec2_infra_instance_type: c4.large -ec2_infra_security_groups: [ 'production', 'production-infra' ] -ec2_node_instance_type: m4.large -ec2_node_security_groups: [ 'production', 'production-node' ] -ec2_etcd_instance_type: m4.large -ec2_etcd_security_groups: [ 'production', 'production-etcd' ] -ec2_vpc_subnet: subnet-987c0def -ec2_assign_public_ip: yes diff --git a/playbooks/aws/openshift-cluster/vars.online.stage.yml b/playbooks/aws/openshift-cluster/vars.online.stage.yml deleted file mode 100644 index 1f9ac4252..000000000 --- a/playbooks/aws/openshift-cluster/vars.online.stage.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- -ec2_image: ami-9101c8fa -ec2_image_name: libra-ops-rhel7* -ec2_region: us-east-1 -ec2_keypair: mmcgrath_libra -ec2_master_instance_type: t2.medium -ec2_master_security_groups: [ 'stage', 'stage-master' ] -ec2_infra_instance_type: c4.large -ec2_infra_security_groups: [ 'stage', 'stage-infra' ] -ec2_node_instance_type: m4.large -ec2_node_security_groups: [ 'stage', 'stage-node' ] -ec2_etcd_instance_type: m4.large -ec2_etcd_security_groups: [ 'stage', 'stage-etcd' ] -ec2_vpc_subnet: subnet-987c0def -ec2_assign_public_ip: yes diff --git a/playbooks/aws/openshift-cluster/vars.yml b/playbooks/aws/openshift-cluster/vars.yml index f9d539e16..d774187f0 100644 --- a/playbooks/aws/openshift-cluster/vars.yml +++ b/playbooks/aws/openshift-cluster/vars.yml @@ -3,42 +3,31 @@ debug_level: 2  deployment_rhel7_ent_base:    # rhel-7.1, requires cloud access subscription -  image: 
ami-10663b78 -  image_name: -  region: us-east-1 +  image: "{{ lookup('oo_option', 'ec2_image') | default('ami-10251c7a', True) }}" +  image_name: "{{ lookup('oo_option', 'ec2_image_name') | default(None, True) }}" +  region: "{{ lookup('oo_option', 'ec2_region') | default('us-east-1', True) }}"    ssh_user: ec2-user    become: yes -  keypair: libra -  type: m4.large -  security_groups: [ 'public' ] -  vpc_subnet: -  assign_public_ip: +  keypair: "{{ lookup('oo_option', 'ec2_keypair') | default('libra', True) }}" +  type: "{{ lookup('oo_option', 'ec2_instance_type') | default('m4.large', True) }}" +  security_groups: "{{ lookup('oo_option', 'ec2_security_groups') | default([ 'public' ], True) }}" +  vpc_subnet: "{{ lookup('oo_option', 'ec2_vpc_subnet') | default(omit, True) }}" +  assign_public_ip: "{{ lookup('oo_option', 'ec2_assign_public_ip') | default(omit, True) }}"  deployment_vars:    origin:      # centos-7, requires marketplace -    image: ami-61bbf104 -    image_name: -    region: us-east-1 +    image: "{{ lookup('oo_option', 'ec2_image') | default('ami-6d1c2007', True) }}" +    image_name: "{{ lookup('oo_option', 'ec2_image_name') | default(None, True) }}" +    region: "{{ lookup('oo_option', 'ec2_region') | default('us-east-1', True) }}"      ssh_user: centos      become: yes -    keypair: libra -    type: m4.large -    security_groups: [ 'public' ] -    vpc_subnet: -    assign_public_ip: -  online: -    # private ami -    image: ami-7a9e9812 -    image_name: openshift-rhel7_* -    region: us-east-1 -    ssh_user: root -    become: no -    keypair: libra -    type: m4.large -    security_groups: [ 'public' ] -    vpc_subnet: -    assign_public_ip: +    keypair: "{{ lookup('oo_option', 'ec2_keypair') | default('libra', True) }}" +    type: "{{ lookup('oo_option', 'ec2_instance_type') | default('m4.large', True) }}" +    security_groups: "{{ lookup('oo_option', 'ec2_security_groups') | default([ 'public' ], True) }}" +    vpc_subnet: "{{ lookup('oo_option', 'ec2_vpc_subnet') | default(omit, True) }}" +    assign_public_ip: "{{ lookup('oo_option', 'ec2_assign_public_ip') | default(omit, True) }}" +    enterprise: "{{ deployment_rhel7_ent_base }}"    openshift-enterprise: "{{ deployment_rhel7_ent_base }}"    atomic-enterprise: "{{ deployment_rhel7_ent_base }}" diff --git a/playbooks/byo/openshift-cluster/config.yml b/playbooks/byo/openshift-cluster/config.yml index 5887b3208..c5479d098 100644 --- a/playbooks/byo/openshift-cluster/config.yml +++ b/playbooks/byo/openshift-cluster/config.yml @@ -1,7 +1,21 @@  --- +- hosts: localhost +  connection: local +  become: no +  gather_facts: no +  tasks: +  - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml +  - add_host: +      name: "{{ item }}" +      groups: l_oo_all_hosts +    with_items: g_all_hosts + +- hosts: l_oo_all_hosts +  gather_facts: no +  tasks: +  - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml +  - include: ../../common/openshift-cluster/config.yml -  vars_files: -  - ../../byo/openshift-cluster/cluster_hosts.yml    vars:      openshift_cluster_id: "{{ cluster_id | default('default') }}"      openshift_debug_level: "{{ debug_level | default(2) }}" diff --git a/playbooks/byo/openshift-cluster/enable_dnsmasq.yml b/playbooks/byo/openshift-cluster/enable_dnsmasq.yml new file mode 100644 index 000000000..1c8d99341 --- /dev/null +++ b/playbooks/byo/openshift-cluster/enable_dnsmasq.yml @@ -0,0 +1,18 @@ +--- +- hosts: localhost +  connection: local +  become: no +  gather_facts: no +  tasks: +  - 
include_vars: ../../byo/openshift-cluster/cluster_hosts.yml +  - add_host: +      name: "{{ item }}" +      groups: l_oo_all_hosts +    with_items: g_all_hosts + +- hosts: l_oo_all_hosts +  gather_facts: no +  tasks: +  - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml +   +- include: ../../common/openshift-cluster/enable_dnsmasq.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml new file mode 100644 index 000000000..d7798d304 --- /dev/null +++ b/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml @@ -0,0 +1,106 @@ + +- name: Check for appropriate Docker versions for 1.9.x to 1.10.x upgrade +  hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config +  roles: +  - openshift_facts +  tasks: +  - fail: +      msg: Cannot upgrade Docker on Atomic operating systems. +    when: openshift.common.is_atomic | bool + +  - name: Determine available Docker version +    script: ../../../../common/openshift-cluster/upgrades/files/rpm_versions.sh docker +    register: g_docker_version_result + +  - name: Check if Docker is installed +    command: rpm -q docker +    register: pkg_check +    failed_when: pkg_check.rc > 1 +    changed_when: no + +  - set_fact: +      g_docker_version: "{{ g_docker_version_result.stdout | from_yaml }}" + +  - name: Set fact if docker requires an upgrade +    set_fact: +      docker_upgrade: true +    when: pkg_check.rc == 0 and g_docker_version.curr_version | version_compare('1.10','<') + +  - fail: +      msg: This playbook requires access to Docker 1.10 or later +    when: g_docker_version.avail_version | default(g_docker_version.curr_version, true) | version_compare('1.10','<') + +# If a node fails, halt everything, the admin will need to clean up and we +# don't want to carry on, potentially taking out every node. The playbook can safely be re-run +# and will not take any action on a node already running 1.10+. +- name: Evacuate and upgrade nodes +  hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config +  serial: 1 +  any_errors_fatal: true +  tasks: +  - debug: var=docker_upgrade + +  - name: Prepare for Node evacuation +    command: > +      {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --schedulable=false +    delegate_to: "{{ groups.oo_first_master.0 }}" +    when: docker_upgrade is defined and docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_config + +# TODO: skip all node evac stuff for non-nodes (i.e. 
separate containerized etcd hosts) +  - name: Evacuate Node for Kubelet upgrade +    command: > +      {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --evacuate --force +    delegate_to: "{{ groups.oo_first_master.0 }}" +    when: docker_upgrade is defined and docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_config + +  - name: Stop containerized services +    service: name={{ item }} state=stopped +    with_items: +      - "{{ openshift.common.service_type }}-master" +      - "{{ openshift.common.service_type }}-master-api" +      - "{{ openshift.common.service_type }}-master-controllers" +      - "{{ openshift.common.service_type }}-node" +      - etcd_container +      - openvswitch +    failed_when: false +    when: docker_upgrade is defined and docker_upgrade | bool and openshift.common.is_containerized | bool + +  - name: Remove all containers and images +    script: files/nuke_images.sh docker +    register: nuke_images_result +    when: docker_upgrade is defined and docker_upgrade | bool + +  - name: Upgrade Docker +    command: "{{ ansible_pkg_mgr}} update -y docker" +    register: docker_upgrade_result +    when: docker_upgrade is defined and docker_upgrade | bool + +  - name: Restart containerized services +    service: name={{ item }} state=started +    with_items: +      - etcd_container +      - openvswitch +      - "{{ openshift.common.service_type }}-master" +      - "{{ openshift.common.service_type }}-master-api" +      - "{{ openshift.common.service_type }}-master-controllers" +      - "{{ openshift.common.service_type }}-node" +    failed_when: false +    when: docker_upgrade is defined and docker_upgrade | bool and openshift.common.is_containerized | bool + +  - name: Wait for master API to come back online +    become: no +    local_action: +      module: wait_for +        host="{{ inventory_hostname }}" +        state=started +        delay=10 +        port="{{ openshift.master.api_port }}" +    when: docker_upgrade is defined and docker_upgrade | bool and inventory_hostname in groups.oo_masters_to_config + +  - name: Set node schedulability +    command: > +      {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --schedulable=true +    delegate_to: "{{ groups.oo_first_master.0 }}" +    when: openshift.node.schedulable | bool +    when: docker_upgrade is defined and docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_config and openshift.node.schedulable | bool + diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/files/nuke_images.sh b/playbooks/byo/openshift-cluster/upgrades/docker/files/nuke_images.sh new file mode 100644 index 000000000..6b155f7fa --- /dev/null +++ b/playbooks/byo/openshift-cluster/upgrades/docker/files/nuke_images.sh @@ -0,0 +1,23 @@ +#!/bin/bash + +# Stop any running containers +running_container_ids=`docker ps -q` +if test -n "$running_container_ids" +then +    docker stop $running_container_ids +fi + +# Delete all containers +container_ids=`docker ps -a -q` +if test -n "$container_ids" +then +    docker rm -f -v $container_ids +fi + +# Delete all images (forcefully) +image_ids=`docker images -q` +if test -n "$image_ids" +then +    # Taken from: https://gist.github.com/brianclements/f72b2de8e307c7b56689#gistcomment-1443144 +    docker rmi $(docker images | grep "$2/\|/$2 \| $2 \|$2 \|$2-\|$2_" | awk '{print $1 ":" $2}') 2>/dev/null || echo "No images matching \"$2\" left to purge." 
+fi diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/roles b/playbooks/byo/openshift-cluster/upgrades/docker/roles new file mode 120000 index 000000000..6bc1a7aef --- /dev/null +++ b/playbooks/byo/openshift-cluster/upgrades/docker/roles @@ -0,0 +1 @@ +../../../../../roles
\ No newline at end of file diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml new file mode 100644 index 000000000..0f86abd89 --- /dev/null +++ b/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml @@ -0,0 +1,29 @@ +# Playbook to upgrade Docker to the max allowable version for an OpenShift cluster. +# +# Currently only supports upgrading 1.9.x to >= 1.10.x. +- hosts: localhost +  connection: local +  become: no +  gather_facts: no +  tasks: +  - include_vars: ../../cluster_hosts.yml +  - add_host: +      name: "{{ item }}" +      groups: l_oo_all_hosts +    with_items: g_all_hosts | default([]) +    changed_when: false + +- hosts: l_oo_all_hosts +  gather_facts: no +  tasks: +  - include_vars: ../../cluster_hosts.yml + +- include: ../../../../common/openshift-cluster/evaluate_groups.yml +  vars: +    # Do not allow adding hosts during upgrade. +    g_new_master_hosts: [] +    g_new_node_hosts: [] +    openshift_cluster_id: "{{ cluster_id | default('default') }}" +    openshift_deployment_type: "{{ deployment_type }}" + +- include: docker_upgrade.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_0_minor/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_0_minor/upgrade.yml index 628a07752..76bfff9b6 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_0_minor/upgrade.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_0_minor/upgrade.yml @@ -1,13 +1,28 @@  --- +- hosts: localhost +  connection: local +  become: no +  gather_facts: no +  tasks: +  - name: Verify Ansible version is greater than or equal to 1.9.4 and less than 2.0 +    fail: +      msg: "Unsupported ansible version: {{ ansible_version }} found." +    when: ansible_version.full | version_compare('1.9.4', 'lt') or ansible_version.full | version_compare('2.0', 'ge') +  - include_vars: ../../../../byo/openshift-cluster/cluster_hosts.yml +  - add_host: +      name: "{{ item }}" +      groups: l_oo_all_hosts +    with_items: g_all_hosts + +- hosts: l_oo_all_hosts +  gather_facts: no +  tasks: +  - include_vars: ../../../../byo/openshift-cluster/cluster_hosts.yml +  - include: ../../../../common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml -  vars_files: -  - "{{lookup('file', '../../../../byo/openshift-cluster/cluster_hosts.yml')}}"    vars: -    g_etcd_hosts: "{{ groups.etcd | default([]) }}" -    g_master_hosts: "{{ groups.masters | default([]) }}" +    # Do not allow adding hosts during upgrade.      g_new_master_hosts: [] -    g_nfs_hosts: "{{ groups.nfs | default([]) }}" -    g_node_hosts: "{{ groups.nodes | default([]) }}" -    g_lb_hosts: "{{ groups.lb | default([]) }}" +    g_new_node_hosts: []      openshift_cluster_id: "{{ cluster_id | default('default') }}"      openshift_deployment_type: "{{ deployment_type }}" diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml index 8fadd2ce7..c17446162 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml @@ -1,13 +1,28 @@  --- +- hosts: localhost +  connection: local +  become: no +  gather_facts: no +  tasks: +  - name: Verify Ansible version is greater than or equal to 1.9.4 and less than 2.0 +    fail: +      msg: "Unsupported ansible version: {{ ansible_version }} found." 
+    when: ansible_version.full | version_compare('1.9.4', 'lt') or ansible_version.full | version_compare('2.0', 'ge') +  - include_vars: ../../../../byo/openshift-cluster/cluster_hosts.yml +  - add_host: +      name: "{{ item }}" +      groups: l_oo_all_hosts +    with_items: g_all_hosts + +- hosts: l_oo_all_hosts +  gather_facts: no +  tasks: +  - include_vars: ../../../../byo/openshift-cluster/cluster_hosts.yml +  - include: ../../../../common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml -  vars_files: -  - "{{lookup('file', '../../../../byo/openshift-cluster/cluster_hosts.yml')}}"    vars: -    g_etcd_hosts: "{{ groups.etcd | default([]) }}" -    g_master_hosts: "{{ groups.masters | default([]) }}" +    # Do not allow adding hosts during upgrade.      g_new_master_hosts: [] -    g_nfs_hosts: "{{ groups.nfs | default([]) }}" -    g_node_hosts: "{{ groups.nodes | default([]) }}" -    g_lb_hosts: "{{ groups.lb | default([]) }}" +    g_new_node_hosts: []      openshift_cluster_id: "{{ cluster_id | default('default') }}"      openshift_deployment_type: "{{ deployment_type }}" diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_1_minor/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_1_minor/upgrade.yml index 42078584b..99592d85a 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_1_minor/upgrade.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_1_minor/upgrade.yml @@ -1,12 +1,29 @@  --- +- hosts: localhost +  connection: local +  become: no +  gather_facts: no +  tasks: +  - name: Verify Ansible version is greater than or equal to 1.9.4 and less than 2.0 +    fail: +      msg: "Unsupported ansible version: {{ ansible_version }} found." +    when: ansible_version.full | version_compare('1.9.4', 'lt') or ansible_version.full | version_compare('2.0', 'ge') +  - include_vars: ../../../../byo/openshift-cluster/cluster_hosts.yml +  - add_host: +      name: "{{ item }}" +      groups: l_oo_all_hosts +    with_items: g_all_hosts + +- hosts: l_oo_all_hosts +  gather_facts: no +  tasks: +  - include_vars: ../../../../byo/openshift-cluster/cluster_hosts.yml +  - include: ../../../../common/openshift-cluster/evaluate_groups.yml    vars: -    g_etcd_hosts: "{{ groups.etcd | default([]) }}" -    g_master_hosts: "{{ groups.masters | default([]) }}" +    # Do not allow adding hosts during upgrade.      g_new_master_hosts: [] -    g_nfs_hosts: "{{ groups.nfs | default([]) }}" -    g_node_hosts: "{{ groups.nodes | default([]) }}" -    g_lb_hosts: "{{ groups.lb | default([]) }}" +    g_new_node_hosts: []      openshift_cluster_id: "{{ cluster_id | default('default') }}"      openshift_deployment_type: "{{ deployment_type }}"  - include: ../../../../common/openshift-cluster/upgrades/v3_1_minor/pre.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml index 0c91b51d6..24617620b 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml @@ -1,14 +1,54 @@  --- +- hosts: localhost +  connection: local +  become: no +  gather_facts: no +  tasks: +  - name: Verify Ansible version is greater than or equal to 1.9.4 and less than 2.0 +    fail: +      msg: "Unsupported ansible version: {{ ansible_version }} found." 
+    when: ansible_version.full | version_compare('1.9.4', 'lt') or ansible_version.full | version_compare('2.0', 'ge') +  - include_vars: ../../../../byo/openshift-cluster/cluster_hosts.yml +  - add_host: +      name: "{{ item }}" +      groups: l_oo_all_hosts +    with_items: g_all_hosts | default([]) + +- hosts: l_oo_all_hosts +  gather_facts: no +  tasks: +  - include_vars: ../../../../byo/openshift-cluster/cluster_hosts.yml +  - include: ../../../../common/openshift-cluster/evaluate_groups.yml    vars: -    g_etcd_hosts: "{{ groups.etcd | default([]) }}" -    g_master_hosts: "{{ groups.masters | default([]) }}" +    # Do not allow adding hosts during upgrade.      g_new_master_hosts: [] -    g_nfs_hosts: "{{ groups.nfs | default([]) }}" -    g_node_hosts: "{{ groups.nodes | default([]) }}" -    g_lb_hosts: "{{ groups.lb | default([]) }}" +    g_new_node_hosts: []      openshift_cluster_id: "{{ cluster_id | default('default') }}"      openshift_deployment_type: "{{ deployment_type }}" + +- name: Set oo_options +  hosts: oo_all_hosts +  tasks: +  - set_fact: +      openshift_docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') }}" +    when: openshift_docker_additional_registries is not defined +  - set_fact: +      openshift_docker_insecure_registries: "{{ lookup('oo_option',  'docker_insecure_registries') }}" +    when: openshift_docker_insecure_registries is not defined +  - set_fact: +      openshift_docker_blocked_registries: "{{ lookup('oo_option', 'docker_blocked_registries') }}" +    when: openshift_docker_blocked_registries is not defined +  - set_fact: +      openshift_docker_options: "{{ lookup('oo_option', 'docker_options') }}" +    when: openshift_docker_options is not defined +  - set_fact: +      openshift_docker_log_driver: "{{ lookup('oo_option', 'docker_log_driver') }}" +    when: openshift_docker_log_driver is not defined +  - set_fact: +      openshift_docker_log_options: "{{ lookup('oo_option', 'docker_log_options') }}" +    when: openshift_docker_log_options is not defined +  - include: ../../../../common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml    vars:      openshift_deployment_type: "{{ deployment_type }}" diff --git a/playbooks/byo/openshift-master/restart.yml b/playbooks/byo/openshift-master/restart.yml index a78a6aa3d..0cf669ae3 100644 --- a/playbooks/byo/openshift-master/restart.yml +++ b/playbooks/byo/openshift-master/restart.yml @@ -1,4 +1,18 @@  --- +- hosts: localhost +  connection: local +  become: no +  gather_facts: no +  tasks: +  - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml +  - add_host: +      name: "{{ item }}" +      groups: l_oo_all_hosts +    with_items: g_all_hosts + +- hosts: l_oo_all_hosts +  gather_facts: no +  tasks: +  - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml +  - include: ../../common/openshift-master/restart.yml -  vars_files: -  - ../../byo/openshift-cluster/cluster_hosts.yml diff --git a/playbooks/byo/openshift-master/scaleup.yml b/playbooks/byo/openshift-master/scaleup.yml index 18797d02a..fced79262 100644 --- a/playbooks/byo/openshift-master/scaleup.yml +++ b/playbooks/byo/openshift-master/scaleup.yml @@ -1,7 +1,21 @@  --- +- hosts: localhost +  connection: local +  become: no +  gather_facts: no +  tasks: +  - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml +  - add_host: +      name: "{{ item }}" +      groups: l_oo_all_hosts +    with_items: g_all_hosts + +- hosts: l_oo_all_hosts +  gather_facts: no +  tasks: +  - include_vars: 
../../byo/openshift-cluster/cluster_hosts.yml +  - include: ../../common/openshift-master/scaleup.yml -  vars_files: -  - ../../byo/openshift-cluster/cluster_hosts.yml    vars:      openshift_cluster_id: "{{ cluster_id | default('default') }}"      openshift_debug_level: "{{ debug_level | default(2) }}" diff --git a/playbooks/byo/openshift-node/scaleup.yml b/playbooks/byo/openshift-node/scaleup.yml index 0343597b5..5737bb0e0 100644 --- a/playbooks/byo/openshift-node/scaleup.yml +++ b/playbooks/byo/openshift-node/scaleup.yml @@ -1,7 +1,21 @@  --- +- hosts: localhost +  connection: local +  become: no +  gather_facts: no +  tasks: +  - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml +  - add_host: +      name: "{{ item }}" +      groups: l_oo_all_hosts +    with_items: g_all_hosts + +- hosts: l_oo_all_hosts +  gather_facts: no +  tasks: +  - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml +  - include: ../../common/openshift-node/scaleup.yml -  vars_files: -  - ../../byo/openshift-cluster/cluster_hosts.yml    vars:      openshift_cluster_id: "{{ cluster_id | default('default') }}"      openshift_debug_level: "{{ debug_level | default(2) }}" diff --git a/playbooks/byo/openshift_facts.yml b/playbooks/byo/openshift_facts.yml index 916dfd0a6..db8703db6 100644 --- a/playbooks/byo/openshift_facts.yml +++ b/playbooks/byo/openshift_facts.yml @@ -1,4 +1,22 @@  --- +- hosts: localhost +  connection: local +  become: no +  gather_facts: no +  tasks: +  - include_vars: openshift-cluster/cluster_hosts.yml +  - add_host: +      name: "{{ item }}" +      groups: l_oo_all_hosts +    with_items: g_all_hosts + +- hosts: l_oo_all_hosts +  gather_facts: no +  tasks: +  - include_vars: openshift-cluster/cluster_hosts.yml + +- include: ../common/openshift-cluster/evaluate_groups.yml +  - name: Gather Cluster facts    hosts: OSEv3    roles: diff --git a/playbooks/byo/rhel_subscribe.yml b/playbooks/byo/rhel_subscribe.yml index 990ddd2f2..f093411ef 100644 --- a/playbooks/byo/rhel_subscribe.yml +++ b/playbooks/byo/rhel_subscribe.yml @@ -1,5 +1,23 @@  --- -- hosts: all +- hosts: localhost +  connection: local +  become: no +  gather_facts: no +  tasks: +  - include_vars: openshift-cluster/cluster_hosts.yml +  - add_host: +      name: "{{ item }}" +      groups: l_oo_all_hosts +    with_items: g_all_hosts + +- hosts: l_oo_all_hosts +  gather_facts: no +  tasks: +  - include_vars: openshift-cluster/cluster_hosts.yml +   +- include: ../common/openshift-cluster/evaluate_groups.yml  +   +- hosts: l_oo_all_hosts    vars:      openshift_deployment_type: "{{ deployment_type }}"    roles: diff --git a/playbooks/common/openshift-cluster/additional_config.yml b/playbooks/common/openshift-cluster/additional_config.yml index c5a0f123c..a34322754 100644 --- a/playbooks/common/openshift-cluster/additional_config.yml +++ b/playbooks/common/openshift-cluster/additional_config.yml @@ -17,6 +17,7 @@    - role: openshift_master_cluster      when: openshift_master_ha | bool and openshift.master.cluster_method == "pacemaker"    - role: openshift_examples +    registry_url: "{{ openshift.master.registry_url }}"      when: openshift.common.install_examples | bool    - role: openshift_cluster_metrics      when: openshift.common.use_cluster_metrics | bool @@ -27,30 +28,5 @@        (osm_use_cockpit | bool or osm_use_cockpit is undefined )    - role: flannel_register      when: openshift.common.use_flannel | bool -  - role: pods -    when: openshift.common.deployment_type == 'online' -  - role: os_env_extras -    when: 
openshift.common.deployment_type == 'online' -- name: Create persistent volumes and create hosted services -  hosts: oo_first_master -  vars: -    attach_registry_volume: "{{ openshift.hosted.registry.storage.kind != None }}" -    deploy_infra: "{{ openshift.master.infra_nodes | default([]) | length > 0 }}" -    persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups) }}" -    persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims }}" -  roles: -  - role: openshift_persistent_volumes -    when: persistent_volumes | length > 0 or persistent_volume_claims | length > 0 -  - role: openshift_serviceaccounts -    openshift_serviceaccounts_names: -    - router -    - registry -    openshift_serviceaccounts_namespace: default -    openshift_serviceaccounts_sccs: -    - privileged -  - role: openshift_registry -    registry_volume_claim: "{{ openshift.hosted.registry.storage.volume.name }}-claim" -    when: deploy_infra | bool and attach_registry_volume | bool -  - role: openshift_metrics -    when: openshift.hosted.metrics.deploy | bool + diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml index 99b36098a..5fec11541 100644 --- a/playbooks/common/openshift-cluster/config.yml +++ b/playbooks/common/openshift-cluster/config.yml @@ -1,6 +1,8 @@  ---  - include: evaluate_groups.yml +- include: initialize_facts.yml +  - include: validate_hostnames.yml  - name: Set oo_options @@ -29,6 +31,8 @@  - include: ../openshift-nfs/config.yml +- include: ../openshift-loadbalancer/config.yml +  - include: ../openshift-master/config.yml  - include: additional_config.yml diff --git a/playbooks/common/openshift-cluster/enable_dnsmasq.yml b/playbooks/common/openshift-cluster/enable_dnsmasq.yml new file mode 100644 index 000000000..f2bcc872f --- /dev/null +++ b/playbooks/common/openshift-cluster/enable_dnsmasq.yml @@ -0,0 +1,66 @@ +--- +- include: evaluate_groups.yml + +- name: Load openshift_facts +  hosts: oo_masters_to_config:oo_nodes_to_config +  roles: +  - openshift_facts +  post_tasks: +  - fail: msg="This playbook requires a master version of at least Origin 1.1 or OSE 3.1" +    when: not openshift.common.version_gte_3_1_1_or_1_1_1 | bool +   +- name: Reconfigure masters to listen on our new dns_port +  hosts: oo_masters_to_config +  handlers: +  - include: ../../../roles/openshift_master/handlers/main.yml +  vars: +    os_firewall_allow: +    - service: skydns tcp +      port: "{{ openshift.master.dns_port }}/tcp" +    - service: skydns udp +      port: "{{ openshift.master.dns_port }}/udp" +  roles: +  - os_firewall +  tasks: +  - openshift_facts: +      role: "{{ item.role }}" +      local_facts: "{{ item.local_facts }}" +    with_items: +    - role: common +      local_facts: +        use_dnsmasq: True +    - role: master +      local_facts: +        dns_port: '8053' +  - modify_yaml: +      dest: "{{ openshift.common.config_base }}/master/master-config.yaml" +      yaml_key: dnsConfig.bindAddress +      yaml_value: "{{ openshift.master.bind_addr }}:{{ openshift.master.dns_port }}" +    notify: restart master +  - meta: flush_handlers + +- name: Configure nodes for dnsmasq +  hosts: oo_nodes_to_config +  handlers: +  - include: ../../../roles/openshift_node/handlers/main.yml +  pre_tasks: +  - openshift_facts: +      role: "{{ item.role }}" +      local_facts: "{{ item.local_facts }}" +    with_items: +    - role: common +      local_facts: +        use_dnsmasq: True +    - role: 
node +      local_facts: +        dns_ip: "{{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}" +  vars: +    openshift_deployment_type: "{{ deployment_type }}" +  roles: +    - openshift_node_dnsmasq +  post_tasks: +  - modify_yaml: +      dest: "{{ openshift.common.config_base }}/node/node-config.yaml" +      yaml_key: dnsIP +      yaml_value: "{{ openshift.node.dns_ip }}" +    notify: restart node diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml index 3fb42a7fa..c5273b08f 100644 --- a/playbooks/common/openshift-cluster/evaluate_groups.yml +++ b/playbooks/common/openshift-cluster/evaluate_groups.yml @@ -35,7 +35,7 @@        groups: oo_all_hosts        ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"        ansible_become: "{{ g_sudo | default(omit) }}" -    with_items: "{{ g_all_hosts | default([]) }}" +    with_items: g_all_hosts | default([])    - name: Evaluate oo_masters      add_host: diff --git a/playbooks/common/openshift-cluster/initialize_facts.yml b/playbooks/common/openshift-cluster/initialize_facts.yml new file mode 100644 index 000000000..37f523246 --- /dev/null +++ b/playbooks/common/openshift-cluster/initialize_facts.yml @@ -0,0 +1,11 @@ +--- +- name: Initialize host facts +  hosts: oo_all_hosts +  any_errors_fatal: true +  roles: +  - openshift_facts +  tasks: +  - openshift_facts: +      role: common +      local_facts: +        hostname: "{{ openshift_hostname | default(None) }}" diff --git a/playbooks/common/openshift-cluster/library b/playbooks/common/openshift-cluster/library new file mode 120000 index 000000000..d0b7393d3 --- /dev/null +++ b/playbooks/common/openshift-cluster/library @@ -0,0 +1 @@ +../../../library/
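The enable_dnsmasq.yml playbook added above drives its configuration edits through the modify_yaml module rather than re-templating whole files: masters move SkyDNS to port 8053, and nodes point dnsIP at their own address so the local dnsmasq can forward cluster queries. As a rough illustration only (the concrete values are assumptions, not taken from the repo), the touched keys end up looking something like this, with 0.0.0.0 standing in for the master bind address and 192.0.2.10 for a node's default IPv4 address:

# master-config.yaml fragment after modify_yaml sets dnsConfig.bindAddress
dnsConfig:
  bindAddress: 0.0.0.0:8053

# node-config.yaml fragment after modify_yaml sets dnsIP
dnsIP: 192.0.2.10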
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml index 1cbc0f544..c3077e3c2 100644 --- a/playbooks/common/openshift-cluster/openshift_hosted.yml +++ b/playbooks/common/openshift-cluster/openshift_hosted.yml @@ -1,5 +1,18 @@ +- name: Create persistent volumes +  hosts: oo_first_master +  vars: +    persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups) }}" +    persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims }}" +  roles: +  - role: openshift_persistent_volumes +    when: persistent_volumes | length > 0 or persistent_volume_claims | length > 0 +  - name: Create Hosted Resources    hosts: oo_first_master +  pre_tasks: +  - set_fact: +      openshift_hosted_router_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}" +      openshift_hosted_registry_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}" +    when: "'master' in hostvars[groups.oo_first_master.0].openshift and 'registry_url' in hostvars[groups.oo_first_master.0].openshift.master"    roles:    - role: openshift_hosted -    openshift_hosted_router_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}" diff --git a/playbooks/common/openshift-cluster/update_repos_and_packages.yml b/playbooks/common/openshift-cluster/update_repos_and_packages.yml index 1474bb3ca..e3d16d359 100644 --- a/playbooks/common/openshift-cluster/update_repos_and_packages.yml +++ b/playbooks/common/openshift-cluster/update_repos_and_packages.yml @@ -1,8 +1,15 @@  --- +- include: evaluate_groups.yml +  - hosts: oo_hosts_to_update    vars:      openshift_deployment_type: "{{ deployment_type }}"    roles: +  # Explicitly calling openshift_facts because it appears that when +  # rhel_subscribe is skipped that the openshift_facts dependency for +  # openshift_repos is also skipped (this is the case at least for Ansible +  # 2.0.2) +  - openshift_facts    - role: rhel_subscribe      when: deployment_type in ["enterprise", "atomic-enterprise", "openshift-enterprise"] and            ansible_distribution == "RedHat" and diff --git a/playbooks/common/openshift-cluster/upgrades/files/openshift_container_versions.sh b/playbooks/common/openshift-cluster/upgrades/files/openshift_container_versions.sh index 96944a78b..9bbeff660 100644 --- a/playbooks/common/openshift-cluster/upgrades/files/openshift_container_versions.sh +++ b/playbooks/common/openshift-cluster/upgrades/files/openshift_container_versions.sh @@ -2,7 +2,7 @@  # Here we don't really care if this is a master, api, controller or node image.  # We just need to know the version of one of them. 
-unit_file=$(ls /etc/systemd/system/${1}*.service | head -n1) +unit_file=$(ls /etc/systemd/system/${1}*.service | grep -v node-dep | head -n1)  if [ ${1} == "origin" ]; then      image_name="openshift/origin" diff --git a/playbooks/common/openshift-cluster/upgrades/files/rpm_versions.sh b/playbooks/common/openshift-cluster/upgrades/files/rpm_versions.sh index a2a9579b5..7bf249742 100644 --- a/playbooks/common/openshift-cluster/upgrades/files/rpm_versions.sh +++ b/playbooks/common/openshift-cluster/upgrades/files/rpm_versions.sh @@ -1,7 +1,11 @@  #!/bin/bash - -installed=$(yum list installed -e 0 -q "$@" 2>&1 | tail -n +2 | awk '{ print $2 }' | sort -r | tr '\n' ' ') -available=$(yum list available -e 0 -q "$@" 2>&1 | tail -n +2 | grep -v 'el7ose' | awk '{ print $2 }' | sort -r | tr '\n' ' ') +if [ `which dnf 2> /dev/null` ]; then +  installed=$(dnf repoquery --installed --latest-limit 1 -d 0 --qf '%{version}-%{release}' "${@}" 2> /dev/null) +  available=$(dnf repoquery --available --latest-limit 1 -d 0 --qf '%{version}-%{release}' "${@}" 2> /dev/null) +else +  installed=$(repoquery --plugins --pkgnarrow=installed --qf '%{version}-%{release}' "${@}" 2> /dev/null) +  available=$(repoquery --plugins --pkgnarrow=available --qf '%{version}-%{release}' "${@}" 2> /dev/null) +fi  echo "---"  echo "curr_version: ${installed}" diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml index 51b108f6a..e31e7f8a3 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml @@ -36,7 +36,8 @@  - name: Ensure AOS 3.0.2 or Origin 1.0.6    hosts: oo_first_master    tasks: -    fail: This playbook requires Origin 1.0.6 or Atomic OpenShift 3.0.2 or later +  - fail: +      msg: "This playbook requires Origin 1.0.6 or Atomic OpenShift 3.0.2 or later"      when: _new_version.stdout | version_compare('1.0.6','<') or ( _new_version.stdout | version_compare('3.0','>=' and _new_version.stdout | version_compare('3.0.2','<') )  - name: Update cluster policy @@ -108,5 +109,6 @@    vars:      openshift_examples_import_command: "update"      openshift_deployment_type: "{{ deployment_type }}" +    registry_url: "{{ openshift.master.registry_url }}"    roles:      - openshift_examples diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml index a72749a2b..c3c1240d8 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml @@ -54,7 +54,7 @@    - script: ../files/pre-upgrade-check -- name: Verify upgrade can proceed +- name: Verify upgrade targets    hosts: oo_masters_to_config:oo_nodes_to_config    vars:      target_version: "{{ '1.1' if deployment_type == 'origin' else '3.1' }}" @@ -569,6 +569,7 @@    # Update the existing templates    - role: openshift_examples      openshift_examples_import_command: replace +    registry_url: "{{ openshift.master.registry_url }}"    pre_tasks:    - name: Collect all routers      command: > diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/post.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/post.yml index 196393b2a..f030eed18 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/post.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/post.yml @@ -19,6 +19,7 
@@    # Update the existing templates    - role: openshift_examples      openshift_examples_import_command: replace +    registry_url: "{{ openshift.master.registry_url }}"    pre_tasks:    - name: Collect all routers      command: > diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml index 66935e061..85d7073f2 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml @@ -29,7 +29,7 @@          valid version for a {{ target_version }} upgrade      when: openshift_pkg_version is defined and openshift_pkg_version.split('-',1).1 | version_compare(target_version ,'<') -- name: Verify upgrade can proceed +- name: Verify upgrade targets    hosts: oo_masters_to_config:oo_nodes_to_config    vars:      target_version: "{{ '1.1.1' if deployment_type == 'origin' else '3.1.1' }}" diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml index 5e62b43a3..e5cfa58aa 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml @@ -12,7 +12,7 @@      openshift_version: "{{ openshift_pkg_version | default('') }}"    tasks:    - name: Upgrade master packages -    command: "{{ ansible_pkg_mgr}} update -y {{ openshift.common.service_type }}-master{{ openshift_version }}" +    command: "{{ ansible_pkg_mgr}} update-to -y {{ openshift.common.service_type }}-master{{ openshift_version }} {{ openshift.common.service_type }}-sdn-ovs{{ openshift_version }}"      when: not openshift.common.is_containerized | bool    - name: Ensure python-yaml present for config upgrade @@ -63,7 +63,7 @@    - openshift_facts    tasks:    - name: Upgrade node packages -    command: "{{ ansible_pkg_mgr }} update -y {{ openshift.common.service_type }}-node{{ openshift_version }}" +    command: "{{ ansible_pkg_mgr }} update-to -y {{ openshift.common.service_type }}-node{{ openshift_version }} {{ openshift.common.service_type }}-sdn-ovs{{ openshift_version }}"      when: not openshift.common.is_containerized | bool    - name: Restart node service diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/atomic-openshift-master.j2 b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/atomic-openshift-master.j2 new file mode 120000 index 000000000..cf20e8959 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/atomic-openshift-master.j2 @@ -0,0 +1 @@ +../../../../../roles/openshift_master/templates/atomic-openshift-master.j2
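In the v3_1_minor upgrade tasks above, the package step switches from a plain update to yum's update-to and pulls the matching -sdn-ovs package into the same transaction, which keeps the master/node and SDN packages pinned to the same requested version. As a hedged illustration, with the enterprise service type and a hypothetical openshift_version suffix of -3.1.1.6, the master task would render to roughly:

# illustrative rendering only; the package names and version here are assumptions
- name: Upgrade master packages
  command: yum update-to -y atomic-openshift-master-3.1.1.6 atomic-openshift-sdn-ovs-3.1.1.6
  when: not openshift.common.is_containerized | bool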
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker
new file mode 120000
index 000000000..5a3dd12b3
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker
@@ -0,0 +1 @@
+../../../../../roles/openshift_master/templates/docker
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker-cluster b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker-cluster
new file mode 120000
index 000000000..3ee319365
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker-cluster
@@ -0,0 +1 @@
+../../../../../roles/openshift_master/templates/docker-cluster
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker_upgrade.yml
index d9177e8a0..c7b18f51b 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker_upgrade.yml
@@ -10,5 +10,5 @@
   register: docker_upgrade
 
 - name: Restart Docker
-  service: name=docker state=restarted
+  command: systemctl restart docker
   when: docker_upgrade | changed
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/native-cluster b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/native-cluster
new file mode 120000
index 000000000..f44f8eb4f
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/native-cluster
@@ -0,0 +1 @@
+../../../../../roles/openshift_master/templates/native-cluster
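docker_upgrade.yml above now restarts Docker with a raw systemctl call instead of the service module; the diff does not say why, but a plausible reason is to avoid the service module's extra state handling while the daemon's packages are mid-upgrade. A sketch of the surrounding pattern (the upgrade task itself sits outside this hunk, so it is a guess here):

# sketch only -- the real upgrade task is not shown in this diff
- name: Upgrade Docker packages               # hypothetical placeholder task
  action: "{{ ansible_pkg_mgr }} name=docker state=latest"
  register: docker_upgrade

- name: Restart Docker
  command: systemctl restart docker           # raw systemctl instead of the service module
  when: docker_upgrade | changed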
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/post.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/post.yml index 3fd97ac14..c16965a35 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/post.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/post.yml @@ -10,6 +10,7 @@      router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', 'v' + g_new_version ) }}"      oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"    roles: +  - openshift_manageiq    # Create the new templates shipped in 3.2, existing templates are left    # unmodified. This prevents the subsequent role definition for    # openshift_examples from failing when trying to replace templates that do @@ -18,6 +19,7 @@    - openshift_examples    # Update the existing templates    - role: openshift_examples +    registry_url: "{{ openshift.master.registry_url }}"      openshift_examples_import_command: replace    pre_tasks:    - name: Collect all routers @@ -36,7 +38,7 @@    - name: Update router image to current version      when: all_routers.rc == 0      command: > -      {{ oc_cmd }} patch dc/{{ item['labels']['deploymentconfig'] }} -p +      {{ oc_cmd }} patch dc/{{ item['labels']['deploymentconfig'] }} -n {{ item['namespace'] }} -p        '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}","livenessProbe":{"tcpSocket":null,"httpGet":{"path": "/healthz", "port": 1936, "host": "localhost", "scheme": "HTTP"},"initialDelaySeconds":10,"timeoutSeconds":1}}]}}}}'        --api-version=v1      with_items: haproxy_routers @@ -51,7 +53,7 @@    - name: Update registry image to current version      when: _default_registry.rc == 0      command: > -      {{ oc_cmd }} patch dc/docker-registry -p +      {{ oc_cmd }} patch dc/docker-registry -n default -p        '{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}'        --api-version=v1 diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml index 6f0af31b8..f163cca86 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml @@ -2,10 +2,23 @@  ###############################################################################  # Evaluate host groups and gather facts  ############################################################################### -- name: Load openshift_facts +- name: Load openshift_facts and update repos    hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config:oo_lb_to_config    roles:    - openshift_facts +  - openshift_repos + +- name: Set openshift_no_proxy_internal_hostnames +  hosts: oo_masters_to_config:oo_nodes_to_config +  tasks: +  - set_fact: +      openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] +                                                    | union(groups['oo_masters_to_config']) +                                                    | union(groups['oo_etcd_to_config'] | default([]))) +                                                | oo_collect('openshift.common.hostname') | default([]) | join (',') +                                                }}" +    when: "{{ (openshift_http_proxy is defined or openshift_https_proxy 
is defined) and +            openshift_generate_no_proxy_hosts | default(True) | bool }}"  - name: Evaluate additional groups for upgrade    hosts: localhost @@ -52,7 +65,7 @@          valid version for a {{ target_version }} upgrade      when: openshift_image_tag is defined and openshift_image_tag.split('v',1).1 | version_compare(target_version ,'<') -- name: Verify upgrade can proceed +- name: Verify master processes    hosts: oo_masters_to_config    roles:    - openshift_facts @@ -83,7 +96,7 @@        enabled: yes      when: openshift.master.ha is defined and openshift.master.ha | bool and openshift.common.is_containerized | bool -- name: Verify upgrade can proceed +- name: Verify node processes    hosts: oo_nodes_to_config    roles:    - openshift_facts @@ -95,12 +108,12 @@        enabled: yes      when: openshift.common.is_containerized | bool -- name: Verify upgrade can proceed +- name: Verify upgrade targets    hosts: oo_masters_to_config:oo_nodes_to_config    vars:      target_version: "{{ '1.2' if deployment_type == 'origin' else '3.1.1.900' }}" -    openshift_docker_hosted_registry_insecure: True      openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}" +    upgrading: True    handlers:    - include: ../../../../../roles/openshift_master/handlers/main.yml    - include: ../../../../../roles/openshift_node/handlers/main.yml @@ -109,7 +122,7 @@    # are modified to use the correct image tag.  However, this can trigger a    # docker restart if new configuration is laid down which would immediately    # pull the latest image and defeat the purpose of these tasks. -  - openshift_cli +  - { role: openshift_cli }    pre_tasks:    - name: Clean package cache      command: "{{ ansible_pkg_mgr }} clean all" @@ -199,6 +212,9 @@        msg: Upgrade packages not found      when: openshift_image_tag is not defined and (g_aos_versions.avail_version | default(g_aos_versions.curr_version, true) | version_compare(target_version, '<')) +- name: Verify docker upgrade targets +  hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config +  tasks:    - name: Determine available Docker      script: ../files/rpm_versions.sh docker      register: g_docker_version_result @@ -253,7 +269,7 @@  - name: Backup etcd    hosts: etcd_hosts_to_backup    vars: -    embedded_etcd: "{{ openshift.master.embedded_etcd }}" +    embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}"      timestamp: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"    roles:    - openshift_facts diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml index a91727ecd..964257af5 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml @@ -4,7 +4,7 @@  ###############################################################################  - name: Upgrade docker -  hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config +  hosts: oo_masters_to_config:oo_nodes_to_config    roles:    - openshift_facts    tasks: @@ -20,6 +20,15 @@          openshift_image_tag: "v{{ g_new_version }}"          openshift_version: "{{ g_new_version }}" +- name: Upgrade docker +  hosts: oo_etcd_to_config +  roles: +  - openshift_facts +  tasks: +  # Upgrade docker when host is not atomic and host is not a non-containerized etcd node +  - include: docker_upgrade.yml +    when: not 
openshift.common.is_atomic | bool and not ('oo_etcd_to_config' in group_names and not openshift.common.is_containerized) +  # The cli image is used by openshift_docker_facts to determine the currently installed  # version.  We need to explicitly pull the latest image to handle cases where  # the locally cached 'latest' tag is older the g_new_version. @@ -27,12 +36,15 @@    hosts: oo_masters_to_config:oo_nodes_to_config    roles:    - { role: openshift_docker_facts } +  vars: +    openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"    tasks:    - name: Pull Images      command: >        docker pull {{ item }}:latest      with_items:      - "{{ openshift.common.cli_image }}" +    when: openshift.common.is_containerized | bool  ###############################################################################  # Upgrade Masters @@ -128,8 +140,8 @@    vars:      origin_reconcile_bindings: "{{ deployment_type == 'origin' and g_new_version | version_compare('1.0.6', '>') }}"      ent_reconcile_bindings: true -    openshift_docker_hosted_registry_insecure: True      openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}" +    upgrading: True    tasks:    - name: Verifying the correct commandline tools are available      shell: grep {{ verify_upgrade_version }} {{ openshift.common.admin_binary}} diff --git a/playbooks/common/openshift-cluster/validate_hostnames.yml b/playbooks/common/openshift-cluster/validate_hostnames.yml index 0f562e019..50e25984f 100644 --- a/playbooks/common/openshift-cluster/validate_hostnames.yml +++ b/playbooks/common/openshift-cluster/validate_hostnames.yml @@ -1,6 +1,4 @@  --- -- include: evaluate_groups.yml -  - name: Gather and set facts for node hosts    hosts: oo_nodes_to_config    roles: diff --git a/playbooks/common/openshift-etcd/config.yml b/playbooks/common/openshift-etcd/config.yml index 01c092625..a95de8cf3 100644 --- a/playbooks/common/openshift-etcd/config.yml +++ b/playbooks/common/openshift-etcd/config.yml @@ -1,6 +1,7 @@  ---  - name: Set etcd facts needed for generating certs    hosts: oo_etcd_to_config +  any_errors_fatal: true    roles:    - openshift_facts    tasks: @@ -53,7 +54,7 @@          -C {{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }} .      args:        creates: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz" -    with_items: etcd_needing_server_certs +    with_items: "{{ etcd_needing_server_certs | default([]) }}"    - name: Retrieve the etcd cert tarballs      fetch:        src: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz" @@ -61,7 +62,7 @@        flat: yes        fail_on_missing: yes        validate_checksum: yes -    with_items: etcd_needing_server_certs +    with_items: "{{ etcd_needing_server_certs | default([]) }}"  # Configure a first etcd host to avoid conflicts in choosing a leader  # if other members come online too quickly. 
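The with_items changes in the etcd play above (and the matching ones in the master and node certificate plays later in this diff) wrap bare list variables in full Jinja expressions with a default([]) fallback. Ansible 2.0 deprecates bare variable names in with_items, and without the default the loop fails on runs where no certs are needed and the list was never set; the quoted form sidesteps both. A minimal before/after sketch of the pattern (the debug task is just a stand-in):

# brittle under Ansible 2.0: bare variable, and undefined when no certs are needed
- debug: msg="{{ item }}"
  with_items: etcd_needing_server_certs

# equivalent, but explicit and safe when the variable is missing
- debug: msg="{{ item }}"
  with_items: "{{ etcd_needing_server_certs | default([]) }}"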
diff --git a/playbooks/common/openshift-loadbalancer/config.yml b/playbooks/common/openshift-loadbalancer/config.yml new file mode 100644 index 000000000..f4392173a --- /dev/null +++ b/playbooks/common/openshift-loadbalancer/config.yml @@ -0,0 +1,5 @@ +--- +- name: Configure load balancers +  hosts: oo_lb_to_config +  roles: +  - role: openshift_loadbalancer diff --git a/playbooks/adhoc/noc/filter_plugins b/playbooks/common/openshift-loadbalancer/filter_plugins index 99a95e4ca..99a95e4ca 120000 --- a/playbooks/adhoc/noc/filter_plugins +++ b/playbooks/common/openshift-loadbalancer/filter_plugins diff --git a/playbooks/common/openshift-loadbalancer/lookup_plugins b/playbooks/common/openshift-loadbalancer/lookup_plugins new file mode 120000 index 000000000..ac79701db --- /dev/null +++ b/playbooks/common/openshift-loadbalancer/lookup_plugins @@ -0,0 +1 @@ +../../../lookup_plugins
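The new openshift-loadbalancer/config.yml above replaces the inline haproxy play that is dropped from openshift-master/config.yml further down in this diff: load-balancer hosts now get their own pass through the openshift_loadbalancer role. For orientation, the include ordering in common/openshift-cluster/config.yml after this change looks roughly like this (abridged; intermediate plays elided):

---
- include: evaluate_groups.yml
- include: initialize_facts.yml
- include: validate_hostnames.yml
# ... oo_option facts and other component includes elided ...
- include: ../openshift-nfs/config.yml
- include: ../openshift-loadbalancer/config.yml
- include: ../openshift-master/config.yml
- include: additional_config.yml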
\ No newline at end of file
diff --git a/playbooks/common/openshift-loadbalancer/roles b/playbooks/common/openshift-loadbalancer/roles
new file mode 120000
index 000000000..e2b799b9d
--- /dev/null
+++ b/playbooks/common/openshift-loadbalancer/roles
@@ -0,0 +1 @@
+../../../roles/
\ No newline at end of file diff --git a/playbooks/common/openshift-loadbalancer/service.yml b/playbooks/common/openshift-loadbalancer/service.yml new file mode 100644 index 000000000..e06a14c89 --- /dev/null +++ b/playbooks/common/openshift-loadbalancer/service.yml @@ -0,0 +1,20 @@ +--- +- name: Populate g_service_nodes host group if needed +  hosts: localhost +  connection: local +  become: no +  gather_facts: no +  tasks: +  - fail: msg="new_cluster_state is required to be injected in this playbook" +    when: new_cluster_state is not defined + +  - name: Evaluate g_service_lb +    add_host: name={{ item }} groups=g_service_lb +    with_items: oo_host_group_exp | default([]) + +- name: Change state on lb instance(s) +  hosts: g_service_lb +  connection: ssh +  gather_facts: no +  tasks: +    - service: name=haproxy state="{{ new_cluster_state }}" diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml index 060b5aa0d..8fd0904d6 100644 --- a/playbooks/common/openshift-master/config.yml +++ b/playbooks/common/openshift-master/config.yml @@ -46,7 +46,7 @@        openshift_hosted_metrics_duration: "{{ lookup('oo_option', 'openshift_hosted_metrics_duration') | default(7) }}"      when: openshift_hosted_metrics_duration is not defined    - set_fact: -      openshift_hosted_metrics_resolution: "{{ lookup('oo_option', 'openshift_hosted_metrics_resolution') | default(10) }}" +      openshift_hosted_metrics_resolution: "{{ lookup('oo_option', 'openshift_hosted_metrics_resolution') | default('10s', true) }}"      when: openshift_hosted_metrics_resolution is not defined    roles:    - openshift_facts @@ -120,7 +120,7 @@          -C {{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }} .      args:        creates: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz" -    with_items: etcd_needing_client_certs +    with_items: "{{ etcd_needing_client_certs | default([]) }}"    - name: Retrieve the etcd cert tarballs      fetch:        src: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz" @@ -128,7 +128,7 @@        flat: yes        fail_on_missing: yes        validate_checksum: yes -    with_items: etcd_needing_client_certs +    with_items: "{{ etcd_needing_client_certs | default([]) }}"  - name: Copy the external etcd certs to the masters    hosts: oo_masters_to_config @@ -178,7 +178,7 @@    - name: Check status of master certificates      stat:        path: "{{ openshift.common.config_base }}/master/{{ item }}" -    with_items: openshift_master_certs +    with_items: "{{ openshift_master_certs }}"      register: g_master_cert_stat_result    - set_fact:        master_certs_missing: "{{ False in (g_master_cert_stat_result.results @@ -186,11 +186,6 @@                                  | list ) }}"        master_cert_subdir: master-{{ openshift.common.hostname }}        master_cert_config_dir: "{{ openshift.common.config_base }}/master" -  - set_fact: -      openshift_infra_nodes: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']) -                                 | oo_nodes_with_label('region', 'infra') -                                 | oo_collect('inventory_hostname') }}" -    when: openshift_infra_nodes is not defined and groups.oo_nodes_to_config | default([]) | length > 0  - name: Configure master certificates    hosts: oo_first_master @@ -204,6 +199,7 @@                                 | oo_collect('openshift.common.all_hostnames')                                 | oo_flatten | unique }}"      sync_tmpdir: 
"{{ hostvars.localhost.g_master_mktemp.stdout }}" +    openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"    roles:    - openshift_master_certificates    post_tasks: @@ -213,7 +209,7 @@        state: absent      when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config      with_nested: -    - masters_needing_certs +    - "{{ masters_needing_certs | default([]) }}"      - - master.etcd-client.crt        - master.etcd-client.key @@ -223,7 +219,7 @@          -C {{ master_generated_certs_dir }}/{{ item.master_cert_subdir }} .      args:        creates: "{{ master_generated_certs_dir }}/{{ item.master_cert_subdir }}.tgz" -    with_items: masters_needing_certs +    with_items: "{{ masters_needing_certs | default([]) }}"    - name: Retrieve the master cert tarball from the master      fetch: @@ -232,34 +228,7 @@        flat: yes        fail_on_missing: yes        validate_checksum: yes -    with_items: masters_needing_certs - -- name: Configure load balancers -  hosts: oo_lb_to_config -  vars: -    sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}" -    haproxy_limit_nofile: 100000 -    haproxy_global_maxconn: 20000 -    haproxy_default_maxconn: 20000 -    haproxy_frontend_port: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_port }}" -    haproxy_frontends: -    - name: atomic-openshift-api -      mode: tcp -      options: -      - tcplog -      binds: -      - "*:{{ hostvars[groups.oo_first_master.0].openshift.master.api_port }}" -      default_backend: atomic-openshift-api -    haproxy_backends: -    - name: atomic-openshift-api -      mode: tcp -      option: tcplog -      balance: source -      servers: "{{ hostvars | oo_select_keys(groups['oo_masters']) | oo_haproxy_backend_masters }}" -  roles: -  - role: openshift_facts -  - role: haproxy -    when: hostvars[groups.oo_first_master.0].openshift.master.ha | bool +    with_items: "{{ masters_needing_certs | default([]) }}"  - name: Check for cached session secrets    hosts: oo_first_master @@ -346,6 +315,14 @@      openshift_master_count: "{{ openshift.master.master_count }}"      openshift_master_session_auth_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_auth_secrets }}"      openshift_master_session_encryption_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_encryption_secrets }}" +    openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}" +    openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] +                                                    | union(groups['oo_masters_to_config']) +                                                    | union(groups['oo_etcd_to_config'] | default([]))) +                                                | oo_collect('openshift.common.hostname') | default([]) | join (',') +                                                }}" +    when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and  +            openshift_generate_no_proxy_hosts | default(True) | bool }}"    pre_tasks:    - name: Ensure certificate directory exists      file: @@ -367,13 +344,6 @@      group_by: key=oo_masters_deployment_type_{{ openshift.common.deployment_type }}      changed_when: False -# Additional instance config for online deployments -- name: Additional instance config -  hosts: oo_masters_deployment_type_online -  roles: -  - pods -  - os_env_extras -  
- name: Delete temporary directory on localhost    hosts: localhost    connection: local diff --git a/playbooks/common/openshift-master/library b/playbooks/common/openshift-master/library new file mode 120000 index 000000000..d0b7393d3 --- /dev/null +++ b/playbooks/common/openshift-master/library @@ -0,0 +1 @@ +../../../library/
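The library symlink added above, like the one added under openshift-cluster earlier in this diff, points the common playbooks at the repository-level library/ directory; the master-specific copy of modify_yaml.py is deleted just below, so the module presumably now lives once at the top level (outside this playbooks-only diffstat). Its interface is unchanged per the module's own documentation; a minimal invocation looks like:

# taken from the module's documented example; the target file and value are illustrative
- modify_yaml:
    dest: /etc/origin/master/master-config.yaml
    yaml_key: kubernetesMasterConfig.masterCount
    yaml_value: 2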
\ No newline at end of file diff --git a/playbooks/common/openshift-master/library/modify_yaml.py b/playbooks/common/openshift-master/library/modify_yaml.py deleted file mode 100755 index a4be10ca3..000000000 --- a/playbooks/common/openshift-master/library/modify_yaml.py +++ /dev/null @@ -1,95 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# vim: expandtab:tabstop=4:shiftwidth=4 - -''' modify_yaml ansible module ''' - -import yaml - -DOCUMENTATION = ''' ---- -module: modify_yaml -short_description: Modify yaml key value pairs -author: Andrew Butcher -requirements: [ ] -''' -EXAMPLES = ''' -- modify_yaml: -    dest: /etc/origin/master/master-config.yaml -    yaml_key: 'kubernetesMasterConfig.masterCount' -    yaml_value: 2 -''' - -def main(): -    ''' Modify key (supplied in jinja2 dot notation) in yaml file, setting -        the key to the desired value. -    ''' - -    # disabling pylint errors for global-variable-undefined and invalid-name -    # for 'global module' usage, since it is required to use ansible_facts -    # pylint: disable=global-variable-undefined, invalid-name, -    # redefined-outer-name -    global module - -    module = AnsibleModule( -        argument_spec=dict( -            dest=dict(required=True), -            yaml_key=dict(required=True), -            yaml_value=dict(required=True), -            backup=dict(required=False, default=True, type='bool'), -        ), -        supports_check_mode=True, -    ) - -    dest = module.params['dest'] -    yaml_key = module.params['yaml_key'] -    yaml_value = module.safe_eval(module.params['yaml_value']) -    backup = module.params['backup'] - -    # Represent null values as an empty string. -    # pylint: disable=missing-docstring, unused-argument -    def none_representer(dumper, data): -        return yaml.ScalarNode(tag=u'tag:yaml.org,2002:null', value=u'') -    yaml.add_representer(type(None), none_representer) - -    try: -        changes = [] - -        yaml_file = open(dest) -        yaml_data = yaml.safe_load(yaml_file.read()) -        yaml_file.close() - -        ptr = yaml_data -        for key in yaml_key.split('.'): -            if key not in ptr and key != yaml_key.split('.')[-1]: -                ptr[key] = {} -            elif key == yaml_key.split('.')[-1]: -                if (key in ptr and module.safe_eval(ptr[key]) != yaml_value) or (key not in ptr): -                    ptr[key] = yaml_value -                    changes.append((yaml_key, yaml_value)) -            else: -                ptr = ptr[key] - -        if len(changes) > 0: -            if backup: -                module.backup_local(dest) -            yaml_file = open(dest, 'w') -            yaml_string = yaml.dump(yaml_data, default_flow_style=False) -            yaml_string = yaml_string.replace('\'\'', '""') -            yaml_file.write(yaml_string) -            yaml_file.close() - -        return module.exit_json(changed=(len(changes) > 0), changes=changes) - -    # ignore broad-except error to avoid stack trace to ansible user -    # pylint: disable=broad-except -    except Exception, e: -        return module.fail_json(msg=str(e)) - -# ignore pylint errors related to the module_utils import -# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import -# import module snippets -from ansible.module_utils.basic import * - -if __name__ == '__main__': -    main() diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml index 264935a63..80659dc52 100644 --- 
a/playbooks/common/openshift-node/config.yml +++ b/playbooks/common/openshift-node/config.yml @@ -66,7 +66,7 @@          -C {{ item.config_dir }} .      args:        creates: "{{ item.config_dir }}.tgz" -    with_items: nodes_needing_certs +    with_items: "{{ nodes_needing_certs | default([]) }}"    - name: Retrieve the node config tarballs from the master      fetch: @@ -75,7 +75,7 @@        flat: yes        fail_on_missing: yes        validate_checksum: yes -    with_items: nodes_needing_certs +    with_items: "{{ nodes_needing_certs | default([]) }}"  - name: Deploy node certificates    hosts: oo_nodes_to_config @@ -115,11 +115,14 @@    vars:      openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"      openshift_node_first_master_ip: "{{ hostvars[groups.oo_first_master.0].openshift.common.ip }}" -    # TODO: configure these based on -    # hostvars[groups.oo_first_master.0].openshift.hosted.registry instead of -    # hardcoding -    openshift_docker_hosted_registry_insecure: True      openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}" +    openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] +                                                    | union(groups['oo_masters_to_config']) +                                                    | union(groups['oo_etcd_to_config'] | default([]))) +                                                | oo_collect('openshift.common.hostname') | default([]) | join (',') +                                                }}" +    when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and +            openshift_generate_no_proxy_hosts | default(True) | bool }}"    roles:    - openshift_node @@ -128,11 +131,14 @@    vars:      openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"      openshift_node_first_master_ip: "{{ hostvars[groups.oo_first_master.0].openshift.common.ip }}" -    # TODO: configure these based on -    # hostvars[groups.oo_first_master.0].openshift.hosted.registry instead of -    # hardcoding -    openshift_docker_hosted_registry_insecure: True      openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}" +    openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] +                                                    | union(groups['oo_masters_to_config']) +                                                    | union(groups['oo_etcd_to_config'] | default([]))) +                                                | oo_collect('openshift.common.hostname') | default([]) | join (',') +                                                }}" +    when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and +            openshift_generate_no_proxy_hosts | default(True) | bool }}"    roles:    - openshift_node @@ -148,15 +154,15 @@      register: g_external_etcd_flannel_cert_stat_result      when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config and (openshift.common.use_flannel | bool)    - set_fact: -      etcd_client_flannel_certs_missing: "{{ g_external_etcd_flannel_cert_stat_result.results +      etcd_client_flannel_certs_missing: "{{ False in g_external_etcd_flannel_cert_stat_result.results                                               | oo_collect(attribute='stat.exists') -                        
                     | list | intersect([false])}}" +                                             | list }}"        etcd_cert_subdir: openshift-node-{{ openshift.common.hostname }}        etcd_cert_config_dir: "{{ openshift.common.config_base }}/node"        etcd_cert_prefix: node.etcd-        etcd_hostname: "{{ openshift.common.hostname }}"        etcd_ip: "{{ openshift.common.ip }}" -    when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config and (openshift.common.use_flannel | bool) +    when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 and (openshift.common.use_flannel | bool)  - name: Configure flannel etcd certificates    hosts: oo_first_etcd @@ -166,9 +172,8 @@    pre_tasks:    - set_fact:        etcd_needing_client_certs: "{{ hostvars -                                   | oo_select_keys(groups['oo_nodes_to_config']) -                                   | oo_filter_list(filter_attr='etcd_client_flannel_certs_missing') | default([]) }}" -    when: etcd_client_flannel_certs_missing is defined and etcd_client_flannel_certs_missing +                                     | oo_select_keys(groups['oo_nodes_to_config']) +                                     | oo_filter_list('etcd_client_flannel_certs_missing') | default([]) }}"    roles:    - role: openshift_etcd_certificates      when: openshift_use_flannel | default(false) | bool @@ -179,8 +184,7 @@          -C {{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }} .      args:        creates: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz" -    with_items: etcd_needing_client_certs -    when: etcd_client_flannel_certs_missing is defined and etcd_client_flannel_certs_missing +    with_items: "{{ etcd_needing_client_certs | default([]) }}"    - name: Retrieve the etcd cert tarballs      fetch:        src: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz" @@ -188,8 +192,7 @@        flat: yes        fail_on_missing: yes        validate_checksum: yes -    with_items: etcd_needing_client_certs -    when: etcd_client_flannel_certs_missing is defined and etcd_client_flannel_certs_missing +    with_items: "{{ etcd_needing_client_certs | default([]) }}"  - name: Copy the external etcd flannel certs to the nodes    hosts: oo_nodes_to_config @@ -200,12 +203,12 @@      file:        path: "{{ openshift.common.config_base }}/node"        state: directory -    when: etcd_client_flannel_certs_missing is defined and etcd_client_flannel_certs_missing +    when: etcd_client_flannel_certs_missing | default(false) | bool    - name: Unarchive the tarball on the master      unarchive:        src: "{{ sync_tmpdir }}/{{ etcd_cert_subdir }}.tgz"        dest: "{{ etcd_cert_config_dir }}" -    when: etcd_client_flannel_certs_missing is defined and etcd_client_flannel_certs_missing +    when: etcd_client_flannel_certs_missing | default(false) | bool    - file:        path: "{{ etcd_cert_config_dir }}/{{ item }}"        owner: root @@ -215,7 +218,7 @@      - node.etcd-client.crt      - node.etcd-client.key      - node.etcd-ca.crt -    when: etcd_client_flannel_certs_missing is defined and etcd_client_flannel_certs_missing +    when: etcd_client_flannel_certs_missing | default(false) | bool  - name: Additional node config @@ -245,14 +248,6 @@    - file: name={{ mktemp.stdout }} state=absent      changed_when: False -# Additional config for online type deployments -- name: Additional instance config -  hosts: oo_nodes_deployment_type_online -  gather_facts: no -  roles: -  - os_env_extras 
-  - os_env_extras_node -  - name: Set schedulability    hosts: oo_first_master    vars: diff --git a/playbooks/common/openshift-node/scaleup.yml b/playbooks/common/openshift-node/scaleup.yml index d36f7acea..1d79db353 100644 --- a/playbooks/common/openshift-node/scaleup.yml +++ b/playbooks/common/openshift-node/scaleup.yml @@ -1,6 +1,11 @@  ---  - include: ../openshift-cluster/evaluate_groups.yml +- name: Gather facts +  hosts: oo_etcd_to_config:oo_masters_to_config:oo_nodes_to_config +  roles: +  - openshift_facts +  - name: Configure docker hosts    hosts: oo_nodes_to_config    vars: diff --git a/playbooks/gce/openshift-cluster/config.yml b/playbooks/gce/openshift-cluster/config.yml index 475d29293..97572b930 100644 --- a/playbooks/gce/openshift-cluster/config.yml +++ b/playbooks/gce/openshift-cluster/config.yml @@ -1,8 +1,23 @@  --- +- hosts: localhost +  gather_facts: no +  tasks: +  - include_vars: vars.yml +  - include_vars: cluster_hosts.yml +  - add_host: +      name: "{{ item }}" +      groups: l_oo_all_hosts +      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" +      ansible_become: "{{ deployment_vars[deployment_type].become }}" +    with_items: g_all_hosts + +- hosts: l_oo_all_hosts +  gather_facts: no +  tasks: +  - include_vars: vars.yml +  - include_vars: cluster_hosts.yml +  - include: ../../common/openshift-cluster/config.yml -  vars_files: -  - ../../gce/openshift-cluster/vars.yml -  - ../../gce/openshift-cluster/cluster_hosts.yml    vars:      g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"      g_sudo: "{{ deployment_vars[deployment_type].become }}" @@ -11,9 +26,8 @@      openshift_debug_level: "{{ debug_level }}"      openshift_deployment_type: "{{ deployment_type }}"      openshift_hostname: "{{ gce_private_ip }}" -    openshift_registry_selector: 'type=infra' +    openshift_hosted_registry_selector: 'type=infra'      openshift_hosted_router_selector: 'type=infra' -    openshift_infra_nodes: "{{ g_infra_hosts }}"      openshift_master_cluster_method: 'native'      openshift_use_openshift_sdn: "{{ lookup('oo_option', 'use_openshift_sdn') }}"      os_sdn_network_plugin_name: "{{ lookup('oo_option', 'sdn_network_plugin_name') }}" diff --git a/playbooks/gce/openshift-cluster/library/gce.py b/playbooks/gce/openshift-cluster/library/gce.py new file mode 100644 index 000000000..fcaa3b850 --- /dev/null +++ b/playbooks/gce/openshift-cluster/library/gce.py @@ -0,0 +1,543 @@ +#!/usr/bin/python +# Copyright 2013 Google Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible.  If not, see <http://www.gnu.org/licenses/>. + +DOCUMENTATION = ''' +--- +module: gce +version_added: "1.4" +short_description: create or terminate GCE instances +description: +     - Creates or terminates Google Compute Engine (GCE) instances.  See +       U(https://cloud.google.com/products/compute-engine) for an overview. 
+       Full install/configuration instructions for the gce* modules can +       be found in the comments of ansible/test/gce_tests.py. +options: +  image: +    description: +       - image string to use for the instance +    required: false +    default: "debian-7" +  instance_names: +    description: +      - a comma-separated list of instance names to create or destroy +    required: false +    default: null +  machine_type: +    description: +      - machine type to use for the instance, use 'n1-standard-1' by default +    required: false +    default: "n1-standard-1" +  metadata: +    description: +      - a hash/dictionary of custom data for the instance; +        '{"key":"value", ...}' +    required: false +    default: null +  service_account_email: +    version_added: "1.5.1" +    description: +      - service account email +    required: false +    default: null +  service_account_permissions: +    version_added: "2.0" +    description: +      - service account permissions (see +        U(https://cloud.google.com/sdk/gcloud/reference/compute/instances/create), +        --scopes section for detailed information) +    required: false +    default: null +    choices: [ +      "bigquery", "cloud-platform", "compute-ro", "compute-rw", +      "computeaccounts-ro", "computeaccounts-rw", "datastore", "logging-write", +      "monitoring", "sql", "sql-admin", "storage-full", "storage-ro", +      "storage-rw", "taskqueue", "userinfo-email" +    ] +  pem_file: +    version_added: "1.5.1" +    description: +      - path to the pem file associated with the service account email +    required: false +    default: null +  project_id: +    version_added: "1.5.1" +    description: +      - your GCE project ID +    required: false +    default: null +  name: +    description: +      - identifier when working with a single instance +    required: false +  network: +    description: +      - name of the network, 'default' will be used if not specified +    required: false +    default: "default" +  persistent_boot_disk: +    description: +      - if set, create the instance with a persistent boot disk +    required: false +    default: "false" +  disks: +    description: +      - a list of persistent disks to attach to the instance; a string value +        gives the name of the disk; alternatively, a dictionary value can +        define 'name' and 'mode' ('READ_ONLY' or 'READ_WRITE'). The first entry +        will be the boot disk (which must be READ_WRITE). 
+    required: false +    default: null +    version_added: "1.7" +  state: +    description: +      - desired state of the resource +    required: false +    default: "present" +    choices: ["active", "present", "absent", "deleted"] +  tags: +    description: +      - a comma-separated list of tags to associate with the instance +    required: false +    default: null +  zone: +    description: +      - the GCE zone to use +    required: true +    default: "us-central1-a" +  ip_forward: +    version_added: "1.9" +    description: +      - set to true if the instance can forward ip packets (useful for +        gateways) +    required: false +    default: "false" +  external_ip: +    version_added: "1.9" +    description: +      - type of external ip, ephemeral by default +    required: false +    default: "ephemeral" +  disk_auto_delete: +    version_added: "1.9" +    description: +      - if set boot disk will be removed after instance destruction +    required: false +    default: "true" + +requirements: +    - "python >= 2.6" +    - "apache-libcloud >= 0.13.3" +notes: +  - Either I(name) or I(instance_names) is required. +author: "Eric Johnson (@erjohnso) <erjohnso@google.com>" +''' + +EXAMPLES = ''' +# Basic provisioning example.  Create a single Debian 7 instance in the +# us-central1-a Zone of n1-standard-1 machine type. +- local_action: +    module: gce +    name: test-instance +    zone: us-central1-a +    machine_type: n1-standard-1 +    image: debian-7 + +# Example using defaults and with metadata to create a single 'foo' instance +- local_action: +    module: gce +    name: foo +    metadata: '{"db":"postgres", "group":"qa", "id":500}' + + +# Launch instances from a control node, runs some tasks on the new instances, +# and then terminate them +- name: Create a sandbox instance +  hosts: localhost +  vars: +    names: foo,bar +    machine_type: n1-standard-1 +    image: debian-6 +    zone: us-central1-a +    service_account_email: unique-email@developer.gserviceaccount.com +    pem_file: /path/to/pem_file +    project_id: project-id +  tasks: +    - name: Launch instances +      local_action: gce instance_names={{names}} machine_type={{machine_type}} +                    image={{image}} zone={{zone}} +                    service_account_email={{ service_account_email }} +                    pem_file={{ pem_file }} project_id={{ project_id }} +      register: gce +    - name: Wait for SSH to come up +      local_action: wait_for host={{item.public_ip}} port=22 delay=10 +                    timeout=60 state=started +      with_items: {{gce.instance_data}} + +- name: Configure instance(s) +  hosts: launched +  sudo: True +  roles: +    - my_awesome_role +    - my_awesome_tasks + +- name: Terminate instances +  hosts: localhost +  connection: local +  tasks: +    - name: Terminate instances that were previously launched +      local_action: +        module: gce +        state: 'absent' +        instance_names: {{gce.instance_names}} + +''' + +try: +    import libcloud +    from libcloud.compute.types import Provider +    from libcloud.compute.providers import get_driver +    from libcloud.common.google import GoogleBaseError, QuotaExceededError, \ +        ResourceExistsError, ResourceInUseError, ResourceNotFoundError +    _ = Provider.GCE +    HAS_LIBCLOUD = True +except ImportError: +    HAS_LIBCLOUD = False + +try: +    from ast import literal_eval +    HAS_PYTHON26 = True +except ImportError: +    HAS_PYTHON26 = False + + +def get_instance_info(inst): +    """Retrieves instance 
information from an instance object and returns it +    as a dictionary. + +    """ +    metadata = {} +    if 'metadata' in inst.extra and 'items' in inst.extra['metadata']: +        for md in inst.extra['metadata']['items']: +            metadata[md['key']] = md['value'] + +    try: +        netname = inst.extra['networkInterfaces'][0]['network'].split('/')[-1] +    except: +        netname = None +    if 'disks' in inst.extra: +        disk_names = [disk_info['source'].split('/')[-1] +                      for disk_info +                      in sorted(inst.extra['disks'], +                                key=lambda disk_info: disk_info['index'])] +    else: +        disk_names = [] + +    if len(inst.public_ips) == 0: +        public_ip = None +    else: +        public_ip = inst.public_ips[0] + +    return({ +        'image': inst.image is not None and inst.image.split('/')[-1] or None, +        'disks': disk_names, +        'machine_type': inst.size, +        'metadata': metadata, +        'name': inst.name, +        'network': netname, +        'private_ip': inst.private_ips[0], +        'public_ip': public_ip, +        'status': ('status' in inst.extra) and inst.extra['status'] or None, +        'tags': ('tags' in inst.extra) and inst.extra['tags'] or [], +        'zone': ('zone' in inst.extra) and inst.extra['zone'].name or None, +    }) + + +def create_instances(module, gce, instance_names): +    """Creates new instances. Attributes other than instance_names are picked +    up from 'module' + +    module : AnsibleModule object +    gce: authenticated GCE libcloud driver +    instance_names: python list of instance names to create + +    Returns: +        A list of dictionaries with instance information +        about the instances that were launched. + +    """ +    image = module.params.get('image') +    machine_type = module.params.get('machine_type') +    metadata = module.params.get('metadata') +    network = module.params.get('network') +    persistent_boot_disk = module.params.get('persistent_boot_disk') +    disks = module.params.get('disks') +    state = module.params.get('state') +    tags = module.params.get('tags') +    zone = module.params.get('zone') +    ip_forward = module.params.get('ip_forward') +    external_ip = module.params.get('external_ip') +    disk_auto_delete = module.params.get('disk_auto_delete') +    service_account_permissions = module.params.get('service_account_permissions') +    service_account_email = module.params.get('service_account_email') + +    if external_ip == "none": +        external_ip = None + +    new_instances = [] +    changed = False + +    lc_image = gce.ex_get_image(image) +    lc_disks = [] +    disk_modes = [] +    for i, disk in enumerate(disks or []): +        if isinstance(disk, dict): +            lc_disks.append(gce.ex_get_volume(disk['name'])) +            disk_modes.append(disk['mode']) +        else: +            lc_disks.append(gce.ex_get_volume(disk)) +            # boot disk is implicitly READ_WRITE +            disk_modes.append('READ_ONLY' if i > 0 else 'READ_WRITE') +    lc_network = gce.ex_get_network(network) +    lc_machine_type = gce.ex_get_size(machine_type) +    lc_zone = gce.ex_get_zone(zone) + +    # Try to convert the user's metadata value into the format expected +    # by GCE.  First try to ensure user has proper quoting of a +    # dictionary-like syntax using 'literal_eval', then convert the python +    # dict into a python list of 'key' / 'value' dicts.  
Should end up +    # with: +    # [ {'key': key1, 'value': value1}, {'key': key2, 'value': value2}, ...] +    if metadata: +        if isinstance(metadata, dict): +            md = metadata +        else: +            try: +                md = literal_eval(str(metadata)) +                if not isinstance(md, dict): +                    raise ValueError('metadata must be a dict') +            except ValueError as e: +                module.fail_json(msg='bad metadata: %s' % str(e)) +            except SyntaxError as e: +                module.fail_json(msg='bad metadata syntax') + +    if hasattr(libcloud, '__version__') and libcloud.__version__ < '0.15': +        items = [] +        for k, v in md.items(): +            items.append({"key": k, "value": v}) +        metadata = {'items': items} +    else: +        metadata = md + +    ex_sa_perms = [] +    bad_perms = [] +    if service_account_permissions: +        for perm in service_account_permissions: +            if perm not in gce.SA_SCOPES_MAP.keys(): +                bad_perms.append(perm) +        if len(bad_perms) > 0: +            module.fail_json(msg='bad permissions: %s' % str(bad_perms)) +        if service_account_email: +            ex_sa_perms.append({'email': service_account_email}) +        else: +            ex_sa_perms.append({'email': "default"}) +        ex_sa_perms[0]['scopes'] = service_account_permissions + +    # These variables all have default values but check just in case +    if not lc_image or not lc_network or not lc_machine_type or not lc_zone: +        module.fail_json(msg='Missing required create instance variable', +                         changed=False) + +    for name in instance_names: +        pd = None +        if lc_disks: +            pd = lc_disks[0] +        elif persistent_boot_disk: +            try: +                pd = gce.create_volume(None, "%s" % name, image=lc_image) +            except ResourceExistsError: +                pd = gce.ex_get_volume("%s" % name, lc_zone) +        inst = None +        try: +            inst = gce.create_node( +                name, lc_machine_type, lc_image, location=lc_zone, +                ex_network=network, ex_tags=tags, ex_metadata=metadata, +                ex_boot_disk=pd, ex_can_ip_forward=ip_forward, +                external_ip=external_ip, ex_disk_auto_delete=disk_auto_delete, +                ex_service_accounts=ex_sa_perms +            ) +            changed = True +        except ResourceExistsError: +            inst = gce.ex_get_node(name, lc_zone) +        except GoogleBaseError as e: +            module.fail_json(msg='Unexpected error attempting to create ' + +                             'instance %s, error: %s' % (name, e.value)) + +        for i, lc_disk in enumerate(lc_disks): +            # Check whether the disk is already attached +            if (len(inst.extra['disks']) > i): +                attached_disk = inst.extra['disks'][i] +                if attached_disk['source'] != lc_disk.extra['selfLink']: +                    module.fail_json( +                        msg=("Disk at index %d does not match: requested=%s found=%s" % ( +                            i, lc_disk.extra['selfLink'], attached_disk['source']))) +                elif attached_disk['mode'] != disk_modes[i]: +                    module.fail_json( +                        msg=("Disk at index %d is in the wrong mode: requested=%s found=%s" % ( +                            i, disk_modes[i], attached_disk['mode']))) +                else: +                    
continue +            gce.attach_volume(inst, lc_disk, ex_mode=disk_modes[i]) +            # Work around libcloud bug: attached volumes don't get added +            # to the instance metadata. get_instance_info() only cares about +            # source and index. +            if len(inst.extra['disks']) != i+1: +                inst.extra['disks'].append( +                    {'source': lc_disk.extra['selfLink'], 'index': i}) + +        if inst: +            new_instances.append(inst) + +    instance_names = [] +    instance_json_data = [] +    for inst in new_instances: +        d = get_instance_info(inst) +        instance_names.append(d['name']) +        instance_json_data.append(d) + +    return (changed, instance_json_data, instance_names) + + +def terminate_instances(module, gce, instance_names, zone_name): +    """Terminates a list of instances. + +    module: Ansible module object +    gce: authenticated GCE connection object +    instance_names: a list of instance names to terminate +    zone_name: the zone where the instances reside prior to termination + +    Returns a dictionary of instance names that were terminated. + +    """ +    changed = False +    terminated_instance_names = [] +    for name in instance_names: +        inst = None +        try: +            inst = gce.ex_get_node(name, zone_name) +        except ResourceNotFoundError: +            pass +        except Exception as e: +            module.fail_json(msg=unexpected_error_msg(e), changed=False) +        if inst: +            gce.destroy_node(inst) +            terminated_instance_names.append(inst.name) +            changed = True + +    return (changed, terminated_instance_names) + + +def main(): +    module = AnsibleModule( +        argument_spec=dict( +            image=dict(default='debian-7'), +            instance_names=dict(), +            machine_type=dict(default='n1-standard-1'), +            metadata=dict(), +            name=dict(), +            network=dict(default='default'), +            persistent_boot_disk=dict(type='bool', default=False), +            disks=dict(type='list'), +            state=dict(choices=['active', 'present', 'absent', 'deleted'], +                       default='present'), +            tags=dict(type='list'), +            zone=dict(default='us-central1-a'), +            service_account_email=dict(), +            service_account_permissions=dict(type='list'), +            pem_file=dict(), +            project_id=dict(), +            ip_forward=dict(type='bool', default=False), +            external_ip=dict(choices=['ephemeral', 'none'], +                             default='ephemeral'), +            disk_auto_delete=dict(type='bool', default=True), +        ) +    ) + +    if not HAS_PYTHON26: +        module.fail_json(msg="GCE module requires python's 'ast' module, python v2.6+") +    if not HAS_LIBCLOUD: +        module.fail_json(msg='libcloud with GCE support (0.13.3+) required for this module') + +    gce = gce_connect(module) + +    image = module.params.get('image') +    instance_names = module.params.get('instance_names') +    machine_type = module.params.get('machine_type') +    metadata = module.params.get('metadata') +    name = module.params.get('name') +    network = module.params.get('network') +    persistent_boot_disk = module.params.get('persistent_boot_disk') +    state = module.params.get('state') +    tags = module.params.get('tags') +    zone = module.params.get('zone') +    ip_forward = module.params.get('ip_forward') +    changed = False + +    
inames = [] +    if isinstance(instance_names, list): +        inames = instance_names +    elif isinstance(instance_names, str): +        inames = instance_names.split(',') +    if name: +        inames.append(name) +    if not inames: +        module.fail_json(msg='Must specify a "name" or "instance_names"', +                         changed=False) +    if not zone: +        module.fail_json(msg='Must specify a "zone"', changed=False) + +    json_output = {'zone': zone} +    if state in ['absent', 'deleted']: +        json_output['state'] = 'absent' +        (changed, terminated_instance_names) = terminate_instances( +            module, gce, inames, zone) + +        # based on what user specified, return the same variable, although +        # value could be different if an instance could not be destroyed +        if instance_names: +            json_output['instance_names'] = terminated_instance_names +        elif name: +            json_output['name'] = name + +    elif state in ['active', 'present']: +        json_output['state'] = 'present' +        (changed, instance_data, instance_name_list) = create_instances( +            module, gce, inames) +        json_output['instance_data'] = instance_data +        if instance_names: +            json_output['instance_names'] = instance_name_list +        elif name: +            json_output['name'] = name + +    json_output['changed'] = changed +    module.exit_json(**json_output) + +# import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.gce import * +if __name__ == '__main__': +    main() diff --git a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml index e3efd8566..c5c479052 100644 --- a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml +++ b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml @@ -17,6 +17,11 @@        - clusterid-{{ cluster_id }}        - host-type-{{ type }}        - sub-host-type-{{ g_sub_host_type }} +    metadata: +      startup-script: | +        #!/bin/bash +        echo "Defaults:{{ deployment_vars[deployment_type].ssh_user }} !requiretty" > /etc/sudoers.d/99-{{ deployment_vars[deployment_type].ssh_user }} +    when: instances |length > 0    register: gce diff --git a/playbooks/gce/openshift-cluster/update.yml b/playbooks/gce/openshift-cluster/update.yml index 9b7a2777a..332f27da7 100644 --- a/playbooks/gce/openshift-cluster/update.yml +++ b/playbooks/gce/openshift-cluster/update.yml @@ -1,12 +1,25 @@  --- +- hosts: localhost +  gather_facts: no +  tasks: +  - include_vars: vars.yml +  - include_vars: cluster_hosts.yml +  - add_host: +      name: "{{ item }}" +      groups: l_oo_all_hosts +    with_items: g_all_hosts + +- hosts: l_oo_all_hosts +  gather_facts: no +  tasks: +  - include_vars: vars.yml +  - include_vars: cluster_hosts.yml +  - name: Populate oo_hosts_to_update group    hosts: localhost    connection: local    become: no    gather_facts: no -  vars_files: -  - vars.yml -  - cluster_hosts.yml    tasks:    - name: Evaluate oo_hosts_to_update      add_host: @@ -14,7 +27,7 @@        groups: oo_hosts_to_update        ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"        ansible_become: "{{ deployment_vars[deployment_type].become }}" -    with_items: "{{ g_all_hosts | default([]) }}" +    with_items: g_all_hosts | default([])  - include: ../../common/openshift-cluster/update_repos_and_packages.yml diff --git a/playbooks/gce/openshift-cluster/vars.yml 
b/playbooks/gce/openshift-cluster/vars.yml index 1497d5520..13c754c1e 100644 --- a/playbooks/gce/openshift-cluster/vars.yml +++ b/playbooks/gce/openshift-cluster/vars.yml @@ -13,11 +13,6 @@ deployment_vars:      machine_type: "{{ lookup('oo_option', 'machine_type') | default('n1-standard-1', True) }}"      ssh_user: "{{ lookup('env', 'gce_ssh_user') |  default(ansible_ssh_user, true) }}"      become: yes -  online: -    image: libra-rhel7 -    machine_type: n1-standard-1 -    ssh_user: root -    become: no    enterprise: "{{ deployment_rhel7_ent_base }}"    openshift-enterprise: "{{ deployment_rhel7_ent_base }}"    atomic-enterprise: "{{ deployment_rhel7_ent_base }}" diff --git a/playbooks/libvirt/openshift-cluster/config.yml b/playbooks/libvirt/openshift-cluster/config.yml index 81a6fff0d..21d82f422 100644 --- a/playbooks/libvirt/openshift-cluster/config.yml +++ b/playbooks/libvirt/openshift-cluster/config.yml @@ -2,10 +2,23 @@  # TODO: need to figure out a plan for setting hostname, currently the default  # is localhost, so no hostname value (or public_hostname) value is getting  # assigned +- hosts: localhost +  gather_facts: no +  tasks: +  - include_vars: vars.yml +  - include_vars: cluster_hosts.yml +  - add_host: +      name: "{{ item }}" +      groups: l_oo_all_hosts +    with_items: g_all_hosts + +- hosts: l_oo_all_hosts +  gather_facts: no +  tasks: +  - include_vars: vars.yml +  - include_vars: cluster_hosts.yml +  - include: ../../common/openshift-cluster/config.yml -  vars_files: -  - ../../libvirt/openshift-cluster/vars.yml -  - ../../libvirt/openshift-cluster/cluster_hosts.yml    vars:      g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"      g_sudo: "{{ deployment_vars[deployment_type].become }}" @@ -13,11 +26,11 @@      openshift_cluster_id: "{{ cluster_id }}"      openshift_debug_level: "{{ debug_level }}"      openshift_deployment_type: "{{ deployment_type }}" -    openshift_registry_selector: 'type=infra' +    openshift_hosted_registry_selector: 'type=infra'      openshift_hosted_router_selector: 'type=infra' -    openshift_infra_nodes: "{{ g_infra_hosts }}"      openshift_master_cluster_method: 'native'      openshift_use_openshift_sdn: "{{ lookup('oo_option', 'use_openshift_sdn') }}"      os_sdn_network_plugin_name: "{{ lookup('oo_option', 'sdn_network_plugin_name') }}"      openshift_use_flannel: "{{ lookup('oo_option', 'use_flannel') }}"      openshift_use_fluentd: "{{ lookup('oo_option', 'use_fluentd') }}" +    openshift_use_dnsmasq: false diff --git a/playbooks/libvirt/openshift-cluster/launch.yml b/playbooks/libvirt/openshift-cluster/launch.yml index 701d57d26..2475b9d6b 100644 --- a/playbooks/libvirt/openshift-cluster/launch.yml +++ b/playbooks/libvirt/openshift-cluster/launch.yml @@ -12,9 +12,6 @@      image_name: "{{ deployment_vars[deployment_type].image.name }}"      image_compression: "{{ deployment_vars[deployment_type].image.compression }}"    tasks: -  - fail: msg="Deployment type not supported for libvirt provider yet" -    when: deployment_type == 'online' -    - include: tasks/configure_libvirt.yml    - include: ../../common/openshift-cluster/tasks/set_etcd_launch_facts.yml diff --git a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml index 558dfaccd..833586ffa 100644 --- a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml +++ b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml @@ -1,7 +1,7 @@  ---  # TODO: Add support for choosing 
base image based on deployment_type and os  # wanted (os wanted needs support added in bin/cluster with sane defaults: -# fedora/centos for origin, rhel for online/enterprise) +# fedora/centos for origin, rhel for enterprise)  # TODO: create a role to encapsulate some of this complexity, possibly also  # create a module to manage the storage tasks, network tasks, and possibly @@ -83,7 +83,7 @@    with_items: instances  - name: Wait for the VMs to get an IP -  shell: 'virsh -c {{ libvirt_uri }} net-dhcp-leases openshift-ansible | egrep -c ''{{ instances | join("|") }}''' +  shell: 'virsh -c {{ libvirt_uri }} net-dhcp-leases {{ libvirt_network }} | egrep -c ''{{ instances | join("|") }}'''    register: nb_allocated_ips    until: nb_allocated_ips.stdout == '{{ instances | length }}'    retries: 60 @@ -91,7 +91,7 @@    when: instances | length != 0  - name: Collect IP addresses of the VMs -  shell: 'virsh -c {{ libvirt_uri }} net-dhcp-leases openshift-ansible | awk ''$6 == "{{ item }}" {gsub(/\/.*/, "", $5); print $5}''' +  shell: 'virsh -c {{ libvirt_uri }} net-dhcp-leases {{ libvirt_network }} | awk ''$6 == "{{ item }}" {gsub(/\/.*/, "", $5); print $5}'''    register: scratch_ip    with_items: instances diff --git a/playbooks/libvirt/openshift-cluster/update.yml b/playbooks/libvirt/openshift-cluster/update.yml index 9b7a2777a..28362c984 100644 --- a/playbooks/libvirt/openshift-cluster/update.yml +++ b/playbooks/libvirt/openshift-cluster/update.yml @@ -1,4 +1,20 @@  --- +- hosts: localhost +  gather_facts: no +  tasks: +  - include_vars: vars.yml +  - include_vars: cluster_hosts.yml +  - add_host: +      name: "{{ item }}" +      groups: l_oo_all_hosts +    with_items: g_all_hosts + +- hosts: l_oo_all_hosts +  gather_facts: no +  tasks: +  - include_vars: vars.yml +  - include_vars: cluster_hosts.yml +  - name: Populate oo_hosts_to_update group    hosts: localhost    connection: local @@ -14,7 +30,7 @@        groups: oo_hosts_to_update        ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"        ansible_become: "{{ deployment_vars[deployment_type].become }}" -    with_items: "{{ g_all_hosts | default([]) }}" +    with_items: g_all_hosts | default([])  - include: ../../common/openshift-cluster/update_repos_and_packages.yml diff --git a/playbooks/libvirt/openshift-cluster/vars.yml b/playbooks/libvirt/openshift-cluster/vars.yml index ca0c903ac..4daaf1c91 100644 --- a/playbooks/libvirt/openshift-cluster/vars.yml +++ b/playbooks/libvirt/openshift-cluster/vars.yml @@ -35,13 +35,6 @@ deployment_vars:                    default('dd0f5e610e7c5ffacaca35ed7a78a19142a588f4543da77b61c1fb0d74400471', True) }}"      ssh_user: openshift      become: yes -  online: -    image: -      url: -      name: -      sha256: -    ssh_user: root -    become: no    enterprise: "{{ deployment_rhel7_ent_base }}"    openshift-enterprise: "{{ deployment_rhel7_ent_base }}"    atomic-enterprise: "{{ deployment_rhel7_ent_base }}" diff --git a/playbooks/openstack/openshift-cluster/config.yml b/playbooks/openstack/openshift-cluster/config.yml index 9c0ca9af9..6fff31826 100644 --- a/playbooks/openstack/openshift-cluster/config.yml +++ b/playbooks/openstack/openshift-cluster/config.yml @@ -1,8 +1,21 @@  --- +- hosts: localhost +  gather_facts: no +  tasks: +  - include_vars: vars.yml +  - include_vars: cluster_hosts.yml +  - add_host: +      name: "{{ item }}" +      groups: l_oo_all_hosts +    with_items: g_all_hosts + +- hosts: l_oo_all_hosts +  gather_facts: no +  tasks: +  - include_vars: vars.yml +  - 
include_vars: cluster_hosts.yml +  - include: ../../common/openshift-cluster/config.yml -  vars_files: -  - ../../openstack/openshift-cluster/vars.yml -  - ../../openstack/openshift-cluster/cluster_hosts.yml    vars:      g_nodeonmaster: true      g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" @@ -10,9 +23,8 @@      openshift_cluster_id: "{{ cluster_id }}"      openshift_debug_level: "{{ debug_level }}"      openshift_deployment_type: "{{ deployment_type }}" -    openshift_registry_selector: 'type=infra' +    openshift_hosted_registry_selector: 'type=infra'      openshift_hosted_router_selector: 'type=infra' -    openshift_infra_nodes: "{{ g_infra_hosts }}"      openshift_master_cluster_method: 'native'      openshift_use_openshift_sdn: "{{ lookup('oo_option', 'use_openshift_sdn') }}"      os_sdn_network_plugin_name: "{{ lookup('oo_option', 'sdn_network_plugin_name') }}" diff --git a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml index 2f05c3adc..422e6dafe 100644 --- a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml +++ b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml @@ -288,6 +288,14 @@ resources:            port_range_max: 53          - direction: ingress            protocol: tcp +          port_range_min: 8053 +          port_range_max: 8053 +        - direction: ingress +          protocol: udp +          port_range_min: 8053 +          port_range_max: 8053 +        - direction: ingress +          protocol: tcp            port_range_min: 24224            port_range_max: 24224          - direction: ingress @@ -591,11 +599,17 @@ resources:      type: OS::Heat::MultipartMime      properties:        parts: -        - config: { get_file: user-data }          - config:              str_replace:                template: |                  #cloud-config +                disable_root: true + +                system_info: +                  default_user: +                    name: openshift +                    sudo: ["ALL=(ALL) NOPASSWD: ALL"] +                  write_files:                    - path: /etc/sudoers.d/00-openshift-no-requiretty                      permissions: 440 diff --git a/playbooks/openstack/openshift-cluster/launch.yml b/playbooks/openstack/openshift-cluster/launch.yml index b6add9e86..b9aae2f4c 100644 --- a/playbooks/openstack/openshift-cluster/launch.yml +++ b/playbooks/openstack/openshift-cluster/launch.yml @@ -7,10 +7,6 @@    vars_files:    - vars.yml    tasks: -  - fail: -      msg: "Deployment type not supported for OpenStack provider yet" -    when: deployment_type == 'online' -    # TODO: Write an Ansible module for dealing with HEAT stacks    #       Dealing with the outputs is currently terrible @@ -50,7 +46,7 @@               -P master_flavor={{ openstack_flavor["master"] }}               -P node_flavor={{ openstack_flavor["node"] }}               -P infra_flavor={{ openstack_flavor["infra"] }} -             -P dns_flavor=m1.small +             -P dns_flavor={{ openstack_flavor["dns"] }}               openshift-ansible-{{ cluster_id }}-stack'    - name: Wait for OpenStack Stack readiness diff --git a/playbooks/openstack/openshift-cluster/update.yml b/playbooks/openstack/openshift-cluster/update.yml index 539af6524..6d4d23963 100644 --- a/playbooks/openstack/openshift-cluster/update.yml +++ b/playbooks/openstack/openshift-cluster/update.yml @@ -1,4 +1,20 @@  --- +- hosts: localhost +  gather_facts: no +  tasks: +  - include_vars: vars.yml +  - 
include_vars: cluster_hosts.yml +  - add_host: +      name: "{{ item }}" +      groups: l_oo_all_hosts +    with_items: g_all_hosts + +- hosts: l_oo_all_hosts +  gather_facts: no +  tasks: +  - include_vars: vars.yml +  - include_vars: cluster_hosts.yml +  - include: dns.yml  - name: Populate oo_hosts_to_update group @@ -6,9 +22,6 @@    connection: local    become: no    gather_facts: no -  vars_files: -  - vars.yml -  - cluster_hosts.yml    tasks:    - name: Evaluate oo_hosts_to_update      add_host: @@ -16,7 +29,7 @@        groups: oo_hosts_to_update        ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"        ansible_become: "{{ deployment_vars[deployment_type].become }}" -    with_items: "{{ g_all_hosts | default([]) }}" +    with_items: g_all_hosts | default([])  - include: ../../common/openshift-cluster/update_repos_and_packages.yml diff --git a/playbooks/openstack/openshift-cluster/vars.yml b/playbooks/openstack/openshift-cluster/vars.yml index d45ab6b9e..bc53a51b0 100644 --- a/playbooks/openstack/openshift-cluster/vars.yml +++ b/playbooks/openstack/openshift-cluster/vars.yml @@ -13,6 +13,7 @@ openstack_ssh_public_key:       "{{ lookup('file', lookup('oo_option', 'public_k  openstack_ssh_access_from:      "{{ lookup('oo_option', 'ssh_from')          |                                      default('0.0.0.0/0',                     True) }}"  openstack_flavor: +  dns:    "{{ lookup('oo_option', 'dns_flavor'       ) | default('m1.small',  True) }}"    etcd:   "{{ lookup('oo_option', 'etcd_flavor'      ) | default('m1.small',  True) }}"    master: "{{ lookup('oo_option', 'master_flavor'    ) | default('m1.small',  True) }}"    infra:  "{{ lookup('oo_option', 'infra_flavor'     ) | default('m1.small',  True) }}" @@ -28,10 +29,6 @@ deployment_vars:      image: "{{ lookup('oo_option', 'image_name') | default('centos-70-raw', True) }}"      ssh_user: openshift      become: yes -  online: -    image: -    ssh_user: root -    become: no    enterprise: "{{ deployment_rhel7_ent_base }}"    openshift-enterprise: "{{ deployment_rhel7_ent_base }}"    atomic-enterprise: "{{ deployment_rhel7_ent_base }}" | 
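
Note: the largest addition in this changeset is the vendored library/gce.py module. For readers who do not want to trace the full diff, the sketches below restate a few pieces of its logic as small standalone Python snippets. The helper names (normalize_metadata, resolve_disks, resolve_instance_names) are illustrative only and are not part of the module's API; the module itself does this work inline inside create_instances() and main().

First, metadata handling: the module accepts either a dict or a quoted dict-like string for the 'metadata' option and, for older libcloud releases, converts it into the {'items': [...]} form GCE expects. A minimal sketch, assuming the same literal_eval-based parsing the module uses:

#!/usr/bin/python
# Illustrative sketch, not part of library/gce.py: normalize the 'metadata'
# option from a dict or a dict-like string into GCE's items list.
from ast import literal_eval

def normalize_metadata(metadata):
    if not metadata:
        return {'items': []}
    if isinstance(metadata, dict):
        md = metadata
    else:
        md = literal_eval(str(metadata))
        if not isinstance(md, dict):
            raise ValueError('metadata must be a dict')
    return {'items': [{'key': k, 'value': v} for k, v in md.items()]}

# Example input matching the module's EXAMPLES section:
print(normalize_metadata('{"db":"postgres", "group":"qa", "id":500}'))
# {'items': [{'key': 'db', 'value': 'postgres'}, ...]}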
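
Second, the 'disks' option: each entry may be a plain disk name or a {'name': ..., 'mode': ...} dict; the first entry is the boot disk, and plain string entries after the first default to READ_ONLY. The real module resolves actual volume objects through the libcloud driver (gce.ex_get_volume); this sketch only tracks names and modes:

# Illustrative sketch, not part of library/gce.py: interpret the 'disks' option.
def resolve_disks(disks):
    names, modes = [], []
    for i, disk in enumerate(disks or []):
        if isinstance(disk, dict):
            names.append(disk['name'])
            modes.append(disk['mode'])
        else:
            names.append(disk)
            # boot disk (index 0) is implicitly READ_WRITE
            modes.append('READ_ONLY' if i > 0 else 'READ_WRITE')
    return names, modes

print(resolve_disks(['boot-disk', {'name': 'data-disk', 'mode': 'READ_WRITE'}]))
# (['boot-disk', 'data-disk'], ['READ_WRITE', 'READ_WRITE'])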
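
Finally, instance naming: main() accepts either 'instance_names' (a list or a comma-separated string) or a single 'name', and fails when neither is given. A sketch of that resolution, again with an illustrative helper name:

# Illustrative sketch, not part of library/gce.py: build the working list of
# instance names the way main() does.
def resolve_instance_names(instance_names=None, name=None):
    inames = []
    if isinstance(instance_names, list):
        inames = instance_names
    elif isinstance(instance_names, str):
        inames = instance_names.split(',')
    if name:
        inames.append(name)
    if not inames:
        raise ValueError('Must specify a "name" or "instance_names"')
    return inames

print(resolve_instance_names(instance_names='foo,bar'))  # ['foo', 'bar']
print(resolve_instance_names(name='test-instance'))      # ['test-instance']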
