74 files changed, 340 insertions, 225 deletions
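Most of the hunks below replace the generic "service" module with the native "systemd" module and fold the standalone "systemctl daemon-reload" / "systemctl mask" command tasks into module options; the systemd module first shipped in Ansible 2.2, which is what the min_ansible_version and README requirement bumps reflect. A minimal sketch of the pattern the roles converge on (example.service is a placeholder unit name, not one taken from this repository):

---
- hosts: all
  become: yes
  tasks:
  - name: Enable and start a unit, reloading systemd in the same task
    systemd:
      name: example.service
      state: started
      enabled: yes
      daemon_reload: yes   # replaces the separate "command: systemctl daemon-reload" tasks

  - name: Stop, disable and mask a unit that must not run
    systemd:
      name: example.service
      state: stopped
      enabled: no
      masked: yes          # replaces "command: systemctl mask <unit>"

Because daemon_reload and masked are handled inside the module, the register/when bookkeeping that used to trigger the manual reload tasks can be dropped, which is where most of the deleted lines in this change come from.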
@@ -35,7 +35,7 @@ not practical to start over at 1.0.      ***      Requirements: -    - Ansible >= 2.1.0 (>= 2.2 is preferred for performance reasons) +    - Ansible >= 2.2.0      - Jinja >= 2.7      - pyOpenSSL      - python-lxml diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py index 38bc3ad6b..997634777 100644 --- a/filter_plugins/oo_filters.py +++ b/filter_plugins/oo_filters.py @@ -10,7 +10,14 @@ from collections import Mapping  from distutils.util import strtobool  from distutils.version import LooseVersion  from operator import itemgetter -import OpenSSL.crypto + +HAS_OPENSSL=False +try: +    import OpenSSL.crypto +    HAS_OPENSSL=True +except ImportError: +    pass +  import os  import pdb  import pkg_resources @@ -516,6 +523,9 @@ class FilterModule(object):          if not isinstance(internal_hostnames, list):              raise errors.AnsibleFilterError("|failed expects internal_hostnames is list") +        if not HAS_OPENSSL: +            raise errors.AnsibleFilterError("|missing OpenSSL python bindings") +          for certificate in certificates:              if 'names' in certificate.keys():                  continue diff --git a/inventory/byo/hosts.origin.example b/inventory/byo/hosts.origin.example index 5a95ecf94..324e2477f 100644 --- a/inventory/byo/hosts.origin.example +++ b/inventory/byo/hosts.origin.example @@ -619,6 +619,9 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',  # masterConfig.volumeConfig.dynamicProvisioningEnabled, configurable as of 1.2/3.2, enabled by default  #openshift_master_dynamic_provisioning_enabled=False +# Admission plugin config +#openshift_master_admission_plugin_config={"ProjectRequestLimit":{"configuration":{"apiVersion":"v1","kind":"ProjectRequestLimitConfig","limits":[{"selector":{"admin":"true"}},{"maxProjects":"1"}]}},"PodNodeConstraints":{"configuration":{"apiVersion":"v1","kind":"PodNodeConstraintsConfig"}}} +  # Configure usage of openshift_clock role.  #openshift_clock_enabled=true diff --git a/inventory/byo/hosts.ose.example b/inventory/byo/hosts.ose.example index be919c105..4a2925599 100644 --- a/inventory/byo/hosts.ose.example +++ b/inventory/byo/hosts.ose.example @@ -619,6 +619,9 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',  # masterConfig.volumeConfig.dynamicProvisioningEnabled, configurable as of 1.2/3.2, enabled by default  #openshift_master_dynamic_provisioning_enabled=False +# Admission plugin config +#openshift_master_admission_plugin_config={"ProjectRequestLimit":{"configuration":{"apiVersion":"v1","kind":"ProjectRequestLimitConfig","limits":[{"selector":{"admin":"true"}},{"maxProjects":"1"}]}},"PodNodeConstraints":{"configuration":{"apiVersion":"v1","kind":"PodNodeConstraintsConfig"}}} +  # Configure usage of openshift_clock role.  
#openshift_clock_enabled=true diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml index 57d4fe4b6..b7f0267c1 100644 --- a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml +++ b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml @@ -1,7 +1,7 @@  - name: Backup etcd    hosts: etcd_hosts_to_backup    vars: -    embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}" +    embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"      timestamp: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"    roles:    - openshift_facts diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/main.yml b/playbooks/common/openshift-cluster/upgrades/etcd/main.yml index cce844403..192799376 100644 --- a/playbooks/common/openshift-cluster/upgrades/etcd/main.yml +++ b/playbooks/common/openshift-cluster/upgrades/etcd/main.yml @@ -14,6 +14,9 @@    connection: local    become: no    tasks: +  - fail: +      msg: 'The etcd upgrade playbook does not support upgrading embedded etcd, simply run the normal playbooks and etcd will be upgraded when your master is updated.' +    when:  "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"    - name: Evaluate etcd_hosts_to_upgrade      add_host:        name: "{{ item }}" diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml index 684eea343..8c0bd272c 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml @@ -48,3 +48,18 @@      dest: "{{ openshift.common.config_base}}/master/master-config.yaml"      yaml_key: 'controllerConfig.servicesServingCert.signer.keyFile'      yaml_value: service-signer.key + +- modify_yaml: +    dest: "{{ openshift.common.config_base}}/master/master-config.yaml" +    yaml_key: 'admissionConfig.pluginConfig' +    yaml_value: "{{ openshift.master.admission_plugin_config }}" + +- modify_yaml: +    dest: "{{ openshift.common.config_base}}/master/master-config.yaml" +    yaml_key: 'admissionConfig.pluginOrderOverride' +    yaml_value: + +- modify_yaml: +    dest: "{{ openshift.common.config_base}}/master/master-config.yaml" +    yaml_key: 'kubernetesMasterConfig.admissionConfig' +    yaml_value: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml new file mode 100644 index 000000000..32de9d94a --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml @@ -0,0 +1,15 @@ +--- +- modify_yaml: +    dest: "{{ openshift.common.config_base}}/master/master-config.yaml" +    yaml_key: 'admissionConfig.pluginConfig' +    yaml_value: "{{ openshift.master.admission_plugin_config }}" + +- modify_yaml: +    dest: "{{ openshift.common.config_base}}/master/master-config.yaml" +    yaml_key: 'admissionConfig.pluginOrderOverride' +    yaml_value: + +- modify_yaml: +    dest: "{{ openshift.common.config_base}}/master/master-config.yaml" +    yaml_key: 'kubernetesMasterConfig.admissionConfig' +    yaml_value: diff --git a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml index e0afc43ba..31a13aa2a 100644 --- a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml 
+++ b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml @@ -116,6 +116,7 @@      ansible_become: "{{ deployment_vars[deployment_type].become }}"      groups: "tag_environment-{{ cluster_env }}, tag_host-type-{{ type }}, tag_sub-host-type-{{ g_sub_host_type }}, tag_clusterid-{{ cluster_id }}"      openshift_node_labels: "{{ node_label }}" +    libvirt_ip_address: "{{ item.1 }}"    with_together:      - '{{ instances }}'      - '{{ ips }}' diff --git a/playbooks/openstack/openshift-cluster/launch.yml b/playbooks/openstack/openshift-cluster/launch.yml index 7e037f2af..f460b14c8 100644 --- a/playbooks/openstack/openshift-cluster/launch.yml +++ b/playbooks/openstack/openshift-cluster/launch.yml @@ -107,6 +107,9 @@        groups: 'meta-environment_{{ cluster_env }}, meta-host-type_etcd, meta-sub-host-type_default, meta-clusterid_{{ cluster_id }}'        openshift_node_labels:          type: "etcd" +      openstack: +        public_v4: '{{ item[2] }}' +        private_v4: '{{ item[1] }}'      with_together:        - '{{ parsed_outputs.etcd_names }}'        - '{{ parsed_outputs.etcd_ips }}' @@ -121,6 +124,9 @@        groups: 'meta-environment_{{ cluster_env }}, meta-host-type_master, meta-sub-host-type_default, meta-clusterid_{{ cluster_id }}'        openshift_node_labels:          type: "master" +      openstack: +        public_v4: '{{ item[2] }}' +        private_v4: '{{ item[1] }}'      with_together:        - '{{ parsed_outputs.master_names }}'        - '{{ parsed_outputs.master_ips }}' @@ -135,6 +141,9 @@        groups: 'meta-environment_{{ cluster_env }}, meta-host-type_node, meta-sub-host-type_compute, meta-clusterid_{{ cluster_id }}'        openshift_node_labels:          type: "compute" +      openstack: +        public_v4: '{{ item[2] }}' +        private_v4: '{{ item[1] }}'      with_together:        - '{{ parsed_outputs.node_names }}'        - '{{ parsed_outputs.node_ips }}' @@ -149,6 +158,9 @@        groups: 'meta-environment_{{ cluster_env }}, meta-host-type_node, meta-sub-host-type_infra, meta-clusterid_{{ cluster_id }}'        openshift_node_labels:          type: "infra" +      openstack: +        public_v4: '{{ item[2] }}' +        private_v4: '{{ item[1] }}'      with_together:        - '{{ parsed_outputs.infra_names }}'        - '{{ parsed_outputs.infra_ips }}' diff --git a/roles/cockpit/meta/main.yml b/roles/cockpit/meta/main.yml index 43047902d..0f507e75e 100644 --- a/roles/cockpit/meta/main.yml +++ b/roles/cockpit/meta/main.yml @@ -4,7 +4,7 @@ galaxy_info:    description: Deploy and Enable cockpit-ws plus optional plugins    company: Red Hat, Inc.    
license: Apache License, Version 2.0 -  min_ansible_version: 1.7 +  min_ansible_version: 2.2    platforms:    - name: EL      versions: diff --git a/roles/cockpit/tasks/main.yml b/roles/cockpit/tasks/main.yml index 1975b92e6..bddad778f 100644 --- a/roles/cockpit/tasks/main.yml +++ b/roles/cockpit/tasks/main.yml @@ -10,7 +10,7 @@    when: not openshift.common.is_containerized | bool  - name: Enable cockpit-ws -  service: +  systemd:      name: cockpit.socket      enabled: true      state: started diff --git a/roles/dns/README.md b/roles/dns/README.md index 7e0140772..9a88ce97c 100644 --- a/roles/dns/README.md +++ b/roles/dns/README.md @@ -6,7 +6,7 @@ Configure a DNS server serving IPs of all the nodes of the cluster  Requirements  ------------ -None +Ansible 2.2  Role Variables  -------------- diff --git a/roles/dns/handlers/main.yml b/roles/dns/handlers/main.yml index ef101785e..61fd7a10e 100644 --- a/roles/dns/handlers/main.yml +++ b/roles/dns/handlers/main.yml @@ -1,4 +1,5 @@ +---  - name: restart bind -  service: +  systemd:      name: named      state: restarted diff --git a/roles/dns/meta/main.yml b/roles/dns/meta/main.yml index 048274c49..64d56114e 100644 --- a/roles/dns/meta/main.yml +++ b/roles/dns/meta/main.yml @@ -4,5 +4,6 @@ galaxy_info:    description: Deploy and configure a DNS server    company: Amadeus SAS    license: ASL 2.0 +  min_ansible_version: 2.2  dependencies:  - { role: openshift_facts } diff --git a/roles/dns/tasks/main.yml b/roles/dns/tasks/main.yml index 2abe0d9dd..c5ab53b4d 100644 --- a/roles/dns/tasks/main.yml +++ b/roles/dns/tasks/main.yml @@ -11,7 +11,6 @@    template:      dest: "/tmp/dockerbuild/Dockerfile"      src: Dockerfile -  register: install_result    when: openshift.common.is_containerized | bool  - name: Build Bind image @@ -22,13 +21,8 @@    template:      dest: "/etc/systemd/system/named.service"      src: named.service.j2 -  register: install_result    when: openshift.common.is_containerized | bool -- name: reload systemd -  command: /usr/bin/systemctl --system daemon-reload -  when: openshift.common.is_containerized | bool and install_result | changed -  - name: Create bind zone dir    file: path=/var/named state=directory    when: openshift.common.is_containerized | bool @@ -45,7 +39,8 @@    notify: restart bind  - name: Enable Bind -  service: +  systemd:      name: named      state: started      enabled: yes +    daemon_reload: yes diff --git a/roles/docker/README.md b/roles/docker/README.md index 1f0d94da0..ea06fd41a 100644 --- a/roles/docker/README.md +++ b/roles/docker/README.md @@ -6,7 +6,7 @@ Ensures docker package is installed, and optionally raises timeout for systemd-u  Requirements  ------------ -None +Ansible 2.2  Role Variables  -------------- diff --git a/roles/docker/handlers/main.yml b/roles/docker/handlers/main.yml index aff905bc8..9ccb306fc 100644 --- a/roles/docker/handlers/main.yml +++ b/roles/docker/handlers/main.yml @@ -1,12 +1,13 @@  ---  - name: restart docker -  service: +  systemd:      name: docker      state: restarted    when: not docker_service_status_changed | default(false) | bool  - name: restart udev -  service: +  systemd:      name: systemd-udevd      state: restarted +    daemon_reload: yes diff --git a/roles/docker/meta/main.yml b/roles/docker/meta/main.yml index 3d362158d..c5c95c0d2 100644 --- a/roles/docker/meta/main.yml +++ b/roles/docker/meta/main.yml @@ -4,7 +4,7 @@ galaxy_info:    description: docker package install    company: Red Hat, Inc    license: ASL 2.0 -  min_ansible_version: 1.2 +  
min_ansible_version: 2.2    platforms:    - name: EL      versions: diff --git a/roles/docker/tasks/udev_workaround.yml b/roles/docker/tasks/udev_workaround.yml index aa7af0cb3..257c3123d 100644 --- a/roles/docker/tasks/udev_workaround.yml +++ b/roles/docker/tasks/udev_workaround.yml @@ -21,10 +21,4 @@      owner: root      mode: "0644"    notify: -  - restart udev -  register: udevw_override_conf - -- name: reload systemd config files -  command: systemctl daemon-reload -  when: udevw_override_conf | changed -  +    - restart udev diff --git a/roles/etcd/README.md b/roles/etcd/README.md index 329a926c0..c936dbabc 100644 --- a/roles/etcd/README.md +++ b/roles/etcd/README.md @@ -6,7 +6,8 @@ Configures an etcd cluster for an arbitrary number of hosts  Requirements  ------------ -This role assumes it's being deployed on a RHEL/Fedora based host with package +* Ansible 2.2 +* This role assumes it's being deployed on a RHEL/Fedora based host with package  named 'etcd' available via yum or dnf (conditionally).  Role Variables diff --git a/roles/etcd/handlers/main.yml b/roles/etcd/handlers/main.yml index e00e1cac4..95076b19e 100644 --- a/roles/etcd/handlers/main.yml +++ b/roles/etcd/handlers/main.yml @@ -1,5 +1,5 @@  ---  - name: restart etcd -  service: name={{ etcd_service }} state=restarted +  systemd: name={{ etcd_service }} state=restarted    when: not (etcd_service_status_changed | default(false) | bool) diff --git a/roles/etcd/meta/main.yml b/roles/etcd/meta/main.yml index cfd72dfbc..532f9e313 100644 --- a/roles/etcd/meta/main.yml +++ b/roles/etcd/meta/main.yml @@ -7,7 +7,7 @@ galaxy_info:    description: etcd management    company: Red Hat, Inc.    license: Apache License, Version 2.0 -  min_ansible_version: 2.1 +  min_ansible_version: 2.2    platforms:    - name: EL      versions: diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml index 7b61e9b73..41f25be70 100644 --- a/roles/etcd/tasks/main.yml +++ b/roles/etcd/tasks/main.yml @@ -20,36 +20,25 @@    template:      dest: "/etc/systemd/system/etcd_container.service"      src: etcd.docker.service -  register: install_etcd_result    when: etcd_is_containerized | bool -- name: Ensure etcd datadir exists -  when: etcd_is_containerized | bool +- name: Ensure etcd datadir exists when containerized    file:      path: "{{ etcd_data_dir }}"      state: directory      mode: 0700 - -- name: Check for etcd service presence -  command: systemctl show etcd.service -  register: etcd_show -  changed_when: false -  failed_when: false +  when: etcd_is_containerized | bool  - name: Disable system etcd when containerized -  when: etcd_is_containerized | bool and etcd_show.rc == 0 and 'LoadState=not-found' not in etcd_show.stdout -  service: +  systemd:      name: etcd      state: stopped      enabled: no - -- name: Mask system etcd when containerized -  when: etcd_is_containerized | bool and etcd_show.rc == 0 and 'LoadState=not-found' not in etcd_show.stdout -  command: systemctl mask etcd - -- name: Reload systemd units -  command: systemctl daemon-reload -  when: etcd_is_containerized | bool and ( install_etcd_result | changed ) +    masked: yes +    daemon_reload: yes +  when: etcd_is_containerized | bool +  register: task_result +  failed_when: "task_result|failed and 'could not' not in task_result.msg|lower"  - name: Validate permissions on the config dir    file: @@ -68,7 +57,7 @@      - restart etcd  - name: Enable etcd -  service: +  systemd:      name: "{{ etcd_service }}"      state: started      enabled: yes @@ -77,5 +66,6 @@  - 
include: etcdctl.yml    when: openshift_etcd_etcdctl_profile | default(true) | bool -- set_fact: +- name: Set fact etcd_service_status_changed +  set_fact:      etcd_service_status_changed: "{{ start_result | changed }}" diff --git a/roles/etcd/templates/etcd.docker.service b/roles/etcd/templates/etcd.docker.service index cf957ede8..ae059b549 100644 --- a/roles/etcd/templates/etcd.docker.service +++ b/roles/etcd/templates/etcd.docker.service @@ -7,7 +7,7 @@ PartOf=docker.service  [Service]  EnvironmentFile=/etc/etcd/etcd.conf  ExecStartPre=-/usr/bin/docker rm -f {{ etcd_service }} -ExecStart=/usr/bin/docker run --name {{ etcd_service }} --rm -v /var/lib/etcd:/var/lib/etcd:z -v /etc/etcd:/etc/etcd:z --env-file=/etc/etcd/etcd.conf --net=host --entrypoint=/usr/bin/etcd {{ openshift.etcd.etcd_image }} +ExecStart=/usr/bin/docker run --name {{ etcd_service }} --rm -v /var/lib/etcd:/var/lib/etcd:z -v /etc/etcd:/etc/etcd:ro --env-file=/etc/etcd/etcd.conf --net=host --entrypoint=/usr/bin/etcd {{ openshift.etcd.etcd_image }}  ExecStop=/usr/bin/docker stop {{ etcd_service }}  SyslogIdentifier=etcd_container  Restart=always diff --git a/roles/flannel/README.md b/roles/flannel/README.md index 84e2c5c49..0c7347603 100644 --- a/roles/flannel/README.md +++ b/roles/flannel/README.md @@ -6,7 +6,8 @@ Configure flannel on openshift nodes  Requirements  ------------ -This role assumes it's being deployed on a RHEL/Fedora based host with package +* Ansible 2.2 +* This role assumes it's being deployed on a RHEL/Fedora based host with package  named 'flannel' available via yum or dnf (conditionally), in version superior  to 0.3. diff --git a/roles/flannel/handlers/main.yml b/roles/flannel/handlers/main.yml index 981ea5c7a..94d1d18fb 100644 --- a/roles/flannel/handlers/main.yml +++ b/roles/flannel/handlers/main.yml @@ -1,8 +1,8 @@  ---  - name: restart flanneld    become: yes -  service: name=flanneld state=restarted +  systemd: name=flanneld state=restarted  - name: restart docker    become: yes -  service: name=docker state=restarted +  systemd: name=docker state=restarted diff --git a/roles/flannel/meta/main.yml b/roles/flannel/meta/main.yml index 616ae61d2..35f825586 100644 --- a/roles/flannel/meta/main.yml +++ b/roles/flannel/meta/main.yml @@ -4,7 +4,7 @@ galaxy_info:    description: flannel management    company: Red Hat, Inc.    license: Apache License, Version 2.0 -  min_ansible_version: 2.1 +  min_ansible_version: 2.2    platforms:    - name: EL      versions: diff --git a/roles/flannel/tasks/main.yml b/roles/flannel/tasks/main.yml index a51455bae..3a8945a82 100644 --- a/roles/flannel/tasks/main.yml +++ b/roles/flannel/tasks/main.yml @@ -27,7 +27,7 @@  - name: Enable flanneld    become: yes -  service: +  systemd:      name: flanneld      state: started      enabled: yes diff --git a/roles/kube_nfs_volumes/README.md b/roles/kube_nfs_volumes/README.md index dd91ad8b1..8cf7c0cd4 100644 --- a/roles/kube_nfs_volumes/README.md +++ b/roles/kube_nfs_volumes/README.md @@ -11,8 +11,8 @@ system) on the disks!  ## Requirements +* Ansible 2.2  * Running Kubernetes with NFS persistent volume support (on a remote machine). -  * Works only on RHEL/Fedora-like distros.  
## Role Variables diff --git a/roles/kube_nfs_volumes/handlers/main.yml b/roles/kube_nfs_volumes/handlers/main.yml index 52f3ceffe..9ce8b783d 100644 --- a/roles/kube_nfs_volumes/handlers/main.yml +++ b/roles/kube_nfs_volumes/handlers/main.yml @@ -1,3 +1,3 @@  ---  - name: restart nfs -  service: name=nfs-server state=restarted +  systemd: name=nfs-server state=restarted diff --git a/roles/kube_nfs_volumes/meta/main.yml b/roles/kube_nfs_volumes/meta/main.yml index dc4ccdfee..be6ca6b88 100644 --- a/roles/kube_nfs_volumes/meta/main.yml +++ b/roles/kube_nfs_volumes/meta/main.yml @@ -4,7 +4,7 @@ galaxy_info:    description: Partition disks and use them as Kubernetes NFS physical volumes.    company: Red Hat, Inc.    license: license (Apache) -  min_ansible_version: 1.4 +  min_ansible_version: 2.2    platforms:    - name: EL      versions: diff --git a/roles/kube_nfs_volumes/tasks/nfs.yml b/roles/kube_nfs_volumes/tasks/nfs.yml index ebd3d349a..9eeff9260 100644 --- a/roles/kube_nfs_volumes/tasks/nfs.yml +++ b/roles/kube_nfs_volumes/tasks/nfs.yml @@ -4,10 +4,16 @@    when: not openshift.common.is_containerized | bool  - name: Start rpcbind on Fedora/Red Hat -  service: name=rpcbind state=started enabled=yes +  systemd: +    name: rpcbind +    state: started +    enabled: yes  - name: Start nfs on Fedora/Red Hat -  service: name=nfs-server state=started enabled=yes +  systemd: +    name: nfs-server +    state: started +    enabled: yes  - name: Export the directories    lineinfile: dest=/etc/exports diff --git a/roles/nuage_master/README.md b/roles/nuage_master/README.md index de101dd19..0f1f6f2b1 100644 --- a/roles/nuage_master/README.md +++ b/roles/nuage_master/README.md @@ -5,4 +5,6 @@ Setup Nuage Kubernetes Monitor on the Master node  Requirements  ------------ -This role assumes it has been deployed on RHEL/Fedora + +* Ansible 2.2 +* This role assumes it has been deployed on RHEL/Fedora diff --git a/roles/nuage_master/handlers/main.yaml b/roles/nuage_master/handlers/main.yaml index 56224cf82..162aaae1a 100644 --- a/roles/nuage_master/handlers/main.yaml +++ b/roles/nuage_master/handlers/main.yaml @@ -1,18 +1,24 @@  ---  - name: restart nuage-openshift-monitor    become: yes -  service: name=nuage-openshift-monitor state=restarted +  systemd: name=nuage-openshift-monitor state=restarted  - name: restart master -  service: name={{ openshift.common.service_type }}-master state=restarted +  systemd: name={{ openshift.common.service_type }}-master state=restarted    when: (not openshift_master_ha | bool) and (not master_service_status_changed | default(false))  - name: restart master api -  service: name={{ openshift.common.service_type }}-master-api state=restarted -  when: (openshift_master_ha | bool) and (not master_api_service_status_changed | default(false)) and openshift.master.cluster_method == 'native' +  systemd: name={{ openshift.common.service_type }}-master-api state=restarted +  when: > +    (openshift_master_ha | bool) and +    (not master_api_service_status_changed | default(false)) and +    openshift.master.cluster_method == 'native'  # TODO: need to fix up ignore_errors here  - name: restart master controllers -  service: name={{ openshift.common.service_type }}-master-controllers state=restarted -  when: (openshift_master_ha | bool) and (not master_controllers_service_status_changed | default(false)) and openshift.master.cluster_method == 'native' +  systemd: name={{ openshift.common.service_type }}-master-controllers state=restarted +  when: > +    (openshift_master_ha | bool) and 
+    (not master_controllers_service_status_changed | default(false)) and +    openshift.master.cluster_method == 'native'    ignore_errors: yes diff --git a/roles/nuage_master/meta/main.yml b/roles/nuage_master/meta/main.yml index 51b89fbf6..b2a47ef71 100644 --- a/roles/nuage_master/meta/main.yml +++ b/roles/nuage_master/meta/main.yml @@ -1,10 +1,10 @@  ---  galaxy_info: -  author: Vishal Patil  +  author: Vishal Patil    description:    company: Nuage Networks    license: Apache License, Version 2.0 -  min_ansible_version: 1.8 +  min_ansible_version: 2.2    platforms:    - name: EL      versions: @@ -18,5 +18,5 @@ dependencies:    - role: openshift_etcd_client_certificates    - role: os_firewall      os_firewall_allow: -    - service: openshift-monitor  +    - service: openshift-monitor        port: "{{ nuage_mon_rest_server_port }}/tcp" diff --git a/roles/nuage_node/README.md b/roles/nuage_node/README.md index 02a3cbc77..75a75ca6b 100644 --- a/roles/nuage_node/README.md +++ b/roles/nuage_node/README.md @@ -6,4 +6,5 @@ Setup Nuage VRS (Virtual Routing Switching) on the Openshift Node  Requirements  ------------ -This role assumes it has been deployed on RHEL/Fedora +* Ansible 2.2 +* This role assumes it has been deployed on RHEL/Fedora diff --git a/roles/nuage_node/handlers/main.yaml b/roles/nuage_node/handlers/main.yaml index fd06d9025..8384856ff 100644 --- a/roles/nuage_node/handlers/main.yaml +++ b/roles/nuage_node/handlers/main.yaml @@ -1,11 +1,11 @@  ---  - name: restart vrs    become: yes -  service: name=openvswitch state=restarted +  systemd: name=openvswitch state=restarted  - name: restart node    become: yes -  service: name={{ openshift.common.service_type }}-node state=restarted +  systemd: name={{ openshift.common.service_type }}-node state=restarted  - name: save iptable rules    become: yes diff --git a/roles/nuage_node/meta/main.yml b/roles/nuage_node/meta/main.yml index a6fbcba61..f96318611 100644 --- a/roles/nuage_node/meta/main.yml +++ b/roles/nuage_node/meta/main.yml @@ -1,10 +1,10 @@  ---  galaxy_info: -  author: Vishal Patil  +  author: Vishal Patil    description:    company: Nuage Networks    license: Apache License, Version 2.0 -  min_ansible_version: 1.8 +  min_ansible_version: 2.2    platforms:    - name: EL      versions: @@ -17,7 +17,7 @@ dependencies:    - role: nuage_ca    - role: os_firewall      os_firewall_allow: -    - service: vxlan  +    - service: vxlan        port: 4789/udp      - service: nuage-monitor        port: "{{ nuage_mon_rest_server_port }}/tcp" diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py index d797eb4d3..ad4b1e47b 100755 --- a/roles/openshift_facts/library/openshift_facts.py +++ b/roles/openshift_facts/library/openshift_facts.py @@ -22,9 +22,14 @@ from distutils.util import strtobool  from distutils.version import LooseVersion  import struct  import socket -from dbus import SystemBus, Interface -from dbus.exceptions import DBusException +HAVE_DBUS=False +try: +    from dbus import SystemBus, Interface +    from dbus.exceptions import DBusException +    HAVE_DBUS=True +except ImportError: +    pass  DOCUMENTATION = '''  --- @@ -102,14 +107,6 @@ def migrate_node_facts(facts):                      facts['node'][param] = facts[role].pop(param)      return facts -def migrate_local_facts(facts): -    """ Apply migrations of local facts """ -    migrated_facts = copy.deepcopy(facts) -    migrated_facts = migrate_docker_facts(migrated_facts) -    migrated_facts = 
migrate_common_facts(migrated_facts) -    migrated_facts = migrate_node_facts(migrated_facts) -    migrated_facts = migrate_hosted_facts(migrated_facts) -    return migrated_facts  def migrate_hosted_facts(facts):      """ Apply migrations for master facts """ @@ -128,6 +125,30 @@ def migrate_hosted_facts(facts):              facts['hosted']['registry']['selector'] = facts['master'].pop('registry_selector')      return facts +def migrate_admission_plugin_facts(facts): +    if 'master' in facts: +        if 'kube_admission_plugin_config' in facts['master']: +            if 'admission_plugin_config' not in facts['master']: +                facts['master']['admission_plugin_config'] = dict() +            # Merge existing kube_admission_plugin_config with admission_plugin_config. +            facts['master']['admission_plugin_config'] = merge_facts(facts['master']['admission_plugin_config'], +                                                                     facts['master']['kube_admission_plugin_config'], +                                                                     additive_facts_to_overwrite=[], +                                                                     protected_facts_to_overwrite=[]) +            # Remove kube_admission_plugin_config fact +            facts['master'].pop('kube_admission_plugin_config', None) +    return facts + +def migrate_local_facts(facts): +    """ Apply migrations of local facts """ +    migrated_facts = copy.deepcopy(facts) +    migrated_facts = migrate_docker_facts(migrated_facts) +    migrated_facts = migrate_common_facts(migrated_facts) +    migrated_facts = migrate_node_facts(migrated_facts) +    migrated_facts = migrate_hosted_facts(migrated_facts) +    migrated_facts = migrate_admission_plugin_facts(migrated_facts) +    return migrated_facts +  def first_ip(network):      """ Return the first IPv4 address in network @@ -1567,14 +1588,14 @@ def set_proxy_facts(facts):              builddefaults['git_http_proxy'] = builddefaults['http_proxy']          if 'git_https_proxy' not in builddefaults and 'https_proxy' in builddefaults:              builddefaults['git_https_proxy'] = builddefaults['https_proxy'] -        # If we're actually defining a proxy config then create kube_admission_plugin_config +        # If we're actually defining a proxy config then create admission_plugin_config          # if it doesn't exist, then merge builddefaults[config] structure -        # into kube_admission_plugin_config -        if 'kube_admission_plugin_config' not in facts['master']: -            facts['master']['kube_admission_plugin_config'] = dict() +        # into admission_plugin_config +        if 'admission_plugin_config' not in facts['master']: +            facts['master']['admission_plugin_config'] = dict()          if 'config' in builddefaults and ('http_proxy' in builddefaults or \                  'https_proxy' in builddefaults): -            facts['master']['kube_admission_plugin_config'].update(builddefaults['config']) +            facts['master']['admission_plugin_config'].update(builddefaults['config'])          facts['builddefaults'] = builddefaults      return facts @@ -2277,6 +2298,9 @@ def main():          add_file_common_args=True,      ) +    if not HAVE_DBUS: +        module.fail_json(msg="This module requires dbus python bindings") +      module.params['gather_subset'] = ['hardware', 'network', 'virtual', 'facter']      module.params['gather_timeout'] = 10      module.params['filter'] = '*' diff --git 
a/roles/openshift_loadbalancer/README.md b/roles/openshift_loadbalancer/README.md index 03e837e46..bea4c509b 100644 --- a/roles/openshift_loadbalancer/README.md +++ b/roles/openshift_loadbalancer/README.md @@ -6,6 +6,8 @@ OpenShift HaProxy Loadbalancer Configuration  Requirements  ------------ +* Ansible 2.2 +  This role is intended to be applied to the [lb] host group which is  separate from OpenShift infrastructure components. diff --git a/roles/openshift_loadbalancer/handlers/main.yml b/roles/openshift_loadbalancer/handlers/main.yml index 5b8691b26..3bf052460 100644 --- a/roles/openshift_loadbalancer/handlers/main.yml +++ b/roles/openshift_loadbalancer/handlers/main.yml @@ -1,6 +1,6 @@  ---  - name: restart haproxy -  service: +  systemd:      name: haproxy      state: restarted    when: not (haproxy_start_result_changed | default(false) | bool) diff --git a/roles/openshift_loadbalancer/meta/main.yml b/roles/openshift_loadbalancer/meta/main.yml index 0b29df2a0..0dffb545f 100644 --- a/roles/openshift_loadbalancer/meta/main.yml +++ b/roles/openshift_loadbalancer/meta/main.yml @@ -4,7 +4,7 @@ galaxy_info:    description: OpenShift haproxy loadbalancer    company: Red Hat, Inc.    license: Apache License, Version 2.0 -  min_ansible_version: 1.9 +  min_ansible_version: 2.2    platforms:    - name: EL      versions: diff --git a/roles/openshift_loadbalancer/tasks/main.yml b/roles/openshift_loadbalancer/tasks/main.yml index 1d2804279..400f80715 100644 --- a/roles/openshift_loadbalancer/tasks/main.yml +++ b/roles/openshift_loadbalancer/tasks/main.yml @@ -27,11 +27,6 @@      option: LimitNOFILE      value: "{{ openshift_loadbalancer_limit_nofile | default(100000) }}"    notify: restart haproxy -  register: nofile_limit_result - -- name: Reload systemd if needed -  command: systemctl daemon-reload -  when: nofile_limit_result | changed  - name: Configure haproxy    template: @@ -43,10 +38,11 @@    notify: restart haproxy  - name: Enable and start haproxy -  service: +  systemd:      name: haproxy      state: started      enabled: yes +    daemon_reload: yes    register: start_result  - set_fact: diff --git a/roles/openshift_master/README.md b/roles/openshift_master/README.md index 663ac08b8..c3300a7ef 100644 --- a/roles/openshift_master/README.md +++ b/roles/openshift_master/README.md @@ -6,7 +6,8 @@ Master service installation  Requirements  ------------ -A RHEL 7.1 host pre-configured with access to the rhel-7-server-rpms, +* Ansible 2.2 +* A RHEL 7.1 host pre-configured with access to the rhel-7-server-rpms,  rhel-7-server-extras-rpms, and rhel-7-server-ose-3.0-rpms repos.  
Role Variables diff --git a/roles/openshift_master/handlers/main.yml b/roles/openshift_master/handlers/main.yml index e119db1a2..69c5a1663 100644 --- a/roles/openshift_master/handlers/main.yml +++ b/roles/openshift_master/handlers/main.yml @@ -1,16 +1,16 @@  ---  - name: restart master -  service: name={{ openshift.common.service_type }}-master state=restarted +  systemd: name={{ openshift.common.service_type }}-master state=restarted    when: (openshift.master.ha is not defined or not openshift.master.ha | bool) and (not (master_service_status_changed | default(false) | bool))    notify: Verify API Server  - name: restart master api -  service: name={{ openshift.common.service_type }}-master-api state=restarted +  systemd: name={{ openshift.common.service_type }}-master-api state=restarted    when: (openshift.master.ha is defined and openshift.master.ha | bool) and (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'    notify: Verify API Server  - name: restart master controllers -  service: name={{ openshift.common.service_type }}-master-controllers state=restarted +  systemd: name={{ openshift.common.service_type }}-master-controllers state=restarted    when: (openshift.master.ha is defined and openshift.master.ha | bool) and (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'  - name: Verify API Server diff --git a/roles/openshift_master/meta/main.yml b/roles/openshift_master/meta/main.yml index a2f665702..7457e4378 100644 --- a/roles/openshift_master/meta/main.yml +++ b/roles/openshift_master/meta/main.yml @@ -4,7 +4,7 @@ galaxy_info:    description: Master    company: Red Hat, Inc.    license: Apache License, Version 2.0 -  min_ansible_version: 2.1 +  min_ansible_version: 2.2    platforms:    - name: EL      versions: diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml index 79c62e985..2de5cd3f3 100644 --- a/roles/openshift_master/tasks/main.yml +++ b/roles/openshift_master/tasks/main.yml @@ -64,9 +64,9 @@    args:      creates: "{{ openshift_master_policy }}"    notify: -  - restart master -  - restart master api -  - restart master controllers +    - restart master +    - restart master api +    - restart master controllers  - name: Create the scheduler config    copy: @@ -74,9 +74,9 @@      dest: "{{ openshift_master_scheduler_conf }}"      backup: true    notify: -  - restart master -  - restart master api -  - restart master controllers +    - restart master +    - restart master api +    - restart master controllers  - name: Install httpd-tools if needed    package: name=httpd-tools state=present @@ -147,8 +147,8 @@      mode: 0600    when: openshift.master.session_auth_secrets is defined and openshift.master.session_encryption_secrets is defined    notify: -  - restart master -  - restart master api +    - restart master +    - restart master api  - set_fact:      translated_identity_providers: "{{ openshift.master.identity_providers | translate_idps('v1', openshift.common.version, openshift.common.deployment_type) }}" @@ -163,9 +163,9 @@      group: root      mode: 0600    notify: -  - restart master -  - restart master api -  - restart master controllers +    - restart master +    - restart master api +    - restart master controllers  - include: set_loopback_context.yml    when: openshift.common.version_gte_3_2_or_1_2 @@ -179,7 +179,10 @@  # https://github.com/openshift/origin/issues/6065  # 
https://github.com/openshift/origin/issues/6447  - name: Start and enable master -  service: name={{ openshift.common.service_type }}-master enabled=yes state=started +  systemd: +    name: "{{ openshift.common.service_type }}-master" +    enabled: yes +    state: started    when: not openshift_master_ha | bool    register: start_result    until: not start_result | failed @@ -187,29 +190,30 @@    delay: 60    notify: Verify API Server -- name: Check for non-HA master service presence -  command: systemctl show {{ openshift.common.service_type }}-master.service -  register: master_svc_show -  changed_when: false -  failed_when: false -  - name: Stop and disable non-HA master when running HA -  service: +  systemd:      name: "{{ openshift.common.service_type }}-master"      enabled: no      state: stopped -  when: openshift_master_ha | bool and master_svc_show.rc == 0 and 'LoadState=not-found' not in master_svc_show.stdout +  when: openshift_master_ha | bool +  register: task_result +  failed_when: "task_result|failed and 'could not' not in task_result.msg|lower"  - set_fact:      master_service_status_changed: "{{ start_result | changed }}"    when: not openshift_master_ha | bool  - name: Mask master service -  command: systemctl mask {{ openshift.common.service_type }}-master -  when: openshift_master_ha | bool and openshift.master.cluster_method == 'native' and not openshift.common.is_containerized | bool +  systemd: +    name: "{{ openshift.common.service_type }}-master" +    masked: yes +  when: > +    openshift_master_ha | bool and +    openshift.master.cluster_method == 'native' and +    not openshift.common.is_containerized | bool  - name: Start and enable master api on first master -  service: +  systemd:      name: "{{ openshift.common.service_type }}-master-api"      enabled: yes      state: started @@ -228,7 +232,7 @@    when: openshift_master_ha | bool and openshift.master.cluster_method == 'native'  - name: Start and enable master api all masters -  service: +  systemd:      name: "{{ openshift.common.service_type }}-master-api"      enabled: yes      state: started @@ -264,7 +268,7 @@    when: openshift_master_ha | bool and openshift.master.cluster_method == 'native' and master_api_service_status_changed | bool  - name: Start and enable master controller on first master -  service: +  systemd:      name: "{{ openshift.common.service_type }}-master-controllers"      enabled: yes      state: started @@ -274,12 +278,13 @@    retries: 1    delay: 60 -- pause: +- name: Wait for master controller service to start on first master +  pause:      seconds: 15    when: openshift_master_ha | bool and openshift.master.cluster_method == 'native'  - name: Start and enable master controller on all masters -  service: +  systemd:      name: "{{ openshift.common.service_type }}-master-controllers"      enabled: yes      state: started @@ -300,7 +305,10 @@    register: install_result  - name: Start and enable cluster service -  service: name=pcsd enabled=yes state=started +  systemd: +    name: pcsd +    enabled: yes +    state: started    when: openshift_master_ha | bool and openshift.master.cluster_method == 'pacemaker'      and not openshift.common.is_containerized | bool diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2 index a52ae578c..dc9226a5a 100644 --- a/roles/openshift_master/templates/master.yaml.v1.j2 +++ b/roles/openshift_master/templates/master.yaml.v1.j2 @@ -1,7 +1,4 @@  admissionConfig: -{% if 
'admission_plugin_order' in openshift.master %} -  pluginOrderOverride:{{ openshift.master.admission_plugin_order | to_padded_yaml(level=2) }} -{% endif %}  {% if 'admission_plugin_config' in openshift.master %}    pluginConfig:{{ openshift.master.admission_plugin_config | to_padded_yaml(level=2) }}  {% endif %} @@ -116,13 +113,6 @@ kubernetesMasterConfig:    - v1beta3    - v1  {% endif %} -  admissionConfig: -{% if 'kube_admission_plugin_order' in openshift.master %} -    pluginOrderOverride:{{ openshift.master.kube_admission_plugin_order | to_padded_yaml(level=3) }} -{% endif %} -{% if 'kube_admission_plugin_config' in openshift.master %} -    pluginConfig:{{ openshift.master.kube_admission_plugin_config | to_padded_yaml(level=3) }} -{% endif %}    apiServerArguments: {{ openshift.master.api_server_args | default(None) | to_padded_yaml( level=2 ) }}    controllerArguments: {{ openshift.master.controller_args | default(None) | to_padded_yaml( level=2 ) }}    masterCount: {{ openshift.master.master_count if openshift.master.cluster_method | default(None) == 'native' else 1 }} diff --git a/roles/openshift_master_cluster/README.md b/roles/openshift_master_cluster/README.md index f150981fa..58dd19ac3 100644 --- a/roles/openshift_master_cluster/README.md +++ b/roles/openshift_master_cluster/README.md @@ -6,7 +6,7 @@ TODO  Requirements  ------------ -TODO +* Ansible 2.2  Role Variables  -------------- diff --git a/roles/openshift_master_cluster/meta/main.yml b/roles/openshift_master_cluster/meta/main.yml index 0c8881521..f2a67bc54 100644 --- a/roles/openshift_master_cluster/meta/main.yml +++ b/roles/openshift_master_cluster/meta/main.yml @@ -4,7 +4,7 @@ galaxy_info:    description:    company: Red Hat, Inc.    license: Apache License, Version 2.0 -  min_ansible_version: 1.8 +  min_ansible_version: 2.2    platforms:    - name: EL      versions: diff --git a/roles/openshift_master_facts/tasks/main.yml b/roles/openshift_master_facts/tasks/main.yml index 62ac1aef5..1f27a2c1d 100644 --- a/roles/openshift_master_facts/tasks/main.yml +++ b/roles/openshift_master_facts/tasks/main.yml @@ -66,10 +66,8 @@        master_image: "{{ osm_image | default(None) }}"        scheduler_predicates: "{{ openshift_master_scheduler_predicates | default(None) }}"        scheduler_priorities: "{{ openshift_master_scheduler_priorities | default(None) }}" -      admission_plugin_order: "{{openshift_master_admission_plugin_order | default(None) }}"        admission_plugin_config: "{{openshift_master_admission_plugin_config | default(None) }}" -      kube_admission_plugin_order: "{{openshift_master_kube_admission_plugin_order | default(None) }}" -      kube_admission_plugin_config: "{{openshift_master_kube_admission_plugin_config | default(None) }}" +      kube_admission_plugin_config: "{{openshift_master_kube_admission_plugin_config | default(None) }}" # deprecated, merged with admission_plugin_config        oauth_template: "{{ openshift_master_oauth_template | default(None) }}" # deprecated in origin 1.2 / OSE 3.2        oauth_templates: "{{ openshift_master_oauth_templates | default(None) }}"        oauth_always_show_provider_selection: "{{ openshift_master_oauth_always_show_provider_selection | default(None) }}" diff --git a/roles/openshift_metrics/README.md b/roles/openshift_metrics/README.md index 30a0a608d..f3c0f3474 100644 --- a/roles/openshift_metrics/README.md +++ b/roles/openshift_metrics/README.md @@ -5,8 +5,10 @@ OpenShift Metrics Installation  Requirements  ------------ -It requires subdomain fqdn to be set. 
-If persistence is enabled, then it also requires NFS. + +* Ansible 2.2 +* It requires subdomain fqdn to be set. +* If persistence is enabled, then it also requires NFS.  Role Variables  -------------- diff --git a/roles/openshift_metrics/handlers/main.yml b/roles/openshift_metrics/handlers/main.yml index e119db1a2..69c5a1663 100644 --- a/roles/openshift_metrics/handlers/main.yml +++ b/roles/openshift_metrics/handlers/main.yml @@ -1,16 +1,16 @@  ---  - name: restart master -  service: name={{ openshift.common.service_type }}-master state=restarted +  systemd: name={{ openshift.common.service_type }}-master state=restarted    when: (openshift.master.ha is not defined or not openshift.master.ha | bool) and (not (master_service_status_changed | default(false) | bool))    notify: Verify API Server  - name: restart master api -  service: name={{ openshift.common.service_type }}-master-api state=restarted +  systemd: name={{ openshift.common.service_type }}-master-api state=restarted    when: (openshift.master.ha is defined and openshift.master.ha | bool) and (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'    notify: Verify API Server  - name: restart master controllers -  service: name={{ openshift.common.service_type }}-master-controllers state=restarted +  systemd: name={{ openshift.common.service_type }}-master-controllers state=restarted    when: (openshift.master.ha is defined and openshift.master.ha | bool) and (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'  - name: Verify API Server diff --git a/roles/openshift_metrics/meta/main.yaml b/roles/openshift_metrics/meta/main.yaml index 5f8d4f5c5..a89467de5 100644 --- a/roles/openshift_metrics/meta/main.yaml +++ b/roles/openshift_metrics/meta/main.yaml @@ -1,3 +1,17 @@ +--- +galaxy_info: +  author: David Martín +  description: +  company: +  license: Apache License, Version 2.0 +  min_ansible_version: 2.2 +  platforms: +  - name: EL +    versions: +    - 7 +  categories: +  - cloud +  - system  dependencies:  - { role: openshift_examples } -- { role: openshift_facts }
\ No newline at end of file +- { role: openshift_facts } diff --git a/roles/openshift_node/README.md b/roles/openshift_node/README.md index cafecd343..d1920c485 100644 --- a/roles/openshift_node/README.md +++ b/roles/openshift_node/README.md @@ -6,10 +6,10 @@ Node service installation  Requirements  ------------ -One or more Master servers. - -A RHEL 7.1 host pre-configured with access to the rhel-7-server-rpms, -rhel-7-server-extras-rpms, and rhel-7-server-ose-3.0-rpms repos. +* Ansible 2.2 +* One or more Master servers +* A RHEL 7.1 host pre-configured with access to the rhel-7-server-rpms, +rhel-7-server-extras-rpms, and rhel-7-server-ose-3.0-rpms repos  Role Variables  -------------- diff --git a/roles/openshift_node/handlers/main.yml b/roles/openshift_node/handlers/main.yml index 34071964a..ebe584588 100644 --- a/roles/openshift_node/handlers/main.yml +++ b/roles/openshift_node/handlers/main.yml @@ -1,6 +1,6 @@  ---  - name: restart openvswitch -  service: name=openvswitch state=restarted +  systemd: name=openvswitch state=restarted    when: not (ovs_service_status_changed | default(false) | bool) and openshift.common.use_openshift_sdn | bool    notify:    - restart openvswitch pause @@ -10,5 +10,5 @@    when: openshift.common.is_containerized | bool  - name: restart node -  service: name={{ openshift.common.service_type }}-node state=restarted +  systemd: name={{ openshift.common.service_type }}-node state=restarted    when: not (node_service_status_changed | default(false) | bool) diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml index 612cc0e20..31d07838d 100644 --- a/roles/openshift_node/tasks/main.yml +++ b/roles/openshift_node/tasks/main.yml @@ -2,35 +2,37 @@  # TODO: allow for overriding default ports where possible  - fail:      msg: "SELinux is disabled, This deployment type requires that SELinux is enabled." -  when: (not ansible_selinux or ansible_selinux.status != 'enabled') and deployment_type in ['enterprise', 'online', 'atomic-enterprise', 'openshift-enterprise'] +  when: > +    (not ansible_selinux or ansible_selinux.status != 'enabled') and +    deployment_type in ['enterprise', 'online', 'atomic-enterprise', 'openshift-enterprise']  - name: Set node facts    openshift_facts:      role: "{{ item.role }}"      local_facts: "{{ item.local_facts }}"    with_items: -  # Reset node labels to an empty dictionary. 
-  - role: node -    local_facts: -      labels: {} -  - role: node -    local_facts: -      annotations: "{{ openshift_node_annotations | default(none) }}" -      debug_level: "{{ openshift_node_debug_level | default(openshift.common.debug_level) }}" -      iptables_sync_period: "{{ openshift_node_iptables_sync_period | default(None) }}" -      kubelet_args: "{{ openshift_node_kubelet_args | default(None) }}" -      labels: "{{ lookup('oo_option', 'openshift_node_labels') | default( openshift_node_labels | default(none), true) }}" -      registry_url: "{{ oreg_url | default(none) }}" -      schedulable: "{{ openshift_schedulable | default(openshift_scheduleable) | default(None) }}" -      sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}" -      storage_plugin_deps: "{{ osn_storage_plugin_deps | default(None) }}" -      set_node_ip: "{{ openshift_set_node_ip | default(None) }}" -      node_image: "{{ osn_image | default(None) }}" -      ovs_image: "{{ osn_ovs_image | default(None) }}" -      proxy_mode: "{{ openshift_node_proxy_mode | default('iptables') }}" -      local_quota_per_fsgroup: "{{ openshift_node_local_quota_per_fsgroup | default(None) }}" -      dns_ip: "{{ openshift_dns_ip | default(none) | get_dns_ip(hostvars[inventory_hostname])}}" -      env_vars: "{{ openshift_node_env_vars | default(None) }}" +    # Reset node labels to an empty dictionary. +    - role: node +      local_facts: +        labels: {} +    - role: node +      local_facts: +        annotations: "{{ openshift_node_annotations | default(none) }}" +        debug_level: "{{ openshift_node_debug_level | default(openshift.common.debug_level) }}" +        iptables_sync_period: "{{ openshift_node_iptables_sync_period | default(None) }}" +        kubelet_args: "{{ openshift_node_kubelet_args | default(None) }}" +        labels: "{{ lookup('oo_option', 'openshift_node_labels') | default( openshift_node_labels | default(none), true) }}" +        registry_url: "{{ oreg_url | default(none) }}" +        schedulable: "{{ openshift_schedulable | default(openshift_scheduleable) | default(None) }}" +        sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}" +        storage_plugin_deps: "{{ osn_storage_plugin_deps | default(None) }}" +        set_node_ip: "{{ openshift_set_node_ip | default(None) }}" +        node_image: "{{ osn_image | default(None) }}" +        ovs_image: "{{ osn_ovs_image | default(None) }}" +        proxy_mode: "{{ openshift_node_proxy_mode | default('iptables') }}" +        local_quota_per_fsgroup: "{{ openshift_node_local_quota_per_fsgroup | default(None) }}" +        dns_ip: "{{ openshift_dns_ip | default(none) | get_dns_ip(hostvars[inventory_hostname])}}" +        env_vars: "{{ openshift_node_env_vars | default(None) }}"  # We have to add tuned-profiles in the same transaction otherwise we run into depsolving  # problems because the rpms don't pin the version properly. This was fixed in 3.1 packaging. 
@@ -80,7 +82,10 @@    sysctl: name="net.ipv4.ip_forward" value=1 sysctl_set=yes state=present reload=yes  - name: Start and enable openvswitch docker service -  service: name=openvswitch.service enabled=yes state=started +  systemd: +    name: openvswitch.service +    enabled: yes +    state: started    when: openshift.common.is_containerized | bool and openshift.common.use_openshift_sdn | bool    register: ovs_start_result @@ -102,7 +107,7 @@      group: root      mode: 0600    notify: -  - restart node +    - restart node  - name: Configure AWS Cloud Provider Settings    lineinfile: @@ -118,7 +123,7 @@    no_log: True    when: "openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined"    notify: -  - restart node +    - restart node  - name: Configure Node Environment Variables    lineinfile: @@ -128,7 +133,7 @@      create: true    with_dict: "{{ openshift.node.env_vars | default({}) }}"    notify: -  - restart node +    - restart node  - name: NFS storage plugin configuration    include: storage_plugins/nfs.yml @@ -168,11 +173,17 @@    when: openshift.common.is_containerized | bool  - name: Start and enable node dep -  service: name={{ openshift.common.service_type }}-node-dep enabled=yes state=started +  systemd: +    name: "{{ openshift.common.service_type }}-node-dep" +    enabled: yes +    state: started    when: openshift.common.is_containerized | bool  - name: Start and enable node -  service: name={{ openshift.common.service_type }}-node enabled=yes state=started +  systemd: +    name: "{{ openshift.common.service_type }}-node" +    enabled: yes +    state: started    register: node_start_result    until: not node_start_result | failed    retries: 1 diff --git a/roles/openshift_node_certificates/README.md b/roles/openshift_node_certificates/README.md index f56066b29..f4215950f 100644 --- a/roles/openshift_node_certificates/README.md +++ b/roles/openshift_node_certificates/README.md @@ -6,6 +6,8 @@ This role determines if OpenShift node certificates must be created, delegates c  Requirements  ------------ +* Ansible 2.2 +  Role Variables  -------------- diff --git a/roles/openshift_node_certificates/handlers/main.yml b/roles/openshift_node_certificates/handlers/main.yml index f2299cecf..a74668b13 100644 --- a/roles/openshift_node_certificates/handlers/main.yml +++ b/roles/openshift_node_certificates/handlers/main.yml @@ -2,9 +2,9 @@  - name: update ca trust    command: update-ca-trust    notify: -  - restart docker after updating ca trust +    - restart docker after updating ca trust  - name: restart docker after updating ca trust -  service: +  systemd:      name: docker      state: restarted diff --git a/roles/openshift_node_certificates/meta/main.yml b/roles/openshift_node_certificates/meta/main.yml index 50a862ee9..93216c1d2 100644 --- a/roles/openshift_node_certificates/meta/main.yml +++ b/roles/openshift_node_certificates/meta/main.yml @@ -4,7 +4,7 @@ galaxy_info:    description: OpenShift Node Certificates    company: Red Hat, Inc.    
license: Apache License, Version 2.0 -  min_ansible_version: 2.1 +  min_ansible_version: 2.2    platforms:    - name: EL      versions: diff --git a/roles/openshift_node_dnsmasq/handlers/main.yml b/roles/openshift_node_dnsmasq/handlers/main.yml index 7d43b6106..b4a0c3583 100644 --- a/roles/openshift_node_dnsmasq/handlers/main.yml +++ b/roles/openshift_node_dnsmasq/handlers/main.yml @@ -1,10 +1,10 @@  ---  - name: restart NetworkManager -  service: +  systemd:      name: NetworkManager      state: restarted  - name: restart dnsmasq -  service: +  systemd:      name: dnsmasq      state: restarted diff --git a/roles/openshift_node_dnsmasq/meta/main.yml b/roles/openshift_node_dnsmasq/meta/main.yml index c83d64ae4..18e04e06d 100644 --- a/roles/openshift_node_dnsmasq/meta/main.yml +++ b/roles/openshift_node_dnsmasq/meta/main.yml @@ -4,7 +4,7 @@ galaxy_info:    description: OpenShift Node DNSMasq support    company: Red Hat, Inc.    license: Apache License, Version 2.0 -  min_ansible_version: 1.7 +  min_ansible_version: 2.2    platforms:    - name: EL      versions: diff --git a/roles/openshift_node_dnsmasq/tasks/main.yml b/roles/openshift_node_dnsmasq/tasks/main.yml index 0167b02b1..3311f7006 100644 --- a/roles/openshift_node_dnsmasq/tasks/main.yml +++ b/roles/openshift_node_dnsmasq/tasks/main.yml @@ -22,16 +22,16 @@  - name: Deploy additional dnsmasq.conf    template: -   src: "{{ openshift_node_dnsmasq_additional_config_file }}" -   dest: /etc/dnsmasq.d/openshift-ansible.conf -   owner: root -   group: root -   mode: 0644 +    src: "{{ openshift_node_dnsmasq_additional_config_file }}" +    dest: /etc/dnsmasq.d/openshift-ansible.conf +    owner: root +    group: root +    mode: 0644    when: openshift_node_dnsmasq_additional_config_file is defined    notify: restart dnsmasq  - name: Enable dnsmasq -  service: +  systemd:      name: dnsmasq      enabled: yes      state: started diff --git a/roles/openshift_storage_nfs/README.md b/roles/openshift_storage_nfs/README.md index b0480a958..817b007e8 100644 --- a/roles/openshift_storage_nfs/README.md +++ b/roles/openshift_storage_nfs/README.md @@ -6,10 +6,10 @@ OpenShift NFS Server Installation  Requirements  ------------ -This role is intended to be applied to the [nfs] host group which is +* Ansible 2.2 +* This role is intended to be applied to the [nfs] host group which is  separate from OpenShift infrastructure components. - -Requires access to the 'nfs-utils' package. +* Requires access to the 'nfs-utils' package.  Role Variables  -------------- diff --git a/roles/openshift_storage_nfs/handlers/main.yml b/roles/openshift_storage_nfs/handlers/main.yml index a1377a203..0d1149db8 100644 --- a/roles/openshift_storage_nfs/handlers/main.yml +++ b/roles/openshift_storage_nfs/handlers/main.yml @@ -1,6 +1,6 @@  ---  - name: restart nfs-server -  service: +  systemd:      name: nfs-server      state: restarted    when: not (nfs_service_status_changed | default(false)) diff --git a/roles/openshift_storage_nfs/meta/main.yml b/roles/openshift_storage_nfs/meta/main.yml index 865865d9c..62e38bd8c 100644 --- a/roles/openshift_storage_nfs/meta/main.yml +++ b/roles/openshift_storage_nfs/meta/main.yml @@ -4,7 +4,7 @@ galaxy_info:    description: OpenShift NFS Server    company: Red Hat, Inc.    
license: Apache License, Version 2.0 -  min_ansible_version: 1.9 +  min_ansible_version: 2.2    platforms:    - name: EL      versions: diff --git a/roles/openshift_storage_nfs/tasks/main.yml b/roles/openshift_storage_nfs/tasks/main.yml index ecc52e4af..fd935f105 100644 --- a/roles/openshift_storage_nfs/tasks/main.yml +++ b/roles/openshift_storage_nfs/tasks/main.yml @@ -10,7 +10,7 @@    register: nfs_config  - name: Restart nfs-config -  service: name=nfs-config state=restarted +  systemd: name=nfs-config state=restarted    when: nfs_config | changed  - name: Ensure exports directory exists @@ -26,9 +26,9 @@      owner: nfsnobody      group: nfsnobody    with_items: -  - "{{ openshift.hosted.registry }}" -  - "{{ openshift.hosted.metrics }}" -  - "{{ openshift.hosted.logging }}" +    - "{{ openshift.hosted.registry }}" +    - "{{ openshift.hosted.metrics }}" +    - "{{ openshift.hosted.logging }}"  - name: Configure exports @@ -36,7 +36,7 @@      dest: /etc/exports.d/openshift-ansible.exports      src: exports.j2    notify: -  - restart nfs-server +    - restart nfs-server  # Now that we're putting our exports in our own file clean up the old ones  - name: register exports @@ -51,16 +51,14 @@    with_items: "{{ exports_out.stdout_lines | default([]) }}"    when: exports_out.rc == 0    notify: -  - restart nfs-server +    - restart nfs-server  - name: Enable and start services -  service: -    name: "{{ item }}" +  systemd: +    name: nfs-server      state: started      enabled: yes    register: start_result -  with_items: -  - nfs-server  - set_fact:      nfs_service_status_changed: "{{ start_result | changed }}" diff --git a/roles/openshift_storage_nfs_lvm/README.md b/roles/openshift_storage_nfs_lvm/README.md index 3680ef5b5..8b8471745 100644 --- a/roles/openshift_storage_nfs_lvm/README.md +++ b/roles/openshift_storage_nfs_lvm/README.md @@ -8,10 +8,9 @@ create persistent volumes.  ## Requirements -* NFS server with NFS, iptables, and everything setup. - +* Ansible 2.2 +* NFS server with NFS, iptables, and everything setup  * A lvm volume group created on the nfs server (default: openshiftvg) -  * The lvm volume needs to have as much free space as you are allocating  ## Role Variables diff --git a/roles/openshift_storage_nfs_lvm/handlers/main.yml b/roles/openshift_storage_nfs_lvm/handlers/main.yml index 52f3ceffe..9ce8b783d 100644 --- a/roles/openshift_storage_nfs_lvm/handlers/main.yml +++ b/roles/openshift_storage_nfs_lvm/handlers/main.yml @@ -1,3 +1,3 @@  ---  - name: restart nfs -  service: name=nfs-server state=restarted +  systemd: name=nfs-server state=restarted diff --git a/roles/openshift_storage_nfs_lvm/meta/main.yml b/roles/openshift_storage_nfs_lvm/meta/main.yml index 62ea54883..bed1216f8 100644 --- a/roles/openshift_storage_nfs_lvm/meta/main.yml +++ b/roles/openshift_storage_nfs_lvm/meta/main.yml @@ -4,7 +4,7 @@ galaxy_info:    description: Create LVM volumes and use them as openshift persistent volumes.    company: Red Hat, Inc.    
license: license (Apache) -  min_ansible_version: 1.4 +  min_ansible_version: 2.2    platforms:    - name: EL      versions: diff --git a/roles/openshift_storage_nfs_lvm/tasks/nfs.yml b/roles/openshift_storage_nfs_lvm/tasks/nfs.yml index e0be9f0b7..03f4fcec0 100644 --- a/roles/openshift_storage_nfs_lvm/tasks/nfs.yml +++ b/roles/openshift_storage_nfs_lvm/tasks/nfs.yml @@ -4,14 +4,23 @@    when: not openshift.common.is_containerized | bool  - name: Start rpcbind -  service: name=rpcbind state=started enabled=yes +  systemd: +    name: rpcbind +    state: started +    enabled: yes  - name: Start nfs -  service: name=nfs-server state=started enabled=yes +  systemd: +    name: nfs-server +    state: started +    enabled: yes  - name: Export the directories    lineinfile: dest=/etc/exports                regexp="^{{ osnl_mount_dir }}/{{ item }} "                line="{{ osnl_mount_dir }}/{{ item }} {{osnl_nfs_export_options}}" -  with_sequence: start={{osnl_volume_num_start}} count={{osnl_number_of_volumes}} format={{osnl_volume_prefix}}{{osnl_volume_size}}g%04d +  with_sequence: +    start: "{{osnl_volume_num_start}}" +    count: "{{osnl_number_of_volumes}}" +    format: "{{osnl_volume_prefix}}{{osnl_volume_size}}g%04d"    notify: restart nfs diff --git a/roles/os_firewall/README.md b/roles/os_firewall/README.md index bb7fc2384..c13c5dfc9 100644 --- a/roles/os_firewall/README.md +++ b/roles/os_firewall/README.md @@ -7,7 +7,7 @@ case (Adding/Removing rules based on protocol and port number).  Requirements  ------------ -None. +Ansible 2.2  Role Variables  -------------- diff --git a/roles/os_firewall/meta/main.yml b/roles/os_firewall/meta/main.yml index 4cfc72011..dca5fc5ff 100644 --- a/roles/os_firewall/meta/main.yml +++ b/roles/os_firewall/meta/main.yml @@ -4,7 +4,7 @@ galaxy_info:    description: os_firewall    company: Red Hat, Inc.    license: Apache License, Version 2.0 -  min_ansible_version: 1.7 +  min_ansible_version: 2.2    platforms:      - name: EL        versions:  | 
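A note on the admission-config consolidation spread across the hunks above: the commented openshift_master_admission_plugin_config example added to both inventory files feeds openshift.master.admission_plugin_config, openshift_facts now merges the deprecated kube_admission_plugin_config fact into it, the master template renders a single admissionConfig.pluginConfig block (pluginOrderOverride and kubernetesMasterConfig.admissionConfig are removed), and the 3.3/3.4 upgrade playbooks blank the old keys with modify_yaml. For illustration only, the commented inventory value would render into master-config.yaml roughly as follows; this YAML is an assumption about what the to_padded_yaml filter produces, not output captured from a real master:

admissionConfig:
  pluginConfig:
    ProjectRequestLimit:
      configuration:
        apiVersion: v1
        kind: ProjectRequestLimitConfig
        limits:
        - selector:
            admin: "true"
        - maxProjects: "1"
    PodNodeConstraints:
      configuration:
        apiVersion: v1
        kind: PodNodeConstraintsConfig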

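The etcd and master task changes above also drop the old presence checks that ran "systemctl show" and grepped for LoadState=not-found before stopping or masking a unit. The replacement idiom simply asks the systemd module to do the work, registers the result, and tolerates the one failure mode that means the unit was never installed. Condensed from the etcd role, with comments added; the exact "Could not find the requested service" wording is an assumption about how the module reports a missing unit, and the check itself only looks for the substring "could not":

- name: Disable system etcd when containerized
  systemd:
    name: etcd
    state: stopped
    enabled: no
    masked: yes
    daemon_reload: yes
  when: etcd_is_containerized | bool
  register: task_result
  # A host that never had the rpm-installed etcd unit fails here with a
  # "could not find ..." style message; that specific failure is accepted,
  # anything else still fails the play.
  failed_when: "task_result|failed and 'could not' not in task_result.msg|lower"

The non-HA master stop/disable task in roles/openshift_master/tasks/main.yml follows the same pattern.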