author     Tim Bielawa <timbielawa@gmail.com>  2016-12-12 11:39:44 -0800
committer  GitHub <noreply@github.com>  2016-12-12 11:39:44 -0800
commit     7374505de2a11b94d22672b8da7e405b919a15bc (patch)
tree       51881c3f16f1f7368bdbf62e0478a6c292a5470a
parent     91fba8015e9e8035cca2444dbbc8954a27e2310e (diff)
parent     be97433dd559a3bdae4baedda20a7f17bd47450b (diff)
Merge pull request #2964 from mtnbikenc/linting-refactor
YAML Linting with CI checking
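
This merge wires yamllint into CI: .travis.yml and utils/Makefile gain the hookup, and the new git/.yamllint config (added below) relaxes a few default rules before the tree-wide cleanup. As a rough, hypothetical sketch — the exact Travis/Makefile wiring is in the changes below and is not reproduced here — a CI step running the linter against the new config could look like:

    # Hypothetical .travis.yml fragment; not part of this commit.
    # Assumes yamllint is installed from PyPI (pip install yamllint).
    ---
    install:
    - pip install yamllint
    script:
    - yamllint -c git/.yamllint playbooks/ roles/

The same yamllint invocation can be run locally to check a tree before pushing.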
-rw-r--r--  .travis.yml | 1
-rw-r--r--  git/.yamllint | 67
-rw-r--r--  playbooks/adhoc/atomic_openshift_tutorial_reset.yml | 2
-rw-r--r--  playbooks/adhoc/bootstrap-fedora.yml | 1
-rw-r--r--  playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml | 3
-rwxr-xr-x  playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml | 2
-rw-r--r--  playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml | 8
-rw-r--r--  playbooks/adhoc/noc/create_host.yml | 19
-rw-r--r--  playbooks/adhoc/noc/create_maintenance.yml | 7
-rw-r--r--  playbooks/adhoc/openshift_hosted_logging_efk.yaml | 3
-rw-r--r--  playbooks/adhoc/s3_registry/s3_registry.yml | 2
-rwxr-xr-x  playbooks/adhoc/sdn_restart/oo-sdn-restart.yml | 2
-rw-r--r--  playbooks/adhoc/uninstall.yml | 2
-rw-r--r--  playbooks/adhoc/zabbix_setup/clean_zabbix.yml | 2
-rwxr-xr-x  playbooks/adhoc/zabbix_setup/oo-config-zaio.yml | 2
-rw-r--r--  playbooks/aws/openshift-cluster/cluster_hosts.yml | 16
-rw-r--r--  playbooks/aws/openshift-cluster/config.yml | 4
-rw-r--r--  playbooks/aws/openshift-cluster/tasks/launch_instances.yml | 30
-rw-r--r--  playbooks/aws/openshift-cluster/terminate.yml | 84
-rw-r--r--  playbooks/byo/openshift-cluster/cluster_hosts.yml | 16
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml | 3
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml | 1
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml | 1
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml | 1
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml | 1
-rw-r--r--  playbooks/byo/openshift-node/network_manager.yml | 38
-rw-r--r--  playbooks/byo/rhel_subscribe.yml | 6
-rw-r--r--  playbooks/common/openshift-cluster/additional_config.yml | 1
-rw-r--r--  playbooks/common/openshift-cluster/enable_dnsmasq.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml | 3
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/backup.yml | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml | 18
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_3/node_config_upgrade.yml | 1
-rw-r--r--  playbooks/common/openshift-cluster/validate_hostnames.yml | 4
-rw-r--r--  playbooks/common/openshift-etcd/service.yml | 2
-rw-r--r--  playbooks/common/openshift-loadbalancer/service.yml | 2
-rw-r--r--  playbooks/common/openshift-master/config.yml | 4
-rw-r--r--  playbooks/common/openshift-master/restart.yml | 13
-rw-r--r--  playbooks/common/openshift-master/restart_hosts.yml | 1
-rw-r--r--  playbooks/common/openshift-master/restart_services.yml | 1
-rw-r--r--  playbooks/common/openshift-master/service.yml | 2
-rw-r--r--  playbooks/common/openshift-nfs/service.yml | 2
-rw-r--r--  playbooks/common/openshift-node/service.yml | 2
-rw-r--r--  playbooks/gce/openshift-cluster/cluster_hosts.yml | 16
-rw-r--r--  playbooks/gce/openshift-cluster/tasks/launch_instances.yml | 2
-rw-r--r--  playbooks/gce/openshift-cluster/terminate.yml | 23
-rw-r--r--  playbooks/libvirt/openshift-cluster/cluster_hosts.yml | 16
-rw-r--r--  playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml | 4
-rw-r--r--  playbooks/libvirt/openshift-cluster/terminate.yml | 1
-rw-r--r--  playbooks/libvirt/openshift-cluster/vars.yml | 20
-rw-r--r--  playbooks/openstack/openshift-cluster/cluster_hosts.yml | 16
-rw-r--r--  playbooks/openstack/openshift-cluster/launch.yml | 36
-rw-r--r--  playbooks/openstack/openshift-cluster/terminate.yml | 1
-rw-r--r--  playbooks/openstack/openshift-cluster/vars.yml | 1
-rw-r--r--  roles/docker/meta/main.yml | 4
-rw-r--r--  roles/docker/tasks/main.yml | 18
-rw-r--r--  roles/flannel_register/defaults/main.yaml | 1
-rw-r--r--  roles/kube_nfs_volumes/meta/main.yml | 2
-rw-r--r--  roles/nuage_ca/meta/main.yml | 2
-rw-r--r--  roles/nuage_common/defaults/main.yaml | 1
-rw-r--r--  roles/nuage_master/defaults/main.yaml | 2
-rw-r--r--  roles/nuage_master/meta/main.yml | 14
-rw-r--r--  roles/nuage_master/tasks/certificates.yml | 8
-rw-r--r--  roles/nuage_master/tasks/main.yaml | 14
-rw-r--r--  roles/nuage_master/vars/main.yaml | 17
-rw-r--r--  roles/nuage_node/meta/main.yml | 16
-rw-r--r--  roles/nuage_node/tasks/certificates.yml | 6
-rw-r--r--  roles/nuage_node/tasks/iptables.yml | 2
-rw-r--r--  roles/nuage_node/tasks/main.yaml | 22
-rw-r--r--  roles/nuage_node/vars/main.yaml | 4
-rw-r--r--  roles/openshift_builddefaults/tasks/main.yml | 3
-rw-r--r--  roles/openshift_cloud_provider/tasks/aws.yml | 1
-rw-r--r--  roles/openshift_cloud_provider/tasks/gce.yml | 1
-rw-r--r--  roles/openshift_common/tasks/main.yml | 5
-rw-r--r--  roles/openshift_docker_facts/tasks/main.yml | 2
-rw-r--r--  roles/openshift_examples/defaults/main.yml | 4
-rw-r--r--  roles/openshift_expand_partition/meta/main.yml | 4
-rw-r--r--  roles/openshift_hosted/tasks/registry/storage/object_storage.yml | 1
-rw-r--r--  roles/openshift_hosted_logging/tasks/cleanup_logging.yaml | 86
-rw-r--r--  roles/openshift_hosted_logging/tasks/deploy_logging.yaml | 348
-rw-r--r--  roles/openshift_hosted_logging/vars/main.yaml | 1
-rw-r--r--  roles/openshift_manageiq/vars/main.yml | 53
-rw-r--r--  roles/openshift_master/tasks/systemd_units.yml | 1
-rw-r--r--  roles/openshift_master_facts/tasks/main.yml | 4
-rw-r--r--  roles/openshift_master_facts/vars/main.yml | 1
-rw-r--r--  roles/openshift_metrics/tasks/main.yaml | 6
-rw-r--r--  roles/openshift_metrics/vars/main.yaml | 7
-rw-r--r--  roles/openshift_node/tasks/systemd_units.yml | 25
-rw-r--r--  roles/openshift_node_dnsmasq/tasks/no-network-manager.yml | 2
-rw-r--r--  roles/openshift_repos/vars/main.yml | 2
-rw-r--r--  roles/openshift_serviceaccounts/tasks/legacy_add_scc_to_user.yml | 3
-rw-r--r--  roles/openshift_serviceaccounts/tasks/main.yml | 1
-rw-r--r--  roles/openshift_storage_nfs_lvm/meta/main.yml | 2
-rw-r--r--  roles/rhel_subscribe/meta/main.yml | 3
-rw-r--r--  utils/Makefile | 16
-rw-r--r--  utils/test-requirements.txt | 1
96 files changed, 658 insertions, 577 deletions
diff --git a/.travis.yml b/.travis.yml
index c88214bc2..b5b7a2a59 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,3 +1,4 @@
+---
sudo: false
language: python
diff --git a/git/.yamllint b/git/.yamllint
new file mode 100644
index 000000000..573321a94
--- /dev/null
+++ b/git/.yamllint
@@ -0,0 +1,67 @@
+# -*- mode: yaml -*-
+# vim:ts=2:sw=2:ai:si:syntax=yaml
+#
+# yamllint configuration directives
+# Project Homepage: https://github.com/adrienverge/yamllint
+#
+# Overriding rules in files:
+# http://yamllint.readthedocs.io/en/latest/disable_with_comments.html
+---
+extends: default
+
+# Rules documentation: http://yamllint.readthedocs.io/en/latest/rules.html
+rules:
+
+ braces:
+ # Defaults
+ # min-spaces-inside: 0
+ # max-spaces-inside: 0
+
+ # Keeping 0 min-spaces to not error on empty collection definitions
+ min-spaces-inside: 0
+ # Allowing one space inside braces to improve code readability
+ max-spaces-inside: 1
+
+ brackets:
+ # Defaults
+ # min-spaces-inside: 0
+ # max-spaces-inside: 0
+
+ # Keeping 0 min-spaces to not error on empty collection definitions
+ min-spaces-inside: 0
+ # Allowing one space inside braces to improve code readability
+ max-spaces-inside: 1
+
+ comments:
+ # Defaults
+ # level: warning
+ # require-starting-space: true
+ # min-spaces-from-content: 2
+
+ # Disabling to allow for code comment blocks and #!/usr/bin/ansible-playbook
+ require-starting-space: false
+
+ indentation:
+ # Defaults
+ # spaces: consistent
+ # indent-sequences: true
+ # check-multi-line-strings: false
+
+ # Requiring 2 space indentation
+ spaces: 2
+ # Requiring consistent indentation within a file, either indented or not
+ indent-sequences: consistent
+
+ # Disabling due to copious amounts of long lines in the code which would
+ # require a code style change to resolve
+ line-length: disable
+ # Defaults
+ # max: 80
+ # allow-non-breakable-words: true
+ # allow-non-breakable-inline-mappings: false
+
+ # Disabling due to copious amounts of truthy warnings in the code which would
+ # require a code style change to resolve
+ truthy: disable
+ # Defaults
+ # level: warning
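
Taken together, these overrides accept the Ansible style already used across the repository. An illustrative fragment (hypothetical, not from this commit) that passes the configuration above:

    #!/usr/bin/ansible-playbook
    ---
    - hosts: all
      vars:
        empty: {}                 # min-spaces-inside: 0 keeps empty collections legal
        padded: { key: value }    # max-spaces-inside: 1 permits the padding
      tasks:
      - debug:                    # unindented sequence; 'consistent' allows either form
          msg: hello

The shebang-style first line is exactly why require-starting-space is disabled, and the tree-wide edits in the rest of this diff bring existing files into line with these rules.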
diff --git a/playbooks/adhoc/atomic_openshift_tutorial_reset.yml b/playbooks/adhoc/atomic_openshift_tutorial_reset.yml
index 5a5a00ea4..3c157bbf3 100644
--- a/playbooks/adhoc/atomic_openshift_tutorial_reset.yml
+++ b/playbooks/adhoc/atomic_openshift_tutorial_reset.yml
@@ -19,7 +19,7 @@
changed_when: False
failed_when: False
- - shell: docker images -q |xargs docker rmi
+ - shell: docker images -q |xargs docker rmi
changed_when: False
failed_when: False
diff --git a/playbooks/adhoc/bootstrap-fedora.yml b/playbooks/adhoc/bootstrap-fedora.yml
index b370d7fba..f12885b3a 100644
--- a/playbooks/adhoc/bootstrap-fedora.yml
+++ b/playbooks/adhoc/bootstrap-fedora.yml
@@ -1,3 +1,4 @@
+---
- hosts: OSEv3
gather_facts: false
tasks:
diff --git a/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml b/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml
index 4d32fc40b..f638fab83 100644
--- a/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml
+++ b/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml
@@ -56,7 +56,7 @@
- name: fail if we don't detect loopback
fail:
- msg: loopback not detected! Please investigate manually.
+ msg: loopback not detected! Please investigate manually.
when: loop_device_check.rc == 1
- name: stop zagg client monitoring container
@@ -139,4 +139,3 @@
register: dockerstart
- debug: var=dockerstart
-
diff --git a/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml b/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml
index 1438fd7d5..d988a28b0 100755
--- a/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml
+++ b/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml
@@ -43,7 +43,7 @@
- name: fail if we don't detect loopback
fail:
- msg: loopback not detected! Please investigate manually.
+ msg: loopback not detected! Please investigate manually.
when: loop_device_check.rc == 1
- name: stop zagg client monitoring container
diff --git a/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml b/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml
index d24e9cafa..598f1966d 100644
--- a/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml
+++ b/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml
@@ -37,7 +37,7 @@
vars:
cli_volume_type: gp2
cli_volume_size: 200
-# cli_volume_iops: "{{ 30 * cli_volume_size }}"
+ #cli_volume_iops: "{{ 30 * cli_volume_size }}"
pre_tasks:
- fail:
@@ -65,7 +65,7 @@
- name: fail if we don't detect devicemapper
fail:
- msg: The "Storage Driver" in "docker info" is not set to "devicemapper"! Please investigate manually.
+ msg: The "Storage Driver" in "docker info" is not set to "devicemapper"! Please investigate manually.
when: device_mapper_check.rc == 1
# docker-storage-setup creates a docker-pool as the lvm. I am using docker-pool lvm to test
@@ -80,7 +80,7 @@
- name: fail if we don't find a docker volume group
fail:
- msg: Unable to find docker volume group. Please investigate manually.
+ msg: Unable to find docker volume group. Please investigate manually.
when: docker_vg_name.stdout_lines|length != 1
# docker-storage-setup creates a docker-pool as the lvm. I am using docker-pool lvm to test
@@ -95,7 +95,7 @@
- name: fail if we don't find a docker physical volume
fail:
- msg: Unable to find docker physical volume. Please investigate manually.
+ msg: Unable to find docker physical volume. Please investigate manually.
when: docker_pv_name.stdout_lines|length != 1
diff --git a/playbooks/adhoc/noc/create_host.yml b/playbooks/adhoc/noc/create_host.yml
index 2d2cae2b5..318396bcc 100644
--- a/playbooks/adhoc/noc/create_host.yml
+++ b/playbooks/adhoc/noc/create_host.yml
@@ -16,7 +16,7 @@
host: ctr_test_kwoodson
filter:
host:
- - ctr_kwoodson_test_tmpl
+ - ctr_kwoodson_test_tmpl
register: tmpl_results
@@ -39,21 +39,20 @@
params:
host: ctr_test_kwoodson
interfaces:
- - type: 1
- main: 1
- useip: 1
- ip: 127.0.0.1
- dns: ""
- port: 10050
+ - type: 1
+ main: 1
+ useip: 1
+ ip: 127.0.0.1
+ dns: ""
+ port: 10050
groups:
- - groupid: 1
+ - groupid: 1
templates: "{{ tmpl_results.results | oo_collect('templateid') | oo_build_zabbix_list_dict('templateid') }}"
output: extend
filter:
host:
- - ctr_test_kwoodson
+ - ctr_test_kwoodson
register: host_results
- debug: var=host_results
-
diff --git a/playbooks/adhoc/noc/create_maintenance.yml b/playbooks/adhoc/noc/create_maintenance.yml
index 8ad5fa0e2..b694aea1b 100644
--- a/playbooks/adhoc/noc/create_maintenance.yml
+++ b/playbooks/adhoc/noc/create_maintenance.yml
@@ -26,13 +26,12 @@
maintenance_type: "0"
output: extend
hostids: "{{ oo_hostids.split(',') | default([]) }}"
-#groupids: "{{ oo_groupids.split(',') | default([]) }}"
+ #groupids: "{{ oo_groupids.split(',') | default([]) }}"
timeperiods:
- - start_time: "{{ oo_start }}"
- period: "{{ oo_stop }}"
+ - start_time: "{{ oo_start }}"
+ period: "{{ oo_stop }}"
selectTimeperiods: extend
register: maintenance
- debug: var=maintenance
-
diff --git a/playbooks/adhoc/openshift_hosted_logging_efk.yaml b/playbooks/adhoc/openshift_hosted_logging_efk.yaml
index a3121d046..def1d24e0 100644
--- a/playbooks/adhoc/openshift_hosted_logging_efk.yaml
+++ b/playbooks/adhoc/openshift_hosted_logging_efk.yaml
@@ -2,5 +2,4 @@
- hosts: masters[0]
roles:
- role: openshift_hosted_logging
- openshift_hosted_logging_cleanup: no
-
+ openshift_hosted_logging_cleanup: no
diff --git a/playbooks/adhoc/s3_registry/s3_registry.yml b/playbooks/adhoc/s3_registry/s3_registry.yml
index daf84e242..2c79a1b4d 100644
--- a/playbooks/adhoc/s3_registry/s3_registry.yml
+++ b/playbooks/adhoc/s3_registry/s3_registry.yml
@@ -22,7 +22,7 @@
tasks:
- name: Check for AWS creds
- fail:
+ fail:
msg: "Couldn't find {{ item }} creds in ENV"
when: "{{ item }} == ''"
with_items:
diff --git a/playbooks/adhoc/sdn_restart/oo-sdn-restart.yml b/playbooks/adhoc/sdn_restart/oo-sdn-restart.yml
index 08e8f8968..ae7d01730 100755
--- a/playbooks/adhoc/sdn_restart/oo-sdn-restart.yml
+++ b/playbooks/adhoc/sdn_restart/oo-sdn-restart.yml
@@ -7,7 +7,7 @@
- name: Check vars
hosts: localhost
gather_facts: false
-
+
pre_tasks:
- fail:
msg: "Playbook requires host to be set"
diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml
index be1070f73..bdd92a47d 100644
--- a/playbooks/adhoc/uninstall.yml
+++ b/playbooks/adhoc/uninstall.yml
@@ -184,7 +184,7 @@
- docker.io/openshift
when: openshift_uninstall_images | default(True) | bool
- - shell: "docker rmi -f {{ item.stdout_lines | join(' ') }}"
+ - shell: "docker rmi -f {{ item.stdout_lines | join(' ') }}"
changed_when: False
failed_when: False
with_items: "{{ images_to_delete.results }}"
diff --git a/playbooks/adhoc/zabbix_setup/clean_zabbix.yml b/playbooks/adhoc/zabbix_setup/clean_zabbix.yml
index 09f7c76cc..955f990b7 100644
--- a/playbooks/adhoc/zabbix_setup/clean_zabbix.yml
+++ b/playbooks/adhoc/zabbix_setup/clean_zabbix.yml
@@ -57,4 +57,4 @@
name: "{{ item }}"
state: absent
with_items: "{{ templates.results | difference(templ_zabbix_agent.results) | difference(templ_zabbix_server.results) | oo_collect('host') }}"
- when: templ_heartbeat.results | length == 0
+ when: templ_heartbeat.results | length == 0
diff --git a/playbooks/adhoc/zabbix_setup/oo-config-zaio.yml b/playbooks/adhoc/zabbix_setup/oo-config-zaio.yml
index 2f1d003ff..0d5e01878 100755
--- a/playbooks/adhoc/zabbix_setup/oo-config-zaio.yml
+++ b/playbooks/adhoc/zabbix_setup/oo-config-zaio.yml
@@ -15,5 +15,5 @@
ozb_server: "{{ g_server }}"
ozb_user: "{{ g_user }}"
ozb_password: "{{ g_password }}"
- ozb_scriptrunner_user: "{{ g_zbx_scriptrunner_user }}"
+ ozb_scriptrunner_user: "{{ g_zbx_scriptrunner_user }}"
ozb_scriptrunner_bastion_host: "{{ g_zbx_scriptrunner_bastion_host }}"
diff --git a/playbooks/aws/openshift-cluster/cluster_hosts.yml b/playbooks/aws/openshift-cluster/cluster_hosts.yml
index 119b376aa..fbaf81dec 100644
--- a/playbooks/aws/openshift-cluster/cluster_hosts.yml
+++ b/playbooks/aws/openshift-cluster/cluster_hosts.yml
@@ -1,21 +1,21 @@
---
-g_all_hosts: "{{ groups['tag_clusterid_' ~ cluster_id] | default([])
- | intersect(groups['tag_environment_' ~ cluster_env] | default([])) }}"
+g_all_hosts: "{{ groups['tag_clusterid_' ~ cluster_id] | default([])
+ | intersect(groups['tag_environment_' ~ cluster_env] | default([])) }}"
-g_etcd_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_etcd'] | default([])) }}"
+g_etcd_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_etcd'] | default([])) }}"
-g_lb_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_lb'] | default([])) }}"
+g_lb_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_lb'] | default([])) }}"
-g_nfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_nfs'] | default([])) }}"
+g_nfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_nfs'] | default([])) }}"
-g_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_master'] | default([])) }}"
+g_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_master'] | default([])) }}"
g_new_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_new_master'] | default([])) }}"
-g_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_node'] | default([])) }}"
+g_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_node'] | default([])) }}"
g_new_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_new_node'] | default([])) }}"
-g_infra_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type_infra'] | default([])) }}"
+g_infra_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type_infra'] | default([])) }}"
g_compute_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type_compute'] | default([])) }}"
diff --git a/playbooks/aws/openshift-cluster/config.yml b/playbooks/aws/openshift-cluster/config.yml
index 49e028396..d60b68885 100644
--- a/playbooks/aws/openshift-cluster/config.yml
+++ b/playbooks/aws/openshift-cluster/config.yml
@@ -17,8 +17,8 @@
- include: ../../common/openshift-cluster/config.yml
vars:
- g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- g_sudo: "{{ deployment_vars[deployment_type].become }}"
+ g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ g_sudo: "{{ deployment_vars[deployment_type].become }}"
g_nodeonmaster: true
openshift_cluster_id: "{{ cluster_id }}"
openshift_debug_level: "{{ debug_level }}"
diff --git a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
index 4d76d3bfe..608512b79 100644
--- a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
+++ b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
@@ -125,21 +125,21 @@
- set_fact:
logrotate:
- - name: syslog
- path: |
- /var/log/cron
- /var/log/maillog
- /var/log/messages
- /var/log/secure
- /var/log/spooler"
- options:
- - daily
- - rotate 7
- - compress
- - sharedscripts
- - missingok
- scripts:
- postrotate: "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"
+ - name: syslog
+ path: |
+ /var/log/cron
+ /var/log/maillog
+ /var/log/messages
+ /var/log/secure
+ /var/log/spooler"
+ options:
+ - daily
+ - rotate 7
+ - compress
+ - sharedscripts
+ - missingok
+ scripts:
+ postrotate: "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"
- name: Add new instances groups and variables
add_host:
diff --git a/playbooks/aws/openshift-cluster/terminate.yml b/playbooks/aws/openshift-cluster/terminate.yml
index 7a8375d0e..1f15aa4bf 100644
--- a/playbooks/aws/openshift-cluster/terminate.yml
+++ b/playbooks/aws/openshift-cluster/terminate.yml
@@ -29,49 +29,49 @@
become: no
gather_facts: no
tasks:
- - name: Remove tags from instances
- ec2_tag:
- resource: "{{ hostvars[item]['ec2_id'] }}"
- region: "{{ hostvars[item]['ec2_region'] }}"
- state: absent
- tags:
- environment: "{{ hostvars[item]['ec2_tag_environment'] }}"
- clusterid: "{{ hostvars[item]['ec2_tag_clusterid'] }}"
- host-type: "{{ hostvars[item]['ec2_tag_host-type'] }}"
- sub_host_type: "{{ hostvars[item]['ec2_tag_sub-host-type'] }}"
- with_items: "{{ groups.oo_hosts_to_terminate }}"
- when: "'oo_hosts_to_terminate' in groups"
+ - name: Remove tags from instances
+ ec2_tag:
+ resource: "{{ hostvars[item]['ec2_id'] }}"
+ region: "{{ hostvars[item]['ec2_region'] }}"
+ state: absent
+ tags:
+ environment: "{{ hostvars[item]['ec2_tag_environment'] }}"
+ clusterid: "{{ hostvars[item]['ec2_tag_clusterid'] }}"
+ host-type: "{{ hostvars[item]['ec2_tag_host-type'] }}"
+ sub_host_type: "{{ hostvars[item]['ec2_tag_sub-host-type'] }}"
+ with_items: "{{ groups.oo_hosts_to_terminate }}"
+ when: "'oo_hosts_to_terminate' in groups"
- - name: Terminate instances
- ec2:
- state: absent
- instance_ids: ["{{ hostvars[item].ec2_id }}"]
- region: "{{ hostvars[item].ec2_region }}"
- ignore_errors: yes
- register: ec2_term
- with_items: "{{ groups.oo_hosts_to_terminate }}"
- when: "'oo_hosts_to_terminate' in groups"
+ - name: Terminate instances
+ ec2:
+ state: absent
+ instance_ids: ["{{ hostvars[item].ec2_id }}"]
+ region: "{{ hostvars[item].ec2_region }}"
+ ignore_errors: yes
+ register: ec2_term
+ with_items: "{{ groups.oo_hosts_to_terminate }}"
+ when: "'oo_hosts_to_terminate' in groups"
- # Fail if any of the instances failed to terminate with an error other
- # than 403 Forbidden
- - fail:
- msg: "Terminating instance {{ item.ec2_id }} failed with message {{ item.msg }}"
- when: "'oo_hosts_to_terminate' in groups and item.has_key('failed') and item.failed"
- with_items: "{{ ec2_term.results }}"
+ # Fail if any of the instances failed to terminate with an error other
+ # than 403 Forbidden
+ - fail:
+ msg: "Terminating instance {{ item.ec2_id }} failed with message {{ item.msg }}"
+ when: "'oo_hosts_to_terminate' in groups and item.has_key('failed') and item.failed"
+ with_items: "{{ ec2_term.results }}"
- - name: Stop instance if termination failed
- ec2:
- state: stopped
- instance_ids: ["{{ item.item.ec2_id }}"]
- region: "{{ item.item.ec2_region }}"
- register: ec2_stop
- when: "'oo_hosts_to_terminate' in groups and item.has_key('failed') and item.failed"
- with_items: "{{ ec2_term.results }}"
+ - name: Stop instance if termination failed
+ ec2:
+ state: stopped
+ instance_ids: ["{{ item.item.ec2_id }}"]
+ region: "{{ item.item.ec2_region }}"
+ register: ec2_stop
+ when: "'oo_hosts_to_terminate' in groups and item.has_key('failed') and item.failed"
+ with_items: "{{ ec2_term.results }}"
- - name: Rename stopped instances
- ec2_tag: resource={{ item.item.item.ec2_id }} region={{ item.item.item.ec2_region }} state=present
- args:
- tags:
- Name: "{{ item.item.item.ec2_tag_Name }}-terminate"
- with_items: "{{ ec2_stop.results }}"
- when: ec2_stop | changed
+ - name: Rename stopped instances
+ ec2_tag: resource={{ item.item.item.ec2_id }} region={{ item.item.item.ec2_region }} state=present
+ args:
+ tags:
+ Name: "{{ item.item.item.ec2_tag_Name }}-terminate"
+ with_items: "{{ ec2_stop.results }}"
+ when: ec2_stop | changed
diff --git a/playbooks/byo/openshift-cluster/cluster_hosts.yml b/playbooks/byo/openshift-cluster/cluster_hosts.yml
index 658204c17..cb464cf0d 100644
--- a/playbooks/byo/openshift-cluster/cluster_hosts.yml
+++ b/playbooks/byo/openshift-cluster/cluster_hosts.yml
@@ -1,19 +1,19 @@
---
-g_etcd_hosts: "{{ groups.etcd | default([]) }}"
+g_etcd_hosts: "{{ groups.etcd | default([]) }}"
-g_lb_hosts: "{{ groups.lb | default([]) }}"
+g_lb_hosts: "{{ groups.lb | default([]) }}"
g_master_hosts: "{{ groups.masters | default([]) }}"
g_new_master_hosts: "{{ groups.new_masters | default([]) }}"
-g_node_hosts: "{{ groups.nodes | default([]) }}"
+g_node_hosts: "{{ groups.nodes | default([]) }}"
g_new_node_hosts: "{{ groups.new_nodes | default([]) }}"
-g_nfs_hosts: "{{ groups.nfs | default([]) }}"
+g_nfs_hosts: "{{ groups.nfs | default([]) }}"
-g_all_hosts: "{{ g_master_hosts | union(g_node_hosts) | union(g_etcd_hosts)
- | union(g_lb_hosts) | union(g_nfs_hosts)
- | union(g_new_node_hosts)| union(g_new_master_hosts)
- | default([]) }}"
+g_all_hosts: "{{ g_master_hosts | union(g_node_hosts) | union(g_etcd_hosts)
+ | union(g_lb_hosts) | union(g_nfs_hosts)
+ | union(g_new_node_hosts)| union(g_new_master_hosts)
+ | default([]) }}"
diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
index 12c5566c4..0d451cf77 100644
--- a/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
@@ -1,4 +1,4 @@
-
+---
- name: Check for appropriate Docker versions
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
roles:
@@ -43,4 +43,3 @@
{{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename }} --schedulable=true
delegate_to: "{{ groups.oo_first_master.0 }}"
when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade and openshift.node.schedulable | bool
-
diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml
index a3ab78ccf..561be7859 100644
--- a/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml
@@ -1,3 +1,4 @@
+---
# Playbook to upgrade Docker to the max allowable version for an OpenShift cluster.
#
# Currently only supports upgrading 1.9.x to >= 1.10.x.
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
index 7b915248b..4ce815271 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
@@ -97,4 +97,3 @@
node_config_hook: "v3_3/node_config_upgrade.yml"
- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml
-
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
index c9338a960..d6af71827 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
@@ -98,4 +98,3 @@
master_config_hook: "v3_3/master_config_upgrade.yml"
- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml
-
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml
index 4530f79cf..496b00697 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml
@@ -93,4 +93,3 @@
- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml
- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml
-
diff --git a/playbooks/byo/openshift-node/network_manager.yml b/playbooks/byo/openshift-node/network_manager.yml
index 8c810096f..344b22240 100644
--- a/playbooks/byo/openshift-node/network_manager.yml
+++ b/playbooks/byo/openshift-node/network_manager.yml
@@ -13,24 +13,24 @@
- hosts: l_oo_all_hosts
become: yes
tasks:
- - name: install NetworkManager
- package:
- name: 'NetworkManager'
- state: present
+ - name: install NetworkManager
+ package:
+ name: 'NetworkManager'
+ state: present
- - name: configure NetworkManager
- lineinfile:
- dest: "/etc/sysconfig/network-scripts/ifcfg-{{ ansible_default_ipv4['interface'] }}"
- regexp: '^{{ item }}='
- line: '{{ item }}=yes'
- state: present
- create: yes
- with_items:
- - 'USE_PEERDNS'
- - 'NM_CONTROLLED'
+ - name: configure NetworkManager
+ lineinfile:
+ dest: "/etc/sysconfig/network-scripts/ifcfg-{{ ansible_default_ipv4['interface'] }}"
+ regexp: '^{{ item }}='
+ line: '{{ item }}=yes'
+ state: present
+ create: yes
+ with_items:
+ - 'USE_PEERDNS'
+ - 'NM_CONTROLLED'
- - name: enable and start NetworkManager
- service:
- name: 'NetworkManager'
- state: started
- enabled: yes
\ No newline at end of file
+ - name: enable and start NetworkManager
+ service:
+ name: 'NetworkManager'
+ state: started
+ enabled: yes
diff --git a/playbooks/byo/rhel_subscribe.yml b/playbooks/byo/rhel_subscribe.yml
index f36caeb36..6eeba09d9 100644
--- a/playbooks/byo/rhel_subscribe.yml
+++ b/playbooks/byo/rhel_subscribe.yml
@@ -14,9 +14,9 @@
gather_facts: no
tasks:
- include_vars: openshift-cluster/cluster_hosts.yml
-
-- include: ../common/openshift-cluster/evaluate_groups.yml
-
+
+- include: ../common/openshift-cluster/evaluate_groups.yml
+
- hosts: l_oo_all_hosts
vars:
openshift_deployment_type: "{{ deployment_type }}"
diff --git a/playbooks/common/openshift-cluster/additional_config.yml b/playbooks/common/openshift-cluster/additional_config.yml
index 825f46415..c0ea93d2c 100644
--- a/playbooks/common/openshift-cluster/additional_config.yml
+++ b/playbooks/common/openshift-cluster/additional_config.yml
@@ -1,3 +1,4 @@
+---
- name: Additional master configuration
hosts: oo_first_master
vars:
diff --git a/playbooks/common/openshift-cluster/enable_dnsmasq.yml b/playbooks/common/openshift-cluster/enable_dnsmasq.yml
index 4cfe8617e..ca5177852 100644
--- a/playbooks/common/openshift-cluster/enable_dnsmasq.yml
+++ b/playbooks/common/openshift-cluster/enable_dnsmasq.yml
@@ -59,7 +59,7 @@
vars:
openshift_deployment_type: "{{ deployment_type }}"
roles:
- - openshift_node_dnsmasq
+ - openshift_node_dnsmasq
post_tasks:
- modify_yaml:
dest: "{{ openshift.common.config_base }}/node/node-config.yaml"
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml b/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
index f2a2259e3..e3379f29b 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
@@ -50,6 +50,5 @@
- name: Flag to delete all images prior to upgrade if crossing Docker 1.10 boundary
set_fact:
- docker_upgrade_nuke_images: True
+ docker_upgrade_nuke_images: True
when: l_docker_upgrade | bool and docker_upgrade_nuke_images is not defined and curr_docker_version.stdout | version_compare('1.10','<') and docker_version | version_compare('1.10','>=')
-
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
index 691961382..0a972adf6 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
@@ -1,3 +1,4 @@
+---
- name: Backup etcd
hosts: etcd_hosts_to_backup
vars:
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
index bb7955c45..cefc7d12b 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -58,8 +58,8 @@
- include: rpm_upgrade.yml
vars:
- component: "node"
- openshift_version: "{{ openshift_pkg_version | default('') }}"
+ component: "node"
+ openshift_version: "{{ openshift_pkg_version | default('') }}"
when: inventory_hostname in groups.oo_nodes_to_upgrade and not openshift.common.is_containerized | bool
- name: Remove obsolete docker-sdn-ovs.conf
@@ -72,12 +72,12 @@
- name: Ensure containerized services stopped before Docker restart
service: name={{ item }} state=stopped
with_items:
- - etcd_container
- - openvswitch
- - "{{ openshift.common.service_type }}-master"
- - "{{ openshift.common.service_type }}-master-api"
- - "{{ openshift.common.service_type }}-master-controllers"
- - "{{ openshift.common.service_type }}-node"
+ - etcd_container
+ - openvswitch
+ - "{{ openshift.common.service_type }}-master"
+ - "{{ openshift.common.service_type }}-master-api"
+ - "{{ openshift.common.service_type }}-master-controllers"
+ - "{{ openshift.common.service_type }}-node"
failed_when: false
when: openshift.common.is_containerized | bool
@@ -96,5 +96,3 @@
until: node_sched.rc == 0
retries: 3
delay: 1
-
-
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/node_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/node_config_upgrade.yml
index 8f64636ae..89b524f14 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/node_config_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/node_config_upgrade.yml
@@ -18,4 +18,3 @@
dest: "{{ openshift.common.config_base}}/node/node-config.yaml"
yaml_key: 'masterClientConnectionOverrides.qps'
yaml_value: 20
-
diff --git a/playbooks/common/openshift-cluster/validate_hostnames.yml b/playbooks/common/openshift-cluster/validate_hostnames.yml
index 50e25984f..48cc03b19 100644
--- a/playbooks/common/openshift-cluster/validate_hostnames.yml
+++ b/playbooks/common/openshift-cluster/validate_hostnames.yml
@@ -11,6 +11,6 @@
failed_when: false
- name: Warn user about bad openshift_hostname values
pause:
- prompt: "The hostname \"{{ openshift.common.hostname }}\" for \"{{ ansible_nodename }}\" doesn't resolve to an ip address owned by this host. Please set openshift_hostname variable to a hostname that when resolved on the host in question resolves to an IP address matching an interface on this host. This host will fail liveness checks for pods utilizing hostPorts, press ENTER to continue or CTRL-C to abort."
- seconds: "{{ 10 if openshift_override_hostname_check | default(false) | bool else omit }}"
+ prompt: "The hostname \"{{ openshift.common.hostname }}\" for \"{{ ansible_nodename }}\" doesn't resolve to an ip address owned by this host. Please set openshift_hostname variable to a hostname that when resolved on the host in question resolves to an IP address matching an interface on this host. This host will fail liveness checks for pods utilizing hostPorts, press ENTER to continue or CTRL-C to abort."
+ seconds: "{{ 10 if openshift_override_hostname_check | default(false) | bool else omit }}"
when: lookupip.stdout not in ansible_all_ipv4_addresses
diff --git a/playbooks/common/openshift-etcd/service.yml b/playbooks/common/openshift-etcd/service.yml
index f460612ba..a039d30b8 100644
--- a/playbooks/common/openshift-etcd/service.yml
+++ b/playbooks/common/openshift-etcd/service.yml
@@ -17,4 +17,4 @@
connection: ssh
gather_facts: no
tasks:
- - service: name=etcd state="{{ new_cluster_state }}"
+ - service: name=etcd state="{{ new_cluster_state }}"
diff --git a/playbooks/common/openshift-loadbalancer/service.yml b/playbooks/common/openshift-loadbalancer/service.yml
index efc80edf9..e413c2b3a 100644
--- a/playbooks/common/openshift-loadbalancer/service.yml
+++ b/playbooks/common/openshift-loadbalancer/service.yml
@@ -17,4 +17,4 @@
connection: ssh
gather_facts: no
tasks:
- - service: name=haproxy state="{{ new_cluster_state }}"
+ - service: name=haproxy state="{{ new_cluster_state }}"
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index 5fcb850a2..b9716cafe 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -99,8 +99,8 @@
- openshift_facts:
role: master
local_facts:
- session_auth_secrets: "{{ openshift_master_session_auth_secrets | default(openshift.master.session_auth_secrets | default(None)) }}"
- session_encryption_secrets: "{{ openshift_master_session_encryption_secrets | default(openshift.master.session_encryption_secrets | default(None)) }}"
+ session_auth_secrets: "{{ openshift_master_session_auth_secrets | default(openshift.master.session_auth_secrets | default(None)) }}"
+ session_encryption_secrets: "{{ openshift_master_session_encryption_secrets | default(openshift.master.session_encryption_secrets | default(None)) }}"
- name: Generate master session secrets
hosts: oo_first_master
diff --git a/playbooks/common/openshift-master/restart.yml b/playbooks/common/openshift-master/restart.yml
index 5769ef5cd..7b340887a 100644
--- a/playbooks/common/openshift-master/restart.yml
+++ b/playbooks/common/openshift-master/restart.yml
@@ -13,12 +13,12 @@
role: "{{ item.role }}"
local_facts: "{{ item.local_facts }}"
with_items:
- - role: common
- local_facts:
- rolling_restart_mode: "{{ openshift_rolling_restart_mode | default('services') }}"
- - role: master
- local_facts:
- cluster_method: "{{ openshift_master_cluster_method | default(None) }}"
+ - role: common
+ local_facts:
+ rolling_restart_mode: "{{ openshift_rolling_restart_mode | default('services') }}"
+ - role: master
+ local_facts:
+ cluster_method: "{{ openshift_master_cluster_method | default(None) }}"
# Creating a temp file on localhost, we then check each system that will
# be rebooted to see if that file exists, if so we know we're running
@@ -76,4 +76,3 @@
when: openshift.common.rolling_restart_mode == 'system'
- include: restart_services.yml
when: openshift.common.rolling_restart_mode == 'services'
-
diff --git a/playbooks/common/openshift-master/restart_hosts.yml b/playbooks/common/openshift-master/restart_hosts.yml
index 4e48f94d1..ffa23d26a 100644
--- a/playbooks/common/openshift-master/restart_hosts.yml
+++ b/playbooks/common/openshift-master/restart_hosts.yml
@@ -1,3 +1,4 @@
+---
- name: Restart master system
# https://github.com/ansible/ansible/issues/10616
shell: sleep 2 && shutdown -r now "OpenShift Ansible master rolling restart"
diff --git a/playbooks/common/openshift-master/restart_services.yml b/playbooks/common/openshift-master/restart_services.yml
index a5ab62dc5..25fa10450 100644
--- a/playbooks/common/openshift-master/restart_services.yml
+++ b/playbooks/common/openshift-master/restart_services.yml
@@ -1,3 +1,4 @@
+---
- name: Restart master
service:
name: "{{ openshift.common.service_type }}-master"
diff --git a/playbooks/common/openshift-master/service.yml b/playbooks/common/openshift-master/service.yml
index 5e5198335..43ef8b6a1 100644
--- a/playbooks/common/openshift-master/service.yml
+++ b/playbooks/common/openshift-master/service.yml
@@ -17,4 +17,4 @@
connection: ssh
gather_facts: no
tasks:
- - service: name={{ openshift.common.service_type }}-master state="{{ new_cluster_state }}"
+ - service: name={{ openshift.common.service_type }}-master state="{{ new_cluster_state }}"
diff --git a/playbooks/common/openshift-nfs/service.yml b/playbooks/common/openshift-nfs/service.yml
index 8468014da..8c3f32403 100644
--- a/playbooks/common/openshift-nfs/service.yml
+++ b/playbooks/common/openshift-nfs/service.yml
@@ -15,4 +15,4 @@
connection: ssh
gather_facts: no
tasks:
- - service: name=nfs-server state="{{ new_cluster_state }}"
+ - service: name=nfs-server state="{{ new_cluster_state }}"
diff --git a/playbooks/common/openshift-node/service.yml b/playbooks/common/openshift-node/service.yml
index 33095c9fb..2da68ceea 100644
--- a/playbooks/common/openshift-node/service.yml
+++ b/playbooks/common/openshift-node/service.yml
@@ -17,4 +17,4 @@
connection: ssh
gather_facts: no
tasks:
- - service: name={{ service_type }}-node state="{{ new_cluster_state }}"
+ - service: name={{ service_type }}-node state="{{ new_cluster_state }}"
diff --git a/playbooks/gce/openshift-cluster/cluster_hosts.yml b/playbooks/gce/openshift-cluster/cluster_hosts.yml
index a7baea915..74e2420db 100644
--- a/playbooks/gce/openshift-cluster/cluster_hosts.yml
+++ b/playbooks/gce/openshift-cluster/cluster_hosts.yml
@@ -1,21 +1,21 @@
---
-g_all_hosts: "{{ groups['tag_clusterid-' ~ cluster_id] | default([])
- | intersect(groups['tag_environment-' ~ cluster_env] | default([])) }}"
+g_all_hosts: "{{ groups['tag_clusterid-' ~ cluster_id] | default([])
+ | intersect(groups['tag_environment-' ~ cluster_env] | default([])) }}"
-g_etcd_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-etcd'] | default([])) }}"
+g_etcd_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-etcd'] | default([])) }}"
-g_lb_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-lb'] | default([])) }}"
+g_lb_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-lb'] | default([])) }}"
-g_nfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-nfs'] | default([])) }}"
+g_nfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-nfs'] | default([])) }}"
-g_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-master'] | default([])) }}"
+g_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-master'] | default([])) }}"
g_new_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-new-master'] | default([])) }}"
-g_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-node'] | default([])) }}"
+g_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-node'] | default([])) }}"
g_new_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-new-node'] | default([])) }}"
-g_infra_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type-infra'] | default([])) }}"
+g_infra_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type-infra'] | default([])) }}"
g_compute_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type-compute'] | default([])) }}"
diff --git a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
index 87b30aee4..65dd2b71e 100644
--- a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
+++ b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
@@ -10,7 +10,7 @@
zone: "{{ lookup('env', 'zone') }}"
network: "{{ lookup('env', 'network') }}"
subnetwork: "{{ lookup('env', 'subnetwork') | default(omit, True) }}"
-# unsupported in 1.9.+
+ # unsupported in 1.9.+
#service_account_permissions: "datastore,logging-write"
tags:
- created-by-{{ lookup('env', 'LOGNAME') | regex_replace('[^a-z0-9]+', '') | default(cluster, true) }}
diff --git a/playbooks/gce/openshift-cluster/terminate.yml b/playbooks/gce/openshift-cluster/terminate.yml
index 68e60f9d4..afe269b7c 100644
--- a/playbooks/gce/openshift-cluster/terminate.yml
+++ b/playbooks/gce/openshift-cluster/terminate.yml
@@ -33,18 +33,17 @@
vars_files:
- vars.yml
tasks:
-
- - name: Terminate instances that were previously launched
- local_action:
- module: gce
- state: 'absent'
- name: "{{ item }}"
- service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
- pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
- project_id: "{{ lookup('env', 'gce_project_id') }}"
- zone: "{{ lookup('env', 'zone') }}"
- with_items: "{{ groups['oo_hosts_to_terminate'] | default([], true) }}"
- when: item is defined
+ - name: Terminate instances that were previously launched
+ local_action:
+ module: gce
+ state: 'absent'
+ name: "{{ item }}"
+ service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
+ pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
+ project_id: "{{ lookup('env', 'gce_project_id') }}"
+ zone: "{{ lookup('env', 'zone') }}"
+ with_items: "{{ groups['oo_hosts_to_terminate'] | default([], true) }}"
+ when: item is defined
#- include: ../openshift-node/terminate.yml
# vars:
diff --git a/playbooks/libvirt/openshift-cluster/cluster_hosts.yml b/playbooks/libvirt/openshift-cluster/cluster_hosts.yml
index a7baea915..74e2420db 100644
--- a/playbooks/libvirt/openshift-cluster/cluster_hosts.yml
+++ b/playbooks/libvirt/openshift-cluster/cluster_hosts.yml
@@ -1,21 +1,21 @@
---
-g_all_hosts: "{{ groups['tag_clusterid-' ~ cluster_id] | default([])
- | intersect(groups['tag_environment-' ~ cluster_env] | default([])) }}"
+g_all_hosts: "{{ groups['tag_clusterid-' ~ cluster_id] | default([])
+ | intersect(groups['tag_environment-' ~ cluster_env] | default([])) }}"
-g_etcd_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-etcd'] | default([])) }}"
+g_etcd_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-etcd'] | default([])) }}"
-g_lb_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-lb'] | default([])) }}"
+g_lb_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-lb'] | default([])) }}"
-g_nfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-nfs'] | default([])) }}"
+g_nfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-nfs'] | default([])) }}"
-g_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-master'] | default([])) }}"
+g_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-master'] | default([])) }}"
g_new_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-new-master'] | default([])) }}"
-g_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-node'] | default([])) }}"
+g_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-node'] | default([])) }}"
g_new_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-new-node'] | default([])) }}"
-g_infra_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type-infra'] | default([])) }}"
+g_infra_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type-infra'] | default([])) }}"
g_compute_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type-compute'] | default([])) }}"
diff --git a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
index 31a13aa2a..78581fdfe 100644
--- a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
+++ b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
@@ -134,5 +134,5 @@
retries: 30
delay: 1
with_together:
- - '{{ instances }}'
- - '{{ ips }}'
+ - '{{ instances }}'
+ - '{{ ips }}'
diff --git a/playbooks/libvirt/openshift-cluster/terminate.yml b/playbooks/libvirt/openshift-cluster/terminate.yml
index 81e6d8f05..8a63d11a5 100644
--- a/playbooks/libvirt/openshift-cluster/terminate.yml
+++ b/playbooks/libvirt/openshift-cluster/terminate.yml
@@ -68,4 +68,3 @@
path: '{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/'
state: absent
with_items: "{{ groups['oo_hosts_to_terminate'] }}"
-
diff --git a/playbooks/libvirt/openshift-cluster/vars.yml b/playbooks/libvirt/openshift-cluster/vars.yml
index 4daaf1c91..5156789e7 100644
--- a/playbooks/libvirt/openshift-cluster/vars.yml
+++ b/playbooks/libvirt/openshift-cluster/vars.yml
@@ -12,10 +12,10 @@ debug_level: 2
# The default value of image_url for enterprise and openshift-enterprise deployment types below won't work.
deployment_rhel7_ent_base:
image:
- url: "{{ lookup('oo_option', 'image_url') |
- default('https://access.cdn.redhat.com//content/origin/files/sha256/25/25f880767ec6bf71beb532e17f1c45231640bbfdfbbb1dffb79d2c1b328388e0/rhel-guest-image-7.2-20151102.0.x86_64.qcow2', True) }}"
- name: "{{ lookup('oo_option', 'image_name') |
- default('rhel-guest-image-7.2-20151102.0.x86_64.qcow2', True) }}"
+ url: "{{ lookup('oo_option', 'image_url') |
+ default('https://access.cdn.redhat.com//content/origin/files/sha256/25/25f880767ec6bf71beb532e17f1c45231640bbfdfbbb1dffb79d2c1b328388e0/rhel-guest-image-7.2-20151102.0.x86_64.qcow2', True) }}"
+ name: "{{ lookup('oo_option', 'image_name') |
+ default('rhel-guest-image-7.2-20151102.0.x86_64.qcow2', True) }}"
sha256: "{{ lookup('oo_option', 'image_sha256') |
default('25f880767ec6bf71beb532e17f1c45231640bbfdfbbb1dffb79d2c1b328388e0', True) }}"
compression: ""
@@ -25,12 +25,12 @@ deployment_rhel7_ent_base:
deployment_vars:
origin:
image:
- url: "{{ lookup('oo_option', 'image_url') |
- default('http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud-1602.qcow2.xz', True) }}"
- compression: "{{ lookup('oo_option', 'image_compression') |
- default('xz', True) }}"
- name: "{{ lookup('oo_option', 'image_name') |
- default('CentOS-7-x86_64-GenericCloud.qcow2', True) }}"
+ url: "{{ lookup('oo_option', 'image_url') |
+ default('http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud-1602.qcow2.xz', True) }}"
+ compression: "{{ lookup('oo_option', 'image_compression') |
+ default('xz', True) }}"
+ name: "{{ lookup('oo_option', 'image_name') |
+ default('CentOS-7-x86_64-GenericCloud.qcow2', True) }}"
sha256: "{{ lookup('oo_option', 'image_sha256') |
default('dd0f5e610e7c5ffacaca35ed7a78a19142a588f4543da77b61c1fb0d74400471', True) }}"
ssh_user: openshift
diff --git a/playbooks/openstack/openshift-cluster/cluster_hosts.yml b/playbooks/openstack/openshift-cluster/cluster_hosts.yml
index 12c436eaf..98434439c 100644
--- a/playbooks/openstack/openshift-cluster/cluster_hosts.yml
+++ b/playbooks/openstack/openshift-cluster/cluster_hosts.yml
@@ -1,21 +1,21 @@
---
-g_all_hosts: "{{ groups['meta-clusterid_' ~ cluster_id] | default([])
- | intersect(groups['meta-environment_' ~ cluster_env] | default([])) }}"
+g_all_hosts: "{{ groups['meta-clusterid_' ~ cluster_id] | default([])
+ | intersect(groups['meta-environment_' ~ cluster_env] | default([])) }}"
-g_etcd_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_etcd'] | default([])) }}"
+g_etcd_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_etcd'] | default([])) }}"
-g_lb_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_lb'] | default([])) }}"
+g_lb_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_lb'] | default([])) }}"
-g_nfs_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_nfs'] | default([])) }}"
+g_nfs_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_nfs'] | default([])) }}"
-g_master_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_master'] | default([])) }}"
+g_master_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_master'] | default([])) }}"
g_new_master_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_new_master'] | default([])) }}"
-g_node_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_node'] | default([])) }}"
+g_node_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_node'] | default([])) }}"
g_new_node_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_new_node'] | default([])) }}"
-g_infra_hosts: "{{ g_node_hosts | intersect(groups['meta-sub-host-type_infra'] | default([])) }}"
+g_infra_hosts: "{{ g_node_hosts | intersect(groups['meta-sub-host-type_infra'] | default([])) }}"
g_compute_hosts: "{{ g_node_hosts | intersect(groups['meta-sub-host-type_compute'] | default([])) }}"
diff --git a/playbooks/openstack/openshift-cluster/launch.yml b/playbooks/openstack/openshift-cluster/launch.yml
index f460b14c8..c0bc12f55 100644
--- a/playbooks/openstack/openshift-cluster/launch.yml
+++ b/playbooks/openstack/openshift-cluster/launch.yml
@@ -111,9 +111,9 @@
public_v4: '{{ item[2] }}'
private_v4: '{{ item[1] }}'
with_together:
- - '{{ parsed_outputs.etcd_names }}'
- - '{{ parsed_outputs.etcd_ips }}'
- - '{{ parsed_outputs.etcd_floating_ips }}'
+ - '{{ parsed_outputs.etcd_names }}'
+ - '{{ parsed_outputs.etcd_ips }}'
+ - '{{ parsed_outputs.etcd_floating_ips }}'
- name: Add new master instances groups and variables
add_host:
@@ -128,9 +128,9 @@
public_v4: '{{ item[2] }}'
private_v4: '{{ item[1] }}'
with_together:
- - '{{ parsed_outputs.master_names }}'
- - '{{ parsed_outputs.master_ips }}'
- - '{{ parsed_outputs.master_floating_ips }}'
+ - '{{ parsed_outputs.master_names }}'
+ - '{{ parsed_outputs.master_ips }}'
+ - '{{ parsed_outputs.master_floating_ips }}'
- name: Add new node instances groups and variables
add_host:
@@ -145,9 +145,9 @@
public_v4: '{{ item[2] }}'
private_v4: '{{ item[1] }}'
with_together:
- - '{{ parsed_outputs.node_names }}'
- - '{{ parsed_outputs.node_ips }}'
- - '{{ parsed_outputs.node_floating_ips }}'
+ - '{{ parsed_outputs.node_names }}'
+ - '{{ parsed_outputs.node_ips }}'
+ - '{{ parsed_outputs.node_floating_ips }}'
- name: Add new infra instances groups and variables
add_host:
@@ -162,18 +162,18 @@
public_v4: '{{ item[2] }}'
private_v4: '{{ item[1] }}'
with_together:
- - '{{ parsed_outputs.infra_names }}'
- - '{{ parsed_outputs.infra_ips }}'
- - '{{ parsed_outputs.infra_floating_ips }}'
+ - '{{ parsed_outputs.infra_names }}'
+ - '{{ parsed_outputs.infra_ips }}'
+ - '{{ parsed_outputs.infra_floating_ips }}'
- name: Wait for ssh
wait_for:
host: '{{ item }}'
port: 22
with_flattened:
- - '{{ parsed_outputs.master_floating_ips }}'
- - '{{ parsed_outputs.node_floating_ips }}'
- - '{{ parsed_outputs.infra_floating_ips }}'
+ - '{{ parsed_outputs.master_floating_ips }}'
+ - '{{ parsed_outputs.node_floating_ips }}'
+ - '{{ parsed_outputs.infra_floating_ips }}'
- name: Wait for user setup
command: 'ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ deployment_vars[deployment_type].ssh_user }}@{{ item }} echo {{ deployment_vars[deployment_type].ssh_user }} user is setup'
@@ -182,9 +182,9 @@
retries: 30
delay: 1
with_flattened:
- - '{{ parsed_outputs.master_floating_ips }}'
- - '{{ parsed_outputs.node_floating_ips }}'
- - '{{ parsed_outputs.infra_floating_ips }}'
+ - '{{ parsed_outputs.master_floating_ips }}'
+ - '{{ parsed_outputs.node_floating_ips }}'
+ - '{{ parsed_outputs.infra_floating_ips }}'
- include: update.yml
diff --git a/playbooks/openstack/openshift-cluster/terminate.yml b/playbooks/openstack/openshift-cluster/terminate.yml
index 4527f4a28..affb57117 100644
--- a/playbooks/openstack/openshift-cluster/terminate.yml
+++ b/playbooks/openstack/openshift-cluster/terminate.yml
@@ -1,3 +1,4 @@
+---
- name: Terminate instance(s)
hosts: localhost
become: no
diff --git a/playbooks/openstack/openshift-cluster/vars.yml b/playbooks/openstack/openshift-cluster/vars.yml
index 79b336ce7..ba2855b73 100644
--- a/playbooks/openstack/openshift-cluster/vars.yml
+++ b/playbooks/openstack/openshift-cluster/vars.yml
@@ -1,3 +1,4 @@
+# yamllint disable rule:colons
---
debug_level: 2
openstack_infra_heat_stack: "{{ lookup('oo_option', 'infra_heat_stack' ) |
diff --git a/roles/docker/meta/main.yml b/roles/docker/meta/main.yml
index c5c95c0d2..dadd62c93 100644
--- a/roles/docker/meta/main.yml
+++ b/roles/docker/meta/main.yml
@@ -10,5 +10,5 @@ galaxy_info:
versions:
- 7
dependencies:
- - role: os_firewall
- os_firewall_use_firewalld: False
+- role: os_firewall
+ os_firewall_use_firewalld: False
diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml
index a2b18baa1..a93bdc2ad 100644
--- a/roles/docker/tasks/main.yml
+++ b/roles/docker/tasks/main.yml
@@ -86,16 +86,16 @@
line: "{{ item.reg_conf_var }}='{{ item.reg_fact_val }}'"
state: "{{ 'present' if item.reg_fact_val != '' else 'absent'}}"
with_items:
- - reg_conf_var: HTTP_PROXY
- reg_fact_val: "{{ docker_http_proxy | default('') }}"
- - reg_conf_var: HTTPS_PROXY
- reg_fact_val: "{{ docker_https_proxy | default('') }}"
- - reg_conf_var: NO_PROXY
- reg_fact_val: "{{ docker_no_proxy | default('') | join(',') }}"
+ - reg_conf_var: HTTP_PROXY
+ reg_fact_val: "{{ docker_http_proxy | default('') }}"
+ - reg_conf_var: HTTPS_PROXY
+ reg_fact_val: "{{ docker_https_proxy | default('') }}"
+ - reg_conf_var: NO_PROXY
+ reg_fact_val: "{{ docker_no_proxy | default('') | join(',') }}"
notify:
- - restart docker
+ - restart docker
when:
- - docker_check.stat.isreg is defined and docker_check.stat.isreg and '"http_proxy" in openshift.common or "https_proxy" in openshift.common'
+ - docker_check.stat.isreg is defined and docker_check.stat.isreg and '"http_proxy" in openshift.common or "https_proxy" in openshift.common'
- name: Set various Docker options
lineinfile:
@@ -109,7 +109,7 @@
{% if docker_disable_push_dockerhub is defined %} --confirm-def-push={{ docker_disable_push_dockerhub | bool }}{% endif %}'"
when: docker_check.stat.isreg is defined and docker_check.stat.isreg
notify:
- - restart docker
+ - restart docker
- name: Start the Docker service
systemd:
diff --git a/roles/flannel_register/defaults/main.yaml b/roles/flannel_register/defaults/main.yaml
index b1279aa88..ddf8230ec 100644
--- a/roles/flannel_register/defaults/main.yaml
+++ b/roles/flannel_register/defaults/main.yaml
@@ -8,4 +8,3 @@ etcd_conf_dir: "{{ openshift.common.config_base }}/master"
etcd_peer_ca_file: "{{ etcd_conf_dir + '/ca.crt' if (openshift.master.embedded_etcd | bool) else etcd_conf_dir + '/master.etcd-ca.crt' }}"
etcd_peer_cert_file: "{{ etcd_conf_dir }}/master.etcd-client.crt"
etcd_peer_key_file: "{{ etcd_conf_dir }}/master.etcd-client.key"
-
diff --git a/roles/kube_nfs_volumes/meta/main.yml b/roles/kube_nfs_volumes/meta/main.yml
index be6ca6b88..7ed028138 100644
--- a/roles/kube_nfs_volumes/meta/main.yml
+++ b/roles/kube_nfs_volumes/meta/main.yml
@@ -13,5 +13,5 @@ galaxy_info:
versions:
- all
categories:
- - cloud
+ - cloud
dependencies: []
diff --git a/roles/nuage_ca/meta/main.yml b/roles/nuage_ca/meta/main.yml
index 2b06613f3..36838debc 100644
--- a/roles/nuage_ca/meta/main.yml
+++ b/roles/nuage_ca/meta/main.yml
@@ -1,6 +1,6 @@
---
galaxy_info:
- author: Vishal Patil
+ author: Vishal Patil
description:
company: Nuage Networks
license: Apache License, Version 2.0
diff --git a/roles/nuage_common/defaults/main.yaml b/roles/nuage_common/defaults/main.yaml
index 16dac8720..a7803c0ee 100644
--- a/roles/nuage_common/defaults/main.yaml
+++ b/roles/nuage_common/defaults/main.yaml
@@ -1,3 +1,4 @@
+---
nuage_ca_master: "{{ groups.oo_first_master.0 }}"
nuage_ca_master_crt_dir: /usr/share/nuage-openshift-certificates
diff --git a/roles/nuage_master/defaults/main.yaml b/roles/nuage_master/defaults/main.yaml
index cf670a9e1..c90f4f443 100644
--- a/roles/nuage_master/defaults/main.yaml
+++ b/roles/nuage_master/defaults/main.yaml
@@ -1,4 +1,4 @@
---
nuage_master_cspadminpasswd: ""
nuage_master_adminusername: admin
-nuage_master_adminuserpasswd: admin
+nuage_master_adminuserpasswd: admin
diff --git a/roles/nuage_master/meta/main.yml b/roles/nuage_master/meta/main.yml
index b2a47ef71..a8a9bd3b4 100644
--- a/roles/nuage_master/meta/main.yml
+++ b/roles/nuage_master/meta/main.yml
@@ -13,10 +13,10 @@ galaxy_info:
- cloud
- system
dependencies:
- - role: nuage_ca
- - role: nuage_common
- - role: openshift_etcd_client_certificates
- - role: os_firewall
- os_firewall_allow:
- - service: openshift-monitor
- port: "{{ nuage_mon_rest_server_port }}/tcp"
+- role: nuage_ca
+- role: nuage_common
+- role: openshift_etcd_client_certificates
+- role: os_firewall
+ os_firewall_allow:
+ - service: openshift-monitor
+ port: "{{ nuage_mon_rest_server_port }}/tcp"
diff --git a/roles/nuage_master/tasks/certificates.yml b/roles/nuage_master/tasks/certificates.yml
index 0a2f375cd..c16616e1c 100644
--- a/roles/nuage_master/tasks/certificates.yml
+++ b/roles/nuage_master/tasks/certificates.yml
@@ -1,11 +1,11 @@
---
- name: Create a directory to hold the certificates
file: path="{{ nuage_mon_rest_server_crt_dir }}" state=directory
- delegate_to: "{{ nuage_ca_master }}"
+ delegate_to: "{{ nuage_ca_master }}"
- name: Create the key
command: >
- openssl genrsa -out "{{ nuage_ca_master_rest_server_key }}" 4096
+ openssl genrsa -out "{{ nuage_ca_master_rest_server_key }}" 4096
delegate_to: "{{ nuage_ca_master }}"
- name: Create the req file
@@ -30,7 +30,7 @@
shell: "cd {{ nuage_mon_rest_server_crt_dir }} && tar -czvf /tmp/{{ ansible_nodename }}.tgz *"
delegate_to: "{{ nuage_ca_master }}"
-- name: Create a temp directory for the certificates
+- name: Create a temp directory for the certificates
local_action: command mktemp -d "/tmp/openshift-{{ ansible_nodename }}-XXXXXXX"
register: mktemp
@@ -42,7 +42,7 @@
unarchive: src="{{ mktemp.stdout }}/{{ ansible_nodename }}.tgz" dest={{ nuage_master_crt_dir }}
- name: Delete the certificates after copy
- file: path="{{ nuage_mon_rest_server_crt_dir }}" state=absent
+ file: path="{{ nuage_mon_rest_server_crt_dir }}" state=absent
delegate_to: "{{ nuage_ca_master }}"
- name: Delete the temp directory
diff --git a/roles/nuage_master/tasks/main.yaml b/roles/nuage_master/tasks/main.yaml
index b8eaede3b..d211d30e8 100644
--- a/roles/nuage_master/tasks/main.yaml
+++ b/roles/nuage_master/tasks/main.yaml
@@ -1,13 +1,13 @@
---
- name: Create directory /usr/share/nuage-openshift-monitor
become: yes
- file: path=/usr/share/nuage-openshift-monitor state=directory
+ file: path=/usr/share/nuage-openshift-monitor state=directory
- name: Create the log directory
become: yes
file: path={{ nuage_mon_rest_server_logdir }} state=directory
-- name: Install Nuage Openshift Monitor
+- name: Install Nuage Openshift Monitor
become: yes
yum: name={{ nuage_openshift_rpm }} state=present
@@ -17,12 +17,12 @@
become: yes
fetch: src={{ cert_output_dir }}/{{ item }} dest=/tmp/{{ item }} flat=yes
with_items:
- - ca.crt
- - nuage.crt
- - nuage.key
- - nuage.kubeconfig
+ - ca.crt
+ - nuage.crt
+ - nuage.key
+ - nuage.kubeconfig
-- include: certificates.yml
+- include: certificates.yml
- name: Create nuage-openshift-monitor.yaml
become: yes
diff --git a/roles/nuage_master/vars/main.yaml b/roles/nuage_master/vars/main.yaml
index b395eba99..dba399a03 100644
--- a/roles/nuage_master/vars/main.yaml
+++ b/roles/nuage_master/vars/main.yaml
@@ -1,3 +1,4 @@
+---
openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
openshift_master_ca_cert: "{{ openshift_master_config_dir }}/ca.crt"
openshift_master_ca_key: "{{ openshift_master_config_dir }}/ca.key"
@@ -6,7 +7,7 @@ ca_cert: "{{ openshift_master_config_dir }}/ca.crt"
admin_config: "{{ openshift.common.config_base }}/master/admin.kubeconfig"
cert_output_dir: /usr/share/nuage-openshift-monitor
kube_config: /usr/share/nuage-openshift-monitor/nuage.kubeconfig
-kubemon_yaml: /usr/share/nuage-openshift-monitor/nuage-openshift-monitor.yaml
+kubemon_yaml: /usr/share/nuage-openshift-monitor/nuage-openshift-monitor.yaml
master_config_yaml: "{{ openshift_master_config_dir }}/master-config.yaml"
nuage_mon_rest_server_url: "0.0.0.0:{{ nuage_mon_rest_server_port }}"
nuage_mon_rest_server_logdir: "{{ nuage_openshift_monitor_log_dir | default('/var/log/nuage-openshift-monitor') }}"
@@ -14,18 +15,18 @@ nuage_mon_log_level: "{{ nuage_openshift_monitor_log_level | default('3') }}"
nuage_mon_rest_server_crt_dir: "{{ nuage_ca_master_crt_dir }}/{{ ansible_nodename }}"
nuage_ca_master_rest_server_key: "{{ nuage_mon_rest_server_crt_dir }}/nuageMonServer.key"
-nuage_ca_master_rest_server_crt: "{{ nuage_mon_rest_server_crt_dir }}/nuageMonServer.crt"
+nuage_ca_master_rest_server_crt: "{{ nuage_mon_rest_server_crt_dir }}/nuageMonServer.crt"
nuage_mon_rest_server_host: "{{ openshift.master.cluster_hostname | default(openshift.common.hostname) }}"
-nuage_master_crt_dir : /usr/share/nuage-openshift-monitor
+nuage_master_crt_dir: /usr/share/nuage-openshift-monitor
nuage_service_account: system:serviceaccount:default:nuage
nuage_service_account_config:
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: nuage
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: nuage
nuage_tasks:
- - policy add-cluster-role-to-user cluster-reader {{ nuage_service_account }}
+ - policy add-cluster-role-to-user cluster-reader {{ nuage_service_account }}
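Two rules meet in this vars file: the nuage_service_account mapping is re-indented, and the stray space before the colon in nuage_master_crt_dir is dropped. The latter is yamllint's colons rule, which allows no space before a colon:

    # flagged: space before the colon
    nuage_master_crt_dir : /usr/share/nuage-openshift-monitor
    # clean
    nuage_master_crt_dir: /usr/share/nuage-openshift-monitor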
diff --git a/roles/nuage_node/meta/main.yml b/roles/nuage_node/meta/main.yml
index f96318611..3e2a5e0c9 100644
--- a/roles/nuage_node/meta/main.yml
+++ b/roles/nuage_node/meta/main.yml
@@ -13,11 +13,11 @@ galaxy_info:
- cloud
- system
dependencies:
- - role: nuage_common
- - role: nuage_ca
- - role: os_firewall
- os_firewall_allow:
- - service: vxlan
- port: 4789/udp
- - service: nuage-monitor
- port: "{{ nuage_mon_rest_server_port }}/tcp"
+- role: nuage_common
+- role: nuage_ca
+- role: os_firewall
+ os_firewall_allow:
+ - service: vxlan
+ port: 4789/udp
+ - service: nuage-monitor
+ port: "{{ nuage_mon_rest_server_port }}/tcp"
diff --git a/roles/nuage_node/tasks/certificates.yml b/roles/nuage_node/tasks/certificates.yml
index 7fcd4274d..d1c8bf59a 100644
--- a/roles/nuage_node/tasks/certificates.yml
+++ b/roles/nuage_node/tasks/certificates.yml
@@ -5,7 +5,7 @@
- name: Create the key
command: >
- openssl genrsa -out "{{ nuage_ca_master_plugin_key }}" 4096
+ openssl genrsa -out "{{ nuage_ca_master_plugin_key }}" 4096
delegate_to: "{{ nuage_ca_master }}"
- name: Create the req file
@@ -30,7 +30,7 @@
shell: "cd {{ nuage_plugin_rest_client_crt_dir }} && tar -czvf /tmp/{{ ansible_nodename }}.tgz *"
delegate_to: "{{ nuage_ca_master }}"
-- name: Create a temp directory for the certificates
+- name: Create a temp directory for the certificates
local_action: command mktemp -d "/tmp/openshift-{{ ansible_nodename }}-XXXXXXX"
register: mktemp
@@ -42,7 +42,7 @@
unarchive: src="{{ mktemp.stdout }}/{{ ansible_nodename }}.tgz" dest={{ nuage_plugin_crt_dir }}
- name: Delete the certificates after copy
- file: path="{{ nuage_plugin_rest_client_crt_dir }}" state=absent
+ file: path="{{ nuage_plugin_rest_client_crt_dir }}" state=absent
delegate_to: "{{ nuage_ca_master }}"
- name: Delete the temp directory
diff --git a/roles/nuage_node/tasks/iptables.yml b/roles/nuage_node/tasks/iptables.yml
index 52935f075..8e2c29620 100644
--- a/roles/nuage_node/tasks/iptables.yml
+++ b/roles/nuage_node/tasks/iptables.yml
@@ -5,7 +5,7 @@
always_run: yes
- name: Allow traffic from overlay to underlay
- command: /sbin/iptables --wait -I FORWARD 1 -s {{ hostvars[groups.oo_first_master.0].openshift.master.sdn_cluster_network_cidr }} -j ACCEPT -m comment --comment "nuage-overlay-underlay"
+ command: /sbin/iptables --wait -I FORWARD 1 -s {{ hostvars[groups.oo_first_master.0].openshift.master.sdn_cluster_network_cidr }} -j ACCEPT -m comment --comment "nuage-overlay-underlay"
when: "'nuage-overlay-underlay' not in iptablesrules.stdout"
notify:
- save iptable rules
diff --git a/roles/nuage_node/tasks/main.yaml b/roles/nuage_node/tasks/main.yaml
index 2ec4be2c2..d82dd36a4 100644
--- a/roles/nuage_node/tasks/main.yaml
+++ b/roles/nuage_node/tasks/main.yaml
@@ -2,16 +2,16 @@
- name: Install Nuage VRS
become: yes
yum: name={{ vrs_rpm }} state=present
-
-- name: Set the uplink interface
+
+- name: Set the uplink interface
become: yes
lineinfile: dest={{ vrs_config }} regexp=^NETWORK_UPLINK_INTF line='NETWORK_UPLINK_INTF={{ uplink_interface }}'
-- name: Set the Active Controller
+- name: Set the Active Controller
become: yes
lineinfile: dest={{ vrs_config }} regexp=^ACTIVE_CONTROLLER line='ACTIVE_CONTROLLER={{ vsc_active_ip }}'
-- name: Set the Standby Controller
+- name: Set the Standby Controller
become: yes
lineinfile: dest={{ vrs_config }} regexp=^STANDBY_CONTROLLER line='STANDBY_CONTROLLER={{ vsc_standby_ip }}'
when: vsc_standby_ip is defined
@@ -24,18 +24,18 @@
become: yes
copy: src="/tmp/{{ item }}" dest="{{ vsp_openshift_dir }}/{{ item }}"
with_items:
- - ca.crt
- - nuage.crt
- - nuage.key
- - nuage.kubeconfig
+ - ca.crt
+ - nuage.crt
+ - nuage.key
+ - nuage.kubeconfig
- include: certificates.yml
-- name: Set the vsp-openshift.yaml
+- name: Set the vsp-openshift.yaml
become: yes
- template: src=vsp-openshift.j2 dest={{ vsp_openshift_yaml }} owner=root mode=0644
+ template: src=vsp-openshift.j2 dest={{ vsp_openshift_yaml }} owner=root mode=0644
notify:
- restart vrs
- - restart node
+ - restart node
- include: iptables.yml
diff --git a/roles/nuage_node/vars/main.yaml b/roles/nuage_node/vars/main.yaml
index 86486259f..7b789152f 100644
--- a/roles/nuage_node/vars/main.yaml
+++ b/roles/nuage_node/vars/main.yaml
@@ -17,6 +17,6 @@ plugin_log_level: "{{ nuage_plugin_log_level | default('err') }}"
nuage_plugin_rest_client_crt_dir: "{{ nuage_ca_master_crt_dir }}/{{ ansible_nodename }}"
nuage_ca_master_plugin_key: "{{ nuage_plugin_rest_client_crt_dir }}/nuageMonClient.key"
-nuage_ca_master_plugin_crt: "{{ nuage_plugin_rest_client_crt_dir }}/nuageMonClient.crt"
+nuage_ca_master_plugin_crt: "{{ nuage_plugin_rest_client_crt_dir }}/nuageMonClient.crt"
-nuage_plugin_crt_dir : /usr/share/vsp-openshift
+nuage_plugin_crt_dir: /usr/share/vsp-openshift
diff --git a/roles/openshift_builddefaults/tasks/main.yml b/roles/openshift_builddefaults/tasks/main.yml
index 6a4e919e8..1f44b29b9 100644
--- a/roles/openshift_builddefaults/tasks/main.yml
+++ b/roles/openshift_builddefaults/tasks/main.yml
@@ -15,10 +15,9 @@
no_proxy: "{{ openshift_builddefaults_no_proxy | default(None) }}"
git_http_proxy: "{{ openshift_builddefaults_git_http_proxy | default(None) }}"
git_https_proxy: "{{ openshift_builddefaults_git_https_proxy | default(None) }}"
-
+
- name: Set builddefaults config structure
openshift_facts:
role: builddefaults
local_facts:
config: "{{ openshift_builddefaults_json | default(builddefaults_yaml) }}"
-
diff --git a/roles/openshift_cloud_provider/tasks/aws.yml b/roles/openshift_cloud_provider/tasks/aws.yml
index 127a5b392..5fa8773f5 100644
--- a/roles/openshift_cloud_provider/tasks/aws.yml
+++ b/roles/openshift_cloud_provider/tasks/aws.yml
@@ -1,3 +1,4 @@
+---
# Work around ini_file create option in 2.2 which defaults to no
- name: Create cloud config file
file:
diff --git a/roles/openshift_cloud_provider/tasks/gce.yml b/roles/openshift_cloud_provider/tasks/gce.yml
index 14ad8ba94..ee4048911 100644
--- a/roles/openshift_cloud_provider/tasks/gce.yml
+++ b/roles/openshift_cloud_provider/tasks/gce.yml
@@ -1,3 +1,4 @@
+---
# Work around ini_file create option in 2.2 which defaults to no
- name: Create cloud config file
file:
diff --git a/roles/openshift_common/tasks/main.yml b/roles/openshift_common/tasks/main.yml
index c9a44b3f5..0a476ac26 100644
--- a/roles/openshift_common/tasks/main.yml
+++ b/roles/openshift_common/tasks/main.yml
@@ -4,11 +4,11 @@
when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_flannel | default(false) | bool
- fail:
- msg: Nuage sdn can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use nuage
+ msg: Nuage sdn can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use nuage
when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_nuage | default(false) | bool
- fail:
- msg: Nuage sdn can not be used with flannel
+ msg: Nuage sdn can not be used with flannel
when: openshift_use_flannel | default(false) | bool and openshift_use_nuage | default(false) | bool
- fail:
@@ -46,4 +46,3 @@
command: >
hostnamectl set-hostname {{ openshift.common.hostname }}
when: openshift_set_hostname | default(set_hostname_default) | bool
-
diff --git a/roles/openshift_docker_facts/tasks/main.yml b/roles/openshift_docker_facts/tasks/main.yml
index c690c5243..613c237a3 100644
--- a/roles/openshift_docker_facts/tasks/main.yml
+++ b/roles/openshift_docker_facts/tasks/main.yml
@@ -9,7 +9,7 @@
additional_registries: "{{ openshift_docker_additional_registries | default(None) }}"
blocked_registries: "{{ openshift_docker_blocked_registries | default(None) }}"
insecure_registries: "{{ openshift_docker_insecure_registries | default(None) }}"
- log_driver: "{{ openshift_docker_log_driver | default(None) }}"
+ log_driver: "{{ openshift_docker_log_driver | default(None) }}"
log_options: "{{ openshift_docker_log_options | default(None) }}"
options: "{{ openshift_docker_options | default(None) }}"
disable_push_dockerhub: "{{ openshift_disable_push_dockerhub | default(None) }}"
diff --git a/roles/openshift_examples/defaults/main.yml b/roles/openshift_examples/defaults/main.yml
index e843049f9..fc4b56bbf 100644
--- a/roles/openshift_examples/defaults/main.yml
+++ b/roles/openshift_examples/defaults/main.yml
@@ -12,8 +12,8 @@ examples_base: "{{ openshift.common.config_base if openshift.common.is_container
image_streams_base: "{{ examples_base }}/image-streams"
centos_image_streams: "{{ image_streams_base}}/image-streams-centos7.json"
rhel_image_streams:
- - "{{ image_streams_base}}/image-streams-rhel7.json"
- - "{{ image_streams_base}}/dotnet_imagestreams.json"
+ - "{{ image_streams_base}}/image-streams-rhel7.json"
+ - "{{ image_streams_base}}/dotnet_imagestreams.json"
db_templates_base: "{{ examples_base }}/db-templates"
xpaas_image_streams: "{{ examples_base }}/xpaas-streams/"
xpaas_templates_base: "{{ examples_base }}/xpaas-templates"
diff --git a/roles/openshift_expand_partition/meta/main.yml b/roles/openshift_expand_partition/meta/main.yml
index a596d6c63..dea6b6ee0 100644
--- a/roles/openshift_expand_partition/meta/main.yml
+++ b/roles/openshift_expand_partition/meta/main.yml
@@ -13,6 +13,6 @@ galaxy_info:
versions:
- all
categories:
- - openshift
- - cloud
+ - openshift
+ - cloud
dependencies: []
diff --git a/roles/openshift_hosted/tasks/registry/storage/object_storage.yml b/roles/openshift_hosted/tasks/registry/storage/object_storage.yml
index 7b1b3f6ff..e56a68e27 100644
--- a/roles/openshift_hosted/tasks/registry/storage/object_storage.yml
+++ b/roles/openshift_hosted/tasks/registry/storage/object_storage.yml
@@ -1,3 +1,4 @@
+---
- fail:
msg: >
Object Storage Provider: {{ openshift.hosted.registry.storage.provider }}
diff --git a/roles/openshift_hosted_logging/tasks/cleanup_logging.yaml b/roles/openshift_hosted_logging/tasks/cleanup_logging.yaml
index 8754616d9..70b0d67a4 100644
--- a/roles/openshift_hosted_logging/tasks/cleanup_logging.yaml
+++ b/roles/openshift_hosted_logging/tasks/cleanup_logging.yaml
@@ -1,59 +1,59 @@
---
- - name: Create temp directory for kubeconfig
- command: mktemp -d /tmp/openshift-ansible-XXXXXX
- register: mktemp
- changed_when: False
+- name: Create temp directory for kubeconfig
+ command: mktemp -d /tmp/openshift-ansible-XXXXXX
+ register: mktemp
+ changed_when: False
- - name: Copy the admin client config(s)
- command: >
- cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
- changed_when: False
+- name: Copy the admin client config(s)
+ command: >
+ cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
+ changed_when: False
- - name: "Checking for logging project"
- command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get project logging"
- register: logging_project
- failed_when: "'FAILED' in logging_project.stderr"
+- name: "Checking for logging project"
+ command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get project logging"
+ register: logging_project
+ failed_when: "'FAILED' in logging_project.stderr"
- - name: "Changing projects"
- command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig project logging"
+- name: "Changing projects"
+ command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig project logging"
- - name: "Cleanup any previous logging infrastructure"
- command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete --ignore-not-found all --selector logging-infra={{ item }}"
- with_items:
- - kibana
- - fluentd
- - elasticsearch
- ignore_errors: yes
+- name: "Cleanup any previous logging infrastructure"
+ command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete --ignore-not-found all --selector logging-infra={{ item }}"
+ with_items:
+ - kibana
+ - fluentd
+ - elasticsearch
+ ignore_errors: yes
- - name: "Cleanup existing support infrastructure"
- command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete --ignore-not-found all,sa,oauthclient --selector logging-infra=support"
- ignore_errors: yes
+- name: "Cleanup existing support infrastructure"
+ command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete --ignore-not-found all,sa,oauthclient --selector logging-infra=support"
+ ignore_errors: yes
- - name: "Cleanup existing secrets"
- command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete secret logging-fluentd logging-elasticsearch logging-es-proxy logging-kibana logging-kibana-proxy logging-kibana-ops-proxy"
- ignore_errors: yes
- register: clean_result
- failed_when: clean_result.rc == 1 and 'not found' not in clean_result.stderr
+- name: "Cleanup existing secrets"
+ command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete secret logging-fluentd logging-elasticsearch logging-es-proxy logging-kibana logging-kibana-proxy logging-kibana-ops-proxy"
+ ignore_errors: yes
+ register: clean_result
+ failed_when: clean_result.rc == 1 and 'not found' not in clean_result.stderr
- - name: "Cleanup existing logging deployers"
- command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete pods --all"
+- name: "Cleanup existing logging deployers"
+ command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete pods --all"
- - name: "Cleanup logging project"
- command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete project logging"
+- name: "Cleanup logging project"
+ command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete project logging"
- - name: "Remove deployer template"
- command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete template logging-deployer-template -n openshift"
- register: delete_output
- failed_when: delete_output.rc == 1 and 'exists' not in delete_output.stderr
+- name: "Remove deployer template"
+ command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete template logging-deployer-template -n openshift"
+ register: delete_output
+ failed_when: delete_output.rc == 1 and 'exists' not in delete_output.stderr
- - name: Delete temp directory
- file:
- name: "{{ mktemp.stdout }}"
- state: absent
- changed_when: False
+- name: Delete temp directory
+ file:
+ name: "{{ mktemp.stdout }}"
+ state: absent
+ changed_when: False
- - debug: msg="Success!"
+- debug: msg="Success!"
diff --git a/roles/openshift_hosted_logging/tasks/deploy_logging.yaml b/roles/openshift_hosted_logging/tasks/deploy_logging.yaml
index 625af9acd..513a74c69 100644
--- a/roles/openshift_hosted_logging/tasks/deploy_logging.yaml
+++ b/roles/openshift_hosted_logging/tasks/deploy_logging.yaml
@@ -1,175 +1,175 @@
---
- - debug: msg="WARNING target_registry is deprecated, use openshift_hosted_logging_image_prefix instead"
- when: target_registry is defined and target_registry
-
- - fail: msg="This role requires the following vars to be defined. openshift_hosted_logging_master_public_url, openshift_hosted_logging_hostname, openshift_hosted_logging_elasticsearch_cluster_size"
- when: "openshift_hosted_logging_hostname is not defined or
- openshift_hosted_logging_elasticsearch_cluster_size is not defined or
- openshift_hosted_logging_master_public_url is not defined"
-
- - name: Create temp directory for kubeconfig
- command: mktemp -d /tmp/openshift-ansible-XXXXXX
- register: mktemp
- changed_when: False
-
- - name: Copy the admin client config(s)
- command: >
- cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
- changed_when: False
-
- - name: "Check for logging project already exists"
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get project logging -o jsonpath='{.metadata.name}'
- register: logging_project_result
- ignore_errors: True
-
- - name: "Create logging project"
- command: >
- {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig new-project logging
- when: logging_project_result.stdout == ""
-
- - name: "Changing projects"
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig project logging
-
- - name: "Creating logging deployer secret"
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig secrets new logging-deployer {{ openshift_hosted_logging_secret_vars | default('nothing=/dev/null') }}
- register: secret_output
- failed_when: "secret_output.rc == 1 and 'exists' not in secret_output.stderr"
-
- - name: "Create templates for logging accounts and the deployer"
- command: >
- {{ openshift.common.client_binary }} create --config={{ mktemp.stdout }}/admin.kubeconfig
- -f {{ hosted_base }}/logging-deployer.yaml
- --config={{ mktemp.stdout }}/admin.kubeconfig
- -n logging
- register: logging_import_template
- failed_when: "'already exists' not in logging_import_template.stderr and logging_import_template.rc != 0"
- changed_when: "'created' in logging_import_template.stdout"
-
- - name: "Process the logging accounts template"
- shell: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
- process logging-deployer-account-template | {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f -
- register: process_deployer_accounts
- failed_when: process_deployer_accounts.rc == 1 and 'already exists' not in process_deployer_accounts.stderr
-
- - name: "Set permissions for logging-deployer service account"
- command: >
- {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig
- policy add-cluster-role-to-user oauth-editor system:serviceaccount:logging:logging-deployer
- register: permiss_output
- failed_when: "permiss_output.rc == 1 and 'exists' not in permiss_output.stderr"
-
- - name: "Set permissions for fluentd"
- command: >
- {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig
- policy add-scc-to-user privileged system:serviceaccount:logging:aggregated-logging-fluentd
- register: fluentd_output
- failed_when: "fluentd_output.rc == 1 and 'exists' not in fluentd_output.stderr"
-
- - name: "Set additional permissions for fluentd"
- command: >
- {{ openshift.common.client_binary }} adm policy --config={{ mktemp.stdout }}/admin.kubeconfig
- add-cluster-role-to-user cluster-reader system:serviceaccount:logging:aggregated-logging-fluentd
- register: fluentd2_output
- failed_when: "fluentd2_output.rc == 1 and 'exists' not in fluentd2_output.stderr"
-
- - name: "Add rolebinding-reader to aggregated-logging-elasticsearch"
- command: >
- {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig
- policy add-cluster-role-to-user rolebinding-reader \
- system:serviceaccount:logging:aggregated-logging-elasticsearch
- register: rolebinding_reader_output
- failed_when: "rolebinding_reader_output == 1 and 'exists' not in rolebinding_reader_output.stderr"
-
- - name: "Create ConfigMap for deployer parameters"
- command: >
- {{ openshift.common.client_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig create configmap logging-deployer {{ deployer_cmap_params }}
- register: deployer_configmap_output
- failed_when: "deployer_configmap_output.rc == 1 and 'exists' not in deployer_configmap_output.stderr"
-
- - name: "Process the deployer template"
- shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-app logging-deployer-template {{ oc_new_app_values }}"
- register: process_deployer
- failed_when: process_deployer.rc == 1 and 'already exists' not in process_deployer.stderr
-
- - name: "Wait for image pull and deployer pod"
- shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods | grep logging-deployer.*Completed"
- register: result
- until: result.rc == 0
- retries: 20
- delay: 15
-
- - name: "Process imagestream template"
- shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-app logging-imagestream-template {{ oc_new_app_values }}"
- when: tr_or_ohlip is defined and insecure_registry is defined and insecure_registry
- register: process_is
- failed_when: process_is.rc == 1 and 'already exists' not in process_is.stderr
-
- - name: "Set insecured registry"
- command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig annotate is --all openshift.io/image.insecureRepository=true --overwrite"
- when: tr_or_ohlip is defined and insecure_registry is defined and insecure_registry
-
- - name: "Wait for imagestreams to become available"
- shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get is | grep logging-fluentd"
- when: tr_or_ohlip is defined and insecure_registry is defined and insecure_registry
- register: result
- until: result.rc == 0
- failed_when: result.rc == 1 and 'not found' not in result.stderr
- retries: 20
- delay: 5
-
- - name: "Wait for component pods to be running"
- shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods -l component={{ item }} | grep Running"
- with_items:
- - es
- - kibana
- - curator
- register: result
- until: result.rc == 0
- failed_when: result.rc == 1 or 'Error' in result.stderr
- retries: 20
- delay: 15
-
- - name: "Wait for ops component pods to be running"
- shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods -l component={{ item }} | grep Running"
- with_items:
- - es-ops
- - kibana-ops
- - curator-ops
- when: openshift_hosted_logging_enable_ops_cluster is defined and openshift_hosted_logging_enable_ops_cluster
- register: result
- until: result.rc == 0
- failed_when: result.rc == 1 or 'Error' in result.stderr
- retries: 20
- delay: 15
-
- - name: "Wait for fluentd DaemonSet to exist"
- shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get daemonset logging-fluentd"
- register: result
- until: result.rc == 0
- failed_when: result.rc == 1 or 'Error' in result.stderr
- retries: 20
- delay: 5
-
- - name: "Deploy fluentd by labeling the node"
- shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig label node --overwrite=true {{ '-l' ~ openshift_hosted_logging_fluentd_nodeselector if openshift_hosted_logging_fluentd_nodeselector is defined else '--all' }} {{ openshift_hosted_logging_fluentd_nodeselector_label if openshift_hosted_logging_fluentd_nodeselector_label is defined else 'logging-infra-fluentd=true' }}"
-
- - name: "Wait for fluentd to be running"
- shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods -l component=fluentd | grep Running"
- register: result
- until: result.rc == 0
- failed_when: result.rc == 1 or 'Error' in result.stderr
- retries: 20
- delay: 15
-
- - debug:
- msg: "Logging components deployed. Note persistent volume for elasticsearch must be setup manually"
-
- - name: Delete temp directory
- file:
- name: "{{ mktemp.stdout }}"
- state: absent
- changed_when: False
+- debug: msg="WARNING target_registry is deprecated, use openshift_hosted_logging_image_prefix instead"
+ when: target_registry is defined and target_registry
+
+- fail: msg="This role requires the following vars to be defined. openshift_hosted_logging_master_public_url, openshift_hosted_logging_hostname, openshift_hosted_logging_elasticsearch_cluster_size"
+ when: "openshift_hosted_logging_hostname is not defined or
+ openshift_hosted_logging_elasticsearch_cluster_size is not defined or
+ openshift_hosted_logging_master_public_url is not defined"
+
+- name: Create temp directory for kubeconfig
+ command: mktemp -d /tmp/openshift-ansible-XXXXXX
+ register: mktemp
+ changed_when: False
+
+- name: Copy the admin client config(s)
+ command: >
+ cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
+ changed_when: False
+
+- name: "Check for logging project already exists"
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get project logging -o jsonpath='{.metadata.name}'
+ register: logging_project_result
+ ignore_errors: True
+
+- name: "Create logging project"
+ command: >
+ {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig new-project logging
+ when: logging_project_result.stdout == ""
+
+- name: "Changing projects"
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig project logging
+
+- name: "Creating logging deployer secret"
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig secrets new logging-deployer {{ openshift_hosted_logging_secret_vars | default('nothing=/dev/null') }}
+ register: secret_output
+ failed_when: "secret_output.rc == 1 and 'exists' not in secret_output.stderr"
+
+- name: "Create templates for logging accounts and the deployer"
+ command: >
+ {{ openshift.common.client_binary }} create --config={{ mktemp.stdout }}/admin.kubeconfig
+ -f {{ hosted_base }}/logging-deployer.yaml
+ --config={{ mktemp.stdout }}/admin.kubeconfig
+ -n logging
+ register: logging_import_template
+ failed_when: "'already exists' not in logging_import_template.stderr and logging_import_template.rc != 0"
+ changed_when: "'created' in logging_import_template.stdout"
+
+- name: "Process the logging accounts template"
+ shell: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ process logging-deployer-account-template | {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f -
+ register: process_deployer_accounts
+ failed_when: process_deployer_accounts.rc == 1 and 'already exists' not in process_deployer_accounts.stderr
+
+- name: "Set permissions for logging-deployer service account"
+ command: >
+ {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig
+ policy add-cluster-role-to-user oauth-editor system:serviceaccount:logging:logging-deployer
+ register: permiss_output
+ failed_when: "permiss_output.rc == 1 and 'exists' not in permiss_output.stderr"
+
+- name: "Set permissions for fluentd"
+ command: >
+ {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig
+ policy add-scc-to-user privileged system:serviceaccount:logging:aggregated-logging-fluentd
+ register: fluentd_output
+ failed_when: "fluentd_output.rc == 1 and 'exists' not in fluentd_output.stderr"
+
+- name: "Set additional permissions for fluentd"
+ command: >
+ {{ openshift.common.client_binary }} adm policy --config={{ mktemp.stdout }}/admin.kubeconfig
+ add-cluster-role-to-user cluster-reader system:serviceaccount:logging:aggregated-logging-fluentd
+ register: fluentd2_output
+ failed_when: "fluentd2_output.rc == 1 and 'exists' not in fluentd2_output.stderr"
+
+- name: "Add rolebinding-reader to aggregated-logging-elasticsearch"
+ command: >
+ {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig
+ policy add-cluster-role-to-user rolebinding-reader \
+ system:serviceaccount:logging:aggregated-logging-elasticsearch
+ register: rolebinding_reader_output
+ failed_when: "rolebinding_reader_output == 1 and 'exists' not in rolebinding_reader_output.stderr"
+
+- name: "Create ConfigMap for deployer parameters"
+ command: >
+ {{ openshift.common.client_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig create configmap logging-deployer {{ deployer_cmap_params }}
+ register: deployer_configmap_output
+ failed_when: "deployer_configmap_output.rc == 1 and 'exists' not in deployer_configmap_output.stderr"
+
+- name: "Process the deployer template"
+ shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-app logging-deployer-template {{ oc_new_app_values }}"
+ register: process_deployer
+ failed_when: process_deployer.rc == 1 and 'already exists' not in process_deployer.stderr
+
+- name: "Wait for image pull and deployer pod"
+ shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods | grep logging-deployer.*Completed"
+ register: result
+ until: result.rc == 0
+ retries: 20
+ delay: 15
+
+- name: "Process imagestream template"
+ shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-app logging-imagestream-template {{ oc_new_app_values }}"
+ when: tr_or_ohlip is defined and insecure_registry is defined and insecure_registry
+ register: process_is
+ failed_when: process_is.rc == 1 and 'already exists' not in process_is.stderr
+
+- name: "Set insecured registry"
+ command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig annotate is --all openshift.io/image.insecureRepository=true --overwrite"
+ when: tr_or_ohlip is defined and insecure_registry is defined and insecure_registry
+
+- name: "Wait for imagestreams to become available"
+ shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get is | grep logging-fluentd"
+ when: tr_or_ohlip is defined and insecure_registry is defined and insecure_registry
+ register: result
+ until: result.rc == 0
+ failed_when: result.rc == 1 and 'not found' not in result.stderr
+ retries: 20
+ delay: 5
+
+- name: "Wait for component pods to be running"
+ shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods -l component={{ item }} | grep Running"
+ with_items:
+ - es
+ - kibana
+ - curator
+ register: result
+ until: result.rc == 0
+ failed_when: result.rc == 1 or 'Error' in result.stderr
+ retries: 20
+ delay: 15
+
+- name: "Wait for ops component pods to be running"
+ shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods -l component={{ item }} | grep Running"
+ with_items:
+ - es-ops
+ - kibana-ops
+ - curator-ops
+ when: openshift_hosted_logging_enable_ops_cluster is defined and openshift_hosted_logging_enable_ops_cluster
+ register: result
+ until: result.rc == 0
+ failed_when: result.rc == 1 or 'Error' in result.stderr
+ retries: 20
+ delay: 15
+
+- name: "Wait for fluentd DaemonSet to exist"
+ shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get daemonset logging-fluentd"
+ register: result
+ until: result.rc == 0
+ failed_when: result.rc == 1 or 'Error' in result.stderr
+ retries: 20
+ delay: 5
+
+- name: "Deploy fluentd by labeling the node"
+ shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig label node --overwrite=true {{ '-l' ~ openshift_hosted_logging_fluentd_nodeselector if openshift_hosted_logging_fluentd_nodeselector is defined else '--all' }} {{ openshift_hosted_logging_fluentd_nodeselector_label if openshift_hosted_logging_fluentd_nodeselector_label is defined else 'logging-infra-fluentd=true' }}"
+
+- name: "Wait for fluentd to be running"
+ shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods -l component=fluentd | grep Running"
+ register: result
+ until: result.rc == 0
+ failed_when: result.rc == 1 or 'Error' in result.stderr
+ retries: 20
+ delay: 15
+
+- debug:
+ msg: "Logging components deployed. Note persistent volume for elasticsearch must be setup manually"
+
+- name: Delete temp directory
+ file:
+ name: "{{ mktemp.stdout }}"
+ state: absent
+ changed_when: False
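This file, like cleanup_logging.yaml above it, changes only by dropping a uniform two-space offset from its top-level task list. YAML accepts either layout for a root-level sequence, so every task parses to the same structure; the de-indent simply matches the rest of the tree. A minimal before/after:

    # before: the whole list offset under the document marker
    ---
      - name: Create temp directory for kubeconfig
        command: mktemp -d /tmp/openshift-ansible-XXXXXX

    # after: flush at column zero, identical meaning
    ---
    - name: Create temp directory for kubeconfig
      command: mktemp -d /tmp/openshift-ansible-XXXXXX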
diff --git a/roles/openshift_hosted_logging/vars/main.yaml b/roles/openshift_hosted_logging/vars/main.yaml
index 11412733b..33320e9c8 100644
--- a/roles/openshift_hosted_logging/vars/main.yaml
+++ b/roles/openshift_hosted_logging/vars/main.yaml
@@ -1,3 +1,4 @@
+---
tr_or_ohlip: "{{ openshift_hosted_logging_deployer_prefix | default(target_registry) | default(None) }}"
ip_kv: "{{ '-p IMAGE_PREFIX=' ~ tr_or_ohlip | quote if tr_or_ohlip != '' else '' }}"
iv_kv: "{{ '-p IMAGE_VERSION=' ~ openshift_hosted_logging_deployer_version | quote if openshift_hosted_logging_deployer_version | default(none) is not none else '' }}"
diff --git a/roles/openshift_manageiq/vars/main.yml b/roles/openshift_manageiq/vars/main.yml
index 37d4679ef..3f24fd6be 100644
--- a/roles/openshift_manageiq/vars/main.yml
+++ b/roles/openshift_manageiq/vars/main.yml
@@ -1,13 +1,14 @@
+---
manageiq_cluster_role:
- apiVersion: v1
- kind: ClusterRole
- metadata:
- name: management-infra-admin
- rules:
- - resources:
- - pods/proxy
- verbs:
- - '*'
+ apiVersion: v1
+ kind: ClusterRole
+ metadata:
+ name: management-infra-admin
+ rules:
+ - resources:
+ - pods/proxy
+ verbs:
+ - '*'
manageiq_metrics_admin_clusterrole:
apiVersion: v1
@@ -24,28 +25,28 @@ manageiq_metrics_admin_clusterrole:
- '*'
manageiq_service_account:
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: management-admin
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: management-admin
manageiq_image_inspector_service_account:
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: inspector-admin
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: inspector-admin
manage_iq_tmp_conf: /tmp/manageiq_admin.kubeconfig
manage_iq_tasks:
- - policy add-role-to-user -n management-infra admin -z management-admin
- - policy add-role-to-user -n management-infra management-infra-admin -z management-admin
- - policy add-cluster-role-to-user cluster-reader system:serviceaccount:management-infra:management-admin
- - policy add-scc-to-user privileged system:serviceaccount:management-infra:management-admin
- - policy add-cluster-role-to-user system:image-puller system:serviceaccount:management-infra:inspector-admin
- - policy add-scc-to-user privileged system:serviceaccount:management-infra:inspector-admin
- - policy add-cluster-role-to-user self-provisioner system:serviceaccount:management-infra:management-admin
- - policy add-cluster-role-to-user hawkular-metrics-admin system:serviceaccount:management-infra:management-admin
+- policy add-role-to-user -n management-infra admin -z management-admin
+- policy add-role-to-user -n management-infra management-infra-admin -z management-admin
+- policy add-cluster-role-to-user cluster-reader system:serviceaccount:management-infra:management-admin
+- policy add-scc-to-user privileged system:serviceaccount:management-infra:management-admin
+- policy add-cluster-role-to-user system:image-puller system:serviceaccount:management-infra:inspector-admin
+- policy add-scc-to-user privileged system:serviceaccount:management-infra:inspector-admin
+- policy add-cluster-role-to-user self-provisioner system:serviceaccount:management-infra:management-admin
+- policy add-cluster-role-to-user hawkular-metrics-admin system:serviceaccount:management-infra:management-admin
manage_iq_openshift_3_2_tasks:
- - policy add-cluster-role-to-user system:image-auditor system:serviceaccount:management-infra:management-admin
+- policy add-cluster-role-to-user system:image-auditor system:serviceaccount:management-infra:management-admin
diff --git a/roles/openshift_master/tasks/systemd_units.yml b/roles/openshift_master/tasks/systemd_units.yml
index e2b722abd..39ea42ab3 100644
--- a/roles/openshift_master/tasks/systemd_units.yml
+++ b/roles/openshift_master/tasks/systemd_units.yml
@@ -1,3 +1,4 @@
+---
# This file is included both in the openshift_master role and in the upgrade
# playbooks. For that reason the ha_svc variables are use set_fact instead of
# the vars directory on the role.
diff --git a/roles/openshift_master_facts/tasks/main.yml b/roles/openshift_master_facts/tasks/main.yml
index f5923ecf8..0dba4b3ba 100644
--- a/roles/openshift_master_facts/tasks/main.yml
+++ b/roles/openshift_master_facts/tasks/main.yml
@@ -92,8 +92,8 @@
controller_lease_ttl: "{{ osm_controller_lease_ttl | default(None) }}"
master_image: "{{ osm_image | default(None) }}"
admission_plugin_config: "{{openshift_master_admission_plugin_config | default(None) }}"
- kube_admission_plugin_config: "{{openshift_master_kube_admission_plugin_config | default(None) }}" # deprecated, merged with admission_plugin_config
- oauth_template: "{{ openshift_master_oauth_template | default(None) }}" # deprecated in origin 1.2 / OSE 3.2
+ kube_admission_plugin_config: "{{openshift_master_kube_admission_plugin_config | default(None) }}" # deprecated, merged with admission_plugin_config
+ oauth_template: "{{ openshift_master_oauth_template | default(None) }}" # deprecated in origin 1.2 / OSE 3.2
oauth_templates: "{{ openshift_master_oauth_templates | default(None) }}"
oauth_always_show_provider_selection: "{{ openshift_master_oauth_always_show_provider_selection | default(None) }}"
image_policy_config: "{{ openshift_master_image_policy_config | default(None) }}"
diff --git a/roles/openshift_master_facts/vars/main.yml b/roles/openshift_master_facts/vars/main.yml
index a5ad580e7..fa745eb66 100644
--- a/roles/openshift_master_facts/vars/main.yml
+++ b/roles/openshift_master_facts/vars/main.yml
@@ -23,4 +23,3 @@ builddefaults_yaml:
value: "{{ openshift.master.builddefaults_https_proxy | default(omit, true) }}"
- name: no_proxy
value: "{{ openshift.master.builddefaults_no_proxy | default(omit, true) | join(',') }}"
-
diff --git a/roles/openshift_metrics/tasks/main.yaml b/roles/openshift_metrics/tasks/main.yaml
index be3256f02..68e4a48b9 100644
--- a/roles/openshift_metrics/tasks/main.yaml
+++ b/roles/openshift_metrics/tasks/main.yaml
@@ -38,9 +38,9 @@
get pods -l {{ item }} | grep -q Running
register: metrics_pods_status
with_items:
- - metrics-infra=hawkular-metrics
- - metrics-infra=heapster
- - metrics-infra=hawkular-cassandra
+ - metrics-infra=hawkular-metrics
+ - metrics-infra=heapster
+ - metrics-infra=hawkular-cassandra
failed_when: false
changed_when: false
diff --git a/roles/openshift_metrics/vars/main.yaml b/roles/openshift_metrics/vars/main.yaml
index 0331bcb89..6c207d6ac 100644
--- a/roles/openshift_metrics/vars/main.yaml
+++ b/roles/openshift_metrics/vars/main.yaml
@@ -1,6 +1,7 @@
+---
hawkular_permission_oc_commands:
- - policy add-role-to-user edit system:serviceaccount:openshift-infra:metrics-deployer -n openshift-infra
- - policy add-cluster-role-to-user cluster-admin system:serviceaccount:openshift-infra:heapster
+ - policy add-role-to-user edit system:serviceaccount:openshift-infra:metrics-deployer -n openshift-infra
+ - policy add-cluster-role-to-user cluster-admin system:serviceaccount:openshift-infra:heapster
metrics_deployer_sa:
apiVersion: v1
@@ -8,7 +9,7 @@ metrics_deployer_sa:
metadata:
name: metrics-deployer
secrets:
- - name: metrics-deployer
+ - name: metrics-deployer
hawkular_tmp_conf: /tmp/hawkular_admin.kubeconfig
diff --git a/roles/openshift_node/tasks/systemd_units.yml b/roles/openshift_node/tasks/systemd_units.yml
index 8b669a2c6..626c47387 100644
--- a/roles/openshift_node/tasks/systemd_units.yml
+++ b/roles/openshift_node/tasks/systemd_units.yml
@@ -1,3 +1,4 @@
+---
# This file is included both in the openshift_master role and in the upgrade
# playbooks.
@@ -68,12 +69,12 @@
line: "{{ item.line }}"
create: true
with_items:
- - regex: '^OPTIONS='
- line: "OPTIONS=--loglevel={{ openshift.node.debug_level | default(2) }}"
- - regex: '^CONFIG_FILE='
- line: "CONFIG_FILE={{ openshift.common.config_base }}/node/node-config.yaml"
- - regex: '^IMAGE_VERSION='
- line: "IMAGE_VERSION={{ openshift_image_tag }}"
+ - regex: '^OPTIONS='
+ line: "OPTIONS=--loglevel={{ openshift.node.debug_level | default(2) }}"
+ - regex: '^CONFIG_FILE='
+ line: "CONFIG_FILE={{ openshift.common.config_base }}/node/node-config.yaml"
+ - regex: '^IMAGE_VERSION='
+ line: "IMAGE_VERSION={{ openshift_image_tag }}"
notify:
- restart node
@@ -84,12 +85,12 @@
line: "{{ item.line }}"
create: true
with_items:
- - regex: '^HTTP_PROXY='
- line: "HTTP_PROXY={{ openshift.common.http_proxy | default('') }}"
- - regex: '^HTTPS_PROXY='
- line: "HTTPS_PROXY={{ openshift.common.https_proxy | default('') }}"
- - regex: '^NO_PROXY='
- line: "NO_PROXY={{ openshift.common.no_proxy | default([]) | join(',') }},{{ openshift.common.portal_net }},{{ hostvars[groups.oo_first_master.0].openshift.master.sdn_cluster_network_cidr }}"
+ - regex: '^HTTP_PROXY='
+ line: "HTTP_PROXY={{ openshift.common.http_proxy | default('') }}"
+ - regex: '^HTTPS_PROXY='
+ line: "HTTPS_PROXY={{ openshift.common.https_proxy | default('') }}"
+ - regex: '^NO_PROXY='
+ line: "NO_PROXY={{ openshift.common.no_proxy | default([]) | join(',') }},{{ openshift.common.portal_net }},{{ hostvars[groups.oo_first_master.0].openshift.master.sdn_cluster_network_cidr }}"
when: ('http_proxy' in openshift.common and openshift.common.http_proxy != '')
notify:
- restart node
diff --git a/roles/openshift_node_dnsmasq/tasks/no-network-manager.yml b/roles/openshift_node_dnsmasq/tasks/no-network-manager.yml
index 4d1bd3794..d5fda7bd0 100644
--- a/roles/openshift_node_dnsmasq/tasks/no-network-manager.yml
+++ b/roles/openshift_node_dnsmasq/tasks/no-network-manager.yml
@@ -1,2 +1,2 @@
---
-- fail: msg="Currently, NetworkManager must be installed and enabled prior to installation." \ No newline at end of file
+- fail: msg="Currently, NetworkManager must be installed and enabled prior to installation."
diff --git a/roles/openshift_repos/vars/main.yml b/roles/openshift_repos/vars/main.yml
index 319611a0b..da48e42c1 100644
--- a/roles/openshift_repos/vars/main.yml
+++ b/roles/openshift_repos/vars/main.yml
@@ -4,4 +4,4 @@
# enterprise is used for OSE 3.0 < 3.1 which uses packages named 'openshift'
# atomic-enterprise uses Red Hat packages named 'atomic-openshift'
# openshift-enterprise uses Red Hat packages named 'atomic-openshift' starting with OSE 3.1
-known_openshift_deployment_types: ['origin', 'online', 'enterprise','atomic-enterprise','openshift-enterprise']
+known_openshift_deployment_types: ['origin', 'online', 'enterprise', 'atomic-enterprise', 'openshift-enterprise']
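The one-liner above is the commas rule, which wants exactly one space after each comma in a flow sequence. Reduced to a sketch (key and values illustrative):

    # flagged: no space after the commas
    types: ['a','b','c']
    # clean
    types: ['a', 'b', 'c']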
diff --git a/roles/openshift_serviceaccounts/tasks/legacy_add_scc_to_user.yml b/roles/openshift_serviceaccounts/tasks/legacy_add_scc_to_user.yml
index 8715fc64e..b8cbe9a84 100644
--- a/roles/openshift_serviceaccounts/tasks/legacy_add_scc_to_user.yml
+++ b/roles/openshift_serviceaccounts/tasks/legacy_add_scc_to_user.yml
@@ -1,3 +1,4 @@
+---
####
#
# OSE 3.0.z did not have 'oadm policy add-scc-to-user'.
@@ -9,7 +10,7 @@
path: /tmp/openshift
state: directory
owner: root
- mode: 700
+ mode: 0700
- name: Create service account configs
template:
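The mode edit in this hunk is more than cosmetics. YAML reads a bare 700 as the decimal integer 700, and Ansible's file module then applies unexpected permission bits; the leading zero turns it into the octal 0700 that was intended, and quoting the value is equally safe. A sketch (the task name is hypothetical, the remaining keys mirror the hunk):

    - name: Create a root-only scratch directory
      file:
        path: /tmp/openshift
        state: directory
        owner: root
        mode: 0700      # octal, rwx for owner only
        # mode: 700     # decimal 700, wrong permission bits
        # mode: "0700"  # quoted string, also safe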
diff --git a/roles/openshift_serviceaccounts/tasks/main.yml b/roles/openshift_serviceaccounts/tasks/main.yml
index 1ff9e6dcb..d83ccf7de 100644
--- a/roles/openshift_serviceaccounts/tasks/main.yml
+++ b/roles/openshift_serviceaccounts/tasks/main.yml
@@ -1,3 +1,4 @@
+---
- name: test if service accounts exists
command: >
{{ openshift.common.client_binary }} get sa {{ item }} -n {{ openshift_serviceaccounts_namespace }}
diff --git a/roles/openshift_storage_nfs_lvm/meta/main.yml b/roles/openshift_storage_nfs_lvm/meta/main.yml
index bed1216f8..ea7c9bb45 100644
--- a/roles/openshift_storage_nfs_lvm/meta/main.yml
+++ b/roles/openshift_storage_nfs_lvm/meta/main.yml
@@ -13,5 +13,5 @@ galaxy_info:
versions:
- all
categories:
- - openshift
+ - openshift
dependencies: []
diff --git a/roles/rhel_subscribe/meta/main.yml b/roles/rhel_subscribe/meta/main.yml
index 6204a5aa5..0bbeadd34 100644
--- a/roles/rhel_subscribe/meta/main.yml
+++ b/roles/rhel_subscribe/meta/main.yml
@@ -1,2 +1,3 @@
+---
dependencies:
-- role: openshift_facts
+ - role: openshift_facts
diff --git a/utils/Makefile b/utils/Makefile
index ad6735cb5..c061edd8c 100644
--- a/utils/Makefile
+++ b/utils/Makefile
@@ -32,6 +32,10 @@ ASCII2MAN = a2x -D $(dir $@) -d manpage -f manpage $<
MANPAGES := docs/man/man1/atomic-openshift-installer.1
VERSION := 1.3
+# YAMLFILES: Skipping all '/files/' folders due to conflicting yaml file definitions
+YAMLFILES = $(shell find ../ -name $(VENV) -prune -o \( -name '*.yml' -o -name '*.yaml' \) ! -path "*/files/*" 2>&1)
+PYFILES = $(shell find ../ -name $(VENV) -prune -o -name ooinstall.egg-info -prune -o -name test -prune -o -name "*.py" -print)
+
sdist: clean
python setup.py sdist
rm -fR $(SHORTNAME).egg-info
@@ -86,7 +90,13 @@ ci-pylint: $(VENV)
@echo "#############################################"
@echo "# Running PyLint Tests in virtualenv"
@echo "#############################################"
- . $(VENV)/bin/activate && python -m pylint --rcfile ../git/.pylintrc $(shell find ../ -name $(VENV) -prune -o -name ooinstall.egg-info -prune -o -name test -prune -o -name "*.py" -print)
+ . $(VENV)/bin/activate && python -m pylint --rcfile ../git/.pylintrc $(PYFILES)
+
+ci-yamllint: $(VENV)
+ @echo "#############################################"
+ @echo "# Running yamllint Tests in virtualenv"
+ @echo "#############################################"
+ @. $(VENV)/bin/activate && yamllint -c ../git/.yamllint $(YAMLFILES)
ci-list-deps: $(VENV)
@echo "#############################################"
@@ -101,9 +111,9 @@ ci-flake8: $(VENV)
. $(VENV)/bin/activate && flake8 --config=setup.cfg ../ --exclude="utils,../inventory"
. $(VENV)/bin/activate && python setup.py flake8
-ci: ci-list-deps ci-unittests ci-flake8 ci-pylint
+ci: ci-list-deps ci-unittests ci-flake8 ci-pylint ci-yamllint
@echo
@echo "##################################################################################"
@echo "VIEW CODE COVERAGE REPORT WITH 'xdg-open cover/index.html' or run 'make viewcover'"
@echo "To clean your test environment run 'make clean'"
- @echo "Other targets you may run with 'make': 'ci-pylint', 'ci-unittests', 'ci-flake8'"
+ @echo "Other targets you may run with 'make': 'ci-pylint', 'ci-unittests', 'ci-flake8', 'ci-yamllint'"
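The new ci-yamllint target slots in beside the pylint and flake8 checks and is added to the default ci run, so every YAML file outside the skipped files/ folders is linted on each CI pass; locally, 'make ci-yamllint' runs just this check. The rules come from git/.yamllint, added earlier in this commit; that file is itself ordinary YAML. Purely as a sketch of the config format (the real rule values live in git/.yamllint, not here):

    ---
    # hypothetical shape of a yamllint config; see git/.yamllint
    # for the rules this commit actually enforces
    extends: default
    rules:
      line-length:
        max: 120
      indentation:
        indent-sequences: consistent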
diff --git a/utils/test-requirements.txt b/utils/test-requirements.txt
index eeaf106ec..b70a03563 100644
--- a/utils/test-requirements.txt
+++ b/utils/test-requirements.txt
@@ -10,3 +10,4 @@ PyYAML
click
backports.functools_lru_cache
pyOpenSSL
+yamllint