-rw-r--r--  .tito/packages/openshift-ansible  2
-rw-r--r--  README.md  10
-rw-r--r--  openshift-ansible.spec  52
-rw-r--r--  playbooks/aws/README.md  10
-rw-r--r--  playbooks/aws/openshift-cluster/uninstall_s3.yml  10
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_scale_groups.yml (renamed from playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_scale_groups.yml)  0
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/config.yml  4
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml  2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml  32
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml  52
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml  26
-rw-r--r--  playbooks/init/base_packages.yml  3
-rw-r--r--  playbooks/openshift-etcd/private/upgrade_main.yml  33
-rw-r--r--  playbooks/openshift-hosted/private/redeploy-registry-certificates.yml  3
-rw-r--r--  playbooks/openshift-hosted/private/redeploy-router-certificates.yml  3
-rw-r--r--  playbooks/openshift-master/private/tasks/restart_services.yml  4
-rw-r--r--  roles/container_runtime/templates/crio-network.j2  6
-rw-r--r--  roles/openshift_aws/defaults/main.yml  45
-rw-r--r--  roles/openshift_aws/tasks/uninstall_s3.yml  26
l---------  roles/openshift_certificate_expiry/examples/playbooks  2
-rw-r--r--  roles/openshift_cloud_provider/defaults/main.yml  4
-rw-r--r--  roles/openshift_cloud_provider/tasks/gce.yml  18
-rw-r--r--  roles/openshift_daemonset_config/defaults/main.yml  13
-rw-r--r--  roles/openshift_daemonset_config/tasks/main.yml  13
-rw-r--r--  roles/openshift_daemonset_config/templates/daemonset.yml.j2  26
-rw-r--r--  roles/openshift_hosted/tasks/storage/glusterfs.yml  2
-rw-r--r--  roles/openshift_metrics/tasks/generate_cassandra_pvcs.yaml  46
-rw-r--r--  roles/openshift_metrics/tasks/install_cassandra.yaml  35
-rw-r--r--  roles/openshift_node/tasks/storage_plugins/iscsi.yml  1
-rw-r--r--  roles/openshift_openstack/defaults/main.yml  5
-rw-r--r--  roles/openshift_openstack/templates/user_data.j2  4
-rw-r--r--  roles/openshift_prometheus/templates/prometheus.j2  2
-rw-r--r--  roles/openshift_prometheus/vars/default_images.yml  2
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml  5
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml  2
-rw-r--r--  roles/openshift_version/tasks/first_master.yml  4
-rw-r--r--  roles/openshift_version/tasks/first_master_containerized_version.yml  1
-rw-r--r--  roles/openshift_version/tasks/first_master_rpm_version.yml  1
38 files changed, 319 insertions, 190 deletions
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index bdfa06c4a..120ce408f 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.9.0-0.35.0 ./
+3.9.0-0.38.0 ./
diff --git a/README.md b/README.md
index 609930dcd..55dc146a2 100644
--- a/README.md
+++ b/README.md
@@ -74,6 +74,16 @@ Fedora:
dnf install -y ansible pyOpenSSL python-cryptography python-lxml
```
+Additional requirements:
+
+Logging:
+
+- java-1.8.0-openjdk-headless
+
+Metrics:
+
+- httpd-tools
+
## Simple all-in-one localhost Installation
This assumes that you've installed the base dependencies and you're running on
Fedora or RHEL
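On Fedora, following the dnf pattern shown above, the extra packages named in this hunk would be pulled in with:

```
dnf install -y java-1.8.0-openjdk-headless httpd-tools
```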
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index 48f666a07..d14eb56cb 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -10,7 +10,7 @@
Name: openshift-ansible
Version: 3.9.0
-Release: 0.35.0%{?dist}
+Release: 0.38.0%{?dist}
Summary: Openshift and Atomic Enterprise Ansible
License: ASL 2.0
URL: https://github.com/openshift/openshift-ansible
@@ -201,6 +201,56 @@ Atomic OpenShift Utilities includes
%changelog
+* Mon Feb 05 2018 Justin Pierce <jupierce@redhat.com> 3.9.0-0.38.0
+- Moving upgrade sg playbook to 3.9 (kwoodson@redhat.com)
+- remove openshift_upgrade_{pre,post}_storage_migration_enabled from
+ failed_when (nakayamakenjiro@gmail.com)
+- Fix version handling in 3.8/3.9 control plane upgrades (rteague@redhat.com)
+- add S3 bucket cleanup (jdiaz@redhat.com)
+- dynamic inventory bug when group exists but its empty (m.judeikis@gmail.com)
+- dynamic inventory bug when group exists but its empty (m.judeikis@gmail.com)
+- Parameterize user and disable_root options in cloud config
+ (nelluri@redhat.com)
+- Fix softlinks broken by d3fefc32a727fe3c13159c4e9fe4399f35b487a8
+ (Klaas-@users.noreply.github.com)
+
+* Fri Feb 02 2018 Justin Pierce <jupierce@redhat.com> 3.9.0-0.37.0
+- Don't use 'omit' for package module (vrutkovs@redhat.com)
+- Adding requirements for logging and metrics (ewolinet@redhat.com)
+- Disable master controllers before upgrade and re-enable those when restart
+ mode is system (vrutkovs@redhat.com)
+- upgrade: run upgrade_control_plane and upgrade_nodes playbooks during full
+ upgrade (vrutkovs@redhat.com)
+
+* Fri Feb 02 2018 Justin Pierce <jupierce@redhat.com> 3.9.0-0.36.0
+- Add missing tasks file (sdodson@redhat.com)
+- Upgrade to migrate to using push to DNS for registries. (kwoodson@redhat.com)
+- Adding defaults for the gcp variables to fix an undefined ansible exception.
+ (kwoodson@redhat.com)
+- Fix vsphere sanitization (sdodson@redhat.com)
+- Set a default for required vsphere variable (sdodson@redhat.com)
+- Add python2-crypto package (ccoleman@redhat.com)
+- hosts.example: clarify usage of openshift_master_cluster_public_hostname
+ (vrutkovs@redhat.com)
+- Conditionally create pvcs for metrics depending on whether or not it already
+ exists (ewolinet@redhat.com)
+- Update hosts examples with a note about scheduling on masters
+ (vrutkovs@redhat.com)
+- Fixing file write issue. (kwoodson@redhat.com)
+- Only perform console configmap ops when >= 3.9 (sdodson@redhat.com)
+- Remove playbooks/adhoc/openshift_hosted_logging_efk.yaml (sdodson@redhat.com)
+- upgrades: use openshift_version as a regexp when checking
+ openshift.common.version (vrutkovs@redhat.com)
+- Don't update master-config.yaml with logging/metrics urls >= 3.9
+ (sdodson@redhat.com)
+- Make master schedulable (vrutkovs@redhat.com)
+- Re-add openshift_aws_elb_cert_arn. (abutcher@redhat.com)
+- Ignore openshift_pkg_version during 3.8 upgrade (rteague@redhat.com)
+- bug 1537857. Fix retrieving prometheus metrics (jcantril@redhat.com)
+- Remove master_ha bool checks (mgugino@redhat.com)
+- Don't restart docker when re-deploying node certificates (sdodson@redhat.com)
+- vsphere storage default add (davis.phillips@gmail.com)
+
* Wed Jan 31 2018 Justin Pierce <jupierce@redhat.com> 3.9.0-0.35.0
- add glusterblock support for ansible (m.judeikis@gmail.com)
- Add a bare minimum localhost hosts file (sdodson@redhat.com)
diff --git a/playbooks/aws/README.md b/playbooks/aws/README.md
index bdc98d1e0..cf811ca84 100644
--- a/playbooks/aws/README.md
+++ b/playbooks/aws/README.md
@@ -201,9 +201,7 @@ There are more enhancements that are arriving for provisioning. These will incl
## Uninstall / Deprovisioning
-At this time, only deprovisioning of the output of the prerequisites step is provided. You can/must manually remove things like ELBs and scale groups before attempting to undo the work by the preprovisiong step.
-
-To undo the work done by the prerequisites playbook, simply call the uninstall_prerequisites.yml playbook. You should use the same inventory file and provisioning_vars.yml file that was used during provisioning.
+To undo the work done by the prerequisites playbook, simply call the uninstall_prerequisites.yml playbook. You must first manually remove any other objects (ELBs, instances, etc.) before attempting. You should use the same inventory file and provisioning_vars.yml file that was used during provisioning.
```
ansible-playbook -i <previous inventory file> -e @<previous provisioning_vars file> uninstall_prerequisites.yml
@@ -211,4 +209,10 @@ ansible-playbook -i <previous inventory file> -e @<previous provisioning_vars fi
This should result in removal of the security groups and VPC that were created.
+Cleaning up the S3 bucket contents can be accomplished with:
+
+```
+ansible-playbook -i <previous inventory file> -e @<previous provisioning_vars file> uninstall_s3.yml
+```
+
NOTE: If you want to also remove the ssh keys that were uploaded (**these ssh keys would be shared if you are running multiple clusters in the same AWS account** so we don't remove these by default) then you should add 'openshift_aws_enable_uninstall_shared_objects: True' to your provisioning_vars.yml file.
diff --git a/playbooks/aws/openshift-cluster/uninstall_s3.yml b/playbooks/aws/openshift-cluster/uninstall_s3.yml
new file mode 100644
index 000000000..448b47aee
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/uninstall_s3.yml
@@ -0,0 +1,10 @@
+---
+- name: Empty/delete s3 bucket
+ hosts: localhost
+ connection: local
+ tasks:
+ - name: empty/delete s3 bucket
+ include_role:
+ name: openshift_aws
+ tasks_from: uninstall_s3.yml
+ when: openshift_aws_create_s3 | default(true) | bool
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_scale_groups.yml b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_scale_groups.yml
index 23a3fcbb5..23a3fcbb5 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_scale_groups.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_scale_groups.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/config.yml b/playbooks/common/openshift-cluster/upgrades/pre/config.yml
index edc541ef9..44af37b2d 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/config.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/config.yml
@@ -5,8 +5,6 @@
# Pre-upgrade
- import_playbook: ../initialize_nodes_to_upgrade.yml
-- import_playbook: verify_cluster.yml
-
- name: Update repos on upgrade hosts
hosts: "{{ l_upgrade_repo_hosts }}"
roles:
@@ -53,6 +51,8 @@
# l_openshift_version_set_hosts is passed via upgrade_control_plane.yml
# l_openshift_version_check_hosts is passed via upgrade_control_plane.yml
+- import_playbook: verify_cluster.yml
+
# If we're only upgrading nodes, we need to ensure masters are already upgraded
- name: Verify masters are already upgraded
hosts: oo_masters_to_config
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml
index 5ee8a9d78..463a05688 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml
@@ -17,6 +17,7 @@
valid version for a {{ openshift_upgrade_target }} upgrade
when:
- openshift_pkg_version is defined
+ - openshift_pkg_version != ""
- openshift_pkg_version.split('-',1).1 is version_compare(openshift_upgrade_target ,'<')
- fail:
@@ -25,6 +26,7 @@
valid version for a {{ openshift_upgrade_target }} upgrade
when:
- openshift_image_tag is defined
+ - openshift_image_tag != ""
- openshift_image_tag.split('v',1).1 is version_compare(openshift_upgrade_target ,'<')
- set_fact:
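The extra `!= ""` guards matter because the reworked 3.9 control-plane playbook (further down in this diff) deliberately sets `openshift_pkg_version: ''` for the intermediate 3.8 hop; an empty string is still "defined", and `''.split('-',1).1` aborts with a list-index error before `version_compare` ever runs. A minimal standalone reproduction sketch:

```
- hosts: localhost
  gather_facts: no
  vars:
    openshift_pkg_version: ''
  tasks:
    # without the != "" guard this fails: "list object has no element 1"
    - debug:
        msg: "{{ openshift_pkg_version.split('-', 1).1 }}"
```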
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index c27118f6f..baec057f9 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -3,29 +3,6 @@
# Upgrade Masters
###############################################################################
-# Prior to 3.6, openshift-ansible created etcd serving certificates
-# without a SubjectAlternativeName entry for the system hostname. The
-# SAN list in Go 1.8 is now (correctly) authoritative and since
-# openshift-ansible configures masters to talk to etcd hostnames
-# rather than IP addresses, we must correct etcd certificates.
-#
-# This play examines the etcd serving certificate SANs on each etcd
-# host and records whether or not the system hostname is missing.
-- name: Examine etcd serving certificate SAN
- hosts: oo_etcd_to_config
- tasks:
- - slurp:
- src: /etc/etcd/server.crt
- register: etcd_serving_cert
- - set_fact:
- __etcd_cert_lacks_hostname: "{{ (openshift.common.hostname not in (etcd_serving_cert.content | b64decode | lib_utils_oo_parse_certificate_san)) | bool }}"
-
-# Redeploy etcd certificates when hostnames were missing from etcd
-# serving certificate SANs.
-- import_playbook: ../../../openshift-etcd/redeploy-certificates.yml
- when:
- - true in hostvars | lib_utils_oo_select_keys(groups['oo_etcd_to_config']) | lib_utils_oo_collect('__etcd_cert_lacks_hostname') | default([false])
-
- name: Backup and upgrade etcd
import_playbook: ../../../openshift-etcd/private/upgrade_main.yml
@@ -56,7 +33,6 @@
register: l_pb_upgrade_control_plane_pre_upgrade_storage
when: openshift_upgrade_pre_storage_migration_enabled | default(true) | bool
failed_when:
- - openshift_upgrade_pre_storage_migration_enabled | default(true) | bool
- l_pb_upgrade_control_plane_pre_upgrade_storage.rc != 0
- openshift_upgrade_pre_storage_migration_fatal | default(true) | bool
@@ -94,6 +70,12 @@
- include_tasks: "{{ openshift_master_upgrade_hook }}"
when: openshift_master_upgrade_hook is defined
+ - name: Disable master controller
+ service:
+ name: "{{ openshift_service_type }}-master-controllers"
+ enabled: false
+ when: openshift.common.rolling_restart_mode == 'system'
+
- include_tasks: ../../../openshift-master/private/tasks/restart_hosts.yml
when: openshift.common.rolling_restart_mode == 'system'
@@ -116,7 +98,6 @@
- openshift_upgrade_post_storage_migration_enabled | default(true) | bool
- openshift_version is version_compare('3.7','<')
failed_when:
- - openshift_upgrade_post_storage_migration_enabled | default(true) | bool
- l_pb_upgrade_control_plane_post_upgrade_storage.rc != 0
- openshift_upgrade_post_storage_migration_fatal | default(false) | bool
run_once: true
@@ -252,7 +233,6 @@
register: l_pb_upgrade_control_plane_post_upgrade_storage
when: openshift_upgrade_post_storage_migration_enabled | default(true) | bool
failed_when:
- - openshift_upgrade_post_storage_migration_enabled | default(true) | bool
- l_pb_upgrade_control_plane_post_upgrade_storage.rc != 0
- openshift_upgrade_post_storage_migration_fatal | default(false) | bool
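Dropping `openshift_upgrade_{pre,post}_storage_migration_enabled` from `failed_when` is safe because the same flag already gates each task's `when:`, and Ansible never evaluates `failed_when` on a skipped task; repeating the flag only obscured the two conditions that actually decide failure. The surviving shape, sketched with a placeholder command:

```
- command: /usr/bin/true   # placeholder for the storage migration command
  register: l_storage
  when: openshift_upgrade_pre_storage_migration_enabled | default(true) | bool
  failed_when:
    - l_storage.rc != 0
    - openshift_upgrade_pre_storage_migration_fatal | default(true) | bool
```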
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
index bf6e8605e..ec1da6d39 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
@@ -2,54 +2,6 @@
#
# Full Control Plane + Nodes Upgrade
#
-- import_playbook: ../init.yml
+- import_playbook: upgrade_control_plane.yml
-- name: Configure the upgrade target for the common upgrade tasks
- hosts: oo_all_hosts
- tasks:
- - set_fact:
- openshift_upgrade_target: '3.9'
- openshift_upgrade_min: '3.7'
- openshift_release: '3.9'
-
-- import_playbook: ../pre/config.yml
- vars:
- l_upgrade_repo_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config"
- l_upgrade_no_proxy_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
- l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
- l_upgrade_verify_targets_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
- l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config"
- l_upgrade_excluder_hosts: "oo_nodes_to_config:oo_masters_to_config"
- openshift_protect_installed_version: False
-
-- import_playbook: validator.yml
-
-- name: Flag pre-upgrade checks complete for hosts without errors
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- tasks:
- - set_fact:
- pre_upgrade_complete: True
-
-# Pre-upgrade completed
-
-- import_playbook: ../upgrade_control_plane.yml
-
-# All controllers must be stopped at the same time then restarted
-- name: Cycle all controller services to force new leader election mode
- hosts: oo_masters_to_config
- gather_facts: no
- roles:
- - role: openshift_facts
- tasks:
- - name: Stop {{ openshift_service_type }}-master-controllers
- systemd:
- name: "{{ openshift_service_type }}-master-controllers"
- state: stopped
- - name: Start {{ openshift_service_type }}-master-controllers
- systemd:
- name: "{{ openshift_service_type }}-master-controllers"
- state: started
-
-- import_playbook: ../upgrade_nodes.yml
-
-- import_playbook: ../post_control_plane.yml
+- import_playbook: upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
index c8a42322d..8792295c6 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
@@ -26,6 +26,7 @@
openshift_upgrade_min: '3.7'
openshift_release: '3.8'
_requested_pkg_version: "{{ openshift_pkg_version if openshift_pkg_version is defined else omit }}"
+ openshift_pkg_version: ''
_requested_image_tag: "{{ openshift_image_tag if openshift_image_tag is defined else omit }}"
l_double_upgrade_cp: True
when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version | version_compare('3.8','<')
@@ -61,10 +62,8 @@
# Pre-upgrade completed
-- import_playbook: ../upgrade_control_plane.yml
- vars:
- openshift_release: '3.8'
- openshift_pkg_version: ''
+- name: Intermediate 3.8 Upgrade
+ import_playbook: ../upgrade_control_plane.yml
when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version | version_compare('3.8','<')
## 3.8 upgrade complete we should now be able to upgrade to 3.9
@@ -77,7 +76,7 @@
openshift_upgrade_target: '3.9'
openshift_upgrade_min: '3.8'
openshift_release: '3.9'
- openshift_pkg_version: "{{ _requested_pkg_version | default ('-3.9*') }}"
+ openshift_pkg_version: "{{ _requested_pkg_version if _requested_pkg_version is defined else '' }}"
# Set the user's specified image_tag for 3.9 upgrade if it was provided.
- set_fact:
openshift_image_tag: "{{ _requested_image_tag }}"
@@ -106,6 +105,7 @@
l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
l_upgrade_excluder_hosts: "oo_masters_to_config"
openshift_protect_installed_version: False
+ openshift_version_reinit: True
- name: Flag pre-upgrade checks complete for hosts without errors
hosts: oo_masters_to_config:oo_etcd_to_config
@@ -114,8 +114,6 @@
pre_upgrade_complete: True
- import_playbook: ../upgrade_control_plane.yml
- vars:
- openshift_release: '3.9'
# All controllers must be stopped at the same time then restarted
- name: Cycle all controller services to force new leader election mode
@@ -124,14 +122,16 @@
roles:
- role: openshift_facts
tasks:
- - name: Stop {{ openshift_service_type }}-master-controllers
- systemd:
+ - name: Restart master controllers to force new leader election mode
+ service:
name: "{{ openshift_service_type }}-master-controllers"
- state: stopped
- - name: Start {{ openshift_service_type }}-master-controllers
- systemd:
+ state: restarted
+ when: openshift.common.rolling_restart_mode == 'service'
+ - name: Re-enable master controllers to force new leader election mode
+ service:
name: "{{ openshift_service_type }}-master-controllers"
- state: started
+ enabled: true
+ when: openshift.common.rolling_restart_mode == 'system'
- import_playbook: ../post_control_plane.yml
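The interesting mechanics in this file are the stash/restore of any user-requested version around the forced 3.7 → 3.8 → 3.9 double hop: the requested value is parked in `_requested_pkg_version`, the 3.8 pass runs against an empty `openshift_pkg_version`, and the request is restored for the 3.9 pass. Condensed from the hunks above:

```
- set_fact:
    _requested_pkg_version: "{{ openshift_pkg_version if openshift_pkg_version is defined else omit }}"
    openshift_pkg_version: ''        # let the 3.8 hop pick its own default
# ... intermediate 3.8 control plane upgrade runs here ...
- set_fact:
    openshift_pkg_version: "{{ _requested_pkg_version if _requested_pkg_version is defined else '' }}"
```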
diff --git a/playbooks/init/base_packages.yml b/playbooks/init/base_packages.yml
index 0a730a88a..81f4dd183 100644
--- a/playbooks/init/base_packages.yml
+++ b/playbooks/init/base_packages.yml
@@ -16,8 +16,9 @@
- iproute
- "{{ 'python3-dbus' if ansible_distribution == 'Fedora' else 'dbus-python' }}"
- "{{ 'python3-PyYAML' if ansible_distribution == 'Fedora' else 'PyYAML' }}"
- - "{{ 'python-ipaddress' if ansible_distribution != 'Fedora' else omit }}"
+ - "{{ 'python-ipaddress' if ansible_distribution != 'Fedora' else '' }}"
- yum-utils
+ when: item != ''
register: result
until: result is succeeded
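The switch from `omit` to `''` fixes a subtle issue: `omit` only has its special meaning when passed directly as a module argument, so as a `with_items` element it reaches the package module as a literal placeholder string. An empty string plus a `when: item != ''` guard skips the element cleanly:

```
- package:
    name: "{{ item }}"
    state: present
  with_items:
    - iproute
    - "{{ 'python-ipaddress' if ansible_distribution != 'Fedora' else '' }}"
  when: item != ''
```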
diff --git a/playbooks/openshift-etcd/private/upgrade_main.yml b/playbooks/openshift-etcd/private/upgrade_main.yml
index 8997680f9..fea588260 100644
--- a/playbooks/openshift-etcd/private/upgrade_main.yml
+++ b/playbooks/openshift-etcd/private/upgrade_main.yml
@@ -1,4 +1,37 @@
---
+# Prior to 3.6, openshift-ansible created etcd serving certificates
+# without a SubjectAlternativeName entry for the system hostname. The
+# SAN list in Go 1.8 is now (correctly) authoritative and since
+# openshift-ansible configures masters to talk to etcd hostnames
+# rather than IP addresses, we must correct etcd certificates.
+#
+# This play examines the etcd serving certificate SANs on each etcd
+# host and records whether or not the system hostname is missing.
+- name: Examine etcd serving certificate SAN
+ hosts: oo_etcd_to_config
+ tasks:
+ - slurp:
+ src: /etc/etcd/server.crt
+ register: etcd_serving_cert
+ - set_fact:
+ __etcd_cert_lacks_hostname: "{{ (openshift.common.hostname not in (etcd_serving_cert.content | b64decode | lib_utils_oo_parse_certificate_san)) | bool }}"
+
+# Redeploy etcd certificates when hostnames were missing from etcd
+# serving certificate SANs.
+- import_playbook: redeploy-certificates.yml
+ when:
+ - true in hostvars | lib_utils_oo_select_keys(groups['oo_etcd_to_config']) | lib_utils_oo_collect('__etcd_cert_lacks_hostname') | default([false])
+
+- import_playbook: restart.yml
+ vars:
+ g_etcd_certificates_expired: "{{ ('expired' in (hostvars | lib_utils_oo_select_keys(groups['etcd']) | lib_utils_oo_collect('check_results.check_results.etcd') | lib_utils_oo_collect('health'))) | bool }}"
+ when:
+ - true in hostvars | lib_utils_oo_select_keys(groups['oo_etcd_to_config']) | lib_utils_oo_collect('__etcd_cert_lacks_hostname') | default([false])
+
+- import_playbook: ../../openshift-master/private/restart.yml
+ when:
+ - true in hostvars | lib_utils_oo_select_keys(groups['oo_etcd_to_config']) | lib_utils_oo_collect('__etcd_cert_lacks_hostname') | default([false])
+
# For 1.4/3.4 we want to upgrade everyone to etcd-3.0. etcd docs say to
# upgrade from 2.0.x to 2.1.x to 2.2.x to 2.3.x to 3.0.x. While this is a tedious
# task for RHEL and CENTOS it's simply not possible in Fedora unless you've
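Relocating the SAN probe here means the certificate check and redeploy run exactly when etcd is upgraded, followed by etcd and master restarts so the corrected certificates take effect. To inspect the same SAN list by hand on an etcd host (standard openssl usage):

```
openssl x509 -in /etc/etcd/server.crt -noout -text | grep -A1 'Subject Alternative Name'
```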
diff --git a/playbooks/openshift-hosted/private/redeploy-registry-certificates.yml b/playbooks/openshift-hosted/private/redeploy-registry-certificates.yml
index b817221b8..d88209593 100644
--- a/playbooks/openshift-hosted/private/redeploy-registry-certificates.yml
+++ b/playbooks/openshift-hosted/private/redeploy-registry-certificates.yml
@@ -88,8 +88,7 @@
- name: Redeploy docker registry
command: >
- {{ openshift_client_binary }} deploy dc/docker-registry
- --latest
+ {{ openshift_client_binary }} rollout latest dc/docker-registry
--config={{ mktemp.stdout }}/admin.kubeconfig
-n default
diff --git a/playbooks/openshift-hosted/private/redeploy-router-certificates.yml b/playbooks/openshift-hosted/private/redeploy-router-certificates.yml
index 0df748f47..952a5f4ee 100644
--- a/playbooks/openshift-hosted/private/redeploy-router-certificates.yml
+++ b/playbooks/openshift-hosted/private/redeploy-router-certificates.yml
@@ -129,8 +129,7 @@
- name: Redeploy router
command: >
- {{ openshift_client_binary }} deploy dc/router
- --latest
+ {{ openshift_client_binary }} rollout latest dc/router
--config={{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig
-n default
diff --git a/playbooks/openshift-master/private/tasks/restart_services.yml b/playbooks/openshift-master/private/tasks/restart_services.yml
new file mode 100644
index 000000000..cf2c282e3
--- /dev/null
+++ b/playbooks/openshift-master/private/tasks/restart_services.yml
@@ -0,0 +1,4 @@
+---
+- import_role:
+ name: openshift_master
+ tasks_from: restart.yml
diff --git a/roles/container_runtime/templates/crio-network.j2 b/roles/container_runtime/templates/crio-network.j2
index 763be97d7..ae8a506fe 100644
--- a/roles/container_runtime/templates/crio-network.j2
+++ b/roles/container_runtime/templates/crio-network.j2
@@ -1,9 +1,9 @@
{% if 'http_proxy' in openshift.common %}
-HTTP_PROXY={{ openshift.common.http_proxy }}
+export HTTP_PROXY={{ openshift.common.http_proxy }}
{% endif %}
{% if 'https_proxy' in openshift.common %}
-HTTPS_PROXY={{ openshift.common.https_proxy }}
+export HTTPS_PROXY={{ openshift.common.https_proxy }}
{% endif %}
{% if 'no_proxy' in openshift.common %}
-NO_PROXY={{ openshift.common.no_proxy }}
+export NO_PROXY={{ openshift.common.no_proxy }}
{% endif %}
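Adding `export` matters if this file is sourced by a shell (for example from a wrapper script) rather than parsed as a systemd EnvironmentFile: without it the proxy variables stay local to the sourcing shell and never reach child processes. A rendered example, with placeholder proxy values:

```
export HTTP_PROXY=http://proxy.example.com:3128
export HTTPS_PROXY=http://proxy.example.com:3128
export NO_PROXY=.cluster.local,.svc,localhost
```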
diff --git a/roles/openshift_aws/defaults/main.yml b/roles/openshift_aws/defaults/main.yml
index c8d385db5..3d966e34a 100644
--- a/roles/openshift_aws/defaults/main.yml
+++ b/roles/openshift_aws/defaults/main.yml
@@ -173,18 +173,20 @@ openshift_aws_node_groups:
openshift_aws_created_asgs: []
openshift_aws_current_asgs: []
+openshift_aws_scale_group_health_check:
+ period: 60
+ type: EC2
+
# these will be used during upgrade
openshift_aws_master_group_config:
# The 'master' key is always required here.
master:
- instance_type: m4.xlarge
+ instance_type: "{{ openshift_aws_master_group_instance_type | default('m4.xlarge') }}"
volumes: "{{ openshift_aws_node_group_config_master_volumes }}"
- health_check:
- period: 60
- type: EC2
- min_size: 3
- max_size: 3
- desired_size: 3
+ health_check: "{{ openshift_aws_scale_group_health_check }}"
+ min_size: "{{ openshift_aws_master_group_min_size | default(3) }}"
+ max_size: "{{ openshift_aws_master_group_max_size | default(3) }}"
+ desired_size: "{{ openshift_aws_master_group_desired_size | default(3) }}"
wait_for_instances: True
termination_policy: "{{ openshift_aws_node_group_termination_policy }}"
replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}"
@@ -196,14 +198,12 @@ openshift_aws_master_group_config:
openshift_aws_node_group_config:
# The 'compute' key is always required here.
compute:
- instance_type: m4.xlarge
+ instance_type: "{{ openshift_aws_compute_group_instance_type | default('m4.xlarge') }}"
volumes: "{{ openshift_aws_node_group_config_node_volumes }}"
- health_check:
- period: 60
- type: EC2
- min_size: 3
- max_size: 100
- desired_size: 3
+ health_check: "{{ openshift_aws_scale_group_health_check }}"
+ min_size: "{{ openshift_aws_compute_group_min_size | default(3) }}"
+ max_size: "{{ openshift_aws_compute_group_max_size | default(100) }}"
+ desired_size: "{{ openshift_aws_compute_group_desired_size | default(3) }}"
termination_policy: "{{ openshift_aws_node_group_termination_policy }}"
replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}"
iam_role: "{{ openshift_aws_iam_role_name }}"
@@ -211,14 +211,12 @@ openshift_aws_node_group_config:
policy_json: "{{ openshift_aws_iam_role_policy_json }}"
# The 'infra' key is always required here.
infra:
- instance_type: m4.xlarge
+ instance_type: "{{ openshift_aws_infra_group_instance_type | default('m4.xlarge') }}"
volumes: "{{ openshift_aws_node_group_config_node_volumes }}"
- health_check:
- period: 60
- type: EC2
- min_size: 2
- max_size: 20
- desired_size: 2
+ health_check: "{{ openshift_aws_scale_group_health_check }}"
+ min_size: "{{ openshift_aws_infra_group_min_size | default(2) }}"
+ max_size: "{{ openshift_aws_infra_group_max_size | default(20) }}"
+ desired_size: "{{ openshift_aws_infra_group_desired_size | default(2) }}"
termination_policy: "{{ openshift_aws_node_group_termination_policy }}"
replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}"
iam_role: "{{ openshift_aws_iam_role_name }}"
@@ -322,3 +320,8 @@ openshift_aws_masters_groups: masters,etcd,nodes
# By default, don't delete things like the shared IAM instance
# profile and uploaded ssh keys
openshift_aws_enable_uninstall_shared_objects: False
+# S3 bucket names are global by default and can take minutes/hours for the
+# name to become available for re-use (assuming someone doesn't take the
+# name in the meantime). Default to just emptying the contents of the S3
+# bucket if we've been asked to create the bucket during provisioning.
+openshift_aws_really_delete_s3_bucket: False
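With every size and instance type wrapped in `default()`, a deployer can now override a single knob from provisioning_vars.yml without redefining the whole `openshift_aws_node_group_config` structure. For example (values are hypothetical):

```
# provisioning_vars.yml
openshift_aws_master_group_instance_type: m4.2xlarge
openshift_aws_compute_group_min_size: 5
openshift_aws_compute_group_max_size: 50
```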
diff --git a/roles/openshift_aws/tasks/uninstall_s3.yml b/roles/openshift_aws/tasks/uninstall_s3.yml
new file mode 100644
index 000000000..0b08cbeed
--- /dev/null
+++ b/roles/openshift_aws/tasks/uninstall_s3.yml
@@ -0,0 +1,26 @@
+---
+- name: empty S3 bucket
+ block:
+ - name: get S3 object list
+ aws_s3:
+ bucket: "{{ openshift_aws_s3_bucket_name }}"
+ mode: list
+ region: "{{ openshift_aws_region }}"
+ register: s3_out
+
+ - name: delete S3 objects
+ aws_s3:
+ bucket: "{{ openshift_aws_s3_bucket_name }}"
+ mode: delobj
+ object: "{{ item }}"
+ with_items: "{{ s3_out.s3_keys }}"
+ when: openshift_aws_create_s3 | bool
+
+- name: delete S3 bucket
+ aws_s3:
+ bucket: "{{ openshift_aws_s3_bucket_name }}"
+ mode: delete
+ region: "{{ openshift_aws_region }}"
+ when:
+ - openshift_aws_create_s3 | bool
+ - openshift_aws_really_delete_s3_bucket | bool
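The task file is the aws_s3-module equivalent of emptying and (optionally) removing the bucket with the AWS CLI; roughly, with a placeholder bucket name:

```
aws s3 rm s3://my-cluster-registry --recursive   # empty the bucket
aws s3 rb s3://my-cluster-registry               # only with openshift_aws_really_delete_s3_bucket=True
```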
diff --git a/roles/openshift_certificate_expiry/examples/playbooks b/roles/openshift_certificate_expiry/examples/playbooks
index 586afb0d5..751c3d14e 120000
--- a/roles/openshift_certificate_expiry/examples/playbooks
+++ b/roles/openshift_certificate_expiry/examples/playbooks
@@ -1 +1 @@
-../../../playbooks/certificate_expiry \ No newline at end of file
+../../../playbooks/openshift-checks/certificate_expiry \ No newline at end of file
diff --git a/roles/openshift_cloud_provider/defaults/main.yml b/roles/openshift_cloud_provider/defaults/main.yml
new file mode 100644
index 000000000..37cbf5603
--- /dev/null
+++ b/roles/openshift_cloud_provider/defaults/main.yml
@@ -0,0 +1,4 @@
+---
+openshift_gcp_project: ''
+openshift_gcp_prefix: ''
+openshift_gcp_network_name: "{{ openshift_gcp_prefix }}network"
diff --git a/roles/openshift_cloud_provider/tasks/gce.yml b/roles/openshift_cloud_provider/tasks/gce.yml
index 395bd304c..9e1c31b1d 100644
--- a/roles/openshift_cloud_provider/tasks/gce.yml
+++ b/roles/openshift_cloud_provider/tasks/gce.yml
@@ -1,4 +1,12 @@
---
+- name: check variables are passed
+ fail:
+ msg: "Ensure correct variables are defined for gcp. {{ item }}"
+ when: item == ''
+ with_items:
+ - "{{ openshift_gcp_project }}"
+ - "{{ openshift_gcp_prefix }}"
+
# Work around ini_file create option in 2.2 which defaults to no
- name: Create cloud config file
file:
@@ -16,8 +24,8 @@
option: "{{ item.key }}"
value: "{{ item.value }}"
with_items:
- - { key: 'project-id', value: '{{ openshift_gcp_project }}' }
- - { key: 'network-name', value: '{{ openshift_gcp_network_name }}' }
- - { key: 'node-tags', value: '{{ openshift_gcp_prefix }}ocp' }
- - { key: 'node-instance-prefix', value: '{{ openshift_gcp_prefix }}' }
- - { key: 'multizone', value: 'false' }
+ - { key: 'project-id', value: '{{ openshift_gcp_project }}' }
+ - { key: 'network-name', value: '{{ openshift_gcp_network_name }}' }
+ - { key: 'node-tags', value: '{{ openshift_gcp_prefix }}ocp' }
+ - { key: 'node-instance-prefix', value: '{{ openshift_gcp_prefix }}' }
+ - { key: 'multizone', value: 'false' }
diff --git a/roles/openshift_daemonset_config/defaults/main.yml b/roles/openshift_daemonset_config/defaults/main.yml
index ebe5671d2..bb9803c2b 100644
--- a/roles/openshift_daemonset_config/defaults/main.yml
+++ b/roles/openshift_daemonset_config/defaults/main.yml
@@ -1,16 +1,19 @@
---
-openshift_daemonset_config_namespace: openshift-node
-openshift_daemonset_config_daemonset_name: ops-node-config
-openshift_daemonset_config_configmap_name: "{{ openshift_daemonset_config_daemonset_name }}"
+openshift_daemonset_config_image: "centos:7"
+openshift_daemonset_config_monitoring_image: "openshifttools/oso-centos7-host-monitoring:latest"
+openshift_daemonset_config_namespace: openshift-config
+openshift_daemonset_config_daemonset_name: node-config
+openshift_daemonset_config_configmap_name: "{{ openshift_daemonset_config_daemonset_name }}-configmap"
+openshift_daemonset_config_monitoring_pos: "false"
openshift_daemonset_config_node_selector:
config: config
-openshift_daemonset_config_sa_name: ops
+openshift_daemonset_config_sa_name: configurator
openshift_daemonset_config_configmap_files: {}
openshift_daemonset_config_configmap_literals: {}
openshift_daemonset_config_monitoring: False
openshift_daemonset_config_interval: 300
openshift_daemonset_config_script: config.sh
-openshift_daemonset_config_secret_name: operations-config-secret
+openshift_daemonset_config_secret_name: "{{ openshift_daemonset_config_daemonset_name }}-secret"
openshift_daemonset_config_secrets: {}
openshift_daemonset_config_runasuser: 0
openshift_daemonset_config_privileged: True
diff --git a/roles/openshift_daemonset_config/tasks/main.yml b/roles/openshift_daemonset_config/tasks/main.yml
index 450cc9dca..f8f42b771 100644
--- a/roles/openshift_daemonset_config/tasks/main.yml
+++ b/roles/openshift_daemonset_config/tasks/main.yml
@@ -1,4 +1,9 @@
---
+- name: create the namespace
+ oc_project:
+ state: present
+ name: "{{ openshift_daemonset_config_namespace }}"
+
- name: add a sa
oc_serviceaccount:
name: "{{ openshift_daemonset_config_sa_name }}"
@@ -25,11 +30,6 @@
dest: "{{ item.value }}"
with_dict: "{{ openshift_daemonset_config_configmap_files }}"
-- name: create the namespace
- oc_project:
- state: present
- name: "{{ openshift_daemonset_config_namespace }}"
-
- name: lay down secrets
oc_secret:
state: present
@@ -39,6 +39,7 @@
contents: "{{ openshift_daemonset_config_secrets }}"
when:
- openshift_daemonset_config_secrets != {}
+ register: secout
- name: create the configmap
oc_configmap:
@@ -47,6 +48,7 @@
namespace: "{{ openshift_daemonset_config_namespace }}"
from_literal: "{{ openshift_daemonset_config_configmap_literals }}"
from_file: "{{ openshift_daemonset_config_configmap_files }}"
+ register: cmout
- name: deploy daemonset
oc_obj:
@@ -56,3 +58,4 @@
kind: daemonset
files:
- /tmp/daemonset.yml
+ force: "{{ True if cmout.changed or secout.changed else False | bool }}"
diff --git a/roles/openshift_daemonset_config/templates/daemonset.yml.j2 b/roles/openshift_daemonset_config/templates/daemonset.yml.j2
index 9792f6d16..02cd5bcfd 100644
--- a/roles/openshift_daemonset_config/templates/daemonset.yml.j2
+++ b/roles/openshift_daemonset_config/templates/daemonset.yml.j2
@@ -33,7 +33,7 @@ spec:
hostIPC: true
containers:
- name: config
- image: centos:7
+ image: "{{ openshift_daemonset_config_image }}"
env:
- name: RESYNC_INTERVAL
value: "{{ openshift_daemonset_config_interval }}"
@@ -50,8 +50,8 @@ spec:
sh /opt/config/{{ openshift_daemonset_config_script }}
# sleep for ${RESYNC_INTERVAL} minutes, then loop. if we fail Kubelet will restart us again
- echo "Success, sleeping for ${RESYNC_INTERVAL}s"
- exec sleep ${RESYNC_INTERVAL}
+ echo "Success, sleeping for ${RESYNC_INTERVAL}s. Date: $(date)"
+ sleep ${RESYNC_INTERVAL}
# Return to perform the config
done
@@ -68,6 +68,8 @@ spec:
# Our node configuration
- mountPath: /opt/config
name: config
+ - mountPath: /opt/tmp_shared_config
+ name: tmp-shared-dir
{% if openshift_daemonset_config_secrets != {} %}
# Our delivered secrets
- mountPath: /opt/secrets
@@ -79,12 +81,14 @@ spec:
memory: {{ openshift_daemonset_config_resources.memory }}
{% if openshift_daemonset_config_monitoring %}
- name: monitoring
- image: openshifttools/oso-centos7-host-monitoring:latest
+ image: "{{ openshift_daemonset_config_monitoring_image }}"
+ env:
+ - name: OO_PAUSE_ON_START
+ value: "{{ openshift_daemonset_config_monitoring_pos }}"
securityContext:
# Must be root to read content
runAsUser: 0
privileged: true
-
volumeMounts:
- mountPath: /host
name: host
@@ -118,17 +122,23 @@ spec:
- mountPath: /host/var/cache/yum
subPath: var/cache/yum
name: host
- - mountPath: /container_setup/monitoring-config.yml
- subPath: monitoring-config.yaml
- name: config
+ readOnly: true
+ - mountPath: /container_setup
+ name: tmp-shared-dir
- mountPath: /opt/config
name: config
+{% if openshift_daemonset_config_secrets != {} %}
+ - mountPath: /opt/secrets
+ name: secrets
+{% endif %}
resources:
requests:
cpu: 10m
memory: 10Mi
{% endif %}
volumes:
+ - name: tmp-shared-dir
+ emptyDir: {}
- name: config
configMap:
name: {{ openshift_daemonset_config_configmap_name }}
diff --git a/roles/openshift_hosted/tasks/storage/glusterfs.yml b/roles/openshift_hosted/tasks/storage/glusterfs.yml
index b39c44b01..7223a5afe 100644
--- a/roles/openshift_hosted/tasks/storage/glusterfs.yml
+++ b/roles/openshift_hosted/tasks/storage/glusterfs.yml
@@ -35,7 +35,7 @@
mount:
state: mounted
fstype: glusterfs
- src: "{% if 'glusterfs_registry' in groups %}{% set node = groups.glusterfs_registry[0] %}{% elif 'glusterfs' in groups %}{% set node = groups.glusterfs[0] %}{% endif %}{% if openshift_hosted_registry_storage_glusterfs_ips is defined and openshift_hosted_registry_storage_glusterfs_ips|length > 0 %}{{ openshift_hosted_registry_storage_glusterfs_ips[0] }}{% elif 'glusterfs_hostname' in hostvars[node] %}{{ hostvars[node].glusterfs_hostname }}{% elif 'openshift' in hostvars[node] %}{{ hostvars[node].openshift.node.nodename }}{% else %}{{ node }}{% endif %}:/{{ openshift_hosted_registry_storage_glusterfs_path }}"
+ src: "{% if 'glusterfs_registry' in groups and groups['glusterfs_registry'] | length > 0 %}{% set node = groups.glusterfs_registry[0] %}{% elif 'glusterfs' in groups and groups['glusterfs'] | length > 0 %}{% set node = groups.glusterfs[0] %}{% endif %}{% if openshift_hosted_registry_storage_glusterfs_ips is defined and openshift_hosted_registry_storage_glusterfs_ips|length > 0 %}{{ openshift_hosted_registry_storage_glusterfs_ips[0] }}{% elif 'glusterfs_hostname' in hostvars[node] %}{{ hostvars[node].glusterfs_hostname }}{% elif 'openshift' in hostvars[node] %}{{ hostvars[node].openshift.node.nodename }}{% else %}{{ node }}{% endif %}:/{{ openshift.hosted.registry.storage.glusterfs.path }}"
name: "{{ mktemp.stdout }}"
- name: Set registry volume permissions
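The one-line `src` expression is hard to audit; unrolled, the new emptiness guards read as below (an illustrative reformatting, not a change to the task file):

```
{% if 'glusterfs_registry' in groups and groups['glusterfs_registry'] | length > 0 %}
  {% set node = groups.glusterfs_registry[0] %}
{% elif 'glusterfs' in groups and groups['glusterfs'] | length > 0 %}
  {% set node = groups.glusterfs[0] %}
{% endif %}
{# then prefer explicit IPs, a glusterfs_hostname hostvar, the node's
   OpenShift nodename, and finally the bare inventory name #}
```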
diff --git a/roles/openshift_metrics/tasks/generate_cassandra_pvcs.yaml b/roles/openshift_metrics/tasks/generate_cassandra_pvcs.yaml
new file mode 100644
index 000000000..6aa48f9c3
--- /dev/null
+++ b/roles/openshift_metrics/tasks/generate_cassandra_pvcs.yaml
@@ -0,0 +1,46 @@
+---
+- name: Check to see if PVC already exists
+ oc_obj:
+ state: list
+ kind: pvc
+ name: "{{ openshift_metrics_cassandra_pvc_prefix }}-{{ metrics_pvc_index }}"
+ namespace: "{{ openshift_metrics_project }}"
+ register: _metrics_pvc
+
+# _metrics_pvc.results.results | length > 0 returns a false positive,
+# so we check for the presence of 'stderr' to determine whether the object exists;
+# the RC is 0 whether or not the object exists
+- when:
+ - _metrics_pvc.results.stderr is defined
+ block:
+ - name: generate hawkular-cassandra persistent volume claims
+ template:
+ src: pvc.j2
+ dest: "{{ mktemp.stdout }}/templates/hawkular-cassandra-pvc{{ metrics_pvc_index }}.yaml"
+ vars:
+ obj_name: "{{ openshift_metrics_cassandra_pvc_prefix }}-{{ metrics_pvc_index }}"
+ labels:
+ metrics-infra: hawkular-cassandra
+ access_modes: "{{ openshift_metrics_cassandra_pvc_access | list }}"
+ size: "{{ openshift_metrics_cassandra_pvc_size }}"
+ pv_selector: "{{ openshift_metrics_cassandra_pv_selector }}"
+ storage_class_name: "{{ openshift_metrics_cassanda_pvc_storage_class_name | default('', true) }}"
+ when:
+ - openshift_metrics_cassandra_storage_type != 'emptydir'
+ - openshift_metrics_cassandra_storage_type != 'dynamic'
+ changed_when: false
+
+ - name: generate hawkular-cassandra persistent volume claims (dynamic)
+ template:
+ src: pvc.j2
+ dest: "{{ mktemp.stdout }}/templates/hawkular-cassandra-pvc{{ metrics_pvc_index }}.yaml"
+ vars:
+ obj_name: "{{ openshift_metrics_cassandra_pvc_prefix }}-{{ metrics_pvc_index }}"
+ labels:
+ metrics-infra: hawkular-cassandra
+ access_modes: "{{ openshift_metrics_cassandra_pvc_access | list }}"
+ size: "{{ openshift_metrics_cassandra_pvc_size }}"
+ pv_selector: "{{ openshift_metrics_cassandra_pv_selector }}"
+ storage_class_name: "{{ openshift_metrics_cassanda_pvc_storage_class_name | default('', true) }}"
+ when: openshift_metrics_cassandra_storage_type == 'dynamic'
+ changed_when: false
diff --git a/roles/openshift_metrics/tasks/install_cassandra.yaml b/roles/openshift_metrics/tasks/install_cassandra.yaml
index 9026cc897..158e596ec 100644
--- a/roles/openshift_metrics/tasks/install_cassandra.yaml
+++ b/roles/openshift_metrics/tasks/install_cassandra.yaml
@@ -25,36 +25,7 @@
- set_fact: openshift_metrics_cassandra_pvc_prefix="hawkular-metrics"
when: "not openshift_metrics_cassandra_pvc_prefix or openshift_metrics_cassandra_pvc_prefix == ''"
-- name: generate hawkular-cassandra persistent volume claims
- template:
- src: pvc.j2
- dest: "{{ mktemp.stdout }}/templates/hawkular-cassandra-pvc{{ item }}.yaml"
- vars:
- obj_name: "{{ openshift_metrics_cassandra_pvc_prefix }}-{{ item }}"
- labels:
- metrics-infra: hawkular-cassandra
- access_modes: "{{ openshift_metrics_cassandra_pvc_access | list }}"
- size: "{{ openshift_metrics_cassandra_pvc_size }}"
- pv_selector: "{{ openshift_metrics_cassandra_pv_selector }}"
- storage_class_name: "{{ openshift_metrics_cassanda_pvc_storage_class_name | default('', true) }}"
- with_sequence: count={{ openshift_metrics_cassandra_replicas }}
- when:
- - openshift_metrics_cassandra_storage_type != 'emptydir'
- - openshift_metrics_cassandra_storage_type != 'dynamic'
- changed_when: false
-
-- name: generate hawkular-cassandra persistent volume claims (dynamic)
- template:
- src: pvc.j2
- dest: "{{ mktemp.stdout }}/templates/hawkular-cassandra-pvc{{ item }}.yaml"
- vars:
- obj_name: "{{ openshift_metrics_cassandra_pvc_prefix }}-{{ item }}"
- labels:
- metrics-infra: hawkular-cassandra
- access_modes: "{{ openshift_metrics_cassandra_pvc_access | list }}"
- size: "{{ openshift_metrics_cassandra_pvc_size }}"
- pv_selector: "{{ openshift_metrics_cassandra_pv_selector }}"
- storage_class_name: "{{ openshift_metrics_cassanda_pvc_storage_class_name | default('', true) }}"
+- include_tasks: generate_cassandra_pvcs.yaml
with_sequence: count={{ openshift_metrics_cassandra_replicas }}
- when: openshift_metrics_cassandra_storage_type == 'dynamic'
- changed_when: false
+ loop_control:
+ loop_var: metrics_pvc_index
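Because the loop body now lives in an included file, the loop declares a custom `loop_var`: the included tasks see a readable `metrics_pvc_index`, and the default `item` stays free for any loops inside generate_cassandra_pvcs.yaml. The calling shape reduces to:

```
- include_tasks: generate_cassandra_pvcs.yaml
  with_sequence: count={{ openshift_metrics_cassandra_replicas }}
  loop_control:
    loop_var: metrics_pvc_index
```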
diff --git a/roles/openshift_node/tasks/storage_plugins/iscsi.yml b/roles/openshift_node/tasks/storage_plugins/iscsi.yml
index 72415f9a6..e31433dbc 100644
--- a/roles/openshift_node/tasks/storage_plugins/iscsi.yml
+++ b/roles/openshift_node/tasks/storage_plugins/iscsi.yml
@@ -15,6 +15,7 @@
name: "{{ item }}"
state: started
enabled: True
+ when: not openshift_is_atomic | bool
with_items:
- multipathd
- rpcbind
diff --git a/roles/openshift_openstack/defaults/main.yml b/roles/openshift_openstack/defaults/main.yml
index 77be1f2b1..2bdb81632 100644
--- a/roles/openshift_openstack/defaults/main.yml
+++ b/roles/openshift_openstack/defaults/main.yml
@@ -93,3 +93,8 @@ openshift_openstack_node_volume_size: "{{ openshift_openstack_docker_volume_size
openshift_openstack_etcd_volume_size: 2
openshift_openstack_lb_volume_size: 5
openshift_openstack_ephemeral_volumes: false
+
+
+# cloud-config
+openshift_openstack_disable_root: true
+openshift_openstack_user: openshift
diff --git a/roles/openshift_openstack/templates/user_data.j2 b/roles/openshift_openstack/templates/user_data.j2
index eb65f7cec..ccaa5d464 100644
--- a/roles/openshift_openstack/templates/user_data.j2
+++ b/roles/openshift_openstack/templates/user_data.j2
@@ -1,9 +1,9 @@
#cloud-config
-disable_root: true
+disable_root: {{ openshift_openstack_disable_root }}
system_info:
default_user:
- name: openshift
+ name: {{ openshift_openstack_user }}
sudo: ["ALL=(ALL) NOPASSWD: ALL"]
write_files:
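Out of the box the rendered cloud-config is unchanged; overriding the two new variables yields, for example (hypothetical values):

```
#cloud-config
disable_root: false
system_info:
  default_user:
    name: cloud-user
    sudo: ["ALL=(ALL) NOPASSWD: ALL"]
```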
diff --git a/roles/openshift_prometheus/templates/prometheus.j2 b/roles/openshift_prometheus/templates/prometheus.j2
index c0abd483b..e86de1eab 100644
--- a/roles/openshift_prometheus/templates/prometheus.j2
+++ b/roles/openshift_prometheus/templates/prometheus.j2
@@ -219,7 +219,7 @@ spec:
- name: alertmanager
args:
- - -config.file=/etc/alertmanager/alertmanager.yml
+ - --config.file=/etc/alertmanager/alertmanager.yml
image: "{{ l_openshift_prometheus_alertmanager_image_prefix }}prometheus-alertmanager:{{ l_openshift_prometheus_alertmanager_image_version }}"
imagePullPolicy: IfNotPresent
resources:
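The double-dash form tracks the image bump below to Alertmanager v0.13.0, which switched to POSIX-style long flags; the old single-dash spelling no longer parses there. Invocation sketch:

```
alertmanager --config.file=/etc/alertmanager/alertmanager.yml
```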
diff --git a/roles/openshift_prometheus/vars/default_images.yml b/roles/openshift_prometheus/vars/default_images.yml
index 31f6c1bb1..2c46b5700 100644
--- a/roles/openshift_prometheus/vars/default_images.yml
+++ b/roles/openshift_prometheus/vars/default_images.yml
@@ -8,5 +8,5 @@ l_openshift_prometheus_alertbuffer_image_prefix: "{{ openshift_prometheus_alertb
# image version defaults
l_openshift_prometheus_image_version: "{{ openshift_prometheus_image_version | default('v2.0.0') }}"
l_openshift_prometheus_proxy_image_version: "{{ openshift_prometheus_proxy_image_version | default('v1.0.0') }}"
-l_openshift_prometheus_alertmanager_image_version: "{{ openshift_prometheus_alertmanager_image_version | default('v0.9.1') }}"
+l_openshift_prometheus_alertmanager_image_version: "{{ openshift_prometheus_alertmanager_image_version | default('v0.13.0') }}"
l_openshift_prometheus_alertbuffer_image_version: "{{ openshift_prometheus_alertbuffer_image_version | default('v0.0.2') }}"
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
index a5fdae803..e6e261b52 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
@@ -313,7 +313,10 @@
- glusterfs_storageclass or glusterfs_s3_deploy
- include_tasks: glusterblock_deploy.yml
- when: glusterfs_block_deploy
+ when:
+ - glusterfs_block_deploy
#TODO: Remove this when multipathd becomes available on atomic
+ - not openshift_is_atomic | bool
- block:
- name: Create heketi block secret
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
index befacb04f..10c29fd37 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
@@ -48,7 +48,7 @@
glusterfs_heketi_ssh_sudo: "{{ openshift_storage_glusterfs_registry_heketi_ssh_sudo | bool }}"
glusterfs_heketi_ssh_keyfile: "{{ openshift_storage_glusterfs_registry_heketi_ssh_keyfile }}"
glusterfs_heketi_fstab: "{{ openshift_storage_glusterfs_registry_heketi_fstab }}"
- glusterfs_nodes: "{% if groups.glusterfs_registry is defined %}{% set nodes = groups.glusterfs_registry %}{% elif 'groups.glusterfs' is defined %}{% set nodes = groups.glusterfs %}{% else %}{% set nodes = '[]' %}{% endif %}{{ nodes }}"
+ glusterfs_nodes: "{% if groups.glusterfs_registry is defined and groups['glusterfs_registry'] | length > 0 %}{% set nodes = groups.glusterfs_registry %}{% elif 'groups.glusterfs' is defined and groups['glusterfs'] | length > 0 %}{% set nodes = groups.glusterfs %}{% else %}{% set nodes = '[]' %}{% endif %}{{ nodes }}"
- include_tasks: glusterfs_common.yml
when:
diff --git a/roles/openshift_version/tasks/first_master.yml b/roles/openshift_version/tasks/first_master.yml
index e01a56dc1..b0d155c2c 100644
--- a/roles/openshift_version/tasks/first_master.yml
+++ b/roles/openshift_version/tasks/first_master.yml
@@ -19,7 +19,7 @@
- set_fact:
openshift_pkg_version: -{{ openshift_version }}
when:
- - openshift_pkg_version is not defined
+ - openshift_pkg_version is not defined or openshift_pkg_version == ""
- openshift_upgrade_target is not defined
- block:
@@ -28,5 +28,5 @@
- set_fact:
openshift_image_tag: v{{ openshift_version }}
when: >
- openshift_image_tag is not defined
+ openshift_image_tag is not defined or openshift_image_tag == ""
or l_force_image_tag_to_version | bool
diff --git a/roles/openshift_version/tasks/first_master_containerized_version.yml b/roles/openshift_version/tasks/first_master_containerized_version.yml
index 3ed1d2cfe..9eb38cb2b 100644
--- a/roles/openshift_version/tasks/first_master_containerized_version.yml
+++ b/roles/openshift_version/tasks/first_master_containerized_version.yml
@@ -6,6 +6,7 @@
openshift_version: "{{ openshift_image_tag[1:].split('-')[0] if openshift_image_tag != 'latest' else openshift_image_tag }}"
when:
- openshift_image_tag is defined
+ - openshift_image_tag != ""
- openshift_version is not defined
- not (openshift_version_reinit | default(false))
diff --git a/roles/openshift_version/tasks/first_master_rpm_version.yml b/roles/openshift_version/tasks/first_master_rpm_version.yml
index 5d92f90c6..85e440513 100644
--- a/roles/openshift_version/tasks/first_master_rpm_version.yml
+++ b/roles/openshift_version/tasks/first_master_rpm_version.yml
@@ -5,6 +5,7 @@
openshift_version: "{{ openshift_pkg_version[1:].split('-')[0] }}"
when:
- openshift_pkg_version is defined
+ - openshift_pkg_version != ""
- openshift_version is not defined
- not (openshift_version_reinit | default(false))