-rw-r--r--  .tito/packages/openshift-ansible | 2
-rw-r--r--  inventory/hosts.example | 6
-rw-r--r--  openshift-ansible.spec | 26
-rw-r--r--  playbooks/aws/README.md | 14
-rw-r--r--  playbooks/aws/openshift-cluster/uninstall_prerequisites.yml | 6
-rw-r--r--  playbooks/aws/openshift-cluster/uninstall_sec_group.yml | 10
-rw-r--r--  playbooks/aws/openshift-cluster/uninstall_ssh_keypair.yml | 10
-rw-r--r--  playbooks/aws/openshift-cluster/uninstall_vpc.yml | 10
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/init.yml | 3
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml | 2
-rw-r--r--  playbooks/init/basic_facts.yml (renamed from playbooks/init/facts.yml) | 43
-rw-r--r--  playbooks/init/cluster_facts.yml | 42
-rw-r--r--  playbooks/init/main.yml | 11
-rw-r--r--  playbooks/init/validate_hostnames.yml | 4
-rw-r--r--  playbooks/openshift-etcd/upgrade.yml | 1
-rw-r--r--  playbooks/openshift-master/scaleup.yml | 41
-rw-r--r--  playbooks/openshift-node/scaleup.yml | 8
-rw-r--r--  playbooks/openstack/README.md | 14
-rw-r--r--  playbooks/openstack/openshift-cluster/provision.yml | 4
-rw-r--r--  playbooks/openstack/sample-inventory/group_vars/OSEv3.yml | 2
-rw-r--r--  playbooks/prerequisites.yml | 3
-rw-r--r--  roles/lib_utils/filter_plugins/oo_filters.py | 14
-rw-r--r--  roles/openshift_aws/defaults/main.yml | 4
-rw-r--r--  roles/openshift_aws/tasks/uninstall_security_group.yml | 14
-rw-r--r--  roles/openshift_aws/tasks/uninstall_ssh_keys.yml | 9
-rw-r--r--  roles/openshift_aws/tasks/uninstall_vpc.yml | 36
-rw-r--r--  roles/openshift_health_checker/openshift_checks/docker_image_availability.py | 32
-rw-r--r--  roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml | 2
-rw-r--r--  roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml | 2
-rw-r--r--  roles/openshift_hosted_templates/files/v3.8/enterprise/registry-console.yaml | 2
-rw-r--r--  roles/openshift_hosted_templates/files/v3.9/enterprise/registry-console.yaml | 2
-rw-r--r--  roles/openshift_logging/tasks/generate_certs.yaml | 2
-rw-r--r--  roles/openshift_logging/tasks/procure_server_certs.yaml | 2
-rw-r--r--  roles/openshift_node/defaults/main.yml | 12
-rw-r--r--  roles/openshift_node/tasks/node_system_container.yml | 19
-rw-r--r--  roles/openshift_openstack/templates/heat_stack.yaml.j2 | 24
-rw-r--r--  roles/openshift_openstack/templates/heat_stack_server.yaml.j2 | 2
-rw-r--r--  roles/openshift_web_console/defaults/main.yml | 3
-rw-r--r--  roles/openshift_web_console/tasks/install.yml | 106
-rw-r--r--  roles/template_service_broker/defaults/main.yml | 2
-rw-r--r--  roles/template_service_broker/tasks/install.yml | 15
-rw-r--r--  roles/template_service_broker/tasks/remove.yml | 9
-rw-r--r--  utils/src/ooinstall/openshift_ansible.py | 2
43 files changed, 450 insertions, 127 deletions
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index 065cf9668..d6dd5a3c8 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.9.0-0.22.0 ./
+3.9.0-0.23.0 ./
diff --git a/inventory/hosts.example b/inventory/hosts.example
index da60b63e6..f9f331880 100644
--- a/inventory/hosts.example
+++ b/inventory/hosts.example
@@ -845,12 +845,12 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# See: https://github.com/nickhammond/ansible-logrotate
#logrotate_scripts=[{"name": "syslog", "path": "/var/log/cron\n/var/log/maillog\n/var/log/messages\n/var/log/secure\n/var/log/spooler\n", "options": ["daily", "rotate 7", "compress", "sharedscripts", "missingok"], "scripts": {"postrotate": "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"}}]
-# openshift-ansible will wait indefinitely for your input when it detects that the
+# The OpenShift-Ansible installer will fail when it detects that the
# value of openshift_hostname resolves to an IP address not bound to any local
# interfaces. This mis-configuration is problematic for any pod leveraging host
# networking and liveness or readiness probes.
-# Setting this variable to true will override that check.
-#openshift_override_hostname_check=true
+# Setting this variable to false will override that check.
+#openshift_hostname_check=true
# openshift_use_dnsmasq is deprecated. This must be true, or installs will fail
# in versions >= 3.6
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index 65ee71c56..c09e14c66 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -10,7 +10,7 @@
Name: openshift-ansible
Version: 3.9.0
-Release: 0.22.0%{?dist}
+Release: 0.23.0%{?dist}
Summary: Openshift and Atomic Enterprise Ansible
License: ASL 2.0
URL: https://github.com/openshift/openshift-ansible
@@ -202,6 +202,30 @@ Atomic OpenShift Utilities includes
%changelog
+* Tue Jan 23 2018 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.23.0
+- docker_image_availability: enable skopeo to use proxies (lmeyer@redhat.com)
+- Install base_packages earlier (mgugino@redhat.com)
+- allow uninstalling AWS objects created by prerequisite playbook
+ (jdiaz@redhat.com)
+- Bug 1536262: Default console and TSB node selector to
+ openshift_hosted_infra_selector (spadgett@redhat.com)
+- Migrate master-config.yaml asset config (spadgett@redhat.com)
+- Fix master scaleup play (mgugino@redhat.com)
+- use admin credentials for tsb install operations (bparees@redhat.com)
+- Fix etcd-upgrade sanity checks (mgugino@redhat.com)
+- Bug 1536253: Pass `--config` flag on oc commands when installing console
+ (spadgett@redhat.com)
+- Fix enterprise registry-console prefix (sdodson@redhat.com)
+- [release-3.7] Fix enterprise registry console image prefix
+ (sdodson@redhat.com)
+- [release-3.6] Fix enterprise registry console image prefix
+ (sdodson@redhat.com)
+- Bug 1512825 - add mux pod failed for Serial number 02 has already been issued
+ (nhosoi@redhat.com)
+- Remove old console asset config (spadgett@redhat.com)
+- Add support for Amazon EC2 C5 instance types (rteague@redhat.com)
+- Fix provider network support at openstack playbook (ltomasbo@redhat.com)
+
* Fri Jan 19 2018 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.22.0
- Fix OpenStack readme (tomas@sedovic.cz)
- Quick installer: deprecate upgrades (vrutkovs@redhat.com)
diff --git a/playbooks/aws/README.md b/playbooks/aws/README.md
index d203b9cda..bdc98d1e0 100644
--- a/playbooks/aws/README.md
+++ b/playbooks/aws/README.md
@@ -198,3 +198,17 @@ At this point your cluster should be ready for workloads. Proceed to deploy app
### Still to come
There are more enhancements that are arriving for provisioning. These will include more playbooks that enhance the provisioning capabilities.
+
+## Uninstall / Deprovisioning
+
+At this time, only deprovisioning of the output of the prerequisites step is provided. You must manually remove things like ELBs and scale groups before attempting to undo the work done by the prerequisites step.
+
+To undo the work done by the prerequisites playbook, simply call the uninstall_prerequisites.yml playbook. You should use the same inventory file and provisioning_vars.yml file that were used during provisioning.
+
+```
+ansible-playbook -i <previous inventory file> -e @<previous provisioning_vars file> uninstall_prerequisites.yml
+```
+
+This should result in removal of the security groups and VPC that were created.
+
+NOTE: If you also want to remove the ssh keys that were uploaded (**these ssh keys would be shared if you are running multiple clusters in the same AWS account**, so they are not removed by default), add 'openshift_aws_enable_uninstall_shared_objects: True' to your provisioning_vars.yml file.
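For reference, a minimal provisioning_vars.yml carrying that flag might look like the sketch below; the flag and its default of False come from roles/openshift_aws/defaults/main.yml in this change, while the cluster id and region values are placeholders.

```
# provisioning_vars.yml (excerpt, hypothetical values)
openshift_aws_clusterid: mycluster
openshift_aws_region: us-east-1
# opt in to deleting shared objects such as the uploaded ssh keys
openshift_aws_enable_uninstall_shared_objects: True
```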
diff --git a/playbooks/aws/openshift-cluster/uninstall_prerequisites.yml b/playbooks/aws/openshift-cluster/uninstall_prerequisites.yml
new file mode 100644
index 000000000..180c2281a
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/uninstall_prerequisites.yml
@@ -0,0 +1,6 @@
+---
+- import_playbook: uninstall_sec_group.yml
+
+- import_playbook: uninstall_vpc.yml
+
+- import_playbook: uninstall_ssh_keypair.yml
diff --git a/playbooks/aws/openshift-cluster/uninstall_sec_group.yml b/playbooks/aws/openshift-cluster/uninstall_sec_group.yml
new file mode 100644
index 000000000..642e5b169
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/uninstall_sec_group.yml
@@ -0,0 +1,10 @@
+---
+- hosts: localhost
+ connection: local
+ gather_facts: no
+ tasks:
+ - name: delete security groups
+ include_role:
+ name: openshift_aws
+ tasks_from: uninstall_security_group.yml
+ when: openshift_aws_create_security_groups | default(True) | bool
diff --git a/playbooks/aws/openshift-cluster/uninstall_ssh_keypair.yml b/playbooks/aws/openshift-cluster/uninstall_ssh_keypair.yml
new file mode 100644
index 000000000..ec9caa51b
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/uninstall_ssh_keypair.yml
@@ -0,0 +1,10 @@
+---
+- hosts: localhost
+ connection: local
+ gather_facts: no
+ tasks:
+ - name: remove ssh keypair(s)
+ include_role:
+ name: openshift_aws
+ tasks_from: uninstall_ssh_keys.yml
+ when: openshift_aws_users | default([]) | length > 0
diff --git a/playbooks/aws/openshift-cluster/uninstall_vpc.yml b/playbooks/aws/openshift-cluster/uninstall_vpc.yml
new file mode 100644
index 000000000..4c988bcc5
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/uninstall_vpc.yml
@@ -0,0 +1,10 @@
+---
+- hosts: localhost
+ connection: local
+ gather_facts: no
+ tasks:
+ - name: delete vpc
+ include_role:
+ name: openshift_aws
+ tasks_from: uninstall_vpc.yml
+ when: openshift_aws_create_vpc | default(True) | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/init.yml b/playbooks/common/openshift-cluster/upgrades/init.yml
index 8ee83819e..ba783638d 100644
--- a/playbooks/common/openshift-cluster/upgrades/init.yml
+++ b/playbooks/common/openshift-cluster/upgrades/init.yml
@@ -5,7 +5,8 @@
g_new_master_hosts: []
g_new_node_hosts: []
-- import_playbook: ../../../init/facts.yml
+- import_playbook: ../../../init/basic_facts.yml
+- import_playbook: ../../../init/cluster_facts.yml
- name: Ensure firewall is not switched during upgrade
hosts: "{{ l_upgrade_no_switch_firewall_hosts | default('oo_all_hosts') }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
index fc1cbf32a..07be0b0d4 100644
--- a/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
@@ -31,7 +31,7 @@
with_items: " {{ groups['oo_nodes_to_config'] }}"
when:
- hostvars[item].openshift is defined
- - hostvars[item].openshift.common.hostname in nodes_to_upgrade.results.results[0]['items'] | map(attribute='metadata.name') | list
+ - hostvars[item].openshift.common.hostname | lower in nodes_to_upgrade.results.results[0]['items'] | map(attribute='metadata.name') | list
changed_when: false
# Build up the oo_nodes_to_upgrade group, use the list filtered by label if
diff --git a/playbooks/init/facts.yml b/playbooks/init/basic_facts.yml
index df17c4043..06a4e7291 100644
--- a/playbooks/init/facts.yml
+++ b/playbooks/init/basic_facts.yml
@@ -4,15 +4,13 @@
any_errors_fatal: true
tasks:
-- name: Initialize host facts
+- name: Initialize basic host facts
# l_init_fact_hosts is passed in via play during control-plane-only
# upgrades and scale-up plays; otherwise oo_all_hosts is used.
hosts: "{{ l_init_fact_hosts | default('oo_all_hosts') }}"
+ roles:
+ - role: openshift_facts
tasks:
- - name: load openshift_facts module
- import_role:
- name: openshift_facts
-
# TODO: Should this role be refactored into health_checks??
- name: Run openshift_sanitize_inventory to set variables
import_role:
@@ -58,41 +56,6 @@
- l_atomic_docker_version.stdout | replace('"', '') is version_compare('1.12','>=')
msg: Installation on Atomic Host requires Docker 1.12 or later. Please upgrade and restart the Atomic Host.
- - name: Gather Cluster facts
- openshift_facts:
- role: common
- local_facts:
- deployment_type: "{{ openshift_deployment_type }}"
- deployment_subtype: "{{ openshift_deployment_subtype | default(None) }}"
- hostname: "{{ openshift_hostname | default(None) }}"
- ip: "{{ openshift_ip | default(None) }}"
- public_hostname: "{{ openshift_public_hostname | default(None) }}"
- public_ip: "{{ openshift_public_ip | default(None) }}"
- portal_net: "{{ openshift_portal_net | default(openshift_master_portal_net) | default(None) }}"
- http_proxy: "{{ openshift_http_proxy | default(None) }}"
- https_proxy: "{{ openshift_https_proxy | default(None) }}"
- no_proxy: "{{ openshift_no_proxy | default(None) }}"
- generate_no_proxy_hosts: "{{ openshift_generate_no_proxy_hosts | default(True) }}"
-
- - name: Set fact of no_proxy_internal_hostnames
- openshift_facts:
- role: common
- local_facts:
- no_proxy_internal_hostnames: "{{ hostvars | lib_utils_oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | lib_utils_oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when:
- - openshift_http_proxy is defined or openshift_https_proxy is defined
- - openshift_generate_no_proxy_hosts | default(True) | bool
-
- - name: Initialize openshift.node.sdn_mtu
- openshift_facts:
- role: node
- local_facts:
- sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}"
-
- name: Initialize special first-master variables
hosts: oo_first_master
roles:
diff --git a/playbooks/init/cluster_facts.yml b/playbooks/init/cluster_facts.yml
new file mode 100644
index 000000000..636679e32
--- /dev/null
+++ b/playbooks/init/cluster_facts.yml
@@ -0,0 +1,42 @@
+---
+- name: Initialize cluster facts
+ # l_init_fact_hosts is passed in via play during control-plane-only
+ # upgrades and scale-up plays; otherwise oo_all_hosts is used.
+ hosts: "{{ l_init_fact_hosts | default('oo_all_hosts') }}"
+ roles:
+ - role: openshift_facts
+ tasks:
+ - name: Gather Cluster facts
+ openshift_facts:
+ role: common
+ local_facts:
+ deployment_type: "{{ openshift_deployment_type }}"
+ deployment_subtype: "{{ openshift_deployment_subtype | default(None) }}"
+ hostname: "{{ openshift_hostname | default(None) }}"
+ ip: "{{ openshift_ip | default(None) }}"
+ public_hostname: "{{ openshift_public_hostname | default(None) }}"
+ public_ip: "{{ openshift_public_ip | default(None) }}"
+ portal_net: "{{ openshift_portal_net | default(openshift_master_portal_net) | default(None) }}"
+ http_proxy: "{{ openshift_http_proxy | default(None) }}"
+ https_proxy: "{{ openshift_https_proxy | default(None) }}"
+ no_proxy: "{{ openshift_no_proxy | default(None) }}"
+ generate_no_proxy_hosts: "{{ openshift_generate_no_proxy_hosts | default(True) }}"
+
+ - name: Set fact of no_proxy_internal_hostnames
+ openshift_facts:
+ role: common
+ local_facts:
+ no_proxy_internal_hostnames: "{{ hostvars | lib_utils_oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | lib_utils_oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
+
+ - name: Initialize openshift.node.sdn_mtu
+ openshift_facts:
+ role: node
+ local_facts:
+ sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}"
diff --git a/playbooks/init/main.yml b/playbooks/init/main.yml
index 8a3f4682d..9886691e0 100644
--- a/playbooks/init/main.yml
+++ b/playbooks/init/main.yml
@@ -1,4 +1,7 @@
---
+# skip_verison and l_install_base_packages are passed in via prerequisites.yml.
+# skip_sanity_checks is passed in via openshift-node/private/image_prep.yml
+
- name: Initialization Checkpoint Start
hosts: all
gather_facts: false
@@ -15,7 +18,13 @@
- import_playbook: evaluate_groups.yml
-- import_playbook: facts.yml
+- import_playbook: basic_facts.yml
+
+# base_packages needs to be setup for openshift_facts.py to run correctly.
+- import_playbook: base_packages.yml
+ when: l_install_base_packages | default(False) | bool
+
+- import_playbook: cluster_facts.yml
- import_playbook: version.yml
when: not (skip_verison | default(False))
diff --git a/playbooks/init/validate_hostnames.yml b/playbooks/init/validate_hostnames.yml
index 86e0b2416..b49f7dd08 100644
--- a/playbooks/init/validate_hostnames.yml
+++ b/playbooks/init/validate_hostnames.yml
@@ -25,7 +25,7 @@
when:
- lookupip.stdout != '127.0.0.1'
- lookupip.stdout not in ansible_all_ipv4_addresses
- - openshift_hostname_check | default(true)
+ - openshift_hostname_check | default(true) | bool
- name: Validate openshift_ip exists on node when defined
fail:
@@ -40,4 +40,4 @@
when:
- openshift_ip is defined
- openshift_ip not in ansible_all_ipv4_addresses
- - openshift_ip_check | default(true)
+ - openshift_ip_check | default(true) | bool
diff --git a/playbooks/openshift-etcd/upgrade.yml b/playbooks/openshift-etcd/upgrade.yml
index b1ce6b220..77999d92c 100644
--- a/playbooks/openshift-etcd/upgrade.yml
+++ b/playbooks/openshift-etcd/upgrade.yml
@@ -3,5 +3,6 @@
vars:
skip_verison: True
l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+ l_sanity_check_hosts: "{{ groups['oo_etcd_to_config'] | union(groups['oo_masters_to_config']) }}"
- import_playbook: private/upgrade_main.yml
diff --git a/playbooks/openshift-master/scaleup.yml b/playbooks/openshift-master/scaleup.yml
index 7d31340a2..09e205afc 100644
--- a/playbooks/openshift-master/scaleup.yml
+++ b/playbooks/openshift-master/scaleup.yml
@@ -1,22 +1,43 @@
---
- import_playbook: ../init/evaluate_groups.yml
-- name: Ensure there are new_masters or new_nodes
+- name: Ensure there are new_masters and new_nodes
hosts: localhost
connection: local
gather_facts: no
tasks:
- fail:
+ # new_masters must be part of new_nodes as well; otherwise if new_nodes
+ # is not present, oo_nodes_to_config will contain all existing nodes.
msg: >
- Detected no new_masters or no new_nodes in inventory. Please
- add hosts to the new_masters and new_nodes host groups to add
- masters.
- when:
- - g_new_master_hosts | default([]) | length == 0
- - g_new_node_hosts | default([]) | length == 0
+ Detected no new_masters and/or no new_nodes in inventory. New
+ masters must be part of both new_masters and new_nodes groups.
+ If you are adding just new_nodes, use the
+ playbooks/openshift-node/scaleup.yml play.
+ when: >
+ g_new_master_hosts | default([]) | length == 0
+ or g_new_node_hosts | default([]) | length == 0
-# Need a better way to do the above check for node without
-# running evaluate_groups and init/main.yml
-- import_playbook: ../init/main.yml
+- name: Ensure there are new_masters and new_nodes
+ hosts: oo_masters_to_config
+ connection: local
+ gather_facts: no
+ tasks:
+ - fail:
+ # new_masters must be part of new_nodes as well;
+ msg: >
+ Each host in new_masters must also appear in new_nodes
+ when: inventory_hostname not in groups['oo_nodes_to_config']
+
+- import_playbook: ../prerequisites.yml
+ vars:
+ l_scale_up_hosts: "oo_nodes_to_config:oo_masters_to_config"
+ l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_nodes_to_config"
+ l_sanity_check_hosts: "{{ groups['oo_nodes_to_config'] | union(groups['oo_masters_to_config']) }}"
+
+- import_playbook: ../init/version.yml
+ vars:
+ l_openshift_version_set_hosts: "oo_masters_to_config:oo_nodes_to_config:!oo_first_master"
+ l_openshift_version_check_hosts: "oo_masters_to_config:oo_nodes_to_config"
- import_playbook: private/scaleup.yml
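For context, the fail task above passes only when every host listed in new_masters also appears in new_nodes. A hypothetical inventory fragment that satisfies the check could look like this (hostnames are placeholders):

```
# hypothetical inventory excerpt: the new master appears in both groups
[new_masters]
master3.example.com

[new_nodes]
master3.example.com
node7.example.com
```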
diff --git a/playbooks/openshift-node/scaleup.yml b/playbooks/openshift-node/scaleup.yml
index cc03b72a2..9cc7263b7 100644
--- a/playbooks/openshift-node/scaleup.yml
+++ b/playbooks/openshift-node/scaleup.yml
@@ -12,6 +12,14 @@
new_nodes host group to add nodes.
when:
- g_new_node_hosts | default([]) | length == 0
+ - fail:
+ msg: >
+ Please run playbooks/openshift-master/scaleup.yml if you need to
+ scale up both masters and nodes. This playbook is only needed if
+ you are only adding new nodes and not new masters.
+ when:
+ - g_new_node_hosts | default([]) | length > 0
+ - g_new_master_hosts | default([]) | length > 0
# if g_new_node_hosts is not empty, oo_nodes_to_config will be set to
# g_new_node_hosts via evaluate_groups.yml
diff --git a/playbooks/openstack/README.md b/playbooks/openstack/README.md
index d64be06e5..842bb34de 100644
--- a/playbooks/openstack/README.md
+++ b/playbooks/openstack/README.md
@@ -30,15 +30,17 @@ version 10) or newer. It must also satisfy these requirements:
- look at
the [Minimum Hardware Requirements page][hardware-requirements]
for production
-* The keypair for SSH must be available in openstack
-* `keystonerc` file that lets you talk to the openstack services
+* The keypair for SSH must be available in OpenStack
+* `keystonerc` file that lets you talk to the OpenStack services
* NOTE: only Keystone V2 is currently supported
+* A host with the supported version of [Ansible][ansible] installed, see the
+ [Setup section of the openshift-ansible README][openshift-ansible-setup]
+ for details on the requirements.
Optional:
* External Neutron network with a floating IP address pool
-
## Installation
There are four main parts to the installation:
@@ -68,12 +70,11 @@ First, you need to select where to run [Ansible][ansible] from (the
*Ansible host*). This can be the computer you read this guide on or an
OpenStack VM you'll create specifically for this purpose.
-We will use
-a
+This guide will use a
[Docker image that has all the dependencies installed][control-host-image] to
make things easier. If you don't want to use Docker, take a look at
the [Ansible host dependencies][ansible-dependencies] and make sure
-they're installed.
+they are installed.
Your *Ansible host* needs to have the following:
@@ -222,6 +223,7 @@ advanced configuration:
[ansible]: https://www.ansible.com/
[openshift-ansible]: https://github.com/openshift/openshift-ansible
+[openshift-ansible-setup]: https://github.com/openshift/openshift-ansible#setup
[devstack]: https://docs.openstack.org/devstack/
[tripleo]: http://tripleo.org/
[ansible-dependencies]: ./advanced-configuration.md#dependencies-for-localhost-ansible-controladmin-node
diff --git a/playbooks/openstack/openshift-cluster/provision.yml b/playbooks/openstack/openshift-cluster/provision.yml
index a38d7bff7..73c1926a0 100644
--- a/playbooks/openstack/openshift-cluster/provision.yml
+++ b/playbooks/openstack/openshift-cluster/provision.yml
@@ -26,8 +26,8 @@
- name: Gather facts for the new nodes
setup:
-- name: set common facts
- import_playbook: ../../init/facts.yml
+- import_playbook: ../../init/basic_facts.yml
+- import_playbook: ../../init/cluster_facts.yml
# TODO(shadower): consider splitting this up so people can stop here
diff --git a/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml b/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml
index a8663f946..1287b25f3 100644
--- a/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml
+++ b/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml
@@ -43,7 +43,7 @@ openshift_hosted_registry_wait: True
# NOTE(shadower): the hostname check seems to always fail because the
# host's floating IP address doesn't match the address received from
# inside the host.
-openshift_override_hostname_check: true
+openshift_hostname_check: false
# For POCs or demo environments that are using smaller instances than
# the official recommended values for RAM and DISK, uncomment the line below.
diff --git a/playbooks/prerequisites.yml b/playbooks/prerequisites.yml
index 938bd3bc6..0b76ca862 100644
--- a/playbooks/prerequisites.yml
+++ b/playbooks/prerequisites.yml
@@ -4,14 +4,13 @@
- import_playbook: init/main.yml
vars:
skip_verison: True
+ l_install_base_packages: True
- import_playbook: init/validate_hostnames.yml
when: not (skip_validate_hostnames | default(False))
- import_playbook: init/repos.yml
-- import_playbook: init/base_packages.yml
-
# This is required for container runtime for crio, only needs to run once.
- name: Configure os_firewall
hosts: "{{ l_scale_up_hosts | default(l_default_firewall_hosts) }}"
diff --git a/roles/lib_utils/filter_plugins/oo_filters.py b/roles/lib_utils/filter_plugins/oo_filters.py
index 9f73510c4..ef996fefe 100644
--- a/roles/lib_utils/filter_plugins/oo_filters.py
+++ b/roles/lib_utils/filter_plugins/oo_filters.py
@@ -4,6 +4,7 @@
"""
Custom filters for use in openshift-ansible
"""
+import json
import os
import pdb
import random
@@ -586,6 +587,18 @@ that result to this filter plugin.
return secret_name
+def lib_utils_oo_l_of_d_to_csv(input_list):
+ """Map a list of dictionaries, input_list, into a csv string
+ of json values.
+
+ Example input:
+ [{'var1': 'val1', 'var2': 'val2'}, {'var1': 'val3', 'var2': 'val4'}]
+ Example output:
+ u'{"var1": "val1", "var2": "val2"},{"var1": "val3", "var2": "val4"}'
+ """
+ return ','.join(json.dumps(x) for x in input_list)
+
+
def map_from_pairs(source, delim="="):
''' Returns a dict given the source and delim delimited '''
if source == '':
@@ -623,5 +636,6 @@ class FilterModule(object):
"lib_utils_oo_contains_rule": lib_utils_oo_contains_rule,
"lib_utils_oo_selector_to_string_list": lib_utils_oo_selector_to_string_list,
"lib_utils_oo_filter_sa_secrets": lib_utils_oo_filter_sa_secrets,
+ "lib_utils_oo_l_of_d_to_csv": lib_utils_oo_l_of_d_to_csv,
"map_from_pairs": map_from_pairs
}
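A sketch of how the new lib_utils_oo_l_of_d_to_csv filter can be invoked from an Ansible vars block, using the input and output shown in its docstring (variable names are hypothetical; exact JSON key order may vary):

```
# hypothetical usage of the new filter in a vars block
vars:
  example_mounts:
  - {'var1': 'val1', 'var2': 'val2'}
  - {'var1': 'val3', 'var2': 'val4'}
  # renders to: {"var1": "val1", "var2": "val2"},{"var1": "val3", "var2": "val4"}
  example_csv: "{{ example_mounts | lib_utils_oo_l_of_d_to_csv }}"
```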
diff --git a/roles/openshift_aws/defaults/main.yml b/roles/openshift_aws/defaults/main.yml
index efd2468b2..a729e8dbd 100644
--- a/roles/openshift_aws/defaults/main.yml
+++ b/roles/openshift_aws/defaults/main.yml
@@ -301,3 +301,7 @@ openshift_aws_node_user_data: ''
openshift_aws_node_config_namespace: openshift-node
openshift_aws_masters_groups: masters,etcd,nodes
+
+# By default, don't delete things like the shared IAM instance
+# profile and uploaded ssh keys
+openshift_aws_enable_uninstall_shared_objects: False
diff --git a/roles/openshift_aws/tasks/uninstall_security_group.yml b/roles/openshift_aws/tasks/uninstall_security_group.yml
new file mode 100644
index 000000000..55d40e8ec
--- /dev/null
+++ b/roles/openshift_aws/tasks/uninstall_security_group.yml
@@ -0,0 +1,14 @@
+---
+- name: delete the node group sgs
+ oo_ec2_group:
+ state: absent
+ name: "{{ item.value.name}}"
+ region: "{{ openshift_aws_region }}"
+ with_dict: "{{ openshift_aws_node_security_groups }}"
+
+- name: delete the k8s sgs for the node group
+ oo_ec2_group:
+ state: absent
+ name: "{{ item.value.name }}_k8s"
+ region: "{{ openshift_aws_region }}"
+ with_dict: "{{ openshift_aws_node_security_groups }}"
diff --git a/roles/openshift_aws/tasks/uninstall_ssh_keys.yml b/roles/openshift_aws/tasks/uninstall_ssh_keys.yml
new file mode 100644
index 000000000..27e42da53
--- /dev/null
+++ b/roles/openshift_aws/tasks/uninstall_ssh_keys.yml
@@ -0,0 +1,9 @@
+---
+- name: Remove the public keys for the user(s)
+ ec2_key:
+ state: absent
+ name: "{{ item.key_name }}"
+ region: "{{ openshift_aws_region }}"
+ with_items: "{{ openshift_aws_users }}"
+ no_log: True
+ when: openshift_aws_enable_uninstall_shared_objects | bool
diff --git a/roles/openshift_aws/tasks/uninstall_vpc.yml b/roles/openshift_aws/tasks/uninstall_vpc.yml
new file mode 100644
index 000000000..ecf39f694
--- /dev/null
+++ b/roles/openshift_aws/tasks/uninstall_vpc.yml
@@ -0,0 +1,36 @@
+---
+- name: Fetch the VPC for the vpc.id
+ ec2_vpc_net_facts:
+ region: "{{ openshift_aws_region }}"
+ filters:
+ "tag:Name": "{{ openshift_aws_clusterid }}"
+ register: vpcout
+- debug:
+ var: vpcout
+ verbosity: 1
+
+- when: vpcout.vpcs | length > 0
+ block:
+ - name: delete the vpc igw
+ ec2_vpc_igw:
+ state: absent
+ region: "{{ openshift_aws_region }}"
+ vpc_id: "{{ vpcout.vpcs[0].id }}"
+ register: igw
+
+ - name: delete the vpc subnets
+ ec2_vpc_subnet:
+ state: absent
+ region: "{{ openshift_aws_region }}"
+ vpc_id: "{{ vpcout.vpcs[0].id }}"
+ cidr: "{{ item.cidr }}"
+ az: "{{ item.az }}"
+ with_items: "{{ openshift_aws_vpc.subnets[openshift_aws_region] }}"
+
+ - name: Delete AWS VPC
+ ec2_vpc_net:
+ state: absent
+ region: "{{ openshift_aws_region }}"
+ name: "{{ openshift_aws_clusterid }}"
+ cidr_block: "{{ openshift_aws_vpc.cidr }}"
+ register: vpc
diff --git a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
index ac6ffbbad..d298fbab2 100644
--- a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
@@ -40,7 +40,7 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
# to look for images available remotely without waiting to pull them.
dependencies = ["python-docker-py", "skopeo"]
# command for checking if remote registries have an image, without docker pull
- skopeo_command = "timeout 10 skopeo inspect --tls-verify={tls} {creds} docker://{registry}/{image}"
+ skopeo_command = "{proxyvars} timeout 10 skopeo inspect --tls-verify={tls} {creds} docker://{registry}/{image}"
skopeo_example_command = "skopeo inspect [--tls-verify=false] [--creds=<user>:<pass>] docker://<registry>/<image>"
def __init__(self, *args, **kwargs):
@@ -76,11 +76,20 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
if oreg_auth_user != '' and oreg_auth_password != '':
oreg_auth_user = self.template_var(oreg_auth_user)
oreg_auth_password = self.template_var(oreg_auth_password)
- self.skopeo_command_creds = "--creds={}:{}".format(quote(oreg_auth_user), quote(oreg_auth_password))
+ self.skopeo_command_creds = quote("--creds={}:{}".format(oreg_auth_user, oreg_auth_password))
# record whether we could reach a registry or not (and remember results)
self.reachable_registries = {}
+ # take note of any proxy settings needed
+ proxies = []
+ for var in ['http_proxy', 'https_proxy', 'no_proxy']:
+ # ansible vars are openshift_http_proxy, openshift_https_proxy, openshift_no_proxy
+ value = self.get_var("openshift_" + var, default=None)
+ if value:
+ proxies.append(var.upper() + "=" + quote(self.template_var(value)))
+ self.skopeo_proxy_vars = " ".join(proxies)
+
def is_active(self):
"""Skip hosts with unsupported deployment types."""
deployment_type = self.get_var("openshift_deployment_type")
@@ -249,11 +258,18 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
if not self.reachable_registries[registry]:
continue # do not keep trying unreachable registries
- args = dict(registry=registry, image=image)
- args["tls"] = "false" if registry in self.registries["insecure"] else "true"
- args["creds"] = self.skopeo_command_creds if registry == self.registries["oreg"] else ""
+ args = dict(
+ proxyvars=self.skopeo_proxy_vars,
+ tls="false" if registry in self.registries["insecure"] else "true",
+ creds=self.skopeo_command_creds if registry == self.registries["oreg"] else "",
+ registry=quote(registry),
+ image=quote(image),
+ )
- result = self.execute_module_with_retries("command", {"_raw_params": self.skopeo_command.format(**args)})
+ result = self.execute_module_with_retries("command", {
+ "_uses_shell": True,
+ "_raw_params": self.skopeo_command.format(**args),
+ })
if result.get("rc", 0) == 0 and not result.get("failed"):
return True
if result.get("rc") == 124: # RC 124 == timed out; mark unreachable
@@ -263,6 +279,10 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
def connect_to_registry(self, registry):
"""Use ansible wait_for module to test connectivity from host to registry. Returns bool."""
+ if self.skopeo_proxy_vars != "":
+ # assume we can't connect directly; just waive the test
+ return True
+
# test a simple TCP connection
host, _, port = registry.partition(":")
port = port or 443
diff --git a/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml
index cc3159a32..0786e2d2f 100644
--- a/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml
@@ -102,7 +102,7 @@ objects:
parameters:
- description: 'Specify "registry/repository" prefix for container image; e.g. for "registry.access.redhat.com/openshift3/registry-console:latest", set prefix "registry.access.redhat.com/openshift3/"'
name: IMAGE_PREFIX
- value: "openshift3/"
+ value: "registry.access.redhat.com/openshift3/"
- description: 'Specify component name for container image; e.g. for "registry.access.redhat.com/openshift3/registry-console:latest", use base name "registry-console"'
name: IMAGE_BASENAME
value: "registry-console"
diff --git a/roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml
index 9f2e6125d..ccea54aaf 100644
--- a/roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml
@@ -102,7 +102,7 @@ objects:
parameters:
- description: 'Specify "registry/repository" prefix for container image; e.g. for "registry.access.redhat.com/openshift3/registry-console:latest", set prefix "registry.access.redhat.com/openshift3/"'
name: IMAGE_PREFIX
- value: "openshift3/"
+ value: "registry.access.redhat.com/openshift3/"
- description: 'Specify component name for container image; e.g. for "registry.access.redhat.com/openshift3/registry-console:latest", use base name "registry-console"'
name: IMAGE_BASENAME
value: "registry-console"
diff --git a/roles/openshift_hosted_templates/files/v3.8/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.8/enterprise/registry-console.yaml
index f04ce06d3..15ad4e9af 100644
--- a/roles/openshift_hosted_templates/files/v3.8/enterprise/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v3.8/enterprise/registry-console.yaml
@@ -102,7 +102,7 @@ objects:
parameters:
- description: 'Specify "registry/repository" prefix for container image; e.g. for "registry.access.redhat.com/openshift3/registry-console:latest", set prefix "registry.access.redhat.com/openshift3/"'
name: IMAGE_PREFIX
- value: "openshift3/"
+ value: "registry.access.redhat.com/openshift3/"
- description: 'Specify component name for container image; e.g. for "registry.access.redhat.com/openshift3/registry-console:latest", use base name "registry-console"'
name: IMAGE_BASENAME
value: "registry-console"
diff --git a/roles/openshift_hosted_templates/files/v3.9/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.9/enterprise/registry-console.yaml
index c178cf432..7acefa0f0 100644
--- a/roles/openshift_hosted_templates/files/v3.9/enterprise/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v3.9/enterprise/registry-console.yaml
@@ -102,7 +102,7 @@ objects:
parameters:
- description: 'Specify "registry/repository" prefix for container image; e.g. for "registry.access.redhat.com/openshift3/registry-console:latest", set prefix "registry.access.redhat.com/openshift3/"'
name: IMAGE_PREFIX
- value: "openshift3/"
+ value: "registry.access.redhat.com/openshift3/"
- description: 'Specify component name for container image; e.g. for "registry.access.redhat.com/openshift3/registry-console:latest", use base name "registry-console"'
name: IMAGE_BASENAME
value: "registry-console"
diff --git a/roles/openshift_logging/tasks/generate_certs.yaml b/roles/openshift_logging/tasks/generate_certs.yaml
index 0d7f8c056..a40449bf6 100644
--- a/roles/openshift_logging/tasks/generate_certs.yaml
+++ b/roles/openshift_logging/tasks/generate_certs.yaml
@@ -19,7 +19,7 @@
command: >
{{ openshift_client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig ca create-signer-cert
--key={{generated_certs_dir}}/ca.key --cert={{generated_certs_dir}}/ca.crt
- --serial={{generated_certs_dir}}/ca.serial.txt --name=logging-signer-test
+ --serial={{generated_certs_dir}}/ca.serial.txt --name=logging-signer-test --overwrite=false
check_mode: no
when:
- not ca_key_file.stat.exists
diff --git a/roles/openshift_logging/tasks/procure_server_certs.yaml b/roles/openshift_logging/tasks/procure_server_certs.yaml
index bc817075d..d28d1d160 100644
--- a/roles/openshift_logging/tasks/procure_server_certs.yaml
+++ b/roles/openshift_logging/tasks/procure_server_certs.yaml
@@ -30,7 +30,7 @@
{{ openshift_client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig ca create-server-cert
--key={{generated_certs_dir}}/{{cert_info.procure_component}}.key --cert={{generated_certs_dir}}/{{cert_info.procure_component}}.crt
--hostnames={{cert_info.hostnames|quote}} --signer-cert={{generated_certs_dir}}/ca.crt --signer-key={{generated_certs_dir}}/ca.key
- --signer-serial={{generated_certs_dir}}/ca.serial.txt
+ --signer-serial={{generated_certs_dir}}/ca.serial.txt --overwrite=false
check_mode: no
when:
- cert_info.hostnames is defined
diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml
index 0b10413c5..5864d3c03 100644
--- a/roles/openshift_node/defaults/main.yml
+++ b/roles/openshift_node/defaults/main.yml
@@ -77,6 +77,18 @@ r_openshift_node_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }
l_is_node_system_container: "{{ (openshift_use_node_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
+openshift_node_syscon_auth_mounts_l:
+- type: bind
+ source: "{{ oreg_auth_credentials_path }}"
+ destination: "/root/.docker"
+ options:
+ - ro
+
+# If we need to add new mounts in the future, or the user wants to mount data.
+# This should be in the same format as auth_mounts_l above.
+openshift_node_syscon_add_mounts_l: []
+
+
openshift_deployment_type: "{{ openshift_deployment_type | default('origin') }}"
openshift_node_image_dict:
diff --git a/roles/openshift_node/tasks/node_system_container.yml b/roles/openshift_node/tasks/node_system_container.yml
index 06b879050..008f209d7 100644
--- a/roles/openshift_node/tasks/node_system_container.yml
+++ b/roles/openshift_node/tasks/node_system_container.yml
@@ -14,4 +14,23 @@
- "DNS_DOMAIN={{ openshift.common.dns_domain }}"
- "DOCKER_SERVICE={{ openshift_docker_service_name }}.service"
- "MASTER_SERVICE={{ openshift_service_type }}.service"
+ - 'ADDTL_MOUNTS={{ l_node_syscon_add_mounts2 }}'
state: latest
+ vars:
+ # We need to evaluate some variables here to ensure
+ # l_bind_docker_reg_auth is evaluated after registry_auth.yml has been
+ # processed.
+
+ # Determine if we want to include auth credentials mount.
+ l_node_syscon_auth_mounts_l: "{{ l_bind_docker_reg_auth | ternary(openshift_node_syscon_auth_mounts_l,[]) }}"
+
+ # Join any user-provided mounts and auth_mounts into a combined list.
+ l_node_syscon_add_mounts_l: "{{ openshift_node_syscon_add_mounts_l | union(l_node_syscon_auth_mounts_l) }}"
+
+ # We must prepend a ',' here to ensure the value is inserted properly into an
+ # existing json list in the container's config.json
+ # lib_utils_oo_l_of_d_to_csv is a custom filter plugin in roles/lib_utils/oo_filters.py
+ l_node_syscon_add_mounts: ",{{ l_node_syscon_add_mounts_l | lib_utils_oo_l_of_d_to_csv }}"
+ # if we have just a ',' then both mount lists were empty, we don't want to add
+ # anything to config.json
+ l_node_syscon_add_mounts2: "{{ (l_node_syscon_add_mounts != ',') | bool | ternary(l_node_syscon_add_mounts,'') }}"
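To illustrate the chain of vars above (an assumption-laden sketch, not part of the commit): with registry auth bound, no user-supplied mounts, and oreg_auth_credentials_path assumed to be /var/lib/origin/.docker, the values would resolve roughly as follows.

```
# hypothetical resolution of the vars defined above
# l_node_syscon_auth_mounts_l -> the single auth mount from openshift_node/defaults/main.yml
# l_node_syscon_add_mounts_l  -> the same list (union with the empty user-provided list)
# l_node_syscon_add_mounts / ADDTL_MOUNTS ->
#   ,{"type": "bind", "source": "/var/lib/origin/.docker", "destination": "/root/.docker", "options": ["ro"]}
```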
diff --git a/roles/openshift_openstack/templates/heat_stack.yaml.j2 b/roles/openshift_openstack/templates/heat_stack.yaml.j2
index 1be5d3a62..8e7c6288a 100644
--- a/roles/openshift_openstack/templates/heat_stack.yaml.j2
+++ b/roles/openshift_openstack/templates/heat_stack.yaml.j2
@@ -523,7 +523,7 @@ resources:
floating_network:
if:
- no_floating
- - null
+ - ''
- {{ openshift_openstack_external_network_name }}
{% if openshift_openstack_provider_network_name %}
attach_float_net: false
@@ -589,8 +589,13 @@ resources:
secgrp:
- { get_resource: lb-secgrp }
- { get_resource: common-secgrp }
-{% if not openshift_openstack_provider_network_name %}
- floating_network: {{ openshift_openstack_external_network_name }}
+ floating_network:
+ if:
+ - no_floating
+ - ''
+ - {{ openshift_openstack_external_network_name }}
+{% if openshift_openstack_provider_network_name %}
+ attach_float_net: false
{% endif %}
volume_size: {{ openshift_openstack_lb_volume_size }}
{% if not openshift_openstack_provider_network_name %}
@@ -655,7 +660,7 @@ resources:
floating_network:
if:
- no_floating
- - null
+ - ''
- {{ openshift_openstack_external_network_name }}
{% if openshift_openstack_provider_network_name %}
attach_float_net: false
@@ -725,7 +730,7 @@ resources:
floating_network:
if:
- no_floating
- - null
+ - ''
- {{ openshift_openstack_external_network_name }}
{% if openshift_openstack_provider_network_name %}
attach_float_net: false
@@ -792,8 +797,13 @@ resources:
{% endif %}
- { get_resource: infra-secgrp }
- { get_resource: common-secgrp }
-{% if not openshift_openstack_provider_network_name %}
- floating_network: {{ openshift_openstack_external_network_name }}
+ floating_network:
+ if:
+ - no_floating
+ - ''
+ - {{ openshift_openstack_external_network_name }}
+{% if openshift_openstack_provider_network_name %}
+ attach_float_net: false
{% endif %}
volume_size: {{ openshift_openstack_infra_volume_size }}
{% if openshift_openstack_infra_server_group_policies|length > 0 %}
diff --git a/roles/openshift_openstack/templates/heat_stack_server.yaml.j2 b/roles/openshift_openstack/templates/heat_stack_server.yaml.j2
index 1e73c9e1c..29b09f3c9 100644
--- a/roles/openshift_openstack/templates/heat_stack_server.yaml.j2
+++ b/roles/openshift_openstack/templates/heat_stack_server.yaml.j2
@@ -102,13 +102,11 @@ parameters:
label: Attach-float-net
description: A switch for floating network port connection
-{% if not openshift_openstack_provider_network_name %}
floating_network:
type: string
default: ''
label: Floating network
description: Network to allocate floating IP from
-{% endif %}
availability_zone:
type: string
diff --git a/roles/openshift_web_console/defaults/main.yml b/roles/openshift_web_console/defaults/main.yml
index 4f395398c..c747f73a8 100644
--- a/roles/openshift_web_console/defaults/main.yml
+++ b/roles/openshift_web_console/defaults/main.yml
@@ -1,3 +1,2 @@
---
-# TODO: This is temporary and will be updated to use taints and tolerations so that the console runs on the masters
-openshift_web_console_nodeselector: {"region":"infra"}
+openshift_web_console_nodeselector: "{{ openshift_hosted_infra_selector | default('region=infra') | map_from_pairs }}"
diff --git a/roles/openshift_web_console/tasks/install.yml b/roles/openshift_web_console/tasks/install.yml
index de852e80b..ead62799a 100644
--- a/roles/openshift_web_console/tasks/install.yml
+++ b/roles/openshift_web_console/tasks/install.yml
@@ -21,7 +21,7 @@
node_selector:
- ""
-- name: Make temp directory for the web console config files
+- name: Make temp directory for web console templates
command: mktemp -d /tmp/console-ansible-XXXXXX
register: mktemp
changed_when: False
@@ -31,7 +31,7 @@
cp {{ openshift.common.config_base }}/master//admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
changed_when: false
-- name: Copy the web console config template to temp directory
+- name: Copy web console templates to temp directory
copy:
src: "{{ __console_files_location }}/{{ item }}"
dest: "{{ mktemp.stdout }}/{{ item }}"
@@ -40,31 +40,87 @@
- "{{ __console_rbac_file }}"
- "{{ __console_config_file }}"
-- name: Update the web console config properties
- yedit:
- src: "{{ mktemp.stdout }}/{{ __console_config_file }}"
- edits:
- - key: clusterInfo#consolePublicURL
- # Must have a trailing slash
- value: "{{ openshift.master.public_console_url }}/"
- - key: clusterInfo#masterPublicURL
- value: "{{ openshift.master.public_api_url }}"
- - key: clusterInfo#logoutPublicURL
- value: "{{ openshift.master.logout_url | default('') }}"
- - key: features#inactivityTimeoutMinutes
- value: "{{ openshift_web_console_inactivity_timeout_minutes | default(0) }}"
- - key: extensions#scriptURLs
- value: "{{ openshift_web_console_extension_script_urls | default([]) }}"
- - key: extensions#stylesheetURLs
- value: "{{ openshift_web_console_extension_stylesheet_urls | default([]) }}"
- - key: extensions#properties
- value: "{{ openshift_web_console_extension_properties | default({}) }}"
- separator: '#'
- state: present
+# Check if an existing webconsole-config config map exists. If so, use those
+# contents so we don't overwrite changes.
+- name: Read the existing web console config map
+ oc_configmap:
+ namespace: openshift-web-console
+ name: webconsole-config
+ state: list
+ register: webconsole_config_map
+
+- set_fact:
+ existing_config_map_data: "{{ webconsole_config_map.results.results[0].data | default({}) }}"
+
+- name: Copy the existing web console config to temp directory
+ copy:
+ content: "{{ existing_config_map_data['webconsole-config.yaml'] }}"
+ dest: "{{ mktemp.stdout }}/{{ __console_config_file }}"
+ when: existing_config_map_data['webconsole-config.yaml'] is defined
+
+# Generate a new config when a config map is not defined.
+- when: existing_config_map_data['webconsole-config.yaml'] is not defined
+ block:
+ # Migrate the previous master-config.yaml asset config if it exists into the new
+ # web console config config map.
+ - name: Read existing assetConfig in master-config.yaml
+ slurp:
+ src: "{{ openshift.common.config_base }}/master/master-config.yaml"
+ register: master_config_output
+
+ - set_fact:
+ config_to_migrate: "{{ master_config_output.content | b64decode | from_yaml }}"
+
+ # Update properties in the config template based on inventory vars when the
+ # asset config does not exist.
+ - name: Set web console config properties from inventory variables
+ yedit:
+ src: "{{ mktemp.stdout }}/{{ __console_config_file }}"
+ edits:
+ - key: clusterInfo#consolePublicURL
+ # Must have a trailing slash
+ value: "{{ openshift.master.public_console_url }}/"
+ - key: clusterInfo#masterPublicURL
+ value: "{{ openshift.master.public_api_url }}"
+ - key: clusterInfo#logoutPublicURL
+ value: "{{ openshift.master.logout_url | default('') }}"
+ - key: features#inactivityTimeoutMinutes
+ value: "{{ openshift_web_console_inactivity_timeout_minutes | default(0) }}"
+ - key: extensions#scriptURLs
+ value: "{{ openshift_web_console_extension_script_urls | default([]) }}"
+ - key: extensions#stylesheetURLs
+ value: "{{ openshift_web_console_extension_stylesheet_urls | default([]) }}"
+ - key: extensions#properties
+ value: "{{ openshift_web_console_extension_properties | default({}) }}"
+ separator: '#'
+ state: present
+ when: config_to_migrate.assetConfig is not defined
+
+ - name: Migrate assetConfig from master-config.yaml
+ yedit:
+ src: "{{ mktemp.stdout }}/{{ __console_config_file }}"
+ edits:
+ - key: clusterInfo#consolePublicURL
+ value: "{{ config_to_migrate.assetConfig.publicURL }}"
+ - key: clusterInfo#masterPublicURL
+ value: "{{ config_to_migrate.assetConfig.masterPublicURL }}"
+ - key: clusterInfo#logoutPublicURL
+ value: "{{ config_to_migrate.assetConfig.logoutURL | default('') }}"
+ - key: clusterInfo#metricsPublicURL
+ value: "{{ config_to_migrate.assetConfig.metricsPublicURL | default('') }}"
+ - key: clusterInfo#loggingPublicURL
+ value: "{{ config_to_migrate.assetConfig.loggingPublicURL | default('') }}"
+ - key: servingInfo#maxRequestsInFlight
+ value: "{{ config_to_migrate.assetConfig.servingInfo.maxRequestsInFlight | default(0) }}"
+ - key: servingInfo#requestTimeoutSeconds
+ value: "{{ config_to_migrate.assetConfig.servingInfo.requestTimeoutSeconds | default(0) }}"
+ separator: '#'
+ state: present
+ when: config_to_migrate.assetConfig is defined
- slurp:
src: "{{ mktemp.stdout }}/{{ __console_config_file }}"
- register: config
+ register: updated_console_config
- name: Reconcile with the web console RBAC file
shell: >
@@ -74,7 +130,7 @@
- name: Apply the web console template file
shell: >
{{ openshift_client_binary }} process -f "{{ mktemp.stdout }}/{{ __console_template_file }}"
- --param API_SERVER_CONFIG="{{ config['content'] | b64decode }}"
+ --param API_SERVER_CONFIG="{{ updated_console_config['content'] | b64decode }}"
--param IMAGE="{{ openshift_web_console_prefix }}{{ openshift_web_console_image_name }}:{{ openshift_web_console_version }}"
--param NODE_SELECTOR={{ openshift_web_console_nodeselector | to_json | quote }}
--param REPLICA_COUNT="{{ openshift_web_console_replica_count }}"
diff --git a/roles/template_service_broker/defaults/main.yml b/roles/template_service_broker/defaults/main.yml
index c32872d24..3465832cc 100644
--- a/roles/template_service_broker/defaults/main.yml
+++ b/roles/template_service_broker/defaults/main.yml
@@ -3,4 +3,4 @@
template_service_broker_remove: False
template_service_broker_install: True
openshift_template_service_broker_namespaces: ['openshift']
-template_service_broker_selector: { "region": "infra" }
+template_service_broker_selector: "{{ openshift_hosted_infra_selector | default('region=infra') | map_from_pairs }}"
diff --git a/roles/template_service_broker/tasks/install.yml b/roles/template_service_broker/tasks/install.yml
index 82b211032..4e6ad2ae5 100644
--- a/roles/template_service_broker/tasks/install.yml
+++ b/roles/template_service_broker/tasks/install.yml
@@ -22,6 +22,11 @@
register: mktemp
changed_when: False
+- name: Copy admin client config
+ command: >
+ cp {{ openshift.common.config_base }}/master//admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
+ changed_when: false
+
- copy:
src: "{{ __tsb_files_location }}/{{ item }}"
dest: "{{ mktemp.stdout }}/{{ item }}"
@@ -43,16 +48,18 @@
- name: Apply template file
shell: >
- {{ openshift_client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_template_file }}"
+ {{ openshift_client_binary }} process --config={{ mktemp.stdout }}/admin.kubeconfig
+ -f "{{ mktemp.stdout }}/{{ __tsb_template_file }}"
--param API_SERVER_CONFIG="{{ config['content'] | b64decode }}"
--param IMAGE="{{ template_service_broker_prefix }}{{ template_service_broker_image_name }}:{{ template_service_broker_version }}"
--param NODE_SELECTOR={{ template_service_broker_selector | to_json | quote }}
- | {{ openshift_client_binary }} apply -f -
+ | {{ openshift_client_binary }} apply --config={{ mktemp.stdout }}/admin.kubeconfig -f -
# reconcile with rbac
- name: Reconcile with RBAC file
shell: >
- {{ openshift_client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_rbac_file }}" | {{ openshift_client_binary }} auth reconcile -f -
+ {{ openshift_client_binary }} process --config={{ mktemp.stdout }}/admin.kubeconfig -f "{{ mktemp.stdout }}/{{ __tsb_rbac_file }}"
+ | {{ openshift_client_binary }} auth reconcile --config={{ mktemp.stdout }}/admin.kubeconfig -f -
# Check that the TSB is running
- name: Verify that TSB is running
@@ -79,7 +86,7 @@
# Register with broker
- name: Register TSB with broker
shell: >
- {{ openshift_client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_broker_file }}" --param CA_BUNDLE="{{ __ca_bundle.content }}" | {{ openshift_client_binary }} apply -f -
+ {{ openshift_client_binary }} process --config={{ mktemp.stdout }}/admin.kubeconfig -f "{{ mktemp.stdout }}/{{ __tsb_broker_file }}" --param CA_BUNDLE="{{ __ca_bundle.content }}" | {{ openshift_client_binary }} apply --config={{ mktemp.stdout }}/admin.kubeconfig -f -
- file:
state: absent
diff --git a/roles/template_service_broker/tasks/remove.yml b/roles/template_service_broker/tasks/remove.yml
index 767e8ddc1..48dc1327e 100644
--- a/roles/template_service_broker/tasks/remove.yml
+++ b/roles/template_service_broker/tasks/remove.yml
@@ -3,6 +3,11 @@
register: mktemp
changed_when: False
+- name: Copy admin client config
+ command: >
+ cp {{ openshift.common.config_base }}/master//admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
+ changed_when: false
+
- copy:
src: "{{ __tsb_files_location }}/{{ item }}"
dest: "{{ mktemp.stdout }}/{{ item }}"
@@ -12,11 +17,11 @@
- name: Delete TSB broker
shell: >
- {{ openshift_client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_broker_file }}" | {{ openshift_client_binary }} delete --ignore-not-found -f -
+ {{ openshift_client_binary }} process --config={{ mktemp.stdout }}/admin.kubeconfig -f "{{ mktemp.stdout }}/{{ __tsb_broker_file }}" | {{ openshift_client_binary }} delete --config={{ mktemp.stdout }}/admin.kubeconfig --ignore-not-found -f -
- name: Delete TSB objects
shell: >
- {{ openshift_client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_template_file }}" | {{ openshift_client_binary }} delete --ignore-not-found -f -
+ {{ openshift_client_binary }} process --config={{ mktemp.stdout }}/admin.kubeconfig -f "{{ mktemp.stdout }}/{{ __tsb_template_file }}" | {{ openshift_client_binary }} delete --config={{ mktemp.stdout }}/admin.kubeconfig --ignore-not-found -f -
- name: empty out tech preview extension file for service console UI
copy:
diff --git a/utils/src/ooinstall/openshift_ansible.py b/utils/src/ooinstall/openshift_ansible.py
index 216664cd0..84a76fa53 100644
--- a/utils/src/ooinstall/openshift_ansible.py
+++ b/utils/src/ooinstall/openshift_ansible.py
@@ -122,7 +122,7 @@ def write_inventory_vars(base_inventory, lb):
if CFG.deployment.variables['ansible_ssh_user'] != 'root':
base_inventory.write('ansible_become=yes\n')
- base_inventory.write('openshift_override_hostname_check=true\n')
+ base_inventory.write('openshift_hostname_check=false\n')
if lb is not None:
base_inventory.write("openshift_master_cluster_hostname={}\n".format(lb.hostname))