Diffstat (limited to 'roles')
-rw-r--r--  roles/docker/tasks/main.yml | 18
-rw-r--r--  roles/etcd/defaults/main.yaml | 11
-rw-r--r--  roles/etcd/meta/main.yml | 1
-rw-r--r--  roles/etcd/tasks/main.yml | 39
-rw-r--r--  roles/etcd/templates/etcd.conf.j2 | 12
-rw-r--r--  roles/etcd_ca/tasks/main.yml | 2
-rw-r--r--  roles/etcd_certificates/tasks/client.yml | 14
-rw-r--r--  roles/etcd_certificates/tasks/main.yml | 4
-rw-r--r--  roles/etcd_certificates/tasks/server.yml | 24
-rw-r--r--  roles/etcd_common/defaults/main.yml | 8
-rw-r--r--  roles/etcd_common/tasks/main.yml | 13
-rw-r--r--  roles/etcd_common/templates/host_int_map.j2 | 13
-rw-r--r--  roles/flannel/handlers/main.yml | 4
-rw-r--r--  roles/flannel/tasks/main.yml | 12
-rw-r--r--  roles/flannel_register/README.md | 2
-rw-r--r--  roles/flannel_register/defaults/main.yaml | 2
-rw-r--r--  roles/flannel_register/tasks/main.yml | 6
-rw-r--r--  roles/kube_nfs_volumes/README.md | 2
-rw-r--r--  roles/nuage_master/files/serviceaccount.sh | 63
-rw-r--r--  roles/nuage_master/handlers/main.yaml | 2
-rw-r--r--  roles/nuage_master/tasks/main.yaml | 14
-rw-r--r--  roles/nuage_master/tasks/serviceaccount.yml | 51
-rw-r--r--  roles/nuage_master/templates/nuage-openshift-monitor.j2 | 6
-rw-r--r--  roles/nuage_master/vars/main.yaml | 17
-rw-r--r--  roles/nuage_node/handlers/main.yaml | 4
-rw-r--r--  roles/nuage_node/tasks/main.yaml | 14
-rw-r--r--  roles/nuage_node/vars/main.yaml | 2
-rw-r--r--  roles/openshift_builddefaults/meta/main.yml | 15
-rw-r--r--  roles/openshift_builddefaults/tasks/main.yml | 24
-rw-r--r--  roles/openshift_builddefaults/vars/main.yml | 15
-rw-r--r--  roles/openshift_cloud_provider/templates/openstack.conf.j2 | 7
-rw-r--r--  roles/openshift_cluster_metrics/tasks/main.yml | 1
-rw-r--r--  roles/openshift_common/README.md | 1
-rw-r--r--  roles/openshift_common/tasks/main.yml | 1
-rw-r--r--  roles/openshift_docker/tasks/main.yml | 9
-rw-r--r--  roles/openshift_docker_facts/tasks/main.yml | 3
-rw-r--r--  roles/openshift_etcd/meta/main.yml | 2
-rw-r--r--  roles/openshift_etcd_certificates/meta/main.yml | 16
-rw-r--r--  roles/openshift_etcd_facts/meta/main.yml | 15
-rw-r--r--  roles/openshift_etcd_facts/vars/main.yml | 5
-rwxr-xr-x  roles/openshift_examples/examples-sync.sh | 4
-rw-r--r--  roles/openshift_examples/files/examples/v1.2/db-templates/mongodb-persistent-template.json | 4
-rw-r--r--  roles/openshift_examples/files/examples/v1.2/db-templates/mysql-persistent-template.json | 4
-rw-r--r--  roles/openshift_examples/files/examples/v1.2/db-templates/postgresql-persistent-template.json | 4
-rw-r--r--  roles/openshift_examples/files/examples/v1.2/infrastructure-templates/enterprise/logging-deployer.yaml | 13
-rw-r--r--  roles/openshift_examples/files/examples/v1.2/infrastructure-templates/origin/logging-deployer.yaml | 438
-rw-r--r--  roles/openshift_examples/files/examples/v1.2/infrastructure-templates/origin/metrics-deployer.yaml | 28
-rw-r--r--  roles/openshift_examples/files/examples/v1.2/quickstart-templates/jenkins-persistent-template.json | 4
-rw-r--r--  roles/openshift_examples/files/examples/v1.2/quickstart-templates/nodejs-mongodb.json | 5
-rw-r--r--  roles/openshift_examples/files/examples/v1.2/quickstart-templates/nodejs.json | 5
-rw-r--r--  roles/openshift_expand_partition/README.md | 4
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py | 212
-rw-r--r--  roles/openshift_facts/tasks/main.yml | 26
-rw-r--r--  roles/openshift_hosted/tasks/router.yml | 1
-rw-r--r--  roles/openshift_hosted_logging/tasks/deploy_logging.yaml | 40
-rw-r--r--  roles/openshift_manage_node/tasks/main.yml | 2
-rw-r--r--  roles/openshift_master/defaults/main.yml | 8
-rw-r--r--  roles/openshift_master/meta/main.yml | 1
-rw-r--r--  roles/openshift_master/tasks/main.yml | 45
-rw-r--r--  roles/openshift_master/templates/atomic-openshift-master.j2 | 15
-rw-r--r--  roles/openshift_master/templates/htpasswd.j2 | 5
-rw-r--r--  roles/openshift_master/templates/master.yaml.v1.j2 | 6
-rw-r--r--  roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2 | 15
-rw-r--r--  roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2 | 15
-rw-r--r--  roles/openshift_master_certificates/tasks/main.yml | 8
-rw-r--r--  roles/openshift_master_facts/tasks/main.yml | 11
-rw-r--r--  roles/openshift_master_facts/vars/main.yml | 14
-rw-r--r--  roles/openshift_metrics/README.md | 57
-rw-r--r--  roles/openshift_metrics/meta/main.yaml | 3
-rw-r--r--  roles/openshift_metrics/tasks/main.yaml | 57
-rw-r--r--  roles/openshift_metrics/vars/main.yaml | 19
-rw-r--r--  roles/openshift_node/meta/main.yml | 3
-rw-r--r--  roles/openshift_node/tasks/main.yml | 8
-rw-r--r--  roles/openshift_node/templates/node.yaml.v1.j2 | 4
-rw-r--r--  roles/openshift_node_certificates/tasks/main.yml | 6
-rwxr-xr-x  roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh | 55
-rw-r--r--  roles/openshift_node_dnsmasq/handlers/main.yml | 5
-rw-r--r--  roles/openshift_node_dnsmasq/meta/main.yml | 15
-rw-r--r--  roles/openshift_node_dnsmasq/tasks/main.yml | 27
-rw-r--r--  roles/openshift_node_dnsmasq/tasks/network-manager.yml | 9
-rw-r--r--  roles/openshift_node_dnsmasq/tasks/no-network-manager.yml | 2
-rw-r--r--  roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2 | 4
-rw-r--r--  roles/openshift_serviceaccounts/tasks/main.yml | 12
-rw-r--r--  roles/openshift_storage_nfs/defaults/main.yml | 8
-rw-r--r--  roles/openshift_storage_nfs_lvm/README.md | 4
-rw-r--r--  roles/os_firewall/defaults/main.yml | 2
-rw-r--r--  roles/os_firewall/tasks/firewall/firewalld.yml | 12
-rw-r--r--  roles/os_firewall/tasks/firewall/iptables.yml | 6
88 files changed, 1257 insertions, 472 deletions
diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml
index 878d5fea8..e4a31c692 100644
--- a/roles/docker/tasks/main.yml
+++ b/roles/docker/tasks/main.yml
@@ -22,7 +22,7 @@
- name: Install docker
action: "{{ ansible_pkg_mgr }} name=docker{{ '-' + docker_version if docker_version is defined and docker_version != '' else '' }} state=present"
- when: not openshift.common.is_atomic | bool and not docker_version_result | skipped and docker_version_result.stdout | default('0.0', True) | version_compare(docker_version, 'lt')
+ when: not openshift.common.is_atomic | bool and docker_downgrade_result | skipped
# If docker were enabled and started before we downgraded it may have entered a
# failed state. Check for that and clear it if necessary.
@@ -69,6 +69,22 @@
reg_flag: --insecure-registry
notify:
- restart docker
+
+- name: Set Proxy Settings
+ lineinfile:
+ dest: /etc/sysconfig/docker
+ regexp: '^{{ item.reg_conf_var }}=.*$'
+ line: "{{ item.reg_conf_var }}='{{ item.reg_fact_val }}'"
+ state: "{{ 'present' if item.reg_fact_val != '' else 'absent'}}"
+ with_items:
+ - reg_conf_var: HTTP_PROXY
+ reg_fact_val: "{{ docker_http_proxy | default('') }}"
+ - reg_conf_var: HTTPS_PROXY
+ reg_fact_val: "{{ docker_https_proxy | default('') }}"
+ - reg_conf_var: NO_PROXY
+ reg_fact_val: "{{ docker_no_proxy | default('') | join(',') }}"
+ notify:
+ - restart docker
- name: Set various docker options
lineinfile:
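
The new "Set Proxy Settings" task writes one line per variable into /etc/sysconfig/docker and removes the line entirely when the fact is empty (state: absent). A sketch of the resulting fragment, assuming hypothetical proxy values; NO_PROXY is joined from a list:

```
HTTP_PROXY='http://proxy.example.com:3128'
HTTPS_PROXY='http://proxy.example.com:3128'
NO_PROXY='.cluster.local,.example.com'
```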
diff --git a/roles/etcd/defaults/main.yaml b/roles/etcd/defaults/main.yaml
index e6b10cab7..a2212bacd 100644
--- a/roles/etcd/defaults/main.yaml
+++ b/roles/etcd/defaults/main.yaml
@@ -1,6 +1,5 @@
---
-etcd_service: "{{ 'etcd' if not openshift.common.is_containerized | bool else 'etcd_container' }}"
-etcd_interface: "{{ ansible_default_ipv4.interface }}"
+etcd_service: "{{ 'etcd' if not etcd_is_containerized | bool else 'etcd_container' }}"
etcd_client_port: 2379
etcd_peer_port: 2380
etcd_url_scheme: http
@@ -9,10 +8,10 @@ etcd_peer_url_scheme: http
etcd_initial_cluster_state: new
etcd_initial_cluster_token: etcd-cluster-1
-etcd_initial_advertise_peer_urls: "{{ etcd_peer_url_scheme }}://{{ hostvars[inventory_hostname]['ansible_' + etcd_interface]['ipv4']['address'] }}:{{ etcd_peer_port }}"
-etcd_listen_peer_urls: "{{ etcd_peer_url_scheme }}://{{ hostvars[inventory_hostname]['ansible_' + etcd_interface]['ipv4']['address'] }}:{{ etcd_peer_port }}"
-etcd_advertise_client_urls: "{{ etcd_url_scheme }}://{{ hostvars[inventory_hostname]['ansible_' + etcd_interface]['ipv4']['address'] }}:{{ etcd_client_port }}"
-etcd_listen_client_urls: "{{ etcd_url_scheme }}://{{ hostvars[inventory_hostname]['ansible_' + etcd_interface]['ipv4']['address'] }}:{{ etcd_client_port }}"
+etcd_initial_advertise_peer_urls: "{{ etcd_peer_url_scheme }}://{{ etcd_ip }}:{{ etcd_peer_port }}"
+etcd_listen_peer_urls: "{{ etcd_peer_url_scheme }}://{{ etcd_ip }}:{{ etcd_peer_port }}"
+etcd_advertise_client_urls: "{{ etcd_url_scheme }}://{{ etcd_ip }}:{{ etcd_client_port }}"
+etcd_listen_client_urls: "{{ etcd_url_scheme }}://{{ etcd_ip }}:{{ etcd_client_port }}"
etcd_data_dir: /var/lib/etcd/
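
With etcd_interface gone, the URL defaults are computed from etcd_ip alone. Assuming a host with etcd_ip 192.0.2.10 (a hypothetical address) and the default ports and schemes above, the four URLs render as:

```
etcd_initial_advertise_peer_urls: http://192.0.2.10:2380
etcd_listen_peer_urls:            http://192.0.2.10:2380
etcd_advertise_client_urls:       http://192.0.2.10:2379
etcd_listen_client_urls:          http://192.0.2.10:2379
```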
diff --git a/roles/etcd/meta/main.yml b/roles/etcd/meta/main.yml
index 36906b347..a71b36237 100644
--- a/roles/etcd/meta/main.yml
+++ b/roles/etcd/meta/main.yml
@@ -16,6 +16,5 @@ galaxy_info:
- cloud
- system
dependencies:
-- { role: openshift_docker }
- { role: os_firewall }
- { role: etcd_common }
diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml
index afec6b30b..a798dc973 100644
--- a/roles/etcd/tasks/main.yml
+++ b/roles/etcd/tasks/main.yml
@@ -1,36 +1,35 @@
---
-- fail:
- msg: Interface {{ etcd_interface }} not found
- when: "'ansible_' ~ etcd_interface not in hostvars[inventory_hostname]"
-
-- fail:
- msg: IPv4 address not found for {{ etcd_interface }}
- when: "'ipv4' not in hostvars[inventory_hostname]['ansible_' ~ etcd_interface] or 'address' not in hostvars[inventory_hostname]['ansible_' ~ etcd_interface].ipv4"
+- name: Set hostname and ip facts
+ set_fact:
+ # Store etcd_hostname and etcd_ip such that they will be available
+ # in hostvars. Defaults for these variables are set in etcd_common.
+ etcd_hostname: "{{ etcd_hostname }}"
+ etcd_ip: "{{ etcd_ip }}"
- name: Install etcd
action: "{{ ansible_pkg_mgr }} name=etcd state=present"
- when: not openshift.common.is_containerized | bool
+ when: not etcd_is_containerized | bool
- name: Pull etcd container
command: docker pull {{ openshift.etcd.etcd_image }}
- when: openshift.common.is_containerized | bool
+ when: etcd_is_containerized | bool
- name: Install etcd container service file
template:
dest: "/etc/systemd/system/etcd_container.service"
src: etcd.docker.service
register: install_etcd_result
- when: openshift.common.is_containerized | bool
+ when: etcd_is_containerized | bool
- name: Ensure etcd datadir exists
- when: openshift.common.is_containerized | bool
+ when: etcd_is_containerized | bool
file:
path: "{{ etcd_data_dir }}"
state: directory
mode: 0700
- name: Disable system etcd when containerized
- when: openshift.common.is_containerized | bool
+ when: etcd_is_containerized | bool
service:
name: etcd
state: stopped
@@ -42,27 +41,27 @@
changed_when: false
- name: Mask system etcd when containerized
- when: openshift.common.is_containerized | bool and 'LoadState=not-found' not in etcd_show.stdout
+ when: etcd_is_containerized | bool and 'LoadState=not-found' not in etcd_show.stdout
command: systemctl mask etcd
- name: Reload systemd units
command: systemctl daemon-reload
- when: openshift.common.is_containerized | bool and ( install_etcd_result | changed )
+ when: etcd_is_containerized | bool and ( install_etcd_result | changed )
- name: Validate permissions on the config dir
file:
path: "{{ etcd_conf_dir }}"
state: directory
- owner: "{{ 'etcd' if not openshift.common.is_containerized | bool else omit }}"
- group: "{{ 'etcd' if not openshift.common.is_containerized | bool else omit }}"
+ owner: "{{ 'etcd' if not etcd_is_containerized | bool else omit }}"
+ group: "{{ 'etcd' if not etcd_is_containerized | bool else omit }}"
mode: 0700
- name: Validate permissions on certificate files
file:
path: "{{ item }}"
mode: 0600
- owner: "{{ 'etcd' if not openshift.common.is_containerized | bool else omit }}"
- group: "{{ 'etcd' if not openshift.common.is_containerized | bool else omit }}"
+ owner: "{{ 'etcd' if not etcd_is_containerized | bool else omit }}"
+ group: "{{ 'etcd' if not etcd_is_containerized | bool else omit }}"
when: etcd_url_scheme == 'https'
with_items:
- "{{ etcd_ca_file }}"
@@ -73,8 +72,8 @@
file:
path: "{{ item }}"
mode: 0600
- owner: "{{ 'etcd' if not openshift.common.is_containerized | bool else omit }}"
- group: "{{ 'etcd' if not openshift.common.is_containerized | bool else omit }}"
+ owner: "{{ 'etcd' if not etcd_is_containerized | bool else omit }}"
+ group: "{{ 'etcd' if not etcd_is_containerized | bool else omit }}"
when: etcd_peer_url_scheme == 'https'
with_items:
- "{{ etcd_peer_ca_file }}"
diff --git a/roles/etcd/templates/etcd.conf.j2 b/roles/etcd/templates/etcd.conf.j2
index 28816fd87..cd048ec60 100644
--- a/roles/etcd/templates/etcd.conf.j2
+++ b/roles/etcd/templates/etcd.conf.j2
@@ -1,15 +1,15 @@
{% macro initial_cluster() -%}
-{% for host in groups[etcd_peers_group] -%}
+{% for host in etcd_peers -%}
{% if loop.last -%}
-{{ host }}={{ etcd_peer_url_scheme }}://{{ etcd_host_int_map[host].interface.ipv4.address }}:{{ etcd_peer_port }}
+{{ hostvars[host].etcd_hostname }}={{ etcd_peer_url_scheme }}://{{ hostvars[host].etcd_ip }}:{{ etcd_peer_port }}
{%- else -%}
-{{ host }}={{ etcd_peer_url_scheme }}://{{ etcd_host_int_map[host].interface.ipv4.address }}:{{ etcd_peer_port }},
+{{ hostvars[host].etcd_hostname }}={{ etcd_peer_url_scheme }}://{{ hostvars[host].etcd_ip }}:{{ etcd_peer_port }},
{%- endif -%}
{% endfor -%}
{% endmacro -%}
-{% if groups[etcd_peers_group] and groups[etcd_peers_group] | length > 1 %}
-ETCD_NAME={{ inventory_hostname }}
+{% if etcd_peers | default([]) | length > 1 %}
+ETCD_NAME={{ etcd_hostname }}
ETCD_LISTEN_PEER_URLS={{ etcd_listen_peer_urls }}
{% else %}
ETCD_NAME=default
@@ -23,7 +23,7 @@ ETCD_LISTEN_CLIENT_URLS={{ etcd_listen_client_urls }}
#ETCD_MAX_WALS=5
#ETCD_CORS=
-{% if groups[etcd_peers_group] and groups[etcd_peers_group] | length > 1 %}
+{% if etcd_peers | default([]) | length > 1 %}
#[cluster]
ETCD_INITIAL_ADVERTISE_PEER_URLS={{ etcd_initial_advertise_peer_urls }}
ETCD_INITIAL_CLUSTER={{ initial_cluster() }}
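
The reworked initial_cluster() macro reads each peer's etcd_hostname and etcd_ip out of hostvars, which is why the etcd role now publishes them via set_fact on every peer first. For a hypothetical three-member cluster the rendered lines would look roughly like:

```
ETCD_NAME=etcd1.example.com
ETCD_INITIAL_CLUSTER=etcd1.example.com=http://192.0.2.10:2380,etcd2.example.com=http://192.0.2.11:2380,etcd3.example.com=http://192.0.2.12:2380
```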
diff --git a/roles/etcd_ca/tasks/main.yml b/roles/etcd_ca/tasks/main.yml
index cf7bc00a3..e1bb9baed 100644
--- a/roles/etcd_ca/tasks/main.yml
+++ b/roles/etcd_ca/tasks/main.yml
@@ -1,7 +1,7 @@
---
- name: Install openssl
action: "{{ ansible_pkg_mgr }} name=openssl state=present"
- when: not openshift.common.is_atomic | bool
+ when: not etcd_is_atomic | bool
- file:
path: "{{ item }}"
diff --git a/roles/etcd_certificates/tasks/client.yml b/roles/etcd_certificates/tasks/client.yml
index 6aa4883e0..b497a46c0 100644
--- a/roles/etcd_certificates/tasks/client.yml
+++ b/roles/etcd_certificates/tasks/client.yml
@@ -4,7 +4,7 @@
path: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}"
state: directory
mode: 0700
- with_items: etcd_needing_client_certs
+ with_items: "{{ etcd_needing_client_certs | default([]) }}"
- name: Create the client csr
command: >
@@ -12,14 +12,14 @@
-config {{ etcd_openssl_conf }}
-out {{ item.etcd_cert_prefix }}client.csr
-reqexts {{ etcd_req_ext }} -batch -nodes
- -subj /CN={{ item.openshift.common.hostname }}
+ -subj /CN={{ item.etcd_hostname }}
args:
chdir: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}"
creates: "{{ etcd_generated_certs_dir ~ '/' ~ item.etcd_cert_subdir ~ '/'
~ item.etcd_cert_prefix ~ 'client.csr' }}"
environment:
- SAN: "IP:{{ item.openshift.common.ip }}"
- with_items: etcd_needing_client_certs
+ SAN: "IP:{{ item.etcd_ip }}"
+ with_items: "{{ etcd_needing_client_certs | default([]) }}"
- name: Sign and create the client crt
command: >
@@ -32,11 +32,11 @@
creates: "{{ etcd_generated_certs_dir ~ '/' ~ item.etcd_cert_subdir ~ '/'
~ item.etcd_cert_prefix ~ 'client.crt' }}"
environment:
- SAN: "IP:{{ item.openshift.common.ip }}"
- with_items: etcd_needing_client_certs
+ SAN: "IP:{{ item.etcd_ip }}"
+ with_items: "{{ etcd_needing_client_certs | default([]) }}"
- file:
src: "{{ etcd_ca_cert }}"
dest: "{{ etcd_generated_certs_dir}}/{{ item.etcd_cert_subdir }}/{{ item.etcd_cert_prefix }}ca.crt"
state: hard
- with_items: etcd_needing_client_certs
+ with_items: "{{ etcd_needing_client_certs | default([]) }}"
diff --git a/roles/etcd_certificates/tasks/main.yml b/roles/etcd_certificates/tasks/main.yml
index 3bb715943..17092ca58 100644
--- a/roles/etcd_certificates/tasks/main.yml
+++ b/roles/etcd_certificates/tasks/main.yml
@@ -1,6 +1,6 @@
---
- include: client.yml
- when: etcd_needing_client_certs is defined and etcd_needing_client_certs
+ when: etcd_needing_client_certs | default([]) | length > 0
- include: server.yml
- when: etcd_needing_server_certs is defined and etcd_needing_server_certs
+ when: etcd_needing_server_certs | default([]) | length > 0
diff --git a/roles/etcd_certificates/tasks/server.yml b/roles/etcd_certificates/tasks/server.yml
index 3499dcbef..934b8b805 100644
--- a/roles/etcd_certificates/tasks/server.yml
+++ b/roles/etcd_certificates/tasks/server.yml
@@ -4,7 +4,7 @@
path: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}"
state: directory
mode: 0700
- with_items: etcd_needing_server_certs
+ with_items: "{{ etcd_needing_server_certs | default([]) }}"
- name: Create the server csr
command: >
@@ -12,14 +12,14 @@
-config {{ etcd_openssl_conf }}
-out {{ item.etcd_cert_prefix }}server.csr
-reqexts {{ etcd_req_ext }} -batch -nodes
- -subj /CN={{ item.openshift.common.hostname }}
+ -subj /CN={{ item.etcd_hostname }}
args:
chdir: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}"
creates: "{{ etcd_generated_certs_dir ~ '/' ~ item.etcd_cert_subdir ~ '/'
~ item.etcd_cert_prefix ~ 'server.csr' }}"
environment:
- SAN: "IP:{{ etcd_host_int_map[item.inventory_hostname].interface.ipv4.address }}"
- with_items: etcd_needing_server_certs
+ SAN: "IP:{{ item.etcd_ip }}"
+ with_items: "{{ etcd_needing_server_certs | default([]) }}"
- name: Sign and create the server crt
command: >
@@ -32,8 +32,8 @@
creates: "{{ etcd_generated_certs_dir ~ '/' ~ item.etcd_cert_subdir ~ '/'
~ item.etcd_cert_prefix ~ 'server.crt' }}"
environment:
- SAN: "IP:{{ etcd_host_int_map[item.inventory_hostname].interface.ipv4.address }}"
- with_items: etcd_needing_server_certs
+ SAN: "IP:{{ item.etcd_ip }}"
+ with_items: "{{ etcd_needing_server_certs | default([]) }}"
- name: Create the peer csr
command: >
@@ -41,14 +41,14 @@
-config {{ etcd_openssl_conf }}
-out {{ item.etcd_cert_prefix }}peer.csr
-reqexts {{ etcd_req_ext }} -batch -nodes
- -subj /CN={{ item.openshift.common.hostname }}
+ -subj /CN={{ item.etcd_hostname }}
args:
chdir: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}"
creates: "{{ etcd_generated_certs_dir ~ '/' ~ item.etcd_cert_subdir ~ '/'
~ item.etcd_cert_prefix ~ 'peer.csr' }}"
environment:
- SAN: "IP:{{ etcd_host_int_map[item.inventory_hostname].interface.ipv4.address }}"
- with_items: etcd_needing_server_certs
+ SAN: "IP:{{ item.etcd_ip }}"
+ with_items: "{{ etcd_needing_server_certs | default([]) }}"
- name: Sign and create the peer crt
command: >
@@ -61,11 +61,11 @@
creates: "{{ etcd_generated_certs_dir ~ '/' ~ item.etcd_cert_subdir ~ '/'
~ item.etcd_cert_prefix ~ 'peer.crt' }}"
environment:
- SAN: "IP:{{ etcd_host_int_map[item.inventory_hostname].interface.ipv4.address }}"
- with_items: etcd_needing_server_certs
+ SAN: "IP:{{ item.etcd_ip }}"
+ with_items: "{{ etcd_needing_server_certs | default([]) }}"
- file:
src: "{{ etcd_ca_cert }}"
dest: "{{ etcd_generated_certs_dir}}/{{ item.etcd_cert_subdir }}/{{ item.etcd_cert_prefix }}ca.crt"
state: hard
- with_items: etcd_needing_server_certs
+ with_items: "{{ etcd_needing_server_certs | default([]) }}"
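
Each entry in etcd_needing_server_certs (and its client counterpart) is now expected to carry the per-host fields the tasks dereference directly, instead of the old host/interface map. A minimal sketch of one entry; the field names come from the task references above, the values are hypothetical:

```yaml
etcd_needing_server_certs:
- etcd_cert_subdir: etcd-etcd1.example.com
  etcd_cert_prefix: ''
  etcd_hostname: etcd1.example.com  # used as the CSR CN
  etcd_ip: 192.0.2.10               # used as the SAN IP
```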
diff --git a/roles/etcd_common/defaults/main.yml b/roles/etcd_common/defaults/main.yml
index 3af509448..1ff1d6ef8 100644
--- a/roles/etcd_common/defaults/main.yml
+++ b/roles/etcd_common/defaults/main.yml
@@ -1,6 +1,4 @@
---
-etcd_peers_group: oo_etcd_to_config
-
# etcd server vars
etcd_conf_dir: /etc/etcd
etcd_ca_file: "{{ etcd_conf_dir }}/ca.crt"
@@ -28,3 +26,9 @@ etcd_ca_db: "{{ etcd_ca_dir }}/index.txt"
etcd_ca_serial: "{{ etcd_ca_dir }}/serial"
etcd_ca_crl_number: "{{ etcd_ca_dir }}/crlnumber"
etcd_ca_default_days: 365
+
+# etcd server & certificate vars
+etcd_hostname: "{{ inventory_hostname }}"
+etcd_ip: "{{ ansible_default_ipv4.address }}"
+etcd_is_atomic: False
+etcd_is_containerized: False
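
Because etcd_hostname and etcd_ip are now ordinary role defaults, a deployment that should not bind to ansible_default_ipv4 can simply override them per host rather than naming an interface. A hypothetical host_vars sketch:

```yaml
# host_vars/etcd1.example.com.yml (hypothetical override)
etcd_ip: 192.0.2.10
etcd_hostname: etcd1.example.com
```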
diff --git a/roles/etcd_common/tasks/main.yml b/roles/etcd_common/tasks/main.yml
deleted file mode 100644
index be75fdab2..000000000
--- a/roles/etcd_common/tasks/main.yml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-- set_fact:
- etcd_host_int_map: "{{ lookup('template', '../templates/host_int_map.j2') | from_yaml }}"
-
-- fail:
- msg: "Interface {{ item.value.etcd_interface }} not found on host {{ item.key }}"
- when: "'etcd_interface' in item.value and 'interface' not in item.value"
- with_dict: etcd_host_int_map | default({})
-
-- fail:
- msg: IPv4 address not found for {{ item.value.interface.device }} on host {{ item.key }}
- when: "'ipv4' not in item.value.interface or 'address' not in item.value.interface.ipv4"
- with_dict: etcd_host_int_map | default({})
diff --git a/roles/etcd_common/templates/host_int_map.j2 b/roles/etcd_common/templates/host_int_map.j2
deleted file mode 100644
index 9c9c76413..000000000
--- a/roles/etcd_common/templates/host_int_map.j2
+++ /dev/null
@@ -1,13 +0,0 @@
----
-{% for host in groups[etcd_peers_group] %}
-{% set entry=hostvars[host] %}
-{{ entry.inventory_hostname }}:
-{% if 'etcd_interface' in entry %}
- etcd_interface: {{ entry.etcd_interface }}
-{% if entry.etcd_interface in entry.ansible_interfaces %}
- interface: {{ entry['ansible_' ~ entry.etcd_interface] | to_json }}
-{% endif %}
-{% else %}
- interface: {{ entry['ansible_' ~ entry.ansible_default_ipv4.interface] | to_json }}
-{% endif %}
-{% endfor %}
diff --git a/roles/flannel/handlers/main.yml b/roles/flannel/handlers/main.yml
index f9b9ae7f1..981ea5c7a 100644
--- a/roles/flannel/handlers/main.yml
+++ b/roles/flannel/handlers/main.yml
@@ -1,8 +1,8 @@
---
- name: restart flanneld
- sudo: true
+ become: yes
service: name=flanneld state=restarted
- name: restart docker
- sudo: true
+ become: yes
service: name=docker state=restarted
diff --git a/roles/flannel/tasks/main.yml b/roles/flannel/tasks/main.yml
index aa27b674e..6b6dfb423 100644
--- a/roles/flannel/tasks/main.yml
+++ b/roles/flannel/tasks/main.yml
@@ -1,11 +1,11 @@
---
- name: Install flannel
- sudo: true
+ become: yes
action: "{{ ansible_pkg_mgr }} name=flannel state=present"
when: not openshift.common.is_containerized | bool
- name: Set flannel etcd url
- sudo: true
+ become: yes
lineinfile:
dest: /etc/sysconfig/flanneld
backrefs: yes
@@ -13,7 +13,7 @@
line: '\1{{ etcd_hosts|join(",") }}'
- name: Set flannel etcd key
- sudo: true
+ become: yes
lineinfile:
dest: /etc/sysconfig/flanneld
backrefs: yes
@@ -21,7 +21,7 @@
line: '\1{{ flannel_etcd_key }}'
- name: Set flannel options
- sudo: true
+ become: yes
lineinfile:
dest: /etc/sysconfig/flanneld
backrefs: yes
@@ -29,7 +29,7 @@
line: '\1--iface {{ flannel_interface }} --etcd-cafile={{ etcd_peer_ca_file }} --etcd-keyfile={{ etcd_peer_key_file }} --etcd-certfile={{ etcd_peer_cert_file }}'
- name: Enable flanneld
- sudo: true
+ become: yes
service:
name: flanneld
state: started
@@ -37,7 +37,7 @@
register: start_result
- name: Remove docker bridge ip
- sudo: true
+ become: yes
shell: ip a del `ip a show docker0 | grep "inet[[:space:]]" | awk '{print $2}'` dev docker0
notify:
- restart docker
diff --git a/roles/flannel_register/README.md b/roles/flannel_register/README.md
index ba7541ab1..623c4c7cf 100644
--- a/roles/flannel_register/README.md
+++ b/roles/flannel_register/README.md
@@ -14,7 +14,7 @@ Role Variables
| Name | Default value | Description |
|---------------------|----------------------------------------------------|-------------------------------------------------|
-| flannel_network | {{ openshift.master.portal_net }} or 172.16.1.1/16 | interface to use for inter-host communication |
+| flannel_network | {{ openshift.common.portal_net }} or 172.16.1.1/16 | interface to use for inter-host communication |
| flannel_min_network | {{ min_network }} or 172.16.5.0 | beginning of IP range for the subnet allocation |
| flannel_subnet_len | /openshift.com/network | size of the subnet allocated to each host |
| flannel_etcd_key | /openshift.com/network | etcd prefix |
diff --git a/roles/flannel_register/defaults/main.yaml b/roles/flannel_register/defaults/main.yaml
index 269d1a17c..b1279aa88 100644
--- a/roles/flannel_register/defaults/main.yaml
+++ b/roles/flannel_register/defaults/main.yaml
@@ -1,5 +1,5 @@
---
-flannel_network: "{{ openshift.master.portal_net | default('172.30.0.0/16', true) }}"
+flannel_network: "{{ openshift.common.portal_net | default('172.30.0.0/16', true) }}"
flannel_min_network: 172.30.5.0
flannel_subnet_len: 24
flannel_etcd_key: /openshift.com/network
diff --git a/roles/flannel_register/tasks/main.yml b/roles/flannel_register/tasks/main.yml
index 1629157c8..845b7ef40 100644
--- a/roles/flannel_register/tasks/main.yml
+++ b/roles/flannel_register/tasks/main.yml
@@ -1,14 +1,14 @@
---
- name: Assures /etc/flannel dir exists
- sudo: true
+ become: yes
file: path=/etc/flannel state=directory
- name: Generate etcd configuration for etcd
- sudo: true
+ become: yes
template:
src: "flannel-config.json"
dest: "/etc/flannel/config.json"
- name: Insert flannel configuration into etcd
- sudo: true
+ become: yes
command: 'curl -L --cacert "{{ etcd_peer_ca_file }}" --cert "{{ etcd_peer_cert_file }}" --key "{{ etcd_peer_key_file }}" "{{ etcd_hosts[0] }}/v2/keys{{ flannel_etcd_key }}/config" -XPUT --data-urlencode value@/etc/flannel/config.json'
diff --git a/roles/kube_nfs_volumes/README.md b/roles/kube_nfs_volumes/README.md
index 1520f79b2..dd91ad8b1 100644
--- a/roles/kube_nfs_volumes/README.md
+++ b/roles/kube_nfs_volumes/README.md
@@ -94,7 +94,7 @@ partitions.
* Create an ansible playbook, say `setupnfs.yaml`:
```
- hosts: nfsservers
- sudo: yes
+ become: yes
roles:
- role: kube_nfs_volumes
disks: "/dev/sdb,/dev/sdc"
diff --git a/roles/nuage_master/files/serviceaccount.sh b/roles/nuage_master/files/serviceaccount.sh
deleted file mode 100644
index f6fdb8a8d..000000000
--- a/roles/nuage_master/files/serviceaccount.sh
+++ /dev/null
@@ -1,63 +0,0 @@
-#!/bin/bash
-# Parse CLI options
-for i in "$@"; do
- case $i in
- --master-cert-dir=*)
- MASTER_DIR="${i#*=}"
- CA_CERT=${MASTER_DIR}/ca.crt
- CA_KEY=${MASTER_DIR}/ca.key
- CA_SERIAL=${MASTER_DIR}/ca.serial.txt
- ADMIN_FILE=${MASTER_DIR}/admin.kubeconfig
- ;;
- --server=*)
- SERVER="${i#*=}"
- ;;
- --output-cert-dir=*)
- OUTDIR="${i#*=}"
- CONFIG_FILE=${OUTDIR}/nuage.kubeconfig
- ;;
- esac
-done
-
-# If any are missing, print the usage and exit
-if [ -z $SERVER ] || [ -z $OUTDIR ] || [ -z $MASTER_DIR ]; then
- echo "Invalid syntax: $@"
- echo "Usage:"
- echo " $0 --server=<address>:<port> --output-cert-dir=/path/to/output/dir/ --master-cert-dir=/path/to/master/"
- echo "--master-cert-dir: Directory where the master's configuration is held"
- echo "--server: Address of Kubernetes API server (default port is 8443)"
- echo "--output-cert-dir: Directory to put artifacts in"
- echo ""
- echo "All options are required"
- exit 1
-fi
-
-# Login as admin so that we can create the service account
-oc login -u system:admin --config=$ADMIN_FILE || exit 1
-oc project default --config=$ADMIN_FILE
-
-ACCOUNT_CONFIG='
-{
- "apiVersion": "v1",
- "kind": "ServiceAccount",
- "metadata": {
- "name": "nuage"
- }
-}
-'
-
-# Create the account with the included info
-echo $ACCOUNT_CONFIG|oc create --config=$ADMIN_FILE -f -
-
-# Add the cluser-reader role, which allows this service account read access to
-# everything in the cluster except secrets
-oadm policy add-cluster-role-to-user cluster-reader system:serviceaccounts:default:nuage --config=$ADMIN_FILE
-
-# Generate certificates and a kubeconfig for the service account
-oadm create-api-client-config --certificate-authority=${CA_CERT} --client-dir=${OUTDIR} --signer-cert=${CA_CERT} --signer-key=${CA_KEY} --signer-serial=${CA_SERIAL} --user=system:serviceaccounts:default:nuage --master=${SERVER} --public-master=${SERVER} --basename='nuage'
-
-# Verify the finalized kubeconfig
-if ! [ $(oc whoami --config=$CONFIG_FILE) == 'system:serviceaccounts:default:nuage' ]; then
- echo "Service account creation failed!"
- exit 1
-fi
diff --git a/roles/nuage_master/handlers/main.yaml b/roles/nuage_master/handlers/main.yaml
index 5d133cf16..56224cf82 100644
--- a/roles/nuage_master/handlers/main.yaml
+++ b/roles/nuage_master/handlers/main.yaml
@@ -1,6 +1,6 @@
---
- name: restart nuage-openshift-monitor
- sudo: true
+ become: yes
service: name=nuage-openshift-monitor state=restarted
- name: restart master
diff --git a/roles/nuage_master/tasks/main.yaml b/roles/nuage_master/tasks/main.yaml
index abeee3d71..b8eaede3b 100644
--- a/roles/nuage_master/tasks/main.yaml
+++ b/roles/nuage_master/tasks/main.yaml
@@ -1,22 +1,20 @@
---
- name: Create directory /usr/share/nuage-openshift-monitor
- sudo: true
+ become: yes
file: path=/usr/share/nuage-openshift-monitor state=directory
- name: Create the log directory
- sudo: true
+ become: yes
file: path={{ nuage_mon_rest_server_logdir }} state=directory
- name: Install Nuage Openshift Monitor
- sudo: true
+ become: yes
yum: name={{ nuage_openshift_rpm }} state=present
-- name: Run the service account creation script
- sudo: true
- script: serviceaccount.sh --server={{ openshift.master.api_url }} --output-cert-dir={{ cert_output_dir }} --master-cert-dir={{ openshift_master_config_dir }}
+- include: serviceaccount.yml
- name: Download the certs and keys
- sudo: true
+ become: yes
fetch: src={{ cert_output_dir }}/{{ item }} dest=/tmp/{{ item }} flat=yes
with_items:
- ca.crt
@@ -27,7 +25,7 @@
- include: certificates.yml
- name: Create nuage-openshift-monitor.yaml
- sudo: true
+ become: yes
template: src=nuage-openshift-monitor.j2 dest=/usr/share/nuage-openshift-monitor/nuage-openshift-monitor.yaml owner=root mode=0644
notify:
- restart master
diff --git a/roles/nuage_master/tasks/serviceaccount.yml b/roles/nuage_master/tasks/serviceaccount.yml
new file mode 100644
index 000000000..5b4af5824
--- /dev/null
+++ b/roles/nuage_master/tasks/serviceaccount.yml
@@ -0,0 +1,51 @@
+---
+- name: Create temporary directory for admin kubeconfig
+ command: mktemp -u /tmp/openshift-ansible-XXXXXXX.kubeconfig
+ register: nuage_tmp_conf_mktemp
+ changed_when: False
+
+- set_fact:
+ nuage_tmp_conf: "{{ nuage_tmp_conf_mktemp.stdout }}"
+
+- name: Copy Configuration to temporary conf
+ command: >
+ cp {{ openshift.common.config_base }}/master/admin.kubeconfig {{nuage_tmp_conf}}
+ changed_when: false
+
+- name: Create Admin Service Account
+ shell: >
+ echo {{ nuage_service_account_config | to_json | quote }} |
+ {{ openshift.common.client_binary }} create
+ -n default
+ --config={{nuage_tmp_conf}}
+ -f -
+ register: osnuage_create_service_account
+ failed_when: "'already exists' not in osnuage_create_service_account.stderr and osnuage_create_service_account.rc != 0"
+ changed_when: osnuage_create_service_account.rc == 0
+
+- name: Configure role/user permissions
+ command: >
+ {{ openshift.common.admin_binary }} {{item}}
+ --config={{nuage_tmp_conf}}
+ with_items: "{{nuage_tasks}}"
+ register: osnuage_perm_task
+ failed_when: "'already exists' not in osnuage_perm_task.stderr and osnuage_perm_task.rc != 0"
+ changed_when: osnuage_perm_task.rc == 0
+
+- name: Generate the node client config
+ command: >
+ {{ openshift.common.admin_binary }} create-api-client-config
+ --certificate-authority={{ openshift_master_ca_cert }}
+ --client-dir={{ cert_output_dir }}
+ --master={{ openshift.master.api_url }}
+ --public-master={{ openshift.master.api_url }}
+ --signer-cert={{ openshift_master_ca_cert }}
+ --signer-key={{ openshift_master_ca_key }}
+ --signer-serial={{ openshift_master_ca_serial }}
+ --basename='nuage'
+ --user={{ nuage_service_account }}
+
+- name: Clean temporary configuration file
+ command: >
+ rm -f {{nuage_tmp_conf}}
+ changed_when: false
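
Taken together with nuage_service_account and nuage_tasks from vars/main.yaml (shown further below), the "Configure role/user permissions" loop expands to one admin-binary call per task entry; assuming openshift.common.admin_binary resolves to oadm on an RPM install, roughly:

```
oadm policy add-cluster-role-to-user cluster-reader \
  system:serviceaccount:default:nuage \
  --config=/tmp/openshift-ansible-XXXXXXX.kubeconfig
```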
diff --git a/roles/nuage_master/templates/nuage-openshift-monitor.j2 b/roles/nuage_master/templates/nuage-openshift-monitor.j2
index 7228e646b..075de9d9e 100644
--- a/roles/nuage_master/templates/nuage-openshift-monitor.j2
+++ b/roles/nuage_master/templates/nuage-openshift-monitor.j2
@@ -15,6 +15,12 @@ vspVersion: {{ vsp_version }}
enterpriseName: {{ enterprise }}
# Name of the domain in which pods will reside
domainName: {{ domain }}
+# CSP admin user's password
+cspAdminPassword: {{ nuage_master_cspadminpasswd }}
+# Enterprise admin user name
+enterpriseAdminUser: {{ nuage_master_adminusername }}
+# Enterprise admin password
+enterpriseAdminPassword: {{ nuage_master_adminuserpasswd }}
# Location where logs should be saved
log_dir: {{ nuage_mon_rest_server_logdir }}
# Monitor rest server paramters
diff --git a/roles/nuage_master/vars/main.yaml b/roles/nuage_master/vars/main.yaml
index ec4562c77..d3536eb33 100644
--- a/roles/nuage_master/vars/main.yaml
+++ b/roles/nuage_master/vars/main.yaml
@@ -1,4 +1,7 @@
openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
+openshift_master_ca_cert: "{{ openshift_master_config_dir }}/ca.crt"
+openshift_master_ca_key: "{{ openshift_master_config_dir }}/ca.key"
+openshift_master_ca_serial: "{{ openshift_master_config_dir }}/ca.serial.txt"
ca_cert: "{{ openshift_master_config_dir }}/ca.crt"
admin_config: "{{ openshift.common.config_base }}/master/admin.kubeconfig"
cert_output_dir: /usr/share/nuage-openshift-monitor
@@ -15,3 +18,17 @@ nuage_ca_master_rest_server_key: "{{ nuage_mon_rest_server_crt_dir }}/nuageMonSe
nuage_ca_master_rest_server_crt: "{{ nuage_mon_rest_server_crt_dir }}/nuageMonServer.crt"
nuage_master_crt_dir : /usr/share/nuage-openshift-monitor
+nuage_service_account: system:serviceaccount:default:nuage
+
+nuage_service_account_config:
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: nuage
+
+nuage_tasks:
+ - policy add-cluster-role-to-user cluster-reader {{ nuage_service_account }}
+
+nuage_master_cspadminpasswd: ''
+nuage_master_adminusername: 'admin'
+nuage_master_adminuserpasswd: 'admin'
diff --git a/roles/nuage_node/handlers/main.yaml b/roles/nuage_node/handlers/main.yaml
index 25482a845..5f2b97ae2 100644
--- a/roles/nuage_node/handlers/main.yaml
+++ b/roles/nuage_node/handlers/main.yaml
@@ -1,8 +1,8 @@
---
- name: restart vrs
- sudo: true
+ become: yes
service: name=openvswitch state=restarted
- name: restart node
- sudo: true
+ become: yes
service: name={{ openshift.common.service_type }}-node state=restarted
diff --git a/roles/nuage_node/tasks/main.yaml b/roles/nuage_node/tasks/main.yaml
index d7dd53802..1146573d3 100644
--- a/roles/nuage_node/tasks/main.yaml
+++ b/roles/nuage_node/tasks/main.yaml
@@ -1,27 +1,27 @@
---
- name: Install Nuage VRS
- sudo: true
+ become: yes
yum: name={{ vrs_rpm }} state=present
- name: Set the uplink interface
- sudo: true
+ become: yes
lineinfile: dest={{ vrs_config }} regexp=^NETWORK_UPLINK_INTF line='NETWORK_UPLINK_INTF={{ uplink_interface }}'
- name: Set the Active Controller
- sudo: true
+ become: yes
lineinfile: dest={{ vrs_config }} regexp=^ACTIVE_CONTROLLER line='ACTIVE_CONTROLLER={{ vsc_active_ip }}'
- name: Set the Standby Controller
- sudo: true
+ become: yes
lineinfile: dest={{ vrs_config }} regexp=^STANDBY_CONTROLLER line='STANDBY_CONTROLLER={{ vsc_standby_ip }}'
when: vsc_standby_ip is defined
- name: Install plugin rpm
- sudo: true
+ become: yes
yum: name={{ plugin_rpm }} state=present
- name: Copy the certificates and keys
- sudo: true
+ become: yes
copy: src="/tmp/{{ item }}" dest="{{ vsp_openshift_dir }}/{{ item }}"
with_items:
- ca.crt
@@ -32,7 +32,7 @@
- include: certificates.yml
- name: Set the vsp-openshift.yaml
- sudo: true
+ become: yes
template: src=vsp-openshift.j2 dest={{ vsp_openshift_yaml }} owner=root mode=0644
notify:
- restart vrs
diff --git a/roles/nuage_node/vars/main.yaml b/roles/nuage_node/vars/main.yaml
index a4d7052a7..86486259f 100644
--- a/roles/nuage_node/vars/main.yaml
+++ b/roles/nuage_node/vars/main.yaml
@@ -8,7 +8,7 @@ ca_cert: "{{ vsp_openshift_dir }}/ca.crt"
api_server: "{{ openshift_node_master_api_url }}"
nuage_mon_rest_server_port: "{{ nuage_openshift_monitor_rest_server_port | default('9443') }}"
nuage_mon_rest_server_url: "https://{{ openshift_master_cluster_hostname }}:{{ nuage_mon_rest_server_port }}"
-docker_bridge: "docker0"
+docker_bridge: "{{ nuage_docker_bridge | default('docker0') }}"
rest_client_cert: "{{ vsp_openshift_dir }}/nuageMonClient.crt"
rest_client_key: "{{ vsp_openshift_dir }}/nuageMonClient.key"
rest_server_ca_cert: "{{ vsp_openshift_dir }}/nuageMonCA.crt"
diff --git a/roles/openshift_builddefaults/meta/main.yml b/roles/openshift_builddefaults/meta/main.yml
new file mode 100644
index 000000000..422d08400
--- /dev/null
+++ b/roles/openshift_builddefaults/meta/main.yml
@@ -0,0 +1,15 @@
+---
+galaxy_info:
+ author: Scott Dodson
+ description: OpenShift Build Defaults configuration
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 1.9
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+dependencies:
+- role: openshift_facts
diff --git a/roles/openshift_builddefaults/tasks/main.yml b/roles/openshift_builddefaults/tasks/main.yml
new file mode 100644
index 000000000..c82aebe72
--- /dev/null
+++ b/roles/openshift_builddefaults/tasks/main.yml
@@ -0,0 +1,24 @@
+---
+- name: Set builddefaults
+ openshift_facts:
+ role: builddefaults
+ # TODO: add ability to define builddefaults env vars sort of like this
+ # may need to move the config generation to a filter however.
+ # openshift_env: "{{ hostvars[inventory_hostname]
+ # | oo_merge_dicts(hostvars)
+ # | oo_openshift_env }}"
+ # openshift_env_structures:
+ # - 'openshift.builddefaults.env.*'
+ local_facts:
+ http_proxy: "{{ openshift_builddefaults_http_proxy | default(None) }}"
+ https_proxy: "{{ openshift_builddefaults_https_proxy | default(None) }}"
+ no_proxy: "{{ openshift_builddefaults_no_proxy | default(None) }}"
+ git_http_proxy: "{{ openshift_builddefaults_git_http_proxy | default(None) }}"
+ git_https_proxy: "{{ openshift_builddefaults_git_https_proxy | default(None) }}"
+
+- name: Set builddefaults config structure
+ openshift_facts:
+ role: builddefaults
+ local_facts:
+ config: "{{ openshift_builddefaults_json | default(builddefaults_yaml) }}"
+
diff --git a/roles/openshift_builddefaults/vars/main.yml b/roles/openshift_builddefaults/vars/main.yml
new file mode 100644
index 000000000..9727c73a5
--- /dev/null
+++ b/roles/openshift_builddefaults/vars/main.yml
@@ -0,0 +1,15 @@
+---
+builddefaults_yaml:
+ BuildDefaults:
+ configuration:
+ apiVersion: v1
+ kind: BuildDefaultsConfig
+ gitHTTPProxy: "{{ openshift.builddefaults.git_http_proxy | default('', true) }}"
+ gitHTTPSProxy: "{{ openshift.builddefaults.git_https_proxy | default('', true) }}"
+ env:
+ - name: HTTP_PROXY
+ value: "{{ openshift.builddefaults.http_proxy | default('', true) }}"
+ - name: HTTPS_PROXY
+ value: "{{ openshift.builddefaults.https_proxy | default('', true) }}"
+ - name: NO_PROXY
+ value: "{{ openshift.builddefaults.no_proxy | default('', true) | join(',') }}"
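
Rendering builddefaults_yaml with hypothetical proxy facts yields the BuildDefaultsConfig stanza below, which the tasks file then stores as openshift.builddefaults.config unless openshift_builddefaults_json overrides it:

```yaml
BuildDefaults:
  configuration:
    apiVersion: v1
    kind: BuildDefaultsConfig
    gitHTTPProxy: http://proxy.example.com:3128
    gitHTTPSProxy: http://proxy.example.com:3128
    env:
    - name: HTTP_PROXY
      value: http://proxy.example.com:3128
    - name: HTTPS_PROXY
      value: http://proxy.example.com:3128
    - name: NO_PROXY
      value: .cluster.local,.example.com
```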
diff --git a/roles/openshift_cloud_provider/templates/openstack.conf.j2 b/roles/openshift_cloud_provider/templates/openstack.conf.j2
index 1b70edc16..8a06b3a08 100644
--- a/roles/openshift_cloud_provider/templates/openstack.conf.j2
+++ b/roles/openshift_cloud_provider/templates/openstack.conf.j2
@@ -11,7 +11,6 @@ tenant-name = {{ openshift.cloudprovider.openstack.tenant_name }}
region = {{ openshift.cloudprovider.openstack.region }}
{% endif %}
{% if 'lb_subnet_id' in openshift.cloudprovider.openstack %}
-+
-+[LoadBalancer]
-+subnet-id = {{ openshift.cloudprovider.openstack.lb_subnet_id }}
-+{% endif %}
+[LoadBalancer]
+subnet-id = {{ openshift.cloudprovider.openstack.lb_subnet_id }}
+{% endif %}
diff --git a/roles/openshift_cluster_metrics/tasks/main.yml b/roles/openshift_cluster_metrics/tasks/main.yml
index d45f62eca..1fc8a074a 100644
--- a/roles/openshift_cluster_metrics/tasks/main.yml
+++ b/roles/openshift_cluster_metrics/tasks/main.yml
@@ -28,7 +28,6 @@
cluster-reader
system:serviceaccount:default:heapster
register: oex_cluster_header_role
- register: oex_cluster_header_role
failed_when: "'already exists' not in oex_cluster_header_role.stderr and oex_cluster_header_role.rc != 0"
changed_when: false
diff --git a/roles/openshift_common/README.md b/roles/openshift_common/README.md
index 87306d4a6..2a271854b 100644
--- a/roles/openshift_common/README.md
+++ b/roles/openshift_common/README.md
@@ -20,6 +20,7 @@ Role Variables
| openshift_ip | UNDEF | Internal IP address to use for this host |
| openshift_public_hostname | UNDEF | Public hostname to use for this host |
| openshift_public_ip | UNDEF | Public IP address to use for this host |
+| openshift_portal_net | UNDEF | Service IP CIDR |
Dependencies
------------
diff --git a/roles/openshift_common/tasks/main.yml b/roles/openshift_common/tasks/main.yml
index eda43b9f8..4ec255dbc 100644
--- a/roles/openshift_common/tasks/main.yml
+++ b/roles/openshift_common/tasks/main.yml
@@ -27,6 +27,7 @@
use_nuage: "{{ openshift_use_nuage | default(None) }}"
use_manageiq: "{{ openshift_use_manageiq | default(None) }}"
data_dir: "{{ openshift_data_dir | default(None) }}"
+ use_dnsmasq: "{{ openshift_use_dnsmasq | default(None) }}"
# Using oo_image_tag_to_rpm_version here is a workaround for how
# openshift_version is set. That value is computed based on either RPM
diff --git a/roles/openshift_docker/tasks/main.yml b/roles/openshift_docker/tasks/main.yml
index 23613b762..10f47f9b2 100644
--- a/roles/openshift_docker/tasks/main.yml
+++ b/roles/openshift_docker/tasks/main.yml
@@ -4,12 +4,13 @@
# openshift_image_tag correctly for upgrades.
- name: Set version when containerized
command: >
- docker run --rm {{ openshift.common.cli_image }}:latest version
+ docker run --rm {{ openshift.common.cli_image }} version
register: cli_image_version
when: openshift.common.is_containerized is defined and openshift.common.is_containerized | bool and openshift_image_tag is not defined
- set_fact:
- l_image_tag: "{{ cli_image_version.stdout_lines[0].split(' ')[1].split('-')[0] }}"
+ l_image_tag: "{{ cli_image_version.stdout_lines[0].split(' ')[1].split('-')[0:2] | join('-') if openshift.common.deployment_type == 'origin' else
+ cli_image_version.stdout_lines[0].split(' ')[1].split('-')[0] }}"
when: openshift.common.is_containerized is defined and openshift.common.is_containerized | bool and openshift_image_tag is not defined
- set_fact:
@@ -23,6 +24,6 @@
with_items:
- role: docker
local_facts:
- openshift_image_tag: "{{ l_image_tag }}"
- openshift_version: "{{ l_image_tag if l_image_tag is defined else '' | oo_image_tag_to_rpm_version }}"
+ openshift_image_tag: "{{ l_image_tag | default(None) }}"
+ openshift_version: "{{ l_image_tag.split('-')[0] if l_image_tag is defined else '' | oo_image_tag_to_rpm_version }}"
when: openshift.common.is_containerized is defined and openshift.common.is_containerized | bool
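
The revised split keeps the release-candidate suffix in the image tag for origin deployments while openshift_version stays a bare version. A worked example, assuming the version command's first stdout line is the hypothetical "oc v1.2.0-rc1-13-g2e62fab":

```
split(' ')[1]                       -> v1.2.0-rc1-13-g2e62fab
split('-')[0:2] | join('-')         -> v1.2.0-rc1   (l_image_tag, origin)
l_image_tag.split('-')[0]           -> v1.2.0       (openshift_version)
```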
diff --git a/roles/openshift_docker_facts/tasks/main.yml b/roles/openshift_docker_facts/tasks/main.yml
index 89393168b..3acd2bba8 100644
--- a/roles/openshift_docker_facts/tasks/main.yml
+++ b/roles/openshift_docker_facts/tasks/main.yml
@@ -27,6 +27,9 @@
docker_log_options: "{{ openshift.docker.log_options | default(omit) }}"
docker_push_dockerhub: "{{ openshift.docker.disable_push_dockerhub
| default(omit) }}"
+ docker_http_proxy: "{{ openshift.common.http_proxy | default(omit) }}"
+ docker_https_proxy: "{{ openshift.common.https_proxy | default(omit) }}"
+ docker_no_proxy: "{{ openshift.common.no_proxy | default(omit) }}"
- set_fact:
docker_options: >
diff --git a/roles/openshift_etcd/meta/main.yml b/roles/openshift_etcd/meta/main.yml
index 5e5f96d44..7cc548f69 100644
--- a/roles/openshift_etcd/meta/main.yml
+++ b/roles/openshift_etcd/meta/main.yml
@@ -12,7 +12,7 @@ galaxy_info:
categories:
- cloud
dependencies:
-- role: openshift_facts
+- role: openshift_etcd_facts
- role: openshift_docker
when: openshift.common.is_containerized | bool
- role: etcd
diff --git a/roles/openshift_etcd_certificates/meta/main.yml b/roles/openshift_etcd_certificates/meta/main.yml
new file mode 100644
index 000000000..2725fdb51
--- /dev/null
+++ b/roles/openshift_etcd_certificates/meta/main.yml
@@ -0,0 +1,16 @@
+---
+galaxy_info:
+ author: Andrew Butcher
+ description: OpenShift etcd Certificates
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 1.9
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+dependencies:
+- role: openshift_etcd_facts
+- role: etcd_certificates
diff --git a/roles/openshift_etcd_facts/meta/main.yml b/roles/openshift_etcd_facts/meta/main.yml
new file mode 100644
index 000000000..925aa9f92
--- /dev/null
+++ b/roles/openshift_etcd_facts/meta/main.yml
@@ -0,0 +1,15 @@
+---
+galaxy_info:
+ author: Andrew Butcher
+ description: OpenShift etcd Facts
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 1.9
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+dependencies:
+- role: openshift_facts
diff --git a/roles/openshift_etcd_facts/vars/main.yml b/roles/openshift_etcd_facts/vars/main.yml
new file mode 100644
index 000000000..6f3894565
--- /dev/null
+++ b/roles/openshift_etcd_facts/vars/main.yml
@@ -0,0 +1,5 @@
+---
+etcd_is_containerized: "{{ openshift.common.is_containerized }}"
+etcd_is_atomic: "{{ openshift.common.is_atomic }}"
+etcd_hostname: "{{ openshift.common.hostname }}"
+etcd_ip: "{{ openshift.common.ip }}"
diff --git a/roles/openshift_examples/examples-sync.sh b/roles/openshift_examples/examples-sync.sh
index af388f6cf..7d81ac927 100755
--- a/roles/openshift_examples/examples-sync.sh
+++ b/roles/openshift_examples/examples-sync.sh
@@ -5,7 +5,7 @@
#
# This script should be run from openshift-ansible/roles/openshift_examples
-XPAAS_VERSION=ose-v1.3.0
+XPAAS_VERSION=ose-v1.3.0-1
ORIGIN_VERSION=${1:-v1.2}
EXAMPLES_BASE=$(pwd)/files/examples/${ORIGIN_VERSION}
find ${EXAMPLES_BASE} -name '*.json' -delete
@@ -40,7 +40,7 @@ find application-templates-${XPAAS_VERSION}/ -name '*.json' ! -wholename '*secre
wget https://raw.githubusercontent.com/jboss-fuse/application-templates/master/fis-image-streams.json -O ${EXAMPLES_BASE}/xpaas-streams/fis-image-streams.json
wget https://raw.githubusercontent.com/openshift/origin-metrics/master/metrics.yaml -O ${EXAMPLES_BASE}/infrastructure-templates/origin/metrics-deployer.yaml
-cp ${EXAMPLES_BASE}/infrastructure-templates/origin/metrics-*.yaml ${EXAMPLES_BASE}/infrastructure-templates/enterprise/
+wget https://raw.githubusercontent.com/openshift/origin-metrics/enterprise/metrics.yaml -O ${EXAMPLES_BASE}/infrastructure-templates/enterprise/metrics-deployer.yaml
wget https://raw.githubusercontent.com/openshift/origin-aggregated-logging/master/deployment/deployer.yaml -O ${EXAMPLES_BASE}/infrastructure-templates/origin/logging-deployer.yaml
wget https://raw.githubusercontent.com/openshift/origin-aggregated-logging/enterprise/deployment/deployer.yaml -O ${EXAMPLES_BASE}/infrastructure-templates/enterprise/logging-deployer.yaml
diff --git a/roles/openshift_examples/files/examples/v1.2/db-templates/mongodb-persistent-template.json b/roles/openshift_examples/files/examples/v1.2/db-templates/mongodb-persistent-template.json
index 672eaaa09..70c906f8e 100644
--- a/roles/openshift_examples/files/examples/v1.2/db-templates/mongodb-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v1.2/db-templates/mongodb-persistent-template.json
@@ -5,7 +5,7 @@
"name": "mongodb-persistent",
"creationTimestamp": null,
"annotations": {
- "description": "MongoDB database service, with persistent storage. Scaling to more than one replica is not supported",
+ "description": "MongoDB database service, with persistent storage. Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-mongodb",
"tags": "database,mongodb"
}
@@ -232,7 +232,7 @@
"name": "VOLUME_CAPACITY",
"displayName": "Volume Capacity",
"description": "Volume space available for data, e.g. 512Mi, 2Gi.",
- "value": "512Mi",
+ "value": "1Gi",
"required": true
}
],
diff --git a/roles/openshift_examples/files/examples/v1.2/db-templates/mysql-persistent-template.json b/roles/openshift_examples/files/examples/v1.2/db-templates/mysql-persistent-template.json
index d94262dde..e39ee57c8 100644
--- a/roles/openshift_examples/files/examples/v1.2/db-templates/mysql-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v1.2/db-templates/mysql-persistent-template.json
@@ -5,7 +5,7 @@
"name": "mysql-persistent",
"creationTimestamp": null,
"annotations": {
- "description": "MySQL database service, with persistent storage. Scaling to more than one replica is not supported",
+ "description": "MySQL database service, with persistent storage. Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-mysql-database",
"tags": "database,mysql"
}
@@ -221,7 +221,7 @@
"name": "VOLUME_CAPACITY",
"displayName": "Volume Capacity",
"description": "Volume space available for data, e.g. 512Mi, 2Gi.",
- "value": "512Mi",
+ "value": "1Gi",
"required": true
}
],
diff --git a/roles/openshift_examples/files/examples/v1.2/db-templates/postgresql-persistent-template.json b/roles/openshift_examples/files/examples/v1.2/db-templates/postgresql-persistent-template.json
index 5713411ad..347e01de3 100644
--- a/roles/openshift_examples/files/examples/v1.2/db-templates/postgresql-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v1.2/db-templates/postgresql-persistent-template.json
@@ -5,7 +5,7 @@
"name": "postgresql-persistent",
"creationTimestamp": null,
"annotations": {
- "description": "PostgreSQL database service, with persistent storage. Scaling to more than one replica is not supported",
+ "description": "PostgreSQL database service, with persistent storage. Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-postgresql",
"tags": "database,postgresql"
}
@@ -220,7 +220,7 @@
"name": "VOLUME_CAPACITY",
"displayName": "Volume Capacity",
"description": "Volume space available for data, e.g. 512Mi, 2Gi.",
- "value": "512Mi",
+ "value": "1Gi",
"required": true
}
],
diff --git a/roles/openshift_examples/files/examples/v1.2/infrastructure-templates/enterprise/logging-deployer.yaml b/roles/openshift_examples/files/examples/v1.2/infrastructure-templates/enterprise/logging-deployer.yaml
index c6cc98ce3..848e93c5f 100644
--- a/roles/openshift_examples/files/examples/v1.2/infrastructure-templates/enterprise/logging-deployer.yaml
+++ b/roles/openshift_examples/files/examples/v1.2/infrastructure-templates/enterprise/logging-deployer.yaml
@@ -69,6 +69,8 @@ objects:
value: ${ES_OPS_RECOVER_EXPECTED_NODES}
- name: ES_OPS_RECOVER_AFTER_TIME
value: ${ES_OPS_RECOVER_AFTER_TIME}
+ - name: MODE
+ value: ${MODE}
dnsPolicy: ClusterFirst
restartPolicy: Never
serviceAccount: logging-deployer
@@ -80,11 +82,11 @@ objects:
secretName: logging-deployer
parameters:
-
- description: 'Specify prefix for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set prefix "openshift/origin-"'
+ description: 'Specify image prefix for logging components; e.g. for "registry.access.redhat.com/openshift3/logging-deployment:3.2.0", set prefix "registry.access.redhat.com/openshift3/"'
name: IMAGE_PREFIX
- value: "registry.access.redhat.com/openshift3/"
+ value: registry.access.redhat.com/openshift3/
-
- description: 'Specify version for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set version "v1.1"'
+ description: 'Specify version for logging components; e.g. for "registry.access.redhat.com/openshift3/logging-deployment:3.2.0", set version "3.2.0"'
name: IMAGE_VERSION
value: "3.2.0"
-
@@ -148,4 +150,7 @@ parameters:
description: "Timeout for *expected* ops nodes to be present when cluster is recovering from a full restart."
name: ES_OPS_RECOVER_AFTER_TIME
value: "5m"
-
+-
+ description: "The mode that the deployer runs in."
+ name: MODE
+ value: "install"
diff --git a/roles/openshift_examples/files/examples/v1.2/infrastructure-templates/origin/logging-deployer.yaml b/roles/openshift_examples/files/examples/v1.2/infrastructure-templates/origin/logging-deployer.yaml
index 9257b1f28..cc33f77d8 100644
--- a/roles/openshift_examples/files/examples/v1.2/infrastructure-templates/origin/logging-deployer.yaml
+++ b/roles/openshift_examples/files/examples/v1.2/infrastructure-templates/origin/logging-deployer.yaml
@@ -1,156 +1,290 @@
apiVersion: "v1"
-kind: "Template"
-metadata:
- name: logging-deployer-template
- annotations:
- description: "Template for deploying everything needed for aggregated logging. Requires cluster-admin 'logging-deployer' service account and 'logging-deployer' secret."
- tags: "infrastructure"
-labels:
- logging-infra: deployer
- provider: openshift
- component: deployer
-objects:
+kind: "List"
+items:
-
- apiVersion: v1
- kind: Pod
+ apiVersion: "v1"
+ kind: "Template"
metadata:
- generateName: logging-deployer-
- spec:
- containers:
- - image: ${IMAGE_PREFIX}logging-deployment:${IMAGE_VERSION}
- imagePullPolicy: Always
- name: deployer
- volumeMounts:
- - name: secret
- mountPath: /secret
- readOnly: true
- - name: empty
- mountPath: /etc/deploy
- env:
- - name: PROJECT
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- - name: IMAGE_PREFIX
- value: ${IMAGE_PREFIX}
- - name: IMAGE_VERSION
- value: ${IMAGE_VERSION}
- - name: ENABLE_OPS_CLUSTER
- value: ${ENABLE_OPS_CLUSTER}
- - name: KIBANA_HOSTNAME
- value: ${KIBANA_HOSTNAME}
- - name: KIBANA_OPS_HOSTNAME
- value: ${KIBANA_OPS_HOSTNAME}
- - name: PUBLIC_MASTER_URL
- value: ${PUBLIC_MASTER_URL}
- - name: MASTER_URL
- value: ${MASTER_URL}
- - name: ES_INSTANCE_RAM
- value: ${ES_INSTANCE_RAM}
- - name: ES_CLUSTER_SIZE
- value: ${ES_CLUSTER_SIZE}
- - name: ES_NODE_QUORUM
- value: ${ES_NODE_QUORUM}
- - name: ES_RECOVER_AFTER_NODES
- value: ${ES_RECOVER_AFTER_NODES}
- - name: ES_RECOVER_EXPECTED_NODES
- value: ${ES_RECOVER_EXPECTED_NODES}
- - name: ES_RECOVER_AFTER_TIME
- value: ${ES_RECOVER_AFTER_TIME}
- - name: ES_OPS_INSTANCE_RAM
- value: ${ES_OPS_INSTANCE_RAM}
- - name: ES_OPS_CLUSTER_SIZE
- value: ${ES_OPS_CLUSTER_SIZE}
- - name: ES_OPS_NODE_QUORUM
- value: ${ES_OPS_NODE_QUORUM}
- - name: ES_OPS_RECOVER_AFTER_NODES
- value: ${ES_OPS_RECOVER_AFTER_NODES}
- - name: ES_OPS_RECOVER_EXPECTED_NODES
- value: ${ES_OPS_RECOVER_EXPECTED_NODES}
- - name: ES_OPS_RECOVER_AFTER_TIME
- value: ${ES_OPS_RECOVER_AFTER_TIME}
- - name: FLUENTD_NODESELECTOR
- value: ${FLUENTD_NODESELECTOR}
- dnsPolicy: ClusterFirst
- restartPolicy: Never
- serviceAccount: logging-deployer
- volumes:
- - name: empty
- emptyDir: {}
- - name: secret
- secret:
- secretName: logging-deployer
-parameters:
--
- description: 'Specify prefix for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set prefix "openshift/origin-"'
- name: IMAGE_PREFIX
- value: "docker.io/openshift/origin-"
--
- description: 'Specify version for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set version "v1.1"'
- name: IMAGE_VERSION
- value: "latest"
--
- description: "If true, set up to use a second ES cluster for ops logs."
- name: ENABLE_OPS_CLUSTER
- value: "false"
--
- description: "External hostname where clients will reach kibana"
- name: KIBANA_HOSTNAME
- required: true
--
- description: "External hostname at which admins will visit the ops Kibana."
- name: KIBANA_OPS_HOSTNAME
- value: kibana-ops.example.com
--
- description: "External URL for the master, for OAuth purposes"
- name: PUBLIC_MASTER_URL
- required: true
--
- description: "Internal URL for the master, for authentication retrieval"
- name: MASTER_URL
- value: "https://kubernetes.default.svc.cluster.local"
--
- description: "Amount of RAM to reserve per ElasticSearch instance."
- name: ES_INSTANCE_RAM
- value: "8G"
--
- description: "How many instances of ElasticSearch to deploy."
- name: ES_CLUSTER_SIZE
- required: true
--
- description: "Number of nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1."
- name: ES_NODE_QUORUM
+ name: logging-deployer-account-template
+ annotations:
+ description: "Template for creating the deployer account and roles needed for the aggregated logging deployer. Create as cluster-admin."
+ tags: "infrastructure"
+ objects:
+ - apiVersion: v1
+ kind: ServiceAccount
+ name: logging-deployer
+ metadata:
+ name: logging-deployer
+ labels:
+ logging-infra: deployer
+ provider: openshift
+ component: deployer
+ secrets:
+ - name: logging-deployer
+ -
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: aggregated-logging-kibana
+ -
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: aggregated-logging-elasticsearch
+ -
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: aggregated-logging-fluentd
+ -
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: aggregated-logging-curator
+ - apiVersion: v1
+ kind: ClusterRole
+ metadata:
+ name: oauth-editor
+ rules:
+ - resources:
+ - oauthclients
+ verbs:
+ - create
+ - delete
+ - apiVersion: v1
+ kind: ClusterRole
+ metadata:
+ name: daemonset-admin
+ rules:
+ - resources:
+ - daemonsets
+ apiGroups:
+ - extensions
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - delete
+ - update
-
- description: "Number of nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_CLUSTER_SIZE."
- name: ES_RECOVER_AFTER_NODES
--
- description: "Number of nodes desired to be present before the cluster will recover from a full restart. By default, ES_CLUSTER_SIZE."
- name: ES_RECOVER_EXPECTED_NODES
--
- description: "Timeout for *expected* nodes to be present when cluster is recovering from a full restart."
- name: ES_RECOVER_AFTER_TIME
- value: "5m"
--
- description: "Amount of RAM to reserve per ops ElasticSearch instance."
- name: ES_OPS_INSTANCE_RAM
- value: "8G"
--
- description: "How many ops instances of ElasticSearch to deploy. By default, ES_CLUSTER_SIZE."
- name: ES_OPS_CLUSTER_SIZE
--
- description: "Number of ops nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1."
- name: ES_OPS_NODE_QUORUM
--
- description: "Number of ops nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_OPS_CLUSTER_SIZE."
- name: ES_OPS_RECOVER_AFTER_NODES
--
- description: "Number of ops nodes desired to be present before the cluster will recover from a full restart. By default, ES_OPS_CLUSTER_SIZE."
- name: ES_OPS_RECOVER_EXPECTED_NODES
--
- description: "Timeout for *expected* ops nodes to be present when cluster is recovering from a full restart."
- name: ES_OPS_RECOVER_AFTER_TIME
- value: "5m"
--
- description: "The nodeSelector used for the Fluentd DaemonSet."
- name: FLUENTD_NODESELECTOR
- value: "logging-infra-fluentd=true"
+ apiVersion: "v1"
+ kind: "Template"
+ metadata:
+ name: logging-deployer-template
+ annotations:
+ description: "Template for running the aggregated logging deployer in a pod. Requires empowered 'logging-deployer' service account and 'logging-deployer' secret."
+ tags: "infrastructure"
+ labels:
+ logging-infra: deployer
+ provider: openshift
+ component: deployer
+ objects:
+ -
+ apiVersion: v1
+ kind: Pod
+ metadata:
+ generateName: logging-deployer-
+ spec:
+ containers:
+ - image: ${IMAGE_PREFIX}logging-deployment:${IMAGE_VERSION}
+ imagePullPolicy: Always
+ name: deployer
+ volumeMounts:
+ - name: secret
+ mountPath: /secret
+ readOnly: true
+ - name: empty
+ mountPath: /etc/deploy
+ env:
+ - name: PROJECT
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: IMAGE_PREFIX
+ value: ${IMAGE_PREFIX}
+ - name: IMAGE_VERSION
+ value: ${IMAGE_VERSION}
+ - name: ENABLE_OPS_CLUSTER
+ value: ${ENABLE_OPS_CLUSTER}
+ - name: KIBANA_HOSTNAME
+ value: ${KIBANA_HOSTNAME}
+ - name: KIBANA_OPS_HOSTNAME
+ value: ${KIBANA_OPS_HOSTNAME}
+ - name: PUBLIC_MASTER_URL
+ value: ${PUBLIC_MASTER_URL}
+ - name: MASTER_URL
+ value: ${MASTER_URL}
+ - name: ES_INSTANCE_RAM
+ value: ${ES_INSTANCE_RAM}
+ - name: ES_PVC_SIZE
+ value: ${ES_PVC_SIZE}
+ - name: ES_PVC_PREFIX
+ value: ${ES_PVC_PREFIX}
+ - name: ES_CLUSTER_SIZE
+ value: ${ES_CLUSTER_SIZE}
+ - name: ES_NODE_QUORUM
+ value: ${ES_NODE_QUORUM}
+ - name: ES_RECOVER_AFTER_NODES
+ value: ${ES_RECOVER_AFTER_NODES}
+ - name: ES_RECOVER_EXPECTED_NODES
+ value: ${ES_RECOVER_EXPECTED_NODES}
+ - name: ES_RECOVER_AFTER_TIME
+ value: ${ES_RECOVER_AFTER_TIME}
+ - name: ES_OPS_INSTANCE_RAM
+ value: ${ES_OPS_INSTANCE_RAM}
+ - name: ES_OPS_PVC_SIZE
+ value: ${ES_OPS_PVC_SIZE}
+ - name: ES_OPS_PVC_PREFIX
+ value: ${ES_OPS_PVC_PREFIX}
+ - name: ES_OPS_CLUSTER_SIZE
+ value: ${ES_OPS_CLUSTER_SIZE}
+ - name: ES_OPS_NODE_QUORUM
+ value: ${ES_OPS_NODE_QUORUM}
+ - name: ES_OPS_RECOVER_AFTER_NODES
+ value: ${ES_OPS_RECOVER_AFTER_NODES}
+ - name: ES_OPS_RECOVER_EXPECTED_NODES
+ value: ${ES_OPS_RECOVER_EXPECTED_NODES}
+ - name: ES_OPS_RECOVER_AFTER_TIME
+ value: ${ES_OPS_RECOVER_AFTER_TIME}
+ - name: FLUENTD_NODESELECTOR
+ value: ${FLUENTD_NODESELECTOR}
+ - name: ES_NODESELECTOR
+ value: ${ES_NODESELECTOR}
+ - name: ES_OPS_NODESELECTOR
+ value: ${ES_OPS_NODESELECTOR}
+ - name: KIBANA_NODESELECTOR
+ value: ${KIBANA_NODESELECTOR}
+ - name: KIBANA_OPS_NODESELECTOR
+ value: ${KIBANA_OPS_NODESELECTOR}
+ - name: CURATOR_NODESELECTOR
+ value: ${CURATOR_NODESELECTOR}
+ - name: CURATOR_OPS_NODESELECTOR
+ value: ${CURATOR_OPS_NODESELECTOR}
+ - name: MODE
+ value: ${MODE}
+ dnsPolicy: ClusterFirst
+ restartPolicy: Never
+ serviceAccount: logging-deployer
+ volumes:
+ - name: empty
+ emptyDir: {}
+ - name: secret
+ secret:
+ secretName: logging-deployer
+ parameters:
+ -
+ description: 'Specify prefix for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set prefix "openshift/origin-"'
+ name: IMAGE_PREFIX
+ value: "docker.io/openshift/origin-"
+ -
+ description: 'Specify version for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set version "v1.1"'
+ name: IMAGE_VERSION
+ value: "latest"
+ -
+ description: "If true, set up to use a second ES cluster for ops logs."
+ name: ENABLE_OPS_CLUSTER
+ value: "false"
+ -
+  description: "External hostname where clients will reach Kibana."
+ name: KIBANA_HOSTNAME
+ required: true
+ -
+ description: "External hostname at which admins will visit the ops Kibana."
+ name: KIBANA_OPS_HOSTNAME
+ value: kibana-ops.example.com
+ -
+ description: "External URL for the master, for OAuth purposes"
+ name: PUBLIC_MASTER_URL
+ required: true
+ -
+ description: "Internal URL for the master, for authentication retrieval"
+ name: MASTER_URL
+ value: "https://kubernetes.default.svc.cluster.local"
+ -
+ description: "How many instances of ElasticSearch to deploy."
+ name: ES_CLUSTER_SIZE
+ required: true
+ -
+ description: "Amount of RAM to reserve per ElasticSearch instance."
+ name: ES_INSTANCE_RAM
+ value: "8G"
+ -
+ description: "Size of the PersistentVolumeClaim to create per ElasticSearch instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead."
+ name: ES_PVC_SIZE
+ -
+ description: "Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_PVC_SIZE."
+ name: ES_PVC_PREFIX
+ value: "logging-es-"
+ -
+ description: "Number of nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1."
+ name: ES_NODE_QUORUM
+ -
+ description: "Number of nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_CLUSTER_SIZE."
+ name: ES_RECOVER_AFTER_NODES
+ -
+ description: "Number of nodes desired to be present before the cluster will recover from a full restart. By default, ES_CLUSTER_SIZE."
+ name: ES_RECOVER_EXPECTED_NODES
+ -
+ description: "Timeout for *expected* nodes to be present when cluster is recovering from a full restart."
+ name: ES_RECOVER_AFTER_TIME
+ value: "5m"
+ -
+ description: "How many ops instances of ElasticSearch to deploy. By default, ES_CLUSTER_SIZE."
+ name: ES_OPS_CLUSTER_SIZE
+ -
+ description: "Amount of RAM to reserve per ops ElasticSearch instance."
+ name: ES_OPS_INSTANCE_RAM
+ value: "8G"
+ -
+ description: "Size of the PersistentVolumeClaim to create per ElasticSearch ops instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead."
+ name: ES_OPS_PVC_SIZE
+ -
+ description: "Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_OPS_PVC_SIZE."
+ name: ES_OPS_PVC_PREFIX
+ value: "logging-es-ops-"
+ -
+  description: "Number of ops nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_OPS_CLUSTER_SIZE / 2 + 1."
+ name: ES_OPS_NODE_QUORUM
+ -
+ description: "Number of ops nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_OPS_CLUSTER_SIZE."
+ name: ES_OPS_RECOVER_AFTER_NODES
+ -
+ description: "Number of ops nodes desired to be present before the cluster will recover from a full restart. By default, ES_OPS_CLUSTER_SIZE."
+ name: ES_OPS_RECOVER_EXPECTED_NODES
+ -
+ description: "Timeout for *expected* ops nodes to be present when cluster is recovering from a full restart."
+ name: ES_OPS_RECOVER_AFTER_TIME
+ value: "5m"
+ -
+ description: "The nodeSelector used for the Fluentd DaemonSet."
+ name: FLUENTD_NODESELECTOR
+ value: "logging-infra-fluentd=true"
+ -
+  description: "Node selector for the Elasticsearch cluster (label=value)."
+ name: ES_NODESELECTOR
+ value: ""
+ -
+  description: "Node selector for the Elasticsearch operations cluster (label=value)."
+ name: ES_OPS_NODESELECTOR
+ value: ""
+ -
+  description: "Node selector for the Kibana cluster (label=value)."
+ name: KIBANA_NODESELECTOR
+ value: ""
+ -
+  description: "Node selector for the Kibana operations cluster (label=value)."
+ name: KIBANA_OPS_NODESELECTOR
+ value: ""
+ -
+  description: "Node selector for Curator (label=value)."
+ name: CURATOR_NODESELECTOR
+ value: ""
+ -
+  description: "Node selector for the operations Curator (label=value)."
+ name: CURATOR_OPS_NODESELECTOR
+ value: ""
+ -
+ description: "The mode that the deployer runs in."
+ name: MODE
+ value: "install"
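The ES_* defaults above are all functions of ES_CLUSTER_SIZE, per the parameter descriptions. A minimal Python sketch of that arithmetic (the helper name is hypothetical, not part of the deployer):

```
def es_defaults(cluster_size):
    # Defaults described above, derived from ES_CLUSTER_SIZE.
    quorum = cluster_size // 2 + 1      # ES minimum_master_nodes
    recover_after = cluster_size - 1    # nodes required before recovery
    recover_expected = cluster_size     # nodes desired before recovery
    return quorum, recover_after, recover_expected

print(es_defaults(3))  # (2, 2, 3)
```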
diff --git a/roles/openshift_examples/files/examples/v1.2/infrastructure-templates/origin/metrics-deployer.yaml b/roles/openshift_examples/files/examples/v1.2/infrastructure-templates/origin/metrics-deployer.yaml
index 30d79acee..c620c46ec 100644
--- a/roles/openshift_examples/files/examples/v1.2/infrastructure-templates/origin/metrics-deployer.yaml
+++ b/roles/openshift_examples/files/examples/v1.2/infrastructure-templates/origin/metrics-deployer.yaml
@@ -54,8 +54,12 @@ objects:
value: ${IMAGE_VERSION}
- name: MASTER_URL
value: ${MASTER_URL}
+ - name: MODE
+ value: ${MODE}
- name: REDEPLOY
value: ${REDEPLOY}
+ - name: IGNORE_PREFLIGHT
+ value: ${IGNORE_PREFLIGHT}
- name: USE_PERSISTENT_STORAGE
value: ${USE_PERSISTENT_STORAGE}
- name: HAWKULAR_METRICS_HOSTNAME
@@ -66,6 +70,10 @@ objects:
value: ${CASSANDRA_PV_SIZE}
- name: METRIC_DURATION
value: ${METRIC_DURATION}
+ - name: HEAPSTER_NODE_ID
+ value: ${HEAPSTER_NODE_ID}
+ - name: METRIC_RESOLUTION
+ value: ${METRIC_RESOLUTION}
dnsPolicy: ClusterFirst
restartPolicy: Never
serviceAccount: metrics-deployer
@@ -83,7 +91,7 @@ parameters:
-
description: 'Specify version for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set version "latest"'
name: IMAGE_VERSION
- value: "latest"
+ value: "v0.1.0"
-
description: "Internal URL for the master, for authentication retrieval"
name: MASTER_URL
@@ -93,10 +101,18 @@ parameters:
name: HAWKULAR_METRICS_HOSTNAME
required: true
-
- description: "If set to true the deployer will try and delete all the existing components before trying to redeploy."
+ description: "Can be set to: 'preflight' to perform validation before a deployment; 'deploy' to perform an initial deployment; 'refresh' to delete and redeploy all components but to keep persisted data and routes; 'redeploy' to delete and redeploy everything (losing all data in the process); 'validate' to re-run validations after a deployment"
+ name: MODE
+ value: "deploy"
+-
+ description: "(Deprecated) Turns 'deploy' mode into 'redeploy' mode, deleting and redeploying everything (losing all data in the process)"
name: REDEPLOY
value: "false"
-
+ description: "If preflight validation is blocking deployment and you're sure you don't care about it, this will ignore the results and proceed to deploy."
+ name: IGNORE_PREFLIGHT
+ value: "false"
+-
description: "Set to true for persistent storage, set to false to use non persistent storage"
name: USE_PERSISTENT_STORAGE
value: "true"
@@ -112,3 +128,11 @@ parameters:
description: "How many days metrics should be stored for."
name: METRIC_DURATION
value: "7"
+-
+ description: "The identifier used when generating metric ids in Hawkular"
+ name: HEAPSTER_NODE_ID
+ value: "nodename"
+-
+  description: "How often metrics should be gathered. The default of '10s' means every 10 seconds."
+ name: METRIC_RESOLUTION
+ value: "10s"
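The MODE and REDEPLOY parameters interact exactly as the descriptions above state: the deprecated REDEPLOY=true simply turns 'deploy' mode into 'redeploy'. A hypothetical helper capturing that rule:

```
def effective_mode(mode='deploy', redeploy='false'):
    # Deprecated REDEPLOY=true upgrades 'deploy' to 'redeploy',
    # deleting and redeploying everything (losing all data).
    if mode == 'deploy' and redeploy.lower() == 'true':
        return 'redeploy'
    return mode

print(effective_mode(redeploy='true'))  # redeploy
```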
diff --git a/roles/openshift_examples/files/examples/v1.2/quickstart-templates/jenkins-persistent-template.json b/roles/openshift_examples/files/examples/v1.2/quickstart-templates/jenkins-persistent-template.json
index 6c143fc70..0d8dcffa1 100644
--- a/roles/openshift_examples/files/examples/v1.2/quickstart-templates/jenkins-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v1.2/quickstart-templates/jenkins-persistent-template.json
@@ -5,7 +5,7 @@
"name": "jenkins-persistent",
"creationTimestamp": null,
"annotations": {
- "description": "Jenkins service, with persistent storage.",
+ "description": "Jenkins service, with persistent storage. You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-jenkins",
"tags": "instant-app,jenkins"
}
@@ -207,7 +207,7 @@
"name": "VOLUME_CAPACITY",
"displayName": "Volume Capacity",
"description": "Volume space available for data, e.g. 512Mi, 2Gi.",
- "value": "512Mi",
+ "value": "1Gi",
"required": true
}
],
diff --git a/roles/openshift_examples/files/examples/v1.2/quickstart-templates/nodejs-mongodb.json b/roles/openshift_examples/files/examples/v1.2/quickstart-templates/nodejs-mongodb.json
index 3298ef40c..661bcbb69 100644
--- a/roles/openshift_examples/files/examples/v1.2/quickstart-templates/nodejs-mongodb.json
+++ b/roles/openshift_examples/files/examples/v1.2/quickstart-templates/nodejs-mongodb.json
@@ -112,7 +112,10 @@
"secret": "${GENERIC_WEBHOOK_SECRET}"
}
}
- ]
+ ],
+ "postCommit": {
+ "script": "npm test"
+ }
}
},
{
diff --git a/roles/openshift_examples/files/examples/v1.2/quickstart-templates/nodejs.json b/roles/openshift_examples/files/examples/v1.2/quickstart-templates/nodejs.json
index 82df67c4e..0518dfac7 100644
--- a/roles/openshift_examples/files/examples/v1.2/quickstart-templates/nodejs.json
+++ b/roles/openshift_examples/files/examples/v1.2/quickstart-templates/nodejs.json
@@ -112,7 +112,10 @@
"secret": "${GENERIC_WEBHOOK_SECRET}"
}
}
- ]
+ ],
+ "postCommit": {
+ "script": "npm test"
+ }
}
},
{
diff --git a/roles/openshift_expand_partition/README.md b/roles/openshift_expand_partition/README.md
index aed4ec871..c9c7b378c 100644
--- a/roles/openshift_expand_partition/README.md
+++ b/roles/openshift_expand_partition/README.md
@@ -45,7 +45,7 @@ space on /dev/xvda, and the file system will be expanded to fill the new
partition space.
- hosts: mynodes
- sudo: no
+ become: no
remote_user: root
gather_facts: no
roles:
@@ -68,7 +68,7 @@ partition space.
* Create an ansible playbook, say `expandvar.yaml`:
```
- hosts: mynodes
- sudo: no
+ become: no
remote_user: root
gather_facts: no
roles:
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 9218e12ae..643984982 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -56,12 +56,54 @@ def migrate_docker_facts(facts):
if 'node' in facts and 'portal_net' in facts['node']:
facts['docker']['hosted_registry_insecure'] = True
facts['docker']['hosted_registry_network'] = facts['node'].pop('portal_net')
+
+    # log_options was originally meant to be a comma-separated string, but
+    # we now prefer an actual list, with backward compatibility:
+ if 'log_options' in facts['docker'] and \
+ isinstance(facts['docker']['log_options'], basestring):
+ facts['docker']['log_options'] = facts['docker']['log_options'].split(",")
+
+ return facts
+
+# TODO: We should add a generic migration function that takes source and destination
+# paths and does the right thing rather than one function for common, one for node, etc.
+def migrate_common_facts(facts):
+ """ Migrate facts from various roles into common """
+ params = {
+        'node': ('portal_net',),
+        'master': ('portal_net',)
+ }
+ if 'common' not in facts:
+ facts['common'] = {}
+ for role in params.keys():
+ if role in facts:
+ for param in params[role]:
+ if param in facts[role]:
+ facts['common'][param] = facts[role].pop(param)
+ return facts
+
+def migrate_node_facts(facts):
+ """ Migrate facts from various roles into node """
+ params = {
+        'common': ('dns_ip',),
+ }
+ if 'node' not in facts:
+ facts['node'] = {}
+ for role in params.keys():
+ if role in facts:
+ for param in params[role]:
+ if param in facts[role]:
+ facts['node'][param] = facts[role].pop(param)
return facts
def migrate_local_facts(facts):
""" Apply migrations of local facts """
migrated_facts = copy.deepcopy(facts)
- return migrate_docker_facts(migrated_facts)
+ migrated_facts = migrate_docker_facts(migrated_facts)
+ migrated_facts = migrate_common_facts(migrated_facts)
+ migrated_facts = migrate_node_facts(migrated_facts)
+ migrated_facts = migrate_hosted_facts(migrated_facts)
+ return migrated_facts
def migrate_hosted_facts(facts):
""" Apply migrations for master facts """
@@ -448,6 +490,27 @@ def set_metrics_facts_if_unset(facts):
facts['common']['use_cluster_metrics'] = use_cluster_metrics
return facts
+def set_dnsmasq_facts_if_unset(facts):
+ """ Set dnsmasq facts if not already present in facts
+ Args:
+ facts (dict) existing facts
+ Returns:
+ facts (dict) updated facts with values set if not previously set
+ """
+
+ if 'common' in facts:
+        if 'use_dnsmasq' not in facts['common']:
+            facts['common']['use_dnsmasq'] = \
+                bool(facts['common']['version_gte_3_2_or_1_2'])
+ if 'master' in facts and 'dns_port' not in facts['master']:
+ if facts['common']['use_dnsmasq']:
+ facts['master']['dns_port'] = 8053
+ else:
+ facts['master']['dns_port'] = 53
+
+ return facts
+
def set_project_cfg_facts_if_unset(facts):
""" Set Project Configuration facts if not already present in facts dict
dict:
@@ -586,11 +649,13 @@ def set_aggregate_facts(facts):
"""
all_hostnames = set()
internal_hostnames = set()
+ kube_svc_ip = first_ip(facts['common']['portal_net'])
if 'common' in facts:
all_hostnames.add(facts['common']['hostname'])
all_hostnames.add(facts['common']['public_hostname'])
all_hostnames.add(facts['common']['ip'])
all_hostnames.add(facts['common']['public_ip'])
+ facts['common']['kube_svc_ip'] = kube_svc_ip
internal_hostnames.add(facts['common']['hostname'])
internal_hostnames.add(facts['common']['ip'])
@@ -607,9 +672,8 @@ def set_aggregate_facts(facts):
'kubernetes.default.svc', 'kubernetes.default.svc.' + cluster_domain]
all_hostnames.update(svc_names)
internal_hostnames.update(svc_names)
- first_svc_ip = first_ip(facts['master']['portal_net'])
- all_hostnames.add(first_svc_ip)
- internal_hostnames.add(first_svc_ip)
+ all_hostnames.add(kube_svc_ip)
+ internal_hostnames.add(kube_svc_ip)
facts['common']['all_hostnames'] = list(all_hostnames)
facts['common']['internal_hostnames'] = list(internal_hostnames)
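first_ip (defined elsewhere in this module) returns the first host address of the service subnet, so the default portal_net of 172.30.0.0/16 yields the well-known kubernetes service IP. A dependency-free sketch, assuming an IPv4 CIDR whose base address does not end in .255:

```
def first_ip_sketch(network):
    # e.g. '172.30.0.0/16' -> '172.30.0.1'
    base = network.split('/')[0]
    octets = [int(o) for o in base.split('.')]
    octets[3] += 1
    return '.'.join(str(o) for o in octets)

print(first_ip_sketch('172.30.0.0/16'))  # 172.30.0.1
```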
@@ -1154,7 +1218,7 @@ def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overw
if key in inventory_json_facts:
# Watchout for JSON facts that sometimes load as strings.
# (can happen if the JSON contains a boolean)
- if isinstance(new[key], str):
+ if isinstance(new[key], basestring):
facts[key] = yaml.safe_load(new[key])
else:
facts[key] = copy.deepcopy(new[key])
@@ -1212,7 +1276,12 @@ def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overw
facts[key] = copy.deepcopy(value)
new_keys = set(new.keys()) - set(orig.keys())
for key in new_keys:
- facts[key] = copy.deepcopy(new[key])
+ # Watchout for JSON facts that sometimes load as strings.
+ # (can happen if the JSON contains a boolean)
+ if key in inventory_json_facts and isinstance(new[key], basestring):
+ facts[key] = yaml.safe_load(new[key])
+ else:
+ facts[key] = copy.deepcopy(new[key])
return facts
def save_local_facts(filename, facts):
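The "Watchout" comments cover inventory JSON facts that arrive as strings, which can happen when the JSON contains a boolean; yaml.safe_load re-parses them back into structures. A hypothetical value:

```
import yaml

raw = "{'deploy': True, 'duration': 7}"  # arrived as a string
parsed = yaml.safe_load(raw)             # YAML accepts this flow syntax
print(parsed)  # {'deploy': True, 'duration': 7}
```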
@@ -1263,6 +1332,23 @@ def get_local_facts_from_file(filename):
return local_facts
+def sort_unique(alist):
+ """ Sorts and de-dupes a list
+
+ Args:
+ list: a list
+ Returns:
+ list: a sorted de-duped list
+ """
+
+ alist.sort()
+ out = list()
+ for i in alist:
+ if i not in out:
+ out.append(i)
+
+ return out
+
def safe_get_bool(fact):
""" Get a boolean fact safely.
@@ -1273,6 +1359,61 @@ def safe_get_bool(fact):
"""
return bool(strtobool(str(fact)))
+def set_proxy_facts(facts):
+ """ Set global proxy facts and promote defaults from http_proxy, https_proxy,
+ no_proxy to the more specific builddefaults and builddefaults_git vars.
+ 1. http_proxy, https_proxy, no_proxy
+ 2. builddefaults_*
+ 3. builddefaults_git_*
+
+ Args:
+ facts(dict): existing facts
+ Returns:
+ facts(dict): Updated facts with missing values
+ """
+ if 'common' in facts:
+ common = facts['common']
+ if 'http_proxy' in common or 'https_proxy' in common:
+ if 'generate_no_proxy_hosts' in common and \
+ common['generate_no_proxy_hosts']:
+ if 'no_proxy' in common and \
+ isinstance(common['no_proxy'], basestring):
+ common['no_proxy'] = common['no_proxy'].split(",")
+ else:
+ common['no_proxy'] = []
+ if 'no_proxy_internal_hostnames' in common:
+ common['no_proxy'].extend(common['no_proxy_internal_hostnames'].split(','))
+ common['no_proxy'].append('.' + common['dns_domain'])
+ common['no_proxy'].append(common['hostname'])
+ common['no_proxy'] = sort_unique(common['no_proxy'])
+ facts['common'] = common
+
+ if 'builddefaults' in facts:
+ builddefaults = facts['builddefaults']
+ common = facts['common']
+ # Copy values from common to builddefaults
+ if 'http_proxy' not in builddefaults and 'http_proxy' in common:
+ builddefaults['http_proxy'] = common['http_proxy']
+ if 'https_proxy' not in builddefaults and 'https_proxy' in common:
+ builddefaults['https_proxy'] = common['https_proxy']
+ if 'no_proxy' not in builddefaults and 'no_proxy' in common:
+ builddefaults['no_proxy'] = common['no_proxy']
+ if 'git_http_proxy' not in builddefaults and 'http_proxy' in builddefaults:
+ builddefaults['git_http_proxy'] = builddefaults['http_proxy']
+ if 'git_https_proxy' not in builddefaults and 'https_proxy' in builddefaults:
+ builddefaults['git_https_proxy'] = builddefaults['https_proxy']
+ # If we're actually defining a proxy config then create kube_admission_plugin_config
+ # if it doesn't exist, then merge builddefaults[config] structure
+ # into kube_admission_plugin_config
+ if 'kube_admission_plugin_config' not in facts['master']:
+ facts['master']['kube_admission_plugin_config'] = dict()
+ if 'config' in builddefaults and ('http_proxy' in builddefaults or \
+ 'https_proxy' in builddefaults):
+ facts['master']['kube_admission_plugin_config'].update(builddefaults['config'])
+ facts['builddefaults'] = builddefaults
+
+ return facts
+
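With hypothetical inventory values, set_proxy_facts builds no_proxy and cascades the proxy settings into builddefaults like this:

```
facts = {
    'common': {'http_proxy': 'http://proxy.example.com:3128',
               'generate_no_proxy_hosts': True,
               'no_proxy_internal_hostnames': 'master1,node1',
               'dns_domain': 'cluster.local',
               'hostname': 'master1'},
    'builddefaults': {},
    'master': {},
}
facts = set_proxy_facts(facts)
print(facts['common']['no_proxy'])
# ['.cluster.local', 'master1', 'node1']  (sorted, de-duped)
print(facts['builddefaults']['git_http_proxy'])
# http://proxy.example.com:3128
```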
# pylint: disable=too-many-statements
def set_container_facts_if_unset(facts):
""" Set containerized facts.
@@ -1406,7 +1547,8 @@ class OpenShiftFacts(object):
Raises:
OpenShiftFactsUnsupportedRoleError:
"""
- known_roles = ['cloudprovider',
+ known_roles = ['builddefaults',
+ 'cloudprovider',
'common',
'docker',
'etcd',
@@ -1490,9 +1632,11 @@ class OpenShiftFacts(object):
facts = build_controller_args(facts)
facts = build_api_server_args(facts)
facts = set_version_facts_if_unset(facts)
+ facts = set_dnsmasq_facts_if_unset(facts)
facts = set_manageiq_facts_if_unset(facts)
facts = set_aggregate_facts(facts)
facts = set_etcd_facts_if_unset(facts)
+ facts = set_proxy_facts(facts)
if not safe_get_bool(facts['common']['is_containerized']):
facts = set_installed_variant_rpm_facts(facts)
return dict(openshift=facts)
@@ -1519,6 +1663,7 @@ class OpenShiftFacts(object):
deployment_type=deployment_type,
hostname=hostname,
public_hostname=hostname,
+ portal_net='172.30.0.0/16',
client_binary='oc', admin_binary='oadm',
dns_domain='cluster.local',
install_examples=True,
@@ -1546,7 +1691,7 @@ class OpenShiftFacts(object):
etcd_hosts='', etcd_port='4001',
portal_net='172.30.0.0/16',
embedded_etcd=True, embedded_kube=True,
- embedded_dns=True, dns_port='53',
+ embedded_dns=True,
bind_addr='0.0.0.0',
session_max_seconds=3600,
session_name='ssn',
@@ -1555,7 +1700,8 @@ class OpenShiftFacts(object):
auth_token_max_seconds=500,
oauth_grant_method='auto',
scheduler_predicates=scheduler_predicates,
- scheduler_priorities=scheduler_priorities)
+ scheduler_priorities=scheduler_priorities,
+ dynamic_provisioning_enabled=True)
if 'node' in roles:
defaults['node'] = dict(labels={}, annotations={},
@@ -1576,6 +1722,24 @@ class OpenShiftFacts(object):
if 'hosted' in roles or self.role == 'hosted':
defaults['hosted'] = dict(
+ metrics=dict(
+ deploy=False,
+ duration=7,
+ resolution=10,
+ storage=dict(
+ kind=None,
+ volume=dict(
+ name='metrics',
+ size='10Gi'
+ ),
+ nfs=dict(
+ directory='/exports',
+ options='*(rw,root_squash)'),
+ host=None,
+ access_modes=['ReadWriteMany'],
+ create_pv=True
+ )
+ ),
registry=dict(
storage=dict(
kind=None,
@@ -1777,15 +1941,12 @@ class OpenShiftFacts(object):
if isinstance(val, basestring):
val = [x.strip() for x in val.split(',')]
new_local_facts['docker'][key] = list(set(val) - set(['']))
+ # Convert legacy log_options comma sep string to a list if present:
+ if 'log_options' in new_local_facts['docker'] and \
+ isinstance(new_local_facts['docker']['log_options'], basestring):
+ new_local_facts['docker']['log_options'] = new_local_facts['docker']['log_options'].split(',')
- for facts in new_local_facts.values():
- keys_to_delete = []
- if isinstance(facts, dict):
- for fact, value in facts.iteritems():
- if value == "" or value is None:
- keys_to_delete.append(fact)
- for key in keys_to_delete:
- del facts[key]
+ new_local_facts = self.remove_empty_facts(new_local_facts)
if new_local_facts != local_facts:
self.validate_local_facts(new_local_facts)
@@ -1796,6 +1957,23 @@ class OpenShiftFacts(object):
self.changed = changed
return new_local_facts
+ def remove_empty_facts(self, facts=None):
+ """ Remove empty facts
+
+ Args:
+ facts (dict): facts to clean
+ """
+ facts_to_remove = []
+ for fact, value in facts.iteritems():
+ if isinstance(facts[fact], dict):
+ facts[fact] = self.remove_empty_facts(facts[fact])
+ else:
+ if value == "" or value == [""] or value is None:
+ facts_to_remove.append(fact)
+ for fact in facts_to_remove:
+ del facts[fact]
+ return facts
+
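A hypothetical run of the cleaner: empty strings, [''] and None leaves are dropped at every nesting level, while non-empty values survive.

```
facts = {'common': {'hostname': 'node1', 'public_ip': '', 'labels': None},
         'docker': {'log_options': ['']}}
# self.remove_empty_facts(facts) returns:
# {'common': {'hostname': 'node1'}, 'docker': {}}
```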
def validate_local_facts(self, facts=None):
""" Validate local facts
diff --git a/roles/openshift_facts/tasks/main.yml b/roles/openshift_facts/tasks/main.yml
index 36def57c8..ff726ae24 100644
--- a/roles/openshift_facts/tasks/main.yml
+++ b/roles/openshift_facts/tasks/main.yml
@@ -5,15 +5,13 @@
when: ansible_version.full | version_compare('1.9.4', 'lt') or ansible_version.full | version_compare('2.0', 'ge')
- name: Detecting Operating System
- shell: ls /run/ostree-booted
- ignore_errors: yes
- failed_when: false
- changed_when: false
- register: ostree_output
+ stat:
+ path: /run/ostree-booted
+ register: ostree_booted
# Locally setup containerized facts for now
- set_fact:
- l_is_atomic: "{{ ostree_output.rc == 0 }}"
+ l_is_atomic: "{{ ostree_booted.stat.exists }}"
- set_fact:
l_is_containerized: "{{ (l_is_atomic | bool) or (containerized | default(false) | bool) }}"
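Switching from `shell: ls` to the stat module makes the probe read-only and removes the need for failed_when/changed_when overrides; the detection itself reduces to an existence test:

```
import os.path

# Equivalent of the stat-based Atomic Host detection above:
l_is_atomic = os.path.exists('/run/ostree-booted')
```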
@@ -33,3 +31,19 @@
is_containerized: "{{ l_is_containerized | default(None) }}"
public_hostname: "{{ openshift_public_hostname | default(None) }}"
public_ip: "{{ openshift_public_ip | default(None) }}"
+ portal_net: "{{ openshift_portal_net | default(openshift_master_portal_net) | default(None) }}"
+
+# had to be done outside of the above because hostname isn't yet set
+- name: Gather hostnames for proxy configuration
+ openshift_facts:
+ role: common
+ local_facts:
+ http_proxy: "{{ openshift_http_proxy | default(None) }}"
+ https_proxy: "{{ openshift_https_proxy | default(None) }}"
+ no_proxy: "{{ openshift_no_proxy | default(None) }}"
+ generate_no_proxy_hosts: "{{ openshift_generate_no_proxy_hosts | default(True) }}"
+ no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
diff --git a/roles/openshift_hosted/tasks/router.yml b/roles/openshift_hosted/tasks/router.yml
index 6a36f74b2..4ccbf4430 100644
--- a/roles/openshift_hosted/tasks/router.yml
+++ b/roles/openshift_hosted/tasks/router.yml
@@ -32,6 +32,7 @@
{{ openshift.common.client_binary }} --api-version='v1' -o json
get nodes -n default --config={{ openshift.common.config_base }}/master/admin.kubeconfig
register: openshift_hosted_router_nodes_json
+ changed_when: false
when: openshift.hosted.router.replicas | default(None) == None
- name: Collect nodes matching router selector
diff --git a/roles/openshift_hosted_logging/tasks/deploy_logging.yaml b/roles/openshift_hosted_logging/tasks/deploy_logging.yaml
index d8a5b62a0..072f7bb4e 100644
--- a/roles/openshift_hosted_logging/tasks/deploy_logging.yaml
+++ b/roles/openshift_hosted_logging/tasks/deploy_logging.yaml
@@ -15,43 +15,52 @@
changed_when: False
- name: "Create logging project"
- command: {{ openshift.common.admin_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-project logging
+ command: >
+ {{ openshift.common.admin_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-project logging
- name: "Changing projects"
- command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig project logging"
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig project logging
- name: "Creating logging deployer secret"
- command: " {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig secrets new logging-deployer {{ openshift_hosted_logging_secret_vars | default('nothing=/dev/null') }}"
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig secrets new logging-deployer {{ openshift_hosted_logging_secret_vars | default('nothing=/dev/null') }}
register: secret_output
failed_when: "secret_output.rc == 1 and 'exists' not in secret_output.stderr"
- name: "Copy serviceAccount file"
- copy: dest=/tmp/logging-deployer-sa.yaml
- src={{role_path}}/files/logging-deployer-sa.yaml
- force=yes
+ copy:
+ dest: /tmp/logging-deployer-sa.yaml
+ src: "{{role_path}}/files/logging-deployer-sa.yaml"
+ force: yes
- name: "Create logging-deployer service account"
- command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f /tmp/logging-deployer-sa.yaml"
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f /tmp/logging-deployer-sa.yaml
register: deployer_output
failed_when: "deployer_output.rc == 1 and 'exists' not in deployer_output.stderr"
- name: "Set permissions for logging-deployer service account"
- command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig policy add-role-to-user edit system:serviceaccount:logging:logging-deployer"
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig policy add-role-to-user edit system:serviceaccount:logging:logging-deployer
register: permiss_output
failed_when: "permiss_output.rc == 1 and 'exists' not in permiss_output.stderr"
- name: "Set permissions for fluentd"
- command: {{ openshift.common.admin_binary}} policy add-scc-to-user privileged system:serviceaccount:logging:aggregated-logging-fluentd
+ command: >
+ {{ openshift.common.admin_binary}} policy add-scc-to-user privileged system:serviceaccount:logging:aggregated-logging-fluentd
register: fluentd_output
failed_when: "fluentd_output.rc == 1 and 'exists' not in fluentd_output.stderr"
- name: "Set additional permissions for fluentd"
- command: {{ openshift.common.admin_binary}} policy add-cluster-role-to-user cluster-reader system:serviceaccount:logging:aggregated-logging-fluentd
+ command: >
+ {{ openshift.common.admin_binary}} policy add-cluster-role-to-user cluster-reader system:serviceaccount:logging:aggregated-logging-fluentd
register: fluentd2_output
failed_when: "fluentd2_output.rc == 1 and 'exists' not in fluentd2_output.stderr"
- name: "Create deployer template"
- command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f /usr/share/openshift/examples/infrastructure-templates/enterprise/logging-deployer.yaml -n openshift"
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f /usr/share/openshift/examples/infrastructure-templates/enterprise/logging-deployer.yaml -n openshift
register: template_output
failed_when: "template_output.rc == 1 and 'exists' not in template_output.stderr"
@@ -90,13 +99,12 @@
- name: "Scale fluentd deployment config"
- command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig scale dc/logging-fluentd --replicas={{ fluentd_replicas | default('1') }}"
-
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig scale dc/logging-fluentd --replicas={{ fluentd_replicas | default('1') }}
- - name: "Scale fluentd replication controller"
- command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig scale rc/logging-fluentd-1 --replicas={{ fluentd_replicas | default('1') }}"
- - debug: msg="Logging components deployed. Note persistant volume for elasticsearch must be setup manually"
+ - debug:
+    msg: "Logging components deployed. Note: a persistent volume for Elasticsearch must be set up manually."
- name: Delete temp directory
file:
diff --git a/roles/openshift_manage_node/tasks/main.yml b/roles/openshift_manage_node/tasks/main.yml
index cee1f1738..291cdbbb5 100644
--- a/roles/openshift_manage_node/tasks/main.yml
+++ b/roles/openshift_manage_node/tasks/main.yml
@@ -6,7 +6,7 @@
retries: 50
delay: 5
changed_when: false
- with_items: openshift_nodes
+ with_items: "{{ openshift_nodes }}"
- name: Set node schedulability
command: >
diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml
index 6bf28ff2b..09bde6002 100644
--- a/roles/openshift_master/defaults/main.yml
+++ b/roles/openshift_master/defaults/main.yml
@@ -9,10 +9,10 @@ os_firewall_allow:
port: "{{ openshift.master.api_port }}/tcp"
- service: api controllers https
port: "{{ openshift.master.controllers_port }}/tcp"
-- service: dns tcp
- port: 53/tcp
-- service: dns udp
- port: 53/udp
+- service: skydns tcp
+ port: "{{ openshift.master.dns_port }}/tcp"
+- service: skydns udp
+ port: "{{ openshift.master.dns_port }}/udp"
- service: Fluentd td-agent tcp
port: 24224/tcp
- service: Fluentd td-agent udp
diff --git a/roles/openshift_master/meta/main.yml b/roles/openshift_master/meta/main.yml
index 0d4241e2c..e882e0b8b 100644
--- a/roles/openshift_master/meta/main.yml
+++ b/roles/openshift_master/meta/main.yml
@@ -15,5 +15,6 @@ dependencies:
- role: openshift_docker
- role: openshift_cli
- role: openshift_cloud_provider
+- role: openshift_builddefaults
- role: openshift_master_facts
- role: openshift_hosted_facts
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index 18a42bf93..fe0784ea2 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -78,23 +78,50 @@
action: "{{ ansible_pkg_mgr }} name=httpd-tools state=present"
when: (item.kind == 'HTPasswdPasswordIdentityProvider') and
not openshift.common.is_atomic | bool
- with_items: openshift.master.identity_providers
+ with_items: "{{ openshift.master.identity_providers }}"
- name: Ensure htpasswd directory exists
file:
path: "{{ item.filename | dirname }}"
state: directory
when: item.kind == 'HTPasswdPasswordIdentityProvider'
- with_items: openshift.master.identity_providers
+ with_items: "{{ openshift.master.identity_providers }}"
- name: Create the htpasswd file if needed
- copy:
+ template:
dest: "{{ item.filename }}"
- content: ""
+ src: htpasswd.j2
mode: 0600
- force: no
+ backup: yes
when: item.kind == 'HTPasswdPasswordIdentityProvider'
- with_items: openshift.master.identity_providers
+ with_items: "{{ openshift.master.identity_providers }}"
+
+- name: Create the ldap ca file if needed
+ copy:
+ dest: "{{ item.ca if 'ca' in item and '/' in item.ca else openshift_master_config_dir ~ '/' ~ item.ca | default('ldap_ca.crt') }}"
+ content: "{{ openshift.master.ldap_ca }}"
+ mode: 0600
+ backup: yes
+ when: openshift.master.ldap_ca is defined and item.kind == 'LDAPPasswordIdentityProvider'
+ with_items: "{{ openshift.master.identity_providers }}"
+
+- name: Create the openid ca file if needed
+ copy:
+ dest: "{{ item.ca if 'ca' in item and '/' in item.ca else openshift_master_config_dir ~ '/' ~ item.ca | default('openid_ca.crt') }}"
+ content: "{{ openshift.master.openid_ca }}"
+ mode: 0600
+ backup: yes
+ when: openshift.master.openid_ca is defined and item.kind == 'OpenIDIdentityProvider' and item.ca | default('') != ''
+ with_items: "{{ openshift.master.identity_providers }}"
+
+- name: Create the request header ca file if needed
+ copy:
+ dest: "{{ item.clientCA if 'clientCA' in item and '/' in item.clientCA else openshift_master_config_dir ~ '/' ~ item.clientCA | default('request_header_ca.crt') }}"
+ content: "{{ openshift.master.request_header_ca }}"
+ mode: 0600
+ backup: yes
+ when: openshift.master.request_header_ca is defined and item.kind == 'RequestHeaderIdentityProvider' and item.clientCA | default('') != ''
+ with_items: "{{ openshift.master.identity_providers }}"
- name: Install the systemd units
include: systemd_units.yml
@@ -212,7 +239,7 @@
mode: 0700
owner: "{{ item }}"
group: "{{ 'root' if item == 'root' else _ansible_ssh_user_gid.stdout }}"
- with_items: client_users
+ with_items: "{{ client_users }}"
# TODO: Update this file if the contents of the source file are not present in
# the dest file, will need to make sure to ignore things that could be added
@@ -220,7 +247,7 @@
command: cp {{ openshift_master_config_dir }}/admin.kubeconfig ~{{ item }}/.kube/config
args:
creates: ~{{ item }}/.kube/config
- with_items: client_users
+ with_items: "{{ client_users }}"
- name: Update the permissions on the admin client config(s)
file:
@@ -229,4 +256,4 @@
mode: 0700
owner: "{{ item }}"
group: "{{ 'root' if item == 'root' else _ansible_ssh_user_gid.stdout }}"
- with_items: client_users
+ with_items: "{{ client_users }}"
diff --git a/roles/openshift_master/templates/atomic-openshift-master.j2 b/roles/openshift_master/templates/atomic-openshift-master.j2
index 862cfa8f1..4cf632841 100644
--- a/roles/openshift_master/templates/atomic-openshift-master.j2
+++ b/roles/openshift_master/templates/atomic-openshift-master.j2
@@ -10,8 +10,13 @@ AWS_SECRET_ACCESS_KEY={{ openshift.cloudprovider.aws.secret_key }}
{% endif %}
# Proxy configuration
-# Origin uses standard HTTP_PROXY environment variables. Be sure to set
-# NO_PROXY for your master
-#NO_PROXY=master.example.com
-#HTTP_PROXY=http://USER:PASSWORD@IPADDR:PORT
-#HTTPS_PROXY=https://USER:PASSWORD@IPADDR:PORT
+# See https://docs.openshift.com/enterprise/latest/install_config/install/advanced_install.html#configuring-global-proxy
+{% if 'http_proxy' in openshift.common %}
+HTTP_PROXY='{{ openshift.common.http_proxy | default('') }}'
+{% endif %}
+{% if 'https_proxy' in openshift.common %}
+HTTPS_PROXY='{{ openshift.common.https_proxy | default('')}}'
+{% endif %}
+{% if 'no_proxy' in openshift.common %}
+NO_PROXY='{{ openshift.common.no_proxy | default('') | join(',') }},{{ openshift.common.portal_net }},{{ openshift.master.sdn_cluster_network_cidr }}'
+{% endif %}
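With hypothetical facts, the NO_PROXY line above renders along these lines (the SDN CIDR value is purely illustrative):

```
no_proxy = ['.cluster.local', 'master1.example.com']
portal_net = '172.30.0.0/16'
sdn_cidr = '10.1.0.0/16'  # illustrative osm_cluster_network_cidr
print("NO_PROXY='%s,%s,%s'" % (','.join(no_proxy), portal_net, sdn_cidr))
# NO_PROXY='.cluster.local,master1.example.com,172.30.0.0/16,10.1.0.0/16'
```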
diff --git a/roles/openshift_master/templates/htpasswd.j2 b/roles/openshift_master/templates/htpasswd.j2
new file mode 100644
index 000000000..ba2c02e20
--- /dev/null
+++ b/roles/openshift_master/templates/htpasswd.j2
@@ -0,0 +1,5 @@
+{% if 'htpasswd_users' in openshift.master %}
+{% for user,pass in openshift.master.htpasswd_users.iteritems() %}
+{{ user ~ ':' ~ pass }}
+{% endfor %}
+{% endif %}
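The htpasswd_users dict this template iterates over is produced by the oo_htpasswd_users_from_file filter referenced in openshift_master_facts below. A hypothetical sketch of what such a filter does (not the actual filter source):

```
def oo_htpasswd_users_from_file(contents):
    # Turn 'user:hash' lines from an htpasswd file into a dict
    # suitable for the template above.
    users = {}
    for line in contents.splitlines():
        line = line.strip()
        if line and ':' in line:
            user, passwd = line.split(':', 1)
            users[user] = passwd
    return users
```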
diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2
index 1009aa318..48bb8a13f 100644
--- a/roles/openshift_master/templates/master.yaml.v1.j2
+++ b/roles/openshift_master/templates/master.yaml.v1.j2
@@ -123,7 +123,7 @@ kubernetesMasterConfig:
keyFile: master.proxy-client.key
schedulerConfigFile: {{ openshift_master_scheduler_conf }}
servicesNodePortRange: ""
- servicesSubnet: {{ openshift.master.portal_net }}
+ servicesSubnet: {{ openshift.common.portal_net }}
staticNodeNames: {{ openshift_node_ips | default([], true) }}
{% endif %}
masterClients:
@@ -138,7 +138,7 @@ networkConfig:
networkPluginName: {{ openshift.common.sdn_network_plugin_name }}
{% endif %}
# serviceNetworkCIDR must match kubernetesMasterConfig.servicesSubnet
- serviceNetworkCIDR: {{ openshift.master.portal_net }}
+ serviceNetworkCIDR: {{ openshift.common.portal_net }}
oauthConfig:
{% if 'oauth_always_show_provider_selection' in openshift.master %}
alwaysShowProviderSelection: {{ openshift.master.oauth_always_show_provider_selection }}
@@ -209,3 +209,5 @@ servingInfo:
{% endfor %}
{% endfor %}
{% endif %}
+volumeConfig:
+ dynamicProvisioningEnabled: {{ openshift.master.dynamic_provisioning_enabled }}
diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2
index 69754ee10..01a8428a0 100644
--- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2
+++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2
@@ -10,8 +10,13 @@ AWS_SECRET_ACCESS_KEY={{ openshift.cloudprovider.aws.secret_key }}
{% endif %}
# Proxy configuration
-# Origin uses standard HTTP_PROXY environment variables. Be sure to set
-# NO_PROXY for your master
-#NO_PROXY=master.example.com
-#HTTP_PROXY=http://USER:PASSWORD@IPADDR:PORT
-#HTTPS_PROXY=https://USER:PASSWORD@IPADDR:PORT
+# See https://docs.openshift.com/enterprise/latest/install_config/install/advanced_install.html#configuring-global-proxy
+{% if 'http_proxy' in openshift.common %}
+HTTP_PROXY='{{ openshift.common.http_proxy | default('') }}'
+{% endif %}
+{% if 'https_proxy' in openshift.common %}
+HTTPS_PROXY='{{ openshift.common.https_proxy | default('')}}'
+{% endif %}
+{% if 'no_proxy' in openshift.common %}
+NO_PROXY='{{ openshift.common.no_proxy | default('') | join(',') }},{{ openshift.common.portal_net }},{{ openshift.master.sdn_cluster_network_cidr }}'
+{% endif %}
diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2
index 048a4305a..89ccb1eed 100644
--- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2
+++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2
@@ -10,8 +10,13 @@ AWS_SECRET_ACCESS_KEY={{ openshift.cloudprovider.aws.secret_key }}
{% endif %}
# Proxy configuration
-# Origin uses standard HTTP_PROXY environment variables. Be sure to set
-# NO_PROXY for your master
-#NO_PROXY=master.example.com
-#HTTP_PROXY=http://USER:PASSWORD@IPADDR:PORT
-#HTTPS_PROXY=https://USER:PASSWORD@IPADDR:PORT
+# See https://docs.openshift.com/enterprise/latest/install_config/install/advanced_install.html#configuring-global-proxy
+{% if 'http_proxy' in openshift.common %}
+HTTP_PROXY='{{ openshift.common.http_proxy | default('') }}'
+{% endif %}
+{% if 'https_proxy' in openshift.common %}
+HTTPS_PROXY='{{ openshift.common.https_proxy | default('')}}'
+{% endif %}
+{% if 'no_proxy' in openshift.common %}
+NO_PROXY='{{ openshift.common.no_proxy | default('') | join(',') }},{{ openshift.common.portal_net }},{{ openshift.master.sdn_cluster_network_cidr }}'
+{% endif %}
diff --git a/roles/openshift_master_certificates/tasks/main.yml b/roles/openshift_master_certificates/tasks/main.yml
index 9017b7d2b..394f9d381 100644
--- a/roles/openshift_master_certificates/tasks/main.yml
+++ b/roles/openshift_master_certificates/tasks/main.yml
@@ -4,14 +4,14 @@
path: "{{ openshift_generated_configs_dir }}/{{ item.master_cert_subdir }}"
state: directory
mode: 0700
- with_items: masters_needing_certs
+ with_items: "{{ masters_needing_certs | default([]) }}"
- file:
src: "{{ openshift_master_config_dir }}/{{ item.1 }}"
dest: "{{ openshift_generated_configs_dir }}/{{ item.0.master_cert_subdir }}/{{ item.1 }}"
state: hard
with_nested:
- - masters_needing_certs
+ - "{{ masters_needing_certs | default([]) }}"
-
- ca.crt
- ca.key
@@ -26,7 +26,7 @@
--cert-dir={{ openshift_generated_configs_dir }}/{{ item.master_cert_subdir }}
--overwrite=false
when: item.master_certs_missing | bool
- with_items: masters_needing_certs
+ with_items: "{{ masters_needing_certs | default([]) }}"
- file:
src: "{{ openshift_master_config_dir }}/{{ item.1 }}"
@@ -34,5 +34,5 @@
state: hard
force: true
with_nested:
- - masters_needing_certs
+ - "{{ masters_needing_certs | default([]) }}"
- "{{ hostvars[inventory_hostname] | certificates_to_synchronize }}"
diff --git a/roles/openshift_master_facts/tasks/main.yml b/roles/openshift_master_facts/tasks/main.yml
index 4d7c04065..0cbbaffc2 100644
--- a/roles/openshift_master_facts/tasks/main.yml
+++ b/roles/openshift_master_facts/tasks/main.yml
@@ -22,7 +22,6 @@
extension_scripts: "{{ openshift_master_extension_scripts | default(None) }}"
extension_stylesheets: "{{ openshift_master_extension_stylesheets | default(None) }}"
extensions: "{{ openshift_master_extensions | default(None) }}"
- oauth_template: "{{ openshift_master_oauth_template | default(None) }}"
etcd_hosts: "{{ openshift_master_etcd_hosts | default(None) }}"
etcd_port: "{{ openshift_master_etcd_port | default(None) }}"
etcd_use_ssl: "{{ openshift_master_etcd_use_ssl | default(None) }}"
@@ -30,10 +29,10 @@
embedded_etcd: "{{ openshift_master_embedded_etcd | default(None) }}"
embedded_kube: "{{ openshift_master_embedded_kube | default(None) }}"
embedded_dns: "{{ openshift_master_embedded_dns | default(None) }}"
+ # defaults to 8053 when using dnsmasq in 1.2/3.2
dns_port: "{{ openshift_master_dns_port | default(None) }}"
bind_addr: "{{ openshift_master_bind_addr | default(None) }}"
pod_eviction_timeout: "{{ openshift_master_pod_eviction_timeout | default(None) }}"
- portal_net: "{{ openshift_master_portal_net | default(None) }}"
session_max_seconds: "{{ openshift_master_session_max_seconds | default(None) }}"
session_name: "{{ openshift_master_session_name | default(None) }}"
session_secrets_file: "{{ openshift_master_session_secrets_file | default(None) }}"
@@ -42,11 +41,15 @@
access_token_max_seconds: "{{ openshift_master_access_token_max_seconds | default(None) }}"
auth_token_max_seconds: "{{ openshift_master_auth_token_max_seconds | default(None) }}"
identity_providers: "{{ openshift_master_identity_providers | default(None) }}"
+ htpasswd_users: "{{ openshift_master_htpasswd_users | default(lookup('file', openshift_master_htpasswd_file) | oo_htpasswd_users_from_file if openshift_master_htpasswd_file is defined else None) }}"
+ ldap_ca: "{{ openshift_master_ldap_ca | default(lookup('file', openshift_master_ldap_ca_file) if openshift_master_ldap_ca_file is defined else None) }}"
+ openid_ca: "{{ openshift_master_openid_ca | default(lookup('file', openshift_master_openid_ca_file) if openshift_master_openid_ca_file is defined else None) }}"
+ request_header_ca: "{{ openshift_master_request_header_ca | default(lookup('file', openshift_master_request_header_ca_file) if openshift_master_request_header_ca_file is defined else None) }}"
registry_url: "{{ oreg_url | default(None) }}"
oauth_grant_method: "{{ openshift_master_oauth_grant_method | default(None) }}"
sdn_cluster_network_cidr: "{{ osm_cluster_network_cidr | default(None) }}"
sdn_host_subnet_length: "{{ osm_host_subnet_length | default(None) }}"
- default_subdomain: "{{ openshift_master_default_subdomain | default(osm_default_subdomain) | default(None) }}"
+ default_subdomain: "{{ openshift_master_default_subdomain | default(osm_default_subdomain | default(None), true) }}"
custom_cors_origins: "{{ osm_custom_cors_origins | default(None) }}"
default_node_selector: "{{ osm_default_node_selector | default(None) }}"
project_request_message: "{{ osm_project_request_message | default(None) }}"
@@ -54,7 +57,6 @@
mcs_allocator_range: "{{ osm_mcs_allocator_range | default(None) }}"
mcs_labels_per_project: "{{ osm_mcs_labels_per_project | default(None) }}"
uid_allocator_range: "{{ osm_uid_allocator_range | default(None) }}"
- router_selector: "{{ openshift_router_selector | default(None) }}"
registry_selector: "{{ openshift_registry_selector | default(None) }}"
api_server_args: "{{ osm_api_server_args | default(None) }}"
controller_args: "{{ osm_controller_args | default(None) }}"
@@ -73,3 +75,4 @@
oauth_templates: "{{ openshift_master_oauth_templates | default(None) }}"
oauth_always_show_provider_selection: "{{ openshift_master_oauth_always_show_provider_selection | default(None) }}"
image_policy_config: "{{ openshift_master_image_policy_config | default(None) }}"
+ dynamic_provisioning_enabled: "{{ openshift_master_dynamic_provisioning_enabled | default(None) }}"
diff --git a/roles/openshift_master_facts/vars/main.yml b/roles/openshift_master_facts/vars/main.yml
new file mode 100644
index 000000000..3b0ee2761
--- /dev/null
+++ b/roles/openshift_master_facts/vars/main.yml
@@ -0,0 +1,14 @@
+builddefaults_yaml:
+ BuildDefaults:
+ configuration:
+ apiVersion: v1
+ kind: BuildDefaultsConfig
+ gitHTTPProxy: "{{ openshift.master.builddefaults_git_http_proxy | default(omit, true) }}"
+ gitHTTPSProxy: "{{ openshift.master.builddefaults_git_https_proxy | default(omit, true) }}"
+ env:
+ - name: HTTP_PROXY
+ value: "{{ openshift.master.builddefaults_http_proxy | default(omit, true) }}"
+ - name: HTTPS_PROXY
+ value: "{{ openshift.master.builddefaults_https_proxy | default(omit, true) }}"
+ - name: NO_PROXY
+ value: "{{ openshift.master.builddefaults_no_proxy | default(omit, true) | join(',') }}" \ No newline at end of file
diff --git a/roles/openshift_metrics/README.md b/roles/openshift_metrics/README.md
new file mode 100644
index 000000000..2e903379a
--- /dev/null
+++ b/roles/openshift_metrics/README.md
@@ -0,0 +1,57 @@
+OpenShift Metrics with Hawkular
+====================
+
+OpenShift Metrics Installation
+
+Requirements
+------------
+This role requires the master's default subdomain FQDN (`openshift_master_default_subdomain`) to be set.
+If persistence is enabled, it also requires NFS.
+
+Role Variables
+--------------
+
+From this role:
+
+| Name | Default value | |
+|-------------------------------------------------|-----------------------|-------------------------------------------------------------|
+| openshift_hosted_metrics_deploy | False | If metrics should be deployed |
+| openshift_hosted_metrics_storage_nfs_directory | /exports | Root export directory. |
+| openshift_hosted_metrics_storage_volume_name | metrics | Metrics volume within openshift_hosted_metrics_volume_dir |
+| openshift_hosted_metrics_storage_volume_size | 10Gi | Metrics volume size |
+| openshift_hosted_metrics_storage_nfs_options | *(rw,root_squash) | NFS options for configured exports. |
+| openshift_hosted_metrics_duration               | 7                     | How many days metrics are stored                             |
+| openshift_hosted_metrics_resolution             | 10                    | How often metrics are gathered (resolution)                  |
+
+
+From openshift_common:
+
+| Name | Default Value | |
+|---------------------------------------|----------------|----------------------------------------|
+| openshift_master_default_subdomain | null | Subdomain FQDN (Mandatory) |
+
+
+Dependencies
+------------
+openshift_facts
+openshift_examples
+
+Example Playbook
+----------------
+
+```
+- name: Configure openshift-metrics
+ hosts: oo_first_master
+ roles:
+ - role: openshift_metrics
+```
+
+License
+-------
+
+Apache License, Version 2.0
+
+Author Information
+------------------
+
+Jose David Martín (j.david.nieto@gmail.com)
diff --git a/roles/openshift_metrics/meta/main.yaml b/roles/openshift_metrics/meta/main.yaml
new file mode 100644
index 000000000..5f8d4f5c5
--- /dev/null
+++ b/roles/openshift_metrics/meta/main.yaml
@@ -0,0 +1,3 @@
+dependencies:
+- { role: openshift_examples }
+- { role: openshift_facts } \ No newline at end of file
diff --git a/roles/openshift_metrics/tasks/main.yaml b/roles/openshift_metrics/tasks/main.yaml
new file mode 100644
index 000000000..ca29ad6e1
--- /dev/null
+++ b/roles/openshift_metrics/tasks/main.yaml
@@ -0,0 +1,57 @@
+---
+- name: Copy Configuration to temporary conf
+ command: >
+ cp {{ openshift.common.config_base }}/master/admin.kubeconfig {{hawkular_tmp_conf}}
+ changed_when: false
+
+- name: Create metrics-deployer Service Account
+ shell: >
+ echo {{ deployer_service_account | to_json | quote }} |
+ {{ openshift.common.client_binary }} create
+ -n openshift-infra
+ --config={{hawkular_tmp_conf}}
+ -f -
+ register: deployer_create_service_account
+ failed_when: "'already exists' not in deployer_create_service_account.stderr and deployer_create_service_account.rc != 0"
+ changed_when: deployer_create_service_account.rc == 0
+
+- name: Create metrics-deployer Secret
+ command: >
+ {{ openshift.common.client_binary }}
+ secrets new metrics-deployer
+ nothing=/dev/null
+ --config={{hawkular_tmp_conf}}
+ -n openshift-infra
+ register: deployer_create_secret
+ failed_when: "'already exists' not in deployer_create_secret.stderr and deployer_create_secret.rc !=0"
+ changed_when: deployer_create_secret.rc == 0
+
+- name: Configure role/user permissions
+ command: >
+ {{ openshift.common.admin_binary }} {{item}}
+ --config={{hawkular_tmp_conf}}
+ with_items: "{{hawkular_permission_oc_commands}}"
+ register: hawkular_perm_task
+ failed_when: "'already exists' not in hawkular_perm_task.stderr and hawkular_perm_task.rc != 0"
+ changed_when: hawkular_perm_task.rc == 0
+
+- name: Check openshift_master_default_subdomain
+ fail:
+    msg: "openshift_master_default_subdomain must be defined to deploy metrics"
+ when: openshift.master.default_subdomain is not defined
+
+- name: Create Heapster and Hawkular/Cassandra Services
+ shell: >
+ {{ openshift.common.client_binary }} process -f \
+ /usr/share/openshift/examples/infrastructure-templates/{{ hawkular_type }}/metrics-deployer.yaml -v \
+ HAWKULAR_METRICS_HOSTNAME=hawkular-metrics.{{ openshift.master.default_subdomain }} USE_PERSISTENT_STORAGE={{ hawkular_persistence }} \
+ METRIC_DURATION={{ openshift.hosted.metrics.duration }} METRIC_RESOLUTION={{ openshift.hosted.metrics.resolution }} |
+ {{ openshift.common.client_binary }} create -n openshift-infra --config={{hawkular_tmp_conf}} -f -
+ register: oex_heapster_services
+ failed_when: "'already exists' not in oex_heapster_services.stderr and oex_heapster_services.rc != 0"
+ changed_when: false
+
+- name: Clean temporary config file
+ command: >
+ rm -rf {{hawkular_tmp_conf}}
+ changed_when: false
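Each task above registers its result and downgrades "already exists" errors so the play can be re-run safely. The same pattern in plain Python (a sketch, not project code):

```
import subprocess

def run_idempotent(cmd):
    # Non-zero exit is only fatal when stderr does not report
    # 'already exists', mirroring the failed_when expressions above.
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    _, err = proc.communicate()
    if isinstance(err, bytes):
        err = err.decode('utf-8', 'replace')
    if proc.returncode != 0 and 'already exists' not in err:
        raise RuntimeError(err)
    return proc.returncode == 0  # False: the object already existed
```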
diff --git a/roles/openshift_metrics/vars/main.yaml b/roles/openshift_metrics/vars/main.yaml
new file mode 100644
index 000000000..82d9d29f7
--- /dev/null
+++ b/roles/openshift_metrics/vars/main.yaml
@@ -0,0 +1,19 @@
+hawkular_permission_oc_commands:
+ - policy add-role-to-user edit system:serviceaccount:openshift-infra:metrics-deployer -n openshift-infra
+ - policy add-cluster-role-to-user cluster-admin system:serviceaccount:openshift-infra:heapster
+
+deployer_service_account:
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: metrics-deployer
+ secrets:
+ - name: metrics-deployer
+
+
+hawkular_tmp_conf: /tmp/hawkular_admin.kubeconfig
+
+hawkular_persistence: "{% if openshift.hosted.metrics.storage.kind != None %}true{% else %}false{% endif %}"
+
+hawkular_type: "{{ 'origin' if deployment_type == 'origin' else 'enterprise' }}"
+
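The two Jinja expressions above reduce to simple conditionals; restated in plain Python for clarity:

```
def hawkular_persistence(storage_kind):
    # USE_PERSISTENT_STORAGE is 'true' only when a storage kind is set.
    return 'true' if storage_kind is not None else 'false'

def hawkular_type(deployment_type):
    # Origin deployments use the origin template; all others enterprise.
    return 'origin' if deployment_type == 'origin' else 'enterprise'
```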
diff --git a/roles/openshift_node/meta/main.yml b/roles/openshift_node/meta/main.yml
index 84ba9ac2e..ca0c332ea 100644
--- a/roles/openshift_node/meta/main.yml
+++ b/roles/openshift_node/meta/main.yml
@@ -15,3 +15,6 @@ dependencies:
- role: openshift_docker
- role: openshift_cloud_provider
- role: openshift_common
+- role: openshift_node_dnsmasq
+ when: openshift.common.use_dnsmasq
+
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index 8987e0191..06fde88af 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -9,13 +9,6 @@
role: "{{ item.role }}"
local_facts: "{{ item.local_facts }}"
with_items:
- - role: common
- local_facts:
- # TODO: Replace this with a lookup or filter plugin.
- # TODO: Move this to the node role
- dns_ip: "{{ openshift_dns_ip
- | default(openshift_master_cluster_vip
- | default(None if openshift.common.version_gte_3_1_or_1_1 | bool else openshift_node_first_master_ip | default(None, true), true), true) }}"
- role: node
local_facts:
annotations: "{{ openshift_node_annotations | default(none) }}"
@@ -32,6 +25,7 @@
ovs_image: "{{ osn_ovs_image | default(None) }}"
proxy_mode: "{{ openshift_node_proxy_mode | default('iptables') }}"
local_quota_per_fsgroup: "{{ openshift_node_local_quota_per_fsgroup | default(None) }}"
+ dns_ip: "{{ openshift_dns_ip | default(none) | get_dns_ip(hostvars[inventory_hostname])}}"
# We have to add tuned-profiles in the same transaction otherwise we run into depsolving
# problems because the rpms don't pin the version properly. This was fixed in 3.1 packaging.
diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2
index 28cb1ea26..9ba1a01dd 100644
--- a/roles/openshift_node/templates/node.yaml.v1.j2
+++ b/roles/openshift_node/templates/node.yaml.v1.j2
@@ -1,8 +1,8 @@
allowDisabledDocker: false
apiVersion: v1
dnsDomain: {{ openshift.common.dns_domain }}
-{% if 'dns_ip' in openshift.common %}
-dnsIP: {{ openshift.common.dns_ip }}
+{% if 'dns_ip' in openshift.node %}
+dnsIP: {{ openshift.node.dns_ip }}
{% endif %}
dockerConfig:
execHandlerName: ""
diff --git a/roles/openshift_node_certificates/tasks/main.yml b/roles/openshift_node_certificates/tasks/main.yml
index c9a7a40c8..216c11093 100644
--- a/roles/openshift_node_certificates/tasks/main.yml
+++ b/roles/openshift_node_certificates/tasks/main.yml
@@ -1,5 +1,5 @@
---
-- name: Create openshift_generated_configs_dir if it doesn't exist
+- name: Create openshift_generated_configs_dir if it does not exist
file:
path: "{{ openshift_generated_configs_dir }}"
state: directory
@@ -19,7 +19,7 @@
--user=system:node:{{ item.openshift.common.hostname }}
args:
creates: "{{ openshift_generated_configs_dir }}/node-{{ item.openshift.common.hostname }}"
- with_items: nodes_needing_certs
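+  # quote the loop expression (bare names in with_items are deprecated in
+  # Ansible 2) and default to [] so the task is skipped when no certs are needed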
+ with_items: "{{ nodes_needing_certs | default([]) }}"
- name: Generate the node server certificate
command: >
@@ -33,4 +33,4 @@
--signer-serial={{ openshift_master_ca_serial }}
args:
creates: "{{ openshift_generated_configs_dir }}/node-{{ item.openshift.common.hostname }}/server.crt"
- with_items: nodes_needing_certs
+ with_items: "{{ nodes_needing_certs | default([]) }}"
diff --git a/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh b/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh
new file mode 100755
index 000000000..51e0751e9
--- /dev/null
+++ b/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh
@@ -0,0 +1,55 @@
+#!/bin/bash -x
+
+# This NetworkManager dispatcher script replicates the functionality of
+# NetworkManager's dns=dnsmasq; however, rather than hardcoding the listening
+# address and /etc/resolv.conf to 127.0.0.1, it pulls the IP address from the
+# interface that owns the default route. Pods can then be configured to use
+# this IP address as their only resolver, whereas using 127.0.0.1 inside a
+# pod would fail.
+#
+# To use this:
+#   Drop this script in /etc/NetworkManager/dispatcher.d/
+#   systemctl restart NetworkManager
+#   Configure node-config.yaml to set dnsIP: to the IP address of this node
+#
+# Test it:
+# host kubernetes.default.svc.cluster.local
+# host google.com
+#
+# TODO: I think this would be easy to add as a config option in NetworkManager
+# natively; look at hacking that up
+
+cd /etc/sysconfig/network-scripts
+. ./network-functions
+
+[ -f ../network ] && . ../network
+
+if [[ $2 =~ ^(up|dhcp4-change)$ ]]; then
+ # couldn't find an existing method to determine if the interface owns the
+ # default route
+  def_route=$(/sbin/ip route list match 0.0.0.0/0 | awk '{print $3}')
+ def_route_int=$(/sbin/ip route get to ${def_route} | awk '{print $3}')
+ def_route_ip=$(/sbin/ip route get to ${def_route} | awk '{print $5}')
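+  # only rewrite DNS configuration when the interface that triggered this
+  # event is the one that owns the default route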
+ if [[ ${DEVICE_IFACE} == ${def_route_int} ]]; then
+ if [ ! -f /etc/dnsmasq.d/origin-dns.conf ]; then
+ cat << EOF > /etc/dnsmasq.d/origin-dns.conf
+strict-order
+no-resolv
+domain-needed
+server=/cluster.local/172.30.0.1
+server=/30.172.in-addr.arpa/172.30.0.1
+EOF
+ fi
+ # zero out our upstream servers list and feed it into dnsmasq
+ echo '' > /etc/dnsmasq.d/origin-upstream-dns.conf
+ for ns in ${DHCP4_DOMAIN_NAME_SERVERS}; do
+ echo "server=${ns}" >> /etc/dnsmasq.d/origin-upstream-dns.conf
+ done
+ echo "listen-address=${def_route_ip}" >> /etc/dnsmasq.d/origin-upstream-dns.conf
+ systemctl restart dnsmasq
+
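+    # point the host's own resolv.conf at the local dnsmasq listener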
+ sed -i 's/^nameserver.*$/nameserver '"${def_route_ip}"'/g' /etc/resolv.conf
+ echo "# nameserver updated by /etc/NetworkManager/dispatcher.d/99-origin-dns.sh" >> /etc/resolv.conf
+ fi
+fi
diff --git a/roles/openshift_node_dnsmasq/handlers/main.yml b/roles/openshift_node_dnsmasq/handlers/main.yml
new file mode 100644
index 000000000..7e9e4d299
--- /dev/null
+++ b/roles/openshift_node_dnsmasq/handlers/main.yml
@@ -0,0 +1,5 @@
+---
+- name: restart NetworkManager
+ service:
+ name: NetworkManager
+ state: restarted
diff --git a/roles/openshift_node_dnsmasq/meta/main.yml b/roles/openshift_node_dnsmasq/meta/main.yml
new file mode 100644
index 000000000..c83d64ae4
--- /dev/null
+++ b/roles/openshift_node_dnsmasq/meta/main.yml
@@ -0,0 +1,15 @@
+---
+galaxy_info:
+ author: Scott Dodson
+ description: OpenShift Node DNSMasq support
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 1.7
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+dependencies:
+- role: openshift_common
diff --git a/roles/openshift_node_dnsmasq/tasks/main.yml b/roles/openshift_node_dnsmasq/tasks/main.yml
new file mode 100644
index 000000000..4cb24469d
--- /dev/null
+++ b/roles/openshift_node_dnsmasq/tasks/main.yml
@@ -0,0 +1,27 @@
+---
+- name: Check for NetworkManager service
+  command: >
+    systemctl show NetworkManager
+  register: nm_show
+  changed_when: false
+
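+# `systemctl show` prints ActiveState=active for a running unit; use that to
+# choose between the NetworkManager and static configuration paths below.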
+- name: Set fact network_manager_active
+  set_fact:
+    network_manager_active: "{{ 'ActiveState=active' in nm_show.stdout }}"
+
+- name: Install dnsmasq
+ action: "{{ ansible_pkg_mgr }} name=dnsmasq state=installed"
+ when: not openshift.common.is_atomic | bool
+
+- name: Install dnsmasq configuration
+ template:
+ src: origin-dns.conf.j2
+ dest: /etc/dnsmasq.d/origin-dns.conf
+
+# Dynamic NetworkManager-based dispatcher
+- include: ./network-manager.yml
+ when: network_manager_active | bool
+
+# Relies on Ansible to lay down a static configuration
+- include: ./no-network-manager.yml
+ when: not network_manager_active | bool
+
diff --git a/roles/openshift_node_dnsmasq/tasks/network-manager.yml b/roles/openshift_node_dnsmasq/tasks/network-manager.yml
new file mode 100644
index 000000000..dddcfc9da
--- /dev/null
+++ b/roles/openshift_node_dnsmasq/tasks/network-manager.yml
@@ -0,0 +1,9 @@
+---
+- name: Install network manager dispatch script
+ copy:
+ src: networkmanager/99-origin-dns.sh
+ dest: /etc/NetworkManager/dispatcher.d/
+ mode: 0755
+ notify: restart NetworkManager
+
+- meta: flush_handlers
diff --git a/roles/openshift_node_dnsmasq/tasks/no-network-manager.yml b/roles/openshift_node_dnsmasq/tasks/no-network-manager.yml
new file mode 100644
index 000000000..4d1bd3794
--- /dev/null
+++ b/roles/openshift_node_dnsmasq/tasks/no-network-manager.yml
@@ -0,0 +1,2 @@
+---
+- fail: msg="Currently, NetworkManager must be installed and enabled prior to installation." \ No newline at end of file
diff --git a/roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2 b/roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2
new file mode 100644
index 000000000..1753bb821
--- /dev/null
+++ b/roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2
@@ -0,0 +1,4 @@
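+# dnsmasq's server=/<domain>/<ip> form forwards lookups under the cluster DNS
+# domain to the cluster DNS service; no-resolv keeps dnsmasq from consuming
+# the host's /etc/resolv.conf as its upstream list.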
+strict-order
+no-resolv
+domain-needed
+server=/{{ openshift.common.dns_domain }}/{{ openshift.common.kube_svc_ip }}
diff --git a/roles/openshift_serviceaccounts/tasks/main.yml b/roles/openshift_serviceaccounts/tasks/main.yml
index 5dd28d52a..bafda9695 100644
--- a/roles/openshift_serviceaccounts/tasks/main.yml
+++ b/roles/openshift_serviceaccounts/tasks/main.yml
@@ -1,7 +1,7 @@
- name: test if service accounts exist
command: >
{{ openshift.common.client_binary }} get sa {{ item }} -n {{ openshift_serviceaccounts_namespace }}
- with_items: openshift_serviceaccounts_names
+ with_items: "{{ openshift_serviceaccounts_names }}"
failed_when: false
changed_when: false
register: account_test
@@ -13,8 +13,8 @@
-n {{ openshift_serviceaccounts_namespace }} create -f -
when: item.1.rc != 0
with_together:
- - openshift_serviceaccounts_names
- - account_test.results
+ - "{{ openshift_serviceaccounts_names }}"
+ - "{{ account_test.results }}"
- name: test if scc needs to be updated
command: >
@@ -22,7 +22,7 @@
changed_when: false
failed_when: false
register: scc_test
- with_items: openshift_serviceaccounts_sccs
+ with_items: "{{ openshift_serviceaccounts_sccs }}"
- name: Grant the user access to the privileged scc
command: >
@@ -30,8 +30,8 @@
privileged system:serviceaccount:{{ openshift_serviceaccounts_namespace }}:{{ item.0 }}
when: "openshift.common.version_gte_3_1_or_1_1 and item.1.rc == 0 and 'system:serviceaccount:{{ openshift_serviceaccounts_namespace }}:{{ item.0 }}' not in {{ (item.1.stdout | from_yaml).users }}"
with_nested:
- - openshift_serviceaccounts_names
- - scc_test.results
+ - "{{ openshift_serviceaccounts_names }}"
+ - "{{ scc_test.results }}"
- include: legacy_add_scc_to_user.yml
when: not openshift.common.version_gte_3_1_or_1_1
diff --git a/roles/openshift_storage_nfs/defaults/main.yml b/roles/openshift_storage_nfs/defaults/main.yml
index 5f6893129..90592e9d0 100644
--- a/roles/openshift_storage_nfs/defaults/main.yml
+++ b/roles/openshift_storage_nfs/defaults/main.yml
@@ -8,6 +8,14 @@ openshift:
options: "*(rw,root_squash)"
volume:
name: "registry"
+ metrics:
+ deploy: False
+ storage:
+ nfs:
+ directory: "/exports"
+ options: "*(rw,root_squash)"
+ volume:
+ name: "metrics"
os_firewall_use_firewalld: False
os_firewall_allow:
- service: nfs
diff --git a/roles/openshift_storage_nfs_lvm/README.md b/roles/openshift_storage_nfs_lvm/README.md
index 1ee02e18a..3680ef5b5 100644
--- a/roles/openshift_storage_nfs_lvm/README.md
+++ b/roles/openshift_storage_nfs_lvm/README.md
@@ -62,7 +62,7 @@ Both of them are mounted into `/exports/openshift` directory. Both directories
exported via NFS. json files are created in /root.
- hosts: nfsservers
- sudo: no
+ become: no
remote_user: root
gather_facts: no
roles:
@@ -87,7 +87,7 @@ exported via NFS. json files are created in /root.
* Create an ansible playbook, say `setupnfs.yaml`:
```
- hosts: nfsservers
- sudo: no
+ become: no
remote_user: root
gather_facts: no
roles:
diff --git a/roles/os_firewall/defaults/main.yml b/roles/os_firewall/defaults/main.yml
index e3176e611..20413d563 100644
--- a/roles/os_firewall/defaults/main.yml
+++ b/roles/os_firewall/defaults/main.yml
@@ -1,3 +1,5 @@
---
os_firewall_enabled: True
os_firewall_use_firewalld: True
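+# Default to empty lists so the firewalld/iptables tasks can drop their
+# "is defined" guards and iterate unconditionally.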
+os_firewall_allow: []
+os_firewall_deny: []
diff --git a/roles/os_firewall/tasks/firewall/firewalld.yml b/roles/os_firewall/tasks/firewall/firewalld.yml
index ac4600f83..241fa8823 100644
--- a/roles/os_firewall/tasks/firewall/firewalld.yml
+++ b/roles/os_firewall/tasks/firewall/firewalld.yml
@@ -52,29 +52,25 @@
port: "{{ item.port }}"
permanent: false
state: enabled
- with_items: os_firewall_allow
- when: os_firewall_allow is defined
+ with_items: "{{ os_firewall_allow }}"
- name: Persist firewalld allow rules
firewalld:
port: "{{ item.port }}"
permanent: true
state: enabled
- with_items: os_firewall_allow
- when: os_firewall_allow is defined
+ with_items: "{{ os_firewall_allow }}"
- name: Remove firewalld allow rules
firewalld:
port: "{{ item.port }}"
permanent: false
state: disabled
- with_items: os_firewall_deny
- when: os_firewall_deny is defined
+ with_items: "{{ os_firewall_deny }}"
- name: Persist removal of firewalld allow rules
firewalld:
port: "{{ item.port }}"
permanent: true
state: disabled
- with_items: os_firewall_deny
- when: os_firewall_deny is defined
+ with_items: "{{ os_firewall_deny }}"
diff --git a/roles/os_firewall/tasks/firewall/iptables.yml b/roles/os_firewall/tasks/firewall/iptables.yml
index 3b584f8eb..070fe6a3a 100644
--- a/roles/os_firewall/tasks/firewall/iptables.yml
+++ b/roles/os_firewall/tasks/firewall/iptables.yml
@@ -49,8 +49,7 @@
action: add
protocol: "{{ item.port.split('/')[1] }}"
port: "{{ item.port.split('/')[0] }}"
- with_items: os_firewall_allow
- when: os_firewall_allow is defined
+ with_items: "{{ os_firewall_allow }}"
- name: Remove iptables rules
os_firewall_manage_iptables:
@@ -58,5 +57,4 @@
action: remove
protocol: "{{ item.port.split('/')[1] }}"
port: "{{ item.port.split('/')[0] }}"
- with_items: os_firewall_deny
- when: os_firewall_deny is defined
+ with_items: "{{ os_firewall_deny }}"