Diffstat (limited to 'roles')
-rw-r--r--  roles/docker/tasks/systemcontainer_crio.yml | 19
-rw-r--r--  roles/docker/tasks/systemcontainer_docker.yml | 20
-rw-r--r--  roles/docker/templates/crio.conf.j2 | 5
-rw-r--r--  roles/etcd_common/defaults/main.yml | 2
-rw-r--r--  roles/etcd_common/tasks/backup.yml | 3
-rw-r--r--  roles/etcd_common/tasks/main.yml | 2
-rw-r--r--  roles/etcd_common/tasks/noop.yml | 4
-rw-r--r--  roles/flannel_register/defaults/main.yaml | 5
-rw-r--r--  roles/flannel_register/templates/flannel-config.json | 1
-rw-r--r--  roles/openshift_aws/README.md | 84
-rw-r--r--  roles/openshift_aws/defaults/main.yml | 209
-rw-r--r--  roles/openshift_aws/filter_plugins/filters.py | 28
-rw-r--r--  roles/openshift_aws/meta/main.yml | 3
-rw-r--r--  roles/openshift_aws/tasks/ami_copy.yml | 34
-rw-r--r--  roles/openshift_aws/tasks/build_ami.yml | 48
-rw-r--r--  roles/openshift_aws/tasks/build_node_group.yml | 34
-rw-r--r--  roles/openshift_aws/tasks/elb.yml | 68
-rw-r--r--  roles/openshift_aws/tasks/iam_cert.yml | 29
-rw-r--r--  roles/openshift_aws/tasks/launch_config.yml | 45
-rw-r--r--  roles/openshift_aws/tasks/provision.yml | 54
-rw-r--r--  roles/openshift_aws/tasks/provision_nodes.yml | 66
-rw-r--r--  roles/openshift_aws/tasks/s3.yml | 7
-rw-r--r--  roles/openshift_aws/tasks/scale_group.yml | 32
-rw-r--r--  roles/openshift_aws/tasks/seal_ami.yml | 49
-rw-r--r--  roles/openshift_aws/tasks/security_group.yml | 45
-rw-r--r--  roles/openshift_aws/tasks/ssh_keys.yml (renamed from roles/openshift_aws_ssh_keys/tasks/main.yml) | 4
-rw-r--r--  roles/openshift_aws/tasks/vpc.yml (renamed from roles/openshift_aws_vpc/tasks/main.yml) | 21
-rw-r--r--  roles/openshift_aws_ami_copy/README.md | 50
-rw-r--r--  roles/openshift_aws_ami_copy/tasks/main.yml | 26
-rw-r--r--  roles/openshift_aws_elb/README.md | 75
-rw-r--r--  roles/openshift_aws_elb/defaults/main.yml | 33
-rw-r--r--  roles/openshift_aws_elb/meta/main.yml | 12
-rw-r--r--  roles/openshift_aws_elb/tasks/main.yml | 57
-rw-r--r--  roles/openshift_aws_iam_kms/README.md | 43
-rw-r--r--  roles/openshift_aws_iam_kms/defaults/main.yml | 1
-rw-r--r--  roles/openshift_aws_iam_kms/meta/main.yml | 13
-rw-r--r--  roles/openshift_aws_iam_kms/tasks/main.yml | 18
-rw-r--r--  roles/openshift_aws_launch_config/README.md | 72
-rw-r--r--  roles/openshift_aws_launch_config/defaults/main.yml | 1
-rw-r--r--  roles/openshift_aws_launch_config/meta/main.yml | 12
-rw-r--r--  roles/openshift_aws_launch_config/tasks/main.yml | 50
-rw-r--r--  roles/openshift_aws_launch_config/templates/cloud-init.j2 | 9
-rw-r--r--  roles/openshift_aws_node_group/README.md | 77
-rw-r--r--  roles/openshift_aws_node_group/defaults/main.yml | 58
-rw-r--r--  roles/openshift_aws_node_group/tasks/main.yml | 32
-rw-r--r--  roles/openshift_aws_s3/README.md | 43
-rw-r--r--  roles/openshift_aws_s3/tasks/main.yml | 6
-rw-r--r--  roles/openshift_aws_sg/README.md | 59
-rw-r--r--  roles/openshift_aws_sg/defaults/main.yml | 48
-rw-r--r--  roles/openshift_aws_sg/tasks/main.yml | 53
-rw-r--r--  roles/openshift_aws_ssh_keys/README.md | 49
-rw-r--r--  roles/openshift_aws_vpc/README.md | 62
-rw-r--r--  roles/openshift_aws_vpc/defaults/main.yml | 1
-rw-r--r--  roles/openshift_cfme/defaults/main.yml | 3
-rw-r--r--  roles/openshift_cfme/meta/main.yml | 1
-rw-r--r--  roles/openshift_cfme/tasks/nfs.yml | 7
-rw-r--r--  roles/openshift_cli/meta/main.yml | 1
-rw-r--r--  roles/openshift_common/README.md | 45
-rw-r--r--  roles/openshift_common/defaults/main.yml | 3
-rw-r--r--  roles/openshift_common/meta/main.yml | 15
-rw-r--r--  roles/openshift_common/tasks/main.yml | 78
-rw-r--r--  roles/openshift_examples/meta/main.yml | 3
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py | 120
-rw-r--r--  roles/openshift_health_checker/action_plugins/openshift_health_check.py | 2
-rw-r--r--  roles/openshift_health_checker/callback_plugins/zz_failure_summary.py | 16
-rw-r--r--  roles/openshift_health_checker/library/aos_version.py | 31
-rw-r--r--  roles/openshift_health_checker/openshift_checks/__init__.py | 17
-rw-r--r--  roles/openshift_health_checker/openshift_checks/docker_image_availability.py | 87
-rw-r--r--  roles/openshift_health_checker/openshift_checks/mixins.py | 2
-rw-r--r--  roles/openshift_health_checker/openshift_checks/package_availability.py | 2
-rw-r--r--  roles/openshift_health_checker/openshift_checks/package_update.py | 2
-rw-r--r--  roles/openshift_health_checker/openshift_checks/package_version.py | 3
-rw-r--r--  roles/openshift_health_checker/test/action_plugin_test.py | 9
-rw-r--r--  roles/openshift_health_checker/test/docker_image_availability_test.py | 187
-rw-r--r--  roles/openshift_health_checker/test/package_availability_test.py | 4
-rw-r--r--  roles/openshift_health_checker/test/package_update_test.py | 4
-rw-r--r--  roles/openshift_health_checker/test/package_version_test.py | 10
-rw-r--r--  roles/openshift_health_checker/test/zz_failure_summary_test.py | 15
-rw-r--r--  roles/openshift_hosted/README.md | 1
-rw-r--r--  roles/openshift_hosted/defaults/main.yml | 6
-rw-r--r--  roles/openshift_hosted/tasks/registry/registry.yml | 8
-rw-r--r--  roles/openshift_hosted/tasks/router/router.yml | 18
-rw-r--r--  roles/openshift_hosted_logging/meta/main.yaml | 1
-rw-r--r--  roles/openshift_hosted_templates/meta/main.yml | 3
-rw-r--r--  roles/openshift_loadbalancer/defaults/main.yml | 8
-rw-r--r--  roles/openshift_logging_curator/templates/curator.j2 | 2
-rw-r--r--  roles/openshift_logging_curator/vars/main.yml | 4
-rw-r--r--  roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2 | 3
-rw-r--r--  roles/openshift_logging_elasticsearch/templates/es.j2 | 15
-rw-r--r--  roles/openshift_logging_elasticsearch/vars/main.yml | 4
-rw-r--r--  roles/openshift_logging_fluentd/templates/fluentd.j2 | 2
-rw-r--r--  roles/openshift_logging_fluentd/vars/main.yml | 4
-rw-r--r--  roles/openshift_logging_kibana/templates/kibana.j2 | 4
-rw-r--r--  roles/openshift_logging_kibana/vars/main.yml | 4
-rw-r--r--  roles/openshift_logging_mux/templates/mux.j2 | 2
-rw-r--r--  roles/openshift_logging_mux/vars/main.yml | 4
-rw-r--r--  roles/openshift_master/README.md | 9
-rw-r--r--  roles/openshift_master/defaults/main.yml | 21
-rw-r--r--  roles/openshift_master/meta/main.yml | 2
-rw-r--r--  roles/openshift_master/tasks/main.yml | 6
-rw-r--r--  roles/openshift_master/tasks/systemd_units.yml | 9
-rw-r--r--  roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 | 4
-rw-r--r--  roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 | 4
-rw-r--r--  roles/openshift_master/templates/master.yaml.v1.j2 | 6
-rw-r--r--  roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2 | 2
-rw-r--r--  roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2 | 2
-rw-r--r--  roles/openshift_master_facts/filter_plugins/openshift_master.py | 2
-rw-r--r--  roles/openshift_metrics/tasks/pre_install.yaml | 2
-rw-r--r--  roles/openshift_node/README.md | 10
-rw-r--r--  roles/openshift_node/defaults/main.yml | 28
-rw-r--r--  roles/openshift_node/handlers/main.yml | 2
-rw-r--r--  roles/openshift_node/meta/main.yml | 2
-rw-r--r--  roles/openshift_node/tasks/bootstrap.yml | 33
-rw-r--r--  roles/openshift_node/tasks/config.yml | 2
-rw-r--r--  roles/openshift_node/tasks/install.yml | 2
-rw-r--r--  roles/openshift_node/tasks/main.yml | 9
-rw-r--r--  roles/openshift_node/tasks/systemd_units.yml | 4
-rw-r--r--  roles/openshift_node/templates/node.yaml.v1.j2 | 12
-rw-r--r--  roles/openshift_node/templates/openshift.docker.node.service | 4
-rw-r--r--  roles/openshift_node_dnsmasq/meta/main.yml | 1
-rw-r--r--  roles/openshift_node_upgrade/README.md | 4
-rw-r--r--  roles/openshift_node_upgrade/defaults/main.yml | 5
-rw-r--r--  roles/openshift_node_upgrade/handlers/main.yml | 2
-rw-r--r--  roles/openshift_node_upgrade/meta/main.yml | 1
-rw-r--r--  roles/openshift_node_upgrade/tasks/main.yml | 4
-rw-r--r--  roles/openshift_node_upgrade/tasks/systemd_units.yml | 6
-rw-r--r--  roles/openshift_node_upgrade/templates/openshift.docker.node.service | 4
-rw-r--r--  roles/openshift_persistent_volumes/README.md | 7
-rw-r--r--  roles/openshift_persistent_volumes/meta/main.yml | 1
-rw-r--r--  roles/openshift_prometheus/README.md | 95
-rw-r--r--  roles/openshift_prometheus/defaults/main.yaml | 74
-rw-r--r--  roles/openshift_prometheus/files/openshift_prometheus.exports | 3
-rw-r--r--  roles/openshift_prometheus/meta/main.yaml | 19
-rw-r--r--  roles/openshift_prometheus/tasks/create_pvs.yaml | 36
-rw-r--r--  roles/openshift_prometheus/tasks/install_prometheus.yaml | 241
-rw-r--r--  roles/openshift_prometheus/tasks/main.yaml | 26
-rw-r--r--  roles/openshift_prometheus/tasks/nfs.yaml | 44
-rw-r--r--  roles/openshift_prometheus/templates/alertmanager.yml.j2 | 20
-rw-r--r--  roles/openshift_prometheus/templates/prom-pv-alertbuffer.yml.j2 | 15
-rw-r--r--  roles/openshift_prometheus/templates/prom-pv-alertmanager.yml.j2 | 15
-rw-r--r--  roles/openshift_prometheus/templates/prom-pv-server.yml.j2 | 15
-rw-r--r--  roles/openshift_prometheus/templates/prometheus.rules.j2 | 4
-rw-r--r--  roles/openshift_prometheus/templates/prometheus.yml.j2 | 174
-rw-r--r--  roles/openshift_prometheus/templates/prometheus_deployment.j2 | 240
-rw-r--r--  roles/openshift_prometheus/tests/inventory | 2
-rw-r--r--  roles/openshift_prometheus/tests/test.yaml | 5
-rw-r--r--  roles/openshift_service_catalog/defaults/main.yml | 4
-rw-r--r--  roles/openshift_service_catalog/tasks/install.yml | 2
-rw-r--r--  roles/openshift_version/tasks/main.yml | 6
149 files changed, 2366 insertions, 1527 deletions
diff --git a/roles/docker/tasks/systemcontainer_crio.yml b/roles/docker/tasks/systemcontainer_crio.yml
index 787f51f94..24ca0d9f8 100644
--- a/roles/docker/tasks/systemcontainer_crio.yml
+++ b/roles/docker/tasks/systemcontainer_crio.yml
@@ -3,6 +3,15 @@
- set_fact:
l_insecure_crio_registries: "{{ '\"{}\"'.format('\", \"'.join(openshift.docker.insecure_registries)) }}"
when: openshift.docker.insecure_registries
+- set_fact:
+ l_crio_registries: "{{ openshift.docker.additional_registries + ['docker.io'] }}"
+ when: openshift.docker.additional_registries
+- set_fact:
+ l_crio_registries: "{{ ['docker.io'] }}"
+ when: not openshift.docker.additional_registries
+- set_fact:
+ l_additional_crio_registries: "{{ '\"{}\"'.format('\", \"'.join(l_crio_registries)) }}"
+ when: openshift.docker.additional_registries
- name: Ensure container-selinux is installed
package:
@@ -88,10 +97,16 @@
l_crio_image_prepend: "docker.io/gscrivano"
l_crio_image_name: "crio-o-fedora"
- - name: Use Centos based image when distribution is Red Hat or CentOS
+ - name: Use Centos based image when distribution is CentOS
set_fact:
l_crio_image_name: "cri-o-centos"
- when: ansible_distribution in ['RedHat', 'CentOS']
+ when: ansible_distribution == "CentOS"
+
+ - name: Use RHEL based image when distribution is Red Hat
+ set_fact:
+ l_crio_image_prepend: "registry.access.redhat.com"
+ l_crio_image_name: "cri-o"
+ when: ansible_distribution == "RedHat"
# For https://github.com/openshift/openshift-ansible/pull/4049#discussion_r114478504
- name: Use a testing registry if requested
diff --git a/roles/docker/tasks/systemcontainer_docker.yml b/roles/docker/tasks/systemcontainer_docker.yml
index 57a84bc2c..146e5f430 100644
--- a/roles/docker/tasks/systemcontainer_docker.yml
+++ b/roles/docker/tasks/systemcontainer_docker.yml
@@ -100,18 +100,22 @@
l_docker_image_prepend: "registry.fedoraproject.org/f25"
when: ansible_distribution == 'Fedora'
- # For https://github.com/openshift/openshift-ansible/pull/4049#discussion_r114478504
- - name: Use a testing registry if requested
- set_fact:
- l_docker_image_prepend: "{{ openshift_docker_systemcontainer_image_registry_override }}"
- when:
- - openshift_docker_systemcontainer_image_registry_override is defined
- - openshift_docker_systemcontainer_image_registry_override != ""
-
- name: Set the full image name
set_fact:
l_docker_image: "{{ l_docker_image_prepend }}/{{ openshift.docker.service_name }}:latest"
+ # For https://github.com/openshift/openshift-ansible/pull/5354#issuecomment-328552959
+ - name: Use a specific image if requested
+ set_fact:
+ l_docker_image: "{{ openshift_docker_systemcontainer_image_override }}"
+ when:
+ - openshift_docker_systemcontainer_image_override is defined
+ - openshift_docker_systemcontainer_image_override != ""
+
+ # Be nice and let the user see the variable result
+ - debug:
+ var: l_docker_image
+
# NOTE: no_proxy added as a workaround until https://github.com/projectatomic/atomic/pull/999 is released
- name: Pre-pull Container Engine System Container image
command: "atomic pull --storage ostree {{ l_docker_image }}"
diff --git a/roles/docker/templates/crio.conf.j2 b/roles/docker/templates/crio.conf.j2
index 5b31932b1..b4ee84fd0 100644
--- a/roles/docker/templates/crio.conf.j2
+++ b/roles/docker/templates/crio.conf.j2
@@ -120,6 +120,11 @@ insecure_registries = [
{{ l_insecure_crio_registries|default("") }}
]
+# registries is used to specify a comma separated list of registries to be used
+# when pulling an unqualified image (e.g. fedora:rawhide).
+registries = [
+{{ l_additional_crio_registries|default("") }}
+]
# The "crio.network" table contains settings pertaining to the
# management of CNI plugins.
[crio.network]
diff --git a/roles/etcd_common/defaults/main.yml b/roles/etcd_common/defaults/main.yml
index 89993f7ea..b67411f40 100644
--- a/roles/etcd_common/defaults/main.yml
+++ b/roles/etcd_common/defaults/main.yml
@@ -56,7 +56,7 @@ etcd_is_containerized: False
etcd_is_thirdparty: False
# etcd dir vars
-etcd_data_dir: "{{ '/var/lib/origin/openshift.local.etcd' if r_etcd_common_embedded_etcd | bool else '/var/lib/etcd/' if openshift.common.etcd_runtime != 'runc' else '/var/lib/etcd/etcd.etcd/' }}"
+etcd_data_dir: "{{ '/var/lib/origin/openshift.local.etcd' if r_etcd_common_embedded_etcd | bool else '/var/lib/etcd/' if r_etcd_common_etcd_runtime != 'runc' else '/var/lib/etcd/etcd.etcd/' }}"
# etcd ports and protocols
etcd_client_port: 2379
diff --git a/roles/etcd_common/tasks/backup.yml b/roles/etcd_common/tasks/backup.yml
index 2bc486d3f..c1580640f 100644
--- a/roles/etcd_common/tasks/backup.yml
+++ b/roles/etcd_common/tasks/backup.yml
@@ -29,7 +29,6 @@
- name: Check current etcd disk usage
shell: du --exclude='*openshift-backup*' -k {{ l_etcd_data_dir }} | tail -n 1 | cut -f1
register: l_etcd_disk_usage
- when: r_etcd_common_embedded_etcd | bool
# AUDIT:changed_when: `false` because we are only inspecting
# state, not manipulating anything
changed_when: false
@@ -39,7 +38,7 @@
msg: >
{{ l_etcd_disk_usage.stdout }} Kb disk space required for etcd backup,
{{ l_avail_disk.stdout }} Kb available.
- when: (r_etcd_common_embedded_etcd | bool) and (l_etcd_disk_usage.stdout|int > l_avail_disk.stdout|int)
+ when: l_etcd_disk_usage.stdout|int*2 > l_avail_disk.stdout|int
# For non containerized and non embedded we should have the correct version of
# etcd installed already. So don't do anything.
diff --git a/roles/etcd_common/tasks/main.yml b/roles/etcd_common/tasks/main.yml
index f5bcd03ee..6ed87e6c7 100644
--- a/roles/etcd_common/tasks/main.yml
+++ b/roles/etcd_common/tasks/main.yml
@@ -6,4 +6,4 @@
- name: Include main action task file
include: "{{ r_etcd_common_action }}.yml"
- when: '"noop" not in r_etcd_common_action'
+ when: r_etcd_common_action != "noop"
diff --git a/roles/etcd_common/tasks/noop.yml b/roles/etcd_common/tasks/noop.yml
new file mode 100644
index 000000000..a88d78235
--- /dev/null
+++ b/roles/etcd_common/tasks/noop.yml
@@ -0,0 +1,4 @@
+---
+# This is file is here because the usage of tags, specifically `pre_upgrade`
+# breaks the functionality of this role.
+# See https://bugzilla.redhat.com/show_bug.cgi?id=1464025
diff --git a/roles/flannel_register/defaults/main.yaml b/roles/flannel_register/defaults/main.yaml
index ddf8230ec..71c8f38c3 100644
--- a/roles/flannel_register/defaults/main.yaml
+++ b/roles/flannel_register/defaults/main.yaml
@@ -1,7 +1,6 @@
---
-flannel_network: "{{ openshift.common.portal_net | default('172.30.0.0/16', true) }}"
-flannel_min_network: 172.30.5.0
-flannel_subnet_len: 24
+flannel_network: "{{ openshift.master.sdn_cluster_network_cidr }}"
+flannel_subnet_len: "{{ 32 - openshift.master.sdn_host_subnet_length }}"
flannel_etcd_key: /openshift.com/network
etcd_hosts: "{{ etcd_urls }}"
etcd_conf_dir: "{{ openshift.common.config_base }}/master"
diff --git a/roles/flannel_register/templates/flannel-config.json b/roles/flannel_register/templates/flannel-config.json
index 89ce4c30b..bba3729fa 100644
--- a/roles/flannel_register/templates/flannel-config.json
+++ b/roles/flannel_register/templates/flannel-config.json
@@ -1,7 +1,6 @@
{
"Network": "{{ flannel_network }}",
"SubnetLen": {{ flannel_subnet_len }},
- "SubnetMin": "{{ flannel_min_network }}",
"Backend": {
"Type": "host-gw"
}
diff --git a/roles/openshift_aws/README.md b/roles/openshift_aws/README.md
new file mode 100644
index 000000000..696efbea5
--- /dev/null
+++ b/roles/openshift_aws/README.md
@@ -0,0 +1,84 @@
+openshift_aws
+==================================
+
+Provision AWS infrastructure helpers.
+
+Requirements
+------------
+
+* Ansible 2.3
+* Boto
+
+Role Variables
+--------------
+
+From this role:
+
+| Name | Default value
+|---------------------------------------------------|-----------------------
+| openshift_aws_clusterid | default
+| openshift_aws_elb_scheme | internet-facing
+| openshift_aws_launch_config_bootstrap_token | ''
+| openshift_aws_node_group_config | {'master': {'ami': '{{ openshift_aws_ami }}', 'health_check': {'type': 'EC2', 'period': 60}, 'volumes': '{{ openshift_aws_node_group_config_master_volumes }}', 'tags': {'host-type': 'master', 'sub-host-type': 'default'}, 'min_size': 3, 'instance_type': 'm4.xlarge', 'desired_size': 3, 'wait_for_instances': True, 'max_size': 3}, 'tags': '{{ openshift_aws_node_group_config_tags }}', 'compute': {'ami': '{{ openshift_aws_ami }}', 'health_check': {'type': 'EC2', 'period': 60}, 'volumes': '{{ openshift_aws_node_group_config_node_volumes }}', 'tags': {'host-type': 'node', 'sub-host-type': 'compute'}, 'min_size': 3, 'instance_type': 'm4.xlarge', 'desired_size': 3, 'max_size': 100}, 'infra': {'ami': '{{ openshift_aws_ami }}', 'health_check': {'type': 'EC2', 'period': 60}, 'volumes': '{{ openshift_aws_node_group_config_node_volumes }}', 'tags': {'host-type': 'node', 'sub-host-type': 'infra'}, 'min_size': 2, 'instance_type': 'm4.xlarge', 'desired_size': 2, 'max_size': 20}}
+| openshift_aws_ami_copy_wait | False
+| openshift_aws_users | []
+| openshift_aws_launch_config_name | {{ openshift_aws_clusterid }}-{{ openshift_aws_node_group_type }}
+| openshift_aws_create_vpc | False
+| openshift_aws_node_group_type | master
+| openshift_aws_elb_cert_arn | ''
+| openshift_aws_kubernetes_cluster_status | owned
+| openshift_aws_s3_mode | create
+| openshift_aws_vpc | {'subnets': {'us-east-1': [{'cidr': '172.31.48.0/20', 'az': 'us-east-1c'}, {'cidr': '172.31.32.0/20', 'az': 'us-east-1e'}, {'cidr': '172.31.16.0/20', 'az': 'us-east-1a'}]}, 'cidr': '172.31.0.0/16', 'name': '{{ openshift_aws_vpc_name }}'}
+| openshift_aws_create_ssh_keys | False
+| openshift_aws_iam_kms_alias | alias/{{ openshift_aws_clusterid }}_kms
+| openshift_aws_use_custom_ami | False
+| openshift_aws_ami_copy_src_region | {{ openshift_aws_region }}
+| openshift_aws_s3_bucket_name | {{ openshift_aws_clusterid }}
+| openshift_aws_elb_health_check | {'response_timeout': 5, 'ping_port': 443, 'ping_protocol': 'tcp', 'interval': 30, 'healthy_threshold': 2, 'unhealthy_threshold': 2}
+| openshift_aws_node_security_groups | {'default': {'rules': [{'to_port': 22, 'from_port': 22, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}, {'to_port': 'all', 'from_port': 'all', 'proto': 'all', 'group_name': '{{ openshift_aws_clusterid }}'}], 'name': '{{ openshift_aws_clusterid }}', 'desc': '{{ openshift_aws_clusterid }} default'}, 'master': {'rules': [{'to_port': 80, 'from_port': 80, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}, {'to_port': 443, 'from_port': 443, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}], 'name': '{{ openshift_aws_clusterid }}_master', 'desc': '{{ openshift_aws_clusterid }} master instances'}, 'compute': {'name': '{{ openshift_aws_clusterid }}_compute', 'desc': '{{ openshift_aws_clusterid }} compute node instances'}, 'etcd': {'name': '{{ openshift_aws_clusterid }}_etcd', 'desc': '{{ openshift_aws_clusterid }} etcd instances'}, 'infra': {'rules': [{'to_port': 80, 'from_port': 80, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}, {'to_port': 443, 'from_port': 443, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}, {'to_port': 32000, 'from_port': 30000, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}], 'name': '{{ openshift_aws_clusterid }}_infra', 'desc': '{{ openshift_aws_clusterid }} infra node instances'}}
+| openshift_aws_elb_security_groups | ['{{ openshift_aws_clusterid }}', '{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}']
+| openshift_aws_vpc_tags | {'Name': '{{ openshift_aws_vpc_name }}'}
+| openshift_aws_create_security_groups | False
+| openshift_aws_create_iam_cert | False
+| openshift_aws_create_scale_group | True
+| openshift_aws_ami_encrypt | False
+| openshift_aws_node_group_config_node_volumes | [{'volume_size': 100, 'delete_on_termination': True, 'device_type': 'gp2', 'device_name': '/dev/sdb'}]
+| openshift_aws_elb_instance_filter | {'tag:host-type': '{{ openshift_aws_node_group_type }}', 'tag:clusterid': '{{ openshift_aws_clusterid }}', 'instance-state-name': 'running'}
+| openshift_aws_region | us-east-1
+| openshift_aws_elb_name | {{ openshift_aws_clusterid }}-{{ openshift_aws_node_group_type }}
+| openshift_aws_elb_idle_timout | 400
+| openshift_aws_subnet_name | us-east-1c
+| openshift_aws_node_group_config_tags | {{ openshift_aws_clusterid | openshift_aws_build_instance_tags(openshift_aws_kubernetes_cluster_status) }}
+| openshift_aws_create_launch_config | True
+| openshift_aws_ami_tags | {'bootstrap': 'true', 'clusterid': '{{ openshift_aws_clusterid }}', 'openshift-created': 'true'}
+| openshift_aws_ami_name | openshift-gi
+| openshift_aws_node_group_config_master_volumes | [{'volume_size': 100, 'delete_on_termination': False, 'device_type': 'gp2', 'device_name': '/dev/sdb'}]
+| openshift_aws_vpc_name | {{ openshift_aws_clusterid }}
+| openshift_aws_elb_listeners | {'master': {'internal': [{'instance_port': 80, 'instance_protocol': 'tcp', 'load_balancer_port': 80, 'protocol': 'tcp'}, {'instance_port': 443, 'instance_protocol': 'tcp', 'load_balancer_port': 443, 'protocol': 'tcp'}], 'external': [{'instance_port': 443, 'instance_protocol': 'ssl', 'load_balancer_port': 80, 'protocol': 'tcp'}, {'instance_port': 443, 'instance_protocol': 'ssl', 'load_balancer_port': 443, 'ssl_certificate_id': '{{ openshift_aws_elb_cert_arn }}', 'protocol': 'ssl'}]}}
+|
+
+
+Dependencies
+------------
+
+
+Example Playbook
+----------------
+
+```yaml
+- include_role:
+ name: openshift_aws
+ tasks_from: vpc.yml
+ vars:
+ openshift_aws_clusterid: test
+ openshift_aws_region: us-east-1
+ openshift_aws_create_vpc: true
+```
+
+License
+-------
+
+Apache License, Version 2.0
+
+Author Information
+------------------
diff --git a/roles/openshift_aws/defaults/main.yml b/roles/openshift_aws/defaults/main.yml
new file mode 100644
index 000000000..4e7f54f79
--- /dev/null
+++ b/roles/openshift_aws/defaults/main.yml
@@ -0,0 +1,209 @@
+---
+openshift_aws_create_vpc: True
+openshift_aws_create_s3: True
+openshift_aws_create_iam_cert: True
+openshift_aws_create_security_groups: True
+openshift_aws_create_launch_config: True
+openshift_aws_create_scale_group: True
+openshift_aws_kubernetes_cluster_status: owned # or shared
+openshift_aws_node_group_type: master
+
+openshift_aws_wait_for_ssh: True
+
+openshift_aws_clusterid: default
+openshift_aws_region: us-east-1
+openshift_aws_vpc_name: "{{ openshift_aws_clusterid }}"
+
+openshift_aws_iam_cert_name: "{{ openshift_aws_clusterid }}-master-external"
+openshift_aws_iam_cert_path: ''
+openshift_aws_iam_cert_chain_path: ''
+openshift_aws_iam_cert_key_path: ''
+openshift_aws_scale_group_name: "{{ openshift_aws_clusterid }} openshift {{ openshift_aws_node_group_type }}"
+
+openshift_aws_iam_kms_alias: "alias/{{ openshift_aws_clusterid }}_kms"
+openshift_aws_ami: ''
+openshift_aws_ami_copy_wait: False
+openshift_aws_ami_encrypt: False
+openshift_aws_ami_copy_src_region: "{{ openshift_aws_region }}"
+openshift_aws_ami_name: openshift-gi
+openshift_aws_base_ami_name: ami_base
+
+openshift_aws_launch_config_bootstrap_token: ''
+openshift_aws_launch_config_name: "{{ openshift_aws_clusterid }}-{{ openshift_aws_node_group_type }}-{{ ansible_date_time.epoch }}"
+
+openshift_aws_users: []
+
+openshift_aws_ami_tags:
+ bootstrap: "true"
+ openshift-created: "true"
+ clusterid: "{{ openshift_aws_clusterid }}"
+
+openshift_aws_s3_mode: create
+openshift_aws_s3_bucket_name: "{{ openshift_aws_clusterid }}-docker-registry"
+
+openshift_aws_elb_health_check:
+ ping_protocol: tcp
+ ping_port: 443
+ response_timeout: 5
+ interval: 30
+ unhealthy_threshold: 2
+ healthy_threshold: 2
+
+openshift_aws_elb_name: "{{ openshift_aws_clusterid }}-{{ openshift_aws_node_group_type }}"
+openshift_aws_elb_idle_timout: 400
+openshift_aws_elb_scheme: internet-facing
+openshift_aws_elb_cert_arn: ''
+
+openshift_aws_elb_listeners:
+ master:
+ external:
+ - protocol: tcp
+ load_balancer_port: 80
+ instance_protocol: ssl
+ instance_port: 443
+ - protocol: ssl
+ load_balancer_port: 443
+ instance_protocol: ssl
+ instance_port: 443
+ # ssl certificate required for https or ssl
+ ssl_certificate_id: "{{ openshift_aws_elb_cert_arn }}"
+ internal:
+ - protocol: tcp
+ load_balancer_port: 80
+ instance_protocol: tcp
+ instance_port: 80
+ - protocol: tcp
+ load_balancer_port: 443
+ instance_protocol: tcp
+ instance_port: 443
+
+openshift_aws_node_group_config_master_volumes:
+- device_name: /dev/sdb
+ volume_size: 100
+ device_type: gp2
+ delete_on_termination: False
+
+openshift_aws_node_group_config_node_volumes:
+- device_name: /dev/sdb
+ volume_size: 100
+ device_type: gp2
+ delete_on_termination: True
+
+openshift_aws_node_group_config_tags: "{{ openshift_aws_clusterid | build_instance_tags(openshift_aws_kubernetes_cluster_status) }}"
+
+openshift_aws_node_group_config:
+ tags: "{{ openshift_aws_node_group_config_tags }}"
+ master:
+ instance_type: m4.xlarge
+ ami: "{{ openshift_aws_ami }}"
+ volumes: "{{ openshift_aws_node_group_config_master_volumes }}"
+ health_check:
+ period: 60
+ type: EC2
+ min_size: 3
+ max_size: 3
+ desired_size: 3
+ tags:
+ host-type: master
+ sub-host-type: default
+ wait_for_instances: True
+ compute:
+ instance_type: m4.xlarge
+ ami: "{{ openshift_aws_ami }}"
+ volumes: "{{ openshift_aws_node_group_config_node_volumes }}"
+ health_check:
+ period: 60
+ type: EC2
+ min_size: 3
+ max_size: 100
+ desired_size: 3
+ tags:
+ host-type: node
+ sub-host-type: compute
+ infra:
+ instance_type: m4.xlarge
+ ami: "{{ openshift_aws_ami }}"
+ volumes: "{{ openshift_aws_node_group_config_node_volumes }}"
+ health_check:
+ period: 60
+ type: EC2
+ min_size: 2
+ max_size: 20
+ desired_size: 2
+ tags:
+ host-type: node
+ sub-host-type: infra
+
+openshift_aws_elb_security_groups:
+- "{{ openshift_aws_clusterid }}"
+- "{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}"
+
+openshift_aws_elb_instance_filter:
+ "tag:clusterid": "{{ openshift_aws_clusterid }}"
+ "tag:host-type": "{{ openshift_aws_node_group_type }}"
+ instance-state-name: running
+
+openshift_aws_node_security_groups:
+ default:
+ name: "{{ openshift_aws_clusterid }}"
+ desc: "{{ openshift_aws_clusterid }} default"
+ rules:
+ - proto: tcp
+ from_port: 22
+ to_port: 22
+ cidr_ip: 0.0.0.0/0
+ - proto: all
+ from_port: all
+ to_port: all
+ group_name: "{{ openshift_aws_clusterid }}"
+ master:
+ name: "{{ openshift_aws_clusterid }}_master"
+ desc: "{{ openshift_aws_clusterid }} master instances"
+ rules:
+ - proto: tcp
+ from_port: 80
+ to_port: 80
+ cidr_ip: 0.0.0.0/0
+ - proto: tcp
+ from_port: 443
+ to_port: 443
+ cidr_ip: 0.0.0.0/0
+ compute:
+ name: "{{ openshift_aws_clusterid }}_compute"
+ desc: "{{ openshift_aws_clusterid }} compute node instances"
+ infra:
+ name: "{{ openshift_aws_clusterid }}_infra"
+ desc: "{{ openshift_aws_clusterid }} infra node instances"
+ rules:
+ - proto: tcp
+ from_port: 80
+ to_port: 80
+ cidr_ip: 0.0.0.0/0
+ - proto: tcp
+ from_port: 443
+ to_port: 443
+ cidr_ip: 0.0.0.0/0
+ - proto: tcp
+ from_port: 30000
+ to_port: 32000
+ cidr_ip: 0.0.0.0/0
+ etcd:
+ name: "{{ openshift_aws_clusterid }}_etcd"
+ desc: "{{ openshift_aws_clusterid }} etcd instances"
+
+openshift_aws_vpc_tags:
+ Name: "{{ openshift_aws_vpc_name }}"
+
+openshift_aws_subnet_name: us-east-1c
+
+openshift_aws_vpc:
+ name: "{{ openshift_aws_vpc_name }}"
+ cidr: 172.31.0.0/16
+ subnets:
+ us-east-1:
+ - cidr: 172.31.48.0/20
+ az: "us-east-1c"
+ - cidr: 172.31.32.0/20
+ az: "us-east-1e"
+ - cidr: 172.31.16.0/20
+ az: "us-east-1a"
diff --git a/roles/openshift_aws/filter_plugins/filters.py b/roles/openshift_aws/filter_plugins/filters.py
new file mode 100644
index 000000000..06e1f9602
--- /dev/null
+++ b/roles/openshift_aws/filter_plugins/filters.py
@@ -0,0 +1,28 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+'''
+Custom filters for use in openshift_aws
+'''
+
+
+class FilterModule(object):
+ ''' Custom ansible filters for use by openshift_aws role'''
+
+ @staticmethod
+ def build_instance_tags(clusterid, status='owned'):
+ ''' This function will return a dictionary of the instance tags.
+
+ The main desire to have this inside of a filter_plugin is that we
+ need to build the following key.
+
+ {"kubernetes.io/cluster/{{ openshift_aws_clusterid }}": 'owned'}
+
+ '''
+ tags = {'clusterid': clusterid,
+ 'kubernetes.io/cluster/{}'.format(clusterid): status}
+
+ return tags
+
+ def filters(self):
+ ''' returns a mapping of filters to methods '''
+ return {'build_instance_tags': self.build_instance_tags}
diff --git a/roles/openshift_aws/meta/main.yml b/roles/openshift_aws/meta/main.yml
new file mode 100644
index 000000000..875efcb8f
--- /dev/null
+++ b/roles/openshift_aws/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+- lib_utils
diff --git a/roles/openshift_aws/tasks/ami_copy.yml b/roles/openshift_aws/tasks/ami_copy.yml
new file mode 100644
index 000000000..07020dd62
--- /dev/null
+++ b/roles/openshift_aws/tasks/ami_copy.yml
@@ -0,0 +1,34 @@
+---
+- fail:
+ msg: "{{ item }} needs to be defined"
+ when: item is not defined
+ with_items:
+ - openshift_aws_ami_copy_src_ami
+ - openshift_aws_ami_copy_name
+
+- name: Create IAM KMS key with alias
+ oo_iam_kms:
+ state: present
+ alias: "{{ openshift_aws_iam_kms_alias }}"
+ region: "{{ openshift_aws_region }}"
+ register: created_kms
+
+- debug: var=created_kms.results
+
+- name: "Create copied AMI image and wait: {{ openshift_aws_ami_copy_wait }}"
+ ec2_ami_copy:
+ name: "{{ openshift_aws_ami_copy_name }}"
+ region: "{{ openshift_aws_region }}"
+ source_region: "{{ openshift_aws_ami_copy_src_region }}"
+ source_image_id: "{{ openshift_aws_ami_copy_src_ami }}"
+ encrypted: "{{ openshift_aws_ami_encrypt | bool }}"
+ kms_key_id: "{{ created_kms.results.KeyArn | default(omit) }}"
+ wait: "{{ openshift_aws_ami_copy_wait | default(omit) }}"
+ tags: "{{ openshift_aws_ami_tags }}"
+ register: copy_result
+
+- debug: var=copy_result
+
+- name: return AMI ID with setfact
+ set_fact:
+ openshift_aws_ami_copy_custom_ami: "{{ copy_result.image_id }}"
diff --git a/roles/openshift_aws/tasks/build_ami.yml b/roles/openshift_aws/tasks/build_ami.yml
new file mode 100644
index 000000000..8d4e5ac43
--- /dev/null
+++ b/roles/openshift_aws/tasks/build_ami.yml
@@ -0,0 +1,48 @@
+---
+- when: openshift_aws_create_vpc | bool
+ name: create a vpc
+ include: vpc.yml
+
+- when: openshift_aws_users | length > 0
+ name: create aws ssh keypair
+ include: ssh_keys.yml
+
+- when: openshift_aws_create_security_groups | bool
+ name: Create compute security_groups
+ include: security_group.yml
+
+- name: query vpc
+ ec2_vpc_net_facts:
+ region: "{{ openshift_aws_region }}"
+ filters:
+ 'tag:Name': "{{ openshift_aws_vpc_name }}"
+ register: vpcout
+
+- name: fetch the default subnet id
+ ec2_vpc_subnet_facts:
+ region: "{{ openshift_aws_region }}"
+ filters:
+ "tag:Name": "{{ openshift_aws_subnet_name }}"
+ vpc-id: "{{ vpcout.vpcs[0].id }}"
+ register: subnetout
+
+- name: create instance for ami creation
+ ec2:
+ assign_public_ip: yes
+ region: "{{ openshift_aws_region }}"
+ key_name: "{{ openshift_aws_ssh_key_name }}"
+ group: "{{ openshift_aws_clusterid }}"
+ instance_type: m4.xlarge
+ vpc_subnet_id: "{{ subnetout.subnets[0].id }}"
+ image: "{{ openshift_aws_base_ami }}"
+ volumes:
+ - device_name: /dev/sdb
+ volume_type: gp2
+ volume_size: 100
+ delete_on_termination: true
+ wait: yes
+ exact_count: 1
+ count_tag:
+ Name: "{{ openshift_aws_base_ami_name }}"
+ instance_tags:
+ Name: "{{ openshift_aws_base_ami_name }}"
diff --git a/roles/openshift_aws/tasks/build_node_group.yml b/roles/openshift_aws/tasks/build_node_group.yml
new file mode 100644
index 000000000..0dac1c23d
--- /dev/null
+++ b/roles/openshift_aws/tasks/build_node_group.yml
@@ -0,0 +1,34 @@
+---
+# When openshift_aws_use_custom_ami is '' then
+# we retrieve the latest build AMI.
+# Then set openshift_aws_ami to the ami.
+- when: openshift_aws_ami == ''
+ block:
+ - name: fetch recently created AMI
+ ec2_ami_find:
+ region: "{{ openshift_aws_region }}"
+ sort: creationDate
+ sort_order: descending
+ name: "{{ openshift_aws_ami_name }}*"
+ ami_tags: "{{ openshift_aws_ami_tags }}"
+ no_result_action: fail
+ register: amiout
+
+ - name: Set the openshift_aws_ami
+ set_fact:
+ openshift_aws_ami: "{{ amiout.results[0].ami_id }}"
+ when:
+ - "'results' in amiout"
+ - amiout.results|length > 0
+
+- when: openshift_aws_create_security_groups
+ name: "Create {{ openshift_aws_node_group_type }} security groups"
+ include: security_group.yml
+
+- when: openshift_aws_create_launch_config
+ name: "Create {{ openshift_aws_node_group_type }} launch config"
+ include: launch_config.yml
+
+- when: openshift_aws_create_scale_group
+ name: "Create {{ openshift_aws_node_group_type }} node group"
+ include: scale_group.yml
diff --git a/roles/openshift_aws/tasks/elb.yml b/roles/openshift_aws/tasks/elb.yml
new file mode 100644
index 000000000..a1fdd66fc
--- /dev/null
+++ b/roles/openshift_aws/tasks/elb.yml
@@ -0,0 +1,68 @@
+---
+- name: query vpc
+ ec2_vpc_net_facts:
+ region: "{{ openshift_aws_region }}"
+ filters:
+ 'tag:Name': "{{ openshift_aws_vpc_name }}"
+ register: vpcout
+
+- name: debug
+ debug: var=vpcout
+
+- name: fetch the remote instances
+ ec2_remote_facts:
+ region: "{{ openshift_aws_region }}"
+ filters: "{{ openshift_aws_elb_instance_filter }}"
+ register: instancesout
+
+- name: fetch the default subnet id
+ ec2_vpc_subnet_facts:
+ region: "{{ openshift_aws_region }}"
+ filters:
+ "tag:Name": "{{ openshift_aws_subnet_name }}"
+ vpc-id: "{{ vpcout.vpcs[0].id }}"
+ register: subnetout
+
+- name:
+ debug:
+ msg: "{{ openshift_aws_elb_listeners[openshift_aws_node_group_type][openshift_aws_elb_direction]
+ if 'master' in openshift_aws_node_group_type or 'infra' in openshift_aws_node_group_type
+ else openshift_aws_elb_listeners }}"
+
+- name: "Create ELB {{ openshift_aws_elb_name }}"
+ ec2_elb_lb:
+ name: "{{ openshift_aws_elb_name }}"
+ state: present
+ security_group_names: "{{ openshift_aws_elb_security_groups }}"
+ idle_timeout: "{{ openshift_aws_elb_idle_timout }}"
+ region: "{{ openshift_aws_region }}"
+ subnets:
+ - "{{ subnetout.subnets[0].id }}"
+ health_check: "{{ openshift_aws_elb_health_check }}"
+ listeners: "{{ openshift_aws_elb_listeners[openshift_aws_node_group_type][openshift_aws_elb_direction]
+ if 'master' in openshift_aws_node_group_type or 'infra' in openshift_aws_node_group_type
+ else openshift_aws_elb_listeners }}"
+ scheme: "{{ openshift_aws_elb_scheme }}"
+ tags:
+ KubernetesCluster: "{{ openshift_aws_clusterid }}"
+ register: new_elb
+
+# It is necessary to ignore_errors here because the instances are not in 'ready'
+# state when first added to ELB
+- name: "Add instances to ELB {{ openshift_aws_elb_name }}"
+ ec2_elb:
+ instance_id: "{{ item.id }}"
+ ec2_elbs: "{{ openshift_aws_elb_name }}"
+ state: present
+ region: "{{ openshift_aws_region }}"
+ wait: False
+ with_items: "{{ instancesout.instances }}"
+ ignore_errors: True
+ retries: 10
+ register: elb_call
+ until: elb_call|succeeded
+
+- debug:
+ msg: "{{ item }}"
+ with_items:
+ - "{{ new_elb }}"
diff --git a/roles/openshift_aws/tasks/iam_cert.yml b/roles/openshift_aws/tasks/iam_cert.yml
new file mode 100644
index 000000000..cd9772a25
--- /dev/null
+++ b/roles/openshift_aws/tasks/iam_cert.yml
@@ -0,0 +1,29 @@
+---
+- name: upload certificates to AWS IAM
+ iam_cert23:
+ state: present
+ name: "{{ openshift_aws_iam_cert_name }}"
+ cert: "{{ openshift_aws_iam_cert_path }}"
+ key: "{{ openshift_aws_iam_cert_key_path }}"
+ cert_chain: "{{ openshift_aws_iam_cert_chain_path | default(omit) }}"
+ register: elb_cert_chain
+ failed_when:
+ - "'failed' in elb_cert_chain"
+ - elb_cert_chain.failed
+ - "'msg' in elb_cert_chain"
+ - "'already exists and has a different certificate body' in elb_cert_chain.msg"
+ - "'BotoServerError' in elb_cert_chain.msg"
+ when:
+ - openshift_aws_create_iam_cert | bool
+ - openshift_aws_iam_cert_path != ''
+ - openshift_aws_iam_cert_key_path != ''
+ - openshift_aws_elb_cert_arn == ''
+
+- name: set_fact openshift_aws_elb_cert_arn
+ set_fact:
+ openshift_aws_elb_cert_arn: "{{ elb_cert_chain.arn }}"
+
+- name: wait for cert to propagate
+ pause:
+ seconds: 5
+ when: elb_cert_chain.changed
diff --git a/roles/openshift_aws/tasks/launch_config.yml b/roles/openshift_aws/tasks/launch_config.yml
new file mode 100644
index 000000000..65c5a6cc0
--- /dev/null
+++ b/roles/openshift_aws/tasks/launch_config.yml
@@ -0,0 +1,45 @@
+---
+- fail:
+ msg: "Ensure that an AMI value is defined for openshift_aws_ami or openshift_aws_launch_config_custom_image."
+ when:
+ - openshift_aws_ami is undefined
+
+- name: fetch the security groups for launch config
+ ec2_group_facts:
+ filters:
+ group-name:
+ - "{{ openshift_aws_clusterid }}" # default sg
+ - "{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}" # node type sg
+ - "{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}_k8s" # node type sg k8s
+ region: "{{ openshift_aws_region }}"
+ register: ec2sgs
+
+# Create the scale group config
+- name: Create the node scale group launch config
+ ec2_lc:
+ name: "{{ openshift_aws_launch_config_name }}"
+ region: "{{ openshift_aws_region }}"
+ image_id: "{{ openshift_aws_ami }}"
+ instance_type: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].instance_type }}"
+ security_groups: "{{ ec2sgs.security_groups | map(attribute='group_id')| list }}"
+ user_data: |-
+ #cloud-config
+ {% if openshift_aws_node_group_type != 'master' %}
+ write_files:
+ - path: /root/csr_kubeconfig
+ owner: root:root
+ permissions: '0640'
+ content: {{ openshift_aws_launch_config_bootstrap_token | default('') | to_yaml }}
+ - path: /root/openshift_settings
+ owner: root:root
+ permissions: '0640'
+ content:
+ openshift_type: "{{ openshift_aws_node_group_type }}"
+ runcmd:
+ - [ systemctl, enable, atomic-openshift-node]
+ - [ systemctl, start, atomic-openshift-node]
+ {% endif %}
+ key_name: "{{ openshift_aws_ssh_key_name }}"
+ ebs_optimized: False
+ volumes: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].volumes }}"
+ assign_public_ip: True
diff --git a/roles/openshift_aws/tasks/provision.yml b/roles/openshift_aws/tasks/provision.yml
new file mode 100644
index 000000000..189caeaee
--- /dev/null
+++ b/roles/openshift_aws/tasks/provision.yml
@@ -0,0 +1,54 @@
+---
+- when: openshift_aws_create_vpc | bool
+ name: create default vpc
+ include: vpc.yml
+
+- when: openshift_aws_create_iam_cert | bool
+ name: create the iam_cert for elb certificate
+ include: iam_cert.yml
+
+- when: openshift_aws_users | length > 0
+ name: create aws ssh keypair
+ include: ssh_keys.yml
+
+- when: openshift_aws_create_s3 | bool
+ name: create s3 bucket for registry
+ include: s3.yml
+
+- name: include scale group creation for master
+ include: build_node_group.yml
+
+- name: fetch newly created instances
+ ec2_remote_facts:
+ region: "{{ openshift_aws_region }}"
+ filters:
+ "tag:clusterid": "{{ openshift_aws_clusterid }}"
+ "tag:host-type": "{{ openshift_aws_node_group_type }}"
+ instance-state-name: running
+ register: instancesout
+ retries: 20
+ delay: 3
+ until: instancesout.instances|length > 0
+
+- name: create our master internal load balancers
+ include: elb.yml
+ vars:
+ openshift_aws_elb_direction: internal
+ openshift_aws_elb_name: "{{ openshift_aws_clusterid }}-{{openshift_aws_node_group_type }}-internal"
+ openshift_aws_elb_scheme: internal
+
+- name: create our master external load balancers
+ include: elb.yml
+ vars:
+ openshift_aws_elb_direction: external
+ openshift_aws_elb_name: "{{ openshift_aws_clusterid }}-{{openshift_aws_node_group_type }}-external"
+ openshift_aws_elb_scheme: internet-facing
+
+- name: wait for ssh to become available
+ wait_for:
+ port: 22
+ host: "{{ item.public_ip_address }}"
+ timeout: 300
+ search_regex: OpenSSH
+ with_items: "{{ instancesout.instances }}"
+ when: openshift_aws_wait_for_ssh | bool
diff --git a/roles/openshift_aws/tasks/provision_nodes.yml b/roles/openshift_aws/tasks/provision_nodes.yml
new file mode 100644
index 000000000..fc4996c68
--- /dev/null
+++ b/roles/openshift_aws/tasks/provision_nodes.yml
@@ -0,0 +1,66 @@
+---
+# Get bootstrap config token
+# bootstrap should be created on first master
+# need to fetch it and shove it into cloud data
+- name: fetch master instances
+ ec2_remote_facts:
+ region: "{{ openshift_aws_region }}"
+ filters:
+ "tag:clusterid": "{{ openshift_aws_clusterid }}"
+ "tag:host-type": master
+ instance-state-name: running
+ register: instancesout
+ retries: 20
+ delay: 3
+ until: instancesout.instances|length > 0
+
+- name: slurp down the bootstrap.kubeconfig
+ slurp:
+ src: /etc/origin/master/bootstrap.kubeconfig
+ delegate_to: "{{ instancesout.instances[0].public_ip_address }}"
+ remote_user: root
+ register: bootstrap
+
+- name: set_fact for kubeconfig token
+ set_fact:
+ openshift_aws_launch_config_bootstrap_token: "{{ bootstrap['content'] | b64decode }}"
+
+- name: include build node group for infra
+ include: build_node_group.yml
+ vars:
+ openshift_aws_node_group_type: infra
+ openshift_aws_scale_group_name: "{{ openshift_aws_clusterid }} openshift infra"
+ openshift_aws_launch_config_name: "{{ openshift_aws_clusterid }}-infra-{{ ansible_date_time.epoch }}"
+
+- name: include build node group for compute
+ include: build_node_group.yml
+ vars:
+ openshift_aws_node_group_type: compute
+ openshift_aws_scale_group_name: "{{ openshift_aws_clusterid }} openshift compute"
+ openshift_aws_launch_config_name: "{{ openshift_aws_clusterid }}-compute-{{ ansible_date_time.epoch }}"
+
+- when: openshift_aws_wait_for_ssh | bool
+ block:
+ - name: pause and allow for instances to scale before we query them
+ pause:
+ seconds: 10
+
+ - name: fetch newly created instances
+ ec2_remote_facts:
+ region: "{{ openshift_aws_region }}"
+ filters:
+ "tag:clusterid": "{{ openshift_aws_clusterid }}"
+ "tag:host-type": node
+ instance-state-name: running
+ register: instancesout
+ retries: 20
+ delay: 3
+ until: instancesout.instances|length > 0
+
+ - name: wait for ssh to become available
+ wait_for:
+ port: 22
+ host: "{{ item.public_ip_address }}"
+ timeout: 300
+ search_regex: OpenSSH
+ with_items: "{{ instancesout.instances }}"
diff --git a/roles/openshift_aws/tasks/s3.yml b/roles/openshift_aws/tasks/s3.yml
new file mode 100644
index 000000000..9cf37c840
--- /dev/null
+++ b/roles/openshift_aws/tasks/s3.yml
@@ -0,0 +1,7 @@
+---
+- name: Create an s3 bucket
+ s3:
+ bucket: "{{ openshift_aws_s3_bucket_name }}"
+ mode: "{{ openshift_aws_s3_mode }}"
+ region: "{{ openshift_aws_region }}"
+ when: openshift_aws_create_s3 | bool
diff --git a/roles/openshift_aws/tasks/scale_group.yml b/roles/openshift_aws/tasks/scale_group.yml
new file mode 100644
index 000000000..3e969fc43
--- /dev/null
+++ b/roles/openshift_aws/tasks/scale_group.yml
@@ -0,0 +1,32 @@
+---
+- name: query vpc
+ ec2_vpc_net_facts:
+ region: "{{ openshift_aws_region }}"
+ filters:
+ 'tag:Name': "{{ openshift_aws_vpc_name }}"
+ register: vpcout
+
+- name: fetch the subnet to use in scale group
+ ec2_vpc_subnet_facts:
+ region: "{{ openshift_aws_region }}"
+ filters:
+ "tag:Name": "{{ openshift_aws_subnet_name }}"
+ vpc-id: "{{ vpcout.vpcs[0].id }}"
+ register: subnetout
+
+- name: Create the scale group
+ ec2_asg:
+ name: "{{ openshift_aws_scale_group_name }}"
+ launch_config_name: "{{ openshift_aws_launch_config_name }}"
+ health_check_period: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].health_check.period }}"
+ health_check_type: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].health_check.type }}"
+ min_size: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].min_size }}"
+ max_size: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].max_size }}"
+ desired_capacity: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].desired_size }}"
+ region: "{{ openshift_aws_region }}"
+ termination_policies: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].termination_policy if 'termination_policy' in openshift_aws_node_group_config[openshift_aws_node_group_type] else omit }}"
+ load_balancers: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].elbs if 'elbs' in openshift_aws_node_group_config[openshift_aws_node_group_type] else omit }}"
+ wait_for_instances: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].wait_for_instances | default(False)}}"
+ vpc_zone_identifier: "{{ subnetout.subnets[0].id }}"
+ tags:
+ - "{{ openshift_aws_node_group_config.tags | combine(openshift_aws_node_group_config[openshift_aws_node_group_type].tags) }}"
diff --git a/roles/openshift_aws/tasks/seal_ami.yml b/roles/openshift_aws/tasks/seal_ami.yml
new file mode 100644
index 000000000..0cb749dcc
--- /dev/null
+++ b/roles/openshift_aws/tasks/seal_ami.yml
@@ -0,0 +1,49 @@
+---
+- name: fetch newly created instances
+ ec2_remote_facts:
+ region: "{{ openshift_aws_region }}"
+ filters:
+ "tag:Name": "{{ openshift_aws_base_ami_name }}"
+ instance-state-name: running
+ register: instancesout
+ retries: 20
+ delay: 3
+ until: instancesout.instances|length > 0
+
+- name: bundle ami
+ ec2_ami:
+ instance_id: "{{ instancesout.instances.0.id }}"
+ region: "{{ openshift_aws_region }}"
+ state: present
+ description: "This was provisioned {{ ansible_date_time.iso8601 }}"
+ name: "{{ openshift_aws_ami_name }}"
+ tags: "{{ openshift_aws_ami_tags }}"
+ wait: yes
+ register: amioutput
+
+- debug: var=amioutput
+
+- when: openshift_aws_ami_encrypt | bool
+ block:
+ - name: augment the encrypted ami tags with source-ami
+ set_fact:
+ source_tag:
+ source-ami: "{{ amioutput.image_id }}"
+
+ - name: copy the ami for encrypted disks
+ include: ami_copy.yml
+ vars:
+ openshift_aws_ami_copy_name: "{{ openshift_aws_ami_name }}-encrypted"
+ openshift_aws_ami_copy_src_ami: "{{ amioutput.image_id }}"
+ # TODO: How does the kms alias get passed to ec2_ami_copy
+ openshift_aws_ami_copy_kms_alias: "alias/{{ openshift_aws_clusterid }}_kms"
+ openshift_aws_ami_copy_tags: "{{ source_tag | combine(openshift_aws_ami_tags) }}"
+ # this option currently fails due to boto waiters
+ # when supported this need to be reapplied
+ #openshift_aws_ami_copy_wait: True
+
+- name: terminate temporary instance
+ ec2:
+ state: absent
+ region: "{{ openshift_aws_region }}"
+ instance_ids: "{{ instancesout.instances.0.id }}"
diff --git a/roles/openshift_aws/tasks/security_group.yml b/roles/openshift_aws/tasks/security_group.yml
new file mode 100644
index 000000000..161e72fb4
--- /dev/null
+++ b/roles/openshift_aws/tasks/security_group.yml
@@ -0,0 +1,45 @@
+---
+- name: Fetch the VPC for the vpc.id
+ ec2_vpc_net_facts:
+ region: "{{ openshift_aws_region }}"
+ filters:
+ "tag:Name": "{{ openshift_aws_clusterid }}"
+ register: vpcout
+
+- name: Create default security group for cluster
+ ec2_group:
+ name: "{{ openshift_aws_node_security_groups.default.name }}"
+ description: "{{ openshift_aws_node_security_groups.default.desc }}"
+ region: "{{ openshift_aws_region }}"
+ vpc_id: "{{ vpcout.vpcs[0].id }}"
+ rules: "{{ openshift_aws_node_security_groups.default.rules | default(omit, True)}}"
+ register: sg_default_created
+
+- name: create the node group sgs
+ ec2_group:
+ name: "{{ item.name}}"
+ description: "{{ item.desc }}"
+ rules: "{{ item.rules if 'rules' in item else [] }}"
+ region: "{{ openshift_aws_region }}"
+ vpc_id: "{{ vpcout.vpcs[0].id }}"
+ register: sg_create
+ with_items:
+ - "{{ openshift_aws_node_security_groups[openshift_aws_node_group_type]}}"
+
+- name: create the k8s sgs for the node group
+ ec2_group:
+ name: "{{ item.name }}_k8s"
+ description: "{{ item.desc }} for k8s"
+ region: "{{ openshift_aws_region }}"
+ vpc_id: "{{ vpcout.vpcs[0].id }}"
+ register: k8s_sg_create
+ with_items:
+ - "{{ openshift_aws_node_security_groups[openshift_aws_node_group_type]}}"
+
+- name: tag sg groups with proper tags
+ ec2_tag:
+ tags:
+ KubernetesCluster: "{{ openshift_aws_clusterid }}"
+ resource: "{{ item.group_id }}"
+ region: "{{ openshift_aws_region }}"
+ with_items: "{{ k8s_sg_create.results }}"
diff --git a/roles/openshift_aws_ssh_keys/tasks/main.yml b/roles/openshift_aws/tasks/ssh_keys.yml
index 232cf20ed..f439ce74e 100644
--- a/roles/openshift_aws_ssh_keys/tasks/main.yml
+++ b/roles/openshift_aws/tasks/ssh_keys.yml
@@ -3,6 +3,6 @@
ec2_key:
name: "{{ item.key_name }}"
key_material: "{{ item.pub_key }}"
- region: "{{ r_openshift_aws_ssh_keys_region }}"
- with_items: "{{ r_openshift_aws_ssh_keys_users }}"
+ region: "{{ openshift_aws_region }}"
+ with_items: "{{ openshift_aws_users }}"
no_log: True
diff --git a/roles/openshift_aws_vpc/tasks/main.yml b/roles/openshift_aws/tasks/vpc.yml
index cfe08dae5..ce2c8eac5 100644
--- a/roles/openshift_aws_vpc/tasks/main.yml
+++ b/roles/openshift_aws/tasks/vpc.yml
@@ -2,13 +2,12 @@
- name: Create AWS VPC
ec2_vpc_net:
state: present
- cidr_block: "{{ r_openshift_aws_vpc_cidr }}"
+ cidr_block: "{{ openshift_aws_vpc.cidr }}"
dns_support: True
dns_hostnames: True
- region: "{{ r_openshift_aws_vpc_region }}"
- name: "{{ r_openshift_aws_vpc_clusterid }}"
- tags:
- Name: "{{ r_openshift_aws_vpc_clusterid }}"
+ region: "{{ openshift_aws_region }}"
+ name: "{{ openshift_aws_clusterid }}"
+ tags: "{{ openshift_aws_vpc_tags }}"
register: vpc
- name: Sleep to avoid a race condition when creating the vpc
@@ -18,23 +17,23 @@
- name: assign the vpc igw
ec2_vpc_igw:
- region: "{{ r_openshift_aws_vpc_region }}"
+ region: "{{ openshift_aws_region }}"
vpc_id: "{{ vpc.vpc.id }}"
register: igw
- name: assign the vpc subnets
ec2_vpc_subnet:
- region: "{{ r_openshift_aws_vpc_region }}"
+ region: "{{ openshift_aws_region }}"
vpc_id: "{{ vpc.vpc.id }}"
cidr: "{{ item.cidr }}"
az: "{{ item.az }}"
resource_tags:
Name: "{{ item.az }}"
- with_items: "{{ r_openshift_aws_vpc_subnets[r_openshift_aws_vpc_region] }}"
+ with_items: "{{ openshift_aws_vpc.subnets[openshift_aws_region] }}"
- name: Grab the route tables from our VPC
ec2_vpc_route_table_facts:
- region: "{{ r_openshift_aws_vpc_region }}"
+ region: "{{ openshift_aws_region }}"
filters:
vpc-id: "{{ vpc.vpc.id }}"
register: route_table
@@ -44,9 +43,9 @@
lookup: id
route_table_id: "{{ route_table.route_tables[0].id }}"
vpc_id: "{{ vpc.vpc.id }}"
- region: "{{ r_openshift_aws_vpc_region }}"
+ region: "{{ openshift_aws_region }}"
tags:
- Name: "{{ r_openshift_aws_vpc_name }}"
+ Name: "{{ openshift_aws_vpc_name }}"
routes:
- dest: 0.0.0.0/0
gateway_id: igw
diff --git a/roles/openshift_aws_ami_copy/README.md b/roles/openshift_aws_ami_copy/README.md
deleted file mode 100644
index 111818451..000000000
--- a/roles/openshift_aws_ami_copy/README.md
+++ /dev/null
@@ -1,50 +0,0 @@
-openshift_aws_ami_perms
-=========
-
-Ansible role for copying an AMI
-
-Requirements
-------------
-
-Ansible Modules:
-
-
-Role Variables
---------------
-
-- openshift_aws_ami_copy_src_ami: source AMI id to copy from
-- openshift_aws_ami_copy_region: region where the AMI is found
-- openshift_aws_ami_copy_name: name to assign to new AMI
-- openshift_aws_ami_copy_kms_arn: AWS IAM KMS arn of the key to use for encryption
-- openshift_aws_ami_copy_tags: dict with desired tags
-- openshift_aws_ami_copy_wait: wait for the ami copy to achieve available status. This fails due to boto waiters.
-
-Dependencies
-------------
-
-
-Example Playbook
-----------------
-```yaml
- - name: copy the ami for encrypted disks
- include_role:
- name: openshift_aws_ami_copy
- vars:
- r_openshift_aws_ami_copy_region: us-east-1
- r_openshift_aws_ami_copy_name: myami
- r_openshift_aws_ami_copy_src_ami: ami-1234
- r_openshift_aws_ami_copy_kms_arn: arn:xxxx
- r_openshift_aws_ami_copy_tags: {}
- r_openshift_aws_ami_copy_encrypt: False
-
-```
-
-License
--------
-
-Apache 2.0
-
-Author Information
-------------------
-
-Openshift
diff --git a/roles/openshift_aws_ami_copy/tasks/main.yml b/roles/openshift_aws_ami_copy/tasks/main.yml
deleted file mode 100644
index bcccd4042..000000000
--- a/roles/openshift_aws_ami_copy/tasks/main.yml
+++ /dev/null
@@ -1,26 +0,0 @@
----
-- fail:
- msg: "{{ item }} needs to be defined"
- when: item is not defined
- with_items:
- - r_openshift_aws_ami_copy_src_ami
- - r_openshift_aws_ami_copy_name
- - r_openshift_aws_ami_copy_region
-
-- name: "Create copied AMI image and wait: {{ r_openshift_aws_ami_copy_wait | default(False) }}"
- ec2_ami_copy:
- region: "{{ r_openshift_aws_ami_copy_region }}"
- source_region: "{{ r_openshift_aws_ami_copy_region }}"
- name: "{{ r_openshift_aws_ami_copy_name }}"
- source_image_id: "{{ r_openshift_aws_ami_copy_src_ami }}"
- encrypted: "{{ r_openshift_aws_ami_copy_encrypt | default(False) }}"
- kms_key_id: "{{ r_openshift_aws_ami_copy_kms_arn | default(omit) }}"
- wait: "{{ r_openshift_aws_ami_copy_wait | default(omit) }}"
- tags: "{{ r_openshift_aws_ami_copy_tags }}"
- register: copy_result
-
-- debug: var=copy_result
-
-- name: return AMI ID with setfact - openshift_aws_ami_copy_retval_custom_ami
- set_fact:
- r_openshift_aws_ami_copy_retval_custom_ami: "{{ copy_result.image_id }}"
diff --git a/roles/openshift_aws_elb/README.md b/roles/openshift_aws_elb/README.md
deleted file mode 100644
index ecc45fa14..000000000
--- a/roles/openshift_aws_elb/README.md
+++ /dev/null
@@ -1,75 +0,0 @@
-openshift_aws_elb
-=========
-
-Ansible role to provision and manage AWS ELB's for Openshift.
-
-Requirements
-------------
-
-Ansible Modules:
-
-- ec2_elb
-- ec2_elb_lb
-
-python package:
-
-python-boto
-
-Role Variables
---------------
-
-- r_openshift_aws_elb_instances: instances to put in ELB
-- r_openshift_aws_elb_elb_name: name of elb
-- r_openshift_aws_elb_security_group_names: list of SGs (by name) that the ELB will belong to
-- r_openshift_aws_elb_region: AWS Region
-- r_openshift_aws_elb_health_check: definition of the ELB health check. See ansible docs for ec2_elb
-```yaml
- ping_protocol: tcp
- ping_port: 443
- response_timeout: 5
- interval: 30
- unhealthy_threshold: 2
- healthy_threshold: 2
-```
-- r_openshift_aws_elb_listeners: definition of the ELB listeners. See ansible docs for ec2_elb
-```yaml
-- protocol: tcp
- load_balancer_port: 80
- instance_protocol: ssl
- instance_port: 443
-- protocol: ssl
- load_balancer_port: 443
- instance_protocol: ssl
- instance_port: 443
- # ssl certificate required for https or ssl
- ssl_certificate_id: "{{ r_openshift_aws_elb_cert_arn }}"
-```
-
-Dependencies
-------------
-
-
-Example Playbook
-----------------
-```yaml
-- include_role:
- name: openshift_aws_elb
- vars:
- r_openshift_aws_elb_instances: aws_instances_to_put_in_elb
- r_openshift_aws_elb_elb_name: elb_name
- r_openshift_aws_elb_security_groups: security_group_names
- r_openshift_aws_elb_region: aws_region
- r_openshift_aws_elb_health_check: "{{ elb_health_check_definition }}"
- r_openshift_aws_elb_listeners: "{{ elb_listeners_definition }}"
-```
-
-
-License
--------
-
-Apache 2.0
-
-Author Information
-------------------
-
-Openshift
diff --git a/roles/openshift_aws_elb/defaults/main.yml b/roles/openshift_aws_elb/defaults/main.yml
deleted file mode 100644
index ed5d38079..000000000
--- a/roles/openshift_aws_elb/defaults/main.yml
+++ /dev/null
@@ -1,33 +0,0 @@
----
-r_openshift_aws_elb_health_check:
- ping_protocol: tcp
- ping_port: 443
- response_timeout: 5
- interval: 30
- unhealthy_threshold: 2
- healthy_threshold: 2
-
-r_openshift_aws_elb_cert_arn: ''
-
-r_openshift_aws_elb_listeners:
- master:
- external:
- - protocol: tcp
- load_balancer_port: 80
- instance_protocol: ssl
- instance_port: 443
- - protocol: ssl
- load_balancer_port: 443
- instance_protocol: ssl
- instance_port: 443
- # ssl certificate required for https or ssl
- ssl_certificate_id: "{{ r_openshift_aws_elb_cert_arn }}"
- internal:
- - protocol: tcp
- load_balancer_port: 80
- instance_protocol: tcp
- instance_port: 80
- - protocol: tcp
- load_balancer_port: 443
- instance_protocol: tcp
- instance_port: 443
diff --git a/roles/openshift_aws_elb/meta/main.yml b/roles/openshift_aws_elb/meta/main.yml
deleted file mode 100644
index 58be652a5..000000000
--- a/roles/openshift_aws_elb/meta/main.yml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-galaxy_info:
- author: OpenShift
- description: Openshift ELB provisioning
- company: Red Hat, Inc
- license: ASL 2.0
- min_ansible_version: 1.2
- platforms:
- - name: EL
- versions:
- - 7
-dependencies: []
diff --git a/roles/openshift_aws_elb/tasks/main.yml b/roles/openshift_aws_elb/tasks/main.yml
deleted file mode 100644
index 64ec18545..000000000
--- a/roles/openshift_aws_elb/tasks/main.yml
+++ /dev/null
@@ -1,57 +0,0 @@
----
-- name: fetch the instances to add to the ELB
- ec2_remote_facts:
- region: "{{ r_openshift_aws_elb_region }}"
- filters: "{{ r_openshift_aws_elb_instance_filter }}"
- register: instancesout
-
-- name: fetch the default subnet id
- ec2_vpc_subnet_facts:
- region: "{{ r_openshift_aws_elb_region }}"
- filters:
- "tag:Name": "{{ r_openshift_aws_elb_subnet_name }}"
- register: subnetout
-
-- name: show the selected ELB listeners
- debug:
- msg: "{{ r_openshift_aws_elb_listeners[r_openshift_aws_elb_type][r_openshift_aws_elb_direction]
- if 'master' in r_openshift_aws_elb_type or 'infra' in r_openshift_aws_elb_type
- else r_openshift_aws_elb_listeners }}"
-
-- name: "Create ELB {{ r_openshift_aws_elb_name }}"
- ec2_elb_lb:
- name: "{{ r_openshift_aws_elb_name }}"
- state: present
- security_group_names: "{{ r_openshift_aws_elb_security_groups }}"
- idle_timeout: "{{ r_openshift_aws_elb_idle_timout }}"
- region: "{{ r_openshift_aws_elb_region }}"
- subnets:
- - "{{ subnetout.subnets[0].id }}"
- health_check: "{{ r_openshift_aws_elb_health_check }}"
- listeners: "{{ r_openshift_aws_elb_listeners[r_openshift_aws_elb_type][r_openshift_aws_elb_direction]
- if 'master' in r_openshift_aws_elb_type or 'infra' in r_openshift_aws_elb_type
- else r_openshift_aws_elb_listeners }}"
- scheme: "{{ r_openshift_aws_elb_scheme }}"
- tags:
- KubernetesCluster: "{{ r_openshift_aws_elb_clusterid }}"
- register: new_elb
-
-# It is necessary to ignore_errors here because the instances are not in 'ready'
-# state when first added to ELB
-- name: "Add instances to ELB {{ r_openshift_aws_elb_name }}"
- ec2_elb:
- instance_id: "{{ item.id }}"
- ec2_elbs: "{{ r_openshift_aws_elb_name }}"
- state: present
- region: "{{ r_openshift_aws_elb_region }}"
- wait: False
- with_items: "{{ instancesout.instances }}"
- ignore_errors: True
- retries: 10
- register: elb_call
- until: elb_call|succeeded
-
-- debug:
- msg: "{{ item }}"
- with_items:
- - "{{ new_elb }}"
diff --git a/roles/openshift_aws_iam_kms/README.md b/roles/openshift_aws_iam_kms/README.md
deleted file mode 100644
index 9468e785c..000000000
--- a/roles/openshift_aws_iam_kms/README.md
+++ /dev/null
@@ -1,43 +0,0 @@
-openshift_aws_iam_kms
-=========
-
-Ansible role to create AWS IAM KMS keys for encryption
-
-Requirements
-------------
-
-Ansible Modules:
-
-oo_iam_kms
-
-Role Variables
---------------
-
-- r_openshift_aws_iam_kms_region: AWS region to create KMS key
-- r_openshift_aws_iam_kms_alias: Alias name to assign to created KMS key
-
-Dependencies
-------------
-
-lib_utils
-
-Example Playbook
-----------------
-```yaml
-- include_role:
- name: openshift_aws_iam_kms
- vars:
- r_openshift_aws_iam_kms_region: 'us-east-1'
- r_openshift_aws_iam_kms_alias: 'alias/clusterABC_kms'
-```
-
-
-License
--------
-
-Apache 2.0
-
-Author Information
-------------------
-
-Openshift
diff --git a/roles/openshift_aws_iam_kms/defaults/main.yml b/roles/openshift_aws_iam_kms/defaults/main.yml
deleted file mode 100644
index ed97d539c..000000000
--- a/roles/openshift_aws_iam_kms/defaults/main.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/roles/openshift_aws_iam_kms/meta/main.yml b/roles/openshift_aws_iam_kms/meta/main.yml
deleted file mode 100644
index e29aaf96b..000000000
--- a/roles/openshift_aws_iam_kms/meta/main.yml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-galaxy_info:
- author: OpenShift
- description: AWS IAM KMS setup and management
- company: Red Hat, Inc
- license: ASL 2.0
- min_ansible_version: 1.2
- platforms:
- - name: EL
- versions:
- - 7
-dependencies:
-- lib_utils
diff --git a/roles/openshift_aws_iam_kms/tasks/main.yml b/roles/openshift_aws_iam_kms/tasks/main.yml
deleted file mode 100644
index 32aac2666..000000000
--- a/roles/openshift_aws_iam_kms/tasks/main.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-- fail:
- msg: "{{ item.name }} needs to be defined."
- when: item.cond | bool
- with_items:
- - name: "{{ r_openshift_aws_iam_kms_alias }}"
- cond: "{{ r_openshift_aws_iam_kms_alias is undefined }}"
- - name: "{{ r_openshift_aws_iam_kms_region }}"
- cond: "{{ r_openshift_aws_iam_kms_region is undefined }}"
-
-- name: Create IAM KMS key with alias
- oo_iam_kms:
- state: present
- alias: "{{ r_openshift_aws_iam_kms_alias }}"
- region: "{{ r_openshift_aws_iam_kms_region }}"
- register: created_kms
-
-- debug: var=created_kms.results
diff --git a/roles/openshift_aws_launch_config/README.md b/roles/openshift_aws_launch_config/README.md
deleted file mode 100644
index 52b7e83b6..000000000
--- a/roles/openshift_aws_launch_config/README.md
+++ /dev/null
@@ -1,72 +0,0 @@
-openshift_aws_launch_config
-=========
-
-Ansible role to create an AWS launch config for a scale group.
-
-This includes the AMI, volumes, user_data, etc.
-
-Requirements
-------------
-
-Ansible Modules:
-
-
-Role Variables
---------------
-- r_openshift_aws_launch_config_name: "{{ launch_config_name }}"
-- r_openshift_aws_launch_config_clusterid: "{{ clusterid }}"
-- r_openshift_aws_launch_config_region: "{{ region }}"
-- r_openshift_aws_launch_config: "{{ node_group_config }}"
-```yaml
- master:
- instance_type: m4.xlarge
- ami: ami-cdeec8b6 # if using an encrypted AMI this will be replaced
- volumes:
- - device_name: /dev/sdb
- volume_size: 100
- device_type: gp2
- delete_on_termination: False
- health_check:
- period: 60
- type: EC2
- min_size: 3
- max_size: 3
- desired_size: 3
- tags:
- host-type: master
- sub-host-type: default
- wait_for_instances: True
-```
-- r_openshift_aws_launch_config_type: compute
-- r_openshift_aws_launch_config_custom_image: ami-xxxxx
-- r_openshift_aws_launch_config_bootstrap_token: <string of kubeconfig>
-
-Dependencies
-------------
-
-
-Example Playbook
-----------------
-```yaml
- - name: create compute nodes config
- include_role:
- name: openshift_aws_launch_config
- vars:
- r_openshift_aws_launch_config_name: "{{ launch_config_name }}"
- r_openshift_aws_launch_config_clusterid: "{{ clusterid }}"
- r_openshift_aws_launch_config_region: "{{ region }}"
- r_openshift_aws_launch_config: "{{ node_group_config }}"
- r_openshift_aws_launch_config_type: compute
- r_openshift_aws_launch_config_custom_image: ami-1234
- r_openshift_aws_launch_config_bootstrap_token: abcd
-```
-
-License
--------
-
-Apache 2.0
-
-Author Information
-------------------
-
-Openshift
diff --git a/roles/openshift_aws_launch_config/defaults/main.yml b/roles/openshift_aws_launch_config/defaults/main.yml
deleted file mode 100644
index ed97d539c..000000000
--- a/roles/openshift_aws_launch_config/defaults/main.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/roles/openshift_aws_launch_config/meta/main.yml b/roles/openshift_aws_launch_config/meta/main.yml
deleted file mode 100644
index e61670cc2..000000000
--- a/roles/openshift_aws_launch_config/meta/main.yml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-galaxy_info:
- author: OpenShift
- description: OpenShift AWS launch configuration creation
- company: Red Hat, Inc
- license: ASL 2.0
- min_ansible_version: 2.3
- platforms:
- - name: EL
- versions:
- - 7
-dependencies: []
diff --git a/roles/openshift_aws_launch_config/tasks/main.yml b/roles/openshift_aws_launch_config/tasks/main.yml
deleted file mode 100644
index 437cf1f71..000000000
--- a/roles/openshift_aws_launch_config/tasks/main.yml
+++ /dev/null
@@ -1,50 +0,0 @@
----
-- name: fail when params are not set
- fail:
- msg: Please specify the role parameters.
- when:
- - r_openshift_aws_launch_config_clusterid is undefined
- - r_openshift_aws_launch_config_type is undefined
- - r_openshift_aws_launch_config_region is undefined
- - r_openshift_aws_launch_config is undefined
-
-- name: fetch the security groups for launch config
- ec2_group_facts:
- filters:
- group-name:
- - "{{ r_openshift_aws_launch_config_clusterid }}" # default sg
- - "{{ r_openshift_aws_launch_config_clusterid }}_{{ r_openshift_aws_launch_config_type }}" # node type sg
- - "{{ r_openshift_aws_launch_config_clusterid }}_{{ r_openshift_aws_launch_config_type }}_k8s" # node type sg k8s
- region: "{{ r_openshift_aws_launch_config_region }}"
- register: ec2sgs
-
-# Create the scale group config
-- name: Create the node scale group config
- ec2_lc:
- name: "{{ r_openshift_aws_launch_config_name }}"
- region: "{{ r_openshift_aws_launch_config_region }}"
- image_id: "{{ r_openshift_aws_launch_config_custom_image if 'ami-' in r_openshift_aws_launch_config_custom_image else r_openshift_aws_launch_config[r_openshift_aws_launch_config_type].ami }}"
- instance_type: "{{ r_openshift_aws_launch_config[r_openshift_aws_launch_config_type].instance_type }}"
- security_groups: "{{ ec2sgs.security_groups | map(attribute='group_id')| list }}"
- user_data: |-
- #cloud-config
- {% if r_openshift_aws_launch_config_type != 'master' %}
- write_files:
- - path: /root/csr_kubeconfig
- owner: root:root
- permissions: '0640'
- content: {{ r_openshift_aws_launch_config_bootstrap_token | default('') | to_yaml }}
- - path: /root/openshift_settings
- owner: root:root
- permissions: '0640'
- content:
- openshift_type: "{{ r_openshift_aws_launch_config_type }}"
- runcmd:
- - [ systemctl, enable, atomic-openshift-node]
- - [ systemctl, start, atomic-openshift-node]
- {% endif %}
- key_name: "{{ r_openshift_aws_launch_config.ssh_key_name }}"
- ebs_optimized: False
- volumes: "{{ r_openshift_aws_launch_config[r_openshift_aws_launch_config_type].volumes }}"
- assign_public_ip: True
- register: test
diff --git a/roles/openshift_aws_launch_config/templates/cloud-init.j2 b/roles/openshift_aws_launch_config/templates/cloud-init.j2
deleted file mode 100644
index 1a1e29550..000000000
--- a/roles/openshift_aws_launch_config/templates/cloud-init.j2
+++ /dev/null
@@ -1,9 +0,0 @@
-{% if r_openshift_aws_launch_config_bootstrap_token is defined and r_openshift_aws_launch_config_bootstrap_token != '' %}
-#cloud-config
-write_files:
-- path: /root/csr_kubeconfig
- owner: root:root
- permissions: '0640'
- content: |-
- {{ r_openshift_aws_launch_config_bootstrap_token }}
-{% endif %}
diff --git a/roles/openshift_aws_node_group/README.md b/roles/openshift_aws_node_group/README.md
deleted file mode 100644
index c32c57bc5..000000000
--- a/roles/openshift_aws_node_group/README.md
+++ /dev/null
@@ -1,77 +0,0 @@
-openshift_aws_node_group
-=========
-
-Ansible role to create an AWS node group.
-
-This includes the security group, launch config, and scale group.
-
-Requirements
-------------
-
-Ansible Modules:
-
-
-Role Variables
---------------
-
-- r_openshift_aws_node_group_name: myscalegroup
-- r_openshift_aws_node_group_clusterid: myclusterid
-- r_openshift_aws_node_group_region: us-east-1
-- r_openshift_aws_node_group_lc_name: launch_config
-- r_openshift_aws_node_group_type: master|infra|compute
-- r_openshift_aws_node_group_config: "{{ node_group_config }}"
-```yaml
-master:
- instance_type: m4.xlarge
- ami: ami-cdeec8b6 # if using an encrypted AMI this will be replaced
- volumes:
- - device_name: /dev/sdb
- volume_size: 100
- device_type: gp2
- delete_on_termination: False
- health_check:
- period: 60
- type: EC2
- min_size: 3
- max_size: 3
- desired_size: 3
- tags:
- host-type: master
- sub-host-type: default
- wait_for_instances: True
-```
-- r_openshift_aws_node_group_subnet_name: "{{ subnet_name }}"
-
-```yaml
-us-east-1a # name of subnet
-```
-
-Dependencies
-------------
-
-
-Example Playbook
-----------------
-```yaml
- - name: "create {{ openshift_build_node_type }} node groups"
- include_role:
- name: openshift_aws_node_group
- vars:
- r_openshift_aws_node_group_name: "{{ clusterid }} openshift compute"
- r_openshift_aws_node_group_lc_name: "{{ launch_config_name }}"
- r_openshift_aws_node_group_clusterid: "{{ clusterid }}"
- r_openshift_aws_node_group_region: "{{ region }}"
- r_openshift_aws_node_group_config: "{{ node_group_config }}"
- r_openshift_aws_node_group_type: compute
- r_openshift_aws_node_group_subnet_name: "{{ subnet_name }}"
-```
-
-License
--------
-
-Apache 2.0
-
-Author Information
-------------------
-
-Openshift
diff --git a/roles/openshift_aws_node_group/defaults/main.yml b/roles/openshift_aws_node_group/defaults/main.yml
deleted file mode 100644
index 44c5116a1..000000000
--- a/roles/openshift_aws_node_group/defaults/main.yml
+++ /dev/null
@@ -1,58 +0,0 @@
----
-r_openshift_aws_node_group_type: master
-
-r_openshift_aws_node_group_config:
- tags:
- clusterid: "{{ r_openshift_aws_node_group_clusterid }}"
- master:
- instance_type: m4.xlarge
- ami: "{{ r_openshift_aws_node_group_ami }}"
- volumes:
- - device_name: /dev/sdb
- volume_size: 100
- device_type: gp2
- delete_on_termination: False
- health_check:
- period: 60
- type: EC2
- min_size: 3
- max_size: 3
- desired_size: 3
- tags:
- host-type: master
- sub-host-type: default
- wait_for_instances: True
- compute:
- instance_type: m4.xlarge
- ami: "{{ r_openshift_aws_node_group_ami }}"
- volumes:
- - device_name: /dev/sdb
- volume_size: 100
- device_type: gp2
- delete_on_termination: True
- health_check:
- period: 60
- type: EC2
- min_size: 3
- max_size: 100
- desired_size: 3
- tags:
- host-type: node
- sub-host-type: compute
- infra:
- instance_type: m4.xlarge
- ami: "{{ r_openshift_aws_node_group_ami }}"
- volumes:
- - device_name: /dev/sdb
- volume_size: 100
- device_type: gp2
- delete_on_termination: True
- health_check:
- period: 60
- type: EC2
- min_size: 2
- max_size: 20
- desired_size: 2
- tags:
- host-type: node
- sub-host-type: infra
diff --git a/roles/openshift_aws_node_group/tasks/main.yml b/roles/openshift_aws_node_group/tasks/main.yml
deleted file mode 100644
index 6f5364b03..000000000
--- a/roles/openshift_aws_node_group/tasks/main.yml
+++ /dev/null
@@ -1,32 +0,0 @@
----
-- name: validate role inputs
- fail:
- msg: Please pass in the required role variables
- when:
- - r_openshift_aws_node_group_clusterid is not defined
- - r_openshift_aws_node_group_region is not defined
- - r_openshift_aws_node_group_subnet_name is not defined
-
-- name: fetch the subnet to use in scale group
- ec2_vpc_subnet_facts:
- region: "{{ r_openshift_aws_node_group_region }}"
- filters:
- "tag:Name": "{{ r_openshift_aws_node_group_subnet_name }}"
- register: subnetout
-
-- name: Create the scale group
- ec2_asg:
- name: "{{ r_openshift_aws_node_group_name }}"
- launch_config_name: "{{ r_openshift_aws_node_group_lc_name }}"
- health_check_period: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].health_check.period }}"
- health_check_type: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].health_check.type }}"
- min_size: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].min_size }}"
- max_size: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].max_size }}"
- desired_capacity: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].desired_size }}"
- region: "{{ r_openshift_aws_node_group_region }}"
- termination_policies: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].termination_policy if 'termination_policy' in r_openshift_aws_node_group_config[r_openshift_aws_node_group_type] else omit }}"
- load_balancers: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].elbs if 'elbs' in r_openshift_aws_node_group_config[r_openshift_aws_node_group_type] else omit }}"
- wait_for_instances: "{{ r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].wait_for_instances | default(False)}}"
- vpc_zone_identifier: "{{ subnetout.subnets[0].id }}"
- tags:
- - "{{ r_openshift_aws_node_group_config.tags | combine(r_openshift_aws_node_group_config[r_openshift_aws_node_group_type].tags) }}"
diff --git a/roles/openshift_aws_s3/README.md b/roles/openshift_aws_s3/README.md
deleted file mode 100644
index afafe61cf..000000000
--- a/roles/openshift_aws_s3/README.md
+++ /dev/null
@@ -1,43 +0,0 @@
-openshift_aws_s3
-=========
-
-Ansible role to create an S3 bucket
-
-Requirements
-------------
-
-Ansible Modules:
-
-
-Role Variables
---------------
-
-- r_openshift_aws_s3_clusterid: myclusterid
-- r_openshift_aws_s3_region: us-east-1
-- r_openshift_aws_s3_mode: create|delete
-
-Dependencies
-------------
-
-
-Example Playbook
-----------------
-```yaml
-- name: create an s3 bucket
- include_role:
- name: openshift_aws_s3
- vars:
- r_openshift_aws_s3_clusterid: mycluster
- r_openshift_aws_s3_region: us-east-1
- r_openshift_aws_s3_mode: create
-```
-
-License
--------
-
-Apache 2.0
-
-Author Information
-------------------
-
-Openshift
diff --git a/roles/openshift_aws_s3/tasks/main.yml b/roles/openshift_aws_s3/tasks/main.yml
deleted file mode 100644
index 46bd781bd..000000000
--- a/roles/openshift_aws_s3/tasks/main.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- name: Create an s3 bucket
- s3:
- bucket: "{{ r_openshift_aws_s3_clusterid }}"
- mode: "{{ r_openshift_aws_s3_mode }}"
- region: "{{ r_openshift_aws_s3_region }}"
diff --git a/roles/openshift_aws_sg/README.md b/roles/openshift_aws_sg/README.md
deleted file mode 100644
index eeb76bbb6..000000000
--- a/roles/openshift_aws_sg/README.md
+++ /dev/null
@@ -1,59 +0,0 @@
-openshift_aws_sg
-=========
-
-Ansible role to create AWS security groups
-
-Requirements
-------------
-
-Ansible Modules:
-
-
-Role Variables
---------------
-
-- r_openshift_aws_sg_clusterid: myclusterid
-- r_openshift_aws_sg_region: us-east-1
-- r_openshift_aws_sg_type: master|infra|compute
-```yaml
-# defaults/main.yml
- default:
- name: "{{ r_openshift_aws_sg_clusterid }}"
- desc: "{{ r_openshift_aws_sg_clusterid }} default"
- rules:
- - proto: tcp
- from_port: 22
- to_port: 22
- cidr_ip: 0.0.0.0/0
- - proto: all
- from_port: all
- to_port: all
- group_name: "{{ r_openshift_aws_sg_clusterid }}"
-```
-
-
-Dependencies
-------------
-
-
-Example Playbook
-----------------
-```yaml
-- name: create security groups for master
- include_role:
- name: openshift_aws_sg
- vars:
- r_openshift_aws_sg_clusterid: mycluster
- r_openshift_aws_sg_region: us-east-1
- r_openshift_aws_sg_type: master
-```
-
-License
--------
-
-Apache 2.0
-
-Author Information
-------------------
-
-Openshift
diff --git a/roles/openshift_aws_sg/defaults/main.yml b/roles/openshift_aws_sg/defaults/main.yml
deleted file mode 100644
index 9c480d337..000000000
--- a/roles/openshift_aws_sg/defaults/main.yml
+++ /dev/null
@@ -1,48 +0,0 @@
----
-r_openshift_aws_sg_sg:
- default:
- name: "{{ r_openshift_aws_sg_clusterid }}"
- desc: "{{ r_openshift_aws_sg_clusterid }} default"
- rules:
- - proto: tcp
- from_port: 22
- to_port: 22
- cidr_ip: 0.0.0.0/0
- - proto: all
- from_port: all
- to_port: all
- group_name: "{{ r_openshift_aws_sg_clusterid }}"
- master:
- name: "{{ r_openshift_aws_sg_clusterid }}_master"
- desc: "{{ r_openshift_aws_sg_clusterid }} master instances"
- rules:
- - proto: tcp
- from_port: 80
- to_port: 80
- cidr_ip: 0.0.0.0/0
- - proto: tcp
- from_port: 443
- to_port: 443
- cidr_ip: 0.0.0.0/0
- compute:
- name: "{{ r_openshift_aws_sg_clusterid }}_compute"
- desc: "{{ r_openshift_aws_sg_clusterid }} compute node instances"
- infra:
- name: "{{ r_openshift_aws_sg_clusterid }}_infra"
- desc: "{{ r_openshift_aws_sg_clusterid }} infra node instances"
- rules:
- - proto: tcp
- from_port: 80
- to_port: 80
- cidr_ip: 0.0.0.0/0
- - proto: tcp
- from_port: 443
- to_port: 443
- cidr_ip: 0.0.0.0/0
- - proto: tcp
- from_port: 30000
- to_port: 32000
- cidr_ip: 0.0.0.0/0
- etcd:
- name: "{{ r_openshift_aws_sg_clusterid }}_etcd"
- desc: "{{ r_openshift_aws_sg_clusterid }} etcd instances"
diff --git a/roles/openshift_aws_sg/tasks/main.yml b/roles/openshift_aws_sg/tasks/main.yml
deleted file mode 100644
index 2294fdcc9..000000000
--- a/roles/openshift_aws_sg/tasks/main.yml
+++ /dev/null
@@ -1,53 +0,0 @@
----
-- name: Validate role inputs
- fail:
- msg: Please ensure the required variables are passed
- when:
- - r_openshift_aws_sg_region is undefined
- - r_openshift_aws_sg_clusterid is undefined
-
-
-- name: Fetch the VPC for vpc.id
- ec2_vpc_net_facts:
- region: "{{ r_openshift_aws_sg_region }}"
- filters:
- "tag:Name": "{{ r_openshift_aws_sg_clusterid }}"
- register: vpcout
-
-- name: Create default security group for cluster
- ec2_group:
- name: "{{ r_openshift_aws_sg_sg.default.name }}"
- description: "{{ r_openshift_aws_sg_sg.default.desc }}"
- region: "{{ r_openshift_aws_sg_region }}"
- vpc_id: "{{ vpcout.vpcs[0].id }}"
- rules: "{{ r_openshift_aws_sg_sg.default.rules | default(omit, True)}}"
- register: sg_default_created
-
-- name: create the node group sgs
- ec2_group:
- name: "{{ item.name}}"
- description: "{{ item.desc }}"
- rules: "{{ item.rules if 'rules' in item else [] }}"
- region: "{{ r_openshift_aws_sg_region }}"
- vpc_id: "{{ vpcout.vpcs[0].id }}"
- register: sg_create
- with_items:
- - "{{ r_openshift_aws_sg_sg[r_openshift_aws_sg_type]}}"
-
-- name: create the k8s sgs for the node group
- ec2_group:
- name: "{{ item.name }}_k8s"
- description: "{{ item.desc }} for k8s"
- region: "{{ r_openshift_aws_sg_region }}"
- vpc_id: "{{ vpcout.vpcs[0].id }}"
- register: k8s_sg_create
- with_items:
- - "{{ r_openshift_aws_sg_sg[r_openshift_aws_sg_type] }}"
-
-- name: tag sg groups with proper tags
- ec2_tag:
- tags:
- KubernetesCluster: "{{ r_openshift_aws_sg_clusterid }}"
- resource: "{{ item.group_id }}"
- region: "{{ r_openshift_aws_sg_region }}"
- with_items: "{{ k8s_sg_create.results }}"
diff --git a/roles/openshift_aws_ssh_keys/README.md b/roles/openshift_aws_ssh_keys/README.md
deleted file mode 100644
index 4f8667918..000000000
--- a/roles/openshift_aws_ssh_keys/README.md
+++ /dev/null
@@ -1,49 +0,0 @@
-openshift_aws_ssh_keys
-=========
-
-Ansible role for adding SSH keys
-
-Requirements
-------------
-
-Ansible Modules:
-
-
-Role Variables
---------------
-
-- r_openshift_aws_ssh_keys_users: list of dicts of users
-- r_openshift_aws_ssh_keys_region: ec2_region to install the keys
-
-Dependencies
-------------
-
-
-Example Playbook
-----------------
-```yaml
-users:
-- username: user1
- pub_key: <user1 ssh public key>
-- username: user2
- pub_key: <user2 ssh public key>
-
-region: us-east-1
-
-- include_role:
- name: openshift_aws_ssh_keys
- vars:
- r_openshift_aws_ssh_keys_users: "{{ users }}"
- r_openshift_aws_ssh_keys_region: "{{ region }}"
-```
-
-
-License
--------
-
-Apache 2.0
-
-Author Information
-------------------
-
-Openshift
diff --git a/roles/openshift_aws_vpc/README.md b/roles/openshift_aws_vpc/README.md
deleted file mode 100644
index d88cf0581..000000000
--- a/roles/openshift_aws_vpc/README.md
+++ /dev/null
@@ -1,62 +0,0 @@
-openshift_aws_vpc
-=========
-
-Ansible role to create a default AWS VPC
-
-Requirements
-------------
-
-Ansible Modules:
-
-
-Role Variables
---------------
-
-- r_openshift_aws_vpc_clusterid: "{{ clusterid }}"
-- r_openshift_aws_vpc_cidr: 172.31.48.0/20
-- r_openshift_aws_vpc_subnets: "{{ subnets }}"
-```yaml
- subnets:
- us-east-1: # These are us-east-1 region defaults. Ensure this matches your region
- - cidr: 172.31.48.0/20
- az: "us-east-1c"
- - cidr: 172.31.32.0/20
- az: "us-east-1e"
- - cidr: 172.31.16.0/20
- az: "us-east-1a"
-```
-- r_openshift_aws_vpc_region: "{{ region }}"
-- r_openshift_aws_vpc_tags: dict of tags to apply to vpc
-- r_openshift_aws_vpc_name: "{{ vpc_name | default(clusterid) }}"
-
-Dependencies
-------------
-
-
-Example Playbook
-----------------
-
-```yaml
- - name: create default vpc
- include_role:
- name: openshift_aws_vpc
- vars:
- r_openshift_aws_vpc_clusterid: mycluster
- r_openshift_aws_vpc_cidr: 172.31.48.0/20
- r_openshift_aws_vpc_subnets: "{{ subnets }}"
- r_openshift_aws_vpc_region: us-east-1
- r_openshift_aws_vpc_tags: {}
- r_openshift_aws_vpc_name: mycluster
-
-```
-
-
-License
--------
-
-Apache 2.0
-
-Author Information
-------------------
-
-Openshift
diff --git a/roles/openshift_aws_vpc/defaults/main.yml b/roles/openshift_aws_vpc/defaults/main.yml
deleted file mode 100644
index ed97d539c..000000000
--- a/roles/openshift_aws_vpc/defaults/main.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/roles/openshift_cfme/defaults/main.yml b/roles/openshift_cfme/defaults/main.yml
index 8aa57e75a..b82c2e602 100644
--- a/roles/openshift_cfme/defaults/main.yml
+++ b/roles/openshift_cfme/defaults/main.yml
@@ -27,9 +27,6 @@ openshift_cfme_pv_data:
# Tuning parameter to use more than 5 images at once from an ImageStream
openshift_cfme_maxImagesBulkImportedPerRepository: 100
-# Hostname/IP of the NFS server. Currently defaults to first master
-openshift_cfme_nfs_server: "{{ groups.nfs.0 }}"
-openshift_cfme_nfs_directory: "/exports"
# TODO: Refactor '_install_app' variable. This is just for testing but
# maybe in the future it should control the entire yes/no for CFME.
#
diff --git a/roles/openshift_cfme/meta/main.yml b/roles/openshift_cfme/meta/main.yml
index 9200f2c3c..162d817f0 100644
--- a/roles/openshift_cfme/meta/main.yml
+++ b/roles/openshift_cfme/meta/main.yml
@@ -16,5 +16,4 @@ galaxy_info:
dependencies:
- role: lib_openshift
- role: lib_utils
-- role: openshift_common
- role: openshift_master_facts
diff --git a/roles/openshift_cfme/tasks/nfs.yml b/roles/openshift_cfme/tasks/nfs.yml
index 8db45492e..ca04628a8 100644
--- a/roles/openshift_cfme/tasks/nfs.yml
+++ b/roles/openshift_cfme/tasks/nfs.yml
@@ -1,6 +1,13 @@
---
# Tasks to statically provision NFS volumes
# Include if not using dynamic volume provisioning
+
+- name: Set openshift_cfme_nfs_server fact
+ when: openshift_cfme_nfs_server is not defined
+ set_fact:
+ # Hostname/IP of the NFS server. Currently defaults to first master
+ openshift_cfme_nfs_server: "{{ oo_nfs_to_config.0 }}"
+
- name: Ensure the /exports/ directory exists
file:
path: /exports/
diff --git a/roles/openshift_cli/meta/main.yml b/roles/openshift_cli/meta/main.yml
index 04a1ce873..29ed82783 100644
--- a/roles/openshift_cli/meta/main.yml
+++ b/roles/openshift_cli/meta/main.yml
@@ -14,5 +14,4 @@ galaxy_info:
dependencies:
- role: openshift_docker
when: not skip_docker_role | default(False) | bool
-- role: openshift_common
- role: openshift_facts
diff --git a/roles/openshift_common/README.md b/roles/openshift_common/README.md
deleted file mode 100644
index 2a271854b..000000000
--- a/roles/openshift_common/README.md
+++ /dev/null
@@ -1,45 +0,0 @@
-OpenShift/Atomic Enterprise Common
-===================================
-
-OpenShift/Atomic Enterprise common installation and configuration tasks.
-
-Requirements
-------------
-
-A RHEL 7.1 host pre-configured with access to the rhel-7-server-rpms,
-rhel-7-server-extra-rpms, and rhel-7-server-ose-3.0-rpms repos.
-
-Role Variables
---------------
-
-| Name | Default value | Description |
-|---------------------------|-------------------|---------------------------------------------|
-| openshift_cluster_id | default | Cluster name if multiple OpenShift clusters |
-| openshift_debug_level | 2 | Global openshift debug log verbosity |
-| openshift_hostname | UNDEF | Internal hostname to use for this host (this value will set the hostname on the system) |
-| openshift_ip | UNDEF | Internal IP address to use for this host |
-| openshift_public_hostname | UNDEF | Public hostname to use for this host |
-| openshift_public_ip | UNDEF | Public IP address to use for this host |
-| openshift_portal_net | UNDEF | Service IP CIDR |
-
-Dependencies
-------------
-
-os_firewall
-openshift_facts
-openshift_repos
-
-Example Playbook
-----------------
-
-TODO
-
-License
--------
-
-Apache License, Version 2.0
-
-Author Information
-------------------
-
-Jason DeTiberus (jdetiber@redhat.com)
diff --git a/roles/openshift_common/defaults/main.yml b/roles/openshift_common/defaults/main.yml
deleted file mode 100644
index 267c03605..000000000
--- a/roles/openshift_common/defaults/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-openshift_cluster_id: 'default'
-openshift_debug_level: 2
diff --git a/roles/openshift_common/meta/main.yml b/roles/openshift_common/meta/main.yml
deleted file mode 100644
index 7cc95d8fa..000000000
--- a/roles/openshift_common/meta/main.yml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-galaxy_info:
- author: Jason DeTiberus
- description: OpenShift Common
- company: Red Hat, Inc.
- license: Apache License, Version 2.0
- min_ansible_version: 1.7
- platforms:
- - name: EL
- versions:
- - 7
- categories:
- - cloud
-dependencies:
-- role: openshift_facts
diff --git a/roles/openshift_common/tasks/main.yml b/roles/openshift_common/tasks/main.yml
deleted file mode 100644
index a0bd6c860..000000000
--- a/roles/openshift_common/tasks/main.yml
+++ /dev/null
@@ -1,78 +0,0 @@
----
-- fail:
- msg: Flannel can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use flannel
- when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_flannel | default(false) | bool
-
-- fail:
- msg: Nuage sdn can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use nuage
- when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_nuage | default(false) | bool
-
-- fail:
- msg: Nuage sdn can not be used with flannel
- when: openshift_use_flannel | default(false) | bool and openshift_use_nuage | default(false) | bool
-
-- fail:
- msg: Contiv can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use contiv
- when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_contiv | default(false) | bool
-
-- fail:
- msg: Contiv can not be used with flannel
- when: openshift_use_flannel | default(false) | bool and openshift_use_contiv | default(false) | bool
-
-- fail:
- msg: Contiv can not be used with nuage
- when: openshift_use_nuage | default(false) | bool and openshift_use_contiv | default(false) | bool
-
-- fail:
- msg: Calico can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use Calico
- when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_calico | default(false) | bool
-
-- fail:
- msg: The Calico playbook does not yet integrate with the Flannel playbook in Openshift. Set either openshift_use_calico or openshift_use_flannel, but not both.
- when: openshift_use_calico | default(false) | bool and openshift_use_flannel | default(false) | bool
-
-- fail:
- msg: Calico can not be used with Nuage in Openshift. Set either openshift_use_calico or openshift_use_nuage, but not both
- when: openshift_use_calico | default(false) | bool and openshift_use_nuage | default(false) | bool
-
-- fail:
- msg: Calico can not be used with Contiv in Openshift. Set either openshift_use_calico or openshift_use_contiv, but not both
- when: openshift_use_calico | default(false) | bool and openshift_use_contiv | default(false) | bool
-
-- fail:
- msg: openshift_hostname must be 63 characters or less
- when: openshift_hostname is defined and openshift_hostname | length > 63
-
-- name: Set common Cluster facts
- openshift_facts:
- role: common
- local_facts:
- install_examples: "{{ openshift_install_examples | default(True) }}"
- use_openshift_sdn: "{{ openshift_use_openshift_sdn | default(None) }}"
- sdn_network_plugin_name: "{{ os_sdn_network_plugin_name | default(None) }}"
- use_flannel: "{{ openshift_use_flannel | default(None) }}"
- use_calico: "{{openshift_use_calico | default(None) }}"
- use_nuage: "{{ openshift_use_nuage | default(None) }}"
- use_contiv: "{{ openshift_use_contiv | default(None) }}"
- use_manageiq: "{{ openshift_use_manageiq | default(None) }}"
- data_dir: "{{ openshift_data_dir | default(None) }}"
- use_dnsmasq: "{{ openshift_use_dnsmasq | default(None) }}"
-
-- name: Install the base package for versioning
- package:
- name: "{{ openshift.common.service_type }}{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}"
- state: present
- when: not openshift.common.is_containerized | bool
-
-- name: Set version facts
- openshift_facts:
-
-# For enterprise versions < 3.1 and origin versions < 1.1 we want to set the
-# hostname by default.
-- set_fact:
- set_hostname_default: "{{ not openshift.common.version_gte_3_1_or_1_1 }}"
-
-- name: Set hostname
- command: >
- hostnamectl set-hostname {{ openshift.common.hostname }}
- when: openshift_set_hostname | default(set_hostname_default) | bool
diff --git a/roles/openshift_examples/meta/main.yml b/roles/openshift_examples/meta/main.yml
index 5cfda1c89..f3fe2dcbe 100644
--- a/roles/openshift_examples/meta/main.yml
+++ b/roles/openshift_examples/meta/main.yml
@@ -11,5 +11,4 @@ galaxy_info:
- 7
categories:
- cloud
-dependencies:
-- role: openshift_common
+dependencies: []
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index cf78b4a75..ebfa6bb8f 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -449,78 +449,6 @@ def normalize_provider_facts(provider, metadata):
return facts
-def set_flannel_facts_if_unset(facts):
- """ Set flannel facts if not already present in facts dict
- dict: the facts dict updated with the flannel facts if
- missing
- Args:
- facts (dict): existing facts
- Returns:
- dict: the facts dict updated with the flannel
- facts if they were not already present
-
- """
- if 'common' in facts:
- if 'use_flannel' not in facts['common']:
- use_flannel = False
- facts['common']['use_flannel'] = use_flannel
- return facts
-
-
-def set_calico_facts_if_unset(facts):
- """ Set calico facts if not already present in facts dict
- dict: the facts dict updated with the calico facts if
- missing
- Args:
- facts (dict): existing facts
- Returns:
- dict: the facts dict updated with the calico
- facts if they were not already present
-
- """
- if 'common' in facts:
- if 'use_calico' not in facts['common']:
- use_calico = False
- facts['common']['use_calico'] = use_calico
- return facts
-
-
-def set_nuage_facts_if_unset(facts):
- """ Set nuage facts if not already present in facts dict
- dict: the facts dict updated with the nuage facts if
- missing
- Args:
- facts (dict): existing facts
- Returns:
- dict: the facts dict updated with the nuage
- facts if they were not already present
-
- """
- if 'common' in facts:
- if 'use_nuage' not in facts['common']:
- use_nuage = False
- facts['common']['use_nuage'] = use_nuage
- return facts
-
-
-def set_contiv_facts_if_unset(facts):
- """ Set contiv facts if not already present in facts dict
- dict: the facts dict updated with the contiv facts if
- missing
- Args:
- facts (dict): existing facts
- Returns:
- dict: the facts dict updated with the contiv
- facts if they were not already present
-
- """
- if 'common' in facts:
- if 'use_contiv' not in facts['common']:
- use_contiv = False
- facts['common']['use_contiv'] = use_contiv
- return facts
-
-
def set_node_schedulability(facts):
""" Set schedulable facts if not already present in facts dict
Args:
@@ -590,13 +518,8 @@ def set_dnsmasq_facts_if_unset(facts):
"""
if 'common' in facts:
- if 'use_dnsmasq' not in facts['common']:
- facts['common']['use_dnsmasq'] = bool(safe_get_bool(facts['common']['version_gte_3_2_or_1_2']))
if 'master' in facts and 'dns_port' not in facts['master']:
- if safe_get_bool(facts['common']['use_dnsmasq']):
- facts['master']['dns_port'] = 8053
- else:
- facts['master']['dns_port'] = 53
+ facts['master']['dns_port'] = 8053
return facts
@@ -968,27 +891,6 @@ def set_version_facts_if_unset(facts):
return facts
-def set_manageiq_facts_if_unset(facts):
- """ Set manageiq facts. This currently includes common.use_manageiq.
-
- Args:
- facts (dict): existing facts
- Returns:
- dict: the facts dict updated with version facts.
- Raises:
- OpenShiftFactsInternalError:
- """
- if 'common' not in facts:
- if 'version_gte_3_1_or_1_1' not in facts['common']:
- raise OpenShiftFactsInternalError(
- "Invalid invocation: The required facts are not set"
- )
- if 'use_manageiq' not in facts['common']:
- facts['common']['use_manageiq'] = facts['common']['version_gte_3_1_or_1_1']
-
- return facts
-
-
def set_sdn_facts_if_unset(facts, system_facts):
""" Set sdn facts if not already present in facts dict
@@ -999,15 +901,6 @@ def set_sdn_facts_if_unset(facts, system_facts):
dict: the facts dict updated with the generated sdn facts if they
were not already present
"""
- # pylint: disable=too-many-branches
- if 'common' in facts:
- use_sdn = facts['common']['use_openshift_sdn']
- if not (use_sdn == '' or isinstance(use_sdn, bool)):
- use_sdn = safe_get_bool(use_sdn)
- facts['common']['use_openshift_sdn'] = use_sdn
- if 'sdn_network_plugin_name' not in facts['common']:
- plugin = 'redhat/openshift-ovs-subnet' if use_sdn else ''
- facts['common']['sdn_network_plugin_name'] = plugin
if 'master' in facts:
# set defaults for sdn_cluster_network_cidr and sdn_host_subnet_length
@@ -1996,10 +1889,6 @@ class OpenShiftFacts(object):
facts['current_config'] = get_current_config(facts)
facts = set_url_facts_if_unset(facts)
facts = set_project_cfg_facts_if_unset(facts)
- facts = set_flannel_facts_if_unset(facts)
- facts = set_calico_facts_if_unset(facts)
- facts = set_nuage_facts_if_unset(facts)
- facts = set_contiv_facts_if_unset(facts)
facts = set_node_schedulability(facts)
facts = set_selectors(facts)
facts = set_identity_providers_if_unset(facts)
@@ -2011,7 +1900,6 @@ class OpenShiftFacts(object):
facts = build_api_server_args(facts)
facts = set_version_facts_if_unset(facts)
facts = set_dnsmasq_facts_if_unset(facts)
- facts = set_manageiq_facts_if_unset(facts)
facts = set_aggregate_facts(facts)
facts = set_etcd_facts_if_unset(facts)
facts = set_proxy_facts(facts)
@@ -2039,7 +1927,7 @@ class OpenShiftFacts(object):
self.system_facts['ansible_fqdn']]
hostname = choose_hostname(hostname_values, ip_addr)
- defaults['common'] = dict(use_openshift_sdn=True, ip=ip_addr,
+ defaults['common'] = dict(ip=ip_addr,
public_ip=ip_addr,
deployment_type=deployment_type,
deployment_subtype=deployment_subtype,
@@ -2048,10 +1936,8 @@ class OpenShiftFacts(object):
portal_net='172.30.0.0/16',
client_binary='oc', admin_binary='oadm',
dns_domain='cluster.local',
- install_examples=True,
debug_level=2,
- config_base='/etc/origin',
- data_dir='/var/lib/origin')
+ config_base='/etc/origin')
if 'master' in roles:
defaults['master'] = dict(api_use_ssl=True, api_port='8443',
diff --git a/roles/openshift_health_checker/action_plugins/openshift_health_check.py b/roles/openshift_health_checker/action_plugins/openshift_health_check.py
index 8d35db6b5..d02a43655 100644
--- a/roles/openshift_health_checker/action_plugins/openshift_health_check.py
+++ b/roles/openshift_health_checker/action_plugins/openshift_health_check.py
@@ -187,7 +187,7 @@ def normalize(checks):
def run_check(name, check, user_disabled_checks):
"""Run a single check if enabled and return a result dict."""
- if name in user_disabled_checks:
+ if name in user_disabled_checks or '*' in user_disabled_checks:
return dict(skipped=True, skipped_reason="Disabled by user request")
# pylint: disable=broad-except; capturing exceptions broadly is intentional,
diff --git a/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py b/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py
index 349655966..dcaf87eca 100644
--- a/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py
+++ b/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py
@@ -10,6 +10,7 @@ import traceback
from ansible.plugins.callback import CallbackBase
from ansible import constants as C
from ansible.utils.color import stringc
+from ansible.module_utils.six import string_types
FAILED_NO_MSG = u'Failed without returning a message.'
@@ -140,11 +141,19 @@ def deduplicate_failures(failures):
Returns a new list of failures such that identical failures from different
hosts are grouped together in a single entry. The relative order of failures
is preserved.
+
+ If failures is unhashable, the original list of failures is returned.
"""
groups = defaultdict(list)
for failure in failures:
group_key = tuple(sorted((key, value) for key, value in failure.items() if key != 'host'))
- groups[group_key].append(failure)
+ try:
+ groups[group_key].append(failure)
+ except TypeError:
+ # abort and return original list of failures when failures has an
+ # unhashable type.
+ return failures
+
result = []
for failure in failures:
group_key = tuple(sorted((key, value) for key, value in failure.items() if key != 'host'))
@@ -159,7 +168,10 @@ def format_failure(failure):
"""Return a list of pretty-formatted text entries describing a failure, including
relevant information about it. Expect that the list of text entries will be joined
by a newline separator when output to the user."""
- host = u', '.join(failure['host'])
+ if isinstance(failure['host'], string_types):
+ host = failure['host']
+ else:
+ host = u', '.join(failure['host'])
play = failure['play']
task = failure['task']
msg = failure['msg']
diff --git a/roles/openshift_health_checker/library/aos_version.py b/roles/openshift_health_checker/library/aos_version.py
index c8769b511..db3c0b654 100644
--- a/roles/openshift_health_checker/library/aos_version.py
+++ b/roles/openshift_health_checker/library/aos_version.py
@@ -26,15 +26,13 @@ from ansible.module_utils.six import string_types
YUM_IMPORT_EXCEPTION = None
DNF_IMPORT_EXCEPTION = None
-PKG_MGR = None
try:
import yum # pylint: disable=import-error
- PKG_MGR = "yum"
except ImportError as err:
YUM_IMPORT_EXCEPTION = err
+
try:
import dnf # pylint: disable=import-error
- PKG_MGR = "dnf"
except ImportError as err:
DNF_IMPORT_EXCEPTION = err
@@ -51,14 +49,19 @@ def main():
module = AnsibleModule(
argument_spec=dict(
package_list=dict(type="list", required=True),
+ package_mgr=dict(type="str", required=True),
),
supports_check_mode=True
)
- if YUM_IMPORT_EXCEPTION and DNF_IMPORT_EXCEPTION:
+ # determine the package manager to use
+ package_mgr = module.params['package_mgr']
+ if package_mgr not in ('yum', 'dnf'):
+ module.fail_json(msg="package_mgr must be one of: yum, dnf")
+ pkg_mgr_exception = dict(yum=YUM_IMPORT_EXCEPTION, dnf=DNF_IMPORT_EXCEPTION)[package_mgr]
+ if pkg_mgr_exception:
module.fail_json(
- msg="aos_version module could not import yum or dnf: %s %s" %
- (YUM_IMPORT_EXCEPTION, DNF_IMPORT_EXCEPTION)
+ msg="aos_version module could not import {}: {}".format(package_mgr, pkg_mgr_exception)
)
# determine the packages we will look for
@@ -78,7 +81,7 @@ def main():
# get the list of packages available and complain if anything is wrong
try:
- pkgs = _retrieve_available_packages(expected_pkg_names)
+ pkgs = _retrieve_available_packages(package_mgr, expected_pkg_names)
if versioned_pkgs:
_check_precise_version_found(pkgs, _to_dict(versioned_pkgs))
_check_higher_version_found(pkgs, _to_dict(versioned_pkgs))
@@ -93,7 +96,7 @@ def _to_dict(pkg_list):
return {pkg["name"]: pkg for pkg in pkg_list}
-def _retrieve_available_packages(expected_pkgs):
+def _retrieve_available_packages(pkg_mgr, expected_pkgs):
# The openshift excluder prevents unintended updates to openshift
# packages by setting yum excludes on those packages. See:
# https://wiki.centos.org/SpecialInterestGroup/PaaS/OpenShift-Origin-Control-Updates
@@ -103,14 +106,15 @@ def _retrieve_available_packages(expected_pkgs):
# be excluded. So, for our purposes here, disable excludes to see
# what will really be available during an install or upgrade.
- if PKG_MGR == "yum":
+ if pkg_mgr == "yum":
# search for package versions available for openshift pkgs
yb = yum.YumBase() # pylint: disable=invalid-name
yb.conf.disable_excludes = ['all']
try:
- pkgs = yb.pkgSack.returnPackages(patterns=expected_pkgs)
+ pkgs = yb.rpmdb.returnPackages(patterns=expected_pkgs)
+ pkgs += yb.pkgSack.returnPackages(patterns=expected_pkgs)
except yum.Errors.PackageSackError as excinfo:
# you only hit this if *none* of the packages are available
raise AosVersionException('\n'.join([
@@ -118,7 +122,7 @@ def _retrieve_available_packages(expected_pkgs):
'Check your subscription and repo settings.',
str(excinfo),
]))
- elif PKG_MGR == "dnf":
+ elif pkg_mgr == "dnf":
dbase = dnf.Base() # pylint: disable=invalid-name
dbase.conf.disable_excludes = ['all']
@@ -127,8 +131,11 @@ def _retrieve_available_packages(expected_pkgs):
dquery = dbase.sack.query()
aquery = dquery.available()
+ iquery = dquery.installed()
- pkgs = list(aquery.filter(name=expected_pkgs))
+ available_pkgs = list(aquery.filter(name=expected_pkgs))
+ installed_pkgs = list(iquery.filter(name=expected_pkgs))
+ pkgs = available_pkgs + installed_pkgs
if not pkgs:
# pkgs list is empty, raise because no expected packages found
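For illustration only (not part of this change): a sketch of the argument structure a caller is now expected to build for aos_version, since package_mgr is a required parameter. The package names and versions below are hypothetical; the real caller (package_version.py, updated later in this diff) takes the value from ansible_pkg_mgr.
```python
# Hypothetical args dict a health check would pass to the aos_version module.
# "package_mgr" must be "yum" or "dnf"; any other value makes the module fail.
args = {
    "package_mgr": "yum",  # normally taken from the ansible_pkg_mgr fact
    "package_list": [
        {"name": "openvswitch", "version": ["2.6", "2.7"]},  # versions are illustrative
    ],
}
```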
diff --git a/roles/openshift_health_checker/openshift_checks/__init__.py b/roles/openshift_health_checker/openshift_checks/__init__.py
index 02ee1d0f9..987c955b6 100644
--- a/roles/openshift_health_checker/openshift_checks/__init__.py
+++ b/roles/openshift_health_checker/openshift_checks/__init__.py
@@ -4,6 +4,7 @@ Health checks for OpenShift clusters.
import operator
import os
+import time
from abc import ABCMeta, abstractmethod, abstractproperty
from importlib import import_module
@@ -57,6 +58,9 @@ class OpenShiftCheck(object):
self._execute_module = execute_module
self.task_vars = task_vars or {}
self.tmp = tmp
+ # mainly for testing purposes; see execute_module_with_retries
+ self._module_retries = 3
+ self._module_retry_interval = 5 # seconds
# set to True when the check changes the host, for accurate total "changed" count
self.changed = False
@@ -115,6 +119,19 @@ class OpenShiftCheck(object):
)
return self._execute_module(module_name, module_args, self.tmp, self.task_vars)
+ def execute_module_with_retries(self, module_name, module_args):
+ """Run execute_module and retry on failure."""
+ result = {}
+ tries = 0
+ while True:
+ res = self.execute_module(module_name, module_args)
+ if tries > self._module_retries or not res.get("failed"):
+ result.update(res)
+ return result
+ result["last_failed"] = res
+ tries += 1
+ time.sleep(self._module_retry_interval)
+
def get_var(self, *keys, **kwargs):
"""Get deeply nested values from task_vars.
diff --git a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
index 857a80c74..9c35f0f92 100644
--- a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
@@ -32,6 +32,12 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
# we use python-docker-py to check local docker for images, and skopeo
# to look for images available remotely without waiting to pull them.
dependencies = ["python-docker-py", "skopeo"]
+ skopeo_img_check_command = "timeout 10 skopeo inspect --tls-verify=false docker://{registry}/{image}"
+
+ def __init__(self, *args, **kwargs):
+ super(DockerImageAvailability, self).__init__(*args, **kwargs)
+ # record whether we could reach a registry or not (and remember results)
+ self.reachable_registries = {}
def is_active(self):
"""Skip hosts with unsupported deployment types."""
@@ -63,13 +69,21 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
unavailable_images = set(missing_images) - set(available_images)
if unavailable_images:
- return {
- "failed": True,
- "msg": (
- "One or more required Docker images are not available:\n {}\n"
- "Configured registries: {}"
- ).format(",\n ".join(sorted(unavailable_images)), ", ".join(registries)),
- }
+ registries = [
+ reg if self.reachable_registries.get(reg, True) else reg + " (unreachable)"
+ for reg in registries
+ ]
+ msg = (
+ "One or more required Docker images are not available:\n {}\n"
+ "Configured registries: {}\n"
+ "Checked by: {}"
+ ).format(
+ ",\n ".join(sorted(unavailable_images)),
+ ", ".join(registries),
+ self.skopeo_img_check_command
+ )
+
+ return dict(failed=True, msg=msg)
return {}
@@ -125,31 +139,31 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
def local_images(self, images):
"""Filter a list of images and return those available locally."""
- return [
- image for image in images
- if self.is_image_local(image)
- ]
+ registries = self.known_docker_registries()
+ found_images = []
+ for image in images:
+ # docker could have the image name as-is or prefixed with any registry
+ imglist = [image] + [reg + "/" + image for reg in registries]
+ if self.is_image_local(imglist):
+ found_images.append(image)
+ return found_images
def is_image_local(self, image):
"""Check if image is already in local docker index."""
result = self.execute_module("docker_image_facts", {"name": image})
- if result.get("failed", False):
- return False
-
- return bool(result.get("images", []))
+ return bool(result.get("images")) and not result.get("failed")
def known_docker_registries(self):
"""Build a list of docker registries available according to inventory vars."""
- docker_facts = self.get_var("openshift", "docker")
- regs = set(docker_facts["additional_registries"])
+ regs = list(self.get_var("openshift.docker.additional_registries", default=[]))
deployment_type = self.get_var("openshift_deployment_type")
- if deployment_type == "origin":
- regs.update(["docker.io"])
- elif "enterprise" in deployment_type:
- regs.update(["registry.access.redhat.com"])
+ if deployment_type == "origin" and "docker.io" not in regs:
+ regs.append("docker.io")
+ elif "enterprise" in deployment_type and "registry.access.redhat.com" not in regs:
+ regs.append("registry.access.redhat.com")
- return list(regs)
+ return regs
def available_images(self, images, default_registries):
"""Search remotely for images. Returns: list of images found."""
@@ -162,18 +176,35 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
"""Use Skopeo to determine if required image exists in known registry(s)."""
registries = default_registries
- # if image already includes a registry, only use that
+ # If image already includes a registry, only use that.
+ # NOTE: This logic would incorrectly identify images that do not use a namespace, e.g.
+ # registry.access.redhat.com/rhel7 as if the registry were a namespace.
+ # It's not clear that there's any way to distinguish them, but fortunately
+ # the current set of images all look like [registry/]namespace/name[:version].
if image.count("/") > 1:
registry, image = image.split("/", 1)
registries = [registry]
for registry in registries:
- args = {
- "_raw_params": "timeout 10 skopeo inspect --tls-verify=false "
- "docker://{}/{}".format(registry, image)
- }
- result = self.execute_module("command", args)
+ if registry not in self.reachable_registries:
+ self.reachable_registries[registry] = self.connect_to_registry(registry)
+ if not self.reachable_registries[registry]:
+ continue
+
+ args = {"_raw_params": self.skopeo_img_check_command.format(registry=registry, image=image)}
+ result = self.execute_module_with_retries("command", args)
if result.get("rc", 0) == 0 and not result.get("failed"):
return True
+ if result.get("rc") == 124: # RC 124 == timed out; mark unreachable
+ self.reachable_registries[registry] = False
return False
+
+ def connect_to_registry(self, registry):
+ """Use ansible wait_for module to test connectivity from host to registry. Returns bool."""
+ # test a simple TCP connection
+ host, _, port = registry.partition(":")
+ port = port or 443
+ args = dict(host=host, port=port, state="started", timeout=30)
+ result = self.execute_module("wait_for", args)
+ return result.get("rc", 0) == 0 and not result.get("failed")
diff --git a/roles/openshift_health_checker/openshift_checks/mixins.py b/roles/openshift_health_checker/openshift_checks/mixins.py
index e9bae60a3..24f1d938a 100644
--- a/roles/openshift_health_checker/openshift_checks/mixins.py
+++ b/roles/openshift_health_checker/openshift_checks/mixins.py
@@ -36,7 +36,7 @@ class DockerHostMixin(object):
# NOTE: we would use the "package" module but it's actually an action plugin
# and it's not clear how to invoke one of those. This is about the same anyway:
- result = self.execute_module(
+ result = self.execute_module_with_retries(
self.get_var("ansible_pkg_mgr", default="yum"),
{"name": self.dependencies, "state": "present"},
)
diff --git a/roles/openshift_health_checker/openshift_checks/package_availability.py b/roles/openshift_health_checker/openshift_checks/package_availability.py
index a86180b00..21355c2f0 100644
--- a/roles/openshift_health_checker/openshift_checks/package_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/package_availability.py
@@ -26,7 +26,7 @@ class PackageAvailability(NotContainerizedMixin, OpenShiftCheck):
packages.update(self.node_packages(rpm_prefix))
args = {"packages": sorted(set(packages))}
- return self.execute_module("check_yum_update", args)
+ return self.execute_module_with_retries("check_yum_update", args)
@staticmethod
def master_packages(rpm_prefix):
diff --git a/roles/openshift_health_checker/openshift_checks/package_update.py b/roles/openshift_health_checker/openshift_checks/package_update.py
index 1e9aecbe0..8464e8a5e 100644
--- a/roles/openshift_health_checker/openshift_checks/package_update.py
+++ b/roles/openshift_health_checker/openshift_checks/package_update.py
@@ -11,4 +11,4 @@ class PackageUpdate(NotContainerizedMixin, OpenShiftCheck):
def run(self):
args = {"packages": []}
- return self.execute_module("check_yum_update", args)
+ return self.execute_module_with_retries("check_yum_update", args)
diff --git a/roles/openshift_health_checker/openshift_checks/package_version.py b/roles/openshift_health_checker/openshift_checks/package_version.py
index 8b780114f..d4aec3ed8 100644
--- a/roles/openshift_health_checker/openshift_checks/package_version.py
+++ b/roles/openshift_health_checker/openshift_checks/package_version.py
@@ -46,6 +46,7 @@ class PackageVersion(NotContainerizedMixin, OpenShiftCheck):
check_multi_minor_release = deployment_type in ['openshift-enterprise']
args = {
+ "package_mgr": self.get_var("ansible_pkg_mgr"),
"package_list": [
{
"name": "openvswitch",
@@ -75,7 +76,7 @@ class PackageVersion(NotContainerizedMixin, OpenShiftCheck):
],
}
- return self.execute_module("aos_version", args)
+ return self.execute_module_with_retries("aos_version", args)
def get_required_ovs_version(self):
"""Return the correct Open vSwitch version(s) for the current OpenShift version."""
diff --git a/roles/openshift_health_checker/test/action_plugin_test.py b/roles/openshift_health_checker/test/action_plugin_test.py
index c109ebd24..58864da21 100644
--- a/roles/openshift_health_checker/test/action_plugin_test.py
+++ b/roles/openshift_health_checker/test/action_plugin_test.py
@@ -110,11 +110,16 @@ def test_action_plugin_skip_non_active_checks(plugin, task_vars, monkeypatch):
assert not skipped(result)
-def test_action_plugin_skip_disabled_checks(plugin, task_vars, monkeypatch):
+@pytest.mark.parametrize('to_disable', [
+ 'fake_check',
+ ['fake_check', 'spam'],
+ '*,spam,eggs',
+])
+def test_action_plugin_skip_disabled_checks(to_disable, plugin, task_vars, monkeypatch):
checks = [fake_check('fake_check', is_active=True)]
monkeypatch.setattr('openshift_checks.OpenShiftCheck.subclasses', classmethod(lambda cls: checks))
- task_vars['openshift_disable_check'] = 'fake_check'
+ task_vars['openshift_disable_check'] = to_disable
result = plugin.run(tmp=None, task_vars=task_vars)
assert result['checks']['fake_check'] == dict(skipped=True, skipped_reason="Disabled by user request")
diff --git a/roles/openshift_health_checker/test/docker_image_availability_test.py b/roles/openshift_health_checker/test/docker_image_availability_test.py
index 8d0a53df9..6a7c16c7e 100644
--- a/roles/openshift_health_checker/test/docker_image_availability_test.py
+++ b/roles/openshift_health_checker/test/docker_image_availability_test.py
@@ -3,6 +3,23 @@ import pytest
from openshift_checks.docker_image_availability import DockerImageAvailability
+@pytest.fixture()
+def task_vars():
+ return dict(
+ openshift=dict(
+ common=dict(
+ service_type='origin',
+ is_containerized=False,
+ is_atomic=False,
+ ),
+ docker=dict(),
+ ),
+ openshift_deployment_type='origin',
+ openshift_image_tag='',
+ group_names=['nodes', 'masters'],
+ )
+
+
@pytest.mark.parametrize('deployment_type, is_containerized, group_names, expect_active', [
("origin", True, [], True),
("openshift-enterprise", True, [], True),
@@ -15,12 +32,10 @@ from openshift_checks.docker_image_availability import DockerImageAvailability
("origin", False, ["nodes", "masters"], True),
("openshift-enterprise", False, ["etcd"], False),
])
-def test_is_active(deployment_type, is_containerized, group_names, expect_active):
- task_vars = dict(
- openshift=dict(common=dict(is_containerized=is_containerized)),
- openshift_deployment_type=deployment_type,
- group_names=group_names,
- )
+def test_is_active(task_vars, deployment_type, is_containerized, group_names, expect_active):
+ task_vars['openshift_deployment_type'] = deployment_type
+ task_vars['openshift']['common']['is_containerized'] = is_containerized
+ task_vars['group_names'] = group_names
assert DockerImageAvailability(None, task_vars).is_active() == expect_active
@@ -30,10 +45,10 @@ def test_is_active(deployment_type, is_containerized, group_names, expect_active
(True, False),
(False, True),
])
-def test_all_images_available_locally(is_containerized, is_atomic):
+def test_all_images_available_locally(task_vars, is_containerized, is_atomic):
def execute_module(module_name, module_args, *_):
if module_name == "yum":
- return {"changed": True}
+ return {}
assert module_name == "docker_image_facts"
assert 'name' in module_args
@@ -42,19 +57,9 @@ def test_all_images_available_locally(is_containerized, is_atomic):
'images': [module_args['name']],
}
- result = DockerImageAvailability(execute_module, task_vars=dict(
- openshift=dict(
- common=dict(
- service_type='origin',
- is_containerized=is_containerized,
- is_atomic=is_atomic,
- ),
- docker=dict(additional_registries=["docker.io"]),
- ),
- openshift_deployment_type='origin',
- openshift_image_tag='3.4',
- group_names=['nodes', 'masters'],
- )).run()
+ task_vars['openshift']['common']['is_containerized'] = is_containerized
+ task_vars['openshift']['common']['is_atomic'] = is_atomic
+ result = DockerImageAvailability(execute_module, task_vars).run()
assert not result.get('failed', False)
@@ -63,53 +68,36 @@ def test_all_images_available_locally(is_containerized, is_atomic):
False,
True,
])
-def test_all_images_available_remotely(available_locally):
+def test_all_images_available_remotely(task_vars, available_locally):
def execute_module(module_name, *_):
if module_name == 'docker_image_facts':
return {'images': [], 'failed': available_locally}
- return {'changed': False}
+ return {}
- result = DockerImageAvailability(execute_module, task_vars=dict(
- openshift=dict(
- common=dict(
- service_type='origin',
- is_containerized=False,
- is_atomic=False,
- ),
- docker=dict(additional_registries=["docker.io", "registry.access.redhat.com"]),
- ),
- openshift_deployment_type='origin',
- openshift_image_tag='v3.4',
- group_names=['nodes', 'masters'],
- )).run()
+ task_vars['openshift']['docker']['additional_registries'] = ["docker.io", "registry.access.redhat.com"]
+ task_vars['openshift_image_tag'] = 'v3.4'
+ check = DockerImageAvailability(execute_module, task_vars)
+ check._module_retry_interval = 0
+ result = check.run()
assert not result.get('failed', False)
-def test_all_images_unavailable():
- def execute_module(module_name=None, *_):
- if module_name == "command":
- return {
- 'failed': True,
- }
+def test_all_images_unavailable(task_vars):
+ def execute_module(module_name=None, *args):
+ if module_name == "wait_for":
+ return {}
+ elif module_name == "command":
+ return {'failed': True}
- return {
- 'changed': False,
- }
+ return {} # docker_image_facts failure
- actual = DockerImageAvailability(execute_module, task_vars=dict(
- openshift=dict(
- common=dict(
- service_type='origin',
- is_containerized=False,
- is_atomic=False,
- ),
- docker=dict(additional_registries=["docker.io"]),
- ),
- openshift_deployment_type="openshift-enterprise",
- openshift_image_tag='latest',
- group_names=['nodes', 'masters'],
- )).run()
+ task_vars['openshift']['docker']['additional_registries'] = ["docker.io"]
+ task_vars['openshift_deployment_type'] = "openshift-enterprise"
+ task_vars['openshift_image_tag'] = 'latest'
+ check = DockerImageAvailability(execute_module, task_vars)
+ check._module_retry_interval = 0
+ actual = check.run()
assert actual['failed']
assert "required Docker images are not available" in actual['msg']
@@ -125,62 +113,63 @@ def test_all_images_unavailable():
["dependencies can be installed via `yum`"]
),
])
-def test_skopeo_update_failure(message, extra_words):
+def test_skopeo_update_failure(task_vars, message, extra_words):
def execute_module(module_name=None, *_):
if module_name == "yum":
return {
"failed": True,
"msg": message,
- "changed": False,
}
- return {'changed': False}
+ return {}
- actual = DockerImageAvailability(execute_module, task_vars=dict(
- openshift=dict(
- common=dict(
- service_type='origin',
- is_containerized=False,
- is_atomic=False,
- ),
- docker=dict(additional_registries=["unknown.io"]),
- ),
- openshift_deployment_type="openshift-enterprise",
- openshift_image_tag='',
- group_names=['nodes', 'masters'],
- )).run()
+ task_vars['openshift']['docker']['additional_registries'] = ["unknown.io"]
+ task_vars['openshift_deployment_type'] = "openshift-enterprise"
+ check = DockerImageAvailability(execute_module, task_vars)
+ check._module_retry_interval = 0
+ actual = check.run()
assert actual["failed"]
for word in extra_words:
assert word in actual["msg"]
-@pytest.mark.parametrize("deployment_type,registries", [
- ("origin", ["unknown.io"]),
- ("openshift-enterprise", ["registry.access.redhat.com"]),
- ("openshift-enterprise", []),
-])
-def test_registry_availability(deployment_type, registries):
+@pytest.mark.parametrize(
+ "image, registries, connection_test_failed, skopeo_failed, "
+ "expect_success, expect_registries_reached", [
+ (
+ "spam/eggs:v1", ["test.reg"],
+ True, True,
+ False,
+ {"test.reg": False},
+ ),
+ (
+ "spam/eggs:v1", ["test.reg"],
+ False, True,
+ False,
+ {"test.reg": True},
+ ),
+ (
+ "eggs.reg/spam/eggs:v1", ["test.reg"],
+ False, False,
+ True,
+ {"eggs.reg": True},
+ ),
+ ])
+def test_registry_availability(image, registries, connection_test_failed, skopeo_failed,
+ expect_success, expect_registries_reached):
def execute_module(module_name=None, *_):
- return {
- 'changed': False,
- }
+ if module_name == "wait_for":
+ return dict(msg="msg", failed=connection_test_failed)
+ elif module_name == "command":
+ return dict(msg="msg", failed=skopeo_failed)
- actual = DockerImageAvailability(execute_module, task_vars=dict(
- openshift=dict(
- common=dict(
- service_type='origin',
- is_containerized=False,
- is_atomic=False,
- ),
- docker=dict(additional_registries=registries),
- ),
- openshift_deployment_type=deployment_type,
- openshift_image_tag='',
- group_names=['nodes', 'masters'],
- )).run()
+ check = DockerImageAvailability(execute_module, task_vars())
+ check._module_retry_interval = 0
- assert not actual.get("failed", False)
+ available = check.is_available_skopeo_image(image, registries)
+ assert available == expect_success
+ assert expect_registries_reached == check.reachable_registries
@pytest.mark.parametrize("deployment_type, is_containerized, groups, oreg_url, expected", [
@@ -257,7 +246,7 @@ def test_required_images(deployment_type, is_containerized, groups, oreg_url, ex
openshift_image_tag='vtest',
)
- assert expected == DockerImageAvailability("DUMMY", task_vars).required_images()
+ assert expected == DockerImageAvailability(task_vars=task_vars).required_images()
def test_containerized_etcd():
@@ -271,4 +260,4 @@ def test_containerized_etcd():
group_names=['etcd'],
)
expected = set(['registry.access.redhat.com/rhel7/etcd'])
- assert expected == DockerImageAvailability("DUMMY", task_vars).required_images()
+ assert expected == DockerImageAvailability(task_vars=task_vars).required_images()
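
The test diff above replaces the per-test task_vars dicts with one shared pytest fixture that each test mutates before constructing the check. A minimal, self-contained sketch of that pattern (the names base_vars and test_needs_containerized are illustrative, not from the repo):

    import pytest


    @pytest.fixture()
    def base_vars():
        # each test gets a fresh dict, so in-place edits cannot leak between cases
        return dict(deployment_type="origin", is_containerized=False)


    @pytest.mark.parametrize("containerized", [True, False])
    def test_needs_containerized(base_vars, containerized):
        base_vars["is_containerized"] = containerized
        assert base_vars["is_containerized"] == containerized
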
diff --git a/roles/openshift_health_checker/test/package_availability_test.py b/roles/openshift_health_checker/test/package_availability_test.py
index 1fe648b75..8aa87ca59 100644
--- a/roles/openshift_health_checker/test/package_availability_test.py
+++ b/roles/openshift_health_checker/test/package_availability_test.py
@@ -56,7 +56,7 @@ def test_package_availability(task_vars, must_have_packages, must_not_have_packa
assert 'packages' in module_args
assert set(module_args['packages']).issuperset(must_have_packages)
assert not set(module_args['packages']).intersection(must_not_have_packages)
- return return_value
+ return {'foo': return_value}
result = PackageAvailability(execute_module, task_vars).run()
- assert result is return_value
+ assert result['foo'] is return_value
diff --git a/roles/openshift_health_checker/test/package_update_test.py b/roles/openshift_health_checker/test/package_update_test.py
index 06489b0d7..7d9035a36 100644
--- a/roles/openshift_health_checker/test/package_update_test.py
+++ b/roles/openshift_health_checker/test/package_update_test.py
@@ -9,7 +9,7 @@ def test_package_update():
assert 'packages' in module_args
# empty list of packages means "generic check if 'yum update' will work"
assert module_args['packages'] == []
- return return_value
+ return {'foo': return_value}
result = PackageUpdate(execute_module).run()
- assert result is return_value
+ assert result['foo'] is return_value
diff --git a/roles/openshift_health_checker/test/package_version_test.py b/roles/openshift_health_checker/test/package_version_test.py
index 6054d3f3e..8564cd4db 100644
--- a/roles/openshift_health_checker/test/package_version_test.py
+++ b/roles/openshift_health_checker/test/package_version_test.py
@@ -5,6 +5,7 @@ from openshift_checks.package_version import PackageVersion, OpenShiftCheckExcep
def task_vars_for(openshift_release, deployment_type):
return dict(
+ ansible_pkg_mgr='yum',
openshift=dict(common=dict(service_type=deployment_type)),
openshift_release=openshift_release,
openshift_image_tag='v' + openshift_release,
@@ -27,6 +28,7 @@ def test_openshift_version_not_supported():
def test_invalid_openshift_release_format():
task_vars = dict(
+ ansible_pkg_mgr='yum',
openshift=dict(common=dict(service_type='origin')),
openshift_image_tag='v0',
openshift_deployment_type='origin',
@@ -50,7 +52,7 @@ def test_invalid_openshift_release_format():
])
def test_package_version(openshift_release):
- return_value = object()
+ return_value = {"foo": object()}
def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None, *_):
assert module_name == 'aos_version'
@@ -64,7 +66,7 @@ def test_package_version(openshift_release):
check = PackageVersion(execute_module, task_vars_for(openshift_release, 'origin'))
result = check.run()
- assert result is return_value
+ assert result == return_value
@pytest.mark.parametrize('deployment_type,openshift_release,expected_docker_version', [
@@ -77,7 +79,7 @@ def test_package_version(openshift_release):
])
def test_docker_package_version(deployment_type, openshift_release, expected_docker_version):
- return_value = object()
+ return_value = {"foo": object()}
def execute_module(module_name=None, module_args=None, *_):
assert module_name == 'aos_version'
@@ -91,7 +93,7 @@ def test_docker_package_version(deployment_type, openshift_release, expected_doc
check = PackageVersion(execute_module, task_vars_for(openshift_release, deployment_type))
result = check.run()
- assert result is return_value
+ assert result == return_value
@pytest.mark.parametrize('group_names,is_containerized,is_active', [
diff --git a/roles/openshift_health_checker/test/zz_failure_summary_test.py b/roles/openshift_health_checker/test/zz_failure_summary_test.py
index 0fc258133..69f27653c 100644
--- a/roles/openshift_health_checker/test/zz_failure_summary_test.py
+++ b/roles/openshift_health_checker/test/zz_failure_summary_test.py
@@ -65,6 +65,21 @@ import pytest
},
],
),
+ # if a failure contains an unhashable value, it will not be deduplicated
+ (
+ [
+ {
+ 'host': 'master1',
+ 'msg': {'unhashable': 'value'},
+ },
+ ],
+ [
+ {
+ 'host': 'master1',
+ 'msg': {'unhashable': 'value'},
+ },
+ ],
+ ),
])
def test_deduplicate_failures(failures, deduplicated):
assert deduplicate_failures(failures) == deduplicated
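
The new parametrize case asserts that a failure whose msg is a dict (and therefore unhashable) passes through deduplication untouched. A hedged sketch of dedup logic with that fallback (the real deduplicate_failures lives in the zz_failure_summary callback plugin and may differ in detail):

    def deduplicate_failures_sketch(failures):
        seen = set()
        result = []
        for failure in failures:
            try:
                key = tuple(sorted(failure.items()))
                hash(key)  # raises TypeError if any value is unhashable
            except TypeError:
                result.append(failure)  # unhashable value somewhere: keep it as-is
                continue
            if key not in seen:
                seen.add(key)
                result.append(failure)
        return result
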
diff --git a/roles/openshift_hosted/README.md b/roles/openshift_hosted/README.md
index 3e5d7f860..29ae58556 100644
--- a/roles/openshift_hosted/README.md
+++ b/roles/openshift_hosted/README.md
@@ -39,7 +39,6 @@ variables also control configuration behavior:
Dependencies
------------
-* openshift_common
* openshift_hosted_facts
Example Playbook
diff --git a/roles/openshift_hosted/defaults/main.yml b/roles/openshift_hosted/defaults/main.yml
index c26df3afa..08c1d849e 100644
--- a/roles/openshift_hosted/defaults/main.yml
+++ b/roles/openshift_hosted/defaults/main.yml
@@ -47,3 +47,9 @@ r_openshift_hosted_registry_os_firewall_allow:
- service: Docker Registry Port
port: 5000/tcp
cond: "{{ r_openshift_hosted_use_calico }}"
+
+# NOTE
+# r_openshift_hosted_use_calico_default may be defined external to this role.
+# openshift_use_calico, if defined, may affect other roles or play behavior.
+r_openshift_hosted_use_calico_default: "{{ openshift_use_calico | default(False) }}"
+r_openshift_hosted_use_calico: "{{ r_openshift_hosted_use_calico_default }}"
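
The NOTE block above introduces the role-local default indirection (openshift_use_calico feeds r_openshift_hosted_use_calico_default, which feeds r_openshift_hosted_use_calico); the same pattern recurs below for the loadbalancer, master, and node roles. A small illustration of how the chain resolves, rendered manually with the jinja2 library; in a real play Ansible evaluates this lazily under its own variable-precedence rules, so treat this only as an analogy:

    from jinja2 import Template


    def render(expr, variables):
        return Template(expr).render(**variables)

    user_vars = {}  # the inventory does not set openshift_use_calico

    # Step 1: the *_default variable falls back to False when the global is unset.
    default_val = render("{{ openshift_use_calico | default(False) }}", user_vars)

    # Step 2: the role-facing variable simply mirrors the *_default one, giving a
    # play a role-local hook to override without redefining the global.
    final_val = render("{{ r_openshift_hosted_use_calico_default }}",
                       {"r_openshift_hosted_use_calico_default": default_val})

    print(final_val)  # "False" here; "True" if user_vars sets openshift_use_calico=True
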
diff --git a/roles/openshift_hosted/tasks/registry/registry.yml b/roles/openshift_hosted/tasks/registry/registry.yml
index 3e424da12..d73c290ff 100644
--- a/roles/openshift_hosted/tasks/registry/registry.yml
+++ b/roles/openshift_hosted/tasks/registry/registry.yml
@@ -61,6 +61,14 @@
openshift_hosted_registry_env_vars: "{{ openshift_hosted_registry_env_vars | combine({'OPENSHIFT_DEFAULT_REGISTRY':'docker-registry.default.svc:5000'}) }}"
when: openshift_push_via_dns | default(false) | bool
+- name: Update registry proxy settings for dc/docker-registry
+ set_fact:
+ openshift_hosted_registry_env_vars: "{{ {'HTTPS_PROXY': (openshift.common.https_proxy | default('')),
+ 'HTTP_PROXY': (openshift.common.http_proxy | default('')),
+ 'NO_PROXY': (openshift.common.no_proxy | default(''))}
+ | combine(openshift_hosted_registry_env_vars) }}"
+ when: (openshift.common.https_proxy | default(False)) or (openshift.common.http_proxy | default('')) != ''
+
- name: Create the registry service account
oc_serviceaccount:
name: "{{ openshift_hosted_registry_serviceaccount }}"
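
The new task above seeds HTTPS_PROXY, HTTP_PROXY, and NO_PROXY and then pipes that dict through combine(openshift_hosted_registry_env_vars), so any value already present in the registry env vars wins over the derived proxy settings on a key collision. A plain-Python analogue of that merge direction (the proxy addresses are placeholders):

    proxy_defaults = {"HTTPS_PROXY": "http://proxy.example:3128",
                      "HTTP_PROXY": "", "NO_PROXY": ""}
    registry_env_vars = {"HTTPS_PROXY": "http://other-proxy.example:3128"}

    # combine() lets the right-hand dict override the left-hand one,
    # just like a dict unpacking merge in Python.
    merged = {**proxy_defaults, **registry_env_vars}
    print(merged["HTTPS_PROXY"])  # the explicitly configured registry value wins
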
diff --git a/roles/openshift_hosted/tasks/router/router.yml b/roles/openshift_hosted/tasks/router/router.yml
index e57ed733e..68ec7233e 100644
--- a/roles/openshift_hosted/tasks/router/router.yml
+++ b/roles/openshift_hosted/tasks/router/router.yml
@@ -18,6 +18,15 @@
openshift_hosted_router_selector: "{{ openshift.hosted.router.selector | default(None) }}"
openshift_hosted_router_image: "{{ openshift.hosted.router.registryurl }}"
+- name: Get the certificate contents for router
+ copy:
+ backup: True
+ dest: "/etc/origin/master/{{ item | basename }}"
+ src: "{{ item }}"
+ with_items: "{{ openshift_hosted_routers | oo_collect(attribute='certificate') |
+ oo_select_keys_from_list(['keyfile', 'certfile', 'cafile']) }}"
+ when: ( not openshift_hosted_router_create_certificate | bool ) or openshift_hosted_router_certificate != {}
+
# This is for when we desire a cluster signed cert
# The certificate is generated and placed in master_config_dir/
- block:
@@ -43,15 +52,6 @@
# End Block
when: ( openshift_hosted_router_create_certificate | bool ) and openshift_hosted_router_certificate == {}
-- name: Get the certificate contents for router
- copy:
- backup: True
- dest: "/etc/origin/master/{{ item | basename }}"
- src: "{{ item }}"
- with_items: "{{ openshift_hosted_routers | oo_collect(attribute='certificate') |
- oo_select_keys_from_list(['keyfile', 'certfile', 'cafile']) }}"
- when: not openshift_hosted_router_create_certificate | bool
-
- name: Create the router service account(s)
oc_serviceaccount:
name: "{{ item.serviceaccount }}"
diff --git a/roles/openshift_hosted_logging/meta/main.yaml b/roles/openshift_hosted_logging/meta/main.yaml
index 044c8043c..ab07a77c1 100644
--- a/roles/openshift_hosted_logging/meta/main.yaml
+++ b/roles/openshift_hosted_logging/meta/main.yaml
@@ -1,4 +1,3 @@
---
dependencies:
- - { role: openshift_common }
- { role: openshift_master_facts }
diff --git a/roles/openshift_hosted_templates/meta/main.yml b/roles/openshift_hosted_templates/meta/main.yml
index 9c12865bf..4027f524b 100644
--- a/roles/openshift_hosted_templates/meta/main.yml
+++ b/roles/openshift_hosted_templates/meta/main.yml
@@ -11,5 +11,4 @@ galaxy_info:
- 7
categories:
- cloud
-dependencies:
-- role: openshift_common
+dependencies: []
diff --git a/roles/openshift_loadbalancer/defaults/main.yml b/roles/openshift_loadbalancer/defaults/main.yml
index 41a2b12a2..239b16427 100644
--- a/roles/openshift_loadbalancer/defaults/main.yml
+++ b/roles/openshift_loadbalancer/defaults/main.yml
@@ -24,4 +24,10 @@ r_openshift_loadbalancer_os_firewall_allow:
port: "{{ openshift_master_api_port | default(8443) }}/tcp"
- service: nuage mon
port: "{{ nuage_mon_rest_server_port | default(9443) }}/tcp"
- cond: "{{ openshift_use_nuage | default(false) | bool }}"
+ cond: "{{ r_openshift_lb_use_nuage | bool }}"
+
+# NOTE
+# r_openshift_lb_use_nuage_default may be defined external to this role.
+# openshift_use_nuage, if defined, may affect other roles or play behavior.
+r_openshift_lb_use_nuage_default: "{{ openshift_use_nuage | default(False) }}"
+r_openshift_lb_use_nuage: "{{ r_openshift_lb_use_nuage_default }}"
diff --git a/roles/openshift_logging_curator/templates/curator.j2 b/roles/openshift_logging_curator/templates/curator.j2
index 6431f86d9..e74918a40 100644
--- a/roles/openshift_logging_curator/templates/curator.j2
+++ b/roles/openshift_logging_curator/templates/curator.j2
@@ -44,6 +44,8 @@ spec:
cpu: "{{curator_cpu_limit}}"
{% if curator_memory_limit is defined and curator_memory_limit is not none and curator_memory_limit != "" %}
memory: "{{curator_memory_limit}}"
+ requests:
+ memory: "{{curator_memory_limit}}"
{% endif %}
env:
-
diff --git a/roles/openshift_logging_curator/vars/main.yml b/roles/openshift_logging_curator/vars/main.yml
index 97525479e..95bf462d1 100644
--- a/roles/openshift_logging_curator/vars/main.yml
+++ b/roles/openshift_logging_curator/vars/main.yml
@@ -1,3 +1,3 @@
---
-__latest_curator_version: "3_5"
-__allowed_curator_versions: ["3_5", "3_6"]
+__latest_curator_version: "3_6"
+__allowed_curator_versions: ["3_5", "3_6", "3_7"]
diff --git a/roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2 b/roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2
index 0c06a7677..65b08d970 100644
--- a/roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2
+++ b/roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2
@@ -24,7 +24,8 @@ network:
cloud:
kubernetes:
- service: ${SERVICE_DNS}
+ pod_label: ${POD_LABEL}
+ pod_port: 9300
namespace: ${NAMESPACE}
discovery:
diff --git a/roles/openshift_logging_elasticsearch/templates/es.j2 b/roles/openshift_logging_elasticsearch/templates/es.j2
index cbe6b89f2..3c8f390c4 100644
--- a/roles/openshift_logging_elasticsearch/templates/es.j2
+++ b/roles/openshift_logging_elasticsearch/templates/es.j2
@@ -48,7 +48,7 @@ spec:
cpu: "{{es_cpu_limit}}"
{% endif %}
requests:
- memory: "512Mi"
+ memory: "{{es_memory_limit}}"
ports:
-
containerPort: 9200
@@ -90,6 +90,12 @@ spec:
name: "RECOVER_AFTER_TIME"
value: "{{openshift_logging_elasticsearch_recover_after_time}}"
-
+ name: "READINESS_PROBE_TIMEOUT"
+ value: "30"
+ -
+ name: "POD_LABEL"
+ value: "component={{component}}"
+ -
name: "IS_MASTER"
value: "{% if deploy_type in ['data-master', 'master'] %}true{% else %}false{% endif %}"
@@ -106,6 +112,13 @@ spec:
readOnly: true
- name: elasticsearch-storage
mountPath: /elasticsearch/persistent
+ readinessProbe:
+ exec:
+ command:
+ - "/usr/share/java/elasticsearch/probe/readiness.sh"
+ initialDelaySeconds: 10
+ timeoutSeconds: 30
+ periodSeconds: 5
volumes:
- name: elasticsearch
secret:
diff --git a/roles/openshift_logging_elasticsearch/vars/main.yml b/roles/openshift_logging_elasticsearch/vars/main.yml
index 20fa63543..09e2ee4d0 100644
--- a/roles/openshift_logging_elasticsearch/vars/main.yml
+++ b/roles/openshift_logging_elasticsearch/vars/main.yml
@@ -1,6 +1,6 @@
---
-__latest_es_version: "3_5"
-__allowed_es_versions: ["3_5", "3_6"]
+__latest_es_version: "3_6"
+__allowed_es_versions: ["3_5", "3_6", "3_7"]
__allowed_es_types: ["data-master", "data-client", "master", "client"]
__es_log_appenders: ['file', 'console']
__kibana_index_modes: ["unique", "shared_ops"]
diff --git a/roles/openshift_logging_fluentd/templates/fluentd.j2 b/roles/openshift_logging_fluentd/templates/fluentd.j2
index 88e039e3f..a4afb6618 100644
--- a/roles/openshift_logging_fluentd/templates/fluentd.j2
+++ b/roles/openshift_logging_fluentd/templates/fluentd.j2
@@ -36,6 +36,8 @@ spec:
limits:
cpu: {{ openshift_logging_fluentd_cpu_limit }}
memory: {{ openshift_logging_fluentd_memory_limit }}
+ requests:
+ memory: {{ openshift_logging_fluentd_memory_limit }}
volumeMounts:
- name: runlogjournal
mountPath: /run/log/journal
diff --git a/roles/openshift_logging_fluentd/vars/main.yml b/roles/openshift_logging_fluentd/vars/main.yml
index ec8e565c3..92a426952 100644
--- a/roles/openshift_logging_fluentd/vars/main.yml
+++ b/roles/openshift_logging_fluentd/vars/main.yml
@@ -1,5 +1,5 @@
---
-__latest_fluentd_version: "3_5"
-__allowed_fluentd_versions: ["3_5", "3_6"]
+__latest_fluentd_version: "3_6"
+__allowed_fluentd_versions: ["3_5", "3_6", "3_7"]
__allowed_fluentd_types: ["hosted", "secure-aggregator", "secure-host"]
__allowed_mux_client_modes: ["minimal", "maximal"]
diff --git a/roles/openshift_logging_kibana/templates/kibana.j2 b/roles/openshift_logging_kibana/templates/kibana.j2
index 512d99d06..da1386d3e 100644
--- a/roles/openshift_logging_kibana/templates/kibana.j2
+++ b/roles/openshift_logging_kibana/templates/kibana.j2
@@ -46,6 +46,8 @@ spec:
{% endif %}
{% if kibana_memory_limit is not none and kibana_memory_limit != "" %}
memory: "{{ kibana_memory_limit }}"
+ requests:
+ memory: "{{ kibana_memory_limit }}"
{% endif %}
{% endif %}
env:
@@ -82,6 +84,8 @@ spec:
{% endif %}
{% if kibana_proxy_memory_limit is not none and kibana_proxy_memory_limit != "" %}
memory: "{{ kibana_proxy_memory_limit }}"
+ requests:
+ memory: "{{ kibana_proxy_memory_limit }}"
{% endif %}
{% endif %}
ports:
diff --git a/roles/openshift_logging_kibana/vars/main.yml b/roles/openshift_logging_kibana/vars/main.yml
index 87b281c4b..241877a02 100644
--- a/roles/openshift_logging_kibana/vars/main.yml
+++ b/roles/openshift_logging_kibana/vars/main.yml
@@ -1,3 +1,3 @@
---
-__latest_kibana_version: "3_5"
-__allowed_kibana_versions: ["3_5", "3_6"]
+__latest_kibana_version: "3_6"
+__allowed_kibana_versions: ["3_5", "3_6", "3_7"]
diff --git a/roles/openshift_logging_mux/templates/mux.j2 b/roles/openshift_logging_mux/templates/mux.j2
index 70afe5cee..ff18d3270 100644
--- a/roles/openshift_logging_mux/templates/mux.j2
+++ b/roles/openshift_logging_mux/templates/mux.j2
@@ -45,6 +45,8 @@ spec:
{% endif %}
{% if mux_memory_limit is not none %}
memory: "{{mux_memory_limit}}"
+ requests:
+ memory: "{{mux_memory_limit}}"
{% endif %}
{% endif %}
ports:
diff --git a/roles/openshift_logging_mux/vars/main.yml b/roles/openshift_logging_mux/vars/main.yml
index 4234b74e2..e7b57f4b5 100644
--- a/roles/openshift_logging_mux/vars/main.yml
+++ b/roles/openshift_logging_mux/vars/main.yml
@@ -1,3 +1,3 @@
---
-__latest_mux_version: "3_5"
-__allowed_mux_versions: ["3_5", "3_6"]
+__latest_mux_version: "3_6"
+__allowed_mux_versions: ["3_5", "3_6", "3_7"]
diff --git a/roles/openshift_master/README.md b/roles/openshift_master/README.md
index fbf69c270..86fa57b50 100644
--- a/roles/openshift_master/README.md
+++ b/roles/openshift_master/README.md
@@ -17,7 +17,6 @@ From this role:
| Name | Default value | |
|---------------------------------------------------|-----------------------|-------------------------------------------------------------------------------|
-| openshift_master_debug_level | openshift_debug_level | Verbosity of the debug logs for master |
| openshift_node_ips | [] | List of the openshift node ip addresses to pre-register when master starts up |
| oreg_url | UNDEF | Default docker registry to use |
| oreg_url_master | UNDEF | Default docker registry to use, specifically on the master |
@@ -29,18 +28,10 @@ From this role:
| openshift_master_public_console_url | UNDEF | |
| openshift_master_saconfig_limit_secret_references | false | |
-From openshift_common:
-
-| Name | Default Value | |
-|-------------------------------|----------------|----------------------------------------|
-| openshift_debug_level | 2 | Global openshift debug log verbosity |
-| openshift_public_ip | UNDEF | Public IP address to use for this host |
-| openshift_hostname | UNDEF | hostname to use for this instance |
Dependencies
------------
-openshift_common
Example Playbook
----------------
diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml
index d70106276..71bb09a76 100644
--- a/roles/openshift_master/defaults/main.yml
+++ b/roles/openshift_master/defaults/main.yml
@@ -22,5 +22,24 @@ r_openshift_master_os_firewall_allow:
oreg_url: ''
oreg_host: "{{ oreg_url.split('/')[0] if '.' in oreg_url.split('/')[0] else '' }}"
-oreg_auth_credentials_path: "{{ openshift.common.data_dir }}/.docker"
+oreg_auth_credentials_path: "{{ r_openshift_master_data_dir }}/.docker"
oreg_auth_credentials_replace: False
+
+
+# NOTE
+# r_openshift_master_*_default may be defined external to this role.
+# openshift_use_*, if defined, may affect other roles or play behavior.
+r_openshift_master_use_openshift_sdn_default: "{{ openshift_use_openshift_sdn | default(True) }}"
+r_openshift_master_use_openshift_sdn: "{{ r_openshift_master_use_openshift_sdn_default }}"
+
+r_openshift_master_use_nuage_default: "{{ openshift_use_nuage | default(False) }}"
+r_openshift_master_use_nuage: "{{ r_openshift_master_use_nuage_default }}"
+
+r_openshift_master_use_contiv_default: "{{ openshift_use_contiv | default(False) }}"
+r_openshift_master_use_contiv: "{{ r_openshift_master_use_contiv_default }}"
+
+r_openshift_master_data_dir_default: "{{ openshift_data_dir | default('/var/lib/origin') }}"
+r_openshift_master_data_dir: "{{ r_openshift_master_data_dir_default }}"
+
+r_openshift_master_sdn_network_plugin_name_default: "{{ os_sdn_network_plugin_name | default('redhat/openshift-ovs-subnet') }}"
+r_openshift_master_sdn_network_plugin_name: "{{ r_openshift_master_sdn_network_plugin_name_default }}"
diff --git a/roles/openshift_master/meta/main.yml b/roles/openshift_master/meta/main.yml
index bd2383f61..b0237141b 100644
--- a/roles/openshift_master/meta/main.yml
+++ b/roles/openshift_master/meta/main.yml
@@ -29,4 +29,4 @@ dependencies:
- role: nickhammond.logrotate
- role: contiv
contiv_role: netmaster
- when: openshift.common.use_contiv | bool
+ when: openshift_use_contiv | default(False) | bool
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index a06defdb9..121261e94 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -47,9 +47,9 @@
when:
- not openshift.common.is_containerized | bool
-- name: Create openshift.common.data_dir
+- name: Create r_openshift_master_data_dir
file:
- path: "{{ openshift.common.data_dir }}"
+ path: "{{ r_openshift_master_data_dir }}"
state: directory
mode: 0755
owner: root
@@ -169,7 +169,7 @@
register: l_already_set
- set_fact:
- openshift_push_via_dns: "{{ (openshift_use_dnsmasq | default(true) and openshift.common.version_gte_3_6) or (l_already_set.stdout is defined and l_already_set.stdout | match('OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000')) }}"
+ openshift_push_via_dns: "{{ openshift.common.version_gte_3_6 or (l_already_set.stdout is defined and l_already_set.stdout | match('OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000')) }}"
- name: Set fact of all etcd host IPs
openshift_facts:
diff --git a/roles/openshift_master/tasks/systemd_units.yml b/roles/openshift_master/tasks/systemd_units.yml
index 782a35abe..7a918c57e 100644
--- a/roles/openshift_master/tasks/systemd_units.yml
+++ b/roles/openshift_master/tasks/systemd_units.yml
@@ -7,9 +7,16 @@
# openshift_master_config_dir is set.
- name: Set openshift_master_config_dir if unset
set_fact:
- openshift_master_config_dir: '/var/lib/origin'
+ openshift_master_config_dir: '/etc/origin/master'
when: openshift_master_config_dir is not defined
+# This play may be consumed outside the role, so we need to ensure that
+# r_openshift_master_data_dir is set.
+- name: Set r_openshift_master_data_dir if unset
+ set_fact:
+ r_openshift_master_data_dir: "{{ openshift_data_dir | default('/var/lib/origin') }}"
+ when: r_openshift_master_data_dir is not defined
+
- name: Remove the legacy master service if it exists
include: clean_systemd_units.yml
diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
index e8f7c47b0..f06448d71 100644
--- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
+++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
@@ -12,12 +12,12 @@ Requires={{ openshift.docker.service_name }}.service
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-api
Environment=GOTRACEBACK=crash
ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type}}-master-api
-ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master-api --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-api -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/log:/var/log -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/pki:/etc/pki:ro {{ openshift.master.master_image }}:${IMAGE_VERSION} start master api --config=${CONFIG_FILE} $OPTIONS
+ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master-api --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-api -v {{ r_openshift_master_data_dir }}:{{ r_openshift_master_data_dir }} -v /var/log:/var/log -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/pki:/etc/pki:ro {{ openshift.master.master_image }}:${IMAGE_VERSION} start master api --config=${CONFIG_FILE} $OPTIONS
ExecStartPost=/usr/bin/sleep 10
ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-master-api
LimitNOFILE=131072
LimitCORE=infinity
-WorkingDirectory={{ openshift.common.data_dir }}
+WorkingDirectory={{ r_openshift_master_data_dir }}
SyslogIdentifier={{ openshift.common.service_type }}-master-api
Restart=always
RestartSec=5s
diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
index 69db62f16..b7f36491b 100644
--- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
+++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
@@ -11,12 +11,12 @@ PartOf={{ openshift.docker.service_name }}.service
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
Environment=GOTRACEBACK=crash
ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type}}-master-controllers
-ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master-controllers --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/pki:/etc/pki:ro {{ openshift.master.master_image }}:${IMAGE_VERSION} start master controllers --config=${CONFIG_FILE} $OPTIONS
+ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master-controllers --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers -v {{ r_openshift_master_data_dir }}:{{ r_openshift_master_data_dir }} -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/pki:/etc/pki:ro {{ openshift.master.master_image }}:${IMAGE_VERSION} start master controllers --config=${CONFIG_FILE} $OPTIONS
ExecStartPost=/usr/bin/sleep 10
ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-master-controllers
LimitNOFILE=131072
LimitCORE=infinity
-WorkingDirectory={{ openshift.common.data_dir }}
+WorkingDirectory={{ r_openshift_master_data_dir }}
SyslogIdentifier={{ openshift.common.service_type }}-master-controllers
Restart=always
RestartSec=5s
diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2
index c14579435..d045b402b 100644
--- a/roles/openshift_master/templates/master.yaml.v1.j2
+++ b/roles/openshift_master/templates/master.yaml.v1.j2
@@ -106,7 +106,7 @@ etcdConfig:
clientCA: ca.crt
{% endif %}
keyFile: etcd.server.key
- storageDirectory: {{ openshift.common.data_dir }}/openshift.local.etcd
+ storageDirectory: {{ r_openshift_master_data_dir }}/openshift.local.etcd
{% endif %}
etcdStorageConfig:
kubernetesStoragePrefix: kubernetes.io
@@ -179,8 +179,8 @@ masterPublicURL: {{ openshift.master.public_api_url }}
networkConfig:
clusterNetworkCIDR: {{ openshift.master.sdn_cluster_network_cidr }}
hostSubnetLength: {{ openshift.master.sdn_host_subnet_length }}
-{% if openshift.common.use_openshift_sdn or openshift.common.use_nuage or openshift.common.use_contiv or openshift.common.sdn_network_plugin_name == 'cni' %}
- networkPluginName: {{ openshift.common.sdn_network_plugin_name }}
+{% if r_openshift_master_use_openshift_sdn or r_openshift_master_use_nuage or r_openshift_master_use_contiv or r_openshift_master_sdn_network_plugin_name == 'cni' %}
+ networkPluginName: {{ r_openshift_master_sdn_network_plugin_name }}
{% endif %}
# serviceNetworkCIDR must match kubernetesMasterConfig.servicesSubnet
serviceNetworkCIDR: {{ openshift.common.portal_net }}
diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2
index 0e78d2d23..02bfd6f62 100644
--- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2
+++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2
@@ -13,7 +13,7 @@ Environment=GOTRACEBACK=crash
ExecStart=/usr/bin/openshift start master api --config=${CONFIG_FILE} $OPTIONS
LimitNOFILE=131072
LimitCORE=infinity
-WorkingDirectory={{ openshift.common.data_dir }}
+WorkingDirectory={{ r_openshift_master_data_dir }}
SyslogIdentifier=atomic-openshift-master-api
Restart=always
RestartSec=5s
diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2
index 94928f88c..e284413f7 100644
--- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2
+++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2
@@ -17,7 +17,7 @@ Environment=GOTRACEBACK=crash
ExecStart=/usr/bin/openshift start master controllers --config=${CONFIG_FILE} $OPTIONS
LimitNOFILE=131072
LimitCORE=infinity
-WorkingDirectory={{ openshift.common.data_dir }}
+WorkingDirectory={{ r_openshift_master_data_dir }}
SyslogIdentifier={{ openshift.common.service_type }}-master-controllers
Restart=always
RestartSec=5s
diff --git a/roles/openshift_master_facts/filter_plugins/openshift_master.py b/roles/openshift_master_facts/filter_plugins/openshift_master.py
index e767772ce..5558f55cb 100644
--- a/roles/openshift_master_facts/filter_plugins/openshift_master.py
+++ b/roles/openshift_master_facts/filter_plugins/openshift_master.py
@@ -383,7 +383,7 @@ class OpenIDIdentityProvider(IdentityProviderOauthBase):
if 'extraAuthorizeParameters' in self._idp:
if 'include_granted_scopes' in self._idp['extraAuthorizeParameters']:
val = ansible_bool(self._idp['extraAuthorizeParameters'].pop('include_granted_scopes'))
- self._idp['extraAuthorizeParameters']['include_granted_scopes'] = val
+ self._idp['extraAuthorizeParameters']['include_granted_scopes'] = '"true"' if val else '"false"'
def validate(self):
''' validate this idp instance '''
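
The filter-plugin change above stops writing a bare boolean into extraAuthorizeParameters and instead writes the literal strings '"true"' or '"false"', presumably so the value survives config serialization as a string rather than a YAML boolean. A tiny illustration of the difference, using PyYAML purely as an analogy; the role renders master-config through its own templates, not PyYAML:

    import yaml

    before = {"extraAuthorizeParameters": {"include_granted_scopes": True}}
    after = {"extraAuthorizeParameters": {"include_granted_scopes": '"true"'}}

    print(yaml.safe_dump(before))  # include_granted_scopes: true    (a YAML bool)
    print(yaml.safe_dump(after))   # include_granted_scopes: '"true"' (a string)
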
diff --git a/roles/openshift_metrics/tasks/pre_install.yaml b/roles/openshift_metrics/tasks/pre_install.yaml
index 2e2013d40..d6756f9b9 100644
--- a/roles/openshift_metrics/tasks/pre_install.yaml
+++ b/roles/openshift_metrics/tasks/pre_install.yaml
@@ -10,7 +10,7 @@
is invalid, must be one of: emptydir, pv, dynamic
when:
- openshift_metrics_cassandra_storage_type not in openshift_metrics_cassandra_storage_types
- - "not {{ openshift_metrics_heapster_standalone | bool }}"
+ - not (openshift_metrics_heapster_standalone | bool)
- name: list existing secrets
command: >
diff --git a/roles/openshift_node/README.md b/roles/openshift_node/README.md
index fb0b494da..32670b18e 100644
--- a/roles/openshift_node/README.md
+++ b/roles/openshift_node/README.md
@@ -17,22 +17,12 @@ From this role:
| Name | Default value | |
|----------------------------|-----------------------|----------------------------------------------------------|
-| openshift_node_debug_level | openshift_debug_level | Verbosity of the debug logs for node |
| oreg_url | UNDEF (Optional) | Default docker registry to use |
| oreg_url_node | UNDEF (Optional) | Default docker registry to use, specifically on the node |
-From openshift_common:
-
-| Name | Default Value | |
-|-------------------------------|---------------------|---------------------|
-| openshift_debug_level | 2 | Global openshift debug log verbosity |
-| openshift_public_ip | UNDEF (Required) | Public IP address to use for this host |
-| openshift_hostname | UNDEF (Required) | hostname to use for this instance |
-
Dependencies
------------
-openshift_common
Example Playbook
----------------
diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml
index a7dad5b1f..f1e64f3aa 100644
--- a/roles/openshift_node/defaults/main.yml
+++ b/roles/openshift_node/defaults/main.yml
@@ -69,10 +69,10 @@ r_openshift_node_os_firewall_allow:
port: 443/tcp
- service: OpenShift OVS sdn
port: 4789/udp
- cond: openshift.common.use_openshift_sdn | default(true) | bool
+ cond: openshift_use_openshift_sdn | bool
- service: Calico BGP Port
port: 179/tcp
- cond: "{{ openshift.common.use_calico | bool }}"
+ cond: "{{ openshift_node_use_calico }}"
- service: Kubernetes service NodePort TCP
port: "{{ openshift_node_port_range | default('') }}/tcp"
cond: "{{ openshift_node_port_range is defined }}"
@@ -82,5 +82,27 @@ r_openshift_node_os_firewall_allow:
oreg_url: ''
oreg_host: "{{ oreg_url.split('/')[0] if '.' in oreg_url.split('/')[0] else '' }}"
-oreg_auth_credentials_path: "{{ openshift.common.data_dir }}/.docker"
+oreg_auth_credentials_path: "{{ openshift_node_data_dir }}/.docker"
oreg_auth_credentials_replace: False
+
+
+# NOTE
+# r_openshift_node_*_default may be defined external to this role.
+# openshift_use_*, if defined, may affect other roles or play behavior.
+openshift_node_use_openshift_sdn_default: "{{ openshift_use_openshift_sdn | default(True) }}"
+openshift_node_use_openshift_sdn: "{{ openshift_node_use_openshift_sdn_default }}"
+
+openshift_node_sdn_network_plugin_name_default: "{{ os_sdn_network_plugin_name | default('redhat/openshift-ovs-subnet') }}"
+openshift_node_sdn_network_plugin_name: "{{ openshift_node_sdn_network_plugin_name_default }}"
+
+openshift_node_use_calico_default: "{{ openshift_use_calico | default(False) }}"
+openshift_node_use_calico: "{{ openshift_node_use_calico_default }}"
+
+openshift_node_use_nuage_default: "{{ openshift_use_nuage | default(False) }}"
+openshift_node_use_nuage: "{{ openshift_node_use_nuage_default }}"
+
+openshift_node_use_contiv_default: "{{ openshift_use_contiv | default(False) }}"
+openshift_node_use_contiv: "{{ openshift_node_use_contiv_default }}"
+
+openshift_node_data_dir_default: "{{ openshift_data_dir | default('/var/lib/origin') }}"
+openshift_node_data_dir: "{{ openshift_node_data_dir_default }}"
diff --git a/roles/openshift_node/handlers/main.yml b/roles/openshift_node/handlers/main.yml
index 14ba48aba..855b0a8d8 100644
--- a/roles/openshift_node/handlers/main.yml
+++ b/roles/openshift_node/handlers/main.yml
@@ -3,7 +3,7 @@
systemd:
name: openvswitch
state: restarted
- when: (not skip_node_svc_handlers | default(False) | bool) and not (ovs_service_status_changed | default(false) | bool) and openshift.common.use_openshift_sdn | default(true) | bool
+ when: (not skip_node_svc_handlers | default(False) | bool) and not (ovs_service_status_changed | default(false) | bool) and openshift_node_use_openshift_sdn | bool
register: l_openshift_node_stop_openvswitch_result
until: not l_openshift_node_stop_openvswitch_result | failed
retries: 3
diff --git a/roles/openshift_node/meta/main.yml b/roles/openshift_node/meta/main.yml
index 3db980514..ce5ecb9d0 100644
--- a/roles/openshift_node/meta/main.yml
+++ b/roles/openshift_node/meta/main.yml
@@ -15,11 +15,9 @@ dependencies:
- role: openshift_node_facts
- role: lib_openshift
- role: lib_os_firewall
-- role: openshift_common
- role: openshift_clock
- role: openshift_docker
- role: openshift_node_certificates
when: not openshift_node_bootstrap
- role: openshift_cloud_provider
- role: openshift_node_dnsmasq
- when: openshift.common.use_dnsmasq | bool
diff --git a/roles/openshift_node/tasks/bootstrap.yml b/roles/openshift_node/tasks/bootstrap.yml
index cb1440283..b83b2c452 100644
--- a/roles/openshift_node/tasks/bootstrap.yml
+++ b/roles/openshift_node/tasks/bootstrap.yml
@@ -42,14 +42,25 @@
path: /etc/origin/.config_managed
register: rpmgenerated_config
-- name: Remove RPM generated config files if present
- file:
- path: "/etc/origin/{{ item }}"
- state: absent
- when:
- - rpmgenerated_config.stat.exists
- - openshift_deployment_type in ['openshift-enterprise', 'atomic-enterprise']
- with_items:
- - master
- - node
- - .config_managed
+- when: rpmgenerated_config.stat.exists
+ block:
+ - name: Remove RPM generated config files if present
+ file:
+ path: "/etc/origin/{{ item }}"
+ state: absent
+ with_items:
+ - master
+
+ # with_fileglob is a lookup and globs on the control host, not the remote
+ # node, so use find on the target instead; switch back if that ever changes.
+ - name: find all files in /etc/origin/node so we can remove them
+ find:
+ path: /etc/origin/node/
+ register: find_results
+
+ - name: Remove everything except the resolv.conf required for node
+ file:
+ path: "{{ item.path }}"
+ state: absent
+ when: "'resolv.conf' not in item.path and 'node-dnsmasq.conf' not in item.path"
+ with_items: "{{ find_results.files }}"
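
The block above finds every file under /etc/origin/node and removes all of them except resolv.conf and node-dnsmasq.conf: a path is deleted only when it matches neither name, hence the and in the when clause. A plain-Python rendering of that keep/delete predicate (the sample paths are made up):

    def should_remove(path):
        # keep anything that is the resolv.conf or node-dnsmasq.conf the node needs
        return "resolv.conf" not in path and "node-dnsmasq.conf" not in path

    for p in ["/etc/origin/node/resolv.conf",
              "/etc/origin/node/node-dnsmasq.conf",
              "/etc/origin/node/system:node:example.kubeconfig"]:
        print(p, "remove" if should_remove(p) else "keep")
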
diff --git a/roles/openshift_node/tasks/config.yml b/roles/openshift_node/tasks/config.yml
index 8210fd881..7af3f54b5 100644
--- a/roles/openshift_node/tasks/config.yml
+++ b/roles/openshift_node/tasks/config.yml
@@ -22,7 +22,7 @@
daemon_reload: yes
when:
- openshift.common.is_containerized | bool
- - openshift.common.use_openshift_sdn | default(true) | bool
+ - openshift_node_use_openshift_sdn | default(true) | bool
register: ovs_start_result
until: not ovs_start_result | failed
retries: 3
diff --git a/roles/openshift_node/tasks/install.yml b/roles/openshift_node/tasks/install.yml
index 9bf4ed879..02b8ee67c 100644
--- a/roles/openshift_node/tasks/install.yml
+++ b/roles/openshift_node/tasks/install.yml
@@ -13,7 +13,7 @@
name: "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version | oo_image_tag_to_rpm_version(include_dash=True) }}"
state: present
when:
- - openshift.common.use_openshift_sdn | default(true) | bool
+ - openshift_node_use_openshift_sdn | bool
- name: Install conntrack-tools package
package:
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index 60a25dcc6..22ff6dfd2 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -49,6 +49,13 @@
state: restarted
when: openshift_use_crio | default(false)
+- name: restart NetworkManager to ensure resolv.conf is present
+ systemd:
+ name: NetworkManager
+ enabled: yes
+ state: restarted
+ when: openshift_node_bootstrap | bool
+
# The atomic-openshift-node service will set this parameter on
# startup, but if the network service is restarted this setting is
# lost. Reference: https://bugzilla.redhat.com/show_bug.cgi?id=1372388
@@ -121,4 +128,4 @@
##### END Storage #####
- include: config/workaround-bz1331590-ovs-oom-fix.yml
- when: openshift.common.use_openshift_sdn | default(true) | bool
+ when: openshift_node_use_openshift_sdn | default(true) | bool
diff --git a/roles/openshift_node/tasks/systemd_units.yml b/roles/openshift_node/tasks/systemd_units.yml
index 4687400cd..6b4490f61 100644
--- a/roles/openshift_node/tasks/systemd_units.yml
+++ b/roles/openshift_node/tasks/systemd_units.yml
@@ -26,7 +26,7 @@
- name: Install OpenvSwitch system containers
include: openvswitch_system_container.yml
when:
- - openshift.common.use_openshift_sdn | default(true) | bool
+ - openshift_node_use_openshift_sdn | bool
- openshift.common.is_openvswitch_system_container | bool
- block:
@@ -39,7 +39,7 @@
- include: config/install-ovs-docker-service-file.yml
when:
- openshift.common.is_containerized | bool
- - openshift.common.use_openshift_sdn | default(true) | bool
+ - openshift_node_use_openshift_sdn | bool
- not openshift.common.is_openvswitch_system_container | bool
- include: config/configure-node-settings.yml
diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2
index 711afcadb..7049f7189 100644
--- a/roles/openshift_node/templates/node.yaml.v1.j2
+++ b/roles/openshift_node/templates/node.yaml.v1.j2
@@ -21,8 +21,6 @@ kubeletArguments: {{ openshift.node.kubelet_args | default(None) | to_padded_yam
- remote
container-runtime-endpoint:
- /var/run/crio.sock
- experimental-cri:
- - 'true'
image-service-endpoint:
- /var/run/crio.sock
node-labels:
@@ -39,15 +37,15 @@ masterClientConnectionOverrides:
qps: 100
{% endif %}
masterKubeConfig: system:node:{{ openshift.common.hostname }}.kubeconfig
-{% if openshift.common.use_openshift_sdn | bool %}
-networkPluginName: {{ openshift.common.sdn_network_plugin_name }}
+{% if openshift_node_use_openshift_sdn | bool %}
+networkPluginName: {{ openshift_node_sdn_network_plugin_name }}
{% endif %}
# networkConfig struct introduced in origin 1.0.6 and OSE 3.0.2 which
# deprecates networkPluginName above. The two should match.
networkConfig:
mtu: {{ openshift.node.sdn_mtu }}
-{% if openshift.common.use_openshift_sdn | bool or openshift.common.use_nuage | bool or openshift.common.use_contiv | bool or openshift.common.sdn_network_plugin_name == 'cni' %}
- networkPluginName: {{ openshift.common.sdn_network_plugin_name }}
+{% if openshift_node_use_openshift_sdn | bool or openshift_node_use_nuage | bool or openshift_node_use_contiv | bool or openshift_node_sdn_network_plugin_name == 'cni' %}
+ networkPluginName: {{ openshift_node_sdn_network_plugin_name }}
{% endif %}
{% if openshift.node.set_node_ip | bool %}
nodeIP: {{ openshift.common.ip }}
@@ -68,7 +66,7 @@ servingInfo:
- {{ cipher_suite }}
{% endfor %}
{% endif %}
-volumeDirectory: {{ openshift.common.data_dir }}/openshift.local.volumes
+volumeDirectory: {{ openshift_node_data_dir }}/openshift.local.volumes
proxyArguments:
proxy-mode:
- {{ openshift.node.proxy_mode }}
diff --git a/roles/openshift_node/templates/openshift.docker.node.service b/roles/openshift_node/templates/openshift.docker.node.service
index 639b6f6c8..57094f28e 100644
--- a/roles/openshift_node/templates/openshift.docker.node.service
+++ b/roles/openshift_node/templates/openshift.docker.node.service
@@ -4,7 +4,7 @@ After={{ openshift.docker.service_name }}.service
After=openvswitch.service
PartOf={{ openshift.docker.service_name }}.service
Requires={{ openshift.docker.service_name }}.service
-{% if openshift.common.use_openshift_sdn %}
+{% if openshift_node_use_openshift_sdn %}
Wants=openvswitch.service
After=ovsdb-server.service
After=ovs-vswitchd.service
@@ -21,7 +21,7 @@ EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node-dep
ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type }}-node
ExecStartPre=/usr/bin/cp /etc/origin/node/node-dnsmasq.conf /etc/dnsmasq.d/
ExecStartPre=/usr/bin/dbus-send --system --dest=uk.org.thekelleys.dnsmasq /uk/org/thekelleys/dnsmasq uk.org.thekelleys.SetDomainServers array:string:/in-addr.arpa/127.0.0.1,/{{ openshift.common.dns_domain }}/127.0.0.1
-ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro,rslave -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /var/lib/cni:/var/lib/cni -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS -v /etc/pki:/etc/pki:ro {{ openshift.node.node_image }}:${IMAGE_VERSION}
+ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro,rslave -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift_node_data_dir }}:{{ openshift_node_data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /var/lib/cni:/var/lib/cni -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS -v /etc/pki:/etc/pki:ro {{ openshift.node.node_image }}:${IMAGE_VERSION}
ExecStartPost=/usr/bin/sleep 10
ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-node
ExecStopPost=/usr/bin/rm /etc/dnsmasq.d/node-dnsmasq.conf
diff --git a/roles/openshift_node_dnsmasq/meta/main.yml b/roles/openshift_node_dnsmasq/meta/main.yml
index 84035b88c..d80ed1b72 100644
--- a/roles/openshift_node_dnsmasq/meta/main.yml
+++ b/roles/openshift_node_dnsmasq/meta/main.yml
@@ -12,5 +12,4 @@ galaxy_info:
categories:
- cloud
dependencies:
-- role: openshift_common
- role: openshift_node_facts
diff --git a/roles/openshift_node_upgrade/README.md b/roles/openshift_node_upgrade/README.md
index 4e6229bfb..5ad994df9 100644
--- a/roles/openshift_node_upgrade/README.md
+++ b/roles/openshift_node_upgrade/README.md
@@ -32,14 +32,12 @@ From openshift.common:
| Name | Default Value | |
|------------------------------------|---------------------|---------------------|
| openshift.common.config_base |---------------------|---------------------|
-| openshift.common.data_dir |---------------------|---------------------|
| openshift.common.hostname |---------------------|---------------------|
| openshift.common.http_proxy |---------------------|---------------------|
| openshift.common.is_atomic |---------------------|---------------------|
| openshift.common.is_containerized |---------------------|---------------------|
| openshift.common.portal_net |---------------------|---------------------|
| openshift.common.service_type |---------------------|---------------------|
-| openshift.common.use_openshift_sdn |---------------------|---------------------|
From openshift.master:
@@ -58,7 +56,7 @@ From openshift.node:
Dependencies
------------
-openshift_common
+
TODO
diff --git a/roles/openshift_node_upgrade/defaults/main.yml b/roles/openshift_node_upgrade/defaults/main.yml
index ed97d539c..3d8704308 100644
--- a/roles/openshift_node_upgrade/defaults/main.yml
+++ b/roles/openshift_node_upgrade/defaults/main.yml
@@ -1 +1,6 @@
---
+openshift_use_openshift_sdn: True
+os_sdn_network_plugin_name: "redhat/openshift-ovs-subnet"
+
+openshift_node_data_dir_default: "{{ openshift_data_dir | default('/var/lib/origin') }}"
+openshift_node_data_dir: "{{ openshift_node_data_dir_default }}"
diff --git a/roles/openshift_node_upgrade/handlers/main.yml b/roles/openshift_node_upgrade/handlers/main.yml
index d31b899cf..90d80855e 100644
--- a/roles/openshift_node_upgrade/handlers/main.yml
+++ b/roles/openshift_node_upgrade/handlers/main.yml
@@ -6,7 +6,7 @@
when:
- not skip_node_svc_handlers | default(False) | bool
- not (ovs_service_status_changed | default(false) | bool)
- - openshift.common.use_openshift_sdn | default(true) | bool
+ - openshift_use_openshift_sdn | bool
register: l_openshift_node_upgrade_stop_openvswitch_result
until: not l_openshift_node_upgrade_stop_openvswitch_result | failed
retries: 3
diff --git a/roles/openshift_node_upgrade/meta/main.yml b/roles/openshift_node_upgrade/meta/main.yml
index 2a36d8945..a810b01dc 100644
--- a/roles/openshift_node_upgrade/meta/main.yml
+++ b/roles/openshift_node_upgrade/meta/main.yml
@@ -11,4 +11,3 @@ galaxy_info:
- 7
dependencies:
- role: lib_utils
-- role: openshift_common
diff --git a/roles/openshift_node_upgrade/tasks/main.yml b/roles/openshift_node_upgrade/tasks/main.yml
index bc092c26c..e34319186 100644
--- a/roles/openshift_node_upgrade/tasks/main.yml
+++ b/roles/openshift_node_upgrade/tasks/main.yml
@@ -44,7 +44,7 @@
changed_when: "'Downloaded newer image' in pull_result.stdout"
when:
- openshift.common.is_containerized | bool
- - openshift.common.use_openshift_sdn | default(true) | bool
+ - openshift_use_openshift_sdn | bool
- include: docker/upgrade.yml
vars:
@@ -142,7 +142,7 @@
# End Disable Swap Block
- name: Reset selinux context
- command: restorecon -RF {{ openshift.common.data_dir }}/openshift.local.volumes
+ command: restorecon -RF {{ openshift_node_data_dir }}/openshift.local.volumes
when:
- ansible_selinux is defined
- ansible_selinux.status == 'enabled'
diff --git a/roles/openshift_node_upgrade/tasks/systemd_units.yml b/roles/openshift_node_upgrade/tasks/systemd_units.yml
index 4e9550150..afff2f8ba 100644
--- a/roles/openshift_node_upgrade/tasks/systemd_units.yml
+++ b/roles/openshift_node_upgrade/tasks/systemd_units.yml
@@ -4,7 +4,7 @@
# - openshift_image_tag
# - openshift.common.is_containerized
# - openshift.node.ovs_image
-# - openshift.common.use_openshift_sdn
+# - openshift_use_openshift_sdn
# - openshift.common.service_type
# - openshift.node.debug_level
# - openshift.common.config_base
@@ -28,10 +28,10 @@
when: openshift.common.is_containerized | bool
- include: config/workaround-bz1331590-ovs-oom-fix.yml
- when: openshift.common.use_openshift_sdn | default(true) | bool
+ when: openshift_use_openshift_sdn | bool
- include: config/install-ovs-docker-service-file.yml
- when: openshift.common.is_containerized | bool and openshift.common.use_openshift_sdn | default(true) | bool
+ when: openshift.common.is_containerized | bool and openshift_use_openshift_sdn | bool
- include: config/configure-node-settings.yml
- include: config/configure-proxy-settings.yml
diff --git a/roles/openshift_node_upgrade/templates/openshift.docker.node.service b/roles/openshift_node_upgrade/templates/openshift.docker.node.service
index 639b6f6c8..451412ab0 100644
--- a/roles/openshift_node_upgrade/templates/openshift.docker.node.service
+++ b/roles/openshift_node_upgrade/templates/openshift.docker.node.service
@@ -4,7 +4,7 @@ After={{ openshift.docker.service_name }}.service
After=openvswitch.service
PartOf={{ openshift.docker.service_name }}.service
Requires={{ openshift.docker.service_name }}.service
-{% if openshift.common.use_openshift_sdn %}
+{% if openshift_use_openshift_sdn %}
Wants=openvswitch.service
After=ovsdb-server.service
After=ovs-vswitchd.service
@@ -21,7 +21,7 @@ EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node-dep
ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type }}-node
ExecStartPre=/usr/bin/cp /etc/origin/node/node-dnsmasq.conf /etc/dnsmasq.d/
ExecStartPre=/usr/bin/dbus-send --system --dest=uk.org.thekelleys.dnsmasq /uk/org/thekelleys/dnsmasq uk.org.thekelleys.SetDomainServers array:string:/in-addr.arpa/127.0.0.1,/{{ openshift.common.dns_domain }}/127.0.0.1
-ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro,rslave -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /var/lib/cni:/var/lib/cni -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS -v /etc/pki:/etc/pki:ro {{ openshift.node.node_image }}:${IMAGE_VERSION}
+ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro,rslave -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift_node_data_dir }}:{{ openshift_node_data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /var/lib/cni:/var/lib/cni -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS -v /etc/pki:/etc/pki:ro {{ openshift.node.node_image }}:${IMAGE_VERSION}
ExecStartPost=/usr/bin/sleep 10
ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-node
ExecStopPost=/usr/bin/rm /etc/dnsmasq.d/node-dnsmasq.conf
diff --git a/roles/openshift_persistent_volumes/README.md b/roles/openshift_persistent_volumes/README.md
index 1489cb0bd..0407d6ef1 100644
--- a/roles/openshift_persistent_volumes/README.md
+++ b/roles/openshift_persistent_volumes/README.md
@@ -17,13 +17,6 @@ From this role:
| persistent_volume_claims | [] | List of persistent volume claim dictionaries, keys: name, capacity, access_modes |
-From openshift_common:
-
-| Name | Default Value | |
-|-------------------------------|----------------|----------------------------------------|
-| openshift_debug_level | 2 | Global openshift debug log verbosity |
-
-
Dependencies
------------
diff --git a/roles/openshift_persistent_volumes/meta/main.yml b/roles/openshift_persistent_volumes/meta/main.yml
index 25e5a38dd..8d3d010e4 100644
--- a/roles/openshift_persistent_volumes/meta/main.yml
+++ b/roles/openshift_persistent_volumes/meta/main.yml
@@ -10,5 +10,4 @@ galaxy_info:
versions:
- 7
dependencies:
-- role: openshift_common
- role: openshift_hosted_facts
diff --git a/roles/openshift_prometheus/README.md b/roles/openshift_prometheus/README.md
new file mode 100644
index 000000000..c5a44bffb
--- /dev/null
+++ b/roles/openshift_prometheus/README.md
@@ -0,0 +1,95 @@
+OpenShift Prometheus
+====================
+
+OpenShift Prometheus Installation
+
+Requirements
+------------
+
+
+Role Variables
+--------------
+
+For default values, see [`defaults/main.yaml`](defaults/main.yaml).
+
+- `openshift_prometheus_state`: `present` to install/update, `absent` to uninstall.
+
+- `openshift_prometheus_namespace`: project (i.e. namespace) where the components will be
+ deployed.
+
+- `openshift_prometheus_replicas`: The number of replicas for the prometheus deployment.
+
+- `openshift_prometheus_node_selector`: Selector for the nodes prometheus will be deployed on.
+
+- `openshift_prometheus_image_<COMPONENT>`: The image to use for the given component.
+
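+For example, these variables could be set in an inventory or vars file (the
+values below simply mirror the role defaults and are illustrative):
+
+```
+openshift_prometheus_state: present
+openshift_prometheus_namespace: prometheus
+openshift_prometheus_replicas: 1
+openshift_prometheus_node_selector: {"region":"infra"}
+openshift_prometheus_image_prometheus: "openshift/prometheus:v2.0.0-dev"
+```
+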
+## Storage related variables
+Each prometheus component (prometheus, alertmanager, alert-buffer, oauth-proxy) can have its PV claim configured by setting the corresponding role variables:
+```
+openshift_prometheus_<COMPONENT>_storage_type: <VALUE>
+openshift_prometheus_<COMPONENT>_pvc_(name|size|access_modes|pv_selector): <VALUE>
+```
+e.g.
+```
+openshift_prometheus_storage_type: pvc
+openshift_prometheus_alertmanager_pvc_name: alertmanager
+openshift_prometheus_alertbuffer_pvc_size: 10G
+openshift_prometheus_pvc_access_modes: [ReadWriteOnce]
+```
+
+## Additional Alert Rules file variable
+An external file with alert rules can be added by setting its path in the additional rules variable:
+```
+openshift_prometheus_additional_rules_file: <PATH>
+```
+
+The file content should be in Prometheus alert rules format.
+The following example sets a rule that fires an alert when one of the cluster nodes is down:
+
+```
+groups:
+- name: example-rules
+ interval: 30s # defaults to global interval
+ rules:
+ - alert: Node Down
+ expr: up{job="kubernetes-nodes"} == 0
+ annotations:
+ miqTarget: "ContainerNode"
+ severity: "HIGH"
+ message: "{{ '{{' }}{{ '$labels.instance' }}{{ '}}' }} is down"
+```
+
+
+## Additional variables to control resource limits
+Each prometheus component (prometheus, alertmanager, alert-buffer, oauth-proxy) can specify cpu and memory limits and requests by setting
+the corresponding role variable:
+```
+openshift_prometheus_<COMPONENT>_(limits|requests)_(memory|cpu): <VALUE>
+```
+e.g.
+```
+openshift_prometheus_alertmanager_limits_memory: 1Gi
+openshift_prometheus_oauth_proxy_requests_cpu: 100
+```
+
+Dependencies
+------------
+
+openshift_facts
+
+
+Example Playbook
+----------------
+
+```
+- name: Configure openshift-prometheus
+ hosts: oo_first_master
+ roles:
+ - role: openshift_prometheus
+```
+
+License
+-------
+
+Apache License, Version 2.0
+
diff --git a/roles/openshift_prometheus/defaults/main.yaml b/roles/openshift_prometheus/defaults/main.yaml
new file mode 100644
index 000000000..18d6a1645
--- /dev/null
+++ b/roles/openshift_prometheus/defaults/main.yaml
@@ -0,0 +1,74 @@
+---
+# defaults file for openshift_prometheus
+openshift_prometheus_state: present
+
+openshift_prometheus_namespace: prometheus
+
+openshift_prometheus_replicas: 1
+openshift_prometheus_node_selector: {"region":"infra"}
+
+# images
+openshift_prometheus_image_proxy: "openshift/oauth-proxy:v1.0.0"
+openshift_prometheus_image_prometheus: "openshift/prometheus:v2.0.0-dev"
+openshift_prometheus_image_alertmanager: "openshift/prometheus-alertmanager:dev"
+openshift_prometheus_image_alertbuffer: "ilackarms/message-buffer"
+
+# additional prometheus rules file
+openshift_prometheus_additional_rules_file: null
+
+# All the required exports
+openshift_prometheus_pv_exports:
+ - prometheus
+ - prometheus-alertmanager
+ - prometheus-alertbuffer
+# PV template files and their created object names
+openshift_prometheus_pv_data:
+ - pv_name: prometheus
+ pv_template: prom-pv-server.yml
+ pv_label: Prometheus Server PV
+ - pv_name: prometheus-alertmanager
+ pv_template: prom-pv-alertmanager.yml
+ pv_label: Prometheus Alertmanager PV
+ - pv_name: prometheus-alertbuffer
+ pv_template: prom-pv-alertbuffer.yml
+ pv_label: Prometheus Alert Buffer PV
+
+# Hostname/IP of the NFS server. Defaults to the first host in the [nfs] group (currently the first master)
+openshift_prometheus_nfs_server: "{{ groups.nfs.0 }}"
+
+# storage
+openshift_prometheus_storage_type: pvc
+openshift_prometheus_pvc_name: prometheus
+openshift_prometheus_pvc_size: 10G
+openshift_prometheus_pvc_access_modes: [ReadWriteOnce]
+openshift_prometheus_pvc_pv_selector: {}
+
+openshift_prometheus_alertmanager_storage_type: pvc
+openshift_prometheus_alertmanager_pvc_name: prometheus-alertmanager
+openshift_prometheus_alertmanager_pvc_size: 10G
+openshift_prometheus_alertmanager_pvc_access_modes: [ReadWriteOnce]
+openshift_prometheus_alertmanager_pvc_pv_selector: {}
+
+openshift_prometheus_alertbuffer_storage_type: pvc
+openshift_prometheus_alertbuffer_pvc_name: prometheus-alertbuffer
+openshift_prometheus_alertbuffer_pvc_size: 10G
+openshift_prometheus_alertbuffer_pvc_access_modes: [ReadWriteOnce]
+openshift_prometheus_alertbuffer_pvc_pv_selector: {}
+
+# container resources
+openshift_prometheus_cpu_limit: null
+openshift_prometheus_memory_limit: null
+openshift_prometheus_cpu_requests: null
+openshift_prometheus_memory_requests: null
+openshift_prometheus_alertmanager_cpu_limit: null
+openshift_prometheus_alertmanager_memory_limit: null
+openshift_prometheus_alertmanager_cpu_requests: null
+openshift_prometheus_alertmanager_memory_requests: null
+openshift_prometheus_alertbuffer_cpu_limit: null
+openshift_prometheus_alertbuffer_memory_limit: null
+openshift_prometheus_alertbuffer_cpu_requests: null
+openshift_prometheus_alertbuffer_memory_requests: null
+openshift_prometheus_oauth_proxy_cpu_limit: null
+openshift_prometheus_oauth_proxy_memory_limit: null
+openshift_prometheus_oauth_proxy_cpu_requests: null
+openshift_prometheus_oauth_proxy_memory_requests: null
diff --git a/roles/openshift_prometheus/files/openshift_prometheus.exports b/roles/openshift_prometheus/files/openshift_prometheus.exports
new file mode 100644
index 000000000..3ccedb1fd
--- /dev/null
+++ b/roles/openshift_prometheus/files/openshift_prometheus.exports
@@ -0,0 +1,3 @@
+/exports/prometheus *(rw,no_root_squash,no_wdelay)
+/exports/prometheus-alertmanager *(rw,no_root_squash,no_wdelay)
+/exports/prometheus-alertbuffer *(rw,no_root_squash,no_wdelay)
diff --git a/roles/openshift_prometheus/meta/main.yaml b/roles/openshift_prometheus/meta/main.yaml
new file mode 100644
index 000000000..33188bb7e
--- /dev/null
+++ b/roles/openshift_prometheus/meta/main.yaml
@@ -0,0 +1,19 @@
+---
+galaxy_info:
+ author: OpenShift Development <dev@lists.openshift.redhat.com>
+ description: Deploy OpenShift prometheus integration for the cluster
+ company: Red Hat, Inc.
+ license: license (Apache)
+ min_ansible_version: 2.2
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ - name: Fedora
+ versions:
+ - all
+ categories:
+ - openshift
+dependencies:
+- { role: lib_openshift }
+- { role: openshift_facts }
diff --git a/roles/openshift_prometheus/tasks/create_pvs.yaml b/roles/openshift_prometheus/tasks/create_pvs.yaml
new file mode 100644
index 000000000..4e79da05f
--- /dev/null
+++ b/roles/openshift_prometheus/tasks/create_pvs.yaml
@@ -0,0 +1,36 @@
+---
+# Check for existence and then conditionally:
+# - evaluate templates
+# - create PVs
+#
+# These tasks idempotently create the required Prometheus PV objects. Do not
+# call this file directly. This file is intended to be run as an
+# include that has a 'with_items' attached to it. Hence the use below
+# of variables like "{{ item.pv_label }}"
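+#
+# A minimal usage sketch (this is how nfs.yaml includes it):
+#
+#   - include: create_pvs.yaml
+#     with_items: "{{ openshift_prometheus_pv_data }}"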
+
+- name: "Check if the {{ item.pv_label }} template has been created already"
+ oc_obj:
+ namespace: "{{ openshift_prometheus_namespace }}"
+ state: list
+ kind: pv
+ name: "{{ item.pv_name }}"
+ register: prom_pv_check
+
+# Skip all of this if the PV already exists
+- block:
+ - name: "Ensure the {{ item.pv_label }} template is evaluated"
+ template:
+ src: "{{ item.pv_template }}.j2"
+ dest: "{{ tempdir }}/templates/{{ item.pv_template }}"
+
+ - name: "Ensure {{ item.pv_label }} is created"
+ oc_obj:
+ namespace: "{{ openshift_prometheus_namespace }}"
+ kind: pv
+ name: "{{ item.pv_name }}"
+ state: present
+ delete_after: True
+ files:
+ - "{{ tempdir }}/templates/{{ item.pv_template }}"
+ when:
+ - not prom_pv_check.results.results.0
diff --git a/roles/openshift_prometheus/tasks/install_prometheus.yaml b/roles/openshift_prometheus/tasks/install_prometheus.yaml
new file mode 100644
index 000000000..93bdda3e8
--- /dev/null
+++ b/roles/openshift_prometheus/tasks/install_prometheus.yaml
@@ -0,0 +1,241 @@
+---
+
+# namespace
+- name: Add prometheus project
+ oc_project:
+ state: "{{ state }}"
+ name: "{{ openshift_prometheus_namespace }}"
+ node_selector: "{{ openshift_prometheus_node_selector | oo_selector_to_string_list() }}"
+ description: Prometheus
+
+# secrets
+- name: Set alert and prometheus secrets
+ oc_secret:
+ state: "{{ state }}"
+ name: "{{ item }}-proxy"
+ namespace: "{{ openshift_prometheus_namespace }}"
+ contents:
+ - path: session_secret
+ data: "{{ 43 | oo_random_word }}="
+ with_items:
+ - prometheus
+ - alerts
+
+# serviceaccount
+- name: create prometheus serviceaccount
+ oc_serviceaccount:
+ state: "{{ state }}"
+ name: prometheus
+ namespace: "{{ openshift_prometheus_namespace }}"
+  # TODO add annotations when supported
+ # annotations:
+ # serviceaccounts.openshift.io/oauth-redirectreference.prom: '{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"prometheus"}}'
+ # serviceaccounts.openshift.io/oauth-redirectreference.alerts: '{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"alerts"}}'
+
+ secrets:
+ - prometheus-secrets
+ changed_when: no
+
+# TODO remove this when annotations are supported by oc_serviceaccount
+- name: annotate serviceaccount
+ command: >
+ {{ openshift.common.client_binary }} annotate --overwrite -n {{ openshift_prometheus_namespace }}
+ serviceaccount prometheus
+ serviceaccounts.openshift.io/oauth-redirectreference.prom='{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"prometheus"}}'
+ serviceaccounts.openshift.io/oauth-redirectreference.alerts='{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"alerts"}}'
+
+
+# create clusterrolebinding for prometheus serviceaccount
+- name: Set cluster-reader permissions for prometheus
+ oc_adm_policy_user:
+ state: "{{ state }}"
+ namespace: "{{ openshift_prometheus_namespace }}"
+ resource_kind: cluster-role
+ resource_name: cluster-reader
+ user: "system:serviceaccount:{{ openshift_prometheus_namespace }}:prometheus"
+
+
+######################################################################
+# NFS
+# In the case that we are not running on a cloud provider, volumes must be statically provisioned
+
+- include: nfs.yaml
+ when: not (openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce'))
+
+
+# create prometheus and alerts services
+# TODO join into 1 task with loop
+- name: Create prometheus service
+ oc_service:
+ state: "{{ state }}"
+ name: "{{ item.name }}"
+ namespace: "{{ openshift_prometheus_namespace }}"
+ selector:
+ app: prometheus
+ labels:
+ name: "{{ item.name }}"
+ # TODO add annotations when supported
+ # annotations:
+ # service.alpha.openshift.io/serving-cert-secret-name: "{{item.name}}-tls"
+ ports:
+ - port: 443
+ targetPort: 8443
+ with_items:
+ - name: prometheus
+
+- name: Create alerts service
+ oc_service:
+ state: "{{ state }}"
+ name: "{{ item.name }}"
+ namespace: "{{ openshift_prometheus_namespace }}"
+ selector:
+ app: prometheus
+ labels:
+ name: "{{ item.name }}"
+ # TODO add annotations when supported
+ # annotations:
+ # service.alpha.openshift.io/serving-cert-secret-name: "{{item.name}}-tls"
+ ports:
+ - port: 443
+ targetPort: 9443
+ with_items:
+ - name: alerts
+
+
+# Annotate services with secret name
+# TODO remove this when annotations are supported by oc_service
+- name: annotate prometheus service
+ command: >
+ {{ openshift.common.client_binary }} annotate --overwrite -n {{ openshift_prometheus_namespace }}
+ service prometheus 'service.alpha.openshift.io/serving-cert-secret-name=prometheus-tls'
+
+- name: annotate alerts service
+ command: >
+ {{ openshift.common.client_binary }} annotate --overwrite -n {{ openshift_prometheus_namespace }}
+ service alerts 'service.alpha.openshift.io/serving-cert-secret-name=prometheus-alerts-tls'
+
+# create prometheus and alerts routes
+- name: create prometheus and alerts routes
+ oc_route:
+ state: "{{ state }}"
+ name: "{{ item.name }}"
+ namespace: "{{ openshift_prometheus_namespace }}"
+ service_name: "{{ item.name }}"
+ tls_termination: reencrypt
+ with_items:
+ - name: prometheus
+ - name: alerts
+
+# Storage
+- name: create prometheus pvc
+ oc_pvc:
+ namespace: "{{ openshift_prometheus_namespace }}"
+ name: "{{ openshift_prometheus_pvc_name }}"
+ access_modes: "{{ openshift_prometheus_pvc_access_modes }}"
+ volume_capacity: "{{ openshift_prometheus_pvc_size }}"
+ selector: "{{ openshift_prometheus_pvc_pv_selector }}"
+
+- name: create alertmanager pvc
+ oc_pvc:
+ namespace: "{{ openshift_prometheus_namespace }}"
+ name: "{{ openshift_prometheus_alertmanager_pvc_name }}"
+ access_modes: "{{ openshift_prometheus_alertmanager_pvc_access_modes }}"
+ volume_capacity: "{{ openshift_prometheus_alertmanager_pvc_size }}"
+ selector: "{{ openshift_prometheus_alertmanager_pvc_pv_selector }}"
+
+- name: create alertbuffer pvc
+ oc_pvc:
+ namespace: "{{ openshift_prometheus_namespace }}"
+ name: "{{ openshift_prometheus_alertbuffer_pvc_name }}"
+ access_modes: "{{ openshift_prometheus_alertbuffer_pvc_access_modes }}"
+ volume_capacity: "{{ openshift_prometheus_alertbuffer_pvc_size }}"
+ selector: "{{ openshift_prometheus_alertbuffer_pvc_pv_selector }}"
+
+# create prometheus deployment
+- name: Set prometheus deployment template
+ template:
+ src: prometheus_deployment.j2
+ dest: "{{ tempdir }}/templates/prometheus.yaml"
+ vars:
+ namespace: "{{ openshift_prometheus_namespace }}"
+ prom_replicas: "{{ openshift_prometheus_replicas }}"
+
+- name: Set prometheus deployment
+ oc_obj:
+ state: "{{ state }}"
+ name: "prometheus"
+ namespace: "{{ openshift_prometheus_namespace }}"
+ kind: deployment
+ files:
+ - "{{ tempdir }}/templates/prometheus.yaml"
+ delete_after: true
+
+# prometheus configmap
+# Copy the additional rules file if it is defined
+- name: Copy additional rules file to host
+ copy:
+ src: "{{ openshift_prometheus_additional_rules_file }}"
+ dest: "{{ tempdir }}/prometheus.additional.rules"
+ when:
+ - openshift_prometheus_additional_rules_file is defined
+ - openshift_prometheus_additional_rules_file is not none
+ - openshift_prometheus_additional_rules_file | trim | length > 0
+
+- stat:
+ path: "{{ tempdir }}/prometheus.additional.rules"
+ register: additional_rules_stat
+
+# The kubernetes version impacts the prometheus scraping endpoint
+# so gather it before constructing the configmap
+- name: get oc version
+ oc_version:
+ register: oc_version
+
+- set_fact:
+ kubernetes_version: "{{ oc_version.results.kubernetes_short | float }}"
+
+- template:
+ src: prometheus.yml.j2
+ dest: "{{ tempdir }}/prometheus.yml"
+ changed_when: no
+
+- template:
+ src: prometheus.rules.j2
+ dest: "{{ tempdir }}/prometheus.rules"
+ changed_when: no
+
+# In prometheus configmap create "additional.rules" section if file exists
+- name: Set prometheus configmap
+ oc_configmap:
+ state: "{{ state }}"
+ name: "prometheus"
+ namespace: "{{ openshift_prometheus_namespace }}"
+ from_file:
+ prometheus.rules: "{{ tempdir }}/prometheus.rules"
+ prometheus.additional.rules: "{{ tempdir }}/prometheus.additional.rules"
+ prometheus.yml: "{{ tempdir }}/prometheus.yml"
+ when: additional_rules_stat.stat.exists == True
+
+- name: Set prometheus configmap
+ oc_configmap:
+ state: "{{ state }}"
+ name: "prometheus"
+ namespace: "{{ openshift_prometheus_namespace }}"
+ from_file:
+ prometheus.rules: "{{ tempdir }}/prometheus.rules"
+ prometheus.yml: "{{ tempdir }}/prometheus.yml"
+ when: additional_rules_stat.stat.exists == False
+
+# alertmanager configmap
+- template:
+ src: alertmanager.yml.j2
+ dest: "{{ tempdir }}/alertmanager.yml"
+ changed_when: no
+
+- name: Set alertmanager configmap
+ oc_configmap:
+ state: "{{ state }}"
+ name: "prometheus-alerts"
+ namespace: "{{ openshift_prometheus_namespace }}"
+ from_file:
+ alertmanager.yml: "{{ tempdir }}/alertmanager.yml"
diff --git a/roles/openshift_prometheus/tasks/main.yaml b/roles/openshift_prometheus/tasks/main.yaml
new file mode 100644
index 000000000..523a64334
--- /dev/null
+++ b/roles/openshift_prometheus/tasks/main.yaml
@@ -0,0 +1,26 @@
+---
+
+- name: Create temp directory for doing work in on target
+ command: mktemp -td openshift-prometheus-ansible-XXXXXX
+ register: mktemp
+ changed_when: False
+
+- set_fact:
+ tempdir: "{{ mktemp.stdout }}"
+
+- name: Create templates subdirectory
+ file:
+ state: directory
+ path: "{{ tempdir }}/templates"
+ mode: 0755
+ changed_when: False
+
+- include: install_prometheus.yaml
+ vars:
+ state: "{{ openshift_prometheus_state }}"
+
+- name: Delete temp directory
+ file:
+ name: "{{ tempdir }}"
+ state: absent
+ changed_when: False
diff --git a/roles/openshift_prometheus/tasks/nfs.yaml b/roles/openshift_prometheus/tasks/nfs.yaml
new file mode 100644
index 000000000..0b45f2cee
--- /dev/null
+++ b/roles/openshift_prometheus/tasks/nfs.yaml
@@ -0,0 +1,44 @@
+---
+# Tasks to statically provision NFS volumes
+# Include if not using dynamic volume provisioning
+- name: Ensure the /exports/ directory exists
+ file:
+ path: /exports/
+ state: directory
+ mode: 0755
+ owner: root
+ group: root
+
+- name: Ensure the prom-pv0X export directories exist
+ file:
+ path: "/exports/{{ item }}"
+ state: directory
+ mode: 0777
+ owner: nfsnobody
+ group: nfsnobody
+ with_items: "{{ openshift_prometheus_pv_exports }}"
+
+- name: Ensure the NFS exports for Prometheus PVs exist
+ copy:
+ src: openshift_prometheus.exports
+ dest: /etc/exports.d/openshift_prometheus.exports
+ register: nfs_exports_updated
+
+- name: Ensure the NFS export table is refreshed if exports were added
+ command: exportfs -ar
+ when:
+ - nfs_exports_updated.changed
+
+
+######################################################################
+# Create the required Prometheus PVs. Check out these online docs if you
+# need a refresher on includes looping with items:
+# * http://docs.ansible.com/ansible/playbooks_loops.html#loops-and-includes-in-2-0
+# * http://stackoverflow.com/a/35128533
+#
+# TODO: Handle the case where a PV template is updated in
+# openshift-ansible and the change needs to be landed on the managed
+# cluster.
+
+- include: create_pvs.yaml
+ with_items: "{{ openshift_prometheus_pv_data }}"
diff --git a/roles/openshift_prometheus/templates/alertmanager.yml.j2 b/roles/openshift_prometheus/templates/alertmanager.yml.j2
new file mode 100644
index 000000000..6c432a3d0
--- /dev/null
+++ b/roles/openshift_prometheus/templates/alertmanager.yml.j2
@@ -0,0 +1,20 @@
+global:
+
+# The root route on which each incoming alert enters.
+route:
+ # default route if none match
+ receiver: alert-buffer-wh
+
+ # The labels by which incoming alerts are grouped together. For example,
+ # multiple alerts coming in for cluster=A and alertname=LatencyHigh would
+ # be batched into a single group.
+ # TODO:
+ group_by: []
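+  # e.g. group_by: ['alertname', 'cluster'] (illustrative only; see the Alertmanager docs)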
+
+ # All the above attributes are inherited by all child routes and can
+  # be overwritten on each.
+
+receivers:
+- name: alert-buffer-wh
+ webhook_configs:
+ - url: http://localhost:9099/topics/alerts
diff --git a/roles/openshift_prometheus/templates/prom-pv-alertbuffer.yml.j2 b/roles/openshift_prometheus/templates/prom-pv-alertbuffer.yml.j2
new file mode 100644
index 000000000..55a5e19c3
--- /dev/null
+++ b/roles/openshift_prometheus/templates/prom-pv-alertbuffer.yml.j2
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: prometheus-alertbuffer
+ labels:
+ storage: prometheus-alertbuffer
+spec:
+ capacity:
+ storage: 15Gi
+ accessModes:
+ - ReadWriteOnce
+ nfs:
+ path: /exports/prometheus-alertbuffer
+ server: {{ openshift_prometheus_nfs_server }}
+ persistentVolumeReclaimPolicy: Retain
diff --git a/roles/openshift_prometheus/templates/prom-pv-alertmanager.yml.j2 b/roles/openshift_prometheus/templates/prom-pv-alertmanager.yml.j2
new file mode 100644
index 000000000..4ee518735
--- /dev/null
+++ b/roles/openshift_prometheus/templates/prom-pv-alertmanager.yml.j2
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: prometheus-alertmanager
+ labels:
+ storage: prometheus-alertmanager
+spec:
+ capacity:
+ storage: 15Gi
+ accessModes:
+ - ReadWriteOnce
+ nfs:
+ path: /exports/prometheus-alertmanager
+ server: {{ openshift_prometheus_nfs_server }}
+ persistentVolumeReclaimPolicy: Retain
diff --git a/roles/openshift_prometheus/templates/prom-pv-server.yml.j2 b/roles/openshift_prometheus/templates/prom-pv-server.yml.j2
new file mode 100644
index 000000000..933bf0f60
--- /dev/null
+++ b/roles/openshift_prometheus/templates/prom-pv-server.yml.j2
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: prometheus
+ labels:
+ storage: prometheus
+spec:
+ capacity:
+ storage: 15Gi
+ accessModes:
+ - ReadWriteOnce
+ nfs:
+ path: /exports/prometheus
+ server: {{ openshift_prometheus_nfs_server }}
+ persistentVolumeReclaimPolicy: Retain
diff --git a/roles/openshift_prometheus/templates/prometheus.rules.j2 b/roles/openshift_prometheus/templates/prometheus.rules.j2
new file mode 100644
index 000000000..e861dc127
--- /dev/null
+++ b/roles/openshift_prometheus/templates/prometheus.rules.j2
@@ -0,0 +1,4 @@
+groups:
+- name: example-rules
+ interval: 30s # defaults to global interval
+ rules:
diff --git a/roles/openshift_prometheus/templates/prometheus.yml.j2 b/roles/openshift_prometheus/templates/prometheus.yml.j2
new file mode 100644
index 000000000..63430f834
--- /dev/null
+++ b/roles/openshift_prometheus/templates/prometheus.yml.j2
@@ -0,0 +1,174 @@
+rule_files:
+ - 'prometheus.rules'
+{% if openshift_prometheus_additional_rules_file is defined and openshift_prometheus_additional_rules_file is not none %}
+ - 'prometheus.additional.rules'
+{% endif %}
+
+
+
+# A scrape configuration for running Prometheus on a Kubernetes cluster.
+# This uses separate scrape configs for cluster components (i.e. API server, node)
+# and services to allow each to use different authentication configs.
+#
+# Kubernetes labels will be added as Prometheus labels on metrics via the
+# `labelmap` relabeling action.
+
+# Scrape config for API servers.
+#
+# Kubernetes exposes API servers as endpoints to the default/kubernetes
+# service so this uses `endpoints` role and uses relabelling to only keep
+# the endpoints associated with the default/kubernetes service using the
+# default named port `https`. This works for single API server deployments as
+# well as HA API server deployments.
+scrape_configs:
+- job_name: 'kubernetes-apiservers'
+
+ kubernetes_sd_configs:
+ - role: endpoints
+
+ scheme: https
+ tls_config:
+ ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+ bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+
+ # Keep only the default/kubernetes service endpoints for the https port. This
+ # will add targets for each API server which Kubernetes adds an endpoint to
+ # the default/kubernetes service.
+ relabel_configs:
+ - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
+ action: keep
+ regex: default;kubernetes;https
+
+# Scrape config for nodes.
+#
+# Each node exposes a /metrics endpoint that contains operational metrics for
+# the Kubelet and other components.
+- job_name: 'kubernetes-nodes'
+
+ scheme: https
+ tls_config:
+ ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+ bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+
+ kubernetes_sd_configs:
+ - role: node
+
+ relabel_configs:
+ - action: labelmap
+ regex: __meta_kubernetes_node_label_(.+)
+
+# Scrape config for controllers.
+#
+# Each master node exposes a /metrics endpoint on :8444 that contains operational metrics for
+# the controllers.
+#
+# TODO: move this to a pure endpoints based metrics gatherer when controllers are exposed via
+# endpoints.
+- job_name: 'kubernetes-controllers'
+
+ scheme: https
+ tls_config:
+ ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+ bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+
+ kubernetes_sd_configs:
+ - role: endpoints
+
+ # Keep only the default/kubernetes service endpoints for the https port, and then
+ # set the port to 8444. This is the default configuration for the controllers on OpenShift
+ # masters.
+ relabel_configs:
+ - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
+ action: keep
+ regex: default;kubernetes;https
+ - source_labels: [__address__]
+ action: replace
+ target_label: __address__
+ regex: (.+)(?::\d+)
+ replacement: $1:8444
+
+# Scrape config for cAdvisor.
+#
+# Beginning in Kube 1.7, each node exposes a /metrics/cadvisor endpoint that
+# reports container metrics for each running pod. Scrape those by default.
+- job_name: 'kubernetes-cadvisor'
+
+ scheme: https
+ tls_config:
+ ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+ bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+
+{% if kubernetes_version | float() >= 1.7 | float() %}
+ metrics_path: /metrics/cadvisor
+{% else %}
+ metrics_path: /metrics
+{% endif %}
+
+ kubernetes_sd_configs:
+ - role: node
+
+ relabel_configs:
+ - action: labelmap
+ regex: __meta_kubernetes_node_label_(.+)
+
+# Scrape config for service endpoints.
+#
+# The relabeling allows the actual service scrape endpoint to be configured
+# via the following annotations:
+#
+# * `prometheus.io/scrape`: Only scrape services that have a value of `true`
+# * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need
+# to set this to `https` & most likely set the `tls_config` of the scrape config.
+# * `prometheus.io/path`: If the metrics path is not `/metrics` override this.
+# * `prometheus.io/port`: If the metrics are exposed on a different port to the
+# service then set this appropriately.
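+#
+# For example, a service could opt in to scraping with annotations like the
+# following (illustrative values only):
+#
+#   metadata:
+#     annotations:
+#       prometheus.io/scrape: "true"
+#       prometheus.io/port: "8080"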
+- job_name: 'kubernetes-service-endpoints'
+
+ tls_config:
+ ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+ # TODO: this should be per target
+ insecure_skip_verify: true
+
+ kubernetes_sd_configs:
+ - role: endpoints
+
+ relabel_configs:
+ - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape]
+ action: keep
+ regex: true
+ - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme]
+ action: replace
+ target_label: __scheme__
+ regex: (https?)
+ - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path]
+ action: replace
+ target_label: __metrics_path__
+ regex: (.+)
+ - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port]
+ action: replace
+ target_label: __address__
+ regex: (.+)(?::\d+);(\d+)
+ replacement: $1:$2
+ - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_username]
+ action: replace
+ target_label: __basic_auth_username__
+ regex: (.+)
+ - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_password]
+ action: replace
+ target_label: __basic_auth_password__
+ regex: (.+)
+ - action: labelmap
+ regex: __meta_kubernetes_service_label_(.+)
+ - source_labels: [__meta_kubernetes_namespace]
+ action: replace
+ target_label: kubernetes_namespace
+ - source_labels: [__meta_kubernetes_service_name]
+ action: replace
+ target_label: kubernetes_name
+
+alerting:
+ alertmanagers:
+ - scheme: http
+ static_configs:
+ - targets:
+ - "localhost:9093"
diff --git a/roles/openshift_prometheus/templates/prometheus_deployment.j2 b/roles/openshift_prometheus/templates/prometheus_deployment.j2
new file mode 100644
index 000000000..98c117f19
--- /dev/null
+++ b/roles/openshift_prometheus/templates/prometheus_deployment.j2
@@ -0,0 +1,240 @@
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ name: prometheus
+ namespace: {{ namespace }}
+ labels:
+ app: prometheus
+spec:
+ replicas: {{ prom_replicas|default(1) }}
+ selector:
+ provider: openshift
+ matchLabels:
+ app: prometheus
+ template:
+ metadata:
+ name: prometheus
+ labels:
+ app: prometheus
+ spec:
+ serviceAccountName: prometheus
+{% if openshift_prometheus_node_selector is iterable and openshift_prometheus_node_selector | length > 0 %}
+ nodeSelector:
+{% for key, value in openshift_prometheus_node_selector.iteritems() %}
+ {{key}}: "{{value}}"
+{% endfor %}
+{% endif %}
+ containers:
+ # Deploy Prometheus behind an oauth proxy
+ - name: prom-proxy
+ image: "{{ openshift_prometheus_image_proxy }}"
+ imagePullPolicy: IfNotPresent
+ resources:
+ requests:
+{% if openshift_prometheus_oauth_proxy_memory_requests is defined and openshift_prometheus_oauth_proxy_memory_requests is not none %}
+ memory: "{{openshift_prometheus_oauth_proxy_memory_requests}}"
+{% endif %}
+{% if openshift_prometheus_oauth_proxy_cpu_requests is defined and openshift_prometheus_oauth_proxy_cpu_requests is not none %}
+ cpu: "{{openshift_prometheus_oauth_proxy_cpu_requests}}"
+{% endif %}
+ limits:
+{% if openshift_prometheus_oauth_proxy_memory_limit is defined and openshift_prometheus_oauth_proxy_memory_limit is not none %}
+ memory: "{{openshift_prometheus_oauth_proxy_memory_limit}}"
+{% endif %}
+{% if openshift_prometheus_oauth_proxy_cpu_limit is defined and openshift_prometheus_oauth_proxy_cpu_limit is not none %}
+ cpu: "{{openshift_prometheus_oauth_proxy_cpu_limit}}"
+{% endif %}
+ ports:
+ - containerPort: 8443
+ name: web
+ args:
+ - -provider=openshift
+ - -https-address=:8443
+ - -http-address=
+ - -email-domain=*
+ - -upstream=http://localhost:9090
+ - -client-id=system:serviceaccount:{{ namespace }}:prometheus
+ - '-openshift-sar={"resource": "namespaces", "verb": "get", "resourceName": "{{ namespace }}", "namespace": "{{ namespace }}"}'
+ - '-openshift-delegate-urls={"/": {"resource": "namespaces", "verb": "get", "resourceName": "{{ namespace }}", "namespace": "{{ namespace }}"}}'
+ - -tls-cert=/etc/tls/private/tls.crt
+ - -tls-key=/etc/tls/private/tls.key
+ - -client-secret-file=/var/run/secrets/kubernetes.io/serviceaccount/token
+ - -cookie-secret-file=/etc/proxy/secrets/session_secret
+ - -skip-auth-regex=^/metrics
+ volumeMounts:
+ - mountPath: /etc/tls/private
+ name: prometheus-tls
+ - mountPath: /etc/proxy/secrets
+ name: prometheus-secrets
+ - mountPath: /prometheus
+ name: prometheus-data
+
+ - name: prometheus
+ args:
+ - --storage.tsdb.retention=6h
+ - --config.file=/etc/prometheus/prometheus.yml
+ - --web.listen-address=localhost:9090
+ image: "{{ openshift_prometheus_image_prometheus }}"
+ imagePullPolicy: IfNotPresent
+ resources:
+ requests:
+{% if openshift_prometheus_memory_requests is defined and openshift_prometheus_memory_requests is not none %}
+ memory: "{{openshift_prometheus_memory_requests}}"
+{% endif %}
+{% if openshift_prometheus_cpu_requests is defined and openshift_prometheus_cpu_requests is not none %}
+ cpu: "{{openshift_prometheus_cpu_requests}}"
+{% endif %}
+ limits:
+{% if openshift_prometheus_memory_limit is defined and openshift_prometheus_memory_limit is not none %}
+ memory: "{{ openshift_prometheus_memory_limit }}"
+{% endif %}
+{% if openshift_prometheus_cpu_limit is defined and openshift_prometheus_cpu_limit is not none %}
+ cpu: "{{openshift_prometheus_cpu_limit}}"
+{% endif %}
+
+ volumeMounts:
+ - mountPath: /etc/prometheus
+ name: prometheus-config
+ - mountPath: /prometheus
+ name: prometheus-data
+
+ # Deploy alertmanager behind prometheus-alert-buffer behind an oauth proxy
+ - name: alerts-proxy
+ image: "{{ openshift_prometheus_image_proxy }}"
+ imagePullPolicy: IfNotPresent
+ resources:
+ requests:
+{% if openshift_prometheus_oauth_proxy_memory_requests is defined and openshift_prometheus_oauth_proxy_memory_requests is not none %}
+ memory: "{{openshift_prometheus_oauth_proxy_memory_requests}}"
+{% endif %}
+{% if openshift_prometheus_oauth_proxy_cpu_requests is defined and openshift_prometheus_oauth_proxy_cpu_requests is not none %}
+ cpu: "{{openshift_prometheus_oauth_proxy_cpu_requests}}"
+{% endif %}
+ limits:
+{% if openshift_prometheus_oauth_proxy_memory_limit is defined and openshift_prometheus_oauth_proxy_memory_limit is not none %}
+ memory: "{{openshift_prometheus_oauth_proxy_memory_limit}}"
+{% endif %}
+{% if openshift_prometheus_oauth_proxy_cpu_limit is defined and openshift_prometheus_oauth_proxy_cpu_limit is not none %}
+ cpu: "{{openshift_prometheus_oauth_proxy_cpu_limit}}"
+{% endif %}
+ ports:
+ - containerPort: 9443
+ name: web
+ args:
+ - -provider=openshift
+ - -https-address=:9443
+ - -http-address=
+ - -email-domain=*
+ - -upstream=http://localhost:9099
+ - -client-id=system:serviceaccount:{{ namespace }}:prometheus
+ - '-openshift-sar={"resource": "namespaces", "verb": "get", "resourceName": "{{ namespace }}", "namespace": "{{ namespace }}"}'
+ - '-openshift-delegate-urls={"/": {"resource": "namespaces", "verb": "get", "resourceName": "{{ namespace }}", "namespace": "{{ namespace }}"}}'
+ - -tls-cert=/etc/tls/private/tls.crt
+ - -tls-key=/etc/tls/private/tls.key
+ - -client-secret-file=/var/run/secrets/kubernetes.io/serviceaccount/token
+ - -cookie-secret-file=/etc/proxy/secrets/session_secret
+ volumeMounts:
+ - mountPath: /etc/tls/private
+ name: alerts-tls
+ - mountPath: /etc/proxy/secrets
+ name: alerts-secrets
+
+ - name: alert-buffer
+ args:
+ - --storage-path=/alert-buffer/messages.db
+ image: "{{ openshift_prometheus_image_alertbuffer }}"
+ imagePullPolicy: IfNotPresent
+ resources:
+ requests:
+{% if openshift_prometheus_alertbuffer_memory_requests is defined and openshift_prometheus_alertbuffer_memory_requests is not none %}
+ memory: "{{openshift_prometheus_alertbuffer_memory_requests}}"
+{% endif %}
+{% if openshift_prometheus_alertbuffer_cpu_requests is defined and openshift_prometheus_alertbuffer_cpu_requests is not none %}
+ cpu: "{{openshift_prometheus_alertbuffer_cpu_requests}}"
+{% endif %}
+ limits:
+{% if openshift_prometheus_alertbuffer_memory_limit is defined and openshift_prometheus_alertbuffer_memory_limit is not none %}
+ memory: "{{openshift_prometheus_alertbuffer_memory_limit}}"
+{% endif %}
+{% if openshift_prometheus_alertbuffer_cpu_limit is defined and openshift_prometheus_alertbuffer_cpu_limit is not none %}
+ cpu: "{{openshift_prometheus_alertbuffer_cpu_limit}}"
+{% endif %}
+ volumeMounts:
+ - mountPath: /alert-buffer
+ name: alert-buffer-data
+ ports:
+ - containerPort: 9099
+ name: alert-buf
+
+ - name: alertmanager
+ args:
+ - -config.file=/etc/alertmanager/alertmanager.yml
+ image: "{{ openshift_prometheus_image_alertmanager }}"
+ imagePullPolicy: IfNotPresent
+ resources:
+ requests:
+{% if openshift_prometheus_alertmanager_memory_requests is defined and openshift_prometheus_alertmanager_memory_requests is not none %}
+ memory: "{{openshift_prometheus_alertmanager_memory_requests}}"
+{% endif %}
+{% if openshift_prometheus_alertmanager_cpu_requests is defined and openshift_prometheus_alertmanager_cpu_requests is not none %}
+ cpu: "{{openshift_prometheus_alertmanager_cpu_requests}}"
+{% endif %}
+ limits:
+{% if openshift_prometheus_alertmanager_memory_limit is defined and openshift_prometheus_alertmanager_memory_limit is not none %}
+ memory: "{{openshift_prometheus_alertmanager_memory_limit}}"
+{% endif %}
+{% if openshift_prometheus_alertmanager_cpu_limit is defined and openshift_prometheus_alertmanager_cpu_limit is not none %}
+ cpu: "{{openshift_prometheus_alertmanager_cpu_limit}}"
+{% endif %}
+ ports:
+ - containerPort: 9093
+ name: web
+ volumeMounts:
+ - mountPath: /etc/alertmanager
+ name: alertmanager-config
+ - mountPath: /alertmanager
+ name: alertmanager-data
+
+ restartPolicy: Always
+ volumes:
+ - name: prometheus-config
+ configMap:
+ defaultMode: 420
+ name: prometheus
+ - name: prometheus-secrets
+ secret:
+ secretName: prometheus-proxy
+ - name: prometheus-tls
+ secret:
+ secretName: prometheus-tls
+ - name: prometheus-data
+{% if openshift_prometheus_storage_type == 'pvc' %}
+ persistentVolumeClaim:
+ claimName: {{ openshift_prometheus_pvc_name }}
+{% else %}
+        emptyDir: {}
+{% endif %}
+ - name: alertmanager-config
+ configMap:
+ defaultMode: 420
+ name: prometheus-alerts
+ - name: alerts-secrets
+ secret:
+ secretName: alerts-proxy
+ - name: alerts-tls
+ secret:
+ secretName: prometheus-alerts-tls
+ - name: alertmanager-data
+{% if openshift_prometheus_alertmanager_storage_type == 'pvc' %}
+ persistentVolumeClaim:
+ claimName: {{ openshift_prometheus_alertmanager_pvc_name }}
+{% else %}
+        emptyDir: {}
+{% endif %}
+ - name: alert-buffer-data
+{% if openshift_prometheus_alertbuffer_storage_type == 'pvc' %}
+ persistentVolumeClaim:
+ claimName: {{ openshift_prometheus_alertbuffer_pvc_name }}
+{% else %}
+        emptyDir: {}
+{% endif %}
diff --git a/roles/openshift_prometheus/tests/inventory b/roles/openshift_prometheus/tests/inventory
new file mode 100644
index 000000000..878877b07
--- /dev/null
+++ b/roles/openshift_prometheus/tests/inventory
@@ -0,0 +1,2 @@
+localhost
+
diff --git a/roles/openshift_prometheus/tests/test.yaml b/roles/openshift_prometheus/tests/test.yaml
new file mode 100644
index 000000000..37baf573c
--- /dev/null
+++ b/roles/openshift_prometheus/tests/test.yaml
@@ -0,0 +1,5 @@
+---
+- hosts: localhost
+ remote_user: root
+ roles:
+ - openshift_prometheus
diff --git a/roles/openshift_service_catalog/defaults/main.yml b/roles/openshift_service_catalog/defaults/main.yml
index 01ee2544d..7c848cb12 100644
--- a/roles/openshift_service_catalog/defaults/main.yml
+++ b/roles/openshift_service_catalog/defaults/main.yml
@@ -1,3 +1,7 @@
---
openshift_service_catalog_remove: false
openshift_service_catalog_nodeselector: {"openshift-infra": "apiserver"}
+
+openshift_use_openshift_sdn: True
+# os_sdn_network_plugin_name: "{% if openshift_use_openshift_sdn %}redhat/openshift-ovs-subnet{% else %}{% endif %}"
+os_sdn_network_plugin_name: "redhat/openshift-ovs-subnet"
diff --git a/roles/openshift_service_catalog/tasks/install.yml b/roles/openshift_service_catalog/tasks/install.yml
index 64f94347b..746c73eaf 100644
--- a/roles/openshift_service_catalog/tasks/install.yml
+++ b/roles/openshift_service_catalog/tasks/install.yml
@@ -28,7 +28,7 @@
- name: Make kube-service-catalog project network global
command: >
oc adm pod-network make-projects-global kube-service-catalog
- when: os_sdn_network_plugin_name | default('') == 'redhat/openshift-ovs-multitenant'
+ when: os_sdn_network_plugin_name == 'redhat/openshift-ovs-multitenant'
- include: generate_certs.yml
diff --git a/roles/openshift_version/tasks/main.yml b/roles/openshift_version/tasks/main.yml
index c0ea00f34..204abe27e 100644
--- a/roles/openshift_version/tasks/main.yml
+++ b/roles/openshift_version/tasks/main.yml
@@ -5,6 +5,12 @@
is_containerized: "{{ openshift.common.is_containerized | default(False) | bool }}"
is_atomic: "{{ openshift.common.is_atomic | default(False) | bool }}"
+- name: Install the base package for versioning
+ package:
+ name: "{{ openshift.common.service_type }}{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}"
+ state: present
+ when: not is_containerized | bool
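+# For illustration (assuming the oo_image_tag_to_rpm_version filter strips a
+# leading 'v' and, with include_dash=True, prepends a '-'): a service_type of
+# 'origin' and openshift_pkg_version 'v3.6.0' would resolve the package name
+# above to 'origin-3.6.0'.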
+
# Block attempts to install origin without specifying some kind of version information.
# This is because the latest tags for origin are usually alpha builds, which should not
# be used by default. Users must indicate what they want.