-rw-r--r--  .tito/packages/openshift-ansible | 2
-rw-r--r--  README_openstack.md | 1
-rw-r--r--  README_origin.md | 15
-rw-r--r--  inventory/byo/hosts.aep.example | 178
-rw-r--r--  inventory/byo/hosts.origin.example | 182
-rw-r--r--  inventory/byo/hosts.ose.example (renamed from inventory/byo/hosts.example) | 32
-rw-r--r--  openshift-ansible.spec | 73
-rw-r--r--  playbooks/adhoc/bootstrap-fedora.yml | 5
-rw-r--r--  playbooks/adhoc/uninstall.yml | 34
-rw-r--r--  playbooks/aws/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml | 33
-rw-r--r--  playbooks/byo/openshift_facts.yml | 3
-rw-r--r--  playbooks/common/openshift-cluster/config.yml | 3
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml | 4
-rw-r--r--  playbooks/common/openshift-master/config.yml | 18
-rw-r--r--  playbooks/openstack/openshift-cluster/files/heat_stack.yaml | 88
-rw-r--r--  playbooks/openstack/openshift-cluster/launch.yml | 15
-rw-r--r--  playbooks/openstack/openshift-cluster/vars.yml | 1
-rw-r--r--  roles/ansible/tasks/main.yml | 7
-rw-r--r--  roles/cockpit/tasks/main.yml | 12
-rw-r--r--  roles/copr_cli/tasks/main.yml | 6
-rw-r--r--  roles/docker/README.md | 18
-rw-r--r--  roles/docker/handlers/main.yml | 5
-rw-r--r--  roles/docker/meta/main.yml | 128
-rw-r--r--  roles/docker/tasks/main.yml | 7
-rw-r--r--  roles/docker/tasks/udev_workaround.yml | 30
-rw-r--r--  roles/docker/vars/main.yml | 3
-rw-r--r--  roles/etcd/README.md | 2
-rw-r--r--  roles/etcd/tasks/main.yml | 5
-rw-r--r--  roles/etcd_common/defaults/main.yml | 2
-rw-r--r--  roles/flannel/README.md | 3
-rw-r--r--  roles/flannel/tasks/main.yml | 6
-rw-r--r--  roles/fluentd_master/tasks/main.yml | 7
-rw-r--r--  roles/fluentd_node/tasks/main.yml | 7
-rw-r--r--  roles/haproxy/tasks/main.yml | 7
-rw-r--r--  roles/kube_nfs_volumes/tasks/main.yml | 5
-rw-r--r--  roles/kube_nfs_volumes/tasks/nfs.yml | 5
-rw-r--r--  roles/openshift_ansible_inventory/tasks/main.yml | 10
-rw-r--r--  roles/openshift_common/tasks/main.yml | 1
-rw-r--r--  roles/openshift_expand_partition/README.md | 2
-rw-r--r--  roles/openshift_expand_partition/tasks/main.yml | 5
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py | 12
-rw-r--r--  roles/openshift_facts/tasks/main.yml | 7
-rw-r--r--  roles/openshift_manageiq/tasks/main.yaml | 50
-rw-r--r--  roles/openshift_manageiq/vars/main.yml | 24
-rw-r--r--  roles/openshift_master/tasks/main.yml | 28
-rw-r--r--  roles/openshift_master/templates/master.yaml.v1.j2 | 2
-rw-r--r--  roles/openshift_master_ca/tasks/main.yml | 6
-rw-r--r--  roles/openshift_node/tasks/main.yml | 20
-rw-r--r--  roles/openshift_node/tasks/storage_plugins/ceph.yml | 7
-rw-r--r--  roles/openshift_node/tasks/storage_plugins/glusterfs.yml | 7
-rw-r--r--  roles/openshift_node/templates/node.yaml.v1.j2 | 6
-rw-r--r--  roles/openshift_repos/files/fedora-origin/repos/maxamillion-fedora-openshift-fedora.repo | 8
-rw-r--r--  roles/openshift_repos/handlers/main.yml | 5
-rw-r--r--  roles/openshift_repos/tasks/main.yaml | 42
-rw-r--r--  roles/openshift_storage_nfs_lvm/tasks/nfs.yml | 5
-rw-r--r--  roles/os_env_extras/tasks/main.yaml | 7
-rw-r--r--  roles/os_firewall/tasks/firewall/firewalld.yml | 8
-rw-r--r--  roles/os_firewall/tasks/firewall/iptables.yml | 11
-rw-r--r--  roles/os_update_latest/tasks/main.yml | 5
-rw-r--r--  roles/os_zabbix/vars/template_openshift_master.yml | 93
-rw-r--r--  roles/os_zabbix/vars/template_os_linux.yml | 34
-rw-r--r--  roles/yum_repos/README.md | 2
-rwxr-xr-x  utils/site_assets/oo-install-bootstrap.sh | 9
-rw-r--r--  utils/src/ooinstall/cli_installer.py | 2
-rw-r--r--  utils/src/ooinstall/oo_config.py | 10
-rw-r--r--  utils/test/cli_installer_tests.py | 57
66 files changed, 1181 insertions, 256 deletions
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index ce566784c..7432a2dca 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.0.16-1 ./
+3.0.17-1 ./
diff --git a/README_openstack.md b/README_openstack.md
index 8d8f6ef3f..9a2b627e2 100644
--- a/README_openstack.md
+++ b/README_openstack.md
@@ -31,6 +31,7 @@ The following options are used only by `heat_stack.yaml`. They are so used only
* `image_name`: Name of the image to use to spawn VMs
* `public_key` (default to `~/.ssh/id_rsa.pub`): filename of the ssh public key
+* `etcd_flavor` (default to `m1.small`): The ID or name of the flavor for the etcd nodes
* `master_flavor` (default to `m1.small`): The ID or name of the flavor for the master
* `node_flavor` (default to `m1.medium`): The ID or name of the flavor for the compute nodes
* `infra_flavor` (default to `m1.small`): The ID or name of the flavor for the infrastructure nodes
diff --git a/README_origin.md b/README_origin.md
index cb213a93a..343ecda3d 100644
--- a/README_origin.md
+++ b/README_origin.md
@@ -39,6 +39,12 @@ subscription-manager repos \
```
* Configuration of router is not automated yet
* Configuration of docker-registry is not automated yet
+* Fedora 23+ doesn't come with python2 and will need a quick bootstrap. Set up
+ your inventory as described below and run the following (substituting the
+ `$PATH_TO_INVENTORY_FILE` with the actual path to your inventory file):
+```sh
+ansible-playbook ./playbooks/adhoc/bootstrap-fedora.yml -i $PATH_TO_INVENTORY_FILE
+```
## Configuring the host inventory
[Ansible docs](http://docs.ansible.com/intro_inventory.html)
@@ -59,6 +65,7 @@ nodes
# Set variables common for all OSEv3 hosts
[OSv3:vars]
+
# SSH user, this user should allow ssh based auth without requiring a password
ansible_ssh_user=root
@@ -75,6 +82,14 @@ osv3-master.example.com
[nodes]
osv3-master.example.com
osv3-node[1:2].example.com
+
+# host group for etcd
+[etcd]
+osv3-etcd[1:3].example.com
+
+[lb]
+osv3-lb.example.com
+
```
The hostnames above should resolve both from the hosts themselves and
diff --git a/inventory/byo/hosts.aep.example b/inventory/byo/hosts.aep.example
new file mode 100644
index 000000000..d5b872e06
--- /dev/null
+++ b/inventory/byo/hosts.aep.example
@@ -0,0 +1,178 @@
+# This is an example of a bring your own (byo) host inventory
+
+# Create an OSEv3 group that contains the masters and nodes groups
+[OSEv3:children]
+masters
+nodes
+etcd
+lb
+
+# Set variables common for all OSEv3 hosts
+[OSEv3:vars]
+# SSH user, this user should allow ssh based auth without requiring a
+# password. If using ssh key based auth, then the key should be managed by an
+# ssh agent.
+ansible_ssh_user=root
+
+# If ansible_ssh_user is not root, ansible_sudo must be set to true and the
+# user must be configured for passwordless sudo
+#ansible_sudo=true
+
+# deployment type valid values are origin, online, atomic-enterprise, and openshift-enterprise
+deployment_type=atomic-enterprise
+
+# Enable cluster metrics
+#use_cluster_metrics=true
+
+# Add additional, insecure, and blocked registries to global docker configuration
+# For enterprise deployment types we ensure that registry.access.redhat.com is
+# included if you do not include it
+#cli_docker_additional_registries=registry.example.com
+#cli_docker_insecure_registries=registry.example.com
+#cli_docker_blocked_registries=registry.hacker.com
+
+# Alternate image format string. If you're not modifying the format string and
+# only need to inject your own registry you may want to consider
+# cli_docker_additional_registries instead
+#oreg_url=example.com/aep3/aep-${component}:${version}
+
+# Additional yum repos to install
+#openshift_additional_repos=[{'id': 'aep-devel', 'name': 'aep-devel', 'baseurl': 'http://example.com/puddle/build/AtomicOpenShift/3.1/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}]
+
+# htpasswd auth
+openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/htpasswd'}]
+
+# Allow all auth
+#openshift_master_identity_providers=[{'name': 'allow_all', 'login': 'true', 'challenge': 'true', 'kind': 'AllowAllPasswordIdentityProvider'}]
+
+# LDAP auth
+#openshift_master_identity_providers=[{'name': 'my_ldap_provider', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider', 'attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': '', 'bindPassword': '', 'ca': '', 'insecure': 'false', 'url': 'ldap://ldap.example.com:389/ou=users,dc=example,dc=com?uid'}]
+
+# Project Configuration
+#osm_project_request_message=''
+#osm_project_request_template=''
+#osm_mcs_allocator_range='s0:/2'
+#osm_mcs_labels_per_project=5
+#osm_uid_allocator_range='1000000000-1999999999/10000'
+
+# Configure Fluentd
+#use_fluentd=true
+
+# Enable cockpit
+#osm_use_cockpit=true
+#
+# Set cockpit plugins
+#osm_cockpit_plugins=['cockpit-kubernetes']
+
+# Native high availability cluster method with optional load balancer.
+# If no lb group is defined, the installer assumes that a load balancer has
+# been preconfigured. For installation, the value of
+# openshift_master_cluster_hostname must resolve to the load balancer
+# or to one or all of the masters defined in the inventory if no load
+# balancer is present.
+#openshift_master_cluster_method=native
+#openshift_master_cluster_hostname=openshift-ansible.test.example.com
+#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com
+
+# Pacemaker high availability cluster method.
+# Pacemaker HA environment must be able to self provision the
+# configured VIP. For installation openshift_master_cluster_hostname
+# must resolve to the configured VIP.
+#openshift_master_cluster_method=pacemaker
+#openshift_master_cluster_password=openshift_cluster
+#openshift_master_cluster_vip=192.168.133.25
+#openshift_master_cluster_public_vip=192.168.133.25
+#openshift_master_cluster_hostname=openshift-ansible.test.example.com
+#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com
+
+# Override the default controller lease ttl
+#osm_controller_lease_ttl=30
+
+# default subdomain to use for exposed routes
+#osm_default_subdomain=apps.test.example.com
+
+# additional cors origins
+#osm_custom_cors_origins=['foo.example.com', 'bar.example.com']
+
+# default project node selector
+#osm_default_node_selector='region=primary'
+
+# Default storage plugin dependencies to install. By default, the ceph and
+# glusterfs plugin dependencies will be installed, if available.
+#osn_storage_plugin_deps=['ceph','glusterfs']
+
+# default selectors for router and registry services
+# openshift_router_selector='region=infra'
+# openshift_registry_selector='region=infra'
+
+# Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet')
+# os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'
+
+# Disable the OpenShift SDN plugin
+# openshift_use_openshift_sdn=False
+
+# set RPM version for debugging purposes
+#openshift_pkg_version=-3.1.0.0
+
+# Configure custom named certificates
+# NOTE: openshift_master_named_certificates is cached on masters and is an
+# additive fact, meaning that each run with a different set of certificates
+# will add the newly provided certificates to the cached set of certificates.
+# If you would like openshift_master_named_certificates to be overwritten with
+# the provided value, specify openshift_master_overwrite_named_certificates.
+#openshift_master_overwrite_named_certificates: true
+#
+# Provide local certificate paths which will be deployed to masters
+#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key"}]
+#
+# Detected names may be overridden by specifying the "names" key
+#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "names": ["public-master-host.com"]}]
+
+# Session options
+#openshift_master_session_name=ssn
+#openshift_master_session_max_seconds=3600
+
+# An authentication and encryption secret will be generated if secrets
+# are not provided. If provided, openshift_master_session_auth_secrets
+# and openshift_master_session_encryption_secrets must be of equal length.
+#
+# Signing secrets, used to authenticate sessions using
+# HMAC. Recommended to use secrets with 32 or 64 bytes.
+#openshift_master_session_auth_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO']
+#
+# Encrypting secrets, used to encrypt sessions. Must be 16, 24, or 32
+# characters long, to select AES-128, AES-192, or AES-256.
+#openshift_master_session_encryption_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO']
+
+# configure how often node iptables rules are refreshed
+#openshift_node_iptables_sync_period=5s
+
+# Configure nodeIP in the node config
+# This is needed in cases where node traffic is desired to go over an
+# interface other than the default network interface.
+#openshift_node_set_node_ip=True
+
+# Force setting of system hostname when configuring OpenShift
+# This works around issues related to installations that do not have valid dns
+# entries for the interfaces attached to the host.
+#openshift_set_hostname=True
+
+# Configure dnsIP in the node config
+#openshift_dns_ip=172.30.0.1
+
+# host group for masters
+[masters]
+aep3-master[1:3]-ansible.test.example.com
+
+[etcd]
+aep3-etcd[1:3]-ansible.test.example.com
+
+[lb]
+aep3-lb-ansible.test.example.com
+
+# NOTE: Currently we require that masters be part of the SDN which requires that they also be nodes
+# However, in order to ensure that your masters are not burdened with running pods you should
+# make them unschedulable by adding openshift_schedulable=False to any node that's also a master.
+[nodes]
+aep3-master[1:3]-ansible.test.example.com
+aep3-node[1:2]-ansible.test.example.com openshift_node_labels="{'region': 'primary', 'zone': 'default'}"
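
The session-secret comments in the example inventory above ask for fixed-length random strings. One way to generate suitable values, assuming `openssl` is available on the control host (the byte counts are chosen so the base64 output lands on 64 and 32 characters respectively):

```sh
# 64-character signing secret for openshift_master_session_auth_secrets
openssl rand -base64 48

# 32-character (AES-256) secret for openshift_master_session_encryption_secrets
openssl rand -base64 24
```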
diff --git a/inventory/byo/hosts.origin.example b/inventory/byo/hosts.origin.example
new file mode 100644
index 000000000..77a3a04b4
--- /dev/null
+++ b/inventory/byo/hosts.origin.example
@@ -0,0 +1,182 @@
+# This is an example of a bring your own (byo) host inventory
+
+# Create an OSEv3 group that contains the masters and nodes groups
+[OSEv3:children]
+masters
+nodes
+etcd
+lb
+
+# Set variables common for all OSEv3 hosts
+[OSEv3:vars]
+# SSH user, this user should allow ssh based auth without requiring a
+# password. If using ssh key based auth, then the key should be managed by an
+# ssh agent.
+ansible_ssh_user=root
+
+# If ansible_ssh_user is not root, ansible_sudo must be set to true and the
+# user must be configured for passwordless sudo
+#ansible_sudo=true
+
+# deployment type valid values are origin, online, atomic-enterprise and openshift-enterprise
+deployment_type=origin
+
+# Enable cluster metrics
+#use_cluster_metrics=true
+
+# Add additional, insecure, and blocked registries to global docker configuration
+# For enterprise deployment types we ensure that registry.access.redhat.com is
+# included if you do not include it
+#cli_docker_additional_registries=registry.example.com
+#cli_docker_insecure_registries=registry.example.com
+#cli_docker_blocked_registries=registry.hacker.com
+
+# Alternate image format string. If you're not modifying the format string and
+# only need to inject your own registry you may want to consider
+# cli_docker_additional_registries instead
+#oreg_url=example.com/openshift3/ose-${component}:${version}
+
+# Origin copr repo
+#openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name': 'OpenShift Origin COPR', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/', 'enabled': 1, 'gpgcheck': 1, gpgkey: 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}]
+
+# Origin Fedora copr repo
+# Use this if you are installing on Fedora
+#openshift_additional_repos=[{'id': 'fedora-openshift-origin-copr', 'name': 'OpenShift Origin COPR for Fedora', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/fedora-openshift/fedora-$releasever-$basearch/', 'enabled': 1, 'gpgcheck': 1, gpgkey: 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/fedora-openshift/pubkey.gpg'}]
+
+# htpasswd auth
+openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/htpasswd'}]
+
+# Allow all auth
+#openshift_master_identity_providers=[{'name': 'allow_all', 'login': 'true', 'challenge': 'true', 'kind': 'AllowAllPasswordIdentityProvider'}]
+
+# LDAP auth
+#openshift_master_identity_providers=[{'name': 'my_ldap_provider', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider', 'attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': '', 'bindPassword': '', 'ca': '', 'insecure': 'false', 'url': 'ldap://ldap.example.com:389/ou=users,dc=example,dc=com?uid'}]
+
+# Project Configuration
+#osm_project_request_message=''
+#osm_project_request_template=''
+#osm_mcs_allocator_range='s0:/2'
+#osm_mcs_labels_per_project=5
+#osm_uid_allocator_range='1000000000-1999999999/10000'
+
+# Configure Fluentd
+#use_fluentd=true
+
+# Enable cockpit
+#osm_use_cockpit=true
+#
+# Set cockpit plugins
+#osm_cockpit_plugins=['cockpit-kubernetes']
+
+# Native high availability cluster method with optional load balancer.
+# If no lb group is defined, the installer assumes that a load balancer has
+# been preconfigured. For installation, the value of
+# openshift_master_cluster_hostname must resolve to the load balancer
+# or to one or all of the masters defined in the inventory if no load
+# balancer is present.
+#openshift_master_cluster_method=native
+#openshift_master_cluster_hostname=openshift-ansible.test.example.com
+#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com
+
+# Pacemaker high availability cluster method.
+# Pacemaker HA environment must be able to self provision the
+# configured VIP. For installation openshift_master_cluster_hostname
+# must resolve to the configured VIP.
+#openshift_master_cluster_method=pacemaker
+#openshift_master_cluster_password=openshift_cluster
+#openshift_master_cluster_vip=192.168.133.25
+#openshift_master_cluster_public_vip=192.168.133.25
+#openshift_master_cluster_hostname=openshift-ansible.test.example.com
+#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com
+
+# Override the default controller lease ttl
+#osm_controller_lease_ttl=30
+
+# default subdomain to use for exposed routes
+#osm_default_subdomain=apps.test.example.com
+
+# additional cors origins
+#osm_custom_cors_origins=['foo.example.com', 'bar.example.com']
+
+# default project node selector
+#osm_default_node_selector='region=primary'
+
+# Default storage plugin dependencies to install. By default, the ceph and
+# glusterfs plugin dependencies will be installed, if available.
+#osn_storage_plugin_deps=['ceph','glusterfs']
+
+# default selectors for router and registry services
+# openshift_router_selector='region=infra'
+# openshift_registry_selector='region=infra'
+
+# Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet')
+# os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'
+
+# Disable the OpenShift SDN plugin
+# openshift_use_openshift_sdn=False
+
+# set RPM version for debugging purposes
+#openshift_pkg_version=-1.1
+
+# Configure custom named certificates
+# NOTE: openshift_master_named_certificates is cached on masters and is an
+# additive fact, meaning that each run with a different set of certificates
+# will add the newly provided certificates to the cached set of certificates.
+# If you would like openshift_master_named_certificates to be overwritten with
+# the provided value, specify openshift_master_overwrite_named_certificates.
+#openshift_master_overwrite_named_certificates: true
+#
+# Provide local certificate paths which will be deployed to masters
+#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key"}]
+#
+# Detected names may be overridden by specifying the "names" key
+#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "names": ["public-master-host.com"]}]
+
+# Session options
+#openshift_master_session_name=ssn
+#openshift_master_session_max_seconds=3600
+
+# An authentication and encryption secret will be generated if secrets
+# are not provided. If provided, openshift_master_session_auth_secrets
+# and openshift_master_session_encryption_secrets must be of equal length.
+#
+# Signing secrets, used to authenticate sessions using
+# HMAC. Recommended to use secrets with 32 or 64 bytes.
+#openshift_master_session_auth_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO']
+#
+# Encrypting secrets, used to encrypt sessions. Must be 16, 24, or 32
+# characters long, to select AES-128, AES-192, or AES-256.
+#openshift_master_session_encryption_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO']
+
+# configure how often node iptables rules are refreshed
+#openshift_node_iptables_sync_period=5s
+
+# Configure nodeIP in the node config
+# This is needed in cases where node traffic is desired to go over an
+# interface other than the default network interface.
+#openshift_node_set_node_ip=True
+
+# Force setting of system hostname when configuring OpenShift
+# This works around issues related to installations that do not have valid dns
+# entries for the interfaces attached to the host.
+#openshift_set_hostname=True
+
+# Configure dnsIP in the node config
+#openshift_dns_ip=172.30.0.1
+
+# host group for masters
+[masters]
+ose3-master[1:3]-ansible.test.example.com
+
+[etcd]
+ose3-etcd[1:3]-ansible.test.example.com
+
+[lb]
+ose3-lb-ansible.test.example.com
+
+# NOTE: Currently we require that masters be part of the SDN which requires that they also be nodes
+# However, in order to ensure that your masters are not burdened with running pods you should
+# make them unschedulable by adding openshift_schedulable=False to any node that's also a master.
+[nodes]
+ose3-master[1:3]-ansible.test.example.com
+ose3-node[1:2]-ansible.test.example.com openshift_node_labels="{'region': 'primary', 'zone': 'default'}"
diff --git a/inventory/byo/hosts.example b/inventory/byo/hosts.ose.example
index 1a67cc290..5a4310298 100644
--- a/inventory/byo/hosts.example
+++ b/inventory/byo/hosts.ose.example
@@ -18,26 +18,29 @@ ansible_ssh_user=root
# user must be configured for passwordless sudo
#ansible_sudo=true
-# deployment type valid values are origin, online and enterprise
-deployment_type=atomic-enterprise
+# deployment type valid values are origin, online, atomic-enterprise, and openshift-enterprise
+deployment_type=openshift-enterprise
# Enable cluster metrics
#use_cluster_metrics=true
-# Pre-release registry URL
-#oreg_url=example.com/openshift3/ose-${component}:${version}
-
-# Pre-release Dev puddle repo
-#openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterprise/3.0/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}]
+# Add additional, insecure, and blocked registries to global docker configuration
+# For enterprise deployment types we ensure that registry.access.redhat.com is
+# included if you do not include it
+#cli_docker_additional_registries=registry.example.com
+#cli_docker_insecure_registries=registry.example.com
+#cli_docker_blocked_registries=registry.hacker.com
-# Pre-release Errata puddle repo
-#openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterpriseErrata/3.0/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}]
+# Alternate image format string. If you're not modifying the format string and
+# only need to inject your own registry you may want to consider
+# cli_docker_additional_registries instead
+#oreg_url=example.com/openshift3/ose-${component}:${version}
-# Origin copr repo
-#openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name': 'OpenShift Origin COPR', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/', 'enabled': 1, 'gpgcheck': 1, gpgkey: 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}]
+# Additional yum repos to install
+#openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://example.com/puddle/build/AtomicOpenShift/3.1/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}]
# htpasswd auth
-openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/openshift/htpasswd'}]
+openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/htpasswd'}]
# Allow all auth
#openshift_master_identity_providers=[{'name': 'allow_all', 'login': 'true', 'challenge': 'true', 'kind': 'AllowAllPasswordIdentityProvider'}]
@@ -109,7 +112,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# openshift_use_openshift_sdn=False
# set RPM version for debugging purposes
-#openshift_pkg_version=-3.0.0.0
+#openshift_pkg_version=-3.1.0.0
# Configure custom named certificates
# NOTE: openshift_master_named_certificates is cached on masters and is an
@@ -154,6 +157,9 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# entries for the interfaces attached to the host.
#openshift_set_hostname=True
+# Configure dnsIP in the node config
+#openshift_dns_ip=172.30.0.1
+
# host group for masters
[masters]
ose3-master[1:3]-ansible.test.example.com
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index 09569761f..962b9e85b 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -5,7 +5,7 @@
}
Name: openshift-ansible
-Version: 3.0.16
+Version: 3.0.17
Release: 1%{?dist}
Summary: Openshift and Atomic Enterprise Ansible
License: ASL 2.0
@@ -258,6 +258,77 @@ Atomic OpenShift Utilities includes
%changelog
+* Tue Dec 08 2015 Brenton Leanhardt <bleanhar@redhat.com> 3.0.17-1
+- Improving output when gathering facts (bleanhar@redhat.com)
+- Bug 1287977 - Incorrect check output from atomic-openshift-installer when
+ working with preconfigured load balancer (bleanhar@redhat.com)
+- Add unique AEP, OSE, and Origin BYO inventories (sdodson@redhat.com)
+- bring the docker udev workaround into openshift-ansible.git
+ (jdiaz@redhat.com)
+- Zabbix: put in a note about trigger prototype dependency
+ (mwoodson@redhat.com)
+- Zabbix: added dependency for inode disk check (mwoodson@redhat.com)
+- Zabbix: added dependency for disk check (mwoodson@redhat.com)
+- zabbix: removed ethernet graphs (mwoodson@redhat.com)
+- Zabbix: added trigger dependencies to certain master checks
+ (mwoodson@redhat.com)
+- ManageIQ Service Account: added role for ManageIQ service account
+ (efreiber@redhat.com)
+- added the pv zabbix keys (mwoodson@redhat.com)
+- Refactor dns options and facts. (abutcher@redhat.com)
+- Fix openshift_facts playbook for yum/dnf changes (jdetiber@redhat.com)
+- Configured master count should be 1 for pacemaker ha. (abutcher@redhat.com)
+- Fedora changes: (admiller@redhat.com)
+- Centralize etcd/schedulability logic for each host. (dgoodwin@redhat.com)
+- added upgrade playbook for online (sedgar@redhat.com)
+- Improved installation summary. (dgoodwin@redhat.com)
+- Fix kubernetes service ip gathering. (abutcher@redhat.com)
+- added docker registry cluster check (mwoodson@redhat.com)
+- Add warning for HA deployments with < 3 dedicated nodes.
+ (dgoodwin@redhat.com)
+- Cleanup more schedulable typos. (dgoodwin@redhat.com)
+- Fix validation for BasicAuthPasswordIdentityProvider (tschan@puzzle.ch)
+- Fix ec2 instance type lookups (jdetiber@redhat.com)
+- remove debug logging from scc/privileged patch command (jdetiber@redhat.com)
+- Set api version for oc commands (jdetiber@redhat.com)
+- 3.1 upgrade - use --api-version for patch commands (jdetiber@redhat.com)
+- Fix bug when warning on no dedicated nodes. (dgoodwin@redhat.com)
+- Suggest dedicated nodes for an HA deployment. (dgoodwin@redhat.com)
+- Error out if no load balancer specified. (dgoodwin@redhat.com)
+- Adjust requirement for 3 masters for HA deployments. (dgoodwin@redhat.com)
+- Fixing 'unscheduleable' typo (bleanhar@redhat.com)
+- Update IMAGE_PREFIX and IMAGE_VERSION values in hawkular template
+ (nakayamakenjiro@gmail.com)
+- Improved output when re-running after editing config. (dgoodwin@redhat.com)
+- Print a system summary after adding each. (dgoodwin@redhat.com)
+- Text improvements for host specification. (dgoodwin@redhat.com)
+- Assert etcd section written for HA installs. (dgoodwin@redhat.com)
+- Breakout a test fixture to reduce module size. (dgoodwin@redhat.com)
+- Pylint touchups. (dgoodwin@redhat.com)
+- Trim assertions in HA testing. (dgoodwin@redhat.com)
+- Test unattended HA quick install. (dgoodwin@redhat.com)
+- Don't prompt to continue during unattended installs. (dgoodwin@redhat.com)
+- Block re-use of master/node as load balancer in attended install.
+ (dgoodwin@redhat.com)
+- Add -q flag to remove unwanted output (such as mirror and cache information)
+ (urs.breu@ergon.ch)
+- Uninstall: only restart docker on node hosts. (abutcher@redhat.com)
+- Explicitly set schedulable when masters == nodes. (dgoodwin@redhat.com)
+- Use admin.kubeconfig for get svc ip. (abutcher@redhat.com)
+- Point enterprise metrics at registry.access.redhat.com/openshift3/metrics-
+ (sdodson@redhat.com)
+- Make sure that OpenSSL is installed before use (fsimonce@redhat.com)
+- fixes for installer wrapper scaleup (jdetiber@redhat.com)
+- addtl aws fixes (jdetiber@redhat.com)
+- Fix failure when seboolean not present (jdetiber@redhat.com)
+- fix addNodes.yml (jdetiber@redhat.com)
+- more aws support for scaleup (jdetiber@redhat.com)
+- start of aws scaleup (jdetiber@redhat.com)
+- Improve scaleup playbook (jdetiber@redhat.com)
+- Update openshift_repos to refresh package cache on changes
+ (jdetiber@redhat.com)
+- Add etcd nodes management in OpenStack (lhuard@amadeus.com)
+
* Tue Nov 24 2015 Brenton Leanhardt <bleanhar@redhat.com> 3.0.16-1
- Silencing pylint branch errors for now for the atomic-openshift-installer
harness (bleanhar@redhat.com)
diff --git a/playbooks/adhoc/bootstrap-fedora.yml b/playbooks/adhoc/bootstrap-fedora.yml
new file mode 100644
index 000000000..de9f36c8a
--- /dev/null
+++ b/playbooks/adhoc/bootstrap-fedora.yml
@@ -0,0 +1,5 @@
+- hosts: OSv3
+ gather_facts: false
+ tasks:
+ - name: install python and deps for ansible modules
+ raw: dnf install -y python2 python2-dnf libselinux-python libsemanage-python
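
This playbook relies on the `raw` module because the target hosts have no python2 yet, so ordinary Ansible modules cannot execute there. A hedged usage sketch (substitute your own inventory path); once the bootstrap finishes, regular modules such as `ping` should respond:

```sh
# Bootstrap python2 on the Fedora hosts
ansible-playbook ./playbooks/adhoc/bootstrap-fedora.yml -i $PATH_TO_INVENTORY_FILE

# Verify that ordinary modules now work
ansible OSv3 -i $PATH_TO_INVENTORY_FILE -m ping
```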
diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml
index 1f1ada3f0..08a2ea6fb 100644
--- a/playbooks/adhoc/uninstall.yml
+++ b/playbooks/adhoc/uninstall.yml
@@ -48,7 +48,39 @@
- pcsd
- yum: name={{ item }} state=absent
- when: not is_atomic | bool
+ when: ansible_pkg_mgr == "yum" and not is_atomic | bool
+ with_items:
+ - atomic-enterprise
+ - atomic-enterprise-master
+ - atomic-enterprise-node
+ - atomic-enterprise-sdn-ovs
+ - atomic-openshift
+ - atomic-openshift-clients
+ - atomic-openshift-master
+ - atomic-openshift-node
+ - atomic-openshift-sdn-ovs
+ - corosync
+ - etcd
+ - openshift
+ - openshift-master
+ - openshift-node
+ - openshift-sdn
+ - openshift-sdn-ovs
+ - openvswitch
+ - origin
+ - origin-clients
+ - origin-master
+ - origin-node
+ - origin-sdn-ovs
+ - pacemaker
+ - pcs
+ - tuned-profiles-atomic-enterprise-node
+ - tuned-profiles-atomic-openshift-node
+ - tuned-profiles-openshift-node
+ - tuned-profiles-origin-node
+
+ - dnf: name={{ item }} state=absent
+ when: ansible_pkg_mgr == "dnf" and not is_atomic | bool
with_items:
- atomic-enterprise
- atomic-enterprise-master
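
This yum/dnf duplication repeats across nearly every role touched by this commit. As an aside, Ansible also allows the module name itself to be templated through the generic `action` syntax, which would collapse each pair into a single task — a sketch under that assumption, not what this commit does:

```yaml
# Sketch only: template the module name from the detected package manager
# instead of maintaining parallel yum/dnf tasks.
- name: Remove cluster packages
  action: "{{ ansible_pkg_mgr }} name={{ item }} state=absent"
  when: not is_atomic | bool
  with_items:
    - atomic-openshift
    - etcd
    - openvswitch
```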
diff --git a/playbooks/aws/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml b/playbooks/aws/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
new file mode 100644
index 000000000..8cad51b5e
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
@@ -0,0 +1,33 @@
+---
+# This playbook upgrades an existing AWS cluster, leaving nodes untouched if used with an 'online' deployment type.
+# Usage:
+# ansible-playbook playbooks/aws/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml -e deployment_type=online -e cluster_id=<cluster_id>
+- hosts: localhost
+ gather_facts: no
+ vars_files:
+ - ../../vars.yml
+ - "../../vars.{{ deployment_type }}.{{ cluster_id }}.yml"
+
+ tasks:
+ - set_fact:
+ g_ssh_user_tmp: "{{ deployment_vars[deployment_type].ssh_user }}"
+ g_sudo_tmp: "{{ deployment_vars[deployment_type].sudo }}"
+
+ - set_fact:
+ tmp_nodes_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-node' }}"
+ when: deployment_type != 'online'
+
+- include: ../../../../common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
+ vars:
+ g_etcd_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-etcd' }}"
+ g_lb_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-lb' }}"
+ g_masters_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-master' }}"
+ g_nodes_group: "{{ tmp_nodes_group | default('') }}"
+ g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}"
+ g_sudo: "{{ hostvars.localhost.g_sudo_tmp }}"
+ g_nodeonmaster: true
+ openshift_cluster_id: "{{ cluster_id }}"
+ openshift_debug_level: 2
+ openshift_deployment_type: "{{ deployment_type }}"
+ openshift_hostname: "{{ ec2_private_ip_address }}"
+ openshift_public_hostname: "{{ ec2_ip_address }}"
diff --git a/playbooks/byo/openshift_facts.yml b/playbooks/byo/openshift_facts.yml
index 6d7c12fd4..babdfb952 100644
--- a/playbooks/byo/openshift_facts.yml
+++ b/playbooks/byo/openshift_facts.yml
@@ -1,7 +1,6 @@
---
- name: Gather Cluster facts
- hosts: all
- gather_facts: no
+ hosts: OSEv3
roles:
- openshift_facts
tasks:
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
index a8bd634d3..482fa8441 100644
--- a/playbooks/common/openshift-cluster/config.yml
+++ b/playbooks/common/openshift-cluster/config.yml
@@ -6,6 +6,3 @@
- include: ../openshift-master/config.yml
- include: ../openshift-node/config.yml
- vars:
- osn_cluster_dns_domain: "{{ hostvars[groups.oo_first_master.0].openshift.dns.domain }}"
- osn_cluster_dns_ip: "{{ hostvars[groups.oo_first_master.0].cluster_dns_ip }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
index 00ebf4ce6..0309e8a77 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
@@ -36,9 +36,9 @@
- fail:
msg: >
- This upgrade is only supported for origin and openshift-enterprise
+ This upgrade is only supported for origin, openshift-enterprise, and online
deployment types
- when: deployment_type not in ['origin','openshift-enterprise']
+ when: deployment_type not in ['origin','openshift-enterprise', 'online']
- fail:
msg: >
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index 1c8a92122..becd68dbe 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -352,22 +352,8 @@
- openshift_examples
- role: openshift_cluster_metrics
when: openshift.common.use_cluster_metrics | bool
-
- # TODO: Setting the cluster dns ip should be pushed into openshift-facts
-- name: Determine cluster dns ip
- hosts: oo_first_master
- tasks:
- - name: Get master service ip
- # This command has to be on a single line.
- command: "{{ openshift.common.client_binary }} -n default --config={{ openshift.common.config_base }}/master/admin.kubeconfig get -o template svc kubernetes --template=\\{\\{.spec.clusterIP\\}\\} --output-version=v1"
- register: master_service_ip_output
- when: openshift.common.version_greater_than_3_1_or_1_1 | bool
- - set_fact:
- cluster_dns_ip: "{{ hostvars[groups.oo_first_master.0].openshift.dns.ip }}"
- when: not openshift.common.version_greater_than_3_1_or_1_1 | bool
- - set_fact:
- cluster_dns_ip: "{{ master_service_ip_output.stdout }}"
- when: openshift.common.version_greater_than_3_1_or_1_1 | bool
+ - role: openshift_manageiq
+ when: openshift.common.use_manageiq | bool
- name: Enable cockpit
hosts: oo_first_master
diff --git a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
index 40e4ab22c..bfd73c777 100644
--- a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
+++ b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
@@ -43,6 +43,11 @@ parameters:
description: Source of legitimate ssh connections
default: 0.0.0.0/0
+ num_etcd:
+ type: number
+ label: Number of etcd nodes
+ description: Number of etcd nodes
+
num_masters:
type: number
label: Number of masters
@@ -58,6 +63,11 @@ parameters:
label: Number of infrastructure nodes
description: Number of infrastructure nodes
+ etcd_image:
+ type: string
+ label: Etcd image
+ description: Name of the image for the etcd servers
+
master_image:
type: string
label: Master image
@@ -73,6 +83,11 @@ parameters:
label: Infra image
description: Name of the image for the infra node servers
+ etcd_flavor:
+ type: string
+ label: Etcd flavor
+ description: Flavor of the etcd servers
+
master_flavor:
type: string
label: Master flavor
@@ -90,6 +105,18 @@ parameters:
outputs:
+ etcd_names:
+ description: Name of the etcds
+ value: { get_attr: [ etcd, name ] }
+
+ etcd_ips:
+ description: IPs of the etcds
+ value: { get_attr: [ etcd, private_ip ] }
+
+ etcd_floating_ips:
+ description: Floating IPs of the etcds
+ value: { get_attr: [ etcd, floating_ip ] }
+
master_names:
description: Name of the masters
value: { get_attr: [ masters, name ] }
@@ -220,6 +247,37 @@ resources:
port_range_min: 24224
port_range_max: 24224
+ etcd-secgrp:
+ type: OS::Neutron::SecurityGroup
+ properties:
+ name:
+ str_replace:
+ template: openshift-ansible-cluster_id-etcd-secgrp
+ params:
+ cluster_id: { get_param: cluster_id }
+ description:
+ str_replace:
+ template: Security group for cluster_id etcd cluster
+ params:
+ cluster_id: { get_param: cluster_id }
+ rules:
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 22
+ port_range_max: 22
+ remote_ip_prefix: { get_param: ssh_incoming }
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 2379
+ port_range_max: 2379
+ remote_mode: remote_group_id
+ remote_group_id: { get_resource: master-secgrp }
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 2380
+ port_range_max: 2380
+ remote_mode: remote_group_id
+
node-secgrp:
type: OS::Neutron::SecurityGroup
properties:
@@ -274,6 +332,36 @@ resources:
port_range_min: 443
port_range_max: 443
+ etcd:
+ type: OS::Heat::ResourceGroup
+ properties:
+ count: { get_param: num_etcd }
+ resource_def:
+ type: heat_stack_server.yaml
+ properties:
+ name:
+ str_replace:
+ template: cluster_id-k8s_type-%index%
+ params:
+ cluster_id: { get_param: cluster_id }
+ k8s_type: etcd
+ cluster_id: { get_param: cluster_id }
+ type: etcd
+ image: { get_param: etcd_image }
+ flavor: { get_param: etcd_flavor }
+ key_name: { get_resource: keypair }
+ net: { get_resource: net }
+ subnet: { get_resource: subnet }
+ secgrp:
+ - { get_resource: etcd-secgrp }
+ floating_network: { get_param: floating_ip_pool }
+ net_name:
+ str_replace:
+ template: openshift-ansible-cluster_id-net
+ params:
+ cluster_id: { get_param: cluster_id }
+ depends_on: interface
+
masters:
type: OS::Heat::ResourceGroup
properties:
diff --git a/playbooks/openstack/openshift-cluster/launch.yml b/playbooks/openstack/openshift-cluster/launch.yml
index 651aef40b..b18512495 100644
--- a/playbooks/openstack/openshift-cluster/launch.yml
+++ b/playbooks/openstack/openshift-cluster/launch.yml
@@ -35,12 +35,15 @@
-P floating_ip_pool={{ openstack_floating_ip_pool }}
-P ssh_public_key="{{ openstack_ssh_public_key }}"
-P ssh_incoming={{ openstack_ssh_access_from }}
+ -P num_etcd={{ num_etcd }}
-P num_masters={{ num_masters }}
-P num_nodes={{ num_nodes }}
-P num_infra={{ num_infra }}
+ -P etcd_image={{ deployment_vars[deployment_type].image }}
-P master_image={{ deployment_vars[deployment_type].image }}
-P node_image={{ deployment_vars[deployment_type].image }}
-P infra_image={{ deployment_vars[deployment_type].image }}
+ -P etcd_flavor={{ openstack_flavor["etcd"] }}
-P master_flavor={{ openstack_flavor["master"] }}
-P node_flavor={{ openstack_flavor["node"] }}
-P infra_flavor={{ openstack_flavor["infra"] }}
@@ -61,6 +64,18 @@
- set_fact:
parsed_outputs: "{{ stack_show_result | oo_parse_heat_stack_outputs }}"
+ - name: Add new etcd instances groups and variables
+ add_host:
+ hostname: '{{ item[0] }}'
+ ansible_ssh_host: '{{ item[2] }}'
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ groups: 'tag_env_{{ cluster_id }}, tag_host-type_etcd, tag_env-host-type_{{ cluster_id }}-openshift-etcd, tag_sub-host-type_default'
+ with_together:
+ - parsed_outputs.etcd_names
+ - parsed_outputs.etcd_ips
+ - parsed_outputs.etcd_floating_ips
+
- name: Add new master instances groups and variables
add_host:
hostname: '{{ item[0] }}'
diff --git a/playbooks/openstack/openshift-cluster/vars.yml b/playbooks/openstack/openshift-cluster/vars.yml
index 262d3f4ed..e3796c91f 100644
--- a/playbooks/openstack/openshift-cluster/vars.yml
+++ b/playbooks/openstack/openshift-cluster/vars.yml
@@ -14,6 +14,7 @@ openstack_ssh_public_key: "{{ lookup('file', lookup('oo_option', 'public_k
openstack_ssh_access_from: "{{ lookup('oo_option', 'ssh_from') |
default('0.0.0.0/0', True) }}"
openstack_flavor:
+ etcd: "{{ lookup('oo_option', 'etcd_flavor' ) | default('m1.small', True) }}"
master: "{{ lookup('oo_option', 'master_flavor' ) | default('m1.small', True) }}"
infra: "{{ lookup('oo_option', 'infra_flavor' ) | default('m1.small', True) }}"
node: "{{ lookup('oo_option', 'node_flavor' ) | default('m1.medium', True) }}"
diff --git a/roles/ansible/tasks/main.yml b/roles/ansible/tasks/main.yml
index 5d20a3b35..f79273824 100644
--- a/roles/ansible/tasks/main.yml
+++ b/roles/ansible/tasks/main.yml
@@ -5,6 +5,13 @@
yum:
pkg: ansible
state: installed
+ when: ansible_pkg_mgr == "yum"
+
+- name: Install Ansible
+ dnf:
+ pkg: ansible
+ state: installed
+ when: ansible_pkg_mgr == "dnf"
- include: config.yml
vars:
diff --git a/roles/cockpit/tasks/main.yml b/roles/cockpit/tasks/main.yml
index 875cbad21..8410e7c90 100644
--- a/roles/cockpit/tasks/main.yml
+++ b/roles/cockpit/tasks/main.yml
@@ -8,6 +8,18 @@
- cockpit-shell
- cockpit-bridge
- "{{ cockpit_plugins }}"
+ when: ansible_pkg_mgr == "yum"
+
+- name: Install cockpit-ws
+ dnf:
+ name: "{{ item }}"
+ state: present
+ with_items:
+ - cockpit-ws
+ - cockpit-shell
+ - cockpit-bridge
+ - "{{ cockpit_plugins }}"
+ when: ansible_pkg_mgr == "dnf"
- name: Enable cockpit-ws
service:
diff --git a/roles/copr_cli/tasks/main.yml b/roles/copr_cli/tasks/main.yml
index f7ef1c26e..f8496199d 100644
--- a/roles/copr_cli/tasks/main.yml
+++ b/roles/copr_cli/tasks/main.yml
@@ -2,3 +2,9 @@
- yum:
name: copr-cli
state: present
+ when: ansible_pkg_mgr == "yum"
+
+- dnf:
+ name: copr-cli
+ state: present
+ when: ansible_pkg_mgr == "dnf"
diff --git a/roles/docker/README.md b/roles/docker/README.md
index 225dd44b9..46f259eb7 100644
--- a/roles/docker/README.md
+++ b/roles/docker/README.md
@@ -1,38 +1,38 @@
Role Name
=========
-A brief description of the role goes here.
+Ensures the docker package is installed and optionally raises the timeout for systemd-udevd.service to 5 minutes.
Requirements
------------
-Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
+None
Role Variables
--------------
-A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
+udevw_udevd_dir: location of systemd config for systemd-udevd.service
+docker_udev_workaround: raises udevd timeout to 5 minutes (https://bugzilla.redhat.com/show_bug.cgi?id=1272446)
Dependencies
------------
-A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
+None
Example Playbook
----------------
-Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
-
- hosts: servers
roles:
- - { role: username.rolename, x: 42 }
+ - role: docker
+ docker_udev_workaround: "true"
License
-------
-BSD
+ASL 2.0
Author Information
------------------
-An optional section for the role authors to include contact information, or a website (HTML is not allowed).
+OpenShift operations, Red Hat, Inc
diff --git a/roles/docker/handlers/main.yml b/roles/docker/handlers/main.yml
index eca7419c1..7d60f1891 100644
--- a/roles/docker/handlers/main.yml
+++ b/roles/docker/handlers/main.yml
@@ -2,3 +2,8 @@
- name: restart docker
service: name=docker state=restarted
+
+- name: restart udev
+ service:
+ name: systemd-udevd
+ state: restarted
diff --git a/roles/docker/meta/main.yml b/roles/docker/meta/main.yml
index c5c362c60..6e2c98601 100644
--- a/roles/docker/meta/main.yml
+++ b/roles/docker/meta/main.yml
@@ -1,124 +1,12 @@
---
galaxy_info:
- author: your name
- description:
- company: your company (optional)
- # Some suggested licenses:
- # - BSD (default)
- # - MIT
- # - GPLv2
- # - GPLv3
- # - Apache
- # - CC-BY
- license: license (GPLv2, CC-BY, etc)
+ author: OpenShift
+ description: docker package install
+ company: Red Hat, Inc
+ license: ASL 2.0
min_ansible_version: 1.2
- #
- # Below are all platforms currently available. Just uncomment
- # the ones that apply to your role. If you don't see your
- # platform on this list, let us know and we'll get it added!
- #
- #platforms:
- #- name: EL
- # versions:
- # - all
- # - 5
- # - 6
- # - 7
- #- name: GenericUNIX
- # versions:
- # - all
- # - any
- #- name: Fedora
- # versions:
- # - all
- # - 16
- # - 17
- # - 18
- # - 19
- # - 20
- #- name: opensuse
- # versions:
- # - all
- # - 12.1
- # - 12.2
- # - 12.3
- # - 13.1
- # - 13.2
- #- name: Amazon
- # versions:
- # - all
- # - 2013.03
- # - 2013.09
- #- name: GenericBSD
- # versions:
- # - all
- # - any
- #- name: FreeBSD
- # versions:
- # - all
- # - 8.0
- # - 8.1
- # - 8.2
- # - 8.3
- # - 8.4
- # - 9.0
- # - 9.1
- # - 9.1
- # - 9.2
- #- name: Ubuntu
- # versions:
- # - all
- # - lucid
- # - maverick
- # - natty
- # - oneiric
- # - precise
- # - quantal
- # - raring
- # - saucy
- # - trusty
- #- name: SLES
- # versions:
- # - all
- # - 10SP3
- # - 10SP4
- # - 11
- # - 11SP1
- # - 11SP2
- # - 11SP3
- #- name: GenericLinux
- # versions:
- # - all
- # - any
- #- name: Debian
- # versions:
- # - all
- # - etch
- # - lenny
- # - squeeze
- # - wheezy
- #
- # Below are all categories currently available. Just as with
- # the platforms above, uncomment those that apply to your role.
- #
- #categories:
- #- cloud
- #- cloud:ec2
- #- cloud:gce
- #- cloud:rax
- #- clustering
- #- database
- #- database:nosql
- #- database:sql
- #- development
- #- monitoring
- #- networking
- #- packaging
- #- system
- #- web
+ platforms:
+ - name: EL
+ versions:
+ - 7
dependencies: []
- # List your role dependencies here, one per line. Only
- # dependencies available via galaxy should be listed here.
- # Be sure to remove the '[]' above if you add dependencies
- # to this list.
-
diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml
index 96949230d..857674454 100644
--- a/roles/docker/tasks/main.yml
+++ b/roles/docker/tasks/main.yml
@@ -2,7 +2,14 @@
# tasks file for docker
- name: Install docker
yum: pkg=docker
+ when: ansible_pkg_mgr == "yum"
+
+- name: Install docker
+ dnf: pkg=docker
+ when: ansible_pkg_mgr == "dnf"
- name: enable and start the docker service
service: name=docker enabled=yes state=started
+- include: udev_workaround.yml
+ when: docker_udev_workaround | default(False)
diff --git a/roles/docker/tasks/udev_workaround.yml b/roles/docker/tasks/udev_workaround.yml
new file mode 100644
index 000000000..3c236f698
--- /dev/null
+++ b/roles/docker/tasks/udev_workaround.yml
@@ -0,0 +1,30 @@
+---
+
+- name: Getting current systemd-udevd exec command
+ command: grep -e "^ExecStart=" /lib/systemd/system/systemd-udevd.service
+ changed_when: false
+ register: udevw_udev_start_cmd
+
+- name: Ensure systemd-udevd.service.d directory exists
+ file:
+ path: "{{ udevw_udevd_dir }}"
+ state: directory
+
+- name: Create systemd-udevd override file
+ copy:
+ content: |
+ [Service]
+      # Need blank ExecStart to "clear" the pre-existing one
+ ExecStart=
+ {{ udevw_udev_start_cmd.stdout }} --event-timeout=300
+ dest: "{{ udevw_udevd_dir }}/override.conf"
+ owner: root
+ mode: "0644"
+ notify:
+ - restart udev
+ register: udevw_override_conf
+
+- name: reload systemd config files
+ command: systemctl daemon-reload
+ when: udevw_override_conf | changed
+
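For illustration, on a host whose stock unit file contains `ExecStart=/usr/lib/systemd/systemd-udevd` (that exact path is an assumption; it varies by distribution), the tasks above would render an override roughly like:

```ini
# /etc/systemd/system/systemd-udevd.service.d/override.conf (illustrative)
[Service]
# Need blank ExecStart to "clear" the pre-existing one
ExecStart=
ExecStart=/usr/lib/systemd/systemd-udevd --event-timeout=300
```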
diff --git a/roles/docker/vars/main.yml b/roles/docker/vars/main.yml
new file mode 100644
index 000000000..162487545
--- /dev/null
+++ b/roles/docker/vars/main.yml
@@ -0,0 +1,3 @@
+---
+
+udevw_udevd_dir: /etc/systemd/system/systemd-udevd.service.d
diff --git a/roles/etcd/README.md b/roles/etcd/README.md
index 88e4ff874..329a926c0 100644
--- a/roles/etcd/README.md
+++ b/roles/etcd/README.md
@@ -7,7 +7,7 @@ Requirements
------------
This role assumes it's being deployed on a RHEL/Fedora based host with package
-named 'etcd' available via yum.
+named 'etcd' available via yum or dnf (conditionally).
Role Variables
--------------
diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml
index fcbdecd37..efaab5f31 100644
--- a/roles/etcd/tasks/main.yml
+++ b/roles/etcd/tasks/main.yml
@@ -9,6 +9,11 @@
- name: Install etcd
yum: pkg=etcd-2.* state=present
+ when: ansible_pkg_mgr == "yum"
+
+- name: Install etcd
+ dnf: pkg=etcd* state=present
+ when: ansible_pkg_mgr == "dnf"
- name: Validate permissions on the config dir
file:
diff --git a/roles/etcd_common/defaults/main.yml b/roles/etcd_common/defaults/main.yml
index 96f4b63af..3af509448 100644
--- a/roles/etcd_common/defaults/main.yml
+++ b/roles/etcd_common/defaults/main.yml
@@ -1,5 +1,5 @@
---
-etcd_peers_group: etcd
+etcd_peers_group: oo_etcd_to_config
# etcd server vars
etcd_conf_dir: /etc/etcd
diff --git a/roles/flannel/README.md b/roles/flannel/README.md
index b8aa830ac..8f271aada 100644
--- a/roles/flannel/README.md
+++ b/roles/flannel/README.md
@@ -7,7 +7,8 @@ Requirements
------------
This role assumes it's being deployed on a RHEL/Fedora based host with package
-named 'flannel' available via yum, in version superior to 0.3.
+named 'flannel' available via yum or dnf (conditionally), in a version newer
+than 0.3.
Role Variables
--------------
diff --git a/roles/flannel/tasks/main.yml b/roles/flannel/tasks/main.yml
index acfb009ec..86e1bc96e 100644
--- a/roles/flannel/tasks/main.yml
+++ b/roles/flannel/tasks/main.yml
@@ -2,6 +2,12 @@
- name: Install flannel
sudo: true
yum: pkg=flannel state=present
+ when: ansible_pkg_mgr == "yum"
+
+- name: Install flannel
+ sudo: true
+ dnf: pkg=flannel state=present
+ when: ansible_pkg_mgr == "dnf"
- name: Set flannel etcd url
sudo: true
diff --git a/roles/fluentd_master/tasks/main.yml b/roles/fluentd_master/tasks/main.yml
index 55cd94460..43c499b4d 100644
--- a/roles/fluentd_master/tasks/main.yml
+++ b/roles/fluentd_master/tasks/main.yml
@@ -4,6 +4,13 @@
yum:
name: 'http://packages.treasuredata.com/2/redhat/7/x86_64/td-agent-2.2.0-0.x86_64.rpm'
state: present
+ when: ansible_pkg_mgr == "yum"
+
+- name: download and install td-agent
+ dnf:
+ name: 'http://packages.treasuredata.com/2/redhat/7/x86_64/td-agent-2.2.0-0.x86_64.rpm'
+ state: present
+ when: ansible_pkg_mgr == "dnf"
- name: Verify fluentd plugin installed
command: '/opt/td-agent/embedded/bin/gem query -i fluent-plugin-kubernetes'
diff --git a/roles/fluentd_node/tasks/main.yml b/roles/fluentd_node/tasks/main.yml
index f9ef30b83..827a1c075 100644
--- a/roles/fluentd_node/tasks/main.yml
+++ b/roles/fluentd_node/tasks/main.yml
@@ -4,6 +4,13 @@
yum:
name: 'http://packages.treasuredata.com/2/redhat/7/x86_64/td-agent-2.2.0-0.x86_64.rpm'
state: present
+ when: ansible_pkg_mgr == "yum"
+
+- name: download and install td-agent
+ dnf:
+ name: 'http://packages.treasuredata.com/2/redhat/7/x86_64/td-agent-2.2.0-0.x86_64.rpm'
+ state: present
+ when: ansible_pkg_mgr == "dnf"
- name: Verify fluentd plugin installed
command: '/opt/td-agent/embedded/bin/gem query -i fluent-plugin-kubernetes'
diff --git a/roles/haproxy/tasks/main.yml b/roles/haproxy/tasks/main.yml
index 5638b7313..5d015fadd 100644
--- a/roles/haproxy/tasks/main.yml
+++ b/roles/haproxy/tasks/main.yml
@@ -3,6 +3,13 @@
yum:
pkg: haproxy
state: present
+ when: ansible_pkg_mgr == "yum"
+
+- name: Install haproxy
+ dnf:
+ pkg: haproxy
+ state: present
+ when: ansible_pkg_mgr == "dnf"
- name: Configure haproxy
template:
diff --git a/roles/kube_nfs_volumes/tasks/main.yml b/roles/kube_nfs_volumes/tasks/main.yml
index d1dcf261a..3fcb9fd18 100644
--- a/roles/kube_nfs_volumes/tasks/main.yml
+++ b/roles/kube_nfs_volumes/tasks/main.yml
@@ -1,6 +1,11 @@
---
- name: Install pyparted (RedHat/Fedora)
yum: name=pyparted,python-httplib2 state=present
+ when: ansible_pkg_mgr == "yum"
+
+- name: Install pyparted (RedHat/Fedora)
+ dnf: name=pyparted,python-httplib2 state=present
+ when: ansible_pkg_mgr == "dnf"
- name: partition the drives
partitionpool: disks={{ disks }} force={{ force }} sizes={{ sizes }}
diff --git a/roles/kube_nfs_volumes/tasks/nfs.yml b/roles/kube_nfs_volumes/tasks/nfs.yml
index 559fcf17c..a58a7b824 100644
--- a/roles/kube_nfs_volumes/tasks/nfs.yml
+++ b/roles/kube_nfs_volumes/tasks/nfs.yml
@@ -1,6 +1,11 @@
---
- name: Install NFS server on Fedora/Red Hat
yum: name=nfs-utils state=present
+ when: ansible_pkg_mgr == "yum"
+
+- name: Install NFS server on Fedora/Red Hat
+ dnf: name=nfs-utils state=present
+ when: ansible_pkg_mgr == "dnf"
- name: Start rpcbind on Fedora/Red Hat
service: name=rpcbind state=started enabled=yes
diff --git a/roles/openshift_ansible_inventory/tasks/main.yml b/roles/openshift_ansible_inventory/tasks/main.yml
index f6919dada..2b99f8bcd 100644
--- a/roles/openshift_ansible_inventory/tasks/main.yml
+++ b/roles/openshift_ansible_inventory/tasks/main.yml
@@ -2,6 +2,16 @@
- yum:
name: "{{ item }}"
state: present
+ when: ansible_pkg_mgr == "yum"
+ with_items:
+ - openshift-ansible-inventory
+ - openshift-ansible-inventory-aws
+ - openshift-ansible-inventory-gce
+
+- dnf:
+ name: "{{ item }}"
+ state: present
+ when: ansible_pkg_mgr == "dnf"
with_items:
- openshift-ansible-inventory
- openshift-ansible-inventory-aws
diff --git a/roles/openshift_common/tasks/main.yml b/roles/openshift_common/tasks/main.yml
index 55065b3de..c0982290d 100644
--- a/roles/openshift_common/tasks/main.yml
+++ b/roles/openshift_common/tasks/main.yml
@@ -22,6 +22,7 @@
deployment_type: "{{ openshift_deployment_type }}"
use_fluentd: "{{ openshift_use_fluentd | default(None) }}"
use_flannel: "{{ openshift_use_flannel | default(None) }}"
+ use_manageiq: "{{ openshift_use_manageiq | default(None) }}"
# For enterprise versions < 3.1 and origin versions < 1.1 we want to set the
# hostname by default.
diff --git a/roles/openshift_expand_partition/README.md b/roles/openshift_expand_partition/README.md
index cd394e1ba..aed4ec871 100644
--- a/roles/openshift_expand_partition/README.md
+++ b/roles/openshift_expand_partition/README.md
@@ -8,7 +8,7 @@ partition, and then expanding the file system on the partition.
* A machine with a disk that is not fully utilized
-* cloud-utils-growpart rpm (either installed or avialable via yum)
+* cloud-utils-growpart rpm (either installed or available via yum or dnf)
* The partition you are expanding needs to be at the end of the partition list
diff --git a/roles/openshift_expand_partition/tasks/main.yml b/roles/openshift_expand_partition/tasks/main.yml
index 8bc399070..42e7903fd 100644
--- a/roles/openshift_expand_partition/tasks/main.yml
+++ b/roles/openshift_expand_partition/tasks/main.yml
@@ -1,6 +1,11 @@
---
- name: Ensure growpart is installed
yum: pkg=cloud-utils-growpart state=present
+ when: ansible_pkg_mgr == "yum"
+
+- name: Ensure growpart is installed
+ dnf: pkg=cloud-utils-growpart state=present
+ when: ansible_pkg_mgr == "dnf"
- name: Grow the partitions
command: "growpart {{oep_drive}} {{oep_partition}}"
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index b60e42c71..8b3402729 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -528,9 +528,9 @@ def set_aggregate_facts(facts):
internal_hostnames.add(facts['common']['hostname'])
internal_hostnames.add(facts['common']['ip'])
+ cluster_domain = facts['common']['dns_domain']
+
if 'master' in facts:
- # FIXME: not sure why but facts['dns']['domain'] fails
- cluster_domain = 'cluster.local'
if 'cluster_hostname' in facts['master']:
all_hostnames.add(facts['master']['cluster_hostname'])
if 'cluster_public_hostname' in facts['master']:
@@ -623,7 +623,7 @@ def set_deployment_facts_if_unset(facts):
service_type = 'atomic-openshift'
if deployment_type == 'origin':
service_type = 'origin'
- elif deployment_type in ['enterprise', 'online']:
+ elif deployment_type in ['enterprise']:
service_type = 'openshift'
facts['common']['service_type'] = service_type
if 'config_base' not in facts['common']:
@@ -985,7 +985,7 @@ class OpenShiftFacts(object):
Raises:
OpenShiftFactsUnsupportedRoleError:
"""
- known_roles = ['common', 'master', 'node', 'master_sdn', 'node_sdn', 'dns', 'etcd']
+ known_roles = ['common', 'master', 'node', 'master_sdn', 'node_sdn', 'etcd']
def __init__(self, role, filename, local_facts, additive_facts_to_overwrite=False):
self.changed = False
@@ -1053,9 +1053,10 @@ class OpenShiftFacts(object):
common = dict(use_openshift_sdn=True, ip=ip_addr, public_ip=ip_addr,
deployment_type='origin', hostname=hostname,
- public_hostname=hostname)
+ public_hostname=hostname, use_manageiq=False)
common['client_binary'] = 'oc' if os.path.isfile('/usr/bin/oc') else 'osc'
common['admin_binary'] = 'oadm' if os.path.isfile('/usr/bin/oadm') else 'osadm'
+ common['dns_domain'] = 'cluster.local'
defaults['common'] = common
if 'master' in roles:
@@ -1076,7 +1077,6 @@ class OpenShiftFacts(object):
node = dict(labels={}, annotations={}, portal_net='172.30.0.0/16',
iptables_sync_period='5s', set_node_ip=False)
defaults['node'] = node
-
return defaults
def guess_host_provider(self):
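For reference, a sketch of the relevant openshift.common defaults after this change, assuming no inventory overrides (key names match the defaults dict above; values are the literals it sets):

    openshift:
      common:
        dns_domain: cluster.local   # replaces the hard-coded 'cluster.local' in set_aggregate_facts
        use_manageiq: false         # new default surfaced through openshift_common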
diff --git a/roles/openshift_facts/tasks/main.yml b/roles/openshift_facts/tasks/main.yml
index 913f0dc78..2e889d7d5 100644
--- a/roles/openshift_facts/tasks/main.yml
+++ b/roles/openshift_facts/tasks/main.yml
@@ -8,6 +8,13 @@
- name: Ensure PyYaml is installed
yum: pkg={{ item }} state=installed
+ when: ansible_pkg_mgr == "yum"
+ with_items:
+ - PyYAML
+
+- name: Ensure PyYaml is installed
+ dnf: pkg={{ item }} state=installed
+ when: ansible_pkg_mgr == "dnf"
with_items:
- PyYAML
diff --git a/roles/openshift_manageiq/tasks/main.yaml b/roles/openshift_manageiq/tasks/main.yaml
new file mode 100644
index 000000000..2d3187e21
--- /dev/null
+++ b/roles/openshift_manageiq/tasks/main.yaml
@@ -0,0 +1,50 @@
+---
+- name: Copy Configuration to temporary conf
+ command: >
+ cp {{ openshift.common.config_base }}/master/admin.kubeconfig {{manage_iq_tmp_conf}}
+ changed_when: false
+
+- name: Add Management Infrastructure project
+ command: >
+ {{ openshift.common.admin_binary }} new-project
+ management-infra
+ --description="Management Infrastructure"
+ --config={{manage_iq_tmp_conf}}
+ register: osmiq_create_mi_project
+ failed_when: "'already exists' not in osmiq_create_mi_project.stderr and osmiq_create_mi_project.rc != 0"
+ changed_when: osmiq_create_mi_project.rc == 0
+
+- name: Create Service Account
+ shell: >
+ echo {{ manageiq_service_account | to_json | quote }} |
+ {{ openshift.common.client_binary }} create
+ -n management-infra
+ --config={{manage_iq_tmp_conf}}
+ -f -
+ register: osmiq_create_service_account
+ failed_when: "'already exists' not in osmiq_create_service_account.stderr and osmiq_create_service_account.rc != 0"
+ changed_when: osmiq_create_service_account.rc == 0
+
+- name: Create Cluster Role
+ shell: >
+ echo {{ manageiq_cluster_role | to_json | quote }} |
+ {{ openshift.common.client_binary }} create
+ --config={{manage_iq_tmp_conf}}
+ -f -
+ register: osmiq_create_cluster_role
+ failed_when: "'already exists' not in osmiq_create_cluster_role.stderr and osmiq_create_cluster_role.rc != 0"
+ changed_when: osmiq_create_cluster_role.rc == 0
+
+- name: Configure role/user permissions
+ command: >
+ {{ openshift.common.admin_binary }} {{item}}
+ --config={{manage_iq_tmp_conf}}
+ with_items: "{{manage_iq_tasks}}"
+ register: osmiq_perm_task
+ failed_when: "'already exists' not in osmiq_perm_task.stderr and osmiq_perm_task.rc != 0"
+ changed_when: osmiq_perm_task.rc == 0
+
+- name: Clean temporary configuration file
+ command: >
+ rm -f {{manage_iq_tmp_conf}}
+ changed_when: false
diff --git a/roles/openshift_manageiq/vars/main.yml b/roles/openshift_manageiq/vars/main.yml
new file mode 100644
index 000000000..77e1c304b
--- /dev/null
+++ b/roles/openshift_manageiq/vars/main.yml
@@ -0,0 +1,24 @@
+manageiq_cluster_role:
+ apiVersion: v1
+ kind: ClusterRole
+ metadata:
+ name: management-infra-admin
+ rules:
+ - resources:
+ - pods/proxy
+ verbs:
+ - '*'
+
+manageiq_service_account:
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: management-admin
+
+manage_iq_tmp_conf: /tmp/manageiq_admin.kubeconfig
+
+manage_iq_tasks:
+ - policy add-role-to-user -n management-infra admin -z management-admin
+ - policy add-role-to-user -n management-infra management-infra-admin -z management-admin
+ - policy add-cluster-role-to-user cluster-reader system:serviceaccount:management-infra:management-admin
+ - policy add-scc-to-user privileged system:serviceaccount:management-infra:management-admin
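To make the manage_iq_tasks loop in tasks/main.yaml concrete: with the defaults set in openshift_facts.py above (admin binary oadm when /usr/bin/oadm exists, tmp conf /tmp/manageiq_admin.kubeconfig), the first item expands to roughly this task (illustrative only, not part of the patch):

    - name: Grant admin on management-infra to the management-admin service account
      command: >
        oadm policy add-role-to-user -n management-infra admin -z management-admin
        --config=/tmp/manageiq_admin.kubeconfig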
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index 2cf2a53c4..8a78f8f2a 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -79,16 +79,16 @@
- name: Install Master package
yum: pkg={{ openshift.common.service_type }}-master{{ openshift_version }} state=present
+ when: ansible_pkg_mgr == "yum"
register: install_result
-# TODO: These values need to be configurable
-- name: Set dns facts
+- name: Install Master package
+ dnf: pkg={{ openshift.common.service_type }}-master{{ openshift_version }} state=present
+ when: ansible_pkg_mgr == "dnf"
+ register: install_result
+
+- name: Re-gather package dependent master facts
openshift_facts:
- role: dns
- local_facts:
- ip: "{{ openshift_master_cluster_vip | default(openshift.common.ip, true) | default(None) }}"
- domain: cluster.local
- when: openshift.master.embedded_dns
- name: Create config parent directory if it does not exist
file:
@@ -118,7 +118,12 @@
- name: Install httpd-tools if needed
yum: pkg=httpd-tools state=present
- when: item.kind == 'HTPasswdPasswordIdentityProvider'
+ when: (ansible_pkg_mgr == "yum") and (item.kind == 'HTPasswdPasswordIdentityProvider')
+ with_items: openshift.master.identity_providers
+
+- name: Install httpd-tools if needed
+ dnf: pkg=httpd-tools state=present
+ when: (ansible_pkg_mgr == "dnf") and (item.kind == 'HTPasswdPasswordIdentityProvider')
with_items: openshift.master.identity_providers
- name: Ensure htpasswd directory exists
@@ -263,7 +268,12 @@
- name: Install cluster packages
yum: pkg=pcs state=present
- when: openshift_master_ha | bool and openshift.master.cluster_method == 'pacemaker'
+ when: (ansible_pkg_mgr == "yum") and openshift_master_ha | bool and openshift.master.cluster_method == 'pacemaker'
+ register: install_result
+
+- name: Install cluster packages
+ dnf: pkg=pcs state=present
+ when: (ansible_pkg_mgr == "dnf") and openshift_master_ha | bool and openshift.master.cluster_method == 'pacemaker'
register: install_result
- name: Start and enable cluster service
diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2
index 9f4a17f0a..cadb02fa3 100644
--- a/roles/openshift_master/templates/master.yaml.v1.j2
+++ b/roles/openshift_master/templates/master.yaml.v1.j2
@@ -83,7 +83,7 @@ kubernetesMasterConfig:
{% endif %}
apiServerArguments: {{ api_server_args if api_server_args is defined else 'null' }}
controllerArguments: {{ controller_args if controller_args is defined else 'null' }}
- masterCount: {{ openshift.master.master_count }}
+ masterCount: {{ openshift.master.master_count if openshift.master.cluster_method | default(None) == 'native' else 1 }}
masterIP: {{ openshift.common.ip }}
podEvictionTimeout: ""
proxyClientInfo:
diff --git a/roles/openshift_master_ca/tasks/main.yml b/roles/openshift_master_ca/tasks/main.yml
index 314f068e7..caac13be3 100644
--- a/roles/openshift_master_ca/tasks/main.yml
+++ b/roles/openshift_master_ca/tasks/main.yml
@@ -1,6 +1,12 @@
---
- name: Install the base package for admin tooling
yum: pkg={{ openshift.common.service_type }}{{ openshift_version }} state=present
+ when: ansible_pkg_mgr == "yum"
+ register: install_result
+
+- name: Install the base package for admin tooling
+ dnf: pkg={{ openshift.common.service_type }}{{ openshift_version }} state=present
+ when: ansible_pkg_mgr == "dnf"
register: install_result
- name: Reload generated facts
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index 42d984a09..29e7eb532 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -1,12 +1,6 @@
---
# TODO: allow for overriding default ports where possible
- fail:
- msg: This role requres that osn_cluster_dns_domain is set
- when: osn_cluster_dns_domain is not defined or not osn_cluster_dns_domain
-- fail:
- msg: This role requres that osn_cluster_dns_ip is set
- when: osn_cluster_dns_ip is not defined or not osn_cluster_dns_ip
-- fail:
msg: "SELinux is disabled, This deployment type requires that SELinux is enabled."
when: (not ansible_selinux or ansible_selinux.status != 'enabled') and deployment_type in ['enterprise', 'online', 'atomic-enterprise', 'openshift-enterprise']
@@ -20,6 +14,7 @@
hostname: "{{ openshift_hostname | default(none) }}"
public_hostname: "{{ openshift_public_hostname | default(none) }}"
deployment_type: "{{ openshift_deployment_type }}"
+ dns_ip: "{{ openshift_dns_ip | default(openshift_master_cluster_vip | default(None, true), true) }}"
- role: node
local_facts:
annotations: "{{ openshift_node_annotations | default(none) }}"
@@ -40,12 +35,23 @@
# problems because the rpms don't pin the version properly.
- name: Install Node package
yum: pkg={{ openshift.common.service_type }}-node{{ openshift_version }},tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_version }} state=present
+ when: ansible_pkg_mgr == "yum"
+ register: node_install_result
+
+- name: Install Node package
+ dnf: pkg={{ openshift.common.service_type }}-node{{ openshift_version }},tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_version }} state=present
+ when: ansible_pkg_mgr == "dnf"
register: node_install_result
- name: Install sdn-ovs package
yum: pkg={{ openshift.common.service_type }}-sdn-ovs{{ openshift_version }} state=present
register: sdn_install_result
- when: openshift.common.use_openshift_sdn
+ when: ansible_pkg_mgr == "yum" and openshift.common.use_openshift_sdn
+
+- name: Install sdn-ovs package
+ dnf: pkg={{ openshift.common.service_type }}-sdn-ovs{{ openshift_version }} state=present
+ register: sdn_install_result
+ when: ansible_pkg_mgr == "dnf" and openshift.common.use_openshift_sdn
# TODO: add the validate parameter when there is a validation command to run
- name: Create the Node config
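The new dns_ip fact resolves with a simple precedence, sketched below against the two inventory knobs involved (addresses are illustrative; default(..., true) treats empty strings as unset):

    # 1. openshift_dns_ip wins when set and non-empty
    # 2. otherwise openshift_master_cluster_vip, when set and non-empty
    # 3. otherwise dns_ip stays unset and the node template omits dnsIP
    openshift_dns_ip: 172.30.0.1
    openshift_master_cluster_vip: 10.0.0.100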
diff --git a/roles/openshift_node/tasks/storage_plugins/ceph.yml b/roles/openshift_node/tasks/storage_plugins/ceph.yml
index b6936618a..b5146dcac 100644
--- a/roles/openshift_node/tasks/storage_plugins/ceph.yml
+++ b/roles/openshift_node/tasks/storage_plugins/ceph.yml
@@ -3,3 +3,10 @@
yum:
pkg: ceph-common
state: installed
+ when: ansible_pkg_mgr == "yum"
+
+- name: Install Ceph storage plugin dependencies
+ dnf:
+ pkg: ceph-common
+ state: installed
+ when: ansible_pkg_mgr == "dnf"
diff --git a/roles/openshift_node/tasks/storage_plugins/glusterfs.yml b/roles/openshift_node/tasks/storage_plugins/glusterfs.yml
index decf4f49d..a357023e1 100644
--- a/roles/openshift_node/tasks/storage_plugins/glusterfs.yml
+++ b/roles/openshift_node/tasks/storage_plugins/glusterfs.yml
@@ -3,6 +3,13 @@
yum:
pkg: glusterfs-fuse
state: installed
+ when: ansible_pkg_mgr == "yum"
+
+- name: Install GlusterFS storage plugin dependencies
+ dnf:
+ pkg: glusterfs-fuse
+ state: installed
+ when: ansible_pkg_mgr == "dnf"
- name: Set sebooleans to allow gluster storage plugin access from containers
seboolean:
diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2
index 41a303dee..23bd81f91 100644
--- a/roles/openshift_node/templates/node.yaml.v1.j2
+++ b/roles/openshift_node/templates/node.yaml.v1.j2
@@ -1,7 +1,9 @@
allowDisabledDocker: false
apiVersion: v1
-dnsDomain: {{ osn_cluster_dns_domain }}
-dnsIP: {{ osn_cluster_dns_ip }}
+dnsDomain: {{ openshift.common.dns_domain }}
+{% if 'dns_ip' in openshift.common %}
+dnsIP: {{ openshift.common.dns_ip }}
+{% endif %}
dockerConfig:
execHandlerName: ""
iptablesSyncPeriod: "{{ openshift.node.iptables_sync_period }}"
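Rendered, the head of node.yaml now looks like the sketch below when openshift.common.dns_domain carries its cluster.local default and a dns_ip fact was resolved (value illustrative); when dns_ip is unset, the dnsIP line is omitted entirely rather than rendered empty:

    allowDisabledDocker: false
    apiVersion: v1
    dnsDomain: cluster.local
    dnsIP: 10.0.0.100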
diff --git a/roles/openshift_repos/files/fedora-origin/repos/maxamillion-fedora-openshift-fedora.repo b/roles/openshift_repos/files/fedora-origin/repos/maxamillion-fedora-openshift-fedora.repo
new file mode 100644
index 000000000..bc0435d82
--- /dev/null
+++ b/roles/openshift_repos/files/fedora-origin/repos/maxamillion-fedora-openshift-fedora.repo
@@ -0,0 +1,8 @@
+[maxamillion-fedora-openshift]
+name=Copr repo for fedora-openshift owned by maxamillion
+baseurl=https://copr-be.cloud.fedoraproject.org/results/maxamillion/fedora-openshift/fedora-$releasever-$basearch/
+skip_if_unavailable=True
+gpgcheck=1
+gpgkey=https://copr-be.cloud.fedoraproject.org/results/maxamillion/fedora-openshift/pubkey.gpg
+enabled=1
+enabled_metadata=1
\ No newline at end of file
diff --git a/roles/openshift_repos/handlers/main.yml b/roles/openshift_repos/handlers/main.yml
index 26558a455..fed4ab2f0 100644
--- a/roles/openshift_repos/handlers/main.yml
+++ b/roles/openshift_repos/handlers/main.yml
@@ -1,3 +1,6 @@
---
-- name: refresh package cache
+- name: refresh yum cache
command: yum clean all
+
+- name: refresh dnf cache
+ command: dnf clean all
diff --git a/roles/openshift_repos/tasks/main.yaml b/roles/openshift_repos/tasks/main.yaml
index 66be0cb7b..c55b5df89 100644
--- a/roles/openshift_repos/tasks/main.yaml
+++ b/roles/openshift_repos/tasks/main.yaml
@@ -14,38 +14,64 @@
yum:
pkg: libselinux-python
state: present
+ when: ansible_pkg_mgr == "yum"
+
+- name: Ensure libselinux-python is installed
+ dnf:
+ pkg: libselinux-python
+ state: present
+ when: ansible_pkg_mgr == "dnf"
- name: Create any additional repos that are defined
template:
src: yum_repo.j2
dest: /etc/yum.repos.d/openshift_additional.repo
when: openshift_additional_repos | length > 0
- notify: refresh package cache
+ notify: refresh yum cache
- name: Remove the additional repos if no longer defined
file:
dest: /etc/yum.repos.d/openshift_additional.repo
state: absent
when: openshift_additional_repos | length == 0
- notify: refresh package cache
+ notify: refresh yum cache
-- name: Remove any yum repo files for other deployment types
+- name: Remove any yum repo files for other deployment types (RHEL/CentOS)
file:
path: "/etc/yum.repos.d/{{ item | basename }}"
state: absent
with_fileglob:
- '*/repos/*'
- when: not (item | search("/files/" ~ openshift_deployment_type ~ "/repos"))
- notify: refresh package cache
+ when: not (item | search("/files/" ~ openshift_deployment_type ~ "/repos")) and
+ (ansible_os_family == "RedHat" and ansible_distribution != "Fedora")
+ notify: refresh yum cache
+
+- name: Remove any yum repo files for other deployment types (Fedora)
+ file:
+ path: "/etc/yum.repos.d/{{ item | basename }}"
+ state: absent
+ with_fileglob:
+ - '*/repos/*'
+ when: not (item | search("/files/fedora-" ~ openshift_deployment_type ~ "/repos")) and
+ (ansible_distribution == "Fedora")
+ notify: refresh dnf cache
- name: Configure gpg keys if needed
copy: src={{ item }} dest=/etc/pki/rpm-gpg/
with_fileglob:
- "{{ openshift_deployment_type }}/gpg_keys/*"
- notify: refresh package cache
+ notify: refresh yum cache
-- name: Configure yum repositories
+- name: Configure yum repositories (RHEL/CentOS)
copy: src={{ item }} dest=/etc/yum.repos.d/
with_fileglob:
- "{{ openshift_deployment_type }}/repos/*"
- notify: refresh package cache
+ notify: refresh yum cache
+ when: (ansible_os_family == "RedHat" and ansible_distribution != "Fedora")
+
+- name: Configure yum repositories (Fedora)
+ copy: src={{ item }} dest=/etc/yum.repos.d/
+ with_fileglob:
+ - "fedora-{{ openshift_deployment_type }}/repos/*"
+ notify: refresh dnf cache
+ when: (ansible_distribution == "Fedora")
diff --git a/roles/openshift_storage_nfs_lvm/tasks/nfs.yml b/roles/openshift_storage_nfs_lvm/tasks/nfs.yml
index 65ae069df..bf23dfe98 100644
--- a/roles/openshift_storage_nfs_lvm/tasks/nfs.yml
+++ b/roles/openshift_storage_nfs_lvm/tasks/nfs.yml
@@ -1,6 +1,11 @@
---
- name: Install NFS server
yum: name=nfs-utils state=present
+ when: ansible_pkg_mgr == "yum"
+
+- name: Install NFS server
+ dnf: name=nfs-utils state=present
+ when: ansible_pkg_mgr == "dnf"
- name: Start rpcbind
service: name=rpcbind state=started enabled=yes
diff --git a/roles/os_env_extras/tasks/main.yaml b/roles/os_env_extras/tasks/main.yaml
index 96b12ad5b..29599559c 100644
--- a/roles/os_env_extras/tasks/main.yaml
+++ b/roles/os_env_extras/tasks/main.yaml
@@ -15,3 +15,10 @@
yum:
pkg: bash-completion
state: installed
+ when: ansible_pkg_mgr == "yum"
+
+- name: Bash Completion
+ dnf:
+ pkg: bash-completion
+ state: installed
+ when: ansible_pkg_mgr == "dnf"
diff --git a/roles/os_firewall/tasks/firewall/firewalld.yml b/roles/os_firewall/tasks/firewall/firewalld.yml
index 5089eb3e0..cf2a2c733 100644
--- a/roles/os_firewall/tasks/firewall/firewalld.yml
+++ b/roles/os_firewall/tasks/firewall/firewalld.yml
@@ -3,6 +3,14 @@
yum:
name: firewalld
state: present
+ when: ansible_pkg_mgr == "yum"
+ register: install_result
+
+- name: Install firewalld packages
+ dnf:
+ name: firewalld
+ state: present
+ when: ansible_pkg_mgr == "dnf"
register: install_result
- name: Check if iptables-services is installed
diff --git a/roles/os_firewall/tasks/firewall/iptables.yml b/roles/os_firewall/tasks/firewall/iptables.yml
index 9af9d8d29..36d51504c 100644
--- a/roles/os_firewall/tasks/firewall/iptables.yml
+++ b/roles/os_firewall/tasks/firewall/iptables.yml
@@ -6,6 +6,17 @@
with_items:
- iptables
- iptables-services
+ when: ansible_pkg_mgr == "yum"
+ register: install_result
+
+- name: Install iptables packages
+ dnf:
+ name: "{{ item }}"
+ state: present
+ with_items:
+ - iptables
+ - iptables-services
+ when: ansible_pkg_mgr == "dnf"
register: install_result
- name: Check if firewalld is installed
diff --git a/roles/os_update_latest/tasks/main.yml b/roles/os_update_latest/tasks/main.yml
index 4a2c3d47a..40eec8d35 100644
--- a/roles/os_update_latest/tasks/main.yml
+++ b/roles/os_update_latest/tasks/main.yml
@@ -1,3 +1,8 @@
---
- name: Update all packages
yum: name=* state=latest
+ when: ansible_pkg_mgr == "yum"
+
+- name: Update all packages
+ dnf: name=* state=latest
+ when: ansible_pkg_mgr == "dnf"
diff --git a/roles/os_zabbix/vars/template_openshift_master.yml b/roles/os_zabbix/vars/template_openshift_master.yml
index 6972ac877..514d6fd24 100644
--- a/roles/os_zabbix/vars/template_openshift_master.yml
+++ b/roles/os_zabbix/vars/template_openshift_master.yml
@@ -68,6 +68,36 @@ g_template_openshift_master:
applications:
- Openshift Master
+ - key: openshift.master.pv.total.count
+ description: Total number of Persistent Volumes in the Openshift Cluster
+ type: int
+ applications:
+ - Openshift Master
+
+ - key: openshift.master.pv.available.count
+ description: Total number of Available Persistent Volumes in the Openshift Cluster
+ type: int
+ applications:
+ - Openshift Master
+
+ - key: openshift.master.pv.released.count
+ description: Total number of Released Persistent Volumes in the Openshift Cluster
+ type: int
+ applications:
+ - Openshift Master
+
+ - key: openshift.master.pv.bound.count
+ description: Total number of Bound Persistent Volumes in the Openshift Cluster
+ type: int
+ applications:
+ - Openshift Master
+
+ - key: openshift.master.pv.failed.count
+ description: Total number of Failed Persistent Volumes in the Openshift Cluster
+ type: int
+ applications:
+ - Openshift Master
+
- key: openshift.master.etcd.create.success
description: Show number of successful create actions
type: int
@@ -201,26 +231,6 @@ g_template_openshift_master:
- Openshift Master Metrics
ztriggers:
- - name: 'Application creation has failed on {HOST.NAME}'
- expression: '{Template Openshift Master:create_app.last(#1)}=1 and {Template Openshift Master:create_app.last(#2)}=1'
- url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_create_app.asciidoc'
- priority: avg
-
- - name: 'Openshift Master API health check is failing on {HOST.NAME}'
- expression: '{Template Openshift Master:openshift.master.api.healthz.max(#3)}<1'
- url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc'
- priority: high
-
- - name: 'Openshift Master API PING check is failing on {HOST.NAME}'
- expression: '{Template Openshift Master:openshift.master.api.ping.max(#3)}<1'
- url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc'
- priority: high
-
- - name: 'Openshift Master metric PING check is failing on {HOST.NAME}'
- expression: '{Template Openshift Master:openshift.master.metric.ping.max(#3)}<1'
- url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc'
- priority: avg
-
- name: 'Openshift Master process not running on {HOST.NAME}'
expression: '{Template Openshift Master:openshift.master.process.count.max(#3)}<1'
url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc'
@@ -231,6 +241,16 @@ g_template_openshift_master:
url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc'
priority: high
+ - name: 'Low number of etcd watchers on {HOST.NAME}'
+ expression: '{Template Openshift Master:openshift.master.etcd.watchers.last(#1)}<10 and {Template Openshift Master:openshift.master.etcd.watchers.last(#2)}<10'
+ url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_etcd.asciidoc'
+ priority: avg
+
+ - name: 'Etcd ping failed on {HOST.NAME}'
+ expression: '{Template Openshift Master:openshift.master.etcd.ping.last(#1)}=0 and {Template Openshift Master:openshift.master.etcd.ping.last(#2)}=0'
+ url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_etcd.asciidoc'
+ priority: high
+
- name: 'Number of users for Openshift Master on {HOST.NAME}'
expression: '{Template Openshift Master:openshift.master.user.count.last()}=0'
url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc'
@@ -241,19 +261,40 @@ g_template_openshift_master:
url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc'
priority: info
- - name: 'Low number of etcd watchers on {HOST.NAME}'
- expression: '{Template Openshift Master:openshift.master.etcd.watchers.last(#1)}<10 and {Template Openshift Master:openshift.master.etcd.watchers.last(#2)}<10'
- url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_etcd.asciidoc'
+ # Put triggers that depend on other triggers here (deps must be created first)
+ - name: 'Application creation has failed on {HOST.NAME}'
+ expression: '{Template Openshift Master:create_app.last(#1)}=1 and {Template Openshift Master:create_app.last(#2)}=1'
+ url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_create_app.asciidoc'
+ dependencies:
+ - 'Openshift Master process not running on {HOST.NAME}'
priority: avg
- - name: 'Etcd ping failed on {HOST.NAME}'
- expression: '{Template Openshift Master:openshift.master.etcd.ping.last(#1)}=0 and {Template Openshift Master:openshift.master.etcd.ping.last(#2)}=0'
- url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_etcd.asciidoc'
+ - name: 'Openshift Master API health check is failing on {HOST.NAME}'
+ expression: '{Template Openshift Master:openshift.master.api.healthz.max(#3)}<1'
+ url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc'
+ dependencies:
+ - 'Openshift Master process not running on {HOST.NAME}'
+ priority: high
+
+ - name: 'Openshift Master API PING check is failing on {HOST.NAME}'
+ expression: '{Template Openshift Master:openshift.master.api.ping.max(#3)}<1'
+ url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc'
+ dependencies:
+ - 'Openshift Master process not running on {HOST.NAME}'
priority: high
+ - name: 'Openshift Master metric PING check is failing on {HOST.NAME}'
+ expression: '{Template Openshift Master:openshift.master.metric.ping.max(#3)}<1'
+ url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc'
+ dependencies:
+ - 'Openshift Master process not running on {HOST.NAME}'
+ priority: avg
+
- name: 'Docker Registry check failed on {HOST.NAME}'
expression: '{Template Openshift Master:openshift.master.registry.healthz.max(#2)}<1'
url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc'
+ dependencies:
+ - 'Openshift Master process not running on {HOST.NAME}'
priority: high
zgraphs:
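The reordering above follows the rule stated in the new comment: a trigger that names another trigger under dependencies must appear later in ztriggers, since dependencies are resolved against triggers that already exist. A minimal hypothetical illustration (template and item names invented):

    ztriggers:
    - name: 'Parent check failing on {HOST.NAME}'
      expression: '{Template Example:example.item.max(#3)}<1'
      priority: high

    - name: 'Child check failing on {HOST.NAME}'
      expression: '{Template Example:example.other.max(#3)}<1'
      dependencies:
      - 'Parent check failing on {HOST.NAME}'
      priority: avg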
diff --git a/roles/os_zabbix/vars/template_os_linux.yml b/roles/os_zabbix/vars/template_os_linux.yml
index 79d52ef9b..c6e557f12 100644
--- a/roles/os_zabbix/vars/template_os_linux.yml
+++ b/roles/os_zabbix/vars/template_os_linux.yml
@@ -258,26 +258,34 @@ g_template_os_linux:
- Network
ztriggerprototypes:
- - name: 'Filesystem: {#OSO_FILESYS} has less than 15% free disk space on {HOST.NAME}'
- expression: '{Template OS Linux:disc.filesys.full[{#OSO_FILESYS}].last()}>85'
- url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_filesys_full.asciidoc'
- priority: warn
-
- name: 'Filesystem: {#OSO_FILESYS} has less than 10% free disk space on {HOST.NAME}'
expression: '{Template OS Linux:disc.filesys.full[{#OSO_FILESYS}].last()}>90'
url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_filesys_full.asciidoc'
priority: high
- - name: 'Filesystem: {#OSO_FILESYS} has less than 10% free inodes on {HOST.NAME}'
- expression: '{Template OS Linux:disc.filesys.inodes.pused[{#OSO_FILESYS}].last()}>90'
+ # This has a dependency on the previous trigger
+ # Trigger Prototypes do not work in 2.4. They will work in Zabbix 3.0
+ - name: 'Filesystem: {#OSO_FILESYS} has less than 15% free disk space on {HOST.NAME}'
+ expression: '{Template OS Linux:disc.filesys.full[{#OSO_FILESYS}].last()}>85'
url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_filesys_full.asciidoc'
priority: warn
+ dependencies:
+ - 'Filesystem: {#OSO_FILESYS} has less than 10% free disk space on {HOST.NAME}'
- name: 'Filesystem: {#OSO_FILESYS} has less than 5% free inodes on {HOST.NAME}'
expression: '{Template OS Linux:disc.filesys.inodes.pused[{#OSO_FILESYS}].last()}>95'
url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_filesys_full.asciidoc'
priority: high
+ # This has a dependency on the previous trigger
+ # Trigger Prototypes do not work in 2.4. They will work in Zabbix 3.0
+ - name: 'Filesystem: {#OSO_FILESYS} has less than 10% free inodes on {HOST.NAME}'
+ expression: '{Template OS Linux:disc.filesys.inodes.pused[{#OSO_FILESYS}].last()}>90'
+ url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_filesys_full.asciidoc'
+ priority: warn
+ dependencies:
+ - 'Filesystem: {#OSO_FILESYS} has less than 5% free inodes on {HOST.NAME}'
+
ztriggers:
- name: 'Too many TOTAL processes on {HOST.NAME}'
expression: '{Template OS Linux:proc.nprocs.last()}>5000'
@@ -304,15 +312,3 @@ g_template_os_linux:
description: 'CPU is less than 10% idle'
dependencies:
- 'CPU idle less than 5% on {HOST.NAME}'
-
- zgraphprototypes:
- - name: Network Interface Usage
- width: 1000
- height: 400
- graph_items:
- - item_name: "Bytes per second IN on network interface {#OSO_NET_INTERFACE}"
- item_type: prototype
- color: red
- - item_name: "Bytes per second OUT on network interface {#OSO_NET_INTERFACE}"
- item_type: prototype
- color: blue
diff --git a/roles/yum_repos/README.md b/roles/yum_repos/README.md
index 51ecd5d34..908ab4972 100644
--- a/roles/yum_repos/README.md
+++ b/roles/yum_repos/README.md
@@ -6,7 +6,7 @@ This role allows easy deployment of yum repository config files.
Requirements
------------
-Yum
+Yum or dnf
Role Variables
--------------
diff --git a/utils/site_assets/oo-install-bootstrap.sh b/utils/site_assets/oo-install-bootstrap.sh
index e1b2cec90..3847c029a 100755
--- a/utils/site_assets/oo-install-bootstrap.sh
+++ b/utils/site_assets/oo-install-bootstrap.sh
@@ -9,6 +9,13 @@ cmdlnargs="$@"
: ${OO_INSTALL_LOG:=${TMPDIR}/INSTALLPKGNAME.log}
[[ $TMPDIR != */ ]] && TMPDIR="${TMPDIR}/"
+if rpm -q dnf >/dev/null 2>&1;
+then
+ PKG_MGR="dnf"
+else
+ PKG_MGR="yum"
+fi
+
if [ $OO_INSTALL_CONTEXT != 'origin_vm' ]
then
clear
@@ -18,7 +25,7 @@ if [ -e /etc/redhat-release ]
then
for i in python python-virtualenv openssh-clients gcc
do
- rpm -q $i >/dev/null 2>&1 || { echo >&2 "Missing installation dependency detected. Please run \"yum install ${i}\"."; exit 1; }
+ rpm -q $i >/dev/null 2>&1 || { echo >&2 "Missing installation dependency detected. Please run \"${PKG_MGR} install ${i}\"."; exit 1; }
done
fi
for i in python virtualenv ssh gcc
diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py
index 8cabe5431..dc88cb1ad 100644
--- a/utils/src/ooinstall/cli_installer.py
+++ b/utils/src/ooinstall/cli_installer.py
@@ -752,8 +752,8 @@ def install(ctx, force):
check_hosts_config(oo_cfg, ctx.obj['unattended'])
- click.echo('Gathering information from hosts...')
print_installation_summary(oo_cfg.hosts)
+ click.echo('Gathering information from hosts...')
callback_facts, error = openshift_ansible.default_facts(oo_cfg.hosts,
verbose)
if error:
diff --git a/utils/src/ooinstall/oo_config.py b/utils/src/ooinstall/oo_config.py
index 1be85bc1d..031b82bc1 100644
--- a/utils/src/ooinstall/oo_config.py
+++ b/utils/src/ooinstall/oo_config.py
@@ -14,7 +14,8 @@ PERSIST_SETTINGS = [
'variant_version',
'version',
]
-REQUIRED_FACTS = ['ip', 'public_ip', 'hostname', 'public_hostname']
+DEFAULT_REQUIRED_FACTS = ['ip', 'public_ip', 'hostname', 'public_hostname']
+PRECONFIGURED_REQUIRED_FACTS = ['hostname', 'public_hostname']
class OOConfigFileError(Exception):
@@ -208,7 +209,12 @@ class OOConfig(object):
for host in self.hosts:
missing_facts = []
- for required_fact in REQUIRED_FACTS:
+ if host.preconfigured:
+ required_facts = PRECONFIGURED_REQUIRED_FACTS
+ else:
+ required_facts = DEFAULT_REQUIRED_FACTS
+
+ for required_fact in required_facts:
if not getattr(host, required_fact):
missing_facts.append(required_fact)
if len(missing_facts) > 0:
diff --git a/utils/test/cli_installer_tests.py b/utils/test/cli_installer_tests.py
index d028bf472..1da49c807 100644
--- a/utils/test/cli_installer_tests.py
+++ b/utils/test/cli_installer_tests.py
@@ -225,6 +225,44 @@ hosts:
master: true
"""
+QUICKHA_CONFIG_PRECONFIGURED_LB = """
+variant: %s
+ansible_ssh_user: root
+hosts:
+ - connect_to: 10.0.0.1
+ ip: 10.0.0.1
+ hostname: master-private.example.com
+ public_ip: 24.222.0.1
+ public_hostname: master.example.com
+ master: true
+ node: true
+ - connect_to: 10.0.0.2
+ ip: 10.0.0.2
+ hostname: node1-private.example.com
+ public_ip: 24.222.0.2
+ public_hostname: node1.example.com
+ master: true
+ node: true
+ - connect_to: 10.0.0.3
+ ip: 10.0.0.3
+ hostname: node2-private.example.com
+ public_ip: 24.222.0.3
+ public_hostname: node2.example.com
+ node: true
+ master: true
+ - connect_to: 10.0.0.4
+ ip: 10.0.0.4
+ hostname: node3-private.example.com
+ public_ip: 24.222.0.4
+ public_hostname: node3.example.com
+ node: true
+ - connect_to: proxy-private.example.com
+ hostname: proxy-private.example.com
+ public_hostname: proxy.example.com
+ master_lb: true
+ preconfigured: true
+"""
+
class UnattendedCliTests(OOCliFixture):
def setUp(self):
@@ -608,6 +646,25 @@ class UnattendedCliTests(OOCliFixture):
# This is not a valid configuration:
self.assert_result(result, 1)
+ # unattended with preconfigured lb
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_quick_ha_preconfigured_lb(self, load_facts_mock, run_playbook_mock):
+ load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0)
+ run_playbook_mock.return_value = 0
+
+ config_file = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'), QUICKHA_CONFIG_PRECONFIGURED_LB % 'openshift-enterprise')
+
+ self.cli_args.extend(["-c", config_file, "install"])
+ result = self.runner.invoke(cli.cli, self.cli_args)
+ self.assert_result(result, 0)
+
+ # Make sure we ran on the expected masters and nodes:
+ hosts = run_playbook_mock.call_args[0][0]
+ hosts_to_run_on = run_playbook_mock.call_args[0][1]
+ self.assertEquals(5, len(hosts))
+ self.assertEquals(5, len(hosts_to_run_on))
class AttendedCliTests(OOCliFixture):