From 03e6ae850ce718c008636bd8db093f453e62ccf3 Mon Sep 17 00:00:00 2001 From: Andrew Butcher Date: Thu, 12 Nov 2015 10:46:25 -0500 Subject: Refactor named certificates. --- inventory/byo/hosts.example | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) (limited to 'inventory/byo') diff --git a/inventory/byo/hosts.example b/inventory/byo/hosts.example index 56bbb9612..423581281 100644 --- a/inventory/byo/hosts.example +++ b/inventory/byo/hosts.example @@ -111,8 +111,17 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # set RPM version for debugging purposes #openshift_pkg_version=-3.0.0.0 -# Configure custom master certificates +# Configure custom named certificates +# NOTE: openshift_master_named_certificates is cached on masters and is an +# additive fact, meaning that each run with a different set of certificates +# will add the newly provided certificates to the cached set of certificates. +# If you would like openshift_master_named_certificates to be overwritten with +# the provided value, specify openshift_master_overwrite_named_certificates. +#openshift_master_overwrite_named_certificates: true +# +# Provide local certificate paths which will be deployed to masters #openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key"}] +# # Detected names may be overridden by specifying the "names" key #openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "names": ["public-master-host.com"]}] -- cgit v1.2.3 From 42232eb59cc3c6ae5d4733b6655add0aff23217b Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Tue, 24 Nov 2015 14:06:12 -0500 Subject: Conditionally set the nodeIP --- inventory/byo/hosts.example | 5 +++++ roles/openshift_facts/library/openshift_facts.py | 2 +- roles/openshift_node/tasks/main.yml | 1 + roles/openshift_node/templates/node.yaml.v1.j2 | 2 ++ 4 files changed, 9 insertions(+), 1 deletion(-) (limited to 'inventory/byo') diff --git a/inventory/byo/hosts.example b/inventory/byo/hosts.example index 423581281..50a683cfc 100644 --- a/inventory/byo/hosts.example +++ b/inventory/byo/hosts.example @@ -144,6 +144,11 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # configure how often node iptables rules are refreshed #openshift_node_iptables_sync_period=5s +# Configure nodeIP in the node config +# This is needed in cases where node traffic is desired to go over an +# interface other than the default network interface. 
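The named-certificate variables introduced above can equally be supplied as Ansible group variables instead of inline INI entries. The following is only a sketch of that equivalent form — the group_vars location is an assumption, and the certificate paths and hostname are the same placeholders used in hosts.example:

```yaml
# group_vars/OSEv3.yml (assumed location)
# Replace the cached certificate set instead of appending to it.
openshift_master_overwrite_named_certificates: true

# Local certificate/key pairs to deploy to the masters; the optional
# "names" key overrides the hostnames detected from each certificate.
openshift_master_named_certificates:
  - certfile: /path/to/custom1.crt
    keyfile: /path/to/custom1.key
    names:
      - public-master-host.com
```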
+#openshift_node_set_node_ip=True + # host group for masters [masters] ose3-master[1:3]-ansible.test.example.com diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py index 6006bfa9d..b60e42c71 100755 --- a/roles/openshift_facts/library/openshift_facts.py +++ b/roles/openshift_facts/library/openshift_facts.py @@ -1074,7 +1074,7 @@ class OpenShiftFacts(object): if 'node' in roles: node = dict(labels={}, annotations={}, portal_net='172.30.0.0/16', - iptables_sync_period='5s') + iptables_sync_period='5s', set_node_ip=False) defaults['node'] = node return defaults diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml index d11bc5123..42d984a09 100644 --- a/roles/openshift_node/tasks/main.yml +++ b/roles/openshift_node/tasks/main.yml @@ -34,6 +34,7 @@ schedulable: "{{ openshift_schedulable | default(openshift_scheduleable) | default(None) }}" sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}" storage_plugin_deps: "{{ osn_storage_plugin_deps | default(None) }}" + set_node_ip: "{{ openshift_set_node_ip | default(None) }}" # We have to add tuned-profiles in the same transaction otherwise we run into depsolving # problems because the rpms don't pin the version properly. diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2 index 7d2f506e3..41a303dee 100644 --- a/roles/openshift_node/templates/node.yaml.v1.j2 +++ b/roles/openshift_node/templates/node.yaml.v1.j2 @@ -23,7 +23,9 @@ networkConfig: {% if openshift.common.use_openshift_sdn %} networkPluginName: {{ openshift.common.sdn_network_plugin_name }} {% endif %} +{% if openshift.node.set_node_ip | bool %} nodeIP: {{ openshift.common.ip }} +{% endif %} nodeName: {{ openshift.common.hostname | lower }} podManifestConfig: servingInfo: -- cgit v1.2.3 From 993785d915b08ad3c1d25faf20759e80733d77d0 Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Tue, 24 Nov 2015 13:05:07 -0500 Subject: Rework setting of hostname - set the hostname for all installs < 3.1 or 1.1 - provide a new variable openshift_set_hostname to override default behavior --- inventory/byo/hosts.example | 5 +++++ roles/openshift_common/tasks/main.yml | 13 +++++++++++++ 2 files changed, 18 insertions(+) (limited to 'inventory/byo') diff --git a/inventory/byo/hosts.example b/inventory/byo/hosts.example index 50a683cfc..ef0736b63 100644 --- a/inventory/byo/hosts.example +++ b/inventory/byo/hosts.example @@ -149,6 +149,11 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # interface other than the default network interface. #openshift_node_set_node_ip=True +# Force setting of system hostname when configuring OpenShift +# This works around issues related to installations that do not have valid dns +# entries for the interfaces attached to the host. 
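Expressed as a group variable rather than the commented INI entry, the new node setting looks like the sketch below; the group_vars location is assumed, and the resulting openshift.node.set_node_ip fact is what gates the nodeIP line in node.yaml.v1.j2:

```yaml
# group_vars/OSEv3.yml (assumed location)
# Emit nodeIP in the generated node config so node traffic can use an
# interface other than the default one.
openshift_node_set_node_ip: true
```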
+#openshift_set_hostname=True + # host group for masters [masters] ose3-master[1:3]-ansible.test.example.com diff --git a/roles/openshift_common/tasks/main.yml b/roles/openshift_common/tasks/main.yml index e9df4e364..55065b3de 100644 --- a/roles/openshift_common/tasks/main.yml +++ b/roles/openshift_common/tasks/main.yml @@ -3,6 +3,10 @@ msg: Flannel can not be used with openshift sdn when: openshift_use_openshift_sdn | default(false) | bool and openshift_use_flannel | default(false) | bool +- fail: + msg: openshift_hostname must be 64 characters or less + when: openshift_hostname is defined and openshift_hostname | length > 64 + - name: Set common Cluster facts openshift_facts: role: common @@ -18,3 +22,12 @@ deployment_type: "{{ openshift_deployment_type }}" use_fluentd: "{{ openshift_use_fluentd | default(None) }}" use_flannel: "{{ openshift_use_flannel | default(None) }}" + + # For enterprise versions < 3.1 and origin versions < 1.1 we want to set the + # hostname by default. +- set_fact: + set_hostname_default: "{{ not openshift.common.version_greater_than_3_1_or_1_1 }}" + +- name: Set hostname + hostname: name={{ openshift.common.hostname }} + when: openshift_set_hostname | default(set_hostname_default) | bool -- cgit v1.2.3 From 385ca96f5aaf8987820a5c7a25349ab7bedf9318 Mon Sep 17 00:00:00 2001 From: Brenton Leanhardt Date: Tue, 24 Nov 2015 17:35:43 -0500 Subject: Fixing 'unscheduleable' typo --- inventory/byo/hosts.example | 2 +- utils/src/ooinstall/cli_installer.py | 6 +++--- utils/src/ooinstall/openshift_ansible.py | 12 ++++++------ utils/test/cli_installer_tests.py | 14 +++++++------- 4 files changed, 17 insertions(+), 17 deletions(-) (limited to 'inventory/byo') diff --git a/inventory/byo/hosts.example b/inventory/byo/hosts.example index ef0736b63..1a67cc290 100644 --- a/inventory/byo/hosts.example +++ b/inventory/byo/hosts.example @@ -166,7 +166,7 @@ ose3-lb-ansible.test.example.com # NOTE: Currently we require that masters be part of the SDN which requires that they also be nodes # However, in order to ensure that your masters are not burdened with running pods you should -# make them unschedulable by adding openshift_scheduleable=False any node that's also a master. +# make them unschedulable by adding openshift_schedulable=False any node that's also a master. [nodes] ose3-master[1:3]-ansible.test.example.com ose3-node[1:2]-ansible.test.example.com openshift_node_labels="{'region': 'primary', 'zone': 'default'}" diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py index d7c06745e..0b38f706c 100644 --- a/utils/src/ooinstall/cli_installer.py +++ b/utils/src/ooinstall/cli_installer.py @@ -90,7 +90,7 @@ change this later using etcd from Red Hat Enterprise Linux 7. Any Masters configured as part of this installation process will also be configured as Nodes. This is so that the Master will be able to proxy to Pods -from the API. By default this Node will be unscheduleable but this can be changed +from the API. By default this Node will be unschedulable but this can be changed after installation with 'oadm manage-node'. The OpenShift Node provides the runtime environments for containers. It will @@ -274,8 +274,8 @@ https://docs.openshift.org/latest/install_config/install/advanced_install.html#m if len(masters) == len(nodes): message = """ No dedicated Nodes specified. By default, colocated Masters have their Nodes -set to unscheduleable. Continuing at this point will label all nodes as -scheduleable. +set to unschedulable. 
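Tying the hostname rework above back to inventory terms: both knobs can be set per host, for example through host_vars. This is a sketch only — the file path and hostname are placeholders — and note that openshift_common now fails early if the name exceeds 64 characters:

```yaml
# host_vars/<some-master-host>.yml (hypothetical file)
# Force the hostname to be set even on versions where the new default
# (>= 3.1 / 1.1) would skip it.
openshift_set_hostname: true
# Must be 64 characters or less.
openshift_hostname: master1.internal.example.com
```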
Continuing at this point will label all nodes as +schedulable. """ confirm_continue(message) diff --git a/utils/src/ooinstall/openshift_ansible.py b/utils/src/ooinstall/openshift_ansible.py index 9afc9a644..4aa60922d 100644 --- a/utils/src/ooinstall/openshift_ansible.py +++ b/utils/src/ooinstall/openshift_ansible.py @@ -67,10 +67,10 @@ def generate_inventory(hosts): for node in nodes: # TODO: Until the Master can run the SDN itself we have to configure the Masters # as Nodes too. - scheduleable = True + schedulable = True if node in masters: - scheduleable = False - write_host(node, base_inventory, scheduleable) + schedulable = False + write_host(node, base_inventory, schedulable) if not getattr(proxy, 'preconfigured', True): base_inventory.write('\n[lb]\n') @@ -112,7 +112,7 @@ def write_inventory_vars(base_inventory, multiple_masters, proxy): base_inventory.write("openshift_master_cluster_public_hostname={}\n".format(proxy.public_hostname)) -def write_host(host, inventory, scheduleable=True): +def write_host(host, inventory, schedulable=True): global CFG facts = '' @@ -126,8 +126,8 @@ def write_host(host, inventory, scheduleable=True): facts += ' openshift_public_hostname={}'.format(host.public_hostname) # TODO: For not write_host is handles both master and nodes. # Technically only nodes will ever need this. - if not scheduleable: - facts += ' openshift_scheduleable=False' + if not schedulable: + facts += ' openshift_schedulable=False' installer_host = socket.gethostname() if installer_host in [host.connect_to, host.hostname, host.public_hostname]: facts += ' ansible_connection=local' diff --git a/utils/test/cli_installer_tests.py b/utils/test/cli_installer_tests.py index c951b6580..90b6b15a3 100644 --- a/utils/test/cli_installer_tests.py +++ b/utils/test/cli_installer_tests.py @@ -616,7 +616,7 @@ class AttendedCliTests(OOCliFixture): #pylint: disable=too-many-arguments,too-many-branches def _build_input(self, ssh_user=None, hosts=None, variant_num=None, - add_nodes=None, confirm_facts=None, scheduleable_masters_ok=None, + add_nodes=None, confirm_facts=None, schedulable_masters_ok=None, master_lb=None): """ Builds a CLI input string with newline characters to simulate @@ -658,7 +658,7 @@ class AttendedCliTests(OOCliFixture): # TODO: support option 2, fresh install if add_nodes: - if scheduleable_masters_ok: + if schedulable_masters_ok: inputs.append('y') inputs.append('1') # Add more nodes i = 0 @@ -712,7 +712,7 @@ class AttendedCliTests(OOCliFixture): inventory = ConfigParser.ConfigParser(allow_no_value=True) inventory.read(os.path.join(self.work_dir, '.ansible/hosts')) self.assertEquals('False', - inventory.get('nodes', '10.0.0.1 openshift_scheduleable')) + inventory.get('nodes', '10.0.0.1 openshift_schedulable')) self.assertEquals(None, inventory.get('nodes', '10.0.0.2')) self.assertEquals(None, @@ -790,7 +790,7 @@ class AttendedCliTests(OOCliFixture): add_nodes=[('10.0.0.2', False)], ssh_user='root', variant_num=1, - scheduleable_masters_ok=True, + schedulable_masters_ok=True, confirm_facts='y') self._verify_get_hosts_to_run_on(mock_facts, load_facts_mock, @@ -830,13 +830,13 @@ class AttendedCliTests(OOCliFixture): inventory = ConfigParser.ConfigParser(allow_no_value=True) inventory.read(os.path.join(self.work_dir, '.ansible/hosts')) self.assertEquals('False', - inventory.get('nodes', '10.0.0.1 openshift_scheduleable')) + inventory.get('nodes', '10.0.0.1 openshift_schedulable')) self.assertEquals('False', - inventory.get('nodes', '10.0.0.2 openshift_scheduleable')) + 
inventory.get('nodes', '10.0.0.2 openshift_schedulable')) self.assertEquals(None, inventory.get('nodes', '10.0.0.3')) self.assertEquals('False', - inventory.get('nodes', '10.0.0.4 openshift_scheduleable')) + inventory.get('nodes', '10.0.0.4 openshift_schedulable')) return -- cgit v1.2.3 From af009a7a51d7b6f5799a14c452cc7db92727135e Mon Sep 17 00:00:00 2001 From: Adam Miller Date: Wed, 18 Nov 2015 16:19:19 -0600 Subject: Fedora changes: - ansible bootstrap playbook for Fedora 23+ - add conditionals to handle yum vs dnf - add Fedora OpenShift COPR - update BYO host README for repo configs and fedora bootstrap Fix typo in etcd README, remove unnecessary parens in openshift_node main.yml rebase on master, update package cache refresh handler for yum vs dnf Fix typo in etcd README, remove unnecessary parens in openshift_node main.yml --- README_origin.md | 15 ++++++++ inventory/byo/hosts.example | 4 +++ playbooks/adhoc/bootstrap-fedora.yml | 5 +++ playbooks/adhoc/uninstall.yml | 34 +++++++++++++++++- roles/ansible/tasks/main.yml | 7 ++++ roles/cockpit/tasks/main.yml | 12 +++++++ roles/copr_cli/tasks/main.yml | 6 ++++ roles/docker/tasks/main.yml | 5 +++ roles/etcd/README.md | 2 +- roles/etcd/tasks/main.yml | 5 +++ roles/flannel/README.md | 3 +- roles/flannel/tasks/main.yml | 6 ++++ roles/fluentd_master/tasks/main.yml | 7 ++++ roles/fluentd_node/tasks/main.yml | 7 ++++ roles/haproxy/tasks/main.yml | 7 ++++ roles/kube_nfs_volumes/tasks/main.yml | 5 +++ roles/kube_nfs_volumes/tasks/nfs.yml | 5 +++ roles/openshift_ansible_inventory/tasks/main.yml | 10 ++++++ roles/openshift_expand_partition/README.md | 2 +- roles/openshift_expand_partition/tasks/main.yml | 5 +++ roles/openshift_facts/tasks/main.yml | 7 ++++ roles/openshift_master/tasks/main.yml | 20 +++++++++-- roles/openshift_master_ca/tasks/main.yml | 6 ++++ roles/openshift_node/tasks/main.yml | 13 ++++++- .../openshift_node/tasks/storage_plugins/ceph.yml | 7 ++++ .../tasks/storage_plugins/glusterfs.yml | 7 ++++ .../repos/maxamillion-fedora-openshift-fedora.repo | 8 +++++ roles/openshift_repos/handlers/main.yml | 5 ++- roles/openshift_repos/tasks/main.yaml | 42 +++++++++++++++++----- roles/openshift_storage_nfs_lvm/tasks/nfs.yml | 5 +++ roles/os_env_extras/tasks/main.yaml | 7 ++++ roles/os_firewall/tasks/firewall/firewalld.yml | 8 +++++ roles/os_firewall/tasks/firewall/iptables.yml | 11 ++++++ roles/os_update_latest/tasks/main.yml | 5 +++ roles/yum_repos/README.md | 2 +- utils/site_assets/oo-install-bootstrap.sh | 9 ++++- 36 files changed, 296 insertions(+), 18 deletions(-) create mode 100644 playbooks/adhoc/bootstrap-fedora.yml create mode 100644 roles/openshift_repos/files/fedora-origin/repos/maxamillion-fedora-openshift-fedora.repo (limited to 'inventory/byo') diff --git a/README_origin.md b/README_origin.md index cb213a93a..343ecda3d 100644 --- a/README_origin.md +++ b/README_origin.md @@ -39,6 +39,12 @@ subscription-manager repos \ ``` * Configuration of router is not automated yet * Configuration of docker-registry is not automated yet +* Fedora 23+ doesn't come with python2 and will need a quick bootstrap. 
Setup + your inventory as described below and run the following (substituting the + `$PATH_TO_INVENTORY_FILE` with the actual path to your inventory file): +```sh +ansible-playbook ./playbooks/adhoc/bootstrap-fedora.yml -i $PATH_TO_INVENTORY_FILE +``` ## Configuring the host inventory [Ansible docs](http://docs.ansible.com/intro_inventory.html) @@ -59,6 +65,7 @@ nodes # Set variables common for all OSEv3 hosts [OSv3:vars] + # SSH user, this user should allow ssh based auth without requiring a password ansible_ssh_user=root @@ -75,6 +82,14 @@ osv3-master.example.com [nodes] osv3-master.example.com osv3-node[1:2].example.com + +# host group for etcd +[etcd] +osv3-etcd[1:3].example.com + +[lb] +osv3-lb.example.com + ``` The hostnames above should resolve both from the hosts themselves and diff --git a/inventory/byo/hosts.example b/inventory/byo/hosts.example index 1a67cc290..29d81d64f 100644 --- a/inventory/byo/hosts.example +++ b/inventory/byo/hosts.example @@ -36,6 +36,10 @@ deployment_type=atomic-enterprise # Origin copr repo #openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name': 'OpenShift Origin COPR', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/', 'enabled': 1, 'gpgcheck': 1, gpgkey: 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}] +# Origin Fedora copr repo +# Use this if you are installing on Fedora +#openshift_additional_repos=[{'id': 'fedora-openshift-origin-copr', 'name': 'OpenShift Origin COPR for Fedora', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/fedora-openshift/fedora-$releasever-$basearch/', 'enabled': 1, 'gpgcheck': 1, gpgkey: 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/fedora-openshift/pubkey.gpg'}] + # htpasswd auth openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/openshift/htpasswd'}] diff --git a/playbooks/adhoc/bootstrap-fedora.yml b/playbooks/adhoc/bootstrap-fedora.yml new file mode 100644 index 000000000..de9f36c8a --- /dev/null +++ b/playbooks/adhoc/bootstrap-fedora.yml @@ -0,0 +1,5 @@ +- hosts: OSv3 + gather_facts: false + tasks: + - name: install python and deps for ansible modules + raw: dnf install -y python2 python2-dnf libselinux-python libsemanage-python diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml index 1f1ada3f0..08a2ea6fb 100644 --- a/playbooks/adhoc/uninstall.yml +++ b/playbooks/adhoc/uninstall.yml @@ -48,7 +48,39 @@ - pcsd - yum: name={{ item }} state=absent - when: not is_atomic | bool + when: ansible_pkg_mgr == "yum" and not is_atomic | bool + with_items: + - atomic-enterprise + - atomic-enterprise-master + - atomic-enterprise-node + - atomic-enterprise-sdn-ovs + - atomic-openshift + - atomic-openshift-clients + - atomic-openshift-master + - atomic-openshift-node + - atomic-openshift-sdn-ovs + - corosync + - etcd + - openshift + - openshift-master + - openshift-node + - openshift-sdn + - openshift-sdn-ovs + - openvswitch + - origin + - origin-clients + - origin-master + - origin-node + - origin-sdn-ovs + - pacemaker + - pcs + - tuned-profiles-atomic-enterprise-node + - tuned-profiles-atomic-openshift-node + - tuned-profiles-openshift-node + - tuned-profiles-origin-node + + - dnf: name={{ item }} state=absent + when: ansible_pkg_mgr == "dnf" and not is_atomic | bool with_items: - atomic-enterprise - atomic-enterprise-master diff --git 
a/roles/ansible/tasks/main.yml b/roles/ansible/tasks/main.yml index 5d20a3b35..f79273824 100644 --- a/roles/ansible/tasks/main.yml +++ b/roles/ansible/tasks/main.yml @@ -5,6 +5,13 @@ yum: pkg: ansible state: installed + when: ansible_pkg_mgr == "yum" + +- name: Install Ansible + dnf: + pkg: ansible + state: installed + when: ansible_pkg_mgr == "dnf" - include: config.yml vars: diff --git a/roles/cockpit/tasks/main.yml b/roles/cockpit/tasks/main.yml index 875cbad21..8410e7c90 100644 --- a/roles/cockpit/tasks/main.yml +++ b/roles/cockpit/tasks/main.yml @@ -8,6 +8,18 @@ - cockpit-shell - cockpit-bridge - "{{ cockpit_plugins }}" + when: ansible_pkg_mgr == "yum" + +- name: Install cockpit-ws + dnf: + name: "{{ item }}" + state: present + with_items: + - cockpit-ws + - cockpit-shell + - cockpit-bridge + - "{{ cockpit_plugins }}" + when: ansible_pkg_mgr == "dnf" - name: Enable cockpit-ws service: diff --git a/roles/copr_cli/tasks/main.yml b/roles/copr_cli/tasks/main.yml index f7ef1c26e..f8496199d 100644 --- a/roles/copr_cli/tasks/main.yml +++ b/roles/copr_cli/tasks/main.yml @@ -2,3 +2,9 @@ - yum: name: copr-cli state: present + when: ansible_pkg_mgr == "yum" + +- dnf: + name: copr-cli + state: present + when: ansible_pkg_mgr == "dnf" diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml index 96949230d..dd4401389 100644 --- a/roles/docker/tasks/main.yml +++ b/roles/docker/tasks/main.yml @@ -2,6 +2,11 @@ # tasks file for docker - name: Install docker yum: pkg=docker + when: ansible_pkg_mgr == "yum" + +- name: Install docker + dnf: pkg=docker + when: ansible_pkg_mgr == "dnf" - name: enable and start the docker service service: name=docker enabled=yes state=started diff --git a/roles/etcd/README.md b/roles/etcd/README.md index 88e4ff874..329a926c0 100644 --- a/roles/etcd/README.md +++ b/roles/etcd/README.md @@ -7,7 +7,7 @@ Requirements ------------ This role assumes it's being deployed on a RHEL/Fedora based host with package -named 'etcd' available via yum. +named 'etcd' available via yum or dnf (conditionally). Role Variables -------------- diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml index fcbdecd37..efaab5f31 100644 --- a/roles/etcd/tasks/main.yml +++ b/roles/etcd/tasks/main.yml @@ -9,6 +9,11 @@ - name: Install etcd yum: pkg=etcd-2.* state=present + when: ansible_pkg_mgr == "yum" + +- name: Install etcd + dnf: pkg=etcd* state=present + when: ansible_pkg_mgr == "dnf" - name: Validate permissions on the config dir file: diff --git a/roles/flannel/README.md b/roles/flannel/README.md index b8aa830ac..8f271aada 100644 --- a/roles/flannel/README.md +++ b/roles/flannel/README.md @@ -7,7 +7,8 @@ Requirements ------------ This role assumes it's being deployed on a RHEL/Fedora based host with package -named 'flannel' available via yum, in version superior to 0.3. +named 'flannel' available via yum or dnf (conditionally), in version superior +to 0.3. 
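The "(conditionally)" wording refers to the pattern this series applies across all of these roles: each package task is duplicated, one yum task and one dnf task, selected by the ansible_pkg_mgr fact. A generic sketch of that pattern, with a placeholder package name:

```yaml
- name: Install a package on yum-based hosts
  yum:
    name: some-package          # placeholder
    state: present
  when: ansible_pkg_mgr == "yum"

- name: Install a package on dnf-based hosts (Fedora 23+)
  dnf:
    name: some-package          # placeholder
    state: present
  when: ansible_pkg_mgr == "dnf"
```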
Role Variables -------------- diff --git a/roles/flannel/tasks/main.yml b/roles/flannel/tasks/main.yml index acfb009ec..86e1bc96e 100644 --- a/roles/flannel/tasks/main.yml +++ b/roles/flannel/tasks/main.yml @@ -2,6 +2,12 @@ - name: Install flannel sudo: true yum: pkg=flannel state=present + when: ansible_pkg_mgr == "yum" + +- name: Install flannel + sudo: true + dnf: pkg=flannel state=present + when: ansible_pkg_mgr == "dnf" - name: Set flannel etcd url sudo: true diff --git a/roles/fluentd_master/tasks/main.yml b/roles/fluentd_master/tasks/main.yml index 55cd94460..43c499b4d 100644 --- a/roles/fluentd_master/tasks/main.yml +++ b/roles/fluentd_master/tasks/main.yml @@ -4,6 +4,13 @@ yum: name: 'http://packages.treasuredata.com/2/redhat/7/x86_64/td-agent-2.2.0-0.x86_64.rpm' state: present + when: ansible_pkg_mgr == "yum" + +- name: download and install td-agent + dnf: + name: 'http://packages.treasuredata.com/2/redhat/7/x86_64/td-agent-2.2.0-0.x86_64.rpm' + state: present + when: ansible_pkg_mgr == "dnf" - name: Verify fluentd plugin installed command: '/opt/td-agent/embedded/bin/gem query -i fluent-plugin-kubernetes' diff --git a/roles/fluentd_node/tasks/main.yml b/roles/fluentd_node/tasks/main.yml index f9ef30b83..827a1c075 100644 --- a/roles/fluentd_node/tasks/main.yml +++ b/roles/fluentd_node/tasks/main.yml @@ -4,6 +4,13 @@ yum: name: 'http://packages.treasuredata.com/2/redhat/7/x86_64/td-agent-2.2.0-0.x86_64.rpm' state: present + when: ansible_pkg_mgr == "yum" + +- name: download and install td-agent + dnf: + name: 'http://packages.treasuredata.com/2/redhat/7/x86_64/td-agent-2.2.0-0.x86_64.rpm' + state: present + when: ansible_pkg_mgr == "dnf" - name: Verify fluentd plugin installed command: '/opt/td-agent/embedded/bin/gem query -i fluent-plugin-kubernetes' diff --git a/roles/haproxy/tasks/main.yml b/roles/haproxy/tasks/main.yml index 5638b7313..5d015fadd 100644 --- a/roles/haproxy/tasks/main.yml +++ b/roles/haproxy/tasks/main.yml @@ -3,6 +3,13 @@ yum: pkg: haproxy state: present + when: ansible_pkg_mgr == "yum" + +- name: Install haproxy + dnf: + pkg: haproxy + state: present + when: ansible_pkg_mgr == "dnf" - name: Configure haproxy template: diff --git a/roles/kube_nfs_volumes/tasks/main.yml b/roles/kube_nfs_volumes/tasks/main.yml index d1dcf261a..3fcb9fd18 100644 --- a/roles/kube_nfs_volumes/tasks/main.yml +++ b/roles/kube_nfs_volumes/tasks/main.yml @@ -1,6 +1,11 @@ --- - name: Install pyparted (RedHat/Fedora) yum: name=pyparted,python-httplib2 state=present + when: ansible_pkg_mgr == "yum" + +- name: Install pyparted (RedHat/Fedora) + dnf: name=pyparted,python-httplib2 state=present + when: ansible_pkg_mgr == "dnf" - name: partition the drives partitionpool: disks={{ disks }} force={{ force }} sizes={{ sizes }} diff --git a/roles/kube_nfs_volumes/tasks/nfs.yml b/roles/kube_nfs_volumes/tasks/nfs.yml index 559fcf17c..a58a7b824 100644 --- a/roles/kube_nfs_volumes/tasks/nfs.yml +++ b/roles/kube_nfs_volumes/tasks/nfs.yml @@ -1,6 +1,11 @@ --- - name: Install NFS server on Fedora/Red Hat yum: name=nfs-utils state=present + when: ansible_pkg_mgr == "yum" + +- name: Install NFS server on Fedora/Red Hat + dnf: name=nfs-utils state=present + when: ansible_pkg_mgr == "dnf" - name: Start rpcbind on Fedora/Red Hat service: name=rpcbind state=started enabled=yes diff --git a/roles/openshift_ansible_inventory/tasks/main.yml b/roles/openshift_ansible_inventory/tasks/main.yml index f6919dada..2b99f8bcd 100644 --- a/roles/openshift_ansible_inventory/tasks/main.yml +++ 
b/roles/openshift_ansible_inventory/tasks/main.yml @@ -2,6 +2,16 @@ - yum: name: "{{ item }}" state: present + when: ansible_pkg_mgr == "yum" + with_items: + - openshift-ansible-inventory + - openshift-ansible-inventory-aws + - openshift-ansible-inventory-gce + +- dnf: + name: "{{ item }}" + state: present + when: ansible_pkg_mgr == "dnf" with_items: - openshift-ansible-inventory - openshift-ansible-inventory-aws diff --git a/roles/openshift_expand_partition/README.md b/roles/openshift_expand_partition/README.md index cd394e1ba..aed4ec871 100644 --- a/roles/openshift_expand_partition/README.md +++ b/roles/openshift_expand_partition/README.md @@ -8,7 +8,7 @@ partition, and then expanding the file system on the partition. * A machine with a disk that is not fully utilized -* cloud-utils-growpart rpm (either installed or avialable via yum) +* cloud-utils-growpart rpm (either installed or avialable via yum or dnf) * The partition you are expanding needs to be at the end of the partition list diff --git a/roles/openshift_expand_partition/tasks/main.yml b/roles/openshift_expand_partition/tasks/main.yml index 8bc399070..42e7903fd 100644 --- a/roles/openshift_expand_partition/tasks/main.yml +++ b/roles/openshift_expand_partition/tasks/main.yml @@ -1,6 +1,11 @@ --- - name: Ensure growpart is installed yum: pkg=cloud-utils-growpart state=present + when: ansible_pkg_mgr == "yum" + +- name: Ensure growpart is installed + dnf: pkg=cloud-utils-growpart state=present + when: ansible_pkg_mgr == "dnf" - name: Grow the partitions command: "growpart {{oep_drive}} {{oep_partition}}" diff --git a/roles/openshift_facts/tasks/main.yml b/roles/openshift_facts/tasks/main.yml index 913f0dc78..2e889d7d5 100644 --- a/roles/openshift_facts/tasks/main.yml +++ b/roles/openshift_facts/tasks/main.yml @@ -8,6 +8,13 @@ - name: Ensure PyYaml is installed yum: pkg={{ item }} state=installed + when: ansible_pkg_mgr == "yum" + with_items: + - PyYAML + +- name: Ensure PyYaml is installed + dnf: pkg={{ item }} state=installed + when: ansible_pkg_mgr == "dnf" with_items: - PyYAML diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml index 2cf2a53c4..9d7880041 100644 --- a/roles/openshift_master/tasks/main.yml +++ b/roles/openshift_master/tasks/main.yml @@ -79,6 +79,12 @@ - name: Install Master package yum: pkg={{ openshift.common.service_type }}-master{{ openshift_version }} state=present + when: ansible_pkg_mgr == "yum" + register: install_result + +- name: Install Master package + dnf: pkg={{ openshift.common.service_type }}-master{{ openshift_version }} state=present + when: ansible_pkg_mgr == "dnf" register: install_result # TODO: These values need to be configurable @@ -118,7 +124,12 @@ - name: Install httpd-tools if needed yum: pkg=httpd-tools state=present - when: item.kind == 'HTPasswdPasswordIdentityProvider' + when: (ansible_pkg_mgr == "yum") and (item.kind == 'HTPasswdPasswordIdentityProvider') + with_items: openshift.master.identity_providers + +- name: Install httpd-tools if needed + dnf: pkg=httpd-tools state=present + when: (ansible_pkg_mgr == "dnf") and (item.kind == 'HTPasswdPasswordIdentityProvider') with_items: openshift.master.identity_providers - name: Ensure htpasswd directory exists @@ -263,7 +274,12 @@ - name: Install cluster packages yum: pkg=pcs state=present - when: openshift_master_ha | bool and openshift.master.cluster_method == 'pacemaker' + when: (ansible_pkg_mgr == "yum") and openshift_master_ha | bool and openshift.master.cluster_method == 'pacemaker' + 
register: install_result + +- name: Install cluster packages + dnf: pkg=pcs state=present + when: (ansible_pkg_mgr == "dnf") and openshift_master_ha | bool and openshift.master.cluster_method == 'pacemaker' register: install_result - name: Start and enable cluster service diff --git a/roles/openshift_master_ca/tasks/main.yml b/roles/openshift_master_ca/tasks/main.yml index 314f068e7..caac13be3 100644 --- a/roles/openshift_master_ca/tasks/main.yml +++ b/roles/openshift_master_ca/tasks/main.yml @@ -1,6 +1,12 @@ --- - name: Install the base package for admin tooling yum: pkg={{ openshift.common.service_type }}{{ openshift_version }} state=present + when: ansible_pkg_mgr == "yum" + register: install_result + +- name: Install the base package for admin tooling + dnf: pkg={{ openshift.common.service_type }}{{ openshift_version }} state=present + when: ansible_pkg_mgr == "dnf" register: install_result - name: Reload generated facts diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml index 42d984a09..36bcc1a90 100644 --- a/roles/openshift_node/tasks/main.yml +++ b/roles/openshift_node/tasks/main.yml @@ -40,12 +40,23 @@ # problems because the rpms don't pin the version properly. - name: Install Node package yum: pkg={{ openshift.common.service_type }}-node{{ openshift_version }},tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_version }} state=present + when: ansible_pkg_mgr == "yum" + register: node_install_result + +- name: Install Node package + dnf: pkg={{ openshift.common.service_type }}-node{{ openshift_version }},tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_version }} state=present + when: ansible_pkg_mgr == "dnf" register: node_install_result - name: Install sdn-ovs package yum: pkg={{ openshift.common.service_type }}-sdn-ovs{{ openshift_version }} state=present register: sdn_install_result - when: openshift.common.use_openshift_sdn + when: ansible_pkg_mgr == "yum" and openshift.common.use_openshift_sdn + +- name: Install sdn-ovs package + dnf: pkg={{ openshift.common.service_type }}-sdn-ovs{{ openshift_version }} state=present + register: sdn_install_result + when: ansible_pkg_mgr == "dnf" and openshift.common.use_openshift_sdn # TODO: add the validate parameter when there is a validation command to run - name: Create the Node config diff --git a/roles/openshift_node/tasks/storage_plugins/ceph.yml b/roles/openshift_node/tasks/storage_plugins/ceph.yml index b6936618a..b5146dcac 100644 --- a/roles/openshift_node/tasks/storage_plugins/ceph.yml +++ b/roles/openshift_node/tasks/storage_plugins/ceph.yml @@ -3,3 +3,10 @@ yum: pkg: ceph-common state: installed + when: ansible_pkg_mgr == "yum" + +- name: Install Ceph storage plugin dependencies + dnf: + pkg: ceph-common + state: installed + when: ansible_pkg_mgr == "dnf" diff --git a/roles/openshift_node/tasks/storage_plugins/glusterfs.yml b/roles/openshift_node/tasks/storage_plugins/glusterfs.yml index decf4f49d..a357023e1 100644 --- a/roles/openshift_node/tasks/storage_plugins/glusterfs.yml +++ b/roles/openshift_node/tasks/storage_plugins/glusterfs.yml @@ -3,6 +3,13 @@ yum: pkg: glusterfs-fuse state: installed + when: ansible_pkg_mgr == "yum" + +- name: Install GlusterFS storage plugin dependencies + dnf: + pkg: glusterfs-fuse + state: installed + when: ansible_pkg_mgr == "dnf" - name: Set sebooleans to allow gluster storage plugin access from containers seboolean: diff --git a/roles/openshift_repos/files/fedora-origin/repos/maxamillion-fedora-openshift-fedora.repo 
b/roles/openshift_repos/files/fedora-origin/repos/maxamillion-fedora-openshift-fedora.repo new file mode 100644 index 000000000..bc0435d82 --- /dev/null +++ b/roles/openshift_repos/files/fedora-origin/repos/maxamillion-fedora-openshift-fedora.repo @@ -0,0 +1,8 @@ +[maxamillion-fedora-openshift] +name=Copr repo for fedora-openshift owned by maxamillion +baseurl=https://copr-be.cloud.fedoraproject.org/results/maxamillion/fedora-openshift/fedora-$releasever-$basearch/ +skip_if_unavailable=True +gpgcheck=1 +gpgkey=https://copr-be.cloud.fedoraproject.org/results/maxamillion/fedora-openshift/pubkey.gpg +enabled=1 +enabled_metadata=1 \ No newline at end of file diff --git a/roles/openshift_repos/handlers/main.yml b/roles/openshift_repos/handlers/main.yml index 26558a455..fed4ab2f0 100644 --- a/roles/openshift_repos/handlers/main.yml +++ b/roles/openshift_repos/handlers/main.yml @@ -1,3 +1,6 @@ --- -- name: refresh package cache +- name: refresh yum cache command: yum clean all + +- name: refresh dnf cache + command: dnf clean all diff --git a/roles/openshift_repos/tasks/main.yaml b/roles/openshift_repos/tasks/main.yaml index 66be0cb7b..c55b5df89 100644 --- a/roles/openshift_repos/tasks/main.yaml +++ b/roles/openshift_repos/tasks/main.yaml @@ -14,38 +14,64 @@ yum: pkg: libselinux-python state: present + when: ansible_pkg_mgr == "yum" + +- name: Ensure libselinux-python is installed + dnf: + pkg: libselinux-python + state: present + when: ansible_pkg_mgr == "dnf" - name: Create any additional repos that are defined template: src: yum_repo.j2 dest: /etc/yum.repos.d/openshift_additional.repo when: openshift_additional_repos | length > 0 - notify: refresh package cache + notify: refresh yum cache - name: Remove the additional repos if no longer defined file: dest: /etc/yum.repos.d/openshift_additional.repo state: absent when: openshift_additional_repos | length == 0 - notify: refresh package cache + notify: refresh yum cache -- name: Remove any yum repo files for other deployment types +- name: Remove any yum repo files for other deployment types RHEL/CentOS file: path: "/etc/yum.repos.d/{{ item | basename }}" state: absent with_fileglob: - '*/repos/*' - when: not (item | search("/files/" ~ openshift_deployment_type ~ "/repos")) - notify: refresh package cache + when: not (item | search("/files/" ~ openshift_deployment_type ~ "/repos")) and + (ansible_os_family == "RedHat" and ansible_distribution != "Fedora") + notify: refresh yum cache + +- name: Remove any yum repo files for other deployment types Fedora + file: + path: "/etc/yum.repos.d/{{ item | basename }}" + state: absent + with_fileglob: + - '*/repos/*' + when: not (item | search("/files/fedora-" ~ openshift_deployment_type ~ "/repos")) and + (ansible_distribution == "Fedora") + notify: refresh dnf cache - name: Configure gpg keys if needed copy: src={{ item }} dest=/etc/pki/rpm-gpg/ with_fileglob: - "{{ openshift_deployment_type }}/gpg_keys/*" - notify: refresh package cache + notify: refresh yum cache -- name: Configure yum repositories +- name: Configure yum repositories RHEL/CentOS copy: src={{ item }} dest=/etc/yum.repos.d/ with_fileglob: - "{{ openshift_deployment_type }}/repos/*" - notify: refresh package cache + notify: refresh yum cache + when: (ansible_os_family == "RedHat" and ansible_distribution != "Fedora") + +- name: Configure yum repositories Fedora + copy: src={{ item }} dest=/etc/yum.repos.d/ + with_fileglob: + - "fedora-{{ openshift_deployment_type }}/repos/*" + notify: refresh dnf cache + when: (ansible_distribution == 
"Fedora") diff --git a/roles/openshift_storage_nfs_lvm/tasks/nfs.yml b/roles/openshift_storage_nfs_lvm/tasks/nfs.yml index 65ae069df..bf23dfe98 100644 --- a/roles/openshift_storage_nfs_lvm/tasks/nfs.yml +++ b/roles/openshift_storage_nfs_lvm/tasks/nfs.yml @@ -1,6 +1,11 @@ --- - name: Install NFS server yum: name=nfs-utils state=present + when: ansible_pkg_mgr == "yum" + +- name: Install NFS server + dnf: name=nfs-utils state=present + when: ansible_pkg_mgr == "dnf" - name: Start rpcbind service: name=rpcbind state=started enabled=yes diff --git a/roles/os_env_extras/tasks/main.yaml b/roles/os_env_extras/tasks/main.yaml index 96b12ad5b..29599559c 100644 --- a/roles/os_env_extras/tasks/main.yaml +++ b/roles/os_env_extras/tasks/main.yaml @@ -15,3 +15,10 @@ yum: pkg: bash-completion state: installed + when: ansible_pkg_mgr == "yum" + +- name: Bash Completion + dnf: + pkg: bash-completion + state: installed + when: ansible_pkg_mgr == "dnf" diff --git a/roles/os_firewall/tasks/firewall/firewalld.yml b/roles/os_firewall/tasks/firewall/firewalld.yml index 5089eb3e0..cf2a2c733 100644 --- a/roles/os_firewall/tasks/firewall/firewalld.yml +++ b/roles/os_firewall/tasks/firewall/firewalld.yml @@ -3,6 +3,14 @@ yum: name: firewalld state: present + when: ansible_pkg_mgr == "yum" + register: install_result + +- name: Install firewalld packages + dnf: + name: firewalld + state: present + when: ansible_pkg_mgr == "dnf" register: install_result - name: Check if iptables-services is installed diff --git a/roles/os_firewall/tasks/firewall/iptables.yml b/roles/os_firewall/tasks/firewall/iptables.yml index 9af9d8d29..36d51504c 100644 --- a/roles/os_firewall/tasks/firewall/iptables.yml +++ b/roles/os_firewall/tasks/firewall/iptables.yml @@ -6,6 +6,17 @@ with_items: - iptables - iptables-services + when: ansible_pkg_mgr == "yum" + register: install_result + +- name: Install iptables packages + dnf: + name: "{{ item }}" + state: present + with_items: + - iptables + - iptables-services + when: ansible_pkg_mgr == "dnf" register: install_result - name: Check if firewalld is installed diff --git a/roles/os_update_latest/tasks/main.yml b/roles/os_update_latest/tasks/main.yml index 4a2c3d47a..40eec8d35 100644 --- a/roles/os_update_latest/tasks/main.yml +++ b/roles/os_update_latest/tasks/main.yml @@ -1,3 +1,8 @@ --- - name: Update all packages yum: name=* state=latest + when: ansible_pkg_mgr == "yum" + +- name: Update all packages + dnf: name=* state=latest + when: ansible_pkg_mgr == "dnf" diff --git a/roles/yum_repos/README.md b/roles/yum_repos/README.md index 51ecd5d34..908ab4972 100644 --- a/roles/yum_repos/README.md +++ b/roles/yum_repos/README.md @@ -6,7 +6,7 @@ This role allows easy deployment of yum repository config files. Requirements ------------ -Yum +Yum or dnf Role Variables -------------- diff --git a/utils/site_assets/oo-install-bootstrap.sh b/utils/site_assets/oo-install-bootstrap.sh index e1b2cec90..3847c029a 100755 --- a/utils/site_assets/oo-install-bootstrap.sh +++ b/utils/site_assets/oo-install-bootstrap.sh @@ -9,6 +9,13 @@ cmdlnargs="$@" : ${OO_INSTALL_LOG:=${TMPDIR}/INSTALLPKGNAME.log} [[ $TMPDIR != */ ]] && TMPDIR="${TMPDIR}/" +if rpm -q dnf; +then + PKG_MGR="dnf" +else + PKG_MGR="yum" +fi + if [ $OO_INSTALL_CONTEXT != 'origin_vm' ] then clear @@ -18,7 +25,7 @@ if [ -e /etc/redhat-release ] then for i in python python-virtualenv openssh-clients gcc do - rpm -q $i >/dev/null 2>&1 || { echo >&2 "Missing installation dependency detected. 
Please run \"yum install ${i}\"."; exit 1; } + rpm -q $i >/dev/null 2>&1 || { echo >&2 "Missing installation dependency detected. Please run \"${PKG_MGR} install ${i}\"."; exit 1; } done fi for i in python virtualenv ssh gcc -- cgit v1.2.3 From 192ccc8e6e6f465351828f32e9dc43b840897b67 Mon Sep 17 00:00:00 2001 From: Andrew Butcher Date: Tue, 1 Dec 2015 16:30:05 -0500 Subject: Refactor dns options and facts. --- inventory/byo/hosts.example | 3 +++ playbooks/common/openshift-cluster/config.yml | 3 --- playbooks/common/openshift-master/config.yml | 16 ---------------- roles/openshift_facts/library/openshift_facts.py | 8 ++++---- roles/openshift_master/tasks/main.yml | 8 +------- roles/openshift_node/tasks/main.yml | 7 +------ roles/openshift_node/templates/node.yaml.v1.j2 | 6 ++++-- 7 files changed, 13 insertions(+), 38 deletions(-) (limited to 'inventory/byo') diff --git a/inventory/byo/hosts.example b/inventory/byo/hosts.example index 1a67cc290..dd8d8f855 100644 --- a/inventory/byo/hosts.example +++ b/inventory/byo/hosts.example @@ -154,6 +154,9 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # entries for the interfaces attached to the host. #openshift_set_hostname=True +# Configure dnsIP in the node config +#openshift_dns_ip=172.30.0.1 + # host group for masters [masters] ose3-master[1:3]-ansible.test.example.com diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml index a8bd634d3..482fa8441 100644 --- a/playbooks/common/openshift-cluster/config.yml +++ b/playbooks/common/openshift-cluster/config.yml @@ -6,6 +6,3 @@ - include: ../openshift-master/config.yml - include: ../openshift-node/config.yml - vars: - osn_cluster_dns_domain: "{{ hostvars[groups.oo_first_master.0].openshift.dns.domain }}" - osn_cluster_dns_ip: "{{ hostvars[groups.oo_first_master.0].cluster_dns_ip }}" diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml index 1c8a92122..785a78497 100644 --- a/playbooks/common/openshift-master/config.yml +++ b/playbooks/common/openshift-master/config.yml @@ -353,22 +353,6 @@ - role: openshift_cluster_metrics when: openshift.common.use_cluster_metrics | bool - # TODO: Setting the cluster dns ip should be pushed into openshift-facts -- name: Determine cluster dns ip - hosts: oo_first_master - tasks: - - name: Get master service ip - # This command has to be on a single line. 
- command: "{{ openshift.common.client_binary }} -n default --config={{ openshift.common.config_base }}/master/admin.kubeconfig get -o template svc kubernetes --template=\\{\\{.spec.clusterIP\\}\\} --output-version=v1" - register: master_service_ip_output - when: openshift.common.version_greater_than_3_1_or_1_1 | bool - - set_fact: - cluster_dns_ip: "{{ hostvars[groups.oo_first_master.0].openshift.dns.ip }}" - when: not openshift.common.version_greater_than_3_1_or_1_1 | bool - - set_fact: - cluster_dns_ip: "{{ master_service_ip_output.stdout }}" - when: openshift.common.version_greater_than_3_1_or_1_1 | bool - - name: Enable cockpit hosts: oo_first_master vars: diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py index b60e42c71..e937b742e 100755 --- a/roles/openshift_facts/library/openshift_facts.py +++ b/roles/openshift_facts/library/openshift_facts.py @@ -528,9 +528,9 @@ def set_aggregate_facts(facts): internal_hostnames.add(facts['common']['hostname']) internal_hostnames.add(facts['common']['ip']) + cluster_domain = facts['common']['dns_domain'] + if 'master' in facts: - # FIXME: not sure why but facts['dns']['domain'] fails - cluster_domain = 'cluster.local' if 'cluster_hostname' in facts['master']: all_hostnames.add(facts['master']['cluster_hostname']) if 'cluster_public_hostname' in facts['master']: @@ -985,7 +985,7 @@ class OpenShiftFacts(object): Raises: OpenShiftFactsUnsupportedRoleError: """ - known_roles = ['common', 'master', 'node', 'master_sdn', 'node_sdn', 'dns', 'etcd'] + known_roles = ['common', 'master', 'node', 'master_sdn', 'node_sdn', 'etcd'] def __init__(self, role, filename, local_facts, additive_facts_to_overwrite=False): self.changed = False @@ -1056,6 +1056,7 @@ class OpenShiftFacts(object): public_hostname=hostname) common['client_binary'] = 'oc' if os.path.isfile('/usr/bin/oc') else 'osc' common['admin_binary'] = 'oadm' if os.path.isfile('/usr/bin/oadm') else 'osadm' + common['dns_domain'] = 'cluster.local' defaults['common'] = common if 'master' in roles: @@ -1076,7 +1077,6 @@ class OpenShiftFacts(object): node = dict(labels={}, annotations={}, portal_net='172.30.0.0/16', iptables_sync_period='5s', set_node_ip=False) defaults['node'] = node - return defaults def guess_host_provider(self): diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml index 2cf2a53c4..5d4ddfca0 100644 --- a/roles/openshift_master/tasks/main.yml +++ b/roles/openshift_master/tasks/main.yml @@ -81,14 +81,8 @@ yum: pkg={{ openshift.common.service_type }}-master{{ openshift_version }} state=present register: install_result -# TODO: These values need to be configurable -- name: Set dns facts +- name: Re-gather package dependent master facts openshift_facts: - role: dns - local_facts: - ip: "{{ openshift_master_cluster_vip | default(openshift.common.ip, true) | default(None) }}" - domain: cluster.local - when: openshift.master.embedded_dns - name: Create config parent directory if it does not exist file: diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml index 42d984a09..1d3ac7c09 100644 --- a/roles/openshift_node/tasks/main.yml +++ b/roles/openshift_node/tasks/main.yml @@ -1,11 +1,5 @@ --- # TODO: allow for overriding default ports where possible -- fail: - msg: This role requres that osn_cluster_dns_domain is set - when: osn_cluster_dns_domain is not defined or not osn_cluster_dns_domain -- fail: - msg: This role requres that osn_cluster_dns_ip is set - when: 
osn_cluster_dns_ip is not defined or not osn_cluster_dns_ip - fail: msg: "SELinux is disabled, This deployment type requires that SELinux is enabled." when: (not ansible_selinux or ansible_selinux.status != 'enabled') and deployment_type in ['enterprise', 'online', 'atomic-enterprise', 'openshift-enterprise'] @@ -20,6 +14,7 @@ hostname: "{{ openshift_hostname | default(none) }}" public_hostname: "{{ openshift_public_hostname | default(none) }}" deployment_type: "{{ openshift_deployment_type }}" + dns_ip: "{{ openshift_dns_ip | default(openshift_master_cluster_vip | default(None, true), true) }}" - role: node local_facts: annotations: "{{ openshift_node_annotations | default(none) }}" diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2 index 41a303dee..23bd81f91 100644 --- a/roles/openshift_node/templates/node.yaml.v1.j2 +++ b/roles/openshift_node/templates/node.yaml.v1.j2 @@ -1,7 +1,9 @@ allowDisabledDocker: false apiVersion: v1 -dnsDomain: {{ osn_cluster_dns_domain }} -dnsIP: {{ osn_cluster_dns_ip }} +dnsDomain: {{ openshift.common.dns_domain }} +{% if 'dns_ip' in openshift.common %} +dnsIP: {{ openshift.common.dns_ip }} +{% endif %} dockerConfig: execHandlerName: "" iptablesSyncPeriod: "{{ openshift.node.iptables_sync_period }}" -- cgit v1.2.3 From 5ffc8386461114c6fff4f27917ee09ae744d20b4 Mon Sep 17 00:00:00 2001 From: Scott Dodson Date: Fri, 20 Nov 2015 10:29:16 -0500 Subject: Add unique AEP, OSE, and Origin BYO inventories --- inventory/byo/hosts.aep.example | 178 ++++++++++++++++++++++++++++++++++++ inventory/byo/hosts.example | 179 ------------------------------------ inventory/byo/hosts.origin.example | 182 +++++++++++++++++++++++++++++++++++++ inventory/byo/hosts.ose.example | 178 ++++++++++++++++++++++++++++++++++++ 4 files changed, 538 insertions(+), 179 deletions(-) create mode 100644 inventory/byo/hosts.aep.example delete mode 100644 inventory/byo/hosts.example create mode 100644 inventory/byo/hosts.origin.example create mode 100644 inventory/byo/hosts.ose.example (limited to 'inventory/byo') diff --git a/inventory/byo/hosts.aep.example b/inventory/byo/hosts.aep.example new file mode 100644 index 000000000..d5b872e06 --- /dev/null +++ b/inventory/byo/hosts.aep.example @@ -0,0 +1,178 @@ +# This is an example of a bring your own (byo) host inventory + +# Create an OSEv3 group that contains the masters and nodes groups +[OSEv3:children] +masters +nodes +etcd +lb + +# Set variables common for all OSEv3 hosts +[OSEv3:vars] +# SSH user, this user should allow ssh based auth without requiring a +# password. If using ssh key based auth, then the key should be managed by an +# ssh agent. +ansible_ssh_user=root + +# If ansible_ssh_user is not root, ansible_sudo must be set to true and the +# user must be configured for passwordless sudo +#ansible_sudo=true + +# deployment type valid values are origin, online, atomic-enterprise, and openshift-enterprise +deployment_type=atomic-enterprise + +# Enable cluster metrics +#use_cluster_metrics=true + +# Add additional, insecure, and blocked registries to global docker configuration +# For enterprise deployment types we ensure that registry.access.redhat.com is +# included if you do not include it +#cli_docker_additional_registries=registry.example.com +#cli_docker_insecure_registries=registry.example.com +#cli_docker_blocked_registries=registry.hacker.com + +# Alternate image format string. 
If you're not modifying the format string and +# only need to inject your own registry you may want to consider +# cli_docker_additional_registries instead +#oreg_url=example.com/aep3/aep-${component}:${version} + +# Additional yum repos to install +#openshift_additional_repos=[{'id': 'aep-devel', 'name': 'aep-devel', 'baseurl': 'http://example.com/puddle/build/AtomicOpenShift/3.1/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}] + +# htpasswd auth +openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/htpasswd'}] + +# Allow all auth +#openshift_master_identity_providers=[{'name': 'allow_all', 'login': 'true', 'challenge': 'true', 'kind': 'AllowAllPasswordIdentityProvider'}] + +# LDAP auth +#openshift_master_identity_providers=[{'name': 'my_ldap_provider', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider', 'attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': '', 'bindPassword': '', 'ca': '', 'insecure': 'false', 'url': 'ldap://ldap.example.com:389/ou=users,dc=example,dc=com?uid'}] + +# Project Configuration +#osm_project_request_message='' +#osm_project_request_template='' +#osm_mcs_allocator_range='s0:/2' +#osm_mcs_labels_per_project=5 +#osm_uid_allocator_range='1000000000-1999999999/10000' + +# Configure Fluentd +#use_fluentd=true + +# Enable cockpit +#osm_use_cockpit=true +# +# Set cockpit plugins +#osm_cockpit_plugins=['cockpit-kubernetes'] + +# Native high availbility cluster method with optional load balancer. +# If no lb group is defined installer assumes that a load balancer has +# been preconfigured. For installation the value of +# openshift_master_cluster_hostname must resolve to the load balancer +# or to one or all of the masters defined in the inventory if no load +# balancer is present. +#openshift_master_cluster_method=native +#openshift_master_cluster_hostname=openshift-ansible.test.example.com +#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com + +# Pacemaker high availability cluster method. +# Pacemaker HA environment must be able to self provision the +# configured VIP. For installation openshift_master_cluster_hostname +# must resolve to the configured VIP. +#openshift_master_cluster_method=pacemaker +#openshift_master_cluster_password=openshift_cluster +#openshift_master_cluster_vip=192.168.133.25 +#openshift_master_cluster_public_vip=192.168.133.25 +#openshift_master_cluster_hostname=openshift-ansible.test.example.com +#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com + +# Override the default controller lease ttl +#osm_controller_lease_ttl=30 + +# default subdomain to use for exposed routes +#osm_default_subdomain=apps.test.example.com + +# additional cors origins +#osm_custom_cors_origins=['foo.example.com', 'bar.example.com'] + +# default project node selector +#osm_default_node_selector='region=primary' + +# default storage plugin dependencies to install, by default the ceph and +# glusterfs plugin dependencies will be installed, if available. 
+#osn_storage_plugin_deps=['ceph','glusterfs'] + +# default selectors for router and registry services +# openshift_router_selector='region=infra' +# openshift_registry_selector='region=infra' + +# Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet') +# os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant' + +# Disable the OpenShift SDN plugin +# openshift_use_openshift_sdn=False + +# set RPM version for debugging purposes +#openshift_pkg_version=-3.1.0.0 + +# Configure custom named certificates +# NOTE: openshift_master_named_certificates is cached on masters and is an +# additive fact, meaning that each run with a different set of certificates +# will add the newly provided certificates to the cached set of certificates. +# If you would like openshift_master_named_certificates to be overwritten with +# the provided value, specify openshift_master_overwrite_named_certificates. +#openshift_master_overwrite_named_certificates: true +# +# Provide local certificate paths which will be deployed to masters +#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key"}] +# +# Detected names may be overridden by specifying the "names" key +#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "names": ["public-master-host.com"]}] + +# Session options +#openshift_master_session_name=ssn +#openshift_master_session_max_seconds=3600 + +# An authentication and encryption secret will be generated if secrets +# are not provided. If provided, openshift_master_session_auth_secrets +# and openshift_master_encryption_secrets must be equal length. +# +# Signing secrets, used to authenticate sessions using +# HMAC. Recommended to use secrets with 32 or 64 bytes. +#openshift_master_session_auth_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO'] +# +# Encrypting secrets, used to encrypt sessions. Must be 16, 24, or 32 +# characters long, to select AES-128, AES-192, or AES-256. +#openshift_master_session_encryption_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO'] + +# configure how often node iptables rules are refreshed +#openshift_node_iptables_sync_period=5s + +# Configure nodeIP in the node config +# This is needed in cases where node traffic is desired to go over an +# interface other than the default network interface. +#openshift_node_set_node_ip=True + +# Force setting of system hostname when configuring OpenShift +# This works around issues related to installations that do not have valid dns +# entries for the interfaces attached to the host. +#openshift_set_hostname=True + +# Configure dnsIP in the node config +#openshift_dns_ip=172.30.0.1 + +# host group for masters +[masters] +aep3-master[1:3]-ansible.test.example.com + +[etcd] +aep3-etcd[1:3]-ansible.test.example.com + +[lb] +aep3-lb-ansible.test.example.com + +# NOTE: Currently we require that masters be part of the SDN which requires that they also be nodes +# However, in order to ensure that your masters are not burdened with running pods you should +# make them unschedulable by adding openshift_schedulable=False any node that's also a master. 
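Concretely, that flag can be attached to a colocated master either inline in the [nodes] entries that follow or as a host variable; a minimal sketch using host_vars, with the hostname taken from the example hosts below:

```yaml
# host_vars/aep3-master1-ansible.test.example.com.yml (hypothetical file)
# Keep this colocated master from having regular pods scheduled onto it.
openshift_schedulable: false
```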
+[nodes] +aep3-master[1:3]-ansible.test.example.com +aep3-node[1:2]-ansible.test.example.com openshift_node_labels="{'region': 'primary', 'zone': 'default'}" diff --git a/inventory/byo/hosts.example b/inventory/byo/hosts.example deleted file mode 100644 index 799725a37..000000000 --- a/inventory/byo/hosts.example +++ /dev/null @@ -1,179 +0,0 @@ -# This is an example of a bring your own (byo) host inventory - -# Create an OSEv3 group that contains the masters and nodes groups -[OSEv3:children] -masters -nodes -etcd -lb - -# Set variables common for all OSEv3 hosts -[OSEv3:vars] -# SSH user, this user should allow ssh based auth without requiring a -# password. If using ssh key based auth, then the key should be managed by an -# ssh agent. -ansible_ssh_user=root - -# If ansible_ssh_user is not root, ansible_sudo must be set to true and the -# user must be configured for passwordless sudo -#ansible_sudo=true - -# deployment type valid values are origin, online and enterprise -deployment_type=atomic-enterprise - -# Enable cluster metrics -#use_cluster_metrics=true - -# Pre-release registry URL -#oreg_url=example.com/openshift3/ose-${component}:${version} - -# Pre-release Dev puddle repo -#openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterprise/3.0/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}] - -# Pre-release Errata puddle repo -#openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterpriseErrata/3.0/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}] - -# Origin copr repo -#openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name': 'OpenShift Origin COPR', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/', 'enabled': 1, 'gpgcheck': 1, gpgkey: 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}] - -# Origin Fedora copr repo -# Use this if you are installing on Fedora -#openshift_additional_repos=[{'id': 'fedora-openshift-origin-copr', 'name': 'OpenShift Origin COPR for Fedora', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/fedora-openshift/fedora-$releasever-$basearch/', 'enabled': 1, 'gpgcheck': 1, gpgkey: 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/fedora-openshift/pubkey.gpg'}] - -# htpasswd auth -openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/openshift/htpasswd'}] - -# Allow all auth -#openshift_master_identity_providers=[{'name': 'allow_all', 'login': 'true', 'challenge': 'true', 'kind': 'AllowAllPasswordIdentityProvider'}] - -# LDAP auth -#openshift_master_identity_providers=[{'name': 'my_ldap_provider', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider', 'attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': '', 'bindPassword': '', 'ca': '', 'insecure': 'false', 'url': 'ldap://ldap.example.com:389/ou=users,dc=example,dc=com?uid'}] - -# Project Configuration -#osm_project_request_message='' -#osm_project_request_template='' -#osm_mcs_allocator_range='s0:/2' -#osm_mcs_labels_per_project=5 -#osm_uid_allocator_range='1000000000-1999999999/10000' - -# Configure Fluentd -#use_fluentd=true - -# Enable cockpit -#osm_use_cockpit=true -# -# Set 
-#osm_cockpit_plugins=['cockpit-kubernetes']
-
-# Native high availbility cluster method with optional load balancer.
-# If no lb group is defined installer assumes that a load balancer has
-# been preconfigured. For installation the value of
-# openshift_master_cluster_hostname must resolve to the load balancer
-# or to one or all of the masters defined in the inventory if no load
-# balancer is present.
-#openshift_master_cluster_method=native
-#openshift_master_cluster_hostname=openshift-ansible.test.example.com
-#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com
-
-# Pacemaker high availability cluster method.
-# Pacemaker HA environment must be able to self provision the
-# configured VIP. For installation openshift_master_cluster_hostname
-# must resolve to the configured VIP.
-#openshift_master_cluster_method=pacemaker
-#openshift_master_cluster_password=openshift_cluster
-#openshift_master_cluster_vip=192.168.133.25
-#openshift_master_cluster_public_vip=192.168.133.25
-#openshift_master_cluster_hostname=openshift-ansible.test.example.com
-#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com
-
-# Override the default controller lease ttl
-#osm_controller_lease_ttl=30
-
-# default subdomain to use for exposed routes
-#osm_default_subdomain=apps.test.example.com
-
-# additional cors origins
-#osm_custom_cors_origins=['foo.example.com', 'bar.example.com']
-
-# default project node selector
-#osm_default_node_selector='region=primary'
-
-# default storage plugin dependencies to install, by default the ceph and
-# glusterfs plugin dependencies will be installed, if available.
-#osn_storage_plugin_deps=['ceph','glusterfs']
-
-# default selectors for router and registry services
-# openshift_router_selector='region=infra'
-# openshift_registry_selector='region=infra'
-
-# Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet')
-# os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'
-
-# Disable the OpenShift SDN plugin
-# openshift_use_openshift_sdn=False
-
-# set RPM version for debugging purposes
-#openshift_pkg_version=-3.0.0.0
-
-# Configure custom named certificates
-# NOTE: openshift_master_named_certificates is cached on masters and is an
-# additive fact, meaning that each run with a different set of certificates
-# will add the newly provided certificates to the cached set of certificates.
-# If you would like openshift_master_named_certificates to be overwritten with
-# the provided value, specify openshift_master_overwrite_named_certificates.
-#openshift_master_overwrite_named_certificates: true
-#
-# Provide local certificate paths which will be deployed to masters
-#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key"}]
-#
-# Detected names may be overridden by specifying the "names" key
-#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "names": ["public-master-host.com"]}]
-
-# Session options
-#openshift_master_session_name=ssn
-#openshift_master_session_max_seconds=3600
-
-# An authentication and encryption secret will be generated if secrets
-# are not provided. If provided, openshift_master_session_auth_secrets
-# and openshift_master_encryption_secrets must be equal length.
-#
-# Signing secrets, used to authenticate sessions using
-# HMAC. Recommended to use secrets with 32 or 64 bytes.
-#openshift_master_session_auth_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO']
-#
-# Encrypting secrets, used to encrypt sessions. Must be 16, 24, or 32
-# characters long, to select AES-128, AES-192, or AES-256.
-#openshift_master_session_encryption_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO']
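[Editorial illustration of the session-secret comments above, which recur in each of the new example inventories below; the values here are placeholders, not real secrets. Both lists must contain the same number of entries, and the length of each encryption secret selects the AES key size, so the 32-character values below would select AES-256:]

    #openshift_master_session_auth_secrets=['EXAMPLEsigningSecretReplaceMe32b', 'EXAMPLEsigningSecretReplaceMe32c']
    #openshift_master_session_encryption_secrets=['EXAMPLEencryptSecretAES256aaaaaa', 'EXAMPLEencryptSecretAES256bbbbbb']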
-
-# configure how often node iptables rules are refreshed
-#openshift_node_iptables_sync_period=5s
-
-# Configure nodeIP in the node config
-# This is needed in cases where node traffic is desired to go over an
-# interface other than the default network interface.
-#openshift_node_set_node_ip=True
-
-# Force setting of system hostname when configuring OpenShift
-# This works around issues related to installations that do not have valid dns
-# entries for the interfaces attached to the host.
-#openshift_set_hostname=True
-
-# Configure dnsIP in the node config
-#openshift_dns_ip=172.30.0.1
-
-# host group for masters
-[masters]
-ose3-master[1:3]-ansible.test.example.com
-
-[etcd]
-ose3-etcd[1:3]-ansible.test.example.com
-
-[lb]
-ose3-lb-ansible.test.example.com
-
-# NOTE: Currently we require that masters be part of the SDN which requires that they also be nodes
-# However, in order to ensure that your masters are not burdened with running pods you should
-# make them unschedulable by adding openshift_schedulable=False any node that's also a master.
-[nodes]
-ose3-master[1:3]-ansible.test.example.com
-ose3-node[1:2]-ansible.test.example.com openshift_node_labels="{'region': 'primary', 'zone': 'default'}"
diff --git a/inventory/byo/hosts.origin.example b/inventory/byo/hosts.origin.example
new file mode 100644
index 000000000..77a3a04b4
--- /dev/null
+++ b/inventory/byo/hosts.origin.example
@@ -0,0 +1,182 @@
+# This is an example of a bring your own (byo) host inventory
+
+# Create an OSEv3 group that contains the masters and nodes groups
+[OSEv3:children]
+masters
+nodes
+etcd
+lb
+
+# Set variables common for all OSEv3 hosts
+[OSEv3:vars]
+# SSH user, this user should allow ssh based auth without requiring a
+# password. If using ssh key based auth, then the key should be managed by an
+# ssh agent.
+ansible_ssh_user=root
+
+# If ansible_ssh_user is not root, ansible_sudo must be set to true and the
+# user must be configured for passwordless sudo
+#ansible_sudo=true
+
+# deployment type valid values are origin, online, atomic-enterprise and openshift-enterprise
+deployment_type=origin
+
+# Enable cluster metrics
+#use_cluster_metrics=true
+
+# Add additional, insecure, and blocked registries to global docker configuration
+# For enterprise deployment types we ensure that registry.access.redhat.com is
+# included if you do not include it
+#cli_docker_additional_registries=registry.example.com
+#cli_docker_insecure_registries=registry.example.com
+#cli_docker_blocked_registries=registry.hacker.com
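[Editorial illustration of the registry settings above, not part of the patch; the mirror hostname is hypothetical. A deployment that pulls images from an internal mirror without a trusted TLS certificate might list the same mirror in both variables, keeping cli_docker_blocked_registries for registries that must never be used:]

    #cli_docker_additional_registries=mirror.internal.example.com:5000
    #cli_docker_insecure_registries=mirror.internal.example.com:5000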
+
+# Alternate image format string. If you're not modifying the format string and
+# only need to inject your own registry you may want to consider
+# cli_docker_additional_registries instead
+#oreg_url=example.com/openshift3/ose-${component}:${version}
+
+# Origin copr repo
+#openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name': 'OpenShift Origin COPR', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/', 'enabled': 1, 'gpgcheck': 1, gpgkey: 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}]
+
+# Origin Fedora copr repo
+# Use this if you are installing on Fedora
+#openshift_additional_repos=[{'id': 'fedora-openshift-origin-copr', 'name': 'OpenShift Origin COPR for Fedora', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/fedora-openshift/fedora-$releasever-$basearch/', 'enabled': 1, 'gpgcheck': 1, gpgkey: 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/fedora-openshift/pubkey.gpg'}]
+
+# htpasswd auth
+openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/htpasswd'}]
+
+# Allow all auth
+#openshift_master_identity_providers=[{'name': 'allow_all', 'login': 'true', 'challenge': 'true', 'kind': 'AllowAllPasswordIdentityProvider'}]
+
+# LDAP auth
+#openshift_master_identity_providers=[{'name': 'my_ldap_provider', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider', 'attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': '', 'bindPassword': '', 'ca': '', 'insecure': 'false', 'url': 'ldap://ldap.example.com:389/ou=users,dc=example,dc=com?uid'}]
+
+# Project Configuration
+#osm_project_request_message=''
+#osm_project_request_template=''
+#osm_mcs_allocator_range='s0:/2'
+#osm_mcs_labels_per_project=5
+#osm_uid_allocator_range='1000000000-1999999999/10000'
+
+# Configure Fluentd
+#use_fluentd=true
+
+# Enable cockpit
+#osm_use_cockpit=true
+#
+# Set cockpit plugins
+#osm_cockpit_plugins=['cockpit-kubernetes']
+
+# Native high availability cluster method with optional load balancer.
+# If no lb group is defined installer assumes that a load balancer has
+# been preconfigured. For installation the value of
+# openshift_master_cluster_hostname must resolve to the load balancer
+# or to one or all of the masters defined in the inventory if no load
+# balancer is present.
+#openshift_master_cluster_method=native
+#openshift_master_cluster_hostname=openshift-ansible.test.example.com
+#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com
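[Editorial illustration of the native HA comment above, not part of the patch; the hostnames reuse this inventory's own examples. With the [lb] group defined, one way to satisfy the resolution requirement is to point the cluster hostname directly at the load balancer host:]

    openshift_master_cluster_method=native
    openshift_master_cluster_hostname=ose3-lb-ansible.test.example.com
    openshift_master_cluster_public_hostname=ose3-lb-ansible.test.example.com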
+
+# Pacemaker high availability cluster method.
+# Pacemaker HA environment must be able to self provision the
+# configured VIP. For installation openshift_master_cluster_hostname
+# must resolve to the configured VIP.
+#openshift_master_cluster_method=pacemaker
+#openshift_master_cluster_password=openshift_cluster
+#openshift_master_cluster_vip=192.168.133.25
+#openshift_master_cluster_public_vip=192.168.133.25
+#openshift_master_cluster_hostname=openshift-ansible.test.example.com
+#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com
+
+# Override the default controller lease ttl
+#osm_controller_lease_ttl=30
+
+# default subdomain to use for exposed routes
+#osm_default_subdomain=apps.test.example.com
+
+# additional cors origins
+#osm_custom_cors_origins=['foo.example.com', 'bar.example.com']
+
+# default project node selector
+#osm_default_node_selector='region=primary'
+
+# default storage plugin dependencies to install, by default the ceph and
+# glusterfs plugin dependencies will be installed, if available.
+#osn_storage_plugin_deps=['ceph','glusterfs']
+
+# default selectors for router and registry services
+# openshift_router_selector='region=infra'
+# openshift_registry_selector='region=infra'
+
+# Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet')
+# os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'
+
+# Disable the OpenShift SDN plugin
+# openshift_use_openshift_sdn=False
+
+# set RPM version for debugging purposes
+#openshift_pkg_version=-1.1
+
+# Configure custom named certificates
+# NOTE: openshift_master_named_certificates is cached on masters and is an
+# additive fact, meaning that each run with a different set of certificates
+# will add the newly provided certificates to the cached set of certificates.
+# If you would like openshift_master_named_certificates to be overwritten with
+# the provided value, specify openshift_master_overwrite_named_certificates.
+#openshift_master_overwrite_named_certificates=true
+#
+# Provide local certificate paths which will be deployed to masters
+#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key"}]
+#
+# Detected names may be overridden by specifying the "names" key
+#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "names": ["public-master-host.com"]}]
+
+# Session options
+#openshift_master_session_name=ssn
+#openshift_master_session_max_seconds=3600
+
+# An authentication and encryption secret will be generated if secrets
+# are not provided. If provided, openshift_master_session_auth_secrets
+# and openshift_master_encryption_secrets must be equal length.
+#
+# Signing secrets, used to authenticate sessions using
+# HMAC. Recommended to use secrets with 32 or 64 bytes.
+#openshift_master_session_auth_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO']
+#
+# Encrypting secrets, used to encrypt sessions. Must be 16, 24, or 32
+# characters long, to select AES-128, AES-192, or AES-256.
+#openshift_master_session_encryption_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO']
+
+# configure how often node iptables rules are refreshed
+#openshift_node_iptables_sync_period=5s
+
+# Configure nodeIP in the node config
+# This is needed in cases where node traffic is desired to go over an
+# interface other than the default network interface.
+#openshift_node_set_node_ip=True
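[Editorial illustration of the nodeIP comment above, not part of the patch. Enabling the flag as documented, and pinning the address a particular host should use with a per-host openshift_ip override, might look like the sketch below; openshift_ip as a host-level variable and the 10.0.0.11 address are assumptions for illustration only:]

    openshift_node_set_node_ip=True

    [nodes]
    ose3-node1-ansible.test.example.com openshift_ip=10.0.0.11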
+
+# Force setting of system hostname when configuring OpenShift
+# This works around issues related to installations that do not have valid dns
+# entries for the interfaces attached to the host.
+#openshift_set_hostname=True
+
+# Configure dnsIP in the node config
+#openshift_dns_ip=172.30.0.1
+
+# host group for masters
+[masters]
+ose3-master[1:3]-ansible.test.example.com
+
+[etcd]
+ose3-etcd[1:3]-ansible.test.example.com
+
+[lb]
+ose3-lb-ansible.test.example.com
+
+# NOTE: Currently we require that masters be part of the SDN which requires that they also be nodes
+# However, in order to ensure that your masters are not burdened with running pods you should
+# make them unschedulable by adding openshift_schedulable=False to any node that's also a master.
+[nodes]
+ose3-master[1:3]-ansible.test.example.com
+ose3-node[1:2]-ansible.test.example.com openshift_node_labels="{'region': 'primary', 'zone': 'default'}"
diff --git a/inventory/byo/hosts.ose.example b/inventory/byo/hosts.ose.example
new file mode 100644
index 000000000..5a4310298
--- /dev/null
+++ b/inventory/byo/hosts.ose.example
@@ -0,0 +1,178 @@
+# This is an example of a bring your own (byo) host inventory
+
+# Create an OSEv3 group that contains the masters and nodes groups
+[OSEv3:children]
+masters
+nodes
+etcd
+lb
+
+# Set variables common for all OSEv3 hosts
+[OSEv3:vars]
+# SSH user, this user should allow ssh based auth without requiring a
+# password. If using ssh key based auth, then the key should be managed by an
+# ssh agent.
+ansible_ssh_user=root
+
+# If ansible_ssh_user is not root, ansible_sudo must be set to true and the
+# user must be configured for passwordless sudo
+#ansible_sudo=true
+
+# deployment type valid values are origin, online, atomic-enterprise, and openshift-enterprise
+deployment_type=openshift-enterprise
+
+# Enable cluster metrics
+#use_cluster_metrics=true
+
+# Add additional, insecure, and blocked registries to global docker configuration
+# For enterprise deployment types we ensure that registry.access.redhat.com is
+# included if you do not include it
+#cli_docker_additional_registries=registry.example.com
+#cli_docker_insecure_registries=registry.example.com
+#cli_docker_blocked_registries=registry.hacker.com
+
+# Alternate image format string. If you're not modifying the format string and
+# only need to inject your own registry you may want to consider
+# cli_docker_additional_registries instead
+#oreg_url=example.com/openshift3/ose-${component}:${version}
+
+# Additional yum repos to install
+#openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://example.com/puddle/build/AtomicOpenShift/3.1/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}]
+
+# htpasswd auth
+openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/htpasswd'}]
+
+# Allow all auth
+#openshift_master_identity_providers=[{'name': 'allow_all', 'login': 'true', 'challenge': 'true', 'kind': 'AllowAllPasswordIdentityProvider'}]
+
+# LDAP auth
+#openshift_master_identity_providers=[{'name': 'my_ldap_provider', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider', 'attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': '', 'bindPassword': '', 'ca': '', 'insecure': 'false', 'url': 'ldap://ldap.example.com:389/ou=users,dc=example,dc=com?uid'}]
+
+# Project Configuration
+#osm_project_request_message=''
+#osm_project_request_template=''
+#osm_mcs_allocator_range='s0:/2'
+#osm_mcs_labels_per_project=5
+#osm_uid_allocator_range='1000000000-1999999999/10000'
+
+# Configure Fluentd
+#use_fluentd=true
+
+# Enable cockpit
+#osm_use_cockpit=true
+#
+# Set cockpit plugins
+#osm_cockpit_plugins=['cockpit-kubernetes']
+
+# Native high availability cluster method with optional load balancer.
+# If no lb group is defined installer assumes that a load balancer has
+# been preconfigured. For installation the value of
+# openshift_master_cluster_hostname must resolve to the load balancer
+# or to one or all of the masters defined in the inventory if no load
+# balancer is present.
+#openshift_master_cluster_method=native
+#openshift_master_cluster_hostname=openshift-ansible.test.example.com
+#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com
+
+# Pacemaker high availability cluster method.
+# Pacemaker HA environment must be able to self provision the
+# configured VIP. For installation openshift_master_cluster_hostname
+# must resolve to the configured VIP.
+#openshift_master_cluster_method=pacemaker
+#openshift_master_cluster_password=openshift_cluster
+#openshift_master_cluster_vip=192.168.133.25
+#openshift_master_cluster_public_vip=192.168.133.25
+#openshift_master_cluster_hostname=openshift-ansible.test.example.com
+#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com
+
+# Override the default controller lease ttl
+#osm_controller_lease_ttl=30
+
+# default subdomain to use for exposed routes
+#osm_default_subdomain=apps.test.example.com
+
+# additional cors origins
+#osm_custom_cors_origins=['foo.example.com', 'bar.example.com']
+
+# default project node selector
+#osm_default_node_selector='region=primary'
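[Editorial illustration of the default project node selector above, not part of the patch. The selector only has an effect if hosts in [nodes] carry a matching label, as the node entries at the bottom of these examples already do via openshift_node_labels:]

    osm_default_node_selector='region=primary'
    ose3-node[1:2]-ansible.test.example.com openshift_node_labels="{'region': 'primary', 'zone': 'default'}"

[With that pairing, pods in projects that do not override the selector would presumably land only on the 'region=primary' nodes.]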
+
+# default storage plugin dependencies to install, by default the ceph and
+# glusterfs plugin dependencies will be installed, if available.
+#osn_storage_plugin_deps=['ceph','glusterfs']
+
+# default selectors for router and registry services
+# openshift_router_selector='region=infra'
+# openshift_registry_selector='region=infra'
+
+# Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet')
+# os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'
+
+# Disable the OpenShift SDN plugin
+# openshift_use_openshift_sdn=False
+
+# set RPM version for debugging purposes
+#openshift_pkg_version=-3.1.0.0
+
+# Configure custom named certificates
+# NOTE: openshift_master_named_certificates is cached on masters and is an
+# additive fact, meaning that each run with a different set of certificates
+# will add the newly provided certificates to the cached set of certificates.
+# If you would like openshift_master_named_certificates to be overwritten with
+# the provided value, specify openshift_master_overwrite_named_certificates.
+#openshift_master_overwrite_named_certificates=true
+#
+# Provide local certificate paths which will be deployed to masters
+#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key"}]
+#
+# Detected names may be overridden by specifying the "names" key
+#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "names": ["public-master-host.com"]}]
+
+# Session options
+#openshift_master_session_name=ssn
+#openshift_master_session_max_seconds=3600
+
+# An authentication and encryption secret will be generated if secrets
+# are not provided. If provided, openshift_master_session_auth_secrets
+# and openshift_master_encryption_secrets must be equal length.
+#
+# Signing secrets, used to authenticate sessions using
+# HMAC. Recommended to use secrets with 32 or 64 bytes.
+#openshift_master_session_auth_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO']
+#
+# Encrypting secrets, used to encrypt sessions. Must be 16, 24, or 32
+# characters long, to select AES-128, AES-192, or AES-256.
+#openshift_master_session_encryption_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO']
+
+# configure how often node iptables rules are refreshed
+#openshift_node_iptables_sync_period=5s
+
+# Configure nodeIP in the node config
+# This is needed in cases where node traffic is desired to go over an
+# interface other than the default network interface.
+#openshift_node_set_node_ip=True
+
+# Force setting of system hostname when configuring OpenShift
+# This works around issues related to installations that do not have valid dns
+# entries for the interfaces attached to the host.
+#openshift_set_hostname=True
+
+# Configure dnsIP in the node config
+#openshift_dns_ip=172.30.0.1
+
+# host group for masters
+[masters]
+ose3-master[1:3]-ansible.test.example.com
+
+[etcd]
+ose3-etcd[1:3]-ansible.test.example.com
+
+[lb]
+ose3-lb-ansible.test.example.com
+
+# NOTE: Currently we require that masters be part of the SDN which requires that they also be nodes
+# However, in order to ensure that your masters are not burdened with running pods you should
+# make them unschedulable by adding openshift_schedulable=False to any node that's also a master.
+[nodes]
+ose3-master[1:3]-ansible.test.example.com
+ose3-node[1:2]-ansible.test.example.com openshift_node_labels="{'region': 'primary', 'zone': 'default'}"
-- 
cgit v1.2.3