From 63fb0c74fcb0adf4cd3b0b2b5d30e34e29a58796 Mon Sep 17 00:00:00 2001
From: Tomas Sedovic
Date: Fri, 27 Oct 2017 17:27:51 +0200
Subject: Remove the extra roles

The `openstack-stack` role is now under `openshift_openstack` and the
`openstack-create-cinder-registry` one will be added there, later.
---
 roles/static_inventory/defaults/main.yml      |  29 -----
 roles/static_inventory/meta/main.yml          |   3 -
 roles/static_inventory/tasks/checkpoint.yml   |  17 ---
 .../tasks/filter_out_new_app_nodes.yaml       |  15 ---
 roles/static_inventory/tasks/main.yml         |  25 -----
 roles/static_inventory/tasks/openstack.yml    | 120 ---------------------
 roles/static_inventory/tasks/sshconfig.yml    |  13 ---
 roles/static_inventory/tasks/sshtun.yml       |  15 ---
 roles/static_inventory/templates/inventory.j2 | 104 ------------------
 .../templates/openstack_ssh_config.j2         |  21 ----
 .../templates/ssh-tunnel.service.j2           |  20 ----
 11 files changed, 382 deletions(-)
 delete mode 100644 roles/static_inventory/defaults/main.yml
 delete mode 100644 roles/static_inventory/meta/main.yml
 delete mode 100644 roles/static_inventory/tasks/checkpoint.yml
 delete mode 100644 roles/static_inventory/tasks/filter_out_new_app_nodes.yaml
 delete mode 100644 roles/static_inventory/tasks/main.yml
 delete mode 100644 roles/static_inventory/tasks/openstack.yml
 delete mode 100644 roles/static_inventory/tasks/sshconfig.yml
 delete mode 100644 roles/static_inventory/tasks/sshtun.yml
 delete mode 100644 roles/static_inventory/templates/inventory.j2
 delete mode 100644 roles/static_inventory/templates/openstack_ssh_config.j2
 delete mode 100644 roles/static_inventory/templates/ssh-tunnel.service.j2

diff --git a/roles/static_inventory/defaults/main.yml b/roles/static_inventory/defaults/main.yml
deleted file mode 100644
index 871700f8c..000000000
--- a/roles/static_inventory/defaults/main.yml
+++ /dev/null
@@ -1,29 +0,0 @@
----
-# Either to checkpoint the dynamic inventory into a static one
-refresh_inventory: True
-inventory: static
-inventory_path: ~/openstack-inventory
-
-# Either to configure bastion
-use_bastion: true
-
-# SSH user/key/options to access hosts via bastion
-ssh_user: openshift
-ssh_options: >-
-  -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no
-  -o ConnectTimeout=90 -o ControlMaster=auto -o ControlPersist=270s
-  -o ServerAliveInterval=30 -o GSSAPIAuthentication=no
-
-# SSH key to access nodes
-private_ssh_key: ~/.ssh/openshift
-
-# The patch to store the generated config to access bastion/hosts
-ssh_config_path: /tmp/ssh.config.ansible
-
-# The IP:port to make an SSH tunnel to access UI on the 1st master
-# via bastion node (requires sudo on the ansible control node)
-ui_ssh_tunnel: False
-ui_port: "{{ openshift_master_api_port | default(8443) }}"
-target_ip: "{{ hostvars[groups['masters.' + stack_name|quote][0]].private_v4 }}"
-
-openstack_private_network: private
diff --git a/roles/static_inventory/meta/main.yml b/roles/static_inventory/meta/main.yml
deleted file mode 100644
index fdda41bb3..000000000
--- a/roles/static_inventory/meta/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-dependencies:
-  - role: common
diff --git a/roles/static_inventory/tasks/checkpoint.yml b/roles/static_inventory/tasks/checkpoint.yml
deleted file mode 100644
index c0365bd3d..000000000
--- a/roles/static_inventory/tasks/checkpoint.yml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-- name: check for static inventory dir
-  stat:
-    path: "{{ inventory_path }}"
-  register: stat_inventory_path
-
-- name: create static inventory dir
-  file:
-    path: "{{ inventory_path }}"
-    state: directory
-    mode: 0750
-  when: not stat_inventory_path.stat.exists
-
-- name: create inventory from template
-  template:
-    src: inventory.j2
-    dest: "{{ inventory_path }}/hosts"
diff --git a/roles/static_inventory/tasks/filter_out_new_app_nodes.yaml b/roles/static_inventory/tasks/filter_out_new_app_nodes.yaml
deleted file mode 100644
index 826efe78d..000000000
--- a/roles/static_inventory/tasks/filter_out_new_app_nodes.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-- name: Add all new app nodes to new_app_nodes
-  when:
-    - 'oc_old_app_nodes is defined'
-    - 'oc_old_app_nodes | list'
-    - 'node.name not in oc_old_app_nodes'
-    - 'node["metadata"]["sub-host-type"] == "app"'
-  register: result
-  set_fact:
-    new_app_nodes: '{{ new_app_nodes }} + [ {{ node }} ]'
-
-- name: If the node was added to new_nodes, remove it from registered nodes
-  set_fact:
-    registered_nodes: '{{ registered_nodes | difference([ node ]) }}'
-  when: 'not result | skipped'
diff --git a/roles/static_inventory/tasks/main.yml b/roles/static_inventory/tasks/main.yml
deleted file mode 100644
index 3dab62df2..000000000
--- a/roles/static_inventory/tasks/main.yml
+++ /dev/null
@@ -1,25 +0,0 @@
----
-- name: Remove any existing inventory
-  file:
-    path: "{{ inventory_path }}/hosts"
-    state: absent
-
-- name: Refresh the inventory
-  meta: refresh_inventory
-
-- name: Generate in-memory inventory
-  include: openstack.yml
-
-- name: Checkpoint in-memory data into a static inventory
-  include: checkpoint.yml
-
-- name: Generate SSH config for accessing hosts via bastion
-  include: sshconfig.yml
-  when: use_bastion|bool
-
-- name: Configure SSH tunneling to access UI
-  include: sshtun.yml
-  become: true
-  when:
-    - use_bastion|bool
-    - ui_ssh_tunnel|bool
diff --git a/roles/static_inventory/tasks/openstack.yml b/roles/static_inventory/tasks/openstack.yml
deleted file mode 100644
index adf78c966..000000000
--- a/roles/static_inventory/tasks/openstack.yml
+++ /dev/null
@@ -1,120 +0,0 @@
----
-- no_log: true
-  block:
-    - name: fetch all nodes from openstack shade dynamic inventory
-      command: shade-inventory --list
-      register: registered_nodes_output
-      when: refresh_inventory|bool
-
-    - name: set fact for openstack inventory cluster nodes
-      set_fact:
-        registered_nodes: "{{ (registered_nodes_output.stdout | from_json) | json_query(q) }}"
-      vars:
-        q: "[] | [?metadata.clusterid=='{{stack_name}}']"
-      when:
-        - refresh_inventory|bool
-
-    - name: set_fact for openstack inventory nodes
-      set_fact:
-        registered_bastion_nodes: "{{ (registered_nodes_output.stdout | from_json) | json_query(q) }}"
-        registered_nodes_floating: "{{ (registered_nodes_output.stdout | from_json) | json_query(q2) }}"
-      vars:
-        q: "[] | [?metadata.group=='infra.{{stack_name}}']"
-        q2: "[] | [?metadata.clusterid=='{{stack_name}}'] | [?public_v4!='']"
-      when:
-        - refresh_inventory|bool
-
-    - name: set_fact for openstack inventory nodes with provider network
-      set_fact:
-        registered_nodes_floating: "{{ (registered_nodes_output.stdout | from_json) | json_query(q) }}"
-      vars:
-        q: "[] | [?metadata.clusterid=='{{stack_name}}'] | [?public_v4=='']"
-      when:
-        - refresh_inventory|bool
-        - openstack_provider_network_name|default(None)
-
-    - name: Add cluster nodes w/o floating IPs to inventory
-      with_items: "{{ registered_nodes|difference(registered_nodes_floating) }}"
-      add_host:
-        name: '{{ item.name }}'
-        ansible_host: >-
-          {% if use_bastion|bool -%}
-          {{ item.name }}
-          {%- else -%}
-          {%- set node = registered_nodes | json_query("[?name=='" + item.name + "']") -%}
-          {{ node[0].addresses[openstack_private_network|quote][0].addr }}
-          {%- endif %}
-        ansible_fqdn: '{{ item.name }}'
-        ansible_user: '{{ ssh_user }}'
-        ansible_private_key_file: '{{ private_ssh_key }}'
-        ansible_ssh_extra_args: '-F {{ ssh_config_path }}'
-        private_v4: >-
-          {% set node = registered_nodes | json_query("[?name=='" + item.name + "']") -%}
-          {{ node[0].addresses[openstack_private_network|quote][0].addr }}
-
-    - name: Add cluster nodes with floating IPs to inventory
-      with_items: "{{ registered_nodes_floating }}"
-      add_host:
-        name: '{{ item.name }}'
-        ansible_host: >-
-          {% if use_bastion|bool -%}
-          {{ item.name }}
-          {%- elif openstack_provider_network_name|default(None) -%}
-          {{ item.private_v4 }}
-          {%- else -%}
-          {{ item.public_v4 }}
-          {%- endif %}
-        ansible_fqdn: '{{ item.name }}'
-        ansible_user: '{{ ssh_user }}'
-        ansible_private_key_file: '{{ private_ssh_key }}'
-        ansible_ssh_extra_args: '-F {{ ssh_config_path }}'
-        private_v4: >-
-          {% set node = registered_nodes | json_query("[?name=='" + item.name + "']") -%}
-          {{ node[0].addresses[openstack_private_network|quote][0].addr }}
-        public_v4: >-
-          {% if openstack_provider_network_name|default(None) -%}
-          {{ item.private_v4 }}
-          {%- else -%}
-          {{ item.public_v4 }}
-          {%- endif %}
-
-    # Split registered_nodes into old nodes and new app nodes
-    # Add new app nodes to new_nodes host group for upscaling
-    - name: Create new_app_nodes variable
-      set_fact:
-        new_app_nodes: []
-
-    - name: Filter new app nodes out of registered_nodes
-      include: filter_out_new_app_nodes.yaml
-      with_items: "{{ registered_nodes }}"
-      loop_control:
-        loop_var: node
-
-    - name: Add new app nodes to the new_nodes section (if a deployment already exists)
-      with_items: "{{ new_app_nodes }}"
-      add_host:
-        name: "{{ item.name }}"
-        groups: new_nodes, app
-
-    - name: Add the rest of cluster nodes to their corresponding groups
-      with_items: "{{ registered_nodes }}"
-      add_host:
-        name: '{{ item.name }}'
-        groups: '{{ item.metadata.group }}'
-
-    - name: Add bastion node to inventory
-      add_host:
-        name: bastion
-        groups: bastions
-        ansible_host: '{{ registered_bastion_nodes[0].public_v4 }}'
-        ansible_fqdn: '{{ registered_bastion_nodes[0].name }}'
-        ansible_user: '{{ ssh_user }}'
-        ansible_private_key_file: '{{ private_ssh_key }}'
-        ansible_ssh_extra_args: '-F {{ ssh_config_path }}'
-        private_v4: >-
-          {% set node = registered_nodes | json_query("[?name=='" + registered_bastion_nodes[0].name + "']") -%}
-          {{ node[0].addresses[openstack_private_network|quote][0].addr }}
-        public_v4: '{{ registered_bastion_nodes[0].public_v4 }}'
-      when:
-        - registered_bastion_nodes is defined
-        - use_bastion|bool
diff --git a/roles/static_inventory/tasks/sshconfig.yml b/roles/static_inventory/tasks/sshconfig.yml
deleted file mode 100644
index 7119fe6ff..000000000
--- a/roles/static_inventory/tasks/sshconfig.yml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-- name: set ssh proxy command prefix for accessing nodes via bastion
-  set_fact:
-    ssh_proxy_command: >-
-      ssh {{ ssh_options }}
-      -i {{ private_ssh_key }}
-      {{ ssh_user }}@{{ hostvars['bastion'].ansible_host }}
-
-- name: regenerate ssh config
-  template:
-    src: openstack_ssh_config.j2
-    dest: "{{ ssh_config_path }}"
-    mode: 0644
diff --git a/roles/static_inventory/tasks/sshtun.yml b/roles/static_inventory/tasks/sshtun.yml
deleted file mode 100644
index b0e4c832c..000000000
--- a/roles/static_inventory/tasks/sshtun.yml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-- name: Create ssh tunnel systemd service
-  template:
-    src: ssh-tunnel.service.j2
-    dest: /etc/systemd/system/ssh-tunnel.service
-    mode: 0644
-
-- name: reload the systemctl daemon after file update
-  command: systemctl daemon-reload
-
-- name: Enable ssh tunnel service
-  service:
-    name: ssh-tunnel
-    enabled: true
-    state: restarted
diff --git a/roles/static_inventory/templates/inventory.j2 b/roles/static_inventory/templates/inventory.j2
deleted file mode 100644
index 9dfbe3a5b..000000000
--- a/roles/static_inventory/templates/inventory.j2
+++ /dev/null
@@ -1,104 +0,0 @@
-# BEGIN Autogenerated hosts
-{% for host in groups['all'] %}
-{% if hostvars[host].get('ansible_connection', '') == 'local' %}
-{{ host }} ansible_connection=local
-{% else %}
-
-{{ host }}{% if 'ansible_host' in hostvars[host] -%} ansible_host={{ hostvars[host]['ansible_host'] }}{% endif %}
-{% if 'private_v4' in hostvars[host] -%} private_v4={{ hostvars[host]['private_v4'] }}{% endif %}
-{% if 'public_v4' in hostvars[host] -%} public_v4={{ hostvars[host]['public_v4'] }}{% endif %}
-{% if 'ansible_user' in hostvars[host] -%} ansible_user={{ hostvars[host]['ansible_user'] }}{% endif %}
-{% if 'ansible_private_key_file' in hostvars[host] and hostvars[host]['ansible_private_key_file'] -%} ansible_private_key_file={{ hostvars[host]['ansible_private_key_file'] }}{% endif %}
-{% if use_bastion|bool and 'ansible_ssh_extra_args' in hostvars[host] -%} ansible_ssh_extra_args={{ hostvars[host]['ansible_ssh_extra_args']|quote }}{% endif %} openshift_hostname={{ host }}
-
-{% endif %}
-{% endfor %}
-# END autogenerated hosts
-
-#[all:vars]
-# For all group_vars, see ./group_vars/all.yml
-[infra_hosts:vars]
-openshift_node_labels={{ openshift_cluster_node_labels.infra | to_json | quote }}
-
-[app:vars]
-openshift_node_labels={{ openshift_cluster_node_labels.app | to_json | quote }}
-
-# Create an OSEv3 group that contains the master, nodes, etcd, and lb groups.
-# The lb group lets Ansible configure HAProxy as the load balancing solution.
-# Comment lb out if your load balancer is pre-configured.
-[cluster_hosts:children]
-OSEv3
-dns
-
-[OSEv3:children]
-nodes
-etcd
-lb
-new_nodes
-
-# Set variables common for all OSEv3 hosts
-[OSEv3:vars]
-
-# For OSEv3 normal group vars, see ./group_vars/OSEv3.yml
-
-{% if cinder_registry_volume is defined and 'volume' in cinder_registry_volume %}
-openshift_hosted_registry_storage_openstack_volumeID="{{ cinder_registry_volume.id }}"
-openshift_hosted_registry_storage_volume_size="{{ cinder_registry_volume.volume.size }}Gi"
-{% endif %}
-
-
-# Host Groups
-
-[masters:children]
-masters.{{ stack_name }}
-
-[etcd:children]
-etcd.{{ stack_name }}
-{% if 'etcd' not in groups or groups['etcd']|length == 0 %}masters.{{ stack_name }}{% endif %}
-
-[nodes:children]
-masters
-infra.{{ stack_name }}
-nodes.{{ stack_name }}
-
-[infra_hosts:children]
-infra.{{ stack_name }}
-
-[app:children]
-nodes.{{ stack_name }}
-
-[dns:children]
-dns.{{ stack_name }}
-
-[lb:children]
-lb.{{ stack_name }}
-
-[new_nodes:children]
-
-# Empty placeholders for all groups of the cluster nodes
-[masters.{{ stack_name }}]
-[etcd.{{ stack_name }}]
-[infra.{{ stack_name }}]
-[nodes.{{ stack_name }}]
-[app.{{ stack_name }}]
-[dns.{{ stack_name }}]
-[lb.{{ stack_name }}]
-[new_nodes.{{ stack_name }}]
-
-# BEGIN Autogenerated groups
-{% for group in groups %}
-{% if group not in ['ungrouped', 'all'] %}
-[{{ group }}]
-{% for host in groups[group] %}
-{{ host }}
-{% endfor %}
-
-{% endif %}
-{% endfor %}
-# END Autogenerated groups
diff --git a/roles/static_inventory/templates/openstack_ssh_config.j2 b/roles/static_inventory/templates/openstack_ssh_config.j2
deleted file mode 100644
index ad5d1253a..000000000
--- a/roles/static_inventory/templates/openstack_ssh_config.j2
+++ /dev/null
@@ -1,21 +0,0 @@
-Host *
-  IdentitiesOnly yes
-
-Host bastion
-  Hostname {{ hostvars['bastion'].ansible_host }}
-  IdentityFile {{ hostvars['bastion'].ansible_private_key_file }}
-  User {{ ssh_user }}
-  StrictHostKeyChecking no
-  UserKnownHostsFile=/dev/null
-
-{% for host in groups['all'] | difference(groups['bastions'][0]) %}
-
-Host {{ host }}
-  Hostname {{ hostvars[host].ansible_host }}
-  ProxyCommand {{ ssh_proxy_command }} -W {{ hostvars[host].private_v4 }}:22
-  IdentityFile {{ hostvars[host].ansible_private_key_file }}
-  User {{ ssh_user }}
-  StrictHostKeyChecking no
-  UserKnownHostsFile=/dev/null
-
-{% endfor %}
diff --git a/roles/static_inventory/templates/ssh-tunnel.service.j2 b/roles/static_inventory/templates/ssh-tunnel.service.j2
deleted file mode 100644
index 0d1cf8f79..000000000
--- a/roles/static_inventory/templates/ssh-tunnel.service.j2
+++ /dev/null
@@ -1,20 +0,0 @@
-[Unit]
-Description=Set up ssh tunneling for OpenShift cluster UI
-After=network.target
-
-[Service]
-ExecStart=/usr/bin/ssh -NT -o \
-  ServerAliveInterval=60 -o \
-  UserKnownHostsFile=/dev/null -o \
-  StrictHostKeyChecking=no -o \
-  ExitOnForwardFailure=no -i \
-  {{ private_ssh_key }} {{ ssh_user }}@{{ hostvars['bastion'].ansible_host }} \
-  -L 0.0.0.0:{{ ui_port }}:{{ target_ip }}:{{ ui_port }}
-
-
-# Restart every >2 seconds to avoid StartLimitInterval failure
-RestartSec=5
-Restart=always
-
-[Install]
-WantedBy=multi-user.target