author    Tomas Sedovic <tomas@sedovic.cz>    2017-06-14 16:28:00 +0200
committer Tomas Sedovic <tomas@sedovic.cz>    2017-06-14 16:28:00 +0200
commit    6241e33432ea88cf9c5bc67db6d09c90b2e891ba (patch)
tree      4e82fafc32a598d9cf5c1c72b9c20e83268b0251
parent    672f8e155bdc7244d4bf0cbcca5e4be5f063d55f (diff)
parent    22e88c9ce8f81cb13c3d050455d332161a1acd83 (diff)
Merge redhat-cop/casl-ansible into openstack-provider
This imports the openstack provisioning bits of https://github.com/redhat-cop/casl-ansible, taking care to preserve the original history of those files.
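The merge keeps the upstream commits rather than copying files in a single squashed import. A minimal sketch of one way such a history-preserving import can be done — assuming the modern git filter-repo tool and hypothetical paths, not necessarily the exact procedure used here:

```
# Trim the source repo down to the provisioning bits, keeping their history
git clone https://github.com/redhat-cop/casl-ansible.git
cd casl-ansible
git filter-repo --path roles/          # hypothetical path filter

# Merge the filtered, unrelated history into the target repo
cd ../openshift-ansible
git remote add casl ../casl-ansible
git fetch casl
git merge --allow-unrelated-histories casl/master
```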
-rw-r--r--  roles/common/pre_tasks/pre_tasks.yml                        38
-rw-r--r--  roles/dns-server-detect/defaults/main.yml                    3
-rw-r--r--  roles/dns-server-detect/tasks/main.yml                      38
-rw-r--r--  roles/hostnames/tasks/main.yaml                             26
-rw-r--r--  roles/hostnames/test/inv                                    12
l---------  roles/hostnames/test/roles                                   1
-rw-r--r--  roles/hostnames/test/test.retry                              3
-rw-r--r--  roles/hostnames/test/test.yaml                              21
-rw-r--r--  roles/hostnames/vars/main.yaml                               2
-rw-r--r--  roles/hostnames/vars/records.yaml                           28
-rw-r--r--  roles/openshift-prep/tasks/main.yml                          4
-rw-r--r--  roles/openshift-prep/tasks/prerequisites.yml                36
-rw-r--r--  roles/openstack-stack/README.md                              9
-rw-r--r--  roles/openstack-stack/defaults/main.yml                     12
-rw-r--r--  roles/openstack-stack/tasks/main.yml                        41
-rw-r--r--  roles/openstack-stack/templates/heat_stack.yaml.j2         620
-rw-r--r--  roles/openstack-stack/templates/heat_stack_server.yaml.j2  170
-rw-r--r--  roles/openstack-stack/templates/user_data.j2                13
l---------  roles/openstack-stack/test/roles                             1
-rw-r--r--  roles/openstack-stack/test/stack-create-test.yml            17
-rw-r--r--  roles/subscription-manager/README.md                       156
-rw-r--r--  roles/subscription-manager/pre_tasks/pre_tasks.yml          45
-rw-r--r--  roles/subscription-manager/tasks/main.yml                  122
23 files changed, 1418 insertions, 0 deletions
diff --git a/roles/common/pre_tasks/pre_tasks.yml b/roles/common/pre_tasks/pre_tasks.yml
new file mode 100644
index 000000000..c5e79e89c
--- /dev/null
+++ b/roles/common/pre_tasks/pre_tasks.yml
@@ -0,0 +1,38 @@
+---
+- name: Generate Environment ID
+ set_fact:
+ env_random_id: "{{ ansible_date_time.epoch }}"
+ run_once: true
+ delegate_to: localhost
+
+- name: Set default Environment ID
+ set_fact:
+ default_env_id: "casl-{{ lookup('env','OS_USERNAME') }}-{{ env_random_id }}"
+ delegate_to: localhost
+
+- name: Setting Common Facts
+ set_fact:
+ env_id: "{{ env_id | default(default_env_id) }}"
+ delegate_to: localhost
+
+- name: Set Dynamic Inventory Filters
+ shell: >
+ export OS_INV_FILTER_KEY=clusterid OS_INV_FILTER_VALUE={{ env_id }}
+ delegate_to: localhost
+
+- name: Updating DNS domain to include env_id (if not empty)
+ set_fact:
+ full_dns_domain: "{{ (env_id|trim == '') | ternary(public_dns_domain, env_id + '.' + public_dns_domain) }}"
+ delegate_to: localhost
+
+- name: Set the APP domain for OpenShift use
+ set_fact:
+ openshift_app_domain: "{{ openshift_app_domain | default('apps') }}"
+ delegate_to: localhost
+
+- name: Set the default app domain for routing purposes
+ set_fact:
+ openshift_master_default_subdomain: "{{ openshift_app_domain }}.{{ full_dns_domain }}"
+ delegate_to: localhost
+ when:
+ - openshift_master_default_subdomain is undefined
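The tasks above derive `env_id`, `full_dns_domain`, and `openshift_master_default_subdomain` in a chain, so overriding `env_id` alone renames a whole environment. A hypothetical invocation (the playbook name is illustrative):

```
$ ansible-playbook provision.yml -e env_id=dev-cluster -e public_dns_domain=example.com
# full_dns_domain                    -> dev-cluster.example.com
# openshift_master_default_subdomain -> apps.dev-cluster.example.com
```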
diff --git a/roles/dns-server-detect/defaults/main.yml b/roles/dns-server-detect/defaults/main.yml
new file mode 100644
index 000000000..58bd861cd
--- /dev/null
+++ b/roles/dns-server-detect/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+
+external_nsupdate_keys: {}
diff --git a/roles/dns-server-detect/tasks/main.yml b/roles/dns-server-detect/tasks/main.yml
new file mode 100644
index 000000000..e8dd0acf0
--- /dev/null
+++ b/roles/dns-server-detect/tasks/main.yml
@@ -0,0 +1,38 @@
+---
+
+- fail:
+ msg: 'Missing required private DNS server(s)'
+ when:
+ - external_nsupdate_keys['private'] is undefined
+ - hostvars[groups['dns'][0]] is undefined
+
+- fail:
+ msg: 'Missing required public DNS server(s)'
+ when:
+ - external_nsupdate_keys['public'] is undefined
+ - hostvars[groups['dns'][0]] is undefined
+
+- name: "Set the private DNS server to use the external value (if provided)"
+ set_fact:
+ private_dns_server: "{{ external_nsupdate_keys['private']['server'] }}"
+ when:
+ - external_nsupdate_keys['private'] is defined
+
+- name: "Set the private DNS server to use the provisioned value"
+ set_fact:
+ private_dns_server: "{{ hostvars[groups['dns'][0]].openstack.private_v4 }}"
+ when:
+ - private_dns_server is undefined
+
+- name: "Set the public DNS server to use the external value (if provided)"
+ set_fact:
+ public_dns_server: "{{ external_nsupdate_keys['public']['server'] }}"
+ when:
+ - external_nsupdate_keys['public'] is defined
+
+- name: "Set the public DNS server to use the provisioned value"
+ set_fact:
+ public_dns_server: "{{ hostvars[groups['dns'][0]].openstack.public_v4 }}"
+ when:
+ - public_dns_server is undefined
+
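The role only falls back to the provisioned DNS host when no external nsupdate servers are configured. A sketch of the `external_nsupdate_keys` shape these tasks read — only the `server` key is consumed here; the addresses are placeholders:

```yaml
external_nsupdate_keys:
  private:
    server: 10.0.0.2      # illustrative address
  public:
    server: 192.0.2.53    # illustrative address
```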
diff --git a/roles/hostnames/tasks/main.yaml b/roles/hostnames/tasks/main.yaml
new file mode 100644
index 000000000..bf142d653
--- /dev/null
+++ b/roles/hostnames/tasks/main.yaml
@@ -0,0 +1,26 @@
+---
+- name: Setting Hostname Fact
+ set_fact:
+ new_hostname: "{{ custom_hostname | default(inventory_hostname_short) }}"
+
+- name: Setting FQDN Fact
+ set_fact:
+ new_fqdn: "{{ new_hostname }}.{{ full_dns_domain }}"
+
+- name: Setting hostname and DNS domain
+ hostname: name="{{ new_fqdn }}"
+
+- name: Check for cloud.cfg
+ stat: path=/etc/cloud/cloud.cfg
+ register: cloud_cfg
+
+- name: Prevent cloud-init updates of hostname/fqdn (if applicable)
+ lineinfile:
+ dest: /etc/cloud/cloud.cfg
+ state: present
+ regexp: "{{ item.regexp }}"
+ line: "{{ item.line }}"
+ with_items:
+ - { regexp: '^ - set_hostname', line: '# - set_hostname' }
+ - { regexp: '^ - update_hostname', line: '# - update_hostname' }
+ when: cloud_cfg.stat.exists
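For reference, the lineinfile edit above leaves a /etc/cloud/cloud.cfg fragment along these lines (module list abridged and illustrative):

```yaml
cloud_init_modules:
# - set_hostname
# - update_hostname
  - update_etc_hosts
```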
diff --git a/roles/hostnames/test/inv b/roles/hostnames/test/inv
new file mode 100644
index 000000000..ffbe6e03d
--- /dev/null
+++ b/roles/hostnames/test/inv
@@ -0,0 +1,12 @@
+[all:vars]
+dns_domain=example.com
+
+[openshift_masters]
+192.168.124.41 dns_private_ip=1.1.1.41 dns_public_ip=192.168.124.41
+192.168.124.117 dns_private_ip=1.1.1.117 dns_public_ip=192.168.124.117
+
+[openshift_nodes]
+192.168.124.40 dns_private_ip=1.1.1.40 dns_public_ip=192.168.124.40
+
+#[dns]
+#192.168.124.117 dns_private_ip=1.1.1.117
diff --git a/roles/hostnames/test/roles b/roles/hostnames/test/roles
new file mode 120000
index 000000000..e2b799b9d
--- /dev/null
+++ b/roles/hostnames/test/roles
@@ -0,0 +1 @@
+../../../roles/ \ No newline at end of file
diff --git a/roles/hostnames/test/test.retry b/roles/hostnames/test/test.retry
new file mode 100644
index 000000000..63fc08e4c
--- /dev/null
+++ b/roles/hostnames/test/test.retry
@@ -0,0 +1,3 @@
+192.168.124.117
+192.168.124.40
+192.168.124.41
diff --git a/roles/hostnames/test/test.yaml b/roles/hostnames/test/test.yaml
new file mode 100644
index 000000000..34bf37942
--- /dev/null
+++ b/roles/hostnames/test/test.yaml
@@ -0,0 +1,21 @@
+---
+- hosts: all
+ roles:
+ - role: hostnames
+
+# - debug:
+#
+# - hosts: dns
+# roles:
+# - role: dns-server
+# named_config_views:
+# - name: private
+# acl_entry:
+# - 192.168.124.40/32
+# - 192.168.124.40/32
+# zone:
+# - dns_domain: example.com
+# - name: public
+# zone:
+# - dns_domain: example.com
+# - role: dns
diff --git a/roles/hostnames/vars/main.yaml b/roles/hostnames/vars/main.yaml
new file mode 100644
index 000000000..3eecb8dc4
--- /dev/null
+++ b/roles/hostnames/vars/main.yaml
@@ -0,0 +1,2 @@
+---
+counter: 1
diff --git a/roles/hostnames/vars/records.yaml b/roles/hostnames/vars/records.yaml
new file mode 100644
index 000000000..3bf12ae2b
--- /dev/null
+++ b/roles/hostnames/vars/records.yaml
@@ -0,0 +1,28 @@
+---
+ - name: "Building Records"
+ set_fact:
+ dns_records_add:
+ - view: private
+ zone: example.com
+ entries:
+ - type: A
+ hostname: master1.example.com
+ ip: 172.16.15.94
+ - type: A
+ hostname: node1.example.com
+ ip: 172.16.15.86
+ - type: A
+ hostname: node2.example.com
+ ip: 172.16.15.87
+ - view: public
+ zone: example.com
+ entries:
+ - type: A
+ hostname: master1.example.com
+ ip: 10.3.10.116
+ - type: A
+ hostname: node1.example.com
+ ip: 10.3.11.46
+ - type: A
+ hostname: node2.example.com
+ ip: 10.3.12.6
diff --git a/roles/openshift-prep/tasks/main.yml b/roles/openshift-prep/tasks/main.yml
new file mode 100644
index 000000000..5e484e75f
--- /dev/null
+++ b/roles/openshift-prep/tasks/main.yml
@@ -0,0 +1,4 @@
+---
+# Starting Point for OpenShift Installation and Configuration
+- include: prerequisites.yml
+ tags: [prerequisites]
diff --git a/roles/openshift-prep/tasks/prerequisites.yml b/roles/openshift-prep/tasks/prerequisites.yml
new file mode 100644
index 000000000..1286905f4
--- /dev/null
+++ b/roles/openshift-prep/tasks/prerequisites.yml
@@ -0,0 +1,36 @@
+---
+- name: "Cleaning yum repositories"
+ command: "yum clean all"
+
+- name: "Install required packages"
+ yum:
+ name: "{{ item }}"
+ state: latest
+ with_items:
+ - wget
+ - git
+ - net-tools
+ - bind-utils
+ - bridge-utils
+ - bash-completion
+ - atomic-openshift-utils
+ - vim-enhanced
+
+- name: "Update all packages (this can take a very long time)"
+ yum:
+ name: "*"
+ state: latest
+
+- name: "Verify hostname"
+ shell: hostnamectl status | awk '/Static hostname/ { print $3 }'
+ register: hostname_fqdn
+
+- name: "Set hostname if required"
+ hostname:
+ name: "{{ ansible_fqdn }}"
+ when: hostname_fqdn.stdout != ansible_fqdn
+
+- name: "Verify SELinux is enforcing"
+ fail:
+ msg: "SELinux is required for OpenShift and has been detected as '{{ ansible_selinux.config_mode }}'"
+ when: ansible_selinux.config_mode != "enforcing"
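The hostname and SELinux checks these tasks perform can be reproduced by hand; a hypothetical session on a healthy host:

```
$ hostnamectl status | awk '/Static hostname/ { print $3 }'
master1.example.com
$ getenforce
Enforcing
```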
diff --git a/roles/openstack-stack/README.md b/roles/openstack-stack/README.md
new file mode 100644
index 000000000..509c9de6c
--- /dev/null
+++ b/roles/openstack-stack/README.md
@@ -0,0 +1,9 @@
+# Role openstack-stack
+
+Role for spinning up instances using OpenStack Heat.
+
+## To Test
+
+```
+ansible-playbook casl-ansible/roles/openstack-stack/test/stack-create-test.yml
+```
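The test playbook (see stack-create-test.yml below) expects a handful of variables to be defined in the inventory or passed with `-e`. An illustrative set, with placeholder values:

```yaml
public_dns_domain: example.com
public_dns_nameservers: [192.0.2.53]
openstack_subnet_prefix: "192.168.23"
openstack_ssh_public_key: my-keypair       # Nova keypair name
openstack_default_image_name: rhel-7.3     # placeholder image
openstack_default_flavor: m1.medium        # placeholder flavor
openstack_external_network_name: public
```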
diff --git a/roles/openstack-stack/defaults/main.yml b/roles/openstack-stack/defaults/main.yml
new file mode 100644
index 000000000..2a4ef3a45
--- /dev/null
+++ b/roles/openstack-stack/defaults/main.yml
@@ -0,0 +1,12 @@
+---
+dns_volume_size: 1
+ssh_ingress_cidr: 0.0.0.0/0
+node_ingress_cidr: 0.0.0.0/0
+master_ingress_cidr: 0.0.0.0/0
+lb_ingress_cidr: 0.0.0.0/0
+num_etcd: 0
+num_masters: 1
+num_nodes: 1
+num_dns: 1
+num_infra: 1
+etcd_volume_size: 2
diff --git a/roles/openstack-stack/tasks/main.yml b/roles/openstack-stack/tasks/main.yml
new file mode 100644
index 000000000..71c7bbe0d
--- /dev/null
+++ b/roles/openstack-stack/tasks/main.yml
@@ -0,0 +1,41 @@
+---
+- name: create HOT stack template prefix
+ register: stack_template_pre
+ tempfile:
+ state: directory
+ prefix: casl-ansible
+
+- name: set template paths
+ set_fact:
+ stack_template_path: "{{ stack_template_pre.path }}/stack.yaml"
+ server_template_path: "{{ stack_template_pre.path }}/server.yaml"
+ user_data_template_path: "{{ stack_template_pre.path }}/user-data"
+
+- name: generate HOT stack template from jinja2 template
+ template:
+ src: heat_stack.yaml.j2
+ dest: "{{ stack_template_path }}"
+
+- name: generate HOT server template from jinja2 template
+ template:
+ src: heat_stack_server.yaml.j2
+ dest: "{{ server_template_path }}"
+
+- name: generate user_data from jinja2 template
+ template:
+ src: user_data.j2
+ dest: "{{ user_data_template_path }}"
+
+- name: create stack
+ ignore_errors: False
+ register: stack_create
+ os_stack:
+ name: "{{ stack_name }}"
+ state: present
+ template: "{{ stack_template_path }}"
+ wait: yes
+
+- name: cleanup temp files
+ file:
+ path: "{{ stack_template_pre.path }}"
+ state: absent
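The registered `stack_create` result carries the outputs declared in heat_stack.yaml.j2 (master_ips, node_floating_ips, and so on). A follow-up task along these lines could surface them, though the exact result structure depends on the os_stack module version in use:

```yaml
- name: show Heat stack outputs
  debug:
    var: stack_create.stack.outputs
```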
diff --git a/roles/openstack-stack/templates/heat_stack.yaml.j2 b/roles/openstack-stack/templates/heat_stack.yaml.j2
new file mode 100644
index 000000000..09b62cba7
--- /dev/null
+++ b/roles/openstack-stack/templates/heat_stack.yaml.j2
@@ -0,0 +1,620 @@
+heat_template_version: 2016-10-14
+
+description: OpenShift cluster
+
+parameters:
+
+outputs:
+
+ etcd_names:
+ description: Name of the etcds
+ value: { get_attr: [ etcd, name ] }
+
+ etcd_ips:
+ description: IPs of the etcds
+ value: { get_attr: [ etcd, private_ip ] }
+
+ etcd_floating_ips:
+ description: Floating IPs of the etcds
+ value: { get_attr: [ etcd, floating_ip ] }
+
+ master_names:
+ description: Name of the masters
+ value: { get_attr: [ masters, name ] }
+
+ master_ips:
+ description: IPs of the masters
+ value: { get_attr: [ masters, private_ip ] }
+
+ master_floating_ips:
+ description: Floating IPs of the masters
+ value: { get_attr: [ masters, floating_ip ] }
+
+ node_names:
+ description: Name of the nodes
+ value: { get_attr: [ compute_nodes, name ] }
+
+ node_ips:
+ description: IPs of the nodes
+ value: { get_attr: [ compute_nodes, private_ip ] }
+
+ node_floating_ips:
+ description: Floating IPs of the nodes
+ value: { get_attr: [ compute_nodes, floating_ip ] }
+
+ infra_names:
+ description: Name of the nodes
+ value: { get_attr: [ infra_nodes, name ] }
+
+ infra_ips:
+ description: IPs of the nodes
+ value: { get_attr: [ infra_nodes, private_ip ] }
+
+ infra_floating_ips:
+ description: Floating IPs of the nodes
+ value: { get_attr: [ infra_nodes, floating_ip ] }
+
+ dns_name:
+ description: Name of the DNS
+ value:
+ get_attr:
+ - dns
+ - name
+
+ dns_floating_ip:
+ description: Floating IP of the DNS
+ value:
+ get_attr:
+ - dns
+ - addresses
+ - str_replace:
+ template: openshift-ansible-cluster_id-net
+ params:
+ cluster_id: {{ stack_name }}
+ - 1
+ - addr
+
+resources:
+
+ net:
+ type: OS::Neutron::Net
+ properties:
+ name:
+ str_replace:
+ template: openshift-ansible-cluster_id-net
+ params:
+ cluster_id: {{ stack_name }}
+
+ subnet:
+ type: OS::Neutron::Subnet
+ properties:
+ name:
+ str_replace:
+ template: openshift-ansible-cluster_id-subnet
+ params:
+ cluster_id: {{ stack_name }}
+ network: { get_resource: net }
+ cidr:
+ str_replace:
+ template: subnet_24_prefix.0/24
+ params:
+ subnet_24_prefix: {{ subnet_prefix }}
+ allocation_pools:
+ - start:
+ str_replace:
+ template: subnet_24_prefix.3
+ params:
+ subnet_24_prefix: {{ subnet_prefix }}
+ end:
+ str_replace:
+ template: subnet_24_prefix.254
+ params:
+ subnet_24_prefix: {{ subnet_prefix }}
+ dns_nameservers:
+ {% for nameserver in dns_nameservers %}
+ - {{ nameserver }}
+ {% endfor %}
+
+ router:
+ type: OS::Neutron::Router
+ properties:
+ name:
+ str_replace:
+ template: openshift-ansible-cluster_id-router
+ params:
+ cluster_id: {{ stack_name }}
+ external_gateway_info:
+ network: {{ external_network }}
+
+ interface:
+ type: OS::Neutron::RouterInterface
+ properties:
+ router_id: { get_resource: router }
+ subnet_id: { get_resource: subnet }
+
+# keypair:
+# type: OS::Nova::KeyPair
+# properties:
+# name:
+# str_replace:
+# template: openshift-ansible-cluster_id-keypair
+# params:
+# cluster_id: {{ stack_name }}
+# public_key: {{ ssh_public_key }}
+
+ master-secgrp:
+ type: OS::Neutron::SecurityGroup
+ properties:
+ name:
+ str_replace:
+ template: openshift-ansible-cluster_id-master-secgrp
+ params:
+ cluster_id: {{ stack_name }}
+ description:
+ str_replace:
+ template: Security group for cluster_id OpenShift cluster master
+ params:
+ cluster_id: {{ stack_name }}
+ rules:
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 22
+ port_range_max: 22
+ remote_ip_prefix: {{ ssh_ingress_cidr }}
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 4001
+ port_range_max: 4001
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 8443
+ port_range_max: 8443
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 8444
+ port_range_max: 8444
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 53
+ port_range_max: 53
+ - direction: ingress
+ protocol: udp
+ port_range_min: 53
+ port_range_max: 53
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 8053
+ port_range_max: 8053
+ - direction: ingress
+ protocol: udp
+ port_range_min: 8053
+ port_range_max: 8053
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 24224
+ port_range_max: 24224
+ - direction: ingress
+ protocol: udp
+ port_range_min: 24224
+ port_range_max: 24224
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 2224
+ port_range_max: 2224
+ - direction: ingress
+ protocol: udp
+ port_range_min: 5404
+ port_range_max: 5404
+ - direction: ingress
+ protocol: udp
+ port_range_min: 5405
+ port_range_max: 5405
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 9090
+ port_range_max: 9090
+
+ etcd-secgrp:
+ type: OS::Neutron::SecurityGroup
+ properties:
+ name:
+ str_replace:
+ template: openshift-ansible-cluster_id-etcd-secgrp
+ params:
+ cluster_id: {{ stack_name }}
+ description:
+ str_replace:
+ template: Security group for cluster_id etcd cluster
+ params:
+ cluster_id: {{ stack_name }}
+ rules:
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 22
+ port_range_max: 22
+ remote_ip_prefix: {{ ssh_ingress_cidr }}
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 2379
+ port_range_max: 2379
+ remote_mode: remote_group_id
+ remote_group_id: { get_resource: master-secgrp }
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 2380
+ port_range_max: 2380
+ remote_mode: remote_group_id
+
+ node-secgrp:
+ type: OS::Neutron::SecurityGroup
+ properties:
+ name:
+ str_replace:
+ template: openshift-ansible-cluster_id-node-secgrp
+ params:
+ cluster_id: {{ stack_name }}
+ description:
+ str_replace:
+ template: Security group for cluster_id OpenShift cluster nodes
+ params:
+ cluster_id: {{ stack_name }}
+ rules:
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 22
+ port_range_max: 22
+ remote_ip_prefix: {{ ssh_ingress_cidr }}
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 10250
+ port_range_max: 10250
+ remote_mode: remote_group_id
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 10255
+ port_range_max: 10255
+ remote_mode: remote_group_id
+ - direction: ingress
+ protocol: udp
+ port_range_min: 10255
+ port_range_max: 10255
+ remote_mode: remote_group_id
+ - direction: ingress
+ protocol: udp
+ port_range_min: 4789
+ port_range_max: 4789
+ remote_mode: remote_group_id
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 30000
+ port_range_max: 32767
+ remote_ip_prefix: {{ node_ingress_cidr }}
+
+ infra-secgrp:
+ type: OS::Neutron::SecurityGroup
+ properties:
+ name:
+ str_replace:
+ template: openshift-ansible-cluster_id-infra-secgrp
+ params:
+ cluster_id: {{ stack_name }}
+ description:
+ str_replace:
+ template: Security group for cluster_id OpenShift infrastructure cluster nodes
+ params:
+ cluster_id: {{ stack_name }}
+ rules:
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 80
+ port_range_max: 80
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 443
+ port_range_max: 443
+
+ dns-secgrp:
+ type: OS::Neutron::SecurityGroup
+ properties:
+ name:
+ str_replace:
+ template: openshift-ansible-cluster_id-dns-secgrp
+ params:
+ cluster_id: {{ stack_name }}
+ description:
+ str_replace:
+ template: Security group for cluster_id cluster DNS
+ params:
+ cluster_id: {{ stack_name }}
+ rules:
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 22
+ port_range_max: 22
+ remote_ip_prefix: {{ ssh_ingress_cidr }}
+ - direction: ingress
+ protocol: udp
+ port_range_min: 53
+ port_range_max: 53
+ remote_ip_prefix: {{ node_ingress_cidr }}
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 53
+ port_range_max: 53
+ remote_ip_prefix: {{ node_ingress_cidr }}
+{% if num_masters is greaterthan 1 %}
+ lb-secgrp:
+ type: OS::Neutron::SecurityGroup
+ properties:
+ name: openshift-ansible-{{ stack_name }}-lb-secgrp
+ description: Security group for {{ stack_name }} cluster Load Balancer
+ rules:
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 22
+ port_range_max: 22
+ remote_ip_prefix: {{ ssh_ingress_cidr }}
+ - direction: ingress
+ protocol: tcp
+ port_range_min: {{ openshift_master_api_port | default(8443) }}
+ port_range_max: {{ openshift_master_api_port | default(8443) }}
+ remote_ip_prefix: {{ lb_ingress_cidr }}
+ {% if openshift_master_console_port is defined and openshift_master_console_port is not equalto openshift_master_api_port %}
+ - direction: ingress
+ protocol: tcp
+ port_range_min: {{ openshift_master_console_port | default(8443) }}
+ port_range_max: {{ openshift_master_console_port | default(8443) }}
+ remote_ip_prefix: {{ lb_ingress_cidr }}
+ {% endif %}
+{% endif %}
+
+ etcd:
+ type: OS::Heat::ResourceGroup
+ properties:
+ count: {{ num_etcd }}
+ resource_def:
+ type: server.yaml
+ properties:
+ name:
+ str_replace:
+ template: k8s_type-%index%.cluster_id
+ params:
+ cluster_id: {{ stack_name }}
+ k8s_type: etcd
+ cluster_env: {{ public_dns_domain }}
+ cluster_id: {{ stack_name }}
+ group:
+ str_replace:
+ template: k8s_type.cluster_id
+ params:
+ k8s_type: etcds
+ cluster_id: {{ stack_name }}
+ type: etcd
+ image: {{ openstack_image }}
+ flavor: {{ etcd_flavor }}
+ key_name: {{ ssh_public_key }}
+ net: { get_resource: net }
+ subnet: { get_resource: subnet }
+ secgrp:
+ - { get_resource: etcd-secgrp }
+ floating_network: {{ external_network }}
+ net_name:
+ str_replace:
+ template: openshift-ansible-cluster_id-net
+ params:
+ cluster_id: {{ stack_name }}
+ volume_size: {{ etcd_volume_size }}
+ depends_on:
+ - interface
+
+{% if num_masters is greaterthan 1 %}
+ loadbalancer:
+ type: OS::Heat::ResourceGroup
+ properties:
+ count: 1
+ resource_def:
+ type: server.yaml
+ properties:
+ name:
+ str_replace:
+ template: k8s_type-%index%.cluster_id
+ params:
+ cluster_id: {{ stack_name }}
+ k8s_type: lb
+ cluster_env: {{ public_dns_domain }}
+ cluster_id: {{ stack_name }}
+ group:
+ str_replace:
+ template: k8s_type.cluster_id
+ params:
+ k8s_type: lb
+ cluster_id: {{ stack_name }}
+ type: lb
+ image: {{ openstack_image }}
+ flavor: {{ lb_flavor }}
+ key_name: {{ ssh_public_key }}
+ net: { get_resource: net }
+ subnet: { get_resource: subnet }
+ secgrp:
+ - { get_resource: lb-secgrp }
+ floating_network: {{ external_network }}
+ net_name:
+ str_replace:
+ template: openshift-ansible-cluster_id-net
+ params:
+ cluster_id: {{ stack_name }}
+ volume_size: 5
+ depends_on:
+ - interface
+{% endif %}
+
+ masters:
+ type: OS::Heat::ResourceGroup
+ properties:
+ count: {{ num_masters }}
+ resource_def:
+ type: server.yaml
+ properties:
+ name:
+ str_replace:
+ template: k8s_type-%index%.cluster_id
+ params:
+ cluster_id: {{ stack_name }}
+ k8s_type: master
+ cluster_env: {{ public_dns_domain }}
+ cluster_id: {{ stack_name }}
+ group:
+ str_replace:
+ template: k8s_type.cluster_id
+ params:
+ k8s_type: masters
+ cluster_id: {{ stack_name }}
+ type: master
+ image: {{ openstack_image }}
+ flavor: {{ master_flavor }}
+ key_name: {{ ssh_public_key }}
+ net: { get_resource: net }
+ subnet: { get_resource: subnet }
+ secgrp:
+ - { get_resource: master-secgrp }
+ - { get_resource: node-secgrp }
+{% if num_etcd is equalto 0 %}
+ - { get_resource: etcd-secgrp }
+{% endif %}
+ floating_network: {{ external_network }}
+ net_name:
+ str_replace:
+ template: openshift-ansible-cluster_id-net
+ params:
+ cluster_id: {{ stack_name }}
+ volume_size: {{ master_volume_size }}
+ depends_on:
+ - interface
+
+ compute_nodes:
+ type: OS::Heat::ResourceGroup
+ properties:
+ count: {{ num_nodes }}
+ resource_def:
+ type: server.yaml
+ properties:
+ name:
+ str_replace:
+ template: subtype-k8s_type-%index%.cluster_id
+ params:
+ cluster_id: {{ stack_name }}
+ k8s_type: node
+ subtype: app
+ cluster_env: {{ public_dns_domain }}
+ cluster_id: {{ stack_name }}
+ group:
+ str_replace:
+ template: k8s_type.cluster_id
+ params:
+ k8s_type: nodes
+ cluster_id: {{ stack_name }}
+ type: node
+ subtype: app
+ node_labels:
+ region: primary
+ image: {{ openstack_image }}
+ flavor: {{ node_flavor }}
+ key_name: {{ ssh_public_key }}
+ net: { get_resource: net }
+ subnet: { get_resource: subnet }
+ secgrp:
+ - { get_resource: node-secgrp }
+ floating_network: {{ external_network }}
+ net_name:
+ str_replace:
+ template: openshift-ansible-cluster_id-net
+ params:
+ cluster_id: {{ stack_name }}
+ volume_size: {{ app_volume_size }}
+ depends_on:
+ - interface
+
+ infra_nodes:
+ type: OS::Heat::ResourceGroup
+ properties:
+ count: {{ num_infra }}
+ resource_def:
+ type: server.yaml
+ properties:
+ name:
+ str_replace:
+ template: subtype-k8s_type-%index%.cluster_id
+ params:
+ cluster_id: {{ stack_name }}
+ k8s_type: node
+ subtype: infra
+ cluster_env: {{ public_dns_domain }}
+ cluster_id: {{ stack_name }}
+ group:
+ str_replace:
+ template: k8s_type.cluster_id
+ params:
+ k8s_type: infra
+ cluster_id: {{ stack_name }}
+ type: node
+ subtype: infra
+ node_labels:
+ region: infra
+ image: {{ openstack_image }}
+ flavor: {{ infra_flavor }}
+ key_name: {{ ssh_public_key }}
+ net: { get_resource: net }
+ subnet: { get_resource: subnet }
+ secgrp:
+ - { get_resource: node-secgrp }
+ - { get_resource: infra-secgrp }
+ floating_network: {{ external_network }}
+ net_name:
+ str_replace:
+ template: openshift-ansible-cluster_id-net
+ params:
+ cluster_id: {{ stack_name }}
+ volume_size: {{ infra_volume_size }}
+ depends_on:
+ - interface
+
+ dns:
+ type: OS::Heat::ResourceGroup
+ properties:
+ count: {{ num_dns }}
+ resource_def:
+ type: server.yaml
+ properties:
+ name:
+ str_replace:
+ template: k8s_type-%index%.cluster_id
+ params:
+ cluster_id: {{ stack_name }}
+ k8s_type: dns
+ cluster_env: {{ public_dns_domain }}
+ cluster_id: {{ stack_name }}
+ group:
+ str_replace:
+ template: k8s_type.cluster_id
+ params:
+ k8s_type: dns
+ cluster_id: {{ stack_name }}
+ type: dns
+ image: {{ openstack_image }}
+ flavor: {{ dns_flavor }}
+ key_name: {{ ssh_public_key }}
+ net: { get_resource: net }
+ subnet: { get_resource: subnet }
+ secgrp:
+ - { get_resource: node-secgrp }
+ - { get_resource: dns-secgrp }
+ floating_network: {{ external_network }}
+ net_name:
+ str_replace:
+ template: openshift-ansible-cluster_id-net
+ params:
+ cluster_id: {{ stack_name }}
+ volume_size: {{ dns_volume_size }}
+ depends_on:
+ - interface
+
diff --git a/roles/openstack-stack/templates/heat_stack_server.yaml.j2 b/roles/openstack-stack/templates/heat_stack_server.yaml.j2
new file mode 100644
index 000000000..5851d3b9b
--- /dev/null
+++ b/roles/openstack-stack/templates/heat_stack_server.yaml.j2
@@ -0,0 +1,170 @@
+heat_template_version: 2016-10-14
+
+description: OpenShift cluster server
+
+parameters:
+
+ name:
+ type: string
+ label: Name
+ description: Name
+
+ group:
+ type: string
+ label: Host Group
+ description: The Primary Ansible Host Group
+ default: host
+
+ cluster_env:
+ type: string
+ label: Cluster environment
+ description: Environment of the cluster
+
+ cluster_id:
+ type: string
+ label: Cluster ID
+ description: Identifier of the cluster
+
+ type:
+ type: string
+ label: Type
+ description: Type master or node
+
+ subtype:
+ type: string
+ label: Sub-type
+ description: Sub-type compute or infra for nodes, default otherwise
+ default: default
+
+ key_name:
+ type: string
+ label: Key name
+ description: Key name of keypair
+
+ image:
+ type: string
+ label: Image
+ description: Name of the image
+
+ flavor:
+ type: string
+ label: Flavor
+ description: Name of the flavor
+
+ net:
+ type: string
+ label: Net ID
+ description: Net resource
+
+ net_name:
+ type: string
+ label: Net name
+ description: Net name
+
+ subnet:
+ type: string
+ label: Subnet ID
+ description: Subnet resource
+
+ secgrp:
+ type: comma_delimited_list
+ label: Security groups
+ description: Security group resources
+
+ floating_network:
+ type: string
+ label: Floating network
+ description: Network to allocate floating IP from
+
+ availability_zone:
+ type: string
+ description: The Availability Zone to launch the instance.
+ default: nova
+
+ volume_size:
+ type: number
+ description: Size of the volume to be created.
+ default: 1
+ constraints:
+ - range: { min: 1, max: 1024 }
+ description: must be between 1 and 1024 GB.
+
+ node_labels:
+ type: json
+ description: OpenShift Node Labels
+ default: {"region": "default" }
+
+outputs:
+
+ name:
+ description: Name of the server
+ value: { get_attr: [ server, name ] }
+
+ private_ip:
+ description: Private IP of the server
+ value:
+ get_attr:
+ - server
+ - addresses
+ - { get_param: net_name }
+ - 0
+ - addr
+
+ floating_ip:
+ description: Floating IP of the server
+ value:
+ get_attr:
+ - server
+ - addresses
+ - { get_param: net_name }
+ - 1
+ - addr
+
+resources:
+
+ server:
+ type: OS::Nova::Server
+ properties:
+ name: { get_param: name }
+ key_name: { get_param: key_name }
+ image: { get_param: image }
+ flavor: { get_param: flavor }
+ networks:
+ - port: { get_resource: port }
+ user_data:
+ get_file: user-data
+ user_data_format: RAW
+ metadata:
+ group: { get_param: group }
+ environment: { get_param: cluster_env }
+ clusterid: { get_param: cluster_id }
+ host-type: { get_param: type }
+ sub-host-type: { get_param: subtype }
+ node_labels: { get_param: node_labels }
+
+ port:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_param: net }
+ fixed_ips:
+ - subnet: { get_param: subnet }
+ security_groups: { get_param: secgrp }
+
+ floating-ip:
+ type: OS::Neutron::FloatingIP
+ properties:
+ floating_network: { get_param: floating_network }
+ port_id: { get_resource: port }
+
+ cinder_volume:
+ type: OS::Cinder::Volume
+ properties:
+ size: { get_param: volume_size }
+ availability_zone: { get_param: availability_zone }
+
+ volume_attachment:
+ type: OS::Cinder::VolumeAttachment
+ properties:
+ volume_id: { get_resource: cinder_volume }
+ instance_uuid: { get_resource: server }
+ mountpoint: /dev/sdb
diff --git a/roles/openstack-stack/templates/user_data.j2 b/roles/openstack-stack/templates/user_data.j2
new file mode 100644
index 000000000..eb65f7cec
--- /dev/null
+++ b/roles/openstack-stack/templates/user_data.j2
@@ -0,0 +1,13 @@
+#cloud-config
+disable_root: true
+
+system_info:
+ default_user:
+ name: openshift
+ sudo: ["ALL=(ALL) NOPASSWD: ALL"]
+
+write_files:
+ - path: /etc/sudoers.d/00-openshift-no-requiretty
+ permissions: '0440'
+ content: |
+ Defaults:openshift !requiretty
diff --git a/roles/openstack-stack/test/roles b/roles/openstack-stack/test/roles
new file mode 120000
index 000000000..e2b799b9d
--- /dev/null
+++ b/roles/openstack-stack/test/roles
@@ -0,0 +1 @@
+../../../roles/ \ No newline at end of file
diff --git a/roles/openstack-stack/test/stack-create-test.yml b/roles/openstack-stack/test/stack-create-test.yml
new file mode 100644
index 000000000..6cbd7ff30
--- /dev/null
+++ b/roles/openstack-stack/test/stack-create-test.yml
@@ -0,0 +1,17 @@
+---
+- hosts: localhost
+ roles:
+ - role: openstack-stack
+ stack_name: test-stack
+ dns_domain: "{{ public_dns_domain }}"
+ dns_nameservers: "{{ public_dns_nameservers }}"
+ subnet_prefix: "{{ openstack_subnet_prefix }}"
+ ssh_public_key: "{{ openstack_ssh_public_key }}"
+ openstack_image: "{{ openstack_default_image_name }}"
+ etcd_flavor: "{{ openstack_default_flavor }}"
+ master_flavor: "{{ openstack_default_flavor }}"
+ node_flavor: "{{ openstack_default_flavor }}"
+ infra_flavor: "{{ openstack_default_flavor }}"
+ dns_flavor: "{{ openstack_default_flavor }}"
+ external_network: "{{ openstack_external_network_name }}"
+
diff --git a/roles/subscription-manager/README.md b/roles/subscription-manager/README.md
new file mode 100644
index 000000000..748de282c
--- /dev/null
+++ b/roles/subscription-manager/README.md
@@ -0,0 +1,156 @@
+# Red Hat Subscription Manager Ansible Role
+
+## Parameters
+
+This role depends on user-specified variables. These can be set in the inventory file, in group_vars, or passed to the playbook on the CLI. No values are set by default, which disables this role. The variables are:
+
+### rhsm_satellite
+
+Subscription Manager server hostname. If using a Satellite server, set the FQDN here. If using RHSM Hosted, this value must be left blank, none, or false.
+
+Default: none
+
+### rhsm_username
+
+Subscription Manager username. Required for RHSM Hosted. Can be optionally used for Satellite, but it may be better to use **rhsm_activationkey** for this.
+
+Default: none
+
+### rhsm_password
+
+Subscription Manager password. Required for RHSM Hosted. Can be optionally used for Satellite, but it may be better to use **rhsm_activationkey** for this.
+
+NOTE: If this variable is specified on the command line or set in a variable file, it may leave your password exposed. For this reason you may prefer to use an Activation Key if using Satellite. For RHSM Hosted, your password must be specified. There are two ways to provide the password to the Ansible playbook without exposing it to prying eyes.
+
+1. The first method is to use a **vars_prompt** to collect the password up front, one time, for the playbook. Ansible will not display the password if the prompt is configured as **private**, and the task will not display the password on the CLI. This is a good method, as it supports automating the task across every host with only one password entry. To enable **vars_prompt**, add the following at the very top of your playbook, after the **hosts** declaration and before any **pre_tasks** section:
+
+ ```
+ - hosts: localhost
+ # Add the following lines after a -hosts: declaration and before pre_tasks:
+ # Start of vars_prompt code block
+ vars_prompt:
+ - name: "rhsm_password"
+ prompt: "Subscription Manager password"
+ confirm: yes
+ private: yes
+ # End of vars_prompt code block
+ pre_tasks:
+ ```
+
+2. A second method is to use an encrypted file via **ansible-vault**. Unlike the previous method, this does not require modifying any code, but it does require more work to create and encrypt the file. To accomplish this, first create a file containing at least the **rhsm_password** variable (additional variables can be included to encrypt them as well):
+ 1. Create a file to contain the variable such as **secrets.yml**:
+
+ ```
+ ---
+ rhsm_password: "my_secret_password"
+ # other variables can optionally be placed here as well
+ ```
+
+ 2. Encrypt the file with **ansible-vault**:
+
+ ```
+ $ ansible-vault encrypt secrets.yml
+ Vault password:
+ Confirm Vault password:
+ Encryption successful
+ ```
+
+ 3. When executing **ansible-playbook** specify **--ask-vault-pass** to be prompted for the decryption password, and also specify the location of the **secrets.yml** as such:
+
+ ```
+ $ ansible-playbook --ask-vault-pass --extra-vars=@secrets.yml --extra-vars="rhsm_username=myusername" <other playbook options>
+ ```
+
+ NOTE: Optionally, the file containing the encrypted variables can be decrypted with **ansible-vault** and the **--ask-vault-pass** option omitted to prevent any password prompting (for automated runs), then the file re-encrypted after the run. This is useful if an external system such as Jenkins handles the decryption/encryption outside of Ansible.
+
+Default: none
+
+### rhsm_org
+
+Optional Subscription Manager Satellite Organization. Required for Satellite, ignored if using RHSM Hosted.
+
+Default: none
+
+### rhsm_activationkey
+
+Optional Subscription Manager Satellite Activation Key, use this instead of **rhsm_username** and **rhsm_password** if using Satellite to provide repositories and authentication in a key instead.
+
+Default: none
+
+### rhsm_pool
+
+Optional Subscription Manager pool; determine this by running **subscription-manager list --available** on a registered system. Valid for RHSM Hosted or Satellite. If **rhsm_activationkey** is specified, this option is ignored.
+
+Default: none
+
+### rhsm_repos
+
+Optional list of repositories to enable. If left blank, it is expected that the **rhsm_activationkey** will specify repos instead. If populated, **subscription-manager repos --disable=\*** will be run and each of the specified repos explicitly enabled. Valid for RHSM Hosted or Satellite.
+
+NOTE: If specifying this value in an inventory file as opposed to group_vars, be sure to define it as a proper list as such:
+
+`rhsm_repos='["rhel-7-server-rpms", "rhel-7-server-ose-3.1-rpms", "rhel-7-server-extras-rpms"]'`
+
+Default: none
+
+## Calling This Role
+Calling this role is done at both **pre_tasks** and **roles** sections of a playbook and optionally a **vars_prompt**.
+
+### vars_prompt
+Unfortunately **vars_prompt** can only be used at the play level before role tasks are executed, so this is the only place it can go. It also cannot be shown conditionally. For this reason it is not included in this role by default. A better method may be using a file containing the password variable encrypted with **ansible-vault**. See the **rhsm_password** section for more details.
+
+To Add a prompt to capture **rhsm_password**:
+
+```
+- hosts: localhost
+ # Add the following lines after a -hosts: declaration and before pre_tasks:
+ # Start of vars_prompt code block
+ vars_prompt:
+ - name: "rhsm_password"
+ prompt: "Subscription Manager password"
+ confirm: yes
+ private: yes
+ # End of vars_prompt code block
+ pre_tasks:
+```
+
+### pre-tasks
+
+A number of variable checks are performed before any tasks to ensure the proper parameters are set. To include these checks call the pre_task yaml before any roles:
+
+```
+pre_tasks:
+- include: roles/subscription-manager/pre_tasks/pre_tasks.yml
+```
+
+### roles
+
+The bulk of the work is performed in main.yml for this role. The pre-task play sets a variable which can be checked to conditionally include this role, as such:
+
+```
+roles:
+ - { role: subscription-manager, when: hostvars.localhost.rhsm_register, tags: 'subscription-manager' }
+```
+
+## Running Playbooks with this Role
+
+- To register to RHSM Hosted or Satellite with a username and plain text password (NOTE: This may retain your password in your CLI history):
+
+ ```
+ $ ansible-playbook --extra-vars="rhsm_username=vvaldez rhsm_password=my_secret_password <other playbook otions>"
+ ```
+
+- To register to RHSM Hosted or Satellite with username and an encrypted file containing the password:
+
+ ```
+ $ ansible-playbook --ask-vault-pass --extra-vars=@secrets.yml --extra-vars="rhsm_username=myusername" <other playbook options>
+
+ ```
+
+- To register to a Satellite server with an activation key:
+
+ ```
+ $ ansible-playbook --extra-vars="rhsm_satellite=satellite.example.com rhsm_org=example_org rhsm_activationkey=rhel-7-ose-3-1 <other playbook options>"
+
+ ```
+- To ignore any Subscription Manager activities, simply do not set any parameters.
diff --git a/roles/subscription-manager/pre_tasks/pre_tasks.yml b/roles/subscription-manager/pre_tasks/pre_tasks.yml
new file mode 100644
index 000000000..b21356cf2
--- /dev/null
+++ b/roles/subscription-manager/pre_tasks/pre_tasks.yml
@@ -0,0 +1,45 @@
+---
+- name: "Set password fact"
+ set_fact:
+ rhsm_password: "{{ rhsm_password | default(None) }}"
+ no_log: true
+
+- name: "Initialize Subscription Manager fact"
+ set_fact:
+ rhsm_register: true
+
+- name: "Determine if Subscription Manager should be used"
+ set_fact:
+ rhsm_register: false
+ when:
+ - rhsm_satellite is undefined or rhsm_satellite is none or rhsm_satellite|trim == ''
+ - rhsm_username is undefined or rhsm_username is none or rhsm_username|trim == ''
+ - rhsm_password is undefined or rhsm_password is none or rhsm_password|trim == ''
+ - rhsm_org is undefined or rhsm_org is none or rhsm_org|trim == ''
+ - rhsm_activationkey is undefined or rhsm_activationkey is none or rhsm_activationkey|trim == ''
+ - rhsm_pool is undefined or rhsm_pool is none or rhsm_pool|trim == ''
+
+- name: "Validate Subscription Manager organization is set"
+ fail: msg="Cannot register to a Satellite server without a value for the Organization via 'rhsm_org'"
+ when:
+ - rhsm_org is undefined or rhsm_org is none or rhsm_org|trim == ''
+ - rhsm_satellite is defined
+ - rhsm_satellite is not none
+ - rhsm_satellite|trim != ''
+ - rhsm_register
+
+- name: "Validate Subscription Manager authentication is defined"
+ fail: msg="Cannot register without ('rhsm_username' and 'rhsm_password') or 'rhsm_activationkey' variables set. See the README.md for details on securely prompting for a password"
+ when:
+ - (rhsm_username is undefined or rhsm_username is none or rhsm_username|trim == '') or (rhsm_password is undefined or rhsm_password is none or rhsm_password|trim == '')
+ - rhsm_activationkey is undefined or rhsm_activationkey is none or rhsm_activationkey|trim == ''
+ - rhsm_register
+
+- name: "Validate activation key and Hosted are not requested together"
+ fail: msg="Cannot register to RHSM Hosted with 'rhsm_activationkey'"
+ when:
+ - rhsm_satellite is undefined or rhsm_satellite is none or rhsm_satellite|trim == ''
+ - rhsm_activationkey is defined
+ - rhsm_activationkey is not none
+ - rhsm_activationkey|trim != ''
+ - rhsm_register
diff --git a/roles/subscription-manager/tasks/main.yml b/roles/subscription-manager/tasks/main.yml
new file mode 100644
index 000000000..2dd14b48e
--- /dev/null
+++ b/roles/subscription-manager/tasks/main.yml
@@ -0,0 +1,122 @@
+---
+- name: "Initialize rhsm_password variable if vars_prompt was used"
+ set_fact:
+ rhsm_password: "{{ hostvars.localhost.rhsm_password }}"
+ when:
+ - rhsm_password is not defined or rhsm_password is none or rhsm_password|trim == ''
+
+- name: "Initializing Subscription Manager authentication method"
+ set_fact:
+ rhsm_authentication: false
+
+# 'rhsm_activationkey' will take precedence even if 'rhsm_username' and 'rhsm_password' are also set
+- name: "Setting Subscription Manager Activation Key Fact"
+ set_fact:
+ rhsm_authentication: "key"
+ when:
+ - rhsm_activationkey is defined
+ - rhsm_activationkey is not none
+ - rhsm_activationkey|trim != ''
+ - not rhsm_authentication
+
+# If 'rhsm_username' and 'rhsm_password' are set but not 'rhsm_activationkey', set 'rhsm_authentication' to password
+- name: "Setting Subscription Manager Username and Password Fact"
+ set_fact:
+ rhsm_authentication: "password"
+ when:
+ - rhsm_username is defined
+ - rhsm_username is not none
+ - rhsm_username|trim != ''
+ - rhsm_password is defined
+ - rhsm_password is not none
+ - rhsm_password|trim != ''
+ - not rhsm_authentication
+
+- name: "Initializing registration status"
+ set_fact:
+ registered: false
+
+- name: "Checking subscription status (a failure means it is not registered and will be)"
+ command: "/usr/bin/subscription-manager status"
+ ignore_errors: yes
+ changed_when: no
+ register: check_if_registered
+
+- name: "Set registration fact if system is already registered"
+ set_fact:
+ registered: true
+ when: check_if_registered.rc == 0
+
+- name: "Cleaning any old subscriptions"
+ command: "/usr/bin/subscription-manager clean"
+ when:
+ - not registered
+ - rhsm_authentication is defined
+
+- name: "Install Satellite certificate"
+ command: "rpm -Uvh --force http://{{ rhsm_satellite }}/pub/katello-ca-consumer-latest.noarch.rpm"
+ when:
+ - not registered
+ - rhsm_satellite is defined
+ - rhsm_satellite is not none
+ - rhsm_satellite|trim != ''
+
+- name: "Register to Satellite using activation key"
+ command: "/usr/bin/subscription-manager register --activationkey={{ rhsm_activationkey }} --org='{{ rhsm_org }}'"
+ when:
+ - not registered
+ - rhsm_authentication == 'key'
+ - rhsm_satellite is defined
+ - rhsm_satellite is not none
+ - rhsm_satellite|trim != ''
+
+# This can apply to either Hosted or Satellite
+- name: "Register using username and password"
+ command: "/usr/bin/subscription-manager register --username={{ rhsm_username }} --password={{ rhsm_password }}"
+ no_log: true
+ when:
+ - not registered
+ - rhsm_authentication == "password"
+ - rhsm_org is not defined or rhsm_org is none or rhsm_org|trim == ''
+
+# This can apply to either Hosted or Satellite
+- name: "Register using username, password and organization"
+ command: "/usr/bin/subscription-manager register --username={{ rhsm_username }} --password={{ rhsm_password }} --org={{ rhsm_org }}"
+ no_log: true
+ when:
+ - not registered
+ - rhsm_authentication == "password"
+ - rhsm_org is defined
+ - rhsm_org is not none
+ - rhsm_org|trim != ''
+
+- name: "Auto-attach to Subscription Manager Pool"
+ command: "/usr/bin/subscription-manager attach --auto"
+ when:
+ - not registered
+ - rhsm_pool is undefined or rhsm_pool is none or rhsm_pool|trim == ''
+
+- name: "Attach to a specific pool"
+ command: "/usr/bin/subscription-manager attach --pool={{ rhsm_pool }}"
+ when:
+ - rhsm_pool is defined
+ - rhsm_pool is not none
+ - rhsm_pool|trim != ''
+ - not registered
+
+- name: "Disable all repositories"
+ command: "/usr/bin/subscription-manager repos --disable=*"
+ when:
+ - not registered
+ - rhsm_repos is defined
+ - rhsm_repos is not none
+ - rhsm_repos|trim != ''
+
+- name: "Enable specified repositories"
+ command: "/usr/bin/subscription-manager repos --enable={{ item }}"
+ with_items: "{{ rhsm_repos }}"
+ when:
+ - not registered
+ - rhsm_repos is defined
+ - rhsm_repos is not none
+ - rhsm_repos|trim != ''
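Pulling the pieces together, an illustrative inventory fragment that drives a Satellite registration with this role — values are placeholders; note the quoted-list syntax for rhsm_repos described in the README:

```
[all:vars]
rhsm_satellite=satellite.example.com
rhsm_org=example_org
rhsm_activationkey=rhel-7-ose-3-1
rhsm_repos='["rhel-7-server-rpms", "rhel-7-server-ose-3.1-rpms", "rhel-7-server-extras-rpms"]'
```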