Diffstat (limited to 'roles')
-rw-r--r--  roles/flannel/README.md | 45
-rw-r--r--  roles/flannel/defaults/main.yaml | 8
-rw-r--r--  roles/flannel/handlers/main.yml | 8
-rw-r--r--  roles/flannel/meta/main.yml | 16
-rw-r--r--  roles/flannel/tasks/main.yml | 43
-rw-r--r--  roles/flannel_register/README.md | 47
-rw-r--r--  roles/flannel_register/defaults/main.yaml | 11
-rw-r--r--  roles/flannel_register/meta/main.yml | 16
-rw-r--r--  roles/flannel_register/tasks/main.yml | 14
-rw-r--r--  roles/flannel_register/templates/flannel-config.json | 8
-rw-r--r--  roles/haproxy/README.md | 34
-rw-r--r--  roles/haproxy/defaults/main.yml | 21
-rw-r--r--  roles/haproxy/handlers/main.yml | 5
-rw-r--r--  roles/haproxy/meta/main.yml | 14
-rw-r--r--  roles/haproxy/tasks/main.yml | 25
-rw-r--r--  roles/haproxy/templates/haproxy.cfg.j2 | 76
-rw-r--r--  roles/kube_nfs_volumes/README.md | 3
-rw-r--r--  roles/kube_nfs_volumes/defaults/main.yml | 6
-rw-r--r--  roles/kube_nfs_volumes/tasks/main.yml | 13
l---------  roles/kube_nfs_volumes/templates/v1/nfs.json.j2 | 1
-rw-r--r--  roles/kube_nfs_volumes/templates/v1beta3/nfs.json.j2 (renamed from roles/kube_nfs_volumes/templates/nfs.json.j2) | 0
-rw-r--r--  roles/lib_zabbix/library/zbx_itemprototype.py | 39
-rw-r--r--  roles/lib_zabbix/tasks/create_template.yml | 4
-rw-r--r--  roles/openshift_ansible_inventory/tasks/main.yml | 16
-rw-r--r--  roles/openshift_common/tasks/main.yml | 5
-rw-r--r--  roles/openshift_examples/defaults/main.yml | 2
-rwxr-xr-x  roles/openshift_examples/examples-sync.sh | 8
-rw-r--r--  roles/openshift_examples/files/examples/infrastructure-templates/enterprise/logging-deployer.yaml | 151
-rw-r--r--  roles/openshift_examples/files/examples/infrastructure-templates/enterprise/metrics-deployer.yaml | 116
-rw-r--r--  roles/openshift_examples/files/examples/infrastructure-templates/origin/logging-deployer.yaml | 151
-rw-r--r--  roles/openshift_examples/files/examples/infrastructure-templates/origin/metrics-deployer.yaml | 116
-rw-r--r--  roles/openshift_examples/tasks/main.yml | 16
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py | 144
-rw-r--r--  roles/openshift_facts/tasks/main.yml | 7
-rw-r--r--  roles/openshift_master/handlers/main.yml | 10
-rw-r--r--  roles/openshift_master/tasks/main.yml | 115
-rw-r--r--  roles/openshift_master/templates/atomic-openshift-master-api.j2 | 9
-rw-r--r--  roles/openshift_master/templates/atomic-openshift-master-api.service.j2 | 21
-rw-r--r--  roles/openshift_master/templates/atomic-openshift-master-controllers.j2 | 9
-rw-r--r--  roles/openshift_master/templates/atomic-openshift-master-controllers.service.j2 | 22
-rw-r--r--  roles/openshift_master/templates/master.yaml.v1.j2 | 36
-rw-r--r--  roles/openshift_master/templates/sessionSecretsFile.yaml.v1.j2 | 7
-rw-r--r--  roles/openshift_master/vars/main.yml | 1
-rw-r--r--  roles/openshift_master_ca/tasks/main.yml | 2
-rw-r--r--  roles/openshift_master_certificates/tasks/main.yml | 24
-rw-r--r--  roles/openshift_master_cluster/tasks/configure_deferred.yml | 8
-rw-r--r--  roles/openshift_master_cluster/tasks/main.yml | 5
-rw-r--r--  roles/openshift_node/meta/main.yml | 1
-rw-r--r--  roles/openshift_node/tasks/main.yml | 15
-rw-r--r--  roles/openshift_node/templates/node.yaml.v1.j2 | 2
-rw-r--r--  roles/openshift_repos/tasks/main.yaml | 2
-rw-r--r--  roles/os_zabbix/vars/template_os_linux.yml | 46
52 files changed, 1441 insertions, 83 deletions
diff --git a/roles/flannel/README.md b/roles/flannel/README.md
new file mode 100644
index 000000000..b8aa830ac
--- /dev/null
+++ b/roles/flannel/README.md
@@ -0,0 +1,45 @@
+flannel
+=======
+
+Configure flannel on OpenShift nodes
+
+Requirements
+------------
+
+This role assumes it is being deployed on a RHEL/Fedora-based host with a
+package named 'flannel', at a version newer than 0.3, available via yum.
+
+Role Variables
+--------------
+
+| Name | Default value | Description |
+|---------------------|-----------------------------------------|-----------------------------------------------|
+| flannel_interface | ansible_default_ipv4.interface | interface to use for inter-host communication |
+| flannel_etcd_key | /openshift.com/network | etcd prefix |
+| etcd_hosts | etcd_urls | a list of etcd endpoints |
+| etcd_conf_dir | {{ openshift.common.config_base }}/node | SSL certificates directory |
+| etcd_peer_ca_file | {{ etcd_conf_dir }}/ca.crt | SSL CA to use for etcd |
+| etcd_peer_cert_file | OpenShift SSL cert                       | SSL cert to use for etcd                       |
+| etcd_peer_key_file  | OpenShift SSL key                        | SSL key to use for etcd                        |
+
+Dependencies
+------------
+
+openshift_facts
+
+Example Playbook
+----------------
+
+ - hosts: openshift_node
+ roles:
+ - { role: flannel, etcd_urls: ['https://127.0.0.1:2379'] }
+
+License
+-------
+
+Apache License, Version 2.0
+
+Author Information
+------------------
+
+Sylvain Baubeau <sbaubeau@redhat.com>
diff --git a/roles/flannel/defaults/main.yaml b/roles/flannel/defaults/main.yaml
new file mode 100644
index 000000000..34cebda9c
--- /dev/null
+++ b/roles/flannel/defaults/main.yaml
@@ -0,0 +1,8 @@
+---
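+# With embedded etcd the node's own client certificates double as the etcd
+# peer certificates; with external etcd, dedicated node.etcd-ca /
+# node.etcd-client files are used instead.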
+flannel_interface: "{{ ansible_default_ipv4.interface }}"
+flannel_etcd_key: /openshift.com/network
+etcd_hosts: "{{ etcd_urls }}"
+etcd_conf_dir: "{{ openshift.common.config_base }}/node"
+etcd_peer_ca_file: "{{ etcd_conf_dir }}/{{ 'ca' if (embedded_etcd | bool) else 'node.etcd-ca' }}.crt"
+etcd_peer_cert_file: "{{ etcd_conf_dir }}/{{ 'system:node:' + openshift.common.hostname if (embedded_etcd | bool) else 'node.etcd-client' }}.crt"
+etcd_peer_key_file: "{{ etcd_conf_dir }}/{{ 'system:node:' + openshift.common.hostname if (embedded_etcd | bool) else 'node.etcd-client' }}.key"
diff --git a/roles/flannel/handlers/main.yml b/roles/flannel/handlers/main.yml
new file mode 100644
index 000000000..f9b9ae7f1
--- /dev/null
+++ b/roles/flannel/handlers/main.yml
@@ -0,0 +1,8 @@
+---
+- name: restart flanneld
+ sudo: true
+ service: name=flanneld state=restarted
+
+- name: restart docker
+ sudo: true
+ service: name=docker state=restarted
diff --git a/roles/flannel/meta/main.yml b/roles/flannel/meta/main.yml
new file mode 100644
index 000000000..909bdbfa4
--- /dev/null
+++ b/roles/flannel/meta/main.yml
@@ -0,0 +1,16 @@
+---
+galaxy_info:
+ author: Sylvain
+ description: flannel management
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 1.2
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+ - system
+dependencies:
+- { role: openshift_facts }
diff --git a/roles/flannel/tasks/main.yml b/roles/flannel/tasks/main.yml
new file mode 100644
index 000000000..acfb009ec
--- /dev/null
+++ b/roles/flannel/tasks/main.yml
@@ -0,0 +1,43 @@
+---
+- name: Install flannel
+ sudo: true
+ yum: pkg=flannel state=present
+
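+# The lineinfile tasks below use backrefs, so they rewrite existing
+# FLANNEL_* lines in place; non-matching lines are left untouched and
+# nothing is appended to the file.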
+- name: Set flannel etcd url
+ sudo: true
+ lineinfile:
+ dest: /etc/sysconfig/flanneld
+ backrefs: yes
+ regexp: "^(FLANNEL_ETCD=)"
+ line: '\1{{ etcd_hosts|join(",") }}'
+
+- name: Set flannel etcd key
+ sudo: true
+ lineinfile:
+ dest: /etc/sysconfig/flanneld
+ backrefs: yes
+ regexp: "^(FLANNEL_ETCD_KEY=)"
+ line: '\1{{ flannel_etcd_key }}'
+
+- name: Set flannel options
+ sudo: true
+ lineinfile:
+ dest: /etc/sysconfig/flanneld
+ backrefs: yes
+ regexp: "^#?(FLANNEL_OPTIONS=)"
+ line: '\1--iface {{ flannel_interface }} --etcd-cafile={{ etcd_peer_ca_file }} --etcd-keyfile={{ etcd_peer_key_file }} --etcd-certfile={{ etcd_peer_cert_file }}'
+
+- name: Enable flanneld
+ sudo: true
+ service:
+ name: flanneld
+ state: started
+ enabled: yes
+ register: start_result
+
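+# docker assigns docker0 an address from its own default pool; remove it so
+# the flannel-allocated subnet takes effect once docker is restarted.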
+- name: Remove docker bridge ip
+ sudo: true
+ shell: ip a del `ip a show docker0 | grep "inet[[:space:]]" | awk '{print $2}'` dev docker0
+ notify:
+ - restart docker
+ - restart node
diff --git a/roles/flannel_register/README.md b/roles/flannel_register/README.md
new file mode 100644
index 000000000..ba7541ab1
--- /dev/null
+++ b/roles/flannel_register/README.md
@@ -0,0 +1,47 @@
+flannel_register
+================
+
+Register the flannel configuration in etcd
+
+Requirements
+------------
+
+This role assumes it is being deployed on a RHEL/Fedora-based host with a
+package named 'flannel', at a version newer than 0.3, available via yum.
+
+Role Variables
+--------------
+
+| Name | Default value | Description |
+|---------------------|----------------------------------------------------|-------------------------------------------------|
+| flannel_network     | {{ openshift.master.portal_net }} or 172.30.0.0/16 | IP network (CIDR) used for the flannel overlay   |
+| flannel_min_network | 172.30.5.0                                          | beginning of IP range for the subnet allocation  |
+| flannel_subnet_len  | 24                                                  | size of the subnet allocated to each host        |
+| flannel_etcd_key | /openshift.com/network | etcd prefix |
+| etcd_hosts | etcd_urls | a list of etcd endpoints |
+| etcd_conf_dir | {{ openshift.common.config_base }}/master | SSL certificates directory |
+| etcd_peer_ca_file | {{ etcd_conf_dir }}/ca.crt | SSL CA to use for etcd |
+| etcd_peer_cert_file | {{ etcd_conf_dir }}/master.etcd-client.crt | SSL cert to use for etcd |
+| etcd_peer_key_file | {{ etcd_conf_dir }}/master.etcd-client.key | SSL key to use for etcd |
+
+Dependencies
+------------
+
+openshift_facts
+
+Example Playbook
+----------------
+
+ - hosts: openshift_master
+ roles:
+      - { role: flannel_register }
+
+License
+-------
+
+Apache License, Version 2.0
+
+Author Information
+------------------
+
+Sylvain Baubeau <sbaubeau@redhat.com>
diff --git a/roles/flannel_register/defaults/main.yaml b/roles/flannel_register/defaults/main.yaml
new file mode 100644
index 000000000..269d1a17c
--- /dev/null
+++ b/roles/flannel_register/defaults/main.yaml
@@ -0,0 +1,11 @@
+---
+flannel_network: "{{ openshift.master.portal_net | default('172.30.0.0/16', true) }}"
+flannel_min_network: 172.30.5.0
+flannel_subnet_len: 24
+flannel_etcd_key: /openshift.com/network
+etcd_hosts: "{{ etcd_urls }}"
+etcd_conf_dir: "{{ openshift.common.config_base }}/master"
+etcd_peer_ca_file: "{{ etcd_conf_dir + '/ca.crt' if (openshift.master.embedded_etcd | bool) else etcd_conf_dir + '/master.etcd-ca.crt' }}"
+etcd_peer_cert_file: "{{ etcd_conf_dir }}/master.etcd-client.crt"
+etcd_peer_key_file: "{{ etcd_conf_dir }}/master.etcd-client.key"
+
diff --git a/roles/flannel_register/meta/main.yml b/roles/flannel_register/meta/main.yml
new file mode 100644
index 000000000..73bddcca4
--- /dev/null
+++ b/roles/flannel_register/meta/main.yml
@@ -0,0 +1,16 @@
+---
+galaxy_info:
+ author: Sylvain
+ description: register flannel configuration into etcd
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 1.2
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+ - system
+dependencies:
+- { role: openshift_facts }
diff --git a/roles/flannel_register/tasks/main.yml b/roles/flannel_register/tasks/main.yml
new file mode 100644
index 000000000..1629157c8
--- /dev/null
+++ b/roles/flannel_register/tasks/main.yml
@@ -0,0 +1,14 @@
+---
+- name: Ensure /etc/flannel directory exists
+ sudo: true
+ file: path=/etc/flannel state=directory
+
+- name: Generate flannel configuration for etcd
+ sudo: true
+ template:
+ src: "flannel-config.json"
+ dest: "/etc/flannel/config.json"
+
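+# flanneld reads its network configuration from this etcd key at startup,
+# so the key must be populated before flanneld is started on any node.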
+- name: Insert flannel configuration into etcd
+ sudo: true
+ command: 'curl -L --cacert "{{ etcd_peer_ca_file }}" --cert "{{ etcd_peer_cert_file }}" --key "{{ etcd_peer_key_file }}" "{{ etcd_hosts[0] }}/v2/keys{{ flannel_etcd_key }}/config" -XPUT --data-urlencode value@/etc/flannel/config.json'
diff --git a/roles/flannel_register/templates/flannel-config.json b/roles/flannel_register/templates/flannel-config.json
new file mode 100644
index 000000000..89ce4c30b
--- /dev/null
+++ b/roles/flannel_register/templates/flannel-config.json
@@ -0,0 +1,8 @@
+{
+ "Network": "{{ flannel_network }}",
+ "SubnetLen": {{ flannel_subnet_len }},
+ "SubnetMin": "{{ flannel_min_network }}",
+ "Backend": {
+ "Type": "host-gw"
+ }
+}
diff --git a/roles/haproxy/README.md b/roles/haproxy/README.md
new file mode 100644
index 000000000..5bc415066
--- /dev/null
+++ b/roles/haproxy/README.md
@@ -0,0 +1,34 @@
+HAProxy
+=======
+
+TODO
+
+Requirements
+------------
+
+TODO
+
+Role Variables
+--------------
+
+TODO
+
+Dependencies
+------------
+
+TODO
+
+Example Playbook
+----------------
+
+TODO
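+
+Until then, a minimal sketch based on the role defaults and the template in
+this patch (hostnames, ports, and section names are illustrative):
+
+    - hosts: lb
+      roles:
+      - role: haproxy
+        haproxy_frontends:
+        - name: api
+          mode: tcp
+          binds:
+          - "*:8443"
+          default_backend: api
+        haproxy_backends:
+        - name: api
+          mode: tcp
+          balance: source
+          servers:
+          - name: master0
+            address: master0.example.com:8443
+            opts: check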
+
+License
+-------
+
+Apache License, Version 2.0
+
+Author Information
+------------------
+
+Jason DeTiberus (jdetiber@redhat.com)
diff --git a/roles/haproxy/defaults/main.yml b/roles/haproxy/defaults/main.yml
new file mode 100644
index 000000000..7ba5bd485
--- /dev/null
+++ b/roles/haproxy/defaults/main.yml
@@ -0,0 +1,21 @@
+---
+haproxy_frontends:
+- name: main
+ binds:
+ - "*:80"
+ default_backend: default
+
+haproxy_backends:
+- name: default
+ balance: roundrobin
+ servers:
+ - name: web01
+ address: 127.0.0.1:9000
+ opts: check
+
+os_firewall_use_firewalld: False
+os_firewall_allow:
+- service: haproxy stats
+ port: "9000/tcp"
+- service: haproxy balance
+ port: "8443/tcp"
diff --git a/roles/haproxy/handlers/main.yml b/roles/haproxy/handlers/main.yml
new file mode 100644
index 000000000..ee60adcab
--- /dev/null
+++ b/roles/haproxy/handlers/main.yml
@@ -0,0 +1,5 @@
+---
+- name: restart haproxy
+ service:
+ name: haproxy
+ state: restarted
diff --git a/roles/haproxy/meta/main.yml b/roles/haproxy/meta/main.yml
new file mode 100644
index 000000000..0fad106a9
--- /dev/null
+++ b/roles/haproxy/meta/main.yml
@@ -0,0 +1,14 @@
+---
+galaxy_info:
+ author: Jason DeTiberus
+ description: HAProxy
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 1.9
+ platforms:
+ - name: EL
+ versions:
+ - 7
+dependencies:
+- { role: os_firewall }
+- { role: openshift_repos }
diff --git a/roles/haproxy/tasks/main.yml b/roles/haproxy/tasks/main.yml
new file mode 100644
index 000000000..5638b7313
--- /dev/null
+++ b/roles/haproxy/tasks/main.yml
@@ -0,0 +1,25 @@
+---
+- name: Install haproxy
+ yum:
+ pkg: haproxy
+ state: present
+
+- name: Configure haproxy
+ template:
+ src: haproxy.cfg.j2
+ dest: /etc/haproxy/haproxy.cfg
+ owner: root
+ group: root
+ mode: 0644
+ notify: restart haproxy
+
+- name: Enable and start haproxy
+ service:
+ name: haproxy
+ state: started
+ enabled: yes
+ register: start_result
+
+- name: Pause 30 seconds if haproxy was just started
+ pause: seconds=30
+ when: start_result | changed
diff --git a/roles/haproxy/templates/haproxy.cfg.j2 b/roles/haproxy/templates/haproxy.cfg.j2
new file mode 100644
index 000000000..c932af72f
--- /dev/null
+++ b/roles/haproxy/templates/haproxy.cfg.j2
@@ -0,0 +1,76 @@
+# Global settings
+#---------------------------------------------------------------------
+global
+ chroot /var/lib/haproxy
+ pidfile /var/run/haproxy.pid
+ maxconn 4000
+ user haproxy
+ group haproxy
+ daemon
+
+ # turn on stats unix socket
+ stats socket /var/lib/haproxy/stats
+
+#---------------------------------------------------------------------
+# common defaults that all the 'listen' and 'backend' sections will
+# use if not designated in their block
+#---------------------------------------------------------------------
+defaults
+ mode http
+ log global
+ option httplog
+ option dontlognull
+ option http-server-close
+ option forwardfor except 127.0.0.0/8
+ option redispatch
+ retries 3
+ timeout http-request 10s
+ timeout queue 1m
+ timeout connect 10s
+ timeout client 300s
+ timeout server 300s
+ timeout http-keep-alive 10s
+ timeout check 10s
+ maxconn 3000
+
+listen stats :9000
+ mode http
+ stats enable
+ stats uri /
+
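+# One frontend/backend section below is rendered per entry in
+# haproxy_frontends and haproxy_backends.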
+{% for frontend in haproxy_frontends %}
+frontend {{ frontend.name }}
+{% for bind in frontend.binds %}
+ bind {{ bind }}
+{% endfor %}
+ default_backend {{ frontend.default_backend }}
+{% if 'mode' in frontend %}
+ mode {{ frontend.mode }}
+{% endif %}
+{% if 'options' in frontend %}
+{% for option in frontend.options %}
+ option {{ option }}
+{% endfor %}
+{% endif %}
+{% if 'redirects' in frontend %}
+{% for redirect in frontend.redirects %}
+ redirect {{ redirect }}
+{% endfor %}
+{% endif %}
+{% endfor %}
+
+{% for backend in haproxy_backends %}
+backend {{ backend.name }}
+ balance {{ backend.balance }}
+{% if 'mode' in backend %}
+ mode {{ backend.mode }}
+{% endif %}
+{% if 'options' in backend %}
+{% for option in backend.options %}
+ option {{ option }}
+{% endfor %}
+{% endif %}
+{% for server in backend.servers %}
+ server {{ server.name }} {{ server.address }} {{ server.opts }}
+{% endfor %}
+{% endfor %}
diff --git a/roles/kube_nfs_volumes/README.md b/roles/kube_nfs_volumes/README.md
index 56c69c286..1520f79b2 100644
--- a/roles/kube_nfs_volumes/README.md
+++ b/roles/kube_nfs_volumes/README.md
@@ -44,6 +44,9 @@ kubernetes_url: https://10.245.1.2:6443
# Token to use for authentication to the API server
kubernetes_token: tJdce6Fn3cL1112YoIJ5m2exzAbzcPZX
+
+# API Version to use for kubernetes
+kube_api_version: v1
```
## Dependencies
diff --git a/roles/kube_nfs_volumes/defaults/main.yml b/roles/kube_nfs_volumes/defaults/main.yml
index e296492f9..bdd994d07 100644
--- a/roles/kube_nfs_volumes/defaults/main.yml
+++ b/roles/kube_nfs_volumes/defaults/main.yml
@@ -1,4 +1,10 @@
---
+kubernetes_url: https://172.30.0.1:443
+
+kube_api_version: v1
+
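+# The request template is selected by API version; in this patch the v1
+# template is a symlink to the v1beta3 one.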
+kube_req_template: "../templates/{{ kube_api_version }}/nfs.json.j2"
+
# Options of NFS exports.
nfs_export_options: "*(rw,no_root_squash,insecure,no_subtree_check)"
diff --git a/roles/kube_nfs_volumes/tasks/main.yml b/roles/kube_nfs_volumes/tasks/main.yml
index f4a506234..d1dcf261a 100644
--- a/roles/kube_nfs_volumes/tasks/main.yml
+++ b/roles/kube_nfs_volumes/tasks/main.yml
@@ -16,10 +16,11 @@
- include: nfs.yml
- name: export physical volumes
- uri: url={{ kubernetes_url }}/api/v1beta3/persistentvolumes
- method=POST
- body='{{ lookup("template", "../templates/nfs.json.j2") }}'
- body_format=json
- status_code=201
- HEADER_Authorization="Bearer {{ kubernetes_token }}"
+ uri:
+ url: "{{ kubernetes_url }}/api/{{ kube_api_version }}/persistentvolumes"
+ method: POST
+ body: "{{ lookup('template', kube_req_template) }}"
+ body_format: json
+ status_code: 201
+ HEADER_Authorization: "Bearer {{ kubernetes_token }}"
with_items: partition_pool
diff --git a/roles/kube_nfs_volumes/templates/v1/nfs.json.j2 b/roles/kube_nfs_volumes/templates/v1/nfs.json.j2
new file mode 120000
index 000000000..49c1191bc
--- /dev/null
+++ b/roles/kube_nfs_volumes/templates/v1/nfs.json.j2
@@ -0,0 +1 @@
+../v1beta3/nfs.json.j2 \ No newline at end of file
diff --git a/roles/kube_nfs_volumes/templates/nfs.json.j2 b/roles/kube_nfs_volumes/templates/v1beta3/nfs.json.j2
index b42886ef1..b42886ef1 100644
--- a/roles/kube_nfs_volumes/templates/nfs.json.j2
+++ b/roles/kube_nfs_volumes/templates/v1beta3/nfs.json.j2
diff --git a/roles/lib_zabbix/library/zbx_itemprototype.py b/roles/lib_zabbix/library/zbx_itemprototype.py
index e7fd6fa21..43498c015 100644
--- a/roles/lib_zabbix/library/zbx_itemprototype.py
+++ b/roles/lib_zabbix/library/zbx_itemprototype.py
@@ -67,7 +67,24 @@ def get_template(zapi, template_name):
return None
return content['result'][0]
-def get_type(ztype):
+def get_multiplier(inval):
+    ''' Parse the multiplier; return (value, use_multiplier flag)
+    '''
+    if inval is None or inval == '':
+ return None, 0
+
+ rval = None
+ try:
+ rval = int(inval)
+ except ValueError:
+ pass
+
+ if rval:
+ return rval, 1
+
+ return rval, 0
+
+def get_zabbix_type(ztype):
'''
+    Determine which type of discovery rule this is
'''
@@ -87,6 +104,7 @@ def get_type(ztype):
'telnet': 14,
'calculated': 15,
'JMX': 16,
+ 'SNMP trap': 17,
}
for typ in _types.keys():
@@ -153,16 +171,21 @@ def main():
name=dict(default=None, type='str'),
key=dict(default=None, type='str'),
description=dict(default=None, type='str'),
+ template_name=dict(default=None, type='str'),
interfaceid=dict(default=None, type='int'),
- ztype=dict(default='trapper', type='str'),
+ zabbix_type=dict(default='trapper', type='str'),
value_type=dict(default='float', type='str'),
delay=dict(default=60, type='int'),
lifetime=dict(default=30, type='int'),
state=dict(default='present', type='str'),
status=dict(default='enabled', type='str'),
applications=dict(default=[], type='list'),
- template_name=dict(default=None, type='str'),
discoveryrule_key=dict(default=None, type='str'),
+ interval=dict(default=60, type='int'),
+ delta=dict(default=0, type='int'),
+ multiplier=dict(default=None, type='str'),
+ units=dict(default=None, type='str'),
+
),
#supports_check_mode=True
)
@@ -205,15 +228,23 @@ def main():
# Create and Update
if state == 'present':
+
+ formula, use_multiplier = get_multiplier(module.params['multiplier'])
+
params = {'name': module.params['name'],
'key_': module.params['key'],
'hostid': template['templateid'],
'interfaceid': module.params['interfaceid'],
'ruleid': get_rule_id(zapi, module.params['discoveryrule_key'], template['templateid']),
- 'type': get_type(module.params['ztype']),
+ 'type': get_zabbix_type(module.params['zabbix_type']),
'value_type': get_value_type(module.params['value_type']),
'applications': get_app_ids(zapi, module.params['applications'], template['templateid']),
+ 'formula': formula,
+ 'multiplier': use_multiplier,
'description': module.params['description'],
+ 'units': module.params['units'],
+ 'delay': module.params['interval'],
+ 'delta': module.params['delta'],
}
if params['type'] in [2, 5, 7, 8, 11, 15]:
diff --git a/roles/lib_zabbix/tasks/create_template.yml b/roles/lib_zabbix/tasks/create_template.yml
index ac9cf756b..44c4e6766 100644
--- a/roles/lib_zabbix/tasks/create_template.yml
+++ b/roles/lib_zabbix/tasks/create_template.yml
@@ -84,6 +84,10 @@
template_name: "{{ template.name }}"
applications: "{{ item.applications }}"
description: "{{ item.description | default('', True) }}"
+ multiplier: "{{ item.multiplier | default('', True) }}"
+ units: "{{ item.units | default('', True) }}"
+ interval: "{{ item.interval | default(60, True) }}"
+ delta: "{{ item.delta | default(0, True) }}"
with_items: template.zitemprototypes
when: template.zitemprototypes is defined
diff --git a/roles/openshift_ansible_inventory/tasks/main.yml b/roles/openshift_ansible_inventory/tasks/main.yml
index 9cc15c0a8..f6919dada 100644
--- a/roles/openshift_ansible_inventory/tasks/main.yml
+++ b/roles/openshift_ansible_inventory/tasks/main.yml
@@ -1,12 +1,16 @@
---
- yum:
- name: openshift-ansible-inventory
+ name: "{{ item }}"
state: present
+ with_items:
+ - openshift-ansible-inventory
+ - openshift-ansible-inventory-aws
+ - openshift-ansible-inventory-gce
- name:
copy:
content: "{{ oo_inventory_accounts | to_nice_yaml }}"
- dest: /etc/ansible/multi_ec2.yaml
+ dest: /etc/ansible/multi_inventory.yaml
group: "{{ oo_inventory_group }}"
owner: "{{ oo_inventory_owner }}"
mode: "0640"
@@ -20,17 +24,17 @@
- file:
state: link
- src: /usr/share/ansible/inventory/multi_ec2.py
- dest: /etc/ansible/inventory/multi_ec2.py
+ src: /usr/share/ansible/inventory/multi_inventory.py
+ dest: /etc/ansible/inventory/multi_inventory.py
owner: root
group: libra_ops
# This cron uses the above location to call its job
- name: Cron to keep cache fresh
cron:
- name: 'multi_ec2_inventory'
+ name: 'multi_inventory'
minute: '*/10'
- job: '/usr/share/ansible/inventory/multi_ec2.py --refresh-cache &> /dev/null'
+ job: '/usr/share/ansible/inventory/multi_inventory.py --refresh-cache &> /dev/null'
when: oo_cron_refresh_cache is defined and oo_cron_refresh_cache
- name: Set cache location
diff --git a/roles/openshift_common/tasks/main.yml b/roles/openshift_common/tasks/main.yml
index 73bd28630..38d5a08e4 100644
--- a/roles/openshift_common/tasks/main.yml
+++ b/roles/openshift_common/tasks/main.yml
@@ -1,4 +1,8 @@
---
+- fail:
+  msg: Flannel cannot be used with the OpenShift SDN
+ when: openshift_use_openshift_sdn | default(false) | bool and openshift_use_flannel | default(false) | bool
+
- name: Set common Cluster facts
openshift_facts:
role: common
@@ -13,6 +17,7 @@
sdn_network_plugin_name: "{{ os_sdn_network_plugin_name | default(None) }}"
deployment_type: "{{ openshift_deployment_type }}"
use_fluentd: "{{ openshift_use_fluentd | default(None) }}"
+ use_flannel: "{{ openshift_use_flannel | default(None) }}"
- name: Set hostname
hostname: name={{ openshift.common.hostname }}
diff --git a/roles/openshift_examples/defaults/main.yml b/roles/openshift_examples/defaults/main.yml
index 2043985ec..8e8bc6868 100644
--- a/roles/openshift_examples/defaults/main.yml
+++ b/roles/openshift_examples/defaults/main.yml
@@ -14,5 +14,7 @@ db_templates_base: "{{ examples_base }}/db-templates"
xpaas_image_streams: "{{ examples_base }}/xpaas-streams/jboss-image-streams.json"
xpaas_templates_base: "{{ examples_base }}/xpaas-templates"
quickstarts_base: "{{ examples_base }}/quickstart-templates"
+infrastructure_origin_base: "{{ examples_base }}/infrastructure-templates/origin"
+infrastructure_enterprise_base: "{{ examples_base }}/infrastructure-templates/enterprise"
openshift_examples_import_command: "create"
diff --git a/roles/openshift_examples/examples-sync.sh b/roles/openshift_examples/examples-sync.sh
index 21137e31b..3fda86907 100755
--- a/roles/openshift_examples/examples-sync.sh
+++ b/roles/openshift_examples/examples-sync.sh
@@ -7,8 +7,10 @@
EXAMPLES_BASE=$(pwd)/files/examples
find files/examples -name '*.json' -delete
+find files/examples -name '*.yaml' -delete
TEMP=`mktemp -d`
pushd $TEMP
+
wget https://github.com/openshift/origin/archive/master.zip -O origin-master.zip
wget https://github.com/openshift/django-ex/archive/master.zip -O django-ex-master.zip
wget https://github.com/openshift/rails-ex/archive/master.zip -O rails-ex-master.zip
@@ -33,5 +35,11 @@ cp dancer-ex-master/openshift/templates/* ${EXAMPLES_BASE}/quickstart-templates/
cp cakephp-ex-master/openshift/templates/* ${EXAMPLES_BASE}/quickstart-templates/
mv application-templates-master/jboss-image-streams.json ${EXAMPLES_BASE}/xpaas-streams/
find application-templates-master/ -name '*.json' ! -wholename '*secret*' -exec mv {} ${EXAMPLES_BASE}/xpaas-templates/ \;
+
+wget https://raw.githubusercontent.com/openshift/origin-metrics/master/metrics.yaml -O ${EXAMPLES_BASE}/infrastructure-templates/origin/metrics-deployer.yaml
+cp ${EXAMPLES_BASE}/infrastructure-templates/origin/metrics-*.yaml ${EXAMPLES_BASE}/infrastructure-templates/enterprise/
+wget https://raw.githubusercontent.com/openshift/origin-aggregated-logging/master/deployment/deployer.yaml -O ${EXAMPLES_BASE}/infrastructure-templates/origin/logging-deployer.yaml
+wget https://raw.githubusercontent.com/openshift/origin-aggregated-logging/enterprise/deployment/deployer.yaml -O ${EXAMPLES_BASE}/infrastructure-templates/enterprise/logging-deployer.yaml
+
popd
git diff files/examples
diff --git a/roles/openshift_examples/files/examples/infrastructure-templates/enterprise/logging-deployer.yaml b/roles/openshift_examples/files/examples/infrastructure-templates/enterprise/logging-deployer.yaml
new file mode 100644
index 000000000..b3b60bf9b
--- /dev/null
+++ b/roles/openshift_examples/files/examples/infrastructure-templates/enterprise/logging-deployer.yaml
@@ -0,0 +1,151 @@
+apiVersion: "v1"
+kind: "Template"
+metadata:
+ name: logging-deployer-template
+ annotations:
+ description: "Template for deploying everything needed for aggregated logging. Requires cluster-admin 'logging-deployer' service account and 'logging-deployer' secret."
+ tags: "infrastructure"
+labels:
+ logging-infra: deployer
+ provider: openshift
+ component: deployer
+objects:
+-
+ apiVersion: v1
+ kind: Pod
+ metadata:
+ generateName: logging-deployer-
+ spec:
+ containers:
+ - image: ${IMAGE_PREFIX}logging-deployment:${IMAGE_VERSION}
+ imagePullPolicy: Always
+ name: deployer
+ volumeMounts:
+ - name: secret
+ mountPath: /secret
+ readOnly: true
+ - name: empty
+ mountPath: /etc/deploy
+ env:
+ - name: PROJECT
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: IMAGE_PREFIX
+ value: ${IMAGE_PREFIX}
+ - name: IMAGE_VERSION
+ value: ${IMAGE_VERSION}
+ - name: ENABLE_OPS_CLUSTER
+ value: ${ENABLE_OPS_CLUSTER}
+ - name: KIBANA_HOSTNAME
+ value: ${KIBANA_HOSTNAME}
+ - name: KIBANA_OPS_HOSTNAME
+ value: ${KIBANA_OPS_HOSTNAME}
+ - name: PUBLIC_MASTER_URL
+ value: ${PUBLIC_MASTER_URL}
+ - name: MASTER_URL
+ value: ${MASTER_URL}
+ - name: ES_INSTANCE_RAM
+ value: ${ES_INSTANCE_RAM}
+ - name: ES_CLUSTER_SIZE
+ value: ${ES_CLUSTER_SIZE}
+ - name: ES_NODE_QUORUM
+ value: ${ES_NODE_QUORUM}
+ - name: ES_RECOVER_AFTER_NODES
+ value: ${ES_RECOVER_AFTER_NODES}
+ - name: ES_RECOVER_EXPECTED_NODES
+ value: ${ES_RECOVER_EXPECTED_NODES}
+ - name: ES_RECOVER_AFTER_TIME
+ value: ${ES_RECOVER_AFTER_TIME}
+ - name: ES_OPS_INSTANCE_RAM
+ value: ${ES_OPS_INSTANCE_RAM}
+ - name: ES_OPS_CLUSTER_SIZE
+ value: ${ES_OPS_CLUSTER_SIZE}
+ - name: ES_OPS_NODE_QUORUM
+ value: ${ES_OPS_NODE_QUORUM}
+ - name: ES_OPS_RECOVER_AFTER_NODES
+ value: ${ES_OPS_RECOVER_AFTER_NODES}
+ - name: ES_OPS_RECOVER_EXPECTED_NODES
+ value: ${ES_OPS_RECOVER_EXPECTED_NODES}
+ - name: ES_OPS_RECOVER_AFTER_TIME
+ value: ${ES_OPS_RECOVER_AFTER_TIME}
+ dnsPolicy: ClusterFirst
+ restartPolicy: Never
+ serviceAccount: logging-deployer
+ volumes:
+ - name: empty
+ emptyDir: {}
+ - name: secret
+ secret:
+ secretName: logging-deployer
+parameters:
+-
+ description: 'Specify prefix for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set prefix "openshift/origin-"'
+ name: IMAGE_PREFIX
+ value: "registry.access.redhat.com/openshift3/"
+-
+ description: 'Specify version for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set version "v1.1"'
+ name: IMAGE_VERSION
+ value: "3.1.0"
+-
+ description: "If true, set up to use a second ES cluster for ops logs."
+ name: ENABLE_OPS_CLUSTER
+ value: "false"
+-
+ description: "External hostname where clients will reach kibana"
+ name: KIBANA_HOSTNAME
+ required: true
+-
+ description: "External hostname at which admins will visit the ops Kibana."
+ name: KIBANA_OPS_HOSTNAME
+ value: kibana-ops.example.com
+-
+ description: "External URL for the master, for OAuth purposes"
+ name: PUBLIC_MASTER_URL
+ required: true
+-
+ description: "Internal URL for the master, for authentication retrieval"
+ name: MASTER_URL
+ value: "https://kubernetes.default.svc.cluster.local"
+-
+ description: "Amount of RAM to reserve per ElasticSearch instance."
+ name: ES_INSTANCE_RAM
+ value: "8G"
+-
+ description: "How many instances of ElasticSearch to deploy."
+ name: ES_CLUSTER_SIZE
+ required: true
+-
+ description: "Number of nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1."
+ name: ES_NODE_QUORUM
+-
+ description: "Number of nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_CLUSTER_SIZE."
+ name: ES_RECOVER_AFTER_NODES
+-
+ description: "Number of nodes desired to be present before the cluster will recover from a full restart. By default, ES_CLUSTER_SIZE."
+ name: ES_RECOVER_EXPECTED_NODES
+-
+ description: "Timeout for *expected* nodes to be present when cluster is recovering from a full restart."
+ name: ES_RECOVER_AFTER_TIME
+ value: "5m"
+-
+ description: "Amount of RAM to reserve per ops ElasticSearch instance."
+ name: ES_OPS_INSTANCE_RAM
+ value: "8G"
+-
+ description: "How many ops instances of ElasticSearch to deploy. By default, ES_CLUSTER_SIZE."
+ name: ES_OPS_CLUSTER_SIZE
+-
+  description: "Number of ops nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_OPS_CLUSTER_SIZE / 2 + 1."
+ name: ES_OPS_NODE_QUORUM
+-
+ description: "Number of ops nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_OPS_CLUSTER_SIZE."
+ name: ES_OPS_RECOVER_AFTER_NODES
+-
+ description: "Number of ops nodes desired to be present before the cluster will recover from a full restart. By default, ES_OPS_CLUSTER_SIZE."
+ name: ES_OPS_RECOVER_EXPECTED_NODES
+-
+ description: "Timeout for *expected* ops nodes to be present when cluster is recovering from a full restart."
+ name: ES_OPS_RECOVER_AFTER_TIME
+ value: "5m"
+
diff --git a/roles/openshift_examples/files/examples/infrastructure-templates/enterprise/metrics-deployer.yaml b/roles/openshift_examples/files/examples/infrastructure-templates/enterprise/metrics-deployer.yaml
new file mode 100644
index 000000000..d823b2587
--- /dev/null
+++ b/roles/openshift_examples/files/examples/infrastructure-templates/enterprise/metrics-deployer.yaml
@@ -0,0 +1,116 @@
+#!/bin/bash
+#
+# Copyright 2014-2015 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+apiVersion: "v1"
+kind: "Template"
+metadata:
+ name: metrics-deployer-template
+ annotations:
+ description: "Template for deploying the required Metrics integration. Requires cluster-admin 'metrics-deployer' service account and 'metrics-deployer' secret."
+ tags: "infrastructure"
+labels:
+ metrics-infra: deployer
+ provider: openshift
+ component: deployer
+objects:
+-
+ apiVersion: v1
+ kind: Pod
+ metadata:
+ generateName: metrics-deployer-
+ spec:
+ containers:
+ - image: ${IMAGE_PREFIX}metrics-deployer:${IMAGE_VERSION}
+ name: deployer
+ volumeMounts:
+ - name: secret
+ mountPath: /secret
+ readOnly: true
+ - name: empty
+ mountPath: /etc/deploy
+ env:
+ - name: PROJECT
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: IMAGE_PREFIX
+ value: ${IMAGE_PREFIX}
+ - name: IMAGE_VERSION
+ value: ${IMAGE_VERSION}
+ - name: PUBLIC_MASTER_URL
+ value: ${PUBLIC_MASTER_URL}
+ - name: MASTER_URL
+ value: ${MASTER_URL}
+ - name: REDEPLOY
+ value: ${REDEPLOY}
+ - name: USE_PERSISTENT_STORAGE
+ value: ${USE_PERSISTENT_STORAGE}
+ - name: HAWKULAR_METRICS_HOSTNAME
+ value: ${HAWKULAR_METRICS_HOSTNAME}
+ - name: CASSANDRA_NODES
+ value: ${CASSANDRA_NODES}
+ - name: CASSANDRA_PV_SIZE
+ value: ${CASSANDRA_PV_SIZE}
+ - name: METRIC_DURATION
+ value: ${METRIC_DURATION}
+ dnsPolicy: ClusterFirst
+ restartPolicy: Never
+ serviceAccount: metrics-deployer
+ volumes:
+ - name: empty
+ emptyDir: {}
+ - name: secret
+ secret:
+ secretName: metrics-deployer
+parameters:
+-
+ description: 'Specify prefix for metrics components; e.g. for "openshift/origin-metrics-deployer:v1.1", set prefix "openshift/origin-"'
+ name: IMAGE_PREFIX
+ value: "hawkular/"
+-
+ description: 'Specify version for metrics components; e.g. for "openshift/origin-metrics-deployer:v1.1", set version "v1.1"'
+ name: IMAGE_VERSION
+ value: "0.7.0-SNAPSHOT"
+-
+ description: "Internal URL for the master, for authentication retrieval"
+ name: MASTER_URL
+ value: "https://kubernetes.default.svc:443"
+-
+ description: "External hostname where clients will reach Hawkular Metrics"
+ name: HAWKULAR_METRICS_HOSTNAME
+ required: true
+-
+ description: "If set to true the deployer will try and delete all the existing components before trying to redeploy."
+ name: REDEPLOY
+ value: "false"
+-
+ description: "Set to true for persistent storage, set to false to use non persistent storage"
+ name: USE_PERSISTENT_STORAGE
+ value: "true"
+-
+ description: "The number of Cassandra Nodes to deploy for the initial cluster"
+ name: CASSANDRA_NODES
+ value: "1"
+-
+ description: "The persistent volume size for each of the Cassandra nodes"
+ name: CASSANDRA_PV_SIZE
+ value: "1Gi"
+-
+ description: "How many days metrics should be stored for."
+ name: METRIC_DURATION
+ value: "7"
diff --git a/roles/openshift_examples/files/examples/infrastructure-templates/origin/logging-deployer.yaml b/roles/openshift_examples/files/examples/infrastructure-templates/origin/logging-deployer.yaml
new file mode 100644
index 000000000..4c798e148
--- /dev/null
+++ b/roles/openshift_examples/files/examples/infrastructure-templates/origin/logging-deployer.yaml
@@ -0,0 +1,151 @@
+apiVersion: "v1"
+kind: "Template"
+metadata:
+ name: logging-deployer-template
+ annotations:
+ description: "Template for deploying everything needed for aggregated logging. Requires cluster-admin 'logging-deployer' service account and 'logging-deployer' secret."
+ tags: "infrastructure"
+labels:
+ logging-infra: deployer
+ provider: openshift
+ component: deployer
+objects:
+-
+ apiVersion: v1
+ kind: Pod
+ metadata:
+ generateName: logging-deployer-
+ spec:
+ containers:
+ - image: ${IMAGE_PREFIX}logging-deployment:${IMAGE_VERSION}
+ imagePullPolicy: Always
+ name: deployer
+ volumeMounts:
+ - name: secret
+ mountPath: /secret
+ readOnly: true
+ - name: empty
+ mountPath: /etc/deploy
+ env:
+ - name: PROJECT
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: IMAGE_PREFIX
+ value: ${IMAGE_PREFIX}
+ - name: IMAGE_VERSION
+ value: ${IMAGE_VERSION}
+ - name: ENABLE_OPS_CLUSTER
+ value: ${ENABLE_OPS_CLUSTER}
+ - name: KIBANA_HOSTNAME
+ value: ${KIBANA_HOSTNAME}
+ - name: KIBANA_OPS_HOSTNAME
+ value: ${KIBANA_OPS_HOSTNAME}
+ - name: PUBLIC_MASTER_URL
+ value: ${PUBLIC_MASTER_URL}
+ - name: MASTER_URL
+ value: ${MASTER_URL}
+ - name: ES_INSTANCE_RAM
+ value: ${ES_INSTANCE_RAM}
+ - name: ES_CLUSTER_SIZE
+ value: ${ES_CLUSTER_SIZE}
+ - name: ES_NODE_QUORUM
+ value: ${ES_NODE_QUORUM}
+ - name: ES_RECOVER_AFTER_NODES
+ value: ${ES_RECOVER_AFTER_NODES}
+ - name: ES_RECOVER_EXPECTED_NODES
+ value: ${ES_RECOVER_EXPECTED_NODES}
+ - name: ES_RECOVER_AFTER_TIME
+ value: ${ES_RECOVER_AFTER_TIME}
+ - name: ES_OPS_INSTANCE_RAM
+ value: ${ES_OPS_INSTANCE_RAM}
+ - name: ES_OPS_CLUSTER_SIZE
+ value: ${ES_OPS_CLUSTER_SIZE}
+ - name: ES_OPS_NODE_QUORUM
+ value: ${ES_OPS_NODE_QUORUM}
+ - name: ES_OPS_RECOVER_AFTER_NODES
+ value: ${ES_OPS_RECOVER_AFTER_NODES}
+ - name: ES_OPS_RECOVER_EXPECTED_NODES
+ value: ${ES_OPS_RECOVER_EXPECTED_NODES}
+ - name: ES_OPS_RECOVER_AFTER_TIME
+ value: ${ES_OPS_RECOVER_AFTER_TIME}
+ dnsPolicy: ClusterFirst
+ restartPolicy: Never
+ serviceAccount: logging-deployer
+ volumes:
+ - name: empty
+ emptyDir: {}
+ - name: secret
+ secret:
+ secretName: logging-deployer
+parameters:
+-
+ description: 'Specify prefix for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set prefix "openshift/origin-"'
+ name: IMAGE_PREFIX
+ value: "docker.io/openshift/origin-"
+-
+ description: 'Specify version for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set version "v1.1"'
+ name: IMAGE_VERSION
+ value: "latest"
+-
+ description: "If true, set up to use a second ES cluster for ops logs."
+ name: ENABLE_OPS_CLUSTER
+ value: "false"
+-
+ description: "External hostname where clients will reach kibana"
+ name: KIBANA_HOSTNAME
+ required: true
+-
+ description: "External hostname at which admins will visit the ops Kibana."
+ name: KIBANA_OPS_HOSTNAME
+ value: kibana-ops.example.com
+-
+ description: "External URL for the master, for OAuth purposes"
+ name: PUBLIC_MASTER_URL
+ required: true
+-
+ description: "Internal URL for the master, for authentication retrieval"
+ name: MASTER_URL
+ value: "https://kubernetes.default.svc.cluster.local"
+-
+ description: "Amount of RAM to reserve per ElasticSearch instance."
+ name: ES_INSTANCE_RAM
+ value: "8G"
+-
+ description: "How many instances of ElasticSearch to deploy."
+ name: ES_CLUSTER_SIZE
+ required: true
+-
+ description: "Number of nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1."
+ name: ES_NODE_QUORUM
+-
+ description: "Number of nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_CLUSTER_SIZE."
+ name: ES_RECOVER_AFTER_NODES
+-
+ description: "Number of nodes desired to be present before the cluster will recover from a full restart. By default, ES_CLUSTER_SIZE."
+ name: ES_RECOVER_EXPECTED_NODES
+-
+ description: "Timeout for *expected* nodes to be present when cluster is recovering from a full restart."
+ name: ES_RECOVER_AFTER_TIME
+ value: "5m"
+-
+ description: "Amount of RAM to reserve per ops ElasticSearch instance."
+ name: ES_OPS_INSTANCE_RAM
+ value: "8G"
+-
+ description: "How many ops instances of ElasticSearch to deploy. By default, ES_CLUSTER_SIZE."
+ name: ES_OPS_CLUSTER_SIZE
+-
+  description: "Number of ops nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_OPS_CLUSTER_SIZE / 2 + 1."
+ name: ES_OPS_NODE_QUORUM
+-
+ description: "Number of ops nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_OPS_CLUSTER_SIZE."
+ name: ES_OPS_RECOVER_AFTER_NODES
+-
+ description: "Number of ops nodes desired to be present before the cluster will recover from a full restart. By default, ES_OPS_CLUSTER_SIZE."
+ name: ES_OPS_RECOVER_EXPECTED_NODES
+-
+ description: "Timeout for *expected* ops nodes to be present when cluster is recovering from a full restart."
+ name: ES_OPS_RECOVER_AFTER_TIME
+ value: "5m"
+
diff --git a/roles/openshift_examples/files/examples/infrastructure-templates/origin/metrics-deployer.yaml b/roles/openshift_examples/files/examples/infrastructure-templates/origin/metrics-deployer.yaml
new file mode 100644
index 000000000..d823b2587
--- /dev/null
+++ b/roles/openshift_examples/files/examples/infrastructure-templates/origin/metrics-deployer.yaml
@@ -0,0 +1,116 @@
+#!/bin/bash
+#
+# Copyright 2014-2015 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+apiVersion: "v1"
+kind: "Template"
+metadata:
+ name: metrics-deployer-template
+ annotations:
+ description: "Template for deploying the required Metrics integration. Requires cluster-admin 'metrics-deployer' service account and 'metrics-deployer' secret."
+ tags: "infrastructure"
+labels:
+ metrics-infra: deployer
+ provider: openshift
+ component: deployer
+objects:
+-
+ apiVersion: v1
+ kind: Pod
+ metadata:
+ generateName: metrics-deployer-
+ spec:
+ containers:
+ - image: ${IMAGE_PREFIX}metrics-deployer:${IMAGE_VERSION}
+ name: deployer
+ volumeMounts:
+ - name: secret
+ mountPath: /secret
+ readOnly: true
+ - name: empty
+ mountPath: /etc/deploy
+ env:
+ - name: PROJECT
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: IMAGE_PREFIX
+ value: ${IMAGE_PREFIX}
+ - name: IMAGE_VERSION
+ value: ${IMAGE_VERSION}
+ - name: PUBLIC_MASTER_URL
+ value: ${PUBLIC_MASTER_URL}
+ - name: MASTER_URL
+ value: ${MASTER_URL}
+ - name: REDEPLOY
+ value: ${REDEPLOY}
+ - name: USE_PERSISTENT_STORAGE
+ value: ${USE_PERSISTENT_STORAGE}
+ - name: HAWKULAR_METRICS_HOSTNAME
+ value: ${HAWKULAR_METRICS_HOSTNAME}
+ - name: CASSANDRA_NODES
+ value: ${CASSANDRA_NODES}
+ - name: CASSANDRA_PV_SIZE
+ value: ${CASSANDRA_PV_SIZE}
+ - name: METRIC_DURATION
+ value: ${METRIC_DURATION}
+ dnsPolicy: ClusterFirst
+ restartPolicy: Never
+ serviceAccount: metrics-deployer
+ volumes:
+ - name: empty
+ emptyDir: {}
+ - name: secret
+ secret:
+ secretName: metrics-deployer
+parameters:
+-
+ description: 'Specify prefix for metrics components; e.g. for "openshift/origin-metrics-deployer:v1.1", set prefix "openshift/origin-"'
+ name: IMAGE_PREFIX
+ value: "hawkular/"
+-
+ description: 'Specify version for metrics components; e.g. for "openshift/origin-metrics-deployer:v1.1", set version "v1.1"'
+ name: IMAGE_VERSION
+ value: "0.7.0-SNAPSHOT"
+-
+ description: "Internal URL for the master, for authentication retrieval"
+ name: MASTER_URL
+ value: "https://kubernetes.default.svc:443"
+-
+ description: "External hostname where clients will reach Hawkular Metrics"
+ name: HAWKULAR_METRICS_HOSTNAME
+ required: true
+-
+ description: "If set to true the deployer will try and delete all the existing components before trying to redeploy."
+ name: REDEPLOY
+ value: "false"
+-
+ description: "Set to true for persistent storage, set to false to use non persistent storage"
+ name: USE_PERSISTENT_STORAGE
+ value: "true"
+-
+ description: "The number of Cassandra Nodes to deploy for the initial cluster"
+ name: CASSANDRA_NODES
+ value: "1"
+-
+ description: "The persistent volume size for each of the Cassandra nodes"
+ name: CASSANDRA_PV_SIZE
+ value: "1Gi"
+-
+ description: "How many days metrics should be stored for."
+ name: METRIC_DURATION
+ value: "7"
diff --git a/roles/openshift_examples/tasks/main.yml b/roles/openshift_examples/tasks/main.yml
index 40b7a5d6e..f48e207e7 100644
--- a/roles/openshift_examples/tasks/main.yml
+++ b/roles/openshift_examples/tasks/main.yml
@@ -37,6 +37,22 @@
failed_when: "'already exists' not in oex_import_quickstarts.stderr and oex_import_quickstarts.rc != 0"
changed_when: false
+- name: Import origin infrastructure-templates
+ command: >
+ {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} -n openshift -f {{ infrastructure_origin_base }}
+ when: openshift_examples_load_centos | bool
+ register: oex_import_infrastructure
+ failed_when: "'already exists' not in oex_import_infrastructure.stderr and oex_import_infrastructure.rc != 0"
+ changed_when: false
+
+- name: Import enterprise infrastructure-templates
+ command: >
+ {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} -n openshift -f {{ infrastructure_enterprise_base }}
+ when: openshift_examples_load_rhel | bool
+ register: oex_import_infrastructure
+ failed_when: "'already exists' not in oex_import_infrastructure.stderr and oex_import_infrastructure.rc != 0"
+ changed_when: false
+
- name: Import xPaas image streams
command: >
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 163e67f62..2e1075aca 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -20,6 +20,8 @@ EXAMPLES = '''
import ConfigParser
import copy
import os
+import StringIO
+import yaml
from distutils.util import strtobool
from distutils.version import LooseVersion
from netaddr import IPNetwork
@@ -307,6 +309,23 @@ def set_fluentd_facts_if_unset(facts):
facts['common']['use_fluentd'] = use_fluentd
return facts
+def set_flannel_facts_if_unset(facts):
+ """ Set flannel facts if not already present in facts dict
+ Args:
+ facts (dict): existing facts
+ Returns:
+ dict: the facts dict updated with the flannel
+ facts if they were not already present
+
+ """
+ if 'common' in facts:
+ if 'use_flannel' not in facts['common']:
+ use_flannel = False
+ facts['common']['use_flannel'] = use_flannel
+ return facts
+
def set_node_schedulability(facts):
""" Set schedulable facts if not already present in facts dict
Args:
@@ -407,7 +426,7 @@ def set_identity_providers_if_unset(facts):
name='allow_all', challenge=True, login=True,
kind='AllowAllPasswordIdentityProvider'
)
- if deployment_type == 'enterprise':
+ if deployment_type in ['enterprise', 'atomic-enterprise', 'openshift-enterprise']:
identity_provider = dict(
name='deny_all', challenge=True, login=True,
kind='DenyAllPasswordIdentityProvider'
@@ -484,12 +503,16 @@ def set_aggregate_facts(facts):
dict: the facts dict updated with aggregated facts
"""
all_hostnames = set()
+ internal_hostnames = set()
if 'common' in facts:
all_hostnames.add(facts['common']['hostname'])
all_hostnames.add(facts['common']['public_hostname'])
all_hostnames.add(facts['common']['ip'])
all_hostnames.add(facts['common']['public_ip'])
+ internal_hostnames.add(facts['common']['hostname'])
+ internal_hostnames.add(facts['common']['ip'])
+
if 'master' in facts:
# FIXME: not sure why but facts['dns']['domain'] fails
cluster_domain = 'cluster.local'
@@ -497,14 +520,61 @@ def set_aggregate_facts(facts):
all_hostnames.add(facts['master']['cluster_hostname'])
if 'cluster_public_hostname' in facts['master']:
all_hostnames.add(facts['master']['cluster_public_hostname'])
- all_hostnames.update(['openshift', 'openshift.default', 'openshift.default.svc',
- 'openshift.default.svc.' + cluster_domain, 'kubernetes', 'kubernetes.default',
- 'kubernetes.default.svc', 'kubernetes.default.svc.' + cluster_domain])
+ svc_names = ['openshift', 'openshift.default', 'openshift.default.svc',
+ 'openshift.default.svc.' + cluster_domain, 'kubernetes', 'kubernetes.default',
+ 'kubernetes.default.svc', 'kubernetes.default.svc.' + cluster_domain]
+ all_hostnames.update(svc_names)
+ internal_hostnames.update(svc_names)
first_svc_ip = str(IPNetwork(facts['master']['portal_net'])[1])
all_hostnames.add(first_svc_ip)
+ internal_hostnames.add(first_svc_ip)
facts['common']['all_hostnames'] = list(all_hostnames)
+ facts['common']['internal_hostnames'] = list(internal_hostnames)
+
+ return facts
+
+
+def set_etcd_facts_if_unset(facts):
+ """
+ If using embedded etcd, loads the data directory from master-config.yaml.
+
+ If using standalone etcd, loads ETCD_DATA_DIR from etcd.conf.
+ If anything goes wrong parsing these, the fact will not be set.
+ """
+ if 'etcd' in facts:
+ if 'master' in facts and facts['master']['embedded_etcd']:
+ try:
+ # Parse master config to find actual etcd data dir:
+ master_cfg_path = os.path.join(facts['common']['config_base'],
+ 'master/master-config.yaml')
+ master_cfg_f = open(master_cfg_path, 'r')
+ config = yaml.safe_load(master_cfg_f.read())
+ master_cfg_f.close()
+
+ facts['etcd']['etcd_data_dir'] = \
+ config['etcdConfig']['storageDirectory']
+ # We don't want exceptions bubbling up here:
+ # pylint: disable=broad-except
+ except Exception:
+ pass
+ else:
+ # Read ETCD_DATA_DIR from /etc/etcd/etcd.conf:
+ try:
+ # Add a fake section for parsing:
+ ini_str = '[root]\n' + open('/etc/etcd/etcd.conf', 'r').read()
+ ini_fp = StringIO.StringIO(ini_str)
+ config = ConfigParser.RawConfigParser()
+ config.readfp(ini_fp)
+ etcd_data_dir = config.get('root', 'ETCD_DATA_DIR')
+ if etcd_data_dir.startswith('"') and etcd_data_dir.endswith('"'):
+ etcd_data_dir = etcd_data_dir[1:-1]
+ facts['etcd']['etcd_data_dir'] = etcd_data_dir
+ # We don't want exceptions bubbling up here:
+ # pylint: disable=broad-except
+ except Exception:
+ pass
return facts
def set_deployment_facts_if_unset(facts):
@@ -534,21 +604,18 @@ def set_deployment_facts_if_unset(facts):
config_base = '/etc/origin'
if deployment_type in ['enterprise', 'online']:
config_base = '/etc/openshift'
+ # Handle upgrade scenarios when symlinks don't yet exist:
+ if not os.path.exists(config_base) and os.path.exists('/etc/openshift'):
+ config_base = '/etc/openshift'
facts['common']['config_base'] = config_base
if 'data_dir' not in facts['common']:
data_dir = '/var/lib/origin'
if deployment_type in ['enterprise', 'online']:
data_dir = '/var/lib/openshift'
+ # Handle upgrade scenarios when symlinks don't yet exist:
+ if not os.path.exists(data_dir) and os.path.exists('/var/lib/openshift'):
+ data_dir = '/var/lib/openshift'
facts['common']['data_dir'] = data_dir
- facts['common']['version'] = version = get_openshift_version()
- if version is not None:
- if deployment_type == 'origin':
- version_gt_3_1_or_1_1 = LooseVersion(version) > LooseVersion('1.0.6')
- else:
- version_gt_3_1_or_1_1 = LooseVersion(version) > LooseVersion('3.0.2.900')
- else:
- version_gt_3_1_or_1_1 = True
- facts['common']['version_greater_than_3_1_or_1_1'] = version_gt_3_1_or_1_1
for role in ('master', 'node'):
if role in facts:
@@ -582,12 +649,34 @@ def set_deployment_facts_if_unset(facts):
return facts
+def set_version_facts_if_unset(facts):
+ """ Set version facts. This currently includes common.version and
+ common.version_greater_than_3_1_or_1_1.
-def set_sdn_facts_if_unset(facts):
+ Args:
+ facts (dict): existing facts
+ Returns:
+ dict: the facts dict updated with version facts.
+ """
+ if 'common' in facts:
+ deployment_type = facts['common']['deployment_type']
+ facts['common']['version'] = version = get_openshift_version()
+ if version is not None:
+ if deployment_type == 'origin':
+ version_gt_3_1_or_1_1 = LooseVersion(version) > LooseVersion('1.0.6')
+ else:
+ version_gt_3_1_or_1_1 = LooseVersion(version) > LooseVersion('3.0.2.900')
+ else:
+ version_gt_3_1_or_1_1 = True
+ facts['common']['version_greater_than_3_1_or_1_1'] = version_gt_3_1_or_1_1
+ return facts
+
+def set_sdn_facts_if_unset(facts, system_facts):
""" Set sdn facts if not already present in facts dict
Args:
facts (dict): existing facts
+ system_facts (dict): ansible_facts
Returns:
dict: the facts dict updated with the generated sdn facts if they
were not already present
@@ -606,9 +695,18 @@ def set_sdn_facts_if_unset(facts):
if 'sdn_host_subnet_length' not in facts['master']:
facts['master']['sdn_host_subnet_length'] = '8'
- if 'node' in facts:
- if 'sdn_mtu' not in facts['node']:
- facts['node']['sdn_mtu'] = '1450'
+ if 'node' in facts and 'sdn_mtu' not in facts['node']:
+ node_ip = facts['common']['ip']
+
+ # default MTU if interface MTU cannot be detected
+ facts['node']['sdn_mtu'] = '1450'
+
+ for val in system_facts.itervalues():
+ if isinstance(val, dict) and 'mtu' in val:
+ mtu = val['mtu']
+
+ if 'ipv4' in val and val['ipv4'].get('address') == node_ip:
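+                    # leave 50 bytes of headroom for SDN encapsulation overhead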
+ facts['node']['sdn_mtu'] = str(mtu - 50)
return facts
@@ -841,7 +939,7 @@ class OpenShiftFacts(object):
Raises:
OpenShiftFactsUnsupportedRoleError:
"""
- known_roles = ['common', 'master', 'node', 'master_sdn', 'node_sdn', 'dns']
+ known_roles = ['common', 'master', 'node', 'master_sdn', 'node_sdn', 'dns', 'etcd']
def __init__(self, role, filename, local_facts):
self.changed = False
@@ -875,13 +973,16 @@ class OpenShiftFacts(object):
facts = set_url_facts_if_unset(facts)
facts = set_project_cfg_facts_if_unset(facts)
facts = set_fluentd_facts_if_unset(facts)
+ facts = set_flannel_facts_if_unset(facts)
facts = set_node_schedulability(facts)
facts = set_master_selectors(facts)
facts = set_metrics_facts_if_unset(facts)
facts = set_identity_providers_if_unset(facts)
- facts = set_sdn_facts_if_unset(facts)
+ facts = set_sdn_facts_if_unset(facts, self.system_facts)
facts = set_deployment_facts_if_unset(facts)
+ facts = set_version_facts_if_unset(facts)
facts = set_aggregate_facts(facts)
+ facts = set_etcd_facts_if_unset(facts)
return dict(openshift=facts)
def get_defaults(self, roles):
@@ -920,11 +1021,12 @@ class OpenShiftFacts(object):
session_name='ssn', session_secrets_file='',
access_token_max_seconds=86400,
auth_token_max_seconds=500,
- oauth_grant_method='auto', cluster_defer_ha=False)
+ oauth_grant_method='auto')
defaults['master'] = master
if 'node' in roles:
- node = dict(labels={}, annotations={}, portal_net='172.30.0.0/16')
+ node = dict(labels={}, annotations={}, portal_net='172.30.0.0/16',
+ iptables_sync_period='5s')
defaults['node'] = node
return defaults
diff --git a/roles/openshift_facts/tasks/main.yml b/roles/openshift_facts/tasks/main.yml
index a46b45b8c..a28aa7ba2 100644
--- a/roles/openshift_facts/tasks/main.yml
+++ b/roles/openshift_facts/tasks/main.yml
@@ -6,8 +6,11 @@
- ansible_version | version_compare('1.9.0', 'ne')
- ansible_version | version_compare('1.9.0.1', 'ne')
-- name: Ensure python-netaddr is installed
- yum: pkg=python-netaddr state=installed
+- name: Ensure python-netaddr and PyYaml are installed
+ yum: pkg={{ item }} state=installed
+ with_items:
+ - python-netaddr
+ - PyYAML
- name: Gather Cluster facts
openshift_facts:
diff --git a/roles/openshift_master/handlers/main.yml b/roles/openshift_master/handlers/main.yml
index 37028e0f6..4b9500cbd 100644
--- a/roles/openshift_master/handlers/main.yml
+++ b/roles/openshift_master/handlers/main.yml
@@ -2,3 +2,13 @@
- name: restart master
service: name={{ openshift.common.service_type }}-master state=restarted
when: (not openshift_master_ha | bool) and (not master_service_status_changed | default(false))
+
+- name: restart master api
+ service: name={{ openshift.common.service_type }}-master-api state=restarted
+ when: (openshift_master_ha | bool) and (not master_api_service_status_changed | default(false)) and openshift.master.cluster_method == 'native'
+
+# TODO: need to fix up ignore_errors here
+- name: restart master controllers
+ service: name={{ openshift.common.service_type }}-master-controllers state=restarted
+ when: (openshift_master_ha | bool) and (not master_controllers_service_status_changed | default(false)) and openshift.master.cluster_method == 'native'
+ ignore_errors: yes
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index 3a886935f..185bfb8f3 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -9,16 +9,22 @@
when: openshift_master_oauth_grant_method is defined
- fail:
+ msg: "openshift_master_cluster_method must be set to either 'native' or 'pacemaker' for multi-master installations"
+ when: openshift_master_ha | bool and ((openshift_master_cluster_method is not defined) or (openshift_master_cluster_method is defined and openshift_master_cluster_method not in ["native", "pacemaker"]))
+- fail:
+ msg: "'native' high availability is not supported for the requested OpenShift version"
+ when: openshift_master_ha | bool and openshift_master_cluster_method == "native" and not openshift.common.version_greater_than_3_1_or_1_1 | bool
+- fail:
msg: "openshift_master_cluster_password must be set for multi-master installations"
- when: openshift_master_ha | bool and not openshift.master.cluster_defer_ha | bool and openshift_master_cluster_password is not defined
+ when: openshift_master_ha | bool and openshift_master_cluster_method == "pacemaker" and (openshift_master_cluster_password is not defined or not openshift_master_cluster_password)
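
A minimal inventory sketch that satisfies the validations above for a native HA install (hostnames are placeholders):

    # Hypothetical group_vars; openshift_master_cluster_method is the new knob
    openshift_master_cluster_method: native
    openshift_master_cluster_hostname: master-internal.example.com
    openshift_master_cluster_public_hostname: master.example.com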
- name: Set master facts
openshift_facts:
role: master
local_facts:
+ cluster_method: "{{ openshift_master_cluster_method | default(None) }}"
cluster_hostname: "{{ openshift_master_cluster_hostname | default(None) }}"
cluster_public_hostname: "{{ openshift_master_cluster_public_hostname | default(None) }}"
- cluster_defer_ha: "{{ openshift_master_cluster_defer_ha | default(None) }}"
debug_level: "{{ openshift_master_debug_level | default(openshift.common.debug_level) }}"
api_port: "{{ openshift_master_api_port | default(None) }}"
api_url: "{{ openshift_master_api_url | default(None) }}"
@@ -41,6 +47,8 @@
portal_net: "{{ openshift_master_portal_net | default(None) }}"
session_max_seconds: "{{ openshift_master_session_max_seconds | default(None) }}"
session_name: "{{ openshift_master_session_name | default(None) }}"
+ session_auth_secrets: "{{ openshift_master_session_auth_secrets | default(None) }}"
+ session_encryption_secrets: "{{ openshift_master_session_encryption_secrets | default(None) }}"
session_secrets_file: "{{ openshift_master_session_secrets_file | default(None) }}"
access_token_max_seconds: "{{ openshift_master_access_token_max_seconds | default(None) }}"
auth_token_max_seconds: "{{ openshift_master_auth_token_max_seconds | default(None) }}"
@@ -63,6 +71,8 @@
controller_args: "{{ osm_controller_args | default(None) }}"
infra_nodes: "{{ num_infra | default(None) }}"
disabled_features: "{{ osm_disabled_features | default(None) }}"
+ master_count: "{{ openshift_master_count | default(None) }}"
+ controller_lease_ttl: "{{ osm_controller_lease_ttl | default(None) }}"
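
Hypothetical inventory values exercising the two new master facts above (the master config template below defaults the lease TTL to 30 seconds when unset):

    openshift_master_count: 3        # illustrative; one API server per master
    osm_controller_lease_ttl: '45'   # seconds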
- name: Install Master package
yum: pkg={{ openshift.common.service_type }}-master{{ openshift_version }} state=present
@@ -77,7 +87,7 @@
domain: cluster.local
when: openshift.master.embedded_dns
-- name: Create config parent directory if it doesn't exist
+- name: Create config parent directory if it does not exist
file:
path: "{{ openshift_master_config_dir }}"
state: directory
@@ -90,6 +100,8 @@
creates: "{{ openshift_master_policy }}"
notify:
- restart master
+ - restart master api
+ - restart master controllers
- name: Create the scheduler config
template:
@@ -98,6 +110,8 @@
backup: true
notify:
- restart master
+ - restart master api
+ - restart master controllers
- name: Install httpd-tools if needed
yum: pkg=httpd-tools state=present
@@ -120,6 +134,44 @@
when: item.kind == 'HTPasswdPasswordIdentityProvider'
with_items: openshift.master.identity_providers
+# workaround for missing systemd unit files for controllers/api
+- name: Create the api service file
+ template:
+ src: atomic-openshift-master-api.service.j2
+ dest: /usr/lib/systemd/system/{{ openshift.common.service_type }}-master-api.service
+ force: no
+ when: openshift_master_ha | bool and openshift_master_cluster_method == "native"
+- name: Create the controllers service file
+ template:
+ src: atomic-openshift-master-controllers.service.j2
+ dest: /usr/lib/systemd/system/{{ openshift.common.service_type }}-master-controllers.service
+ force: no
+ when: openshift_master_ha | bool and openshift_master_cluster_method == "native"
+- name: Create the api env file
+ template:
+ src: atomic-openshift-master-api.j2
+ dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-api
+ force: no
+ when: openshift_master_ha | bool and openshift_master_cluster_method == "native"
+- name: Create the controllers env file
+ template:
+ src: atomic-openshift-master-controllers.j2
+ dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
+ force: no
+ when: openshift_master_ha | bool and openshift_master_cluster_method == "native"
+- command: systemctl daemon-reload
+ when: openshift_master_ha | bool and openshift_master_cluster_method == "native"
+# end workaround for missing systemd unit files
+
+- name: Create session secrets file
+ template:
+ dest: "{{ openshift.master.session_secrets_file }}"
+ src: sessionSecretsFile.yaml.v1.j2
+ force: no
+ notify:
+ - restart master
+ - restart master api
+
# TODO: add the validate parameter when there is a validation command to run
- name: Create master config
template:
@@ -128,12 +180,15 @@
backup: true
notify:
- restart master
+ - restart master api
+ - restart master controllers
- name: Configure master settings
lineinfile:
dest: /etc/sysconfig/{{ openshift.common.service_type }}-master
regexp: "{{ item.regex }}"
line: "{{ item.line }}"
+ create: yes
with_items:
- regex: '^OPTIONS='
line: "OPTIONS=--loglevel={{ openshift.master.debug_level }}"
@@ -142,6 +197,34 @@
notify:
- restart master
+- name: Configure master api settings
+ lineinfile:
+ dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-api
+ regexp: "{{ item.regex }}"
+ line: "{{ item.line }}"
+ with_items:
+ - regex: '^OPTIONS='
+ line: "OPTIONS=--loglevel={{ openshift.master.debug_level }} --listen=https://0.0.0.0:8443 --master=https://{{ openshift.common.ip }}:8443"
+ - regex: '^CONFIG_FILE='
+ line: "CONFIG_FILE={{ openshift_master_config_file }}"
+ when: openshift_master_ha | bool and openshift_master_cluster_method == "native"
+ notify:
+ - restart master api
+
+- name: Configure master controller settings
+ lineinfile:
+ dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
+ regexp: "{{ item.regex }}"
+ line: "{{ item.line }}"
+ with_items:
+ - regex: '^OPTIONS='
+ line: "OPTIONS=--loglevel={{ openshift.master.debug_level }} --listen=https://0.0.0.0:8444"
+ - regex: '^CONFIG_FILE='
+ line: "CONFIG_FILE={{ openshift_master_config_file }}"
+ when: openshift_master_ha | bool and openshift_master_cluster_method == "native"
+ notify:
+ - restart master controllers
+
- name: Start and enable master
service: name={{ openshift.common.service_type }}-master enabled=yes state=started
when: not openshift_master_ha | bool
@@ -149,15 +232,37 @@
- set_fact:
master_service_status_changed: "{{ start_result | changed }}"
+ when: not openshift_master_ha | bool
+
+- name: Start and enable master api
+ service: name={{ openshift.common.service_type }}-master-api enabled=yes state=started
+ when: openshift_master_ha | bool and openshift.master.cluster_method == 'native'
+ register: start_result
+
+- set_fact:
+ master_api_service_status_changed: "{{ start_result | changed }}"
+ when: openshift_master_ha | bool and openshift.master.cluster_method == 'native'
+
+# TODO: fix the ugly workaround of setting ignore_errors
+# the controllers service tries to start even if it is already started
+- name: Start and enable master controllers
+ service: name={{ openshift.common.service_type }}-master-controllers enabled=yes state=started
+ when: openshift_master_ha | bool and openshift.master.cluster_method == 'native'
+ register: start_result
+ ignore_errors: yes
+
+- set_fact:
+ master_controllers_service_status_changed: "{{ start_result | changed }}"
+ when: openshift_master_ha | bool and openshift.master.cluster_method == 'native'
- name: Install cluster packages
yum: pkg=pcs state=present
- when: openshift_master_ha | bool and not openshift.master.cluster_defer_ha | bool
+ when: openshift_master_ha | bool and openshift.master.cluster_method == 'pacemaker'
register: install_result
- name: Start and enable cluster service
service: name=pcsd enabled=yes state=started
- when: openshift_master_ha | bool and not openshift.master.cluster_defer_ha | bool
+ when: openshift_master_ha | bool and openshift.master.cluster_method == 'pacemaker'
- name: Set the cluster user password
shell: echo {{ openshift_master_cluster_password | quote }} | passwd --stdin hacluster
diff --git a/roles/openshift_master/templates/atomic-openshift-master-api.j2 b/roles/openshift_master/templates/atomic-openshift-master-api.j2
new file mode 100644
index 000000000..205934248
--- /dev/null
+++ b/roles/openshift_master/templates/atomic-openshift-master-api.j2
@@ -0,0 +1,9 @@
+OPTIONS=
+CONFIG_FILE={{ openshift_master_config_dir }}/master-config.yaml
+
+# Proxy configuration
+# Origin uses standard HTTP_PROXY environment variables. Be sure to set
+# NO_PROXY for your master
+#NO_PROXY=master.example.com
+#HTTP_PROXY=http://USER:PASSWORD@IPADDR:PORT
+#HTTPS_PROXY=https://USER:PASSWORD@IPADDR:PORT
diff --git a/roles/openshift_master/templates/atomic-openshift-master-api.service.j2 b/roles/openshift_master/templates/atomic-openshift-master-api.service.j2
new file mode 100644
index 000000000..ba19fb348
--- /dev/null
+++ b/roles/openshift_master/templates/atomic-openshift-master-api.service.j2
@@ -0,0 +1,21 @@
+[Unit]
+Description=Atomic OpenShift Master API
+Documentation=https://github.com/openshift/origin
+After=network.target
+After=etcd.service
+Before={{ openshift.common.service_type }}-node.service
+Requires=network.target
+
+[Service]
+Type=notify
+EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-api
+Environment=GOTRACEBACK=crash
+ExecStart=/usr/bin/openshift start master api --config=${CONFIG_FILE} $OPTIONS
+LimitNOFILE=131072
+LimitCORE=infinity
+WorkingDirectory={{ openshift.common.data_dir }}
+SyslogIdentifier={{ openshift.common.service_type }}-master-api
+
+[Install]
+WantedBy=multi-user.target
+WantedBy={{ openshift.common.service_type }}-node.service
diff --git a/roles/openshift_master/templates/atomic-openshift-master-controllers.j2 b/roles/openshift_master/templates/atomic-openshift-master-controllers.j2
new file mode 100644
index 000000000..205934248
--- /dev/null
+++ b/roles/openshift_master/templates/atomic-openshift-master-controllers.j2
@@ -0,0 +1,9 @@
+OPTIONS=
+CONFIG_FILE={{ openshift_master_config_dir }}/master-config.yaml
+
+# Proxy configuration
+# Origin uses standard HTTP_PROXY environment variables. Be sure to set
+# NO_PROXY for your master
+#NO_PROXY=master.example.com
+#HTTP_PROXY=http://USER:PASSWORD@IPADDR:PORT
+#HTTPS_PROXY=https://USER:PASSWORD@IPADDR:PORT
diff --git a/roles/openshift_master/templates/atomic-openshift-master-controllers.service.j2 b/roles/openshift_master/templates/atomic-openshift-master-controllers.service.j2
new file mode 100644
index 000000000..8952c86ef
--- /dev/null
+++ b/roles/openshift_master/templates/atomic-openshift-master-controllers.service.j2
@@ -0,0 +1,22 @@
+[Unit]
+Description=Atomic OpenShift Master Controllers
+Documentation=https://github.com/openshift/origin
+After=network.target
+After={{ openshift.common.service_type }}-master-api.service
+Before={{ openshift.common.service_type }}-node.service
+Requires=network.target
+
+[Service]
+Type=notify
+EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
+Environment=GOTRACEBACK=crash
+ExecStart=/usr/bin/openshift start master controllers --config=${CONFIG_FILE} $OPTIONS
+LimitNOFILE=131072
+LimitCORE=infinity
+WorkingDirectory={{ openshift.common.data_dir }}
+SyslogIdentifier={{ openshift.common.service_type }}-master-controllers
+Restart=on-failure
+
+[Install]
+WantedBy=multi-user.target
+WantedBy={{ openshift.common.service_type }}-node.service
diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2
index 73a0bc6cc..bb12a0a0f 100644
--- a/roles/openshift_master/templates/master.yaml.v1.j2
+++ b/roles/openshift_master/templates/master.yaml.v1.j2
@@ -1,5 +1,5 @@
apiLevels:
-{% if openshift.common.deployment_type == "enterprise" %}
+{% if not openshift.common.version_greater_than_3_1_or_1_1 | bool %}
- v1beta3
{% endif %}
- v1
@@ -10,24 +10,33 @@ assetConfig:
publicURL: {{ openshift.master.public_console_url }}/
servingInfo:
bindAddress: {{ openshift.master.bind_addr }}:{{ openshift.master.console_port }}
+ bindNetwork: tcp4
certFile: master.server.crt
clientCA: ""
keyFile: master.server.key
maxRequestsInFlight: 0
requestTimeoutSeconds: 0
+{% if openshift_master_ha | bool %}
+controllerLeaseTTL: {{ openshift.master.controller_lease_ttl | default('30') }}
+{% endif %}
+controllers: '*'
corsAllowedOrigins:
-{% for origin in ['127.0.0.1', 'localhost', openshift.common.hostname, openshift.common.ip, openshift.common.public_hostname, openshift.common.public_ip] %}
+{% for origin in ['127.0.0.1', 'localhost', openshift.common.ip, openshift.common.public_ip] | union(openshift.common.all_hostnames) | unique %}
- {{ origin }}
{% endfor %}
{% for custom_origin in openshift.master.custom_cors_origins | default("") %}
- {{ custom_origin }}
{% endfor %}
+{% for name in (named_certificates | map(attribute='names')) | list | oo_flatten %}
+ - {{ name }}
+{% endfor %}
{% if 'disabled_features' in openshift.master %}
disabledFeatures: {{ openshift.master.disabled_features | to_json }}
{% endif %}
{% if openshift.master.embedded_dns | bool %}
dnsConfig:
bindAddress: {{ openshift.master.bind_addr }}:{{ openshift.master.dns_port }}
+ bindNetwork: tcp4
{% endif %}
etcdClientInfo:
ca: {{ "ca.crt" if (openshift.master.embedded_etcd | bool) else "master.etcd-ca.crt" }}
@@ -70,16 +79,15 @@ kubeletClientInfo:
port: 10250
{% if openshift.master.embedded_kube | bool %}
kubernetesMasterConfig:
+{% if not openshift.common.version_greater_than_3_1_or_1_1 | bool %}
apiLevels:
-{% if openshift.common.deployment_type == "enterprise" %}
- v1beta3
-{% endif %}
- v1
+{% endif %}
apiServerArguments: {{ api_server_args if api_server_args is defined else 'null' }}
controllerArguments: {{ controller_args if controller_args is defined else 'null' }}
-{# TODO: support overriding masterCount #}
- masterCount: 1
- masterIP: ""
+ masterCount: {{ openshift.master.master_count }}
+ masterIP: {{ openshift.common.ip }}
podEvictionTimeout: ""
proxyClientInfo:
certFile: master.proxy-client.crt
@@ -103,6 +111,7 @@ networkConfig:
# serviceNetworkCIDR must match kubernetesMasterConfig.servicesSubnet
serviceNetworkCIDR: {{ openshift.master.portal_net }}
{% include 'v1_partials/oauthConfig.j2' %}
+pauseControllers: false
policyConfig:
bootstrapPolicyFile: {{ openshift_master_policy }}
openshiftInfrastructureNamespace: openshift-infra
@@ -118,6 +127,7 @@ projectConfig:
routingConfig:
subdomain: "{{ openshift.master.default_subdomain | default("") }}"
serviceAccountConfig:
+ limitSecretReferences: false
managedNames:
- default
- builder
@@ -128,8 +138,20 @@ serviceAccountConfig:
- serviceaccounts.public.key
servingInfo:
bindAddress: {{ openshift.master.bind_addr }}:{{ openshift.master.api_port }}
+ bindNetwork: tcp4
certFile: master.server.crt
clientCA: ca.crt
keyFile: master.server.key
maxRequestsInFlight: 500
requestTimeoutSeconds: 3600
+{% if named_certificates %}
+ namedCertificates:
+{% for named_certificate in named_certificates %}
+ - certFile: {{ named_certificate['certfile'] }}
+ keyFile: {{ named_certificate['keyfile'] }}
+ names:
+{% for name in named_certificate['names'] %}
+ - "{{ name }}"
+{% endfor %}
+{% endfor %}
+{% endif %}
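
Assuming a single hypothetical named_certificates entry, the block above renders roughly as:

    namedCertificates:
    - certFile: custom.crt
      keyFile: custom.key
      names:
      - "vanity-master.example.com"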
diff --git a/roles/openshift_master/templates/sessionSecretsFile.yaml.v1.j2 b/roles/openshift_master/templates/sessionSecretsFile.yaml.v1.j2
new file mode 100644
index 000000000..d12d9db90
--- /dev/null
+++ b/roles/openshift_master/templates/sessionSecretsFile.yaml.v1.j2
@@ -0,0 +1,7 @@
+apiVersion: v1
+kind: SessionSecrets
+secrets:
+{% for secret in openshift_master_session_auth_secrets %}
+- authentication: "{{ secret }}"
+ encryption: "{{ openshift_master_session_encryption_secrets[loop.index0] }}"
+{% endfor %}
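
Given hypothetical paired inventory lists, e.g. openshift_master_session_auth_secrets=['AUTH_SECRET'] and openshift_master_session_encryption_secrets=['ENC_SECRET'] (the lists must be equal length and are paired by index), the template renders as:

    apiVersion: v1
    kind: SessionSecrets
    secrets:
    - authentication: "AUTH_SECRET"
      encryption: "ENC_SECRET"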
diff --git a/roles/openshift_master/vars/main.yml b/roles/openshift_master/vars/main.yml
index ecdb4f883..534465451 100644
--- a/roles/openshift_master/vars/main.yml
+++ b/roles/openshift_master/vars/main.yml
@@ -2,6 +2,7 @@
openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
openshift_master_config_file: "{{ openshift_master_config_dir }}/master-config.yaml"
openshift_master_scheduler_conf: "{{ openshift_master_config_dir }}/scheduler.json"
+openshift_master_session_secrets_file: "{{ openshift_master_config_dir }}/session-secrets.yaml"
openshift_master_policy: "{{ openshift_master_config_dir }}/policy.json"
openshift_version: "{{ openshift_pkg_version | default('') }}"
diff --git a/roles/openshift_master_ca/tasks/main.yml b/roles/openshift_master_ca/tasks/main.yml
index cfd1ceabf..314f068e7 100644
--- a/roles/openshift_master_ca/tasks/main.yml
+++ b/roles/openshift_master_ca/tasks/main.yml
@@ -14,7 +14,7 @@
- name: Create the master certificates if they do not already exist
command: >
{{ openshift.common.admin_binary }} create-master-certs
- --hostnames={{ openshift.common.all_hostnames | join(',') }}
+ --hostnames={{ master_hostnames | join(',') }}
--master={{ openshift.master.api_url }}
--public-master={{ openshift.master.public_api_url }}
--cert-dir={{ openshift_master_config_dir }} --overwrite=false
diff --git a/roles/openshift_master_certificates/tasks/main.yml b/roles/openshift_master_certificates/tasks/main.yml
index 4b39b043a..13e5d7a4b 100644
--- a/roles/openshift_master_certificates/tasks/main.yml
+++ b/roles/openshift_master_certificates/tasks/main.yml
@@ -6,13 +6,9 @@
mode: 0700
with_items: masters_needing_certs
-- file:
- src: "{{ openshift_master_config_dir }}/{{ item.1 }}"
- dest: "{{ openshift_generated_configs_dir }}/{{ item.0.master_cert_subdir }}/{{ item.1 }}"
- state: hard
- with_nested:
- - masters_needing_certs
- - - ca.crt
+- set_fact:
+ master_certificates:
+ - ca.crt
- ca.key
- ca.serial.txt
- admin.crt
@@ -20,8 +16,6 @@
- admin.kubeconfig
- master.kubelet-client.crt
- master.kubelet-client.key
- - "{{ 'master.proxy-client.crt' if openshift.common.version_greater_than_3_1_or_1_1 else omit }}"
- - "{{ 'master.proxy-client.key' if openshift.common.version_greater_than_3_1_or_1_1 else omit }}"
- openshift-master.crt
- openshift-master.key
- openshift-master.kubeconfig
@@ -33,9 +27,17 @@
- openshift-router.kubeconfig
- serviceaccounts.private.key
- serviceaccounts.public.key
+ master_31_certificates:
+ - master.proxy-client.crt
+ - master.proxy-client.key
-- debug: msg="{{ item.openshift.master.all_hostnames | join (',') }}"
- with_items: masters_needing_certs
+- file:
+ src: "{{ openshift_master_config_dir }}/{{ item.1 }}"
+ dest: "{{ openshift_generated_configs_dir }}/{{ item.0.master_cert_subdir }}/{{ item.1 }}"
+ state: hard
+ with_nested:
+ - masters_needing_certs
+ - "{{ master_certificates | union(master_31_certificates) if openshift.common.version_greater_than_3_1_or_1_1 | bool else master_certificates }}"
- name: Create the master certificates if they do not already exist
command: >
diff --git a/roles/openshift_master_cluster/tasks/configure_deferred.yml b/roles/openshift_master_cluster/tasks/configure_deferred.yml
deleted file mode 100644
index 3b416005b..000000000
--- a/roles/openshift_master_cluster/tasks/configure_deferred.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-- debug: msg="Deferring config"
-
-- name: Start and enable the master
- service:
- name: "{{ openshift.common.service_type }}-master"
- state: started
- enabled: yes
diff --git a/roles/openshift_master_cluster/tasks/main.yml b/roles/openshift_master_cluster/tasks/main.yml
index 315947183..6303a6e46 100644
--- a/roles/openshift_master_cluster/tasks/main.yml
+++ b/roles/openshift_master_cluster/tasks/main.yml
@@ -4,10 +4,7 @@
register: pcs_status
changed_when: false
failed_when: false
- when: not openshift.master.cluster_defer_ha | bool
+ when: openshift.master.cluster_method == "pacemaker"
- include: configure.yml
when: "pcs_status | failed and 'Error: cluster is not currently running on this node' in pcs_status.stderr"
-
-- include: configure_deferred.yml
- when: openshift.master.cluster_defer_ha | bool
diff --git a/roles/openshift_node/meta/main.yml b/roles/openshift_node/meta/main.yml
index c92008a77..9d40ae3b3 100644
--- a/roles/openshift_node/meta/main.yml
+++ b/roles/openshift_node/meta/main.yml
@@ -13,3 +13,4 @@ galaxy_info:
- cloud
dependencies:
- { role: openshift_common }
+- { role: docker }
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index aea60b75c..d11bc5123 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -8,7 +8,7 @@
when: osn_cluster_dns_ip is not defined or not osn_cluster_dns_ip
- fail:
msg: "SELinux is disabled, This deployment type requires that SELinux is enabled."
- when: (not ansible_selinux or ansible_selinux.status != 'enabled') and deployment_type in ['enterprise', 'online']
+ when: (not ansible_selinux or ansible_selinux.status != 'enabled') and deployment_type in ['enterprise', 'online', 'atomic-enterprise', 'openshift-enterprise']
- name: Set node facts
openshift_facts:
@@ -22,16 +22,17 @@
deployment_type: "{{ openshift_deployment_type }}"
- role: node
local_facts:
- labels: "{{ lookup('oo_option', 'openshift_node_labels') | default( openshift_node_labels | default(none), true) }}"
annotations: "{{ openshift_node_annotations | default(none) }}"
- registry_url: "{{ oreg_url | default(none) }}"
debug_level: "{{ openshift_node_debug_level | default(openshift.common.debug_level) }}"
- portal_net: "{{ openshift_master_portal_net | default(None) }}"
- kubelet_args: "{{ openshift_node_kubelet_args | default(None) }}"
- sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}"
- schedulable: "{{ openshift_schedulable | default(openshift_scheduleable) | default(None) }}"
docker_log_driver: "{{ lookup( 'oo_option' , 'docker_log_driver' ) | default('',True) }}"
docker_log_options: "{{ lookup( 'oo_option' , 'docker_log_options' ) | default('',True) }}"
+ iptables_sync_period: "{{ openshift_node_iptables_sync_period | default(None) }}"
+ kubelet_args: "{{ openshift_node_kubelet_args | default(None) }}"
+ labels: "{{ lookup('oo_option', 'openshift_node_labels') | default( openshift_node_labels | default(none), true) }}"
+ portal_net: "{{ openshift_master_portal_net | default(None) }}"
+ registry_url: "{{ oreg_url | default(none) }}"
+ schedulable: "{{ openshift_schedulable | default(openshift_scheduleable) | default(None) }}"
+ sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}"
storage_plugin_deps: "{{ osn_storage_plugin_deps | default(None) }}"
# We have to add tuned-profiles in the same transaction otherwise we run into depsolving
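
Hypothetical inventory overrides for the node tunables wired in above (values illustrative):

    openshift_node_iptables_sync_period: '30s'
    openshift_node_sdn_mtu: 1450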
diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2
index 4931d127e..7d2f506e3 100644
--- a/roles/openshift_node/templates/node.yaml.v1.j2
+++ b/roles/openshift_node/templates/node.yaml.v1.j2
@@ -4,6 +4,7 @@ dnsDomain: {{ osn_cluster_dns_domain }}
dnsIP: {{ osn_cluster_dns_ip }}
dockerConfig:
execHandlerName: ""
+iptablesSyncPeriod: "{{ openshift.node.iptables_sync_period }}"
imageConfig:
format: {{ openshift.node.registry_url }}
latest: false
@@ -22,6 +23,7 @@ networkConfig:
{% if openshift.common.use_openshift_sdn %}
networkPluginName: {{ openshift.common.sdn_network_plugin_name }}
{% endif %}
+nodeIP: {{ openshift.common.ip }}
nodeName: {{ openshift.common.hostname | lower }}
podManifestConfig:
servingInfo:
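
With the default sync period and a hypothetical node IP, the two additions above render in node-config.yaml as:

    iptablesSyncPeriod: "5s"
    nodeIP: 192.0.2.10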
diff --git a/roles/openshift_repos/tasks/main.yaml b/roles/openshift_repos/tasks/main.yaml
index 12e98b7a1..aa696ae12 100644
--- a/roles/openshift_repos/tasks/main.yaml
+++ b/roles/openshift_repos/tasks/main.yaml
@@ -8,7 +8,7 @@
# proper repos correctly.
- assert:
- that: openshift_deployment_type in known_openshift_deployment_types
+ that: openshift.common.deployment_type in known_openshift_deployment_types
- name: Ensure libselinux-python is installed
yum:
diff --git a/roles/os_zabbix/vars/template_os_linux.yml b/roles/os_zabbix/vars/template_os_linux.yml
index aeeec4b8d..04665be62 100644
--- a/roles/os_zabbix/vars/template_os_linux.yml
+++ b/roles/os_zabbix/vars/template_os_linux.yml
@@ -194,6 +194,16 @@ g_template_os_linux:
lifetime: 1
description: "Dynamically register the filesystems"
+ - name: disc.disk
+ key: disc.disk
+ lifetime: 1
+ description: "Dynamically register disks on a node"
+
+ - name: disc.network
+ key: disc.network
+ lifetime: 1
+ description: "Dynamically register network interfaces on a node"
+
zitemprototypes:
- discoveryrule_key: disc.filesys
name: "disc.filesys.full.{#OSO_FILESYS}"
@@ -211,6 +221,42 @@ g_template_os_linux:
applications:
- Disk
+ - discoveryrule_key: disc.disk
+ name: "TPS (IOPS) for disk {#OSO_DISK}"
+ key: "disc.disk.tps[{#OSO_DISK}]"
+ value_type: int
+ description: "PCP disk.dev.totals metric measured over a period of time. This shows how many disk transactions per second the disk is using"
+ applications:
+ - Disk
+
+ - discoveryrule_key: disc.disk
+ name: "Percent Utilized for disk {#OSO_DISK}"
+ key: "disc.disk.putil[{#OSO_DISK}]"
+ value_type: float
+ description: "PCP disk.dev.avactive metric measured over a period of time. This is the '%util' in the iostat command"
+ applications:
+ - Disk
+
+ - discoveryrule_key: disc.network
+ name: "Bytes per second IN on network interface {#OSO_NET_INTERFACE}"
+ key: "disc.network.in.bytes[{#OSO_NET_INTERFACE}]"
+ value_type: int
+ units: B
+ delta: 1
+ description: "PCP network.interface.in.bytes metric. This is setup as a delta in Zabbix to measure the speed per second"
+ applications:
+ - Network
+
+ - discoveryrule_key: disc.network
+ name: "Bytes per second OUT on network interface {#OSO_NET_INTERFACE}"
+ key: "disc.network.out.bytes[{#OSO_NET_INTERFACE}]"
+ value_type: int
+ units: B
+ delta: 1
+ description: "PCP network.interface.out.bytes metric. This is setup as a delta in Zabbix to measure the speed per second"
+ applications:
+ - Network
+
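
Once low-level discovery substitutes the macros, the prototypes above yield concrete items; device names here are hypothetical:

    disc.disk.tps[sda]
    disc.disk.putil[sda]
    disc.network.in.bytes[eth0]    # delta, stored as bytes per second
    disc.network.out.bytes[eth0]   # delta, stored as bytes per second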
ztriggerprototypes:
- name: 'Filesystem: {#OSO_FILESYS} has less than 15% free disk space on {HOST.NAME}'
expression: '{Template OS Linux:disc.filesys.full[{#OSO_FILESYS}].last()}>85'