Diffstat (limited to 'roles')
70 files changed, 850 insertions, 510 deletions
diff --git a/roles/dns/README.md b/roles/dns/README.md new file mode 100644 index 000000000..e238fb92e --- /dev/null +++ b/roles/dns/README.md @@ -0,0 +1,43 @@ +dns +=== + +Configure a DNS server serving IPs of all the nodes of the cluster + +Requirements +------------ + +None + +Role Variables +-------------- + +| Name | Mandatory / Optional | Description | +|------|----------------------|-------------| +| `dns_zones` | Mandatory | DNS zones in which we must find the hosts | +| `dns_forwarders` | If not set, the DNS will be a recursive non-forwarding DNS server | DNS forwarders to delegate the requests for hosts outside of `dns_zones` | +| `dns_all_hosts` | Mandatory | Exhaustive list of hosts | + +Dependencies +------------ + +None + +Example Playbook +---------------- + + - hosts: dns_hosts + roles: + - role: dns + dns_forwarders: [ '8.8.8.8', '8.8.4.4' ] + dns_zones: [ novalocal, openstacklocal ] + dns_all_hosts: "{{ g_all_hosts }}" + +License +------- + +ASL 2.0 + +Author Information +------------------ + +OpenShift operations, Red Hat, Inc diff --git a/roles/dns/handlers/main.yml b/roles/dns/handlers/main.yml new file mode 100644 index 000000000..ef101785e --- /dev/null +++ b/roles/dns/handlers/main.yml @@ -0,0 +1,4 @@ +- name: restart bind + service: + name: named + state: restarted diff --git a/roles/dns/meta/main.yml b/roles/dns/meta/main.yml new file mode 100644 index 000000000..b6e9d9ad0 --- /dev/null +++ b/roles/dns/meta/main.yml @@ -0,0 +1,7 @@ +--- +galaxy_info: + author: Lénaïc Huard + description: Deploy and configure a DNS server + company: Amadeus SAS + license: ASL 2.0 +dependencies: [] diff --git a/roles/dns/tasks/main.yml b/roles/dns/tasks/main.yml new file mode 100644 index 000000000..af728585d --- /dev/null +++ b/roles/dns/tasks/main.yml @@ -0,0 +1,22 @@ +- name: Install Bind + action: "{{ ansible_pkg_mgr }} name=bind" + +- name: Configure Bind + template: + src: "{{ item.src }}" + dest: "{{ item.dest }}" + validate: "{{ item.validate }}" + with_items: + - src: openshift-cluster.zone + dest: /var/named/openshift-cluster.zone + validate: "named-checkzone {{ dns_zones[0] }} %s" + - src: named.conf + dest: /etc/named.conf + validate: "named-checkconf %s" + notify: restart bind + +- name: Enable Bind + service: + name: named + state: started + enabled: yes diff --git a/roles/dns/templates/named.conf b/roles/dns/templates/named.conf new file mode 100644 index 000000000..22c1ff935 --- /dev/null +++ b/roles/dns/templates/named.conf @@ -0,0 +1,23 @@ +options +{ + directory "/var/named"; + + allow-query { {{ ansible_default_ipv4.network }}/24; }; + + recursion yes; + +{% if dns_forwarders is defined %} + forwarders { + {% for dns in dns_forwarders %} + {{ dns }}; + {% endfor %} + }; +{% endif %} +}; +{% for zone in dns_zones %} + +zone "{{ zone }}" IN { + type master; + file "openshift-cluster.zone"; +}; +{% endfor %} diff --git a/roles/dns/templates/openshift-cluster.zone b/roles/dns/templates/openshift-cluster.zone new file mode 100644 index 000000000..03f5dc089 --- /dev/null +++ b/roles/dns/templates/openshift-cluster.zone @@ -0,0 +1,14 @@ +$TTL 1d +@ IN SOA {{ ansible_hostname }} openshift ( + {{ ansible_date_time.epoch }} ; Serial (To be fixed before 2039) + 12h ; Refresh + 3m ; Retry + 4w ; Expire + 3h ; TTL for negative replies + ) + + IN NS {{ ansible_hostname }} +{{ ansible_hostname }} IN A {{ ansible_default_ipv4.address }} +{% for host in dns_all_hosts %} +{{ hostvars[host].ansible_hostname }} IN A {{ hostvars[host]['ansible_default_ipv4'].address }} +{% endfor %} 
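The new `dns` role above builds its BIND configuration entirely from inventory facts. As a quick illustration of what the `named.conf` template produces, here is a minimal standalone sketch that renders the same Jinja2 body with the sample values from the README's example playbook; the `ansible_default_ipv4.network` value is an assumed placeholder, and in a real run Ansible supplies these facts and validates the result with `named-checkconf`.

```python
# Standalone sketch: preview the named.conf that the dns role's template would
# generate. The fact values below are illustrative placeholders, not real data.
from jinja2 import Template

NAMED_CONF = """\
options
{
  directory "/var/named";
  allow-query { {{ ansible_default_ipv4.network }}/24; };
  recursion yes;
{% if dns_forwarders is defined %}
  forwarders {
{% for dns in dns_forwarders %}    {{ dns }};
{% endfor %}  };
{% endif %}
};
{% for zone in dns_zones %}
zone "{{ zone }}" IN {
        type master;
        file "openshift-cluster.zone";
};
{% endfor %}
"""

print(Template(NAMED_CONF).render(
    ansible_default_ipv4={"network": "192.168.0.0"},  # assumed sample fact
    dns_forwarders=["8.8.8.8", "8.8.4.4"],            # values from the README example
    dns_zones=["novalocal", "openstacklocal"],
))
```

Dropping `dns_forwarders` from the render call exercises the `is defined` branch and yields a recursive, non-forwarding configuration, matching the behaviour described in the role's variable table.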
diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml index 1e97b047b..e72509c4d 100644 --- a/roles/etcd/tasks/main.yml +++ b/roles/etcd/tasks/main.yml @@ -8,7 +8,7 @@ when: "'ipv4' not in hostvars[inventory_hostname]['ansible_' ~ etcd_interface] or 'address' not in hostvars[inventory_hostname]['ansible_' ~ etcd_interface].ipv4" - name: Install etcd - action: "{{ ansible_pkg_mgr }} name=etcd-2.* state=present" + action: "{{ ansible_pkg_mgr }} name=etcd state=present" when: not openshift.common.is_containerized | bool - name: Pull etcd container diff --git a/roles/etcd_common/tasks/main.yml b/roles/etcd_common/tasks/main.yml index cd108495d..be75fdab2 100644 --- a/roles/etcd_common/tasks/main.yml +++ b/roles/etcd_common/tasks/main.yml @@ -5,9 +5,9 @@ - fail: msg: "Interface {{ item.value.etcd_interface }} not found on host {{ item.key }}" when: "'etcd_interface' in item.value and 'interface' not in item.value" - with_dict: etcd_host_int_map + with_dict: etcd_host_int_map | default({}) - fail: msg: IPv4 address not found for {{ item.value.interface.device }} on host {{ item.key }} when: "'ipv4' not in item.value.interface or 'address' not in item.value.interface.ipv4" - with_dict: etcd_host_int_map + with_dict: etcd_host_int_map | default({}) diff --git a/roles/fluentd_master/meta/main.yml b/roles/fluentd_master/meta/main.yml new file mode 100644 index 000000000..148bc377e --- /dev/null +++ b/roles/fluentd_master/meta/main.yml @@ -0,0 +1,15 @@ +--- +galaxy_info: + author: OpenShift Red Hat + description: Fluentd Master + company: Red Hat, Inc. + license: Apache License, Version 2.0 + min_ansible_version: 1.9 + platforms: + - name: EL + versions: + - 7 + categories: + - monitoring + dependencies: + - openshift_facts diff --git a/roles/fluentd_master/tasks/main.yml b/roles/fluentd_master/tasks/main.yml deleted file mode 100644 index 32f972f0a..000000000 --- a/roles/fluentd_master/tasks/main.yml +++ /dev/null @@ -1,57 +0,0 @@ ---- -- fail: - msg: "fluentd master is not yet supported on atomic hosts" - when: openshift.common.is_atomic | bool - -# TODO: Update fluentd install and configuration when packaging is complete -- name: download and install td-agent - action: "{{ ansible_pkg_mgr }} name='http://packages.treasuredata.com/2/redhat/7/x86_64/td-agent-2.2.0-0.x86_64.rpm' state=present" - when: not openshift.common.is_atomic | bool - -- name: Verify fluentd plugin installed - command: '/opt/td-agent/embedded/bin/gem query -i fluent-plugin-kubernetes' - register: _fluent_plugin_check - failed_when: false - changed_when: false - -- name: install Kubernetes fluentd plugin - command: '/opt/td-agent/embedded/bin/gem install fluent-plugin-kubernetes' - when: _fluent_plugin_check.rc == 1 - -- name: Creates directories - file: - path: "{{ item }}" - state: directory - group: 'td-agent' - owner: 'td-agent' - mode: 0755 - with_items: ['/etc/td-agent/config.d'] - -- name: Add include to td-agent configuration - lineinfile: - dest: '/etc/td-agent/td-agent.conf' - regexp: '^@include config.d' - line: '@include config.d/*.conf' - state: present - -- name: install Kubernetes fluentd configuration file - template: - src: kubernetes.conf.j2 - dest: /etc/td-agent/config.d/kubernetes.conf - group: 'td-agent' - owner: 'td-agent' - mode: 0444 - -- name: wait for etcd to start up - wait_for: port=4001 delay=10 - when: embedded_etcd | bool - -- name: wait for etcd peer to start up - wait_for: port=7001 delay=10 - when: embedded_etcd | bool - -- name: ensure td-agent is running - service: - name: 
'td-agent' - state: started - enabled: yes diff --git a/roles/fluentd_master/templates/kubernetes.conf.j2 b/roles/fluentd_master/templates/kubernetes.conf.j2 deleted file mode 100644 index 7b5c86062..000000000 --- a/roles/fluentd_master/templates/kubernetes.conf.j2 +++ /dev/null @@ -1,9 +0,0 @@ -<match kubernetes.**> - type file - path /var/log/td-agent/containers.log - time_slice_format %Y%m%d - time_slice_wait 10m - time_format %Y%m%dT%H%M%S%z - compress gzip - utc -</match> diff --git a/roles/fluentd_node/tasks/main.yml b/roles/fluentd_node/tasks/main.yml deleted file mode 100644 index 9fd908687..000000000 --- a/roles/fluentd_node/tasks/main.yml +++ /dev/null @@ -1,57 +0,0 @@ ---- -- fail: - msg: "fluentd node is not yet supported on atomic hosts" - when: openshift.common.is_atomic | bool - -# TODO: Update fluentd install and configuration when packaging is complete -- name: download and install td-agent - action: "{{ ansible_pkg_mgr }} name='http://packages.treasuredata.com/2/redhat/7/x86_64/td-agent-2.2.0-0.x86_64.rpm' state=present" - when: not openshift.common.is_atomic | bool - -- name: Verify fluentd plugin installed - command: '/opt/td-agent/embedded/bin/gem query -i fluent-plugin-kubernetes' - register: _fluent_plugin_check - failed_when: false - changed_when: false - -- name: install Kubernetes fluentd plugin - command: '/opt/td-agent/embedded/bin/gem install fluent-plugin-kubernetes' - when: _fluent_plugin_check.rc == 1 - -- name: Override td-agent configuration file - template: - src: td-agent.j2 - dest: /etc/sysconfig/td-agent - group: 'td-agent' - owner: 'td-agent' - mode: 0444 - -- name: Creates directories - file: - path: "{{ item }}" - state: directory - group: 'td-agent' - owner: 'td-agent' - mode: 0755 - with_items: ['/etc/td-agent/config.d', '/var/log/td-agent/tmp'] - -- name: Add include to td-agent configuration - lineinfile: - dest: '/etc/td-agent/td-agent.conf' - regexp: '^@include config.d' - line: '@include config.d/*.conf' - state: present - -- name: install Kubernetes fluentd configuration file - template: - src: kubernetes.conf.j2 - dest: /etc/td-agent/config.d/kubernetes.conf - group: 'td-agent' - owner: 'td-agent' - mode: 0444 - -- name: ensure td-agent is running - service: - name: 'td-agent' - state: started - enabled: yes diff --git a/roles/fluentd_node/templates/kubernetes.conf.j2 b/roles/fluentd_node/templates/kubernetes.conf.j2 deleted file mode 100644 index 5f1eecb20..000000000 --- a/roles/fluentd_node/templates/kubernetes.conf.j2 +++ /dev/null @@ -1,53 +0,0 @@ -<source> - type tail - path /var/lib/docker/containers/*/*-json.log - pos_file /var/log/td-agent/tmp/fluentd-docker.pos - time_format %Y-%m-%dT%H:%M:%S - tag docker.* - format json - read_from_head true -</source> - -<match docker.var.lib.docker.containers.*.*.log> - type kubernetes - container_id ${tag_parts[5]} - tag docker.${name} -</match> - -<match kubernetes> - type copy - - <store> - type forward - send_timeout 60s - recover_wait 10s - heartbeat_interval 1s - phi_threshold 16 - hard_timeout 60s - log_level trace - require_ack_response true - heartbeat_type tcp - - <server> - name {{groups['oo_first_master'][0]}} - host {{hostvars[groups['oo_first_master'][0]].openshift.common.hostname}} - port 24224 - weight 60 - </server> - - <secondary> - type file - path /var/log/td-agent/forward-failed - </secondary> - </store> - - <store> - type file - path /var/log/td-agent/containers.log - time_slice_format %Y%m%d - time_slice_wait 10m - time_format %Y%m%dT%H%M%S%z - compress gzip - utc - 
</store> -</match> diff --git a/roles/fluentd_node/templates/td-agent.j2 b/roles/fluentd_node/templates/td-agent.j2 deleted file mode 100644 index 7245e11ec..000000000 --- a/roles/fluentd_node/templates/td-agent.j2 +++ /dev/null @@ -1,2 +0,0 @@ -DAEMON_ARGS= -TD_AGENT_ARGS="/usr/sbin/td-agent --log /var/log/td-agent/td-agent.log --use-v1-config" diff --git a/roles/lib_dyn/library/dyn_record.py b/roles/lib_dyn/library/dyn_record.py index 5e088a674..7b80064f4 100644 --- a/roles/lib_dyn/library/dyn_record.py +++ b/roles/lib_dyn/library/dyn_record.py @@ -95,6 +95,26 @@ requirements: [ dyn ] author: "Russell Harrison" ''' +EXAMPLES = ''' +- name: Update CNAME record + local_action: + module: dyn_record + state: present + record_fqdn: www.example.com + zone: example.com + record_type: CNAME + record_value: web1.example.com + +- name: Update A record + local_action: + module: dyn_record + state: present + record_fqdn: web1.example.com + zone: example.com + record_value: 10.0.0.10 + record_type: A +''' + try: IMPORT_ERROR = False from dyn.tm.session import DynectSession @@ -158,15 +178,15 @@ def main(): '''Ansible module for managing Dyn DNS records.''' module = AnsibleModule( argument_spec=dict( - state=dict(required=True, choices=['present', 'absent', 'list']), + state=dict(default='present', choices=['present', 'absent', 'list']), customer_name=dict(default=os.environ.get('DYNECT_CUSTOMER_NAME', None), type='str'), user_name=dict(default=os.environ.get('DYNECT_USER_NAME', None), type='str', no_log=True), user_password=dict(default=os.environ.get('DYNECT_PASSWORD', None), type='str', no_log=True), - zone=dict(required=True), - record_fqdn=dict(required=False), - record_type=dict(required=False, choices=[ + zone=dict(required=True, type='str'), + record_fqdn=dict(required=False, type='str'), + record_type=dict(required=False, type='str', choices=[ 'A', 'AAAA', 'CNAME', 'PTR', 'TXT']), - record_value=dict(required=False), + record_value=dict(required=False, type='str'), record_ttl=dict(required=False, default=0, type='int'), ), required_together=( @@ -224,11 +244,13 @@ def main(): # First get a list of existing records for the node values = get_record_values(dyn_node_records) value_key = get_record_key(module.params['record_type']) + param_value = module.params['record_value'] # Check to see if the record is already in place before doing anything. if (dyn_node_records and dyn_node_records[value_key][0].ttl == module.params['record_ttl'] and - module.params['record_value'] in values[value_key]): + (param_value in values[value_key] or + param_value + '.' 
in values[value_key])): module.exit_json(changed=False) diff --git a/roles/lib_dyn/tasks/main.yml b/roles/lib_dyn/tasks/main.yml index 9b3b1b0b9..965962928 100644 --- a/roles/lib_dyn/tasks/main.yml +++ b/roles/lib_dyn/tasks/main.yml @@ -3,3 +3,5 @@ - name: Make sure python-dyn is installed yum: name=python-dyn state=present + tags: + - lib_dyn diff --git a/roles/lib_zabbix/library/zbx_action.py b/roles/lib_zabbix/library/zbx_action.py index 2f9524556..e7edcde2f 100644 --- a/roles/lib_zabbix/library/zbx_action.py +++ b/roles/lib_zabbix/library/zbx_action.py @@ -120,8 +120,8 @@ def opmessage_usr_diff(zab_val, user_val): ''' Report whether there are differences between opmessage_usr on zabbix and opmessage_usr supplied by user ''' - zab_usr_ids = set([usr['usrid'] for usr in zab_val]) - usr_ids = set([usr['usrid'] for usr in user_val]) + zab_usr_ids = set([usr['userid'] for usr in zab_val]) + usr_ids = set([usr['userid'] for usr in user_val]) if usr_ids != zab_usr_ids: return True @@ -228,12 +228,12 @@ def get_user_groups(zapi, groups): '''get the mediatype id from the mediatype name''' user_groups = [] - content = zapi.get_content('usergroup', - 'get', - {'search': {'name': groups}}) - - for usr_grp in content['result']: - user_groups.append({'usrgrpid': usr_grp['usrgrpid']}) + for group in groups: + content = zapi.get_content('usergroup', + 'get', + {'search': {'name': group}}) + for result in content['result']: + user_groups.append({'usrgrpid': result['usrgrpid']}) return user_groups diff --git a/roles/nuage_master/handlers/main.yaml b/roles/nuage_master/handlers/main.yaml index 635d8a419..5d133cf16 100644 --- a/roles/nuage_master/handlers/main.yaml +++ b/roles/nuage_master/handlers/main.yaml @@ -1,7 +1,7 @@ --- -- name: restart nuagekubemon +- name: restart nuage-openshift-monitor sudo: true - service: name=nuagekubemon state=restarted + service: name=nuage-openshift-monitor state=restarted - name: restart master service: name={{ openshift.common.service_type }}-master state=restarted diff --git a/roles/nuage_master/tasks/main.yaml b/roles/nuage_master/tasks/main.yaml index a7baadc76..20d105b9e 100644 --- a/roles/nuage_master/tasks/main.yaml +++ b/roles/nuage_master/tasks/main.yaml @@ -1,15 +1,15 @@ --- -- name: Create directory /usr/share/nuagekubemon +- name: Create directory /usr/share/nuage-openshift-monitor sudo: true - file: path=/usr/share/nuagekubemon state=directory + file: path=/usr/share/nuage-openshift-monitor state=directory - name: Create the log directory sudo: true - file: path={{ nuagekubemon_log_dir }} state=directory + file: path={{ nuage_openshift_monitor_log_dir }} state=directory -- name: Install Nuage Kubemon +- name: Install Nuage Openshift Monitor sudo: true - yum: name={{ nuage_kubemon_rpm }} state=present + yum: name={{ nuage_openshift_rpm }} state=present - name: Run the service account creation script sudo: true @@ -24,11 +24,11 @@ - nuage.key - nuage.kubeconfig -- name: Create nuagekubemon.yaml +- name: Create nuage-openshift-monitor.yaml sudo: true - template: src=nuagekubemon.j2 dest=/usr/share/nuagekubemon/nuagekubemon.yaml owner=root mode=0644 + template: src=nuage-openshift-monitor.j2 dest=/usr/share/nuage-openshift-monitor/nuage-openshift-monitor.yaml owner=root mode=0644 notify: - restart master - restart master api - restart master controllers - - restart nuagekubemon + - restart nuage-openshift-monitor diff --git a/roles/nuage_master/templates/nuagekubemon.j2 b/roles/nuage_master/templates/nuage-openshift-monitor.j2 index fb586bcee..db8c3d85e 100644 
--- a/roles/nuage_master/templates/nuagekubemon.j2 +++ b/roles/nuage_master/templates/nuage-openshift-monitor.j2 @@ -3,7 +3,7 @@ kubeConfig: {{ kube_config }} # name of the nuage service account, or another account with 'cluster-reader' # permissions # Openshift master config file -openshiftMasterConfig: {{ master_config_yaml }} +masterConfig: {{ master_config_yaml }} # URL of the VSD Architect vsdApiUrl: {{ vsd_api_url }} # API version to query against. Usually "v3_2" @@ -16,4 +16,4 @@ enterpriseName: {{ enterprise }} # Name of the domain in which pods will reside domainName: {{ domain }} # Location where logs should be saved -log_dir: {{ nuagekubemon_log_dir }} +log_dir: {{ nuage_openshift_monitor_log_dir }} diff --git a/roles/nuage_master/vars/main.yaml b/roles/nuage_master/vars/main.yaml index db901fea6..c489feabe 100644 --- a/roles/nuage_master/vars/main.yaml +++ b/roles/nuage_master/vars/main.yaml @@ -1,7 +1,7 @@ openshift_master_config_dir: "{{ openshift.common.config_base }}/master" ca_cert: "{{ openshift_master_config_dir }}/ca.crt" admin_config: "{{ openshift.common.config_base }}/master/admin.kubeconfig" -cert_output_dir: /usr/share/nuagekubemon -kube_config: /usr/share/nuagekubemon/nuage.kubeconfig -kubemon_yaml: /usr/share/nuagekubemon/nuagekubemon.yaml +cert_output_dir: /usr/share/nuage-openshift-monitor +kube_config: /usr/share/nuage-openshift-monitor/nuage.kubeconfig +kubemon_yaml: /usr/share/nuage-openshift-monitor/nuage-openshift-monitor.yaml master_config_yaml: "{{ openshift_master_config_dir }}/master-config.yaml" diff --git a/roles/nuage_node/tasks/main.yaml b/roles/nuage_node/tasks/main.yaml index e0117bf71..c1e49902d 100644 --- a/roles/nuage_node/tasks/main.yaml +++ b/roles/nuage_node/tasks/main.yaml @@ -22,16 +22,16 @@ - name: Copy the certificates and keys sudo: true - copy: src="/tmp/{{ item }}" dest="{{ vsp_k8s_dir }}/{{ item }}" + copy: src="/tmp/{{ item }}" dest="{{ vsp_openshift_dir }}/{{ item }}" with_items: - ca.crt - nuage.crt - nuage.key - nuage.kubeconfig -- name: Set the vsp-k8s.yaml +- name: Set the vsp-openshift.yaml sudo: true - template: src=vsp-k8s.j2 dest={{ vsp_k8s_yaml }} owner=root mode=0644 + template: src=vsp-openshift.j2 dest={{ vsp_openshift_yaml }} owner=root mode=0644 notify: - restart vrs - restart node diff --git a/roles/nuage_node/templates/vsp-k8s.j2 b/roles/nuage_node/templates/vsp-openshift.j2 index 98d6c3a9c..98d6c3a9c 100644 --- a/roles/nuage_node/templates/vsp-k8s.j2 +++ b/roles/nuage_node/templates/vsp-openshift.j2 diff --git a/roles/nuage_node/vars/main.yaml b/roles/nuage_node/vars/main.yaml index a6b7cf997..4975d17ed 100644 --- a/roles/nuage_node/vars/main.yaml +++ b/roles/nuage_node/vars/main.yaml @@ -1,9 +1,9 @@ --- vrs_config: /etc/default/openvswitch -vsp_k8s_dir: /usr/share/vsp-k8s -vsp_k8s_yaml: "{{ vsp_k8s_dir }}/vsp-k8s.yaml" -client_cert: "{{ vsp_k8s_dir }}/nuage.crt" -client_key: "{{ vsp_k8s_dir }}/nuage.key" -ca_cert: "{{ vsp_k8s_dir }}/ca.crt" +vsp_openshift_dir: /usr/share/vsp-openshift +vsp_openshift_yaml: "{{ vsp_openshift_dir }}/vsp-openshift.yaml" +client_cert: "{{ vsp_openshift_dir }}/nuage.crt" +client_key: "{{ vsp_openshift_dir }}/nuage.key" +ca_cert: "{{ vsp_openshift_dir }}/ca.crt" api_server: "{{ openshift_node_master_api_url }}" docker_bridge: "docker0" diff --git a/roles/openshift_cluster_metrics/tasks/main.yml b/roles/openshift_cluster_metrics/tasks/main.yml index 9b7735e54..d45f62eca 100644 --- a/roles/openshift_cluster_metrics/tasks/main.yml +++ b/roles/openshift_cluster_metrics/tasks/main.yml @@ 
-3,12 +3,12 @@ - name: Install cluster metrics templates copy: src: cluster-metrics - dest: /etc/openshift/ + dest: /etc/origin/ - name: Create InfluxDB Services command: > {{ openshift.common.client_binary }} create -f - /etc/openshift/cluster-metrics/influxdb.yaml + /etc/origin/cluster-metrics/influxdb.yaml register: oex_influxdb_services failed_when: "'already exists' not in oex_influxdb_services.stderr and oex_influxdb_services.rc != 0" changed_when: false @@ -16,7 +16,7 @@ - name: Create Heapster Service Account command: > {{ openshift.common.client_binary }} create -f - /etc/openshift/cluster-metrics/heapster-serviceaccount.yaml + /etc/origin/cluster-metrics/heapster-serviceaccount.yaml register: oex_heapster_serviceaccount failed_when: "'already exists' not in oex_heapster_serviceaccount.stderr and oex_heapster_serviceaccount.rc != 0" changed_when: false @@ -35,7 +35,7 @@ - name: Create Heapster Services command: > {{ openshift.common.client_binary }} create -f - /etc/openshift/cluster-metrics/heapster.yaml + /etc/origin/cluster-metrics/heapster.yaml register: oex_heapster_services failed_when: "'already exists' not in oex_heapster_services.stderr and oex_heapster_services.rc != 0" changed_when: false @@ -43,7 +43,7 @@ - name: Create Grafana Services command: > {{ openshift.common.client_binary }} create -f - /etc/openshift/cluster-metrics/grafana.yaml + /etc/origin/cluster-metrics/grafana.yaml register: oex_grafana_services failed_when: "'already exists' not in oex_grafana_services.stderr and oex_grafana_services.rc != 0" changed_when: false diff --git a/roles/openshift_common/tasks/main.yml b/roles/openshift_common/tasks/main.yml index ff8c3b50f..a2fcff507 100644 --- a/roles/openshift_common/tasks/main.yml +++ b/roles/openshift_common/tasks/main.yml @@ -8,7 +8,7 @@ when: openshift_use_openshift_sdn | default(false) | bool and openshift_use_nuage | default(false) | bool - fail: - msg: Nuage sdn can not be used with flannel + msg: Nuage sdn can not be used with flannel when: openshift_use_flannel | default(false) | bool and openshift_use_nuage | default(false) | bool - fail: @@ -29,7 +29,6 @@ use_openshift_sdn: "{{ openshift_use_openshift_sdn | default(None) }}" sdn_network_plugin_name: "{{ os_sdn_network_plugin_name | default(None) }}" deployment_type: "{{ openshift_deployment_type }}" - use_fluentd: "{{ openshift_use_fluentd | default(None) }}" use_flannel: "{{ openshift_use_flannel | default(None) }}" use_nuage: "{{ openshift_use_nuage | default(None) }}" use_manageiq: "{{ openshift_use_manageiq | default(None) }}" @@ -39,13 +38,17 @@ action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}{{ openshift_version | default('') }} state=present" when: not openshift.common.is_containerized | bool -- name: Set version facts +# This invocation also updates the version facts which are necessary +# for setting the hostname below. +- name: openshift_facts openshift_facts: + role: hosted + openshift_env: "{{ hostvars[inventory_hostname] | oo_openshift_env }}" - # For enterprise versions < 3.1 and origin versions < 1.1 we want to set the - # hostname by default. +# For enterprise versions < 3.1 and origin versions < 1.1 we want to set the +# hostname by default. 
- set_fact: - set_hostname_default: "{{ not openshift.common.version_greater_than_3_1_or_1_1 }}" + set_hostname_default: "{{ not openshift.common.version_gte_3_1_or_1_1 }}" - name: Set hostname command: > diff --git a/roles/openshift_docker/tasks/main.yml b/roles/openshift_docker/tasks/main.yml index 5a285e773..a57cf815e 100644 --- a/roles/openshift_docker/tasks/main.yml +++ b/roles/openshift_docker/tasks/main.yml @@ -10,6 +10,7 @@ docker_additional_registries: "{{ docker_additional_registries }}" docker_insecure_registries: "{{ docker_insecure_registries }}" docker_blocked_registries: "{{ docker_blocked_registries }}" + docker_options: "{{ openshift_docker_options | default('',True) }}" - role: node local_facts: portal_net: "{{ openshift_master_portal_net | default(None) }}" @@ -44,10 +45,11 @@ lineinfile: dest: /etc/sysconfig/docker regexp: '^OPTIONS=.*$' - line: "OPTIONS='--insecure-registry={{ openshift.node.portal_net }} \ - {% if ansible_selinux and ansible_selinux.status == '''enabled''' %}--selinux-enabled{% endif %} \ - {% if openshift.node.docker_log_driver is defined %} --log-driver {{ openshift.node.docker_log_driver }} {% endif %} \ - {% if openshift.node.docker_log_options is defined %} {{ openshift.node.docker_log_options | oo_split() | oo_prepend_strings_in_list('--log-opt ') | join(' ')}} {% endif %} '" + line: "OPTIONS='--insecure-registry={{ openshift.node.portal_net }}\ + {% if ansible_selinux and ansible_selinux.status == '''enabled''' %} --selinux-enabled{% endif %}\ + {% if openshift.node.docker_log_driver is defined %} --log-driver {{ openshift.node.docker_log_driver }}{% endif %}\ + {% if openshift.node.docker_log_options is defined %} {{ openshift.node.docker_log_options | oo_split() | oo_prepend_strings_in_list('--log-opt ') | join(' ')}}{% endif %}\ + {% if openshift.common.docker_options is defined %} {{ openshift.common.docker_options }}{% endif %}'" when: docker_check.stat.isreg notify: - restart openshift_docker diff --git a/roles/openshift_examples/defaults/main.yml b/roles/openshift_examples/defaults/main.yml index 6b9964aec..5e955590e 100644 --- a/roles/openshift_examples/defaults/main.yml +++ b/roles/openshift_examples/defaults/main.yml @@ -6,7 +6,7 @@ openshift_examples_load_db_templates: true openshift_examples_load_xpaas: "{{ openshift_deployment_type != 'origin' }}" openshift_examples_load_quickstarts: true -content_version: "{{ 'v1.1' if openshift.common.version_greater_than_3_1_or_1_1 else 'v1.0' }}" +content_version: "{{ 'v1.1' if openshift.common.version_gte_3_1_or_1_1 else 'v1.0' }}" examples_base: "{{ openshift.common.config_base if openshift.common.is_containerized else '/usr/share/openshift' }}/examples" image_streams_base: "{{ examples_base }}/image-streams" diff --git a/roles/openshift_examples/files/examples/v1.1/infrastructure-templates/enterprise/metrics-deployer.yaml b/roles/openshift_examples/files/examples/v1.1/infrastructure-templates/enterprise/metrics-deployer.yaml index ddd9f2f75..be999bd1a 100644 --- a/roles/openshift_examples/files/examples/v1.1/infrastructure-templates/enterprise/metrics-deployer.yaml +++ b/roles/openshift_examples/files/examples/v1.1/infrastructure-templates/enterprise/metrics-deployer.yaml @@ -85,7 +85,7 @@ parameters: - description: 'Specify version for metrics components; e.g. 
for "openshift/origin-metrics-deployer:v1.1", set version "v1.1"' name: IMAGE_VERSION - value: "3.1.0" + value: "3.1.1" - description: "Internal URL for the master, for authentication retrieval" name: MASTER_URL diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py index 40e54d706..0f25881f1 100755 --- a/roles/openshift_facts/library/openshift_facts.py +++ b/roles/openshift_facts/library/openshift_facts.py @@ -304,23 +304,6 @@ def normalize_provider_facts(provider, metadata): facts = normalize_openstack_facts(metadata, facts) return facts -def set_fluentd_facts_if_unset(facts): - """ Set fluentd facts if not already present in facts dict - dict: the facts dict updated with the generated fluentd facts if - missing - Args: - facts (dict): existing facts - Returns: - dict: the facts dict updated with the generated fluentd - facts if they were not already present - - """ - if 'common' in facts: - if 'use_fluentd' not in facts['common']: - use_fluentd = False - facts['common']['use_fluentd'] = use_fluentd - return facts - def set_flannel_facts_if_unset(facts): """ Set flannel facts if not already present in facts dict dict: the facts dict updated with the flannel facts if @@ -525,10 +508,11 @@ def set_url_facts_if_unset(facts): ports[prefix])) - r_lhn = "{0}:{1}".format(api_hostname, ports['api']).replace('.', '-') + r_lhn = "{0}:{1}".format(hostname, ports['api']).replace('.', '-') + r_lhu = "system:openshift-master/{0}:{1}".format(api_hostname, ports['api']).replace('.', '-') facts['master'].setdefault('loopback_cluster_name', r_lhn) facts['master'].setdefault('loopback_context_name', "default/{0}/system:openshift-master".format(r_lhn)) - facts['master'].setdefault('loopback_user', "system:openshift-master/{0}".format(r_lhn)) + facts['master'].setdefault('loopback_user', r_lhu) prefix_hosts = [('console', api_hostname), ('public_console', api_public_hostname)] for prefix, host in prefix_hosts: @@ -711,8 +695,8 @@ def set_deployment_facts_if_unset(facts): if 'node' in facts: deployment_type = facts['common']['deployment_type'] if 'storage_plugin_deps' not in facts['node']: - if deployment_type in ['openshift-enterprise', 'atomic-enterprise']: - facts['node']['storage_plugin_deps'] = ['ceph', 'glusterfs'] + if deployment_type in ['openshift-enterprise', 'atomic-enterprise', 'origin']: + facts['node']['storage_plugin_deps'] = ['ceph', 'glusterfs', 'iscsi'] else: facts['node']['storage_plugin_deps'] = [] @@ -720,7 +704,7 @@ def set_deployment_facts_if_unset(facts): def set_version_facts_if_unset(facts): """ Set version facts. This currently includes common.version and - common.version_greater_than_3_1_or_1_1. + common.version_gte_3_1_or_1_1. 
Args: facts (dict): existing facts @@ -732,16 +716,20 @@ def set_version_facts_if_unset(facts): facts['common']['version'] = version = get_openshift_version() if version is not None: if deployment_type == 'origin': - version_gt_3_1_or_1_1 = LooseVersion(version) > LooseVersion('1.0.6') - version_gt_3_1_1_or_1_1_1 = LooseVersion(version) > LooseVersion('1.1.1') + version_gte_3_1_or_1_1 = LooseVersion(version) >= LooseVersion('1.1.0') + version_gte_3_1_1_or_1_1_1 = LooseVersion(version) >= LooseVersion('1.1.1') + version_gte_3_2_or_1_2 = LooseVersion(version) >= LooseVersion('1.2.0') else: - version_gt_3_1_or_1_1 = LooseVersion(version) > LooseVersion('3.0.2.900') - version_gt_3_1_1_or_1_1_1 = LooseVersion(version) > LooseVersion('3.1.1') + version_gte_3_1_or_1_1 = LooseVersion(version) >= LooseVersion('3.0.2.905') + version_gte_3_1_1_or_1_1_1 = LooseVersion(version) >= LooseVersion('3.1.1') + version_gte_3_2_or_1_2 = LooseVersion(version) >= LooseVersion('3.1.1.901') else: - version_gt_3_1_or_1_1 = True - version_gt_3_1_1_or_1_1_1 = True - facts['common']['version_greater_than_3_1_or_1_1'] = version_gt_3_1_or_1_1 - facts['common']['version_greater_than_3_1_1_or_1_1_1'] = version_gt_3_1_1_or_1_1_1 + version_gte_3_1_or_1_1 = True + version_gte_3_1_1_or_1_1_1 = True + version_gte_3_2_or_1_2 = True + facts['common']['version_gte_3_1_or_1_1'] = version_gte_3_1_or_1_1 + facts['common']['version_gte_3_1_1_or_1_1_1'] = version_gte_3_1_1_or_1_1_1 + facts['common']['version_gte_3_2_or_1_2'] = version_gte_3_2_or_1_2 return facts @@ -756,12 +744,12 @@ def set_manageiq_facts_if_unset(facts): OpenShiftFactsInternalError: """ if 'common' not in facts: - if 'version_greater_than_3_1_or_1_1' not in facts['common']: + if 'version_gte_3_1_or_1_1' not in facts['common']: raise OpenShiftFactsInternalError( "Invalid invocation: The required facts are not set" ) if 'use_manageiq' not in facts['common']: - facts['common']['use_manageiq'] = facts['common']['version_greater_than_3_1_or_1_1'] + facts['common']['use_manageiq'] = facts['common']['version_gte_3_1_or_1_1'] return facts @@ -928,41 +916,79 @@ def apply_provider_facts(facts, provider_facts): facts['provider'] = provider_facts return facts - -def merge_facts(orig, new, additive_facts_to_overwrite): +# Disabling pylint too many branches. This function needs refactored +# but is a very core part of openshift_facts. +# pylint: disable=too-many-branches +def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overwrite): """ Recursively merge facts dicts Args: orig (dict): existing facts new (dict): facts to update - additive_facts_to_overwrite (list): additive facts to overwrite in jinja '.' notation ex: ['master.named_certificates'] + protected_facts_to_overwrite (list): protected facts to overwrite in jinja + '.' notation ex: ['master.master_count'] Returns: dict: the merged facts """ additive_facts = ['named_certificates'] + protected_facts = ['ha', 'master_count'] facts = dict() for key, value in orig.iteritems(): + # Key exists in both old and new facts. if key in new: + # Continue to recurse if old and new fact is a dictionary. if isinstance(value, dict) and isinstance(new[key], dict): + # Collect the subset of additive facts to overwrite if + # key matches. These will be passed to the subsequent + # merge_facts call. relevant_additive_facts = [] - # Keep additive_facts_to_overwrite if key matches for item in additive_facts_to_overwrite: if '.' 
in item and item.startswith(key + '.'): relevant_additive_facts.append(item) - facts[key] = merge_facts(value, new[key], relevant_additive_facts) + + # Collect the subset of protected facts to overwrite + # if key matches. These will be passed to the + # subsequent merge_facts call. + relevant_protected_facts = [] + for item in protected_facts_to_overwrite: + if '.' in item and item.startswith(key + '.'): + relevant_protected_facts.append(item) + facts[key] = merge_facts(value, new[key], relevant_additive_facts, relevant_protected_facts) + # Key matches an additive fact and we are not overwriting + # it so we will append the new value to the existing value. elif key in additive_facts and key not in [x.split('.')[-1] for x in additive_facts_to_overwrite]: - # Fact is additive so we'll combine orig and new. if isinstance(value, list) and isinstance(new[key], list): new_fact = [] - for item in copy.deepcopy(value) + copy.copy(new[key]): + for item in copy.deepcopy(value) + copy.deepcopy(new[key]): if item not in new_fact: new_fact.append(item) facts[key] = new_fact + # Key matches a protected fact and we are not overwriting + # it so we will determine if it is okay to change this + # fact. + elif key in protected_facts and key not in [x.split('.')[-1] for x in protected_facts_to_overwrite]: + # The master count (int) can only increase unless it + # has been passed as a protected fact to overwrite. + if key == 'master_count': + if int(value) <= int(new[key]): + facts[key] = copy.deepcopy(new[key]) + else: + module.fail_json(msg='openshift_facts received a lower value for openshift.master.master_count') + # ha (bool) can not change unless it has been passed + # as a protected fact to overwrite. + if key == 'ha': + if bool(value) != bool(new[key]): + module.fail_json(msg='openshift_facts received a different value for openshift.master.ha') + else: + facts[key] = value + # No other condition has been met. Overwrite the old fact + # with the new value. else: - facts[key] = copy.copy(new[key]) + facts[key] = copy.deepcopy(new[key]) + # Key isn't in new so add it to facts to keep it. else: facts[key] = copy.deepcopy(value) new_keys = set(new.keys()) - set(orig.keys()) @@ -1070,6 +1096,28 @@ def set_container_facts_if_unset(facts): return facts +def set_installed_variant_rpm_facts(facts): + """ Set RPM facts of installed variant + Args: + facts (dict): existing facts + Returns: + dict: the facts dict updated with installed_variant_rpms + """ + installed_rpms = [] + for base_rpm in ['openshift', 'atomic-openshift', 'origin']: + optional_rpms = ['master', 'node', 'clients', 'sdn-ovs'] + variant_rpms = [base_rpm] + \ + ['{0}-{1}'.format(base_rpm, r) for r in optional_rpms] + \ + ['tuned-profiles-%s-node' % base_rpm] + for rpm in variant_rpms: + exit_code, _, _ = module.run_command(['rpm', '-q', rpm]) + if exit_code == 0: + installed_rpms.append(rpm) + + facts['common']['installed_variant_rpms'] = installed_rpms + return facts + + class OpenShiftFactsInternalError(Exception): """Origin Facts Error""" @@ -1104,13 +1152,20 @@ class OpenShiftFacts(object): local_facts (dict): local facts to set additive_facts_to_overwrite (list): additive facts to overwrite in jinja '.' notation ex: ['master.named_certificates'] + protected_facts_to_overwrite (list): protected facts to overwrite in jinja + '.' 
notation ex: ['master.master_count'] Raises: OpenShiftFactsUnsupportedRoleError: """ - known_roles = ['common', 'master', 'node', 'etcd', 'nfs'] - - def __init__(self, role, filename, local_facts, additive_facts_to_overwrite=False): + known_roles = ['common', 'master', 'node', 'etcd', 'hosted'] + + # Disabling too-many-arguments, this should be cleaned up as a TODO item. + # pylint: disable=too-many-arguments + def __init__(self, role, filename, local_facts, + additive_facts_to_overwrite=None, + openshift_env=None, + protected_facts_to_overwrite=None): self.changed = False self.filename = filename if role not in self.known_roles: @@ -1119,31 +1174,44 @@ class OpenShiftFacts(object): ) self.role = role self.system_facts = ansible_facts(module) - self.facts = self.generate_facts(local_facts, additive_facts_to_overwrite) - - def generate_facts(self, local_facts, additive_facts_to_overwrite): + self.facts = self.generate_facts(local_facts, + additive_facts_to_overwrite, + openshift_env, + protected_facts_to_overwrite) + + def generate_facts(self, + local_facts, + additive_facts_to_overwrite, + openshift_env, + protected_facts_to_overwrite): """ Generate facts Args: - local_facts (dict): local_facts for overriding generated - defaults + local_facts (dict): local_facts for overriding generated defaults additive_facts_to_overwrite (list): additive facts to overwrite in jinja '.' notation ex: ['master.named_certificates'] - + openshift_env (dict): openshift_env facts for overriding generated defaults + protected_facts_to_overwrite (list): protected facts to overwrite in jinja + '.' notation ex: ['master.master_count'] Returns: dict: The generated facts """ - local_facts = self.init_local_facts(local_facts, additive_facts_to_overwrite) + local_facts = self.init_local_facts(local_facts, + additive_facts_to_overwrite, + openshift_env, + protected_facts_to_overwrite) roles = local_facts.keys() defaults = self.get_defaults(roles) provider_facts = self.init_provider_facts() facts = apply_provider_facts(defaults, provider_facts) - facts = merge_facts(facts, local_facts, additive_facts_to_overwrite) + facts = merge_facts(facts, + local_facts, + additive_facts_to_overwrite, + protected_facts_to_overwrite) facts['current_config'] = get_current_config(facts) facts = set_url_facts_if_unset(facts) facts = set_project_cfg_facts_if_unset(facts) - facts = set_fluentd_facts_if_unset(facts) facts = set_flannel_facts_if_unset(facts) facts = set_nuage_facts_if_unset(facts) facts = set_node_schedulability(facts) @@ -1157,6 +1225,8 @@ class OpenShiftFacts(object): facts = set_aggregate_facts(facts) facts = set_etcd_facts_if_unset(facts) facts = set_container_facts_if_unset(facts) + if not facts['common']['is_containerized']: + facts = set_installed_variant_rpm_facts(facts) return dict(openshift=facts) def get_defaults(self, roles): @@ -1205,10 +1275,23 @@ class OpenShiftFacts(object): iptables_sync_period='5s', set_node_ip=False) defaults['node'] = node - if 'nfs' in roles: - nfs = dict(exports_dir='/var/export', registry_volume='regvol', - export_options='*(rw,sync,all_squash)') - defaults['nfs'] = nfs + defaults['hosted'] = dict( + registry=dict( + storage=dict( + kind=None, + volume=dict( + name='registry', + size='5Gi' + ), + nfs=dict( + directory='/exports', + options='*(rw,root_squash)'), + host=None, + access_modes=['ReadWriteMany'], + create_pv=True + ) + ) + ) return defaults @@ -1287,23 +1370,52 @@ class OpenShiftFacts(object): ) return provider_facts - def init_local_facts(self, facts=None, 
additive_facts_to_overwrite=False): + # Disabling too-many-branches. This should be cleaned up as a TODO item. + #pylint: disable=too-many-branches + def init_local_facts(self, facts=None, + additive_facts_to_overwrite=None, + openshift_env=None, + protected_facts_to_overwrite=None): """ Initialize the provider facts Args: facts (dict): local facts to set additive_facts_to_overwrite (list): additive facts to overwrite in jinja '.' notation ex: ['master.named_certificates'] + openshift_env (dict): openshift env facts to set + protected_facts_to_overwrite (list): protected facts to overwrite in jinja + '.' notation ex: ['master.master_count'] + Returns: dict: The result of merging the provided facts with existing local facts """ changed = False - facts_to_set = {self.role: dict()} + + facts_to_set = dict() + if facts is not None: facts_to_set[self.role] = facts + if openshift_env != {} and openshift_env != None: + for fact, value in openshift_env.iteritems(): + oo_env_facts = dict() + current_level = oo_env_facts + keys = fact.split('_')[1:] + if keys[0] != self.role: + continue + for key in keys: + if key == keys[-1]: + current_level[key] = value + elif key not in current_level: + current_level[key] = dict() + current_level = current_level[key] + facts_to_set = merge_facts(orig=facts_to_set, + new=oo_env_facts, + additive_facts_to_overwrite=[], + protected_facts_to_overwrite=[]) + local_facts = get_local_facts_from_file(self.filename) for arg in ['labels', 'annotations']: @@ -1311,14 +1423,18 @@ class OpenShiftFacts(object): basestring): facts_to_set[arg] = module.from_json(facts_to_set[arg]) - new_local_facts = merge_facts(local_facts, facts_to_set, additive_facts_to_overwrite) + new_local_facts = merge_facts(local_facts, + facts_to_set, + additive_facts_to_overwrite, + protected_facts_to_overwrite) for facts in new_local_facts.values(): keys_to_delete = [] - for fact, value in facts.iteritems(): - if value == "" or value is None: - keys_to_delete.append(fact) - for key in keys_to_delete: - del facts[key] + if isinstance(facts, dict): + for fact, value in facts.iteritems(): + if value == "" or value is None: + keys_to_delete.append(fact) + for key in keys_to_delete: + del facts[key] if new_local_facts != local_facts: self.validate_local_facts(new_local_facts) @@ -1406,6 +1522,8 @@ def main(): choices=OpenShiftFacts.known_roles), local_facts=dict(default=None, type='dict', required=False), additive_facts_to_overwrite=dict(default=[], type='list', required=False), + openshift_env=dict(default={}, type='dict', required=False), + protected_facts_to_overwrite=dict(default=[], type='list', required=False), ), supports_check_mode=True, add_file_common_args=True, @@ -1414,9 +1532,17 @@ def main(): role = module.params['role'] local_facts = module.params['local_facts'] additive_facts_to_overwrite = module.params['additive_facts_to_overwrite'] + openshift_env = module.params['openshift_env'] + protected_facts_to_overwrite = module.params['protected_facts_to_overwrite'] + fact_file = '/etc/ansible/facts.d/openshift.fact' - openshift_facts = OpenShiftFacts(role, fact_file, local_facts, additive_facts_to_overwrite) + openshift_facts = OpenShiftFacts(role, + fact_file, + local_facts, + additive_facts_to_overwrite, + openshift_env, + protected_facts_to_overwrite) file_params = module.params.copy() file_params['path'] = fact_file diff --git a/roles/openshift_manage_node/tasks/main.yml b/roles/openshift_manage_node/tasks/main.yml index 06f12053a..cee1f1738 100644 --- 
a/roles/openshift_manage_node/tasks/main.yml +++ b/roles/openshift_manage_node/tasks/main.yml @@ -3,7 +3,7 @@ {{ openshift.common.client_binary }} get node {{ item | lower }} register: omd_get_node until: omd_get_node.rc == 0 - retries: 20 + retries: 50 delay: 5 changed_when: false with_items: openshift_nodes diff --git a/roles/openshift_manageiq/tasks/main.yaml b/roles/openshift_manageiq/tasks/main.yaml index 0357fc85a..d2ff1b4b7 100644 --- a/roles/openshift_manageiq/tasks/main.yaml +++ b/roles/openshift_manageiq/tasks/main.yaml @@ -1,7 +1,7 @@ --- - fail: msg: "The openshift_manageiq role requires OpenShift Enterprise 3.1 or Origin 1.1." - when: not openshift.common.version_greater_than_3_1_or_1_1 | bool + when: not openshift.common.version_gte_3_1_or_1_1 | bool - name: Copy Configuration to temporary conf command: > @@ -9,7 +9,7 @@ changed_when: false - name: Add Managment Infrastructure project - command: > + command: > {{ openshift.common.admin_binary }} new-project management-infra --description="Management Infrastructure" @@ -20,9 +20,9 @@ - name: Create Service Account shell: > - echo {{ manageiq_service_account | to_json | quote }} | - {{ openshift.common.client_binary }} create - -n management-infra + echo {{ manageiq_service_account | to_json | quote }} | + {{ openshift.common.client_binary }} create + -n management-infra --config={{manage_iq_tmp_conf}} -f - register: osmiq_create_service_account @@ -32,7 +32,7 @@ - name: Create Cluster Role shell: > echo {{ manageiq_cluster_role | to_json | quote }} | - {{ openshift.common.client_binary }} create + {{ openshift.common.client_binary }} create --config={{manage_iq_tmp_conf}} -f - register: osmiq_create_cluster_role diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml index aa5e593b6..dd66eeebb 100644 --- a/roles/openshift_master/tasks/main.yml +++ b/roles/openshift_master/tasks/main.yml @@ -15,7 +15,7 @@ when: openshift_master_ha | bool and ((openshift_master_cluster_method is not defined) or (openshift_master_cluster_method is defined and openshift_master_cluster_method not in ["native", "pacemaker"])) - fail: msg: "'native' high availability is not supported for the requested OpenShift version" - when: openshift_master_ha | bool and openshift_master_cluster_method == "native" and not openshift.common.version_greater_than_3_1_or_1_1 | bool + when: openshift_master_ha | bool and openshift_master_cluster_method == "native" and not openshift.common.version_gte_3_1_or_1_1 | bool - fail: msg: "openshift_master_cluster_password must be set for multi-master installations" when: openshift_master_ha | bool and openshift_master_cluster_method == "pacemaker" and (openshift_master_cluster_password is not defined or not openshift_master_cluster_password) @@ -221,6 +221,9 @@ template: dest: "{{ openshift.master.session_secrets_file }}" src: sessionSecretsFile.yaml.v1.j2 + owner: root + group: root + mode: 0600 when: openshift.master.session_auth_secrets is defined and openshift.master.session_encryption_secrets is defined notify: - restart master @@ -235,41 +238,16 @@ dest: "{{ openshift_master_config_file }}" src: master.yaml.v1.j2 backup: true + owner: root + group: root + mode: 0600 notify: - restart master - restart master api - restart master controllers -- name: Test local loopback context - command: > - {{ openshift.common.client_binary }} config view - --config={{ openshift_master_loopback_config }} - changed_when: false - register: loopback_config - -- command: > - {{ 
openshift.common.client_binary }} config set-cluster - --certificate-authority={{ openshift_master_config_dir }}/ca.crt - --embed-certs=true --server={{ openshift.master.loopback_api_url }} - {{ openshift.master.loopback_cluster_name }} - --config={{ openshift_master_loopback_config }} - when: loopback_context_string not in loopback_config.stdout - register: set_loopback_cluster - -- command: > - {{ openshift.common.client_binary }} config set-context - --cluster={{ openshift.master.loopback_cluster_name }} - --namespace=default --user={{ openshift.master.loopback_user }} - {{ openshift.master.loopback_context_name }} - --config={{ openshift_master_loopback_config }} - when: set_loopback_cluster | changed - register: set_loopback_context - -- command: > - {{ openshift.common.client_binary }} config use-context {{ openshift.master.loopback_context_name }} - --config={{ openshift_master_loopback_config }} - when: set_loopback_context | changed - register: set_current_context +- include: set_loopback_context.yml + when: openshift.common.version_gte_3_2_or_1_2 - name: Start and enable master service: name={{ openshift.common.service_type }}-master enabled=yes state=started @@ -285,6 +263,10 @@ master_service_status_changed: "{{ start_result | changed }}" when: not openshift_master_ha | bool +- name: Mask master service + command: systemctl mask {{ openshift.common.service_type }}-master + when: openshift_master_ha | bool and openshift.master.cluster_method == 'native' and not openshift.common.is_containerized | bool + - name: Start and enable master api service: name={{ openshift.common.service_type }}-master-api enabled=yes state=started when: openshift_master_ha | bool and openshift.master.cluster_method == 'native' diff --git a/roles/openshift_master/tasks/set_loopback_context.yml b/roles/openshift_master/tasks/set_loopback_context.yml new file mode 100644 index 000000000..9c3fb31dc --- /dev/null +++ b/roles/openshift_master/tasks/set_loopback_context.yml @@ -0,0 +1,31 @@ +--- +- name: Test local loopback context + command: > + {{ openshift.common.client_binary }} config view + --config={{ openshift_master_loopback_config }} + changed_when: false + register: loopback_config + +- command: > + {{ openshift.common.client_binary }} config set-cluster + --certificate-authority={{ openshift_master_config_dir }}/ca.crt + --embed-certs=true --server={{ openshift.master.loopback_api_url }} + {{ openshift.master.loopback_cluster_name }} + --config={{ openshift_master_loopback_config }} + when: loopback_context_string not in loopback_config.stdout + register: set_loopback_cluster + +- command: > + {{ openshift.common.client_binary }} config set-context + --cluster={{ openshift.master.loopback_cluster_name }} + --namespace=default --user={{ openshift.master.loopback_user }} + {{ openshift.master.loopback_context_name }} + --config={{ openshift_master_loopback_config }} + when: set_loopback_cluster | changed + register: set_loopback_context + +- command: > + {{ openshift.common.client_binary }} config use-context {{ openshift.master.loopback_context_name }} + --config={{ openshift_master_loopback_config }} + when: set_loopback_context | changed + register: set_current_context diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2 index 1eeab46fe..813a58d60 100644 --- a/roles/openshift_master/templates/master.yaml.v1.j2 +++ b/roles/openshift_master/templates/master.yaml.v1.j2 @@ -1,5 +1,5 @@ apiLevels: -{% if not 
openshift.common.version_greater_than_3_1_or_1_1 | bool %} +{% if not openshift.common.version_gte_3_1_or_1_1 | bool %} - v1beta3 {% endif %} - v1 @@ -91,7 +91,7 @@ kubeletClientInfo: port: 10250 {% if openshift.master.embedded_kube | bool %} kubernetesMasterConfig: -{% if not openshift.common.version_greater_than_3_1_or_1_1 | bool %} +{% if not openshift.common.version_gte_3_1_or_1_1 | bool %} apiLevels: - v1beta3 - v1 diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2 index e6e97b24f..b393bb9ff 100644 --- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2 +++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2 @@ -7,7 +7,7 @@ Before={{ openshift.common.service_type }}-node.service Requires=network.target [Service] -{% if openshift.common.version_greater_than_3_1_1_or_1_1_1 | bool %} +{% if openshift.common.version_gte_3_1_1_or_1_1_1 | bool %} Type=notify {% else %} Type=simple diff --git a/roles/openshift_master_ca/tasks/main.yml b/roles/openshift_master_ca/tasks/main.yml index 6d9be81c0..66960e73e 100644 --- a/roles/openshift_master_ca/tasks/main.yml +++ b/roles/openshift_master_ca/tasks/main.yml @@ -25,4 +25,4 @@ --master={{ openshift.master.api_url }} --public-master={{ openshift.master.public_api_url }} --cert-dir={{ openshift_master_config_dir }} --overwrite=false - when: master_certs_missing + when: master_certs_missing | bool diff --git a/roles/openshift_master_certificates/tasks/main.yml b/roles/openshift_master_certificates/tasks/main.yml index 36d953111..72869a592 100644 --- a/roles/openshift_master_certificates/tasks/main.yml +++ b/roles/openshift_master_certificates/tasks/main.yml @@ -6,40 +6,16 @@ mode: 0700 with_items: masters_needing_certs -- set_fact: - master_certificates: - - ca.crt - - ca.key - - ca.serial.txt - - admin.crt - - admin.key - - admin.kubeconfig - - master.kubelet-client.crt - - master.kubelet-client.key - - master.server.crt - - master.server.key - - openshift-master.crt - - openshift-master.key - - openshift-master.kubeconfig - - openshift-registry.crt - - openshift-registry.key - - openshift-registry.kubeconfig - - openshift-router.crt - - openshift-router.key - - openshift-router.kubeconfig - - serviceaccounts.private.key - - serviceaccounts.public.key - master_31_certificates: - - master.proxy-client.crt - - master.proxy-client.key - - file: src: "{{ openshift_master_config_dir }}/{{ item.1 }}" dest: "{{ openshift_generated_configs_dir }}/{{ item.0.master_cert_subdir }}/{{ item.1 }}" state: hard with_nested: - masters_needing_certs - - "{{ master_certificates | union(master_31_certificates) if openshift.common.version_greater_than_3_1_or_1_1 | bool else master_certificates }}" + - + - ca.crt + - ca.key + - ca.serial.txt - name: Create the master certificates if they do not already exist command: > @@ -49,5 +25,5 @@ --public-master={{ item.openshift.master.public_api_url }} --cert-dir={{ openshift_generated_configs_dir }}/{{ item.master_cert_subdir }} --overwrite=false - when: master_certs_missing + when: item.master_certs_missing | bool with_items: masters_needing_certs diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml index 9035248f9..43253d72b 100644 --- a/roles/openshift_node/tasks/main.yml +++ b/roles/openshift_node/tasks/main.yml @@ -17,7 +17,7 @@ # TODO: Replace this 
with a lookup or filter plugin. dns_ip: "{{ openshift_dns_ip | default(openshift_master_cluster_vip - | default(None if openshift.common.version_greater_than_3_1_or_1_1 | bool else openshift_node_first_master_ip | default(None, true), true), true) }}" + | default(None if openshift.common.version_gte_3_1_or_1_1 | bool else openshift_node_first_master_ip | default(None, true), true), true) }}" - role: node local_facts: annotations: "{{ openshift_node_annotations | default(none) }}" @@ -33,6 +33,7 @@ set_node_ip: "{{ openshift_set_node_ip | default(None) }}" node_image: "{{ osn_image | default(None) }}" ovs_image: "{{ osn_ovs_image | default(None) }}" + proxy_mode: "{{ openshift_node_proxy_mode | default('iptables') }}" # We have to add tuned-profiles in the same transaction otherwise we run into depsolving # problems because the rpms don't pin the version properly. This was fixed in 3.1 packaging. @@ -83,6 +84,9 @@ dest: "{{ openshift_node_config_file }}" src: node.yaml.v1.j2 backup: true + owner: root + group: root + mode: 0600 notify: - restart node diff --git a/roles/openshift_node/tasks/storage_plugins/iscsi.yml b/roles/openshift_node/tasks/storage_plugins/iscsi.yml new file mode 100644 index 000000000..d6684b34a --- /dev/null +++ b/roles/openshift_node/tasks/storage_plugins/iscsi.yml @@ -0,0 +1,4 @@ +--- +- name: Install iSCSI storage plugin dependencies + action: "{{ ansible_pkg_mgr }} name=iscsi-initiator-utils state=present" + when: not openshift.common.is_atomic | bool diff --git a/roles/openshift_node/tasks/storage_plugins/main.yml b/roles/openshift_node/tasks/storage_plugins/main.yml index 39c7b9390..fe638718d 100644 --- a/roles/openshift_node/tasks/storage_plugins/main.yml +++ b/roles/openshift_node/tasks/storage_plugins/main.yml @@ -11,3 +11,7 @@ - name: Ceph storage plugin configuration include: ceph.yml when: "'ceph' in openshift.node.storage_plugin_deps" + +- name: iSCSI storage plugin configuration + include: iscsi.yml + when: "'iscsi' in openshift.node.storage_plugin_deps" diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2 index 44065f4bd..67975d372 100644 --- a/roles/openshift_node/templates/node.yaml.v1.j2 +++ b/roles/openshift_node/templates/node.yaml.v1.j2 @@ -35,3 +35,6 @@ servingInfo: keyFile: server.key volumeDirectory: {{ openshift.common.data_dir }}/openshift.local.volumes {% include 'partials/kubeletArguments.j2' %} +proxyArguments: + proxy-mode: + - {{ openshift.node.proxy_mode }} diff --git a/roles/openshift_node/templates/openshift.docker.node.service b/roles/openshift_node/templates/openshift.docker.node.service index 7a11a10fa..df3e0a44a 100644 --- a/roles/openshift_node/templates/openshift.docker.node.service +++ b/roles/openshift_node/templates/openshift.docker.node.service @@ -11,7 +11,7 @@ PartOf=docker.service [Service] EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type }}-node -ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host -v /:/rootfs:ro -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:ro -v /usr/bin/docker:/usr/bin/docker:ro -v 
/var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /etc/systemd/system:/host-etc/systemd/system {{ openshift.node.node_image }} +ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host -v /:/rootfs:ro -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:ro -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log {{ openshift.node.node_image }} ExecStartPost=/usr/bin/sleep 10 ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-node Restart=always diff --git a/roles/openshift_persistent_volumes/README.md b/roles/openshift_persistent_volumes/README.md new file mode 100644 index 000000000..34ae89536 --- /dev/null +++ b/roles/openshift_persistent_volumes/README.md @@ -0,0 +1,60 @@ +OpenShift NFS Server +==================== + +OpenShift Persistent Volumes + +Requirements +------------ + +Role Variables +-------------- + +From this role: +| Name | Default value | | +|--------------------------|---------------|-------------------------------------------------------------------------------------| +| persistent_volumes | [] | List of persistent volume dictionaries, keys: name, capacity, access_modes, storage | +| persistent_volume_claims | [] | List of persistent volume claim dictionaries, keys: name, capacity, access_modes | + + +From openshift_common: +| Name | Default Value | | +|-------------------------------|----------------|----------------------------------------| +| openshift_debug_level | 2 | Global openshift debug log verbosity | + + +Dependencies +------------ + + +Example Playbook +---------------- + +- name: Create persistent volumes/claims + hosts: oo_first_master + vars: + persistent_volumes: + - name: "registry-volume" + capacity: "5Gi" + access_modes: + - "ReadWriteMany" + storage: + nfs: + server: "nfs.example.com" + path: "/var/exports/registry" + persistent_volume_claims: + - name: "registry-claim" + capacity: "5Gi" + access_modes: + - "ReadWriteMany" + roles: + - role: openshift_persistent_volumes + +License +------- + +Apache License, Version 2.0 + +Author Information +------------------ + +Andrew Butcher (abutcher@redhat.com) diff --git a/roles/openshift_persistent_volumes/meta/main.yml b/roles/openshift_persistent_volumes/meta/main.yml new file mode 100644 index 000000000..d9f6fc01a --- /dev/null +++ b/roles/openshift_persistent_volumes/meta/main.yml @@ -0,0 +1,13 @@ +--- +galaxy_info: + author: Andrew Butcher + description: OpenShift Persistent Volumes + company: Red Hat, Inc. 
+ license: Apache License, Version 2.0 + min_ansible_version: 1.9 + platforms: + - name: EL + versions: + - 7 +dependencies: +- { role: openshift_common } diff --git a/roles/openshift_persistent_volumes/tasks/main.yml b/roles/openshift_persistent_volumes/tasks/main.yml new file mode 100644 index 000000000..e431e978c --- /dev/null +++ b/roles/openshift_persistent_volumes/tasks/main.yml @@ -0,0 +1,50 @@ +--- +- name: Create temp directory for volume definitions + command: mktemp -d /tmp/openshift-ansible-XXXXXXX + register: mktemp + changed_when: False + +- name: Copy the admin client config(s) + command: > + cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig + changed_when: False + +- name: Deploy PersistentVolume definitions + template: + dest: "{{ mktemp.stdout }}/persistent-volumes.yml" + src: persistent-volume.yml.j2 + when: persistent_volumes | length > 0 + changed_when: False + +- name: Create PersistentVolumes + command: > + {{ openshift.common.client_binary }} create + -f {{ mktemp.stdout }}/persistent-volumes.yml + --config={{ mktemp.stdout }}/admin.kubeconfig + register: pv_create_output + when: persistent_volumes | length > 0 + failed_when: ('already exists' not in pv_create_output.stderr) and ('created' not in pv_create_output.stdout) + changed_when: ('created' in pv_create_output.stdout) + +- name: Deploy PersistentVolumeClaim definitions + template: + dest: "{{ mktemp.stdout }}/persistent-volume-claims.yml" + src: persistent-volume-claim.yml.j2 + when: persistent_volume_claims | length > 0 + changed_when: False + +- name: Create PersistentVolumeClaims + command: > + {{ openshift.common.client_binary }} create + -f {{ mktemp.stdout }}/persistent-volume-claims.yml + --config={{ mktemp.stdout }}/admin.kubeconfig + register: pvc_create_output + when: persistent_volume_claims | length > 0 + failed_when: ('already exists' not in pvc_create_output.stderr) and ('created' not in pvc_create_output.stdout) + changed_when: ('created' in pvc_create_output.stdout) + +- name: Delete temp directory + file: + name: "{{ mktemp.stdout }}" + state: absent + changed_when: False diff --git a/roles/openshift_persistent_volumes/templates/persistent-volume-claim.yml.j2 b/roles/openshift_persistent_volumes/templates/persistent-volume-claim.yml.j2 new file mode 100644 index 000000000..d40417a9a --- /dev/null +++ b/roles/openshift_persistent_volumes/templates/persistent-volume-claim.yml.j2 @@ -0,0 +1,15 @@ +--- +apiVersion: "v1" +kind: "List" +items: +{% for claim in persistent_volume_claims %} +- apiVersion: "v1" + kind: "PersistentVolumeClaim" + metadata: + name: "{{ claim.name }}" + spec: + accessModes: {{ claim.access_modes | to_padded_yaml(2, 2) }} + resources: + requests: + storage: "{{ claim.capacity }}" +{% endfor %} diff --git a/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2 b/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2 new file mode 100644 index 000000000..877e88002 --- /dev/null +++ b/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2 @@ -0,0 +1,15 @@ +--- +apiVersion: v1 +kind: List +items: +{% for volume in persistent_volumes %} +- apiVersion: v1 + kind: PersistentVolume + metadata: + name: "{{ volume.name }}" + spec: + capacity: + storage: "{{ volume.capacity }}" + accessModes: {{ volume.access_modes | to_padded_yaml(2, 2) }} + {{ volume.storage.keys()[0] }}: {{ volume.storage[volume.storage.keys()[0]] | to_padded_yaml(3, 2) }} +{% endfor %} diff --git 
a/roles/openshift_persistent_volumes/vars/main.yml b/roles/openshift_persistent_volumes/vars/main.yml new file mode 100644 index 000000000..9967e26f4 --- /dev/null +++ b/roles/openshift_persistent_volumes/vars/main.yml @@ -0,0 +1,2 @@ +--- +openshift_master_config_dir: "{{ openshift.common.config_base }}/master" diff --git a/roles/openshift_registry/README.md b/roles/openshift_registry/README.md index 8e66c483b..247272668 100644 --- a/roles/openshift_registry/README.md +++ b/roles/openshift_registry/README.md @@ -17,12 +17,6 @@ From this role: |--------------------|-------------------------------------------------------|---------------------| | | | | -From openshift_common: - -| Name | Default value | | -|-----------------------|---------------|--------------------------------------| -| openshift_debug_level | 2 | Global openshift debug log verbosity | - Dependencies ------------ diff --git a/roles/openshift_registry/defaults/main.yml b/roles/openshift_registry/defaults/main.yml new file mode 100644 index 000000000..17a0d5301 --- /dev/null +++ b/roles/openshift_registry/defaults/main.yml @@ -0,0 +1,2 @@ +--- +registry_volume_claim: 'registry-claim' diff --git a/roles/openshift_registry/meta/main.yml b/roles/openshift_registry/meta/main.yml index 93b6797d1..b220a020e 100644 --- a/roles/openshift_registry/meta/main.yml +++ b/roles/openshift_registry/meta/main.yml @@ -4,10 +4,12 @@ galaxy_info: description: OpenShift Embedded Docker Registry company: Red Hat, Inc. license: Apache License, Version 2.0 - min_ansible_version: 1.7 + min_ansible_version: 1.9 platforms: - name: EL versions: - 7 categories: - cloud + dependencies: + - openshift_facts diff --git a/roles/openshift_registry/tasks/main.yml b/roles/openshift_registry/tasks/main.yml index 2804e8f2e..1eeec2fbb 100644 --- a/roles/openshift_registry/tasks/main.yml +++ b/roles/openshift_registry/tasks/main.yml @@ -1,28 +1,24 @@ --- -- set_fact: _oreg_images="--images='{{ openshift.master.registry_url }}'" - -- set_fact: _oreg_selector="--selector='{{ openshift.master.registry_selector }}'" - - name: Deploy OpenShift Registry command: > {{ openshift.common.admin_binary }} registry - --create --service-account=registry {{ _oreg_selector }} - --credentials={{ openshift_master_config_dir }}/openshift-registry.kubeconfig {{ _oreg_images }} - register: _oreg_results - changed_when: "'service exists' not in _oreg_results.stdout" + --create --replicas={{ openshift.master.infra_nodes | length }} + --service-account=registry {{ oreg_selector }} + --credentials={{ openshift_master_config_dir }}/openshift-registry.kubeconfig {{ oreg_images }} + register: oreg_results + changed_when: "'service exists' not in oreg_results.stdout" -- name: Determine if nfs volume is already attached +- name: Determine if volume is already attached to dc/docker-registry command: "{{ openshift.common.client_binary }} get -o template dc/docker-registry --template=\\{\\{.spec.template.spec.volumes\\}\\}" + changed_when: false register: registry_volumes_output - when: attach_registry_volume | bool - set_fact: - volume_already_attached: "{{ 'server:' + nfs_host in registry_volumes_output.stdout and 'path:' + registry_volume_path in registry_volumes_output.stdout }}" - when: attach_registry_volume | bool + volume_attached: "{{ registry_volume_claim in registry_volumes_output.stdout }}" -- name: Add nfs volume to dc/docker-registry +- name: Add volume to dc/docker-registry command: > {{ openshift.common.client_binary }} volume dc/docker-registry - --add --overwrite 
--name=registry-storage --mount-path=/registry - --source='{"nfs": {"server": "{{ nfs_host }}", "path": "{{ registry_volume_path }}"}}' - when: attach_registry_volume | bool and not volume_already_attached | bool + --add --overwrite -t persistentVolumeClaim --claim-name={{ registry_volume_claim }} + --name=registry-storage + when: not volume_attached | bool diff --git a/roles/openshift_registry/vars/main.yml b/roles/openshift_registry/vars/main.yml index 9967e26f4..306350a5a 100644 --- a/roles/openshift_registry/vars/main.yml +++ b/roles/openshift_registry/vars/main.yml @@ -1,2 +1,4 @@ --- openshift_master_config_dir: "{{ openshift.common.config_base }}/master" +oreg_images: "--images='{{ openshift.master.registry_url }}'" +oreg_selector: "--selector='{{ openshift.master.registry_selector }}'" diff --git a/roles/openshift_repos/tasks/main.yaml b/roles/openshift_repos/tasks/main.yaml index 8a75639c2..6143805ca 100644 --- a/roles/openshift_repos/tasks/main.yaml +++ b/roles/openshift_repos/tasks/main.yaml @@ -46,19 +46,19 @@ with_fileglob: - '*/repos/*' when: not (item | search("/files/fedora-" ~ openshift_deployment_type ~ "/repos")) and - (ansible_distribution == "Fedora") + (ansible_distribution == "Fedora") and not openshift.common.is_containerized | bool notify: refresh cache - name: Configure gpg keys if needed - copy: src={{ item }} dest=/etc/pki/rpm-gpg/ + copy: src="{{ item }}" dest=/etc/pki/rpm-gpg/ with_fileglob: - "{{ openshift_deployment_type }}/gpg_keys/*" notify: refresh cache when: not openshift.common.is_containerized | bool - name: Configure yum repositories RHEL/CentOS - copy: src={{ item }} dest=/etc/yum.repos.d/ + copy: src="{{ item }}" dest=/etc/yum.repos.d/ with_fileglob: - "{{ openshift_deployment_type }}/repos/*" notify: refresh cache @@ -66,7 +66,7 @@ and not openshift.common.is_containerized | bool - name: Configure yum repositories Fedora - copy: src={{ item }} dest=/etc/yum.repos.d/ + copy: src="{{ item }}" dest=/etc/yum.repos.d/ with_fileglob: - "fedora-{{ openshift_deployment_type }}/repos/*" notify: refresh cache diff --git a/roles/openshift_router/README.md b/roles/openshift_router/README.md index 836efc443..d490e1038 100644 --- a/roles/openshift_router/README.md +++ b/roles/openshift_router/README.md @@ -16,11 +16,6 @@ From this role: |--------------------|-------------------------------------------------------|---------------------| | | | | -From openshift_common: -| Name | Default value | | -|-----------------------|---------------|--------------------------------------| -| openshift_debug_level | 2 | Global openshift debug log verbosity | - Dependencies ------------ diff --git a/roles/openshift_router/meta/main.yml b/roles/openshift_router/meta/main.yml index 0471e5e14..c2b0777b5 100644 --- a/roles/openshift_router/meta/main.yml +++ b/roles/openshift_router/meta/main.yml @@ -4,10 +4,12 @@ galaxy_info: description: OpenShift Embedded Router company: Red Hat, Inc. 
license: Apache License, Version 2.0 - min_ansible_version: 1.7 + min_ansible_version: 1.9 platforms: - name: EL versions: - 7 categories: - cloud + dependencies: + - openshift_facts diff --git a/roles/openshift_router/tasks/main.yml b/roles/openshift_router/tasks/main.yml index 355cbf84b..40365d04d 100644 --- a/roles/openshift_router/tasks/main.yml +++ b/roles/openshift_router/tasks/main.yml @@ -3,6 +3,7 @@ command: > {{ openshift.common.admin_binary }} router --create --replicas={{ openshift.master.infra_nodes | length }} + --namespace=default --service-account=router {{ ortr_selector }} --credentials={{ openshift_master_config_dir }}/openshift-router.kubeconfig {{ ortr_images }} register: ortr_results diff --git a/roles/openshift_serviceaccounts/meta/main.yml b/roles/openshift_serviceaccounts/meta/main.yml new file mode 100644 index 000000000..a2c9fee70 --- /dev/null +++ b/roles/openshift_serviceaccounts/meta/main.yml @@ -0,0 +1,15 @@ +--- +galaxy_info: + author: OpenShift Operations + description: OpenShift Service Accounts + company: Red Hat, Inc. + license: Apache License, Version 2.0 + min_ansible_version: 1.9 + platforms: + - name: EL + versions: + - 7 + categories: + - cloud +dependencies: +- { role: openshift_facts } diff --git a/roles/openshift_serviceaccounts/tasks/legacy_add_scc_to_user.yml b/roles/openshift_serviceaccounts/tasks/legacy_add_scc_to_user.yml new file mode 100644 index 000000000..1efab9466 --- /dev/null +++ b/roles/openshift_serviceaccounts/tasks/legacy_add_scc_to_user.yml @@ -0,0 +1,37 @@ +#### +# +# OSE 3.0.z did not have 'oadm policy add-scc-to-user'. +# +#### + +- name: tmp dir for openshift + file: + path: /tmp/openshift + state: directory + owner: root + mode: 700 + +- name: Create service account configs + template: + src: serviceaccount.j2 + dest: "/tmp/openshift/{{ item }}-serviceaccount.yaml" + with_items: openshift_serviceaccounts_names + +- name: Get current security context constraints + shell: > + {{ openshift.common.client_binary }} get scc privileged -o yaml + --output-version=v1 > /tmp/openshift/scc.yaml + changed_when: false + +- name: Add security context constraint for {{ item }} + lineinfile: + dest: /tmp/openshift/scc.yaml + line: "- system:serviceaccount:{{ openshift_serviceaccounts_namespace }}:{{ item.0 }}" + insertafter: "^users:$" + when: "item.1.rc == 0 and 'system:serviceaccount:{{ openshift_serviceaccounts_namespace }}:{{ item.0 }}' not in {{ (item.1.stdout | from_yaml).users }}" + with_nested: + - openshift_serviceaccounts_names + - scc_test.results + +- name: Apply new scc rules for service accounts + command: "{{ openshift.common.client_binary }} update -f /tmp/openshift/scc.yaml --api-version=v1" diff --git a/roles/openshift_serviceaccounts/tasks/main.yml b/roles/openshift_serviceaccounts/tasks/main.yml index 4c7faa6fe..f34fa7b74 100644 --- a/roles/openshift_serviceaccounts/tasks/main.yml +++ b/roles/openshift_serviceaccounts/tasks/main.yml @@ -1,36 +1,36 @@ -- name: tmp dir for openshift - file: - path: /tmp/openshift - state: directory - owner: root - mode: 700 - -- name: Create service account configs - template: - src: serviceaccount.j2 - dest: "/tmp/openshift/{{ item }}-serviceaccount.yaml" - with_items: accounts - -- name: Create {{ item }} service account +- name: test if service accounts exists command: > - {{ openshift.common.client_binary }} create -f "/tmp/openshift/{{ item }}-serviceaccount.yaml" - with_items: accounts - register: _sa_result - failed_when: "'serviceaccounts \"{{ item }}\" already exists' not in 
_sa_result.stderr and _sa_result.rc != 0" - changed_when: "'serviceaccounts \"{{ item }}\" already exists' not in _sa_result.stderr and _sa_result.rc == 0" + {{ openshift.common.client_binary }} get sa {{ item }} -n {{ openshift_serviceaccounts_namespace }} + with_items: openshift_serviceaccounts_names + failed_when: false + changed_when: false + register: account_test -- name: Get current security context constraints +- name: create the service account shell: > - {{ openshift.common.client_binary }} get scc privileged -o yaml - --output-version=v1 > /tmp/openshift/scc.yaml + echo {{ lookup('template', '../templates/serviceaccount.j2') + | from_yaml | to_json | quote }} | {{ openshift.common.client_binary }} create -f - + when: item.1.rc != 0 + with_together: + - openshift_serviceaccounts_names + - account_test.results + +- name: test if scc needs to be updated + command: > + {{ openshift.common.client_binary }} get scc {{ item }} -o yaml changed_when: false + failed_when: false + register: scc_test + with_items: openshift_serviceaccounts_sccs -- name: Add security context constraint for {{ item }} - lineinfile: - dest: /tmp/openshift/scc.yaml - line: "- system:serviceaccount:default:{{ item }}" - insertafter: "^users:$" - with_items: accounts +- name: Grant the user access to the privileged scc + command: > + {{ openshift.common.admin_binary }} policy add-scc-to-user + privileged system:serviceaccount:{{ openshift_serviceaccounts_namespace }}:{{ item.0 }} + when: "openshift.common.version_gte_3_1_or_1_1 and item.1.rc == 0 and 'system:serviceaccount:{{ openshift_serviceaccounts_namespace }}:{{ item.0 }}' not in {{ (item.1.stdout | from_yaml).users }}" + with_nested: + - openshift_serviceaccounts_names + - scc_test.results -- name: Apply new scc rules for service accounts - command: "{{ openshift.common.client_binary }} update -f /tmp/openshift/scc.yaml --api-version=v1" +- include: legacy_add_scc_to_user.yml + when: not openshift.common.version_gte_3_1_or_1_1 diff --git a/roles/openshift_serviceaccounts/templates/serviceaccount.j2 b/roles/openshift_serviceaccounts/templates/serviceaccount.j2 index 931e249f9..c5f12421f 100644 --- a/roles/openshift_serviceaccounts/templates/serviceaccount.j2 +++ b/roles/openshift_serviceaccounts/templates/serviceaccount.j2 @@ -1,4 +1,4 @@ apiVersion: v1 kind: ServiceAccount metadata: - name: {{ item }} + name: {{ item.0 }} diff --git a/roles/openshift_storage_nfs/README.md b/roles/openshift_storage_nfs/README.md index 548e146cb..dd988b849 100644 --- a/roles/openshift_storage_nfs/README.md +++ b/roles/openshift_storage_nfs/README.md @@ -15,11 +15,11 @@ Role Variables -------------- From this role: -| Name | Default value | | -|-------------------------------|-----------------------|--------------------------------------------------| -| openshift_nfs_exports_dir | /var/export | Root export directory. | -| openshift_nfs_registry_volume | regvol | Registry volume within openshift_nfs_exports_dir | -| openshift_nfs_export_options | *(rw,sync,all_squash) | NFS options for configured exports. | +| Name | Default value | | +|-------------------------------------------------|-----------------------|-------------------------------------------------------------| +| openshift_hosted_registry_storage_nfs_directory | /exports | Root export directory. 
| +| openshift_hosted_registry_storage_volume_name | registry | Registry volume within openshift_hosted_registry_volume_dir | +| openshift_hosted_registry_storage_nfs_options | *(rw,root_squash) | NFS options for configured exports. | From openshift_common: @@ -31,8 +31,6 @@ From openshift_common: Dependencies ------------ - - Example Playbook ---------------- diff --git a/roles/openshift_storage_nfs/defaults/main.yml b/roles/openshift_storage_nfs/defaults/main.yml index e25062c00..5f6893129 100644 --- a/roles/openshift_storage_nfs/defaults/main.yml +++ b/roles/openshift_storage_nfs/defaults/main.yml @@ -1,7 +1,13 @@ --- -exports_dir: /var/export -registry_volume: regvol -export_options: '*(rw,sync,all_squash)' +openshift: + hosted: + registry: + storage: + nfs: + directory: "/exports" + options: "*(rw,root_squash)" + volume: + name: "registry" os_firewall_use_firewalld: False os_firewall_allow: - service: nfs diff --git a/roles/openshift_storage_nfs/tasks/main.yml b/roles/openshift_storage_nfs/tasks/main.yml index 64b121ade..fdd7bd3f1 100644 --- a/roles/openshift_storage_nfs/tasks/main.yml +++ b/roles/openshift_storage_nfs/tasks/main.yml @@ -1,31 +1,34 @@ --- -- name: Set nfs facts - openshift_facts: - role: nfs - local_facts: - exports_dir: "{{ openshift_nfs_exports_dir | default(None) }}" - export_options: "{{ openshift_nfs_export_options | default(None) }}" - registry_volume: "{{ openshift_nfs_registry_volume | default(None) }}" - - name: Install nfs-utils yum: pkg: nfs-utils state: present +- name: Configure NFS + lineinfile: + dest: /etc/sysconfig/nfs + regexp: '^RPCNFSDARGS=.*$' + line: 'RPCNFSDARGS="-N 2 -N 3"' + register: nfs_config + +- name: Restart nfs-config + service: name=nfs-config state=restarted + when: nfs_config | changed + - name: Ensure exports directory exists file: - path: "{{ openshift.nfs.exports_dir }}" + path: "{{ openshift.hosted.registry.storage.nfs.directory }}" state: directory - name: Ensure export directories exist file: - path: "{{ openshift.nfs.exports_dir }}/{{ item }}" + path: "{{ openshift.hosted.registry.storage.nfs.directory }}/{{ item }}" state: directory mode: 0777 owner: nfsnobody group: nfsnobody with_items: - - "{{ openshift.nfs.registry_volume }}" + - "{{ openshift.hosted.registry.storage.volume.name }}" - name: Configure exports template: @@ -44,6 +47,4 @@ - nfs-server - set_fact: - nfs_service_status_changed: "{{ True in (start_result.results - | map(attribute='changed') - | list) }}" + nfs_service_status_changed: "{{ start_result | changed }}" diff --git a/roles/openshift_storage_nfs/templates/exports.j2 b/roles/openshift_storage_nfs/templates/exports.j2 index 702473040..c1e1994b0 100644 --- a/roles/openshift_storage_nfs/templates/exports.j2 +++ b/roles/openshift_storage_nfs/templates/exports.j2 @@ -1 +1 @@ -{{ openshift.nfs.exports_dir }}/{{ openshift.nfs.registry_volume }} {{ openshift.nfs.export_options }} +{{ openshift.hosted.registry.storage.nfs.directory }}/{{ openshift.hosted.registry.storage.volume.name }} {{ openshift.hosted.registry.storage.nfs.options }} diff --git a/roles/os_firewall/tasks/firewall/iptables.yml b/roles/os_firewall/tasks/firewall/iptables.yml index 5cf4bf7af..3b584f8eb 100644 --- a/roles/os_firewall/tasks/firewall/iptables.yml +++ b/roles/os_firewall/tasks/firewall/iptables.yml @@ -1,12 +1,4 @@ --- -- name: Install iptables packages - action: "{{ ansible_pkg_mgr }} name={{ item }} state=present" - with_items: - - iptables - - iptables-services - register: install_result - when: not openshift.common.is_atomic | 
bool - - name: Check if firewalld is installed command: rpm -q firewalld register: pkg_check @@ -20,6 +12,22 @@ enabled: no when: pkg_check.rc == 0 +# TODO: submit PR upstream to add mask/unmask to service module +- name: Mask firewalld service + command: systemctl mask firewalld + register: result + changed_when: "'firewalld' in result.stdout" + when: pkg_check.rc == 0 + ignore_errors: yes + +- name: Install iptables packages + action: "{{ ansible_pkg_mgr }} name={{ item }} state=present" + with_items: + - iptables + - iptables-services + register: install_result + when: not openshift.common.is_atomic | bool + - name: Reload systemd units command: systemctl daemon-reload when: install_result | changed @@ -35,14 +43,6 @@ pause: seconds=10 when: result | changed -# TODO: submit PR upstream to add mask/unmask to service module -- name: Mask firewalld service - command: systemctl mask firewalld - register: result - changed_when: "'firewalld' in result.stdout" - when: pkg_check.rc == 0 - ignore_errors: yes - - name: Add iptables allow rules os_firewall_manage_iptables: name: "{{ item.service }}" diff --git a/roles/os_zabbix/vars/template_openshift_master.yml b/roles/os_zabbix/vars/template_openshift_master.yml index 9d20eb012..e36f23a2b 100644 --- a/roles/os_zabbix/vars/template_openshift_master.yml +++ b/roles/os_zabbix/vars/template_openshift_master.yml @@ -2,16 +2,10 @@ g_template_openshift_master: name: Template Openshift Master zitems: - - name: create_app - applications: - - Openshift Master - key: create_app - - - key: openshift.master.registry.healthz - description: "Shows the health status of the cluster's docker registry" - type: int + - name: openshift.master.app.create applications: - Openshift Master + key: openshift.master.app.create - key: openshift.master.process.count description: Shows number of master processes running @@ -201,6 +195,18 @@ g_template_openshift_master: applications: - Openshift Master Metrics + - key: openshift.master.nodesnotready.count + description: "This check shows how many nodes in a cluster are in NotReady state." + type: int + applications: + - Openshift Master + + - key: openshift.master.nodesnotschedulable.count + description: "This check shows how many nodes in a cluster are not schedulable." + type: int + applications: + - Openshift Master + - key: openshift.master.apiserver.latency.summary.pods.quantile.list.5 description: "Value from https://master/metrics. This is the time, in miliseconds, that 50% of the pod operations have taken to completed." 
type: int @@ -266,11 +272,6 @@ g_template_openshift_master: url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc' priority: high - - name: 'Low number of etcd watchers on {HOST.NAME}' - expression: '{Template Openshift Master:openshift.master.etcd.watchers.last(#1)}<10 and {Template Openshift Master:openshift.master.etcd.watchers.last(#2)}<10' - url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_etcd.asciidoc' - priority: avg - - name: 'Etcd ping failed on {HOST.NAME}' expression: '{Template Openshift Master:openshift.master.etcd.ping.last(#1)}=0 and {Template Openshift Master:openshift.master.etcd.ping.last(#2)}=0' url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_etcd.asciidoc' @@ -288,14 +289,14 @@ g_template_openshift_master: # Put triggers that depend on other triggers here (deps must be created first) - name: 'Application creation has failed on {HOST.NAME}' - expression: '{Template Openshift Master:create_app.last(#1)}=1 and {Template Openshift Master:create_app.last(#2)}=1' + expression: '{Template Openshift Master:openshift.master.app.create.last(#1)}=1 and {Template Openshift Master:openshift.master.app.create.last(#2)}=1' url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_create_app.asciidoc' dependencies: - 'Openshift Master process not running on {HOST.NAME}' priority: avg - name: 'Application creation has failed multiple times in the last hour on {HOST.NAME}' - expression: '{Template Openshift Master:create_app.sum(1h)}>3' + expression: '{Template Openshift Master:openshift.master.app.create.sum(1h)}>3' url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_create_app.asciidoc' dependencies: - 'Openshift Master process not running on {HOST.NAME}' @@ -333,13 +334,6 @@ g_template_openshift_master: - 'Openshift Master process not running on {HOST.NAME}' priority: avg - - name: 'Docker Registry check failed on {HOST.NAME}' - expression: '{Template Openshift Master:openshift.master.registry.healthz.max(#2)}<1' - url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc' - dependencies: - - 'Openshift Master process not running on {HOST.NAME}' - priority: high - - name: 'SkyDNS port not listening on {HOST.NAME}' expression: '{Template Openshift Master:openshift.master.skydns.port.open.max(#3)}<1' url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc' @@ -354,6 +348,13 @@ g_template_openshift_master: - 'Openshift Master API health check is failing on {HOST.NAME}' priority: high + - name: 'Hosts not ready according to {HOST.NAME}' + expression: '{Template Openshift Master:openshift.master.nodesnotready.count.last(#2)}>0' + url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_node.asciidoc' + dependencies: + - 'Openshift Master process not running on {HOST.NAME}' + priority: high + zgraphs: - name: Openshift Master API Server Latency Pods LIST Quantiles width: 900 diff --git a/roles/os_zabbix/vars/template_openshift_node.yml b/roles/os_zabbix/vars/template_openshift_node.yml index 252339366..e6daee8e4 100644 --- a/roles/os_zabbix/vars/template_openshift_node.yml +++ b/roles/os_zabbix/vars/template_openshift_node.yml @@ -26,7 +26,29 @@ g_template_openshift_node: applications: - Openshift Node + - key: openshift.node.registry-pods.healthy_pct + description: Shows the percentage of healthy registries in the cluster + type: int + applications: + - Openshift Node + + - key: 
openshift.node.registry.service.ping + description: Ping docker-registry service from node + type: int + applications: + - Openshift Node + ztriggers: + - name: 'One or more Docker Registries is unhealthy according to {HOST.NAME}' + expression: '{Template Openshift Node:openshift.node.registry-pods.healthy_pct.last(#2)}<100 and {Template Openshift Node:openshift.node.registry-pods.healthy_pct.last(#1)}<100' + url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_registry.asciidoc' + priority: avg + + - name: 'Docker Registry service is unhealthy according to {HOST.NAME}' + expression: '{Template Openshift Node:openshift.node.registry.service.ping.last(#2)}<1 and {Template Openshift Node:openshift.node.registry.service.ping.last(#1)}<1' + url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_registry.asciidoc' + priority: avg + - name: 'Openshift Node process not running on {HOST.NAME}' expression: '{Template Openshift Node:openshift.node.process.count.max(#3)}<1' url: 'https://github.com/openshift/ops-sop/blob/node/V3/Alerts/openshift_node.asciidoc' @@ -37,8 +59,8 @@ g_template_openshift_node: url: 'https://github.com/openshift/ops-sop/blob/node/V3/Alerts/openshift_node.asciidoc' priority: high - - name: 'OVS may not be running on {HOST.NAME}' - expression: '{Template Openshift Node:openshift.node.ovs.pids.count.last()}<>4' + - name: '[HEAL] OVS may not be running on {HOST.NAME}' + expression: '{Template Openshift Node:openshift.node.ovs.pids.count.last(#1)}<>4 and {Template Openshift Node:openshift.node.ovs.pids.count.last(#2)}<>4' url: 'https://github.com/openshift/ops-sop/blob/node/V3/Alerts/openshift_node.asciidoc' priority: high diff --git a/roles/oso_host_monitoring/templates/oso-rhel7-host-monitoring.service.j2 b/roles/oso_host_monitoring/templates/oso-rhel7-host-monitoring.service.j2 index ac950b4e5..e17092202 100644 --- a/roles/oso_host_monitoring/templates/oso-rhel7-host-monitoring.service.j2 +++ b/roles/oso_host_monitoring/templates/oso-rhel7-host-monitoring.service.j2 @@ -43,13 +43,13 @@ ExecStart=/usr/bin/docker run --name {{ osohm_host_monitoring }} -e ZAGG_URL={{ osohm_zagg_web_url }} \ -e ZAGG_USER={{ osohm_default_zagg_server_user }} \ -e ZAGG_PASSWORD={{ osohm_default_zagg_server_password }} \ - -e ZAGG_CLIENT_HOSTNAME={{ ec2_tag_Name }} \ + -e ZAGG_CLIENT_HOSTNAME={{ oo_name }} \ -e ZAGG_SSL_VERIFY={{ osohm_zagg_verify_ssl }} \ -e OSO_CLUSTER_GROUP={{ cluster_group }} \ -e OSO_CLUSTER_ID={{ oo_clusterid }} \ -e OSO_ENVIRONMENT={{ oo_environment }} \ - -e OSO_HOST_TYPE={{ hostvars[inventory_hostname]['ec2_tag_host-type'] }} \ - -e OSO_SUB_HOST_TYPE={{ hostvars[inventory_hostname]['ec2_tag_sub-host-type'] }} \ + -e OSO_HOST_TYPE={{ hostvars[inventory_hostname]['oo_hosttype'] }} \ + -e OSO_SUB_HOST_TYPE={{ hostvars[inventory_hostname]['oo_subhosttype'] }} \ -e OSO_MASTER_HA={{ osohm_master_ha }} \ -v /etc/localtime:/etc/localtime \ -v /sys:/sys:ro \ @@ -57,11 +57,13 @@ ExecStart=/usr/bin/docker run --name {{ osohm_host_monitoring }} -v /var/lib/docker:/var/lib/docker:ro \ -v /var/run/docker.sock:/var/run/docker.sock \ -v /var/run/openvswitch:/var/run/openvswitch \ -{% if hostvars[inventory_hostname]['ec2_tag_host-type'] == 'master' %} - -v /etc/openshift/master/admin.kubeconfig:/etc/openshift/master/admin.kubeconfig \ - -v /etc/openshift/master/master.etcd-client.crt:/etc/openshift/master/master.etcd-client.crt \ - -v /etc/openshift/master/master.etcd-client.key:/etc/openshift/master/master.etcd-client.key \ - -v 
/etc/openshift/master/master-config.yaml:/etc/openshift/master/master-config.yaml \ +{% if hostvars[inventory_hostname]['oo_hosttype'] == 'master' %} + -v /etc/origin/master/admin.kubeconfig:/etc/origin/master/admin.kubeconfig \ + -v /etc/origin/master/master.etcd-client.crt:/etc/origin/master/master.etcd-client.crt \ + -v /etc/origin/master/master.etcd-client.key:/etc/origin/master/master.etcd-client.key \ + -v /etc/origin/master/master-config.yaml:/etc/origin/master/master-config.yaml \ +{% elif hostvars[inventory_hostname]['oo_hosttype'] == 'node' %} + -v /etc/origin/node:/etc/origin/node \ {% endif %} {{ osohm_docker_registry_url }}{{ osohm_host_monitoring }}
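
Note on the new node proxy-mode knob: openshift_node_proxy_mode (default `iptables`) is stored as the openshift.node.proxy_mode fact and rendered into the node configuration by node.yaml.v1.j2. A sketch of the resulting excerpt of node-config.yaml, assuming the default is not overridden in the inventory:

    # generated node-config.yaml (excerpt, role default)
    proxyArguments:
      proxy-mode:
      - iptables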
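
Note on the registry storage change: the openshift_registry role now attaches storage to dc/docker-registry through a PersistentVolumeClaim (default claim name `registry-claim` from defaults/main.yml) instead of injecting an NFS source directly. A minimal sketch of the command the task resolves to, using `oc` in place of the configured client binary and assuming the default claim name; the claim itself is expected to exist already, for example created by the openshift_persistent_volumes role documented above:

    oc volume dc/docker-registry \
      --add --overwrite -t persistentVolumeClaim \
      --claim-name=registry-claim --name=registry-storage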
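
Note on the service-account changes: on OpenShift Enterprise 3.1 / Origin 1.1 and later the role grants the privileged SCC with the dedicated admin command, while older releases fall back to legacy_add_scc_to_user.yml, which edits the SCC users list in place. A sketch of the non-legacy path, with `oadm` standing in for the admin binary and the `default` namespace and `router` service account used only as placeholders:

    oadm policy add-scc-to-user privileged \
      system:serviceaccount:default:router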
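
Note on the NFS export layout: with the registry storage facts relocated under openshift.hosted.registry.storage and the role defaults left untouched, exports.j2 renders a single export line; the directory, volume name, and options below are taken from the new defaults/main.yml:

    # /etc/exports as rendered with role defaults
    /exports/registry *(rw,root_squash)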