--- - debug: msg="WARNING target_registry is deprecated, use openshift_hosted_logging_image_prefix instead" when: target_registry is defined and target_registry - fail: msg="This role requires the following vars to be defined. openshift_hosted_logging_master_public_url, openshift_hosted_logging_hostname, openshift_hosted_logging_elasticsearch_cluster_size" when: "openshift_hosted_logging_hostname is not defined or openshift_hosted_logging_elasticsearch_cluster_size is not defined or openshift_hosted_logging_master_public_url is not defined" - name: Create temp directory for kubeconfig command: mktemp -d /tmp/openshift-ansible-XXXXXX register: mktemp changed_when: False - name: Copy the admin client config(s) command: > cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig changed_when: False - name: Check for logging project already exists command: > {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get project logging -o jsonpath='{.metadata.name}' register: logging_project_result ignore_errors: True - name: "Create logging project" command: > {{ openshift.common.admin_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-project logging when: logging_project_result.stdout == "" - name: "Changing projects" command: > {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig project logging - name: "Creating logging deployer secret" command: > {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig secrets new logging-deployer {{ openshift_hosted_logging_secret_vars | default('nothing=/dev/null') }} register: secret_output failed_when: "secret_output.rc == 1 and 'exists' not in secret_output.stderr" - name: "Create templates for logging accounts and the deployer" command: > {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f /usr/share/openshift/examples/infrastructure-templates/enterprise/logging-deployer.yaml register: template_output failed_when: "template_output.rc == 1 and 'exists' not in template_output.stderr" - name: "Process the logging accounts template" shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig process logging-deployer-account-template | {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f -" register: process_deployer_accounts failed_when: process_deployer_accounts.rc == 1 and 'already exists' not in process_deployer_accounts.stderr - name: "Set permissions for logging-deployer service account" command: > {{ openshift.common.admin_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig policy add-cluster-role-to-user oauth-editor system:serviceaccount:logging:logging-deployer register: permiss_output failed_when: "permiss_output.rc == 1 and 'exists' not in permiss_output.stderr" - name: "Set permissions for fluentd" command: > {{ openshift.common.admin_binary}} policy add-scc-to-user privileged system:serviceaccount:logging:aggregated-logging-fluentd register: fluentd_output failed_when: "fluentd_output.rc == 1 and 'exists' not in fluentd_output.stderr" - name: "Set additional permissions for fluentd" command: > {{ openshift.common.admin_binary}} policy add-cluster-role-to-user cluster-reader system:serviceaccount:logging:aggregated-logging-fluentd register: fluentd2_output failed_when: "fluentd2_output.rc == 1 and 'exists' not in fluentd2_output.stderr" - name: "Create ConfigMap for deployer parameters" command: > {{ 
openshift.common.client_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig create configmap logging-deployer {{ deployer_cmap_params }} register: deployer_configmap_output failed_when: "deployer_configmap_output.rc == 1 and 'exists' not in deployer_configmap_output.stderr" - name: "Process the deployer template" shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-app logging-deployer-template {{ oc_new_app_values }}" register: process_deployer failed_when: process_deployer.rc == 1 and 'already exists' not in process_deployer.stderr - name: "Wait for image pull and deployer pod" shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods | grep logging-deployer.*Completed" register: result until: result.rc == 0 retries: 15 delay: 10 - name: "Process imagestream template" shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-app logging-imagestream-template {{ oc_new_app_values }}" when: tr_or_ohlip is defined and insecure_registry is defined and insecure_registry register: process_is failed_when: process_is.rc == 1 and 'already exists' not in process_is.stderr - name: "Set insecured registry" command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig annotate is --all openshift.io/image.insecureRepository=true --overwrite" when: tr_or_ohlip is defined and insecure_registry is defined and insecure_registry - name: "Wait for imagestreams to become available" shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get is | grep logging-fluentd" when: tr_or_ohlip is defined and insecure_registry is defined and insecure_registry register: result until: result.rc == 0 failed_when: result.rc == 1 and 'not found' not in result.stderr retries: 20 delay: 10 - name: "Wait for component pods to be running" shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods -l component={{ item }} | grep Running" with_items: - es - kibana - curator register: result until: result.rc == 0 failed_when: result.rc == 1 or 'Error' in result.stderr retries: 20 delay: 10 - name: "Wait for ops component pods to be running" shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods -l component={{ item }} | grep Running" with_items: - es-ops - kibana-ops - curator-ops when: openshift_hosted_logging_enable_ops_cluster is defined and openshift_hosted_logging_enable_ops_cluster register: result until: result.rc == 0 failed_when: result.rc == 1 or 'Error' in result.stderr retries: 20 delay: 10 - name: "Wait for fluentd DaemonSet to exist" shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get daemonset logging-fluentd" register: result until: result.rc == 0 failed_when: result.rc == 1 or 'Error' in result.stderr retries: 20 delay: 10 - name: "Deploy fluentd by labeling the node" shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig label node {{ openshift_hostname }} {{ openshift_hosted_logging_fluentd_nodeselector if openshift_hosted_logging_fluentd_nodeselector is defined else 'logging-infra-fluentd=true' }}" - name: "Wait for fluentd to be running" shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods -l component=fluentd | grep Running" register: result until: result.rc == 0 failed_when: result.rc == 1 or 'Error' in 
result.stderr retries: 20 delay: 10 - debug: msg: "Logging components deployed. Note persistent volume for elasticsearch must be setup manually" - name: Delete temp directory file: name: "{{ mktemp.stdout }}" state: absent changed_when: False
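
# For reference, a minimal sketch of the inventory variables this role consumes.
# The values below are illustrative placeholders (hostnames, URLs, and sizes are
# assumptions for an example cluster), not role defaults:
#
#   openshift_hosted_logging_master_public_url: "https://master.example.com:8443"
#   openshift_hosted_logging_hostname: "kibana.apps.example.com"
#   openshift_hosted_logging_elasticsearch_cluster_size: 1
#   openshift_hosted_logging_enable_ops_cluster: false
#   openshift_hosted_logging_secret_vars: "nothing=/dev/null"
#   openshift_hosted_logging_fluentd_nodeselector: "logging-infra-fluentd=true"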