From 68535500e00aa9b5f349f6055f63082556c0ea30 Mon Sep 17 00:00:00 2001
From: Scott Dodson
Date: Thu, 14 Apr 2016 13:13:18 -0400
Subject: start it, check for failure, reset it, start again

---
 roles/docker/tasks/main.yml | 19 +++++++++++--------
 1 file changed, 11 insertions(+), 8 deletions(-)

diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml
index b15c1004e..f06e4875b 100644
--- a/roles/docker/tasks/main.yml
+++ b/roles/docker/tasks/main.yml
@@ -24,26 +24,29 @@
   action: "{{ ansible_pkg_mgr }} name=docker{{ '-' + docker_version if docker_version is defined and docker_version != '' else '' }} state=present"
   when: not openshift.common.is_atomic | bool and not docker_version_result | skipped and docker_version_result.stdout | default('0.0', True) | version_compare(docker_version, 'lt')
 
-# Enable docker and record if it was changed, if it was changed then we have no
-# need to reset-failed.
-- name: Enable the docker service
+- name: Start the docker service
   service:
     name: docker
     enabled: yes
-  register: docker_enabled
+    state: started
+  register: start_result
+  ignore_errors: yes
 
-# We're getting ready to start docker. This is a workaround for cases where it
-# seems a package install/upgrade/downgrade has rebooted docker and crashed it.
+# If docker were enabled and started before we downgraded it there's a real possibility
+# that it's marked failed, so if our first attempt to start it fails reset the failure
+# and start it again.
 - name: Reset docker service state
   command: systemctl reset-failed docker.service
-  when: not docker_enabled | changed
+  when: start_result | failed
+  register: reset_failed
 
-- name: Start the docker service
+- name: Start the docker service if it had failed
   service:
     name: docker
     enabled: yes
     state: started
   register: start_result
+  when: reset_failed | changed
 
 - set_fact:
     docker_service_status_changed: start_result | changed
--
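
For readers who find the interleaved hunk hard to follow, this is the task
sequence as it reads after the patch is applied, reassembled from the hunk
above (a sketch of the post-patch roles/docker/tasks/main.yml, not a verbatim
copy of the repository file):

- name: Start the docker service
  service:
    name: docker
    enabled: yes
    state: started
  register: start_result
  ignore_errors: yes

# If docker were enabled and started before we downgraded it there's a real possibility
# that it's marked failed, so if our first attempt to start it fails reset the failure
# and start it again.
- name: Reset docker service state
  command: systemctl reset-failed docker.service
  when: start_result | failed
  register: reset_failed

- name: Start the docker service if it had failed
  service:
    name: docker
    enabled: yes
    state: started
  register: start_result
  when: reset_failed | changed

The effect is exactly the sequence named in the subject line: try to start
docker; only if that first attempt fails, clear systemd's failed-unit state
with reset-failed and start it once more. Note that the retry task reuses
register: start_result, so it overwrites the first attempt's result and the
later set_fact reflects whichever start task ran last.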