Diffstat (limited to 'roles/openshift_health_checker/test')
-rw-r--r--  roles/openshift_health_checker/test/action_plugin_test.py              | 164
-rw-r--r--  roles/openshift_health_checker/test/aos_version_test.py                | 137
-rw-r--r--  roles/openshift_health_checker/test/conftest.py                         |   1
-rw-r--r--  roles/openshift_health_checker/test/curator_test.py                     |  45
-rw-r--r--  roles/openshift_health_checker/test/diagnostics_test.py                 |  50
-rw-r--r--  roles/openshift_health_checker/test/disk_availability_test.py           | 111
-rw-r--r--  roles/openshift_health_checker/test/docker_image_availability_test.py   | 203
-rw-r--r--  roles/openshift_health_checker/test/docker_storage_test.py              | 127
-rw-r--r--  roles/openshift_health_checker/test/elasticsearch_test.py               | 143
-rw-r--r--  roles/openshift_health_checker/test/etcd_imagedata_size_test.py         |  25
-rw-r--r--  roles/openshift_health_checker/test/etcd_traffic_test.py                |  72
-rw-r--r--  roles/openshift_health_checker/test/etcd_volume_test.py                 |  14
-rw-r--r--  roles/openshift_health_checker/test/fluentd_config_test.py              | 348
-rw-r--r--  roles/openshift_health_checker/test/fluentd_test.py                     |  63
-rw-r--r--  roles/openshift_health_checker/test/kibana_test.py                      | 164
-rw-r--r--  roles/openshift_health_checker/test/logging_check_test.py               |  93
-rw-r--r--  roles/openshift_health_checker/test/logging_index_time_test.py          | 170
-rw-r--r--  roles/openshift_health_checker/test/memory_availability_test.py         |   8
-rw-r--r--  roles/openshift_health_checker/test/mixins_test.py                      |   4
-rw-r--r--  roles/openshift_health_checker/test/openshift_check_test.py             |  92
-rw-r--r--  roles/openshift_health_checker/test/ovs_version_test.py                 |  23
-rw-r--r--  roles/openshift_health_checker/test/package_availability_test.py        |  13
-rw-r--r--  roles/openshift_health_checker/test/package_update_test.py              |  11
-rw-r--r--  roles/openshift_health_checker/test/package_version_test.py             | 114
-rw-r--r--  roles/openshift_health_checker/test/rpm_version_test.py                 |   6
-rw-r--r--  roles/openshift_health_checker/test/search_journalctl_test.py           | 157
-rw-r--r--  roles/openshift_health_checker/test/zz_failure_summary_test.py          |  85
27 files changed, 1853 insertions(+), 590 deletions(-)
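
The hunks below converge on a refactored check interface: checks are constructed with an execute_module callable and task_vars, while run() and is_active() take no arguments, so tests can inject stubs directly. A minimal sketch of that calling convention, using an illustrative stand-in class rather than the role's actual API:

    # Illustrative only: shows the constructor/run() shape the updated tests rely on.
    class SomeCheck(object):
        def __init__(self, execute_module=None, task_vars=None):
            self._execute_module = execute_module    # injected callable, no Ansible needed
            self.task_vars = task_vars or {}

        def run(self):
            # old style (removed in these hunks): run(tmp=None, task_vars=task_vars)
            return self._execute_module('ping', {})

    def fake_execute_module(module_name, module_args, *_):
        return {}

    assert SomeCheck(fake_execute_module, {'group_names': ['masters']}).run() == {}
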
diff --git a/roles/openshift_health_checker/test/action_plugin_test.py b/roles/openshift_health_checker/test/action_plugin_test.py
index 9383b233c..f14887303 100644
--- a/roles/openshift_health_checker/test/action_plugin_test.py
+++ b/roles/openshift_health_checker/test/action_plugin_test.py
@@ -3,10 +3,12 @@ import pytest
from ansible.playbook.play_context import PlayContext
from openshift_health_check import ActionModule, resolve_checks
-from openshift_checks import OpenShiftCheckException
+from openshift_health_check import copy_remote_file_to_dir, write_result_to_output_dir, write_to_output_file
+from openshift_checks import OpenShiftCheckException, FileToSave
-def fake_check(name='fake_check', tags=None, is_active=True, run_return=None, run_exception=None):
+def fake_check(name='fake_check', tags=None, is_active=True, run_return=None, run_exception=None,
+ run_logs=None, run_files=None, changed=False, get_var_return=None):
"""Returns a new class that is compatible with OpenShiftCheck for testing."""
_name, _tags = name, tags
@@ -15,18 +17,30 @@ def fake_check(name='fake_check', tags=None, is_active=True, run_return=None, ru
name = _name
tags = _tags or []
- def __init__(self, execute_module=None):
- pass
+ def __init__(self, **_):
+ self.changed = False
+ self.failures = []
+ self.logs = run_logs or []
+ self.files_to_save = run_files or []
- @classmethod
- def is_active(cls, task_vars):
+ def is_active(self):
+ if isinstance(is_active, Exception):
+ raise is_active
return is_active
- def run(self, tmp, task_vars):
+ def run(self):
+ self.changed = changed
if run_exception is not None:
raise run_exception
return run_return
+ def get_var(*args, **_):
+ return get_var_return
+
+ def register_failure(self, exc):
+ self.failures.append(OpenShiftCheckException(str(exc)))
+ return
+
return FakeCheck
@@ -79,7 +93,8 @@ def skipped(result):
None,
{},
])
-def test_action_plugin_missing_openshift_facts(plugin, task_vars):
+def test_action_plugin_missing_openshift_facts(plugin, task_vars, monkeypatch):
+ monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
result = plugin.run(tmp=None, task_vars=task_vars)
assert failed(result, msg_has=['openshift_facts'])
@@ -93,26 +108,36 @@ def test_action_plugin_cannot_load_checks_with_the_same_name(plugin, task_vars,
result = plugin.run(tmp=None, task_vars=task_vars)
- assert failed(result, msg_has=['unique', 'duplicate_name', 'FakeCheck'])
+ assert failed(result, msg_has=['duplicate', 'duplicate_name', 'FakeCheck'])
-def test_action_plugin_skip_non_active_checks(plugin, task_vars, monkeypatch):
- checks = [fake_check(is_active=False)]
+@pytest.mark.parametrize('is_active, skipped_reason', [
+ (False, "Not active for this host"),
+ (Exception("borked"), "exception"),
+])
+def test_action_plugin_skip_non_active_checks(is_active, skipped_reason, plugin, task_vars, monkeypatch):
+ checks = [fake_check(is_active=is_active)]
monkeypatch.setattr('openshift_checks.OpenShiftCheck.subclasses', classmethod(lambda cls: checks))
result = plugin.run(tmp=None, task_vars=task_vars)
- assert result['checks']['fake_check'] == dict(skipped=True, skipped_reason="Not active for this host")
+ assert result['checks']['fake_check'].get('skipped')
+ assert skipped_reason in result['checks']['fake_check'].get('skipped_reason')
assert not failed(result)
assert not changed(result)
assert not skipped(result)
-def test_action_plugin_skip_disabled_checks(plugin, task_vars, monkeypatch):
+@pytest.mark.parametrize('to_disable', [
+ 'fake_check',
+ ['fake_check', 'spam'],
+ '*,spam,eggs',
+])
+def test_action_plugin_skip_disabled_checks(to_disable, plugin, task_vars, monkeypatch):
checks = [fake_check('fake_check', is_active=True)]
monkeypatch.setattr('openshift_checks.OpenShiftCheck.subclasses', classmethod(lambda cls: checks))
- task_vars['openshift_disable_check'] = 'fake_check'
+ task_vars['openshift_disable_check'] = to_disable
result = plugin.run(tmp=None, task_vars=task_vars)
assert result['checks']['fake_check'] == dict(skipped=True, skipped_reason="Disabled by user request")
@@ -121,10 +146,21 @@ def test_action_plugin_skip_disabled_checks(plugin, task_vars, monkeypatch):
assert not skipped(result)
+def test_action_plugin_run_list_checks(monkeypatch):
+ task = FakeTask('openshift_health_check', {'checks': []})
+ plugin = ActionModule(task, None, PlayContext(), None, None, None)
+ monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {})
+ result = plugin.run()
+
+ assert failed(result, msg_has="Available checks")
+ assert not changed(result)
+ assert not skipped(result)
+
+
def test_action_plugin_run_check_ok(plugin, task_vars, monkeypatch):
check_return_value = {'ok': 'test'}
- check_class = fake_check(run_return=check_return_value)
- monkeypatch.setattr(plugin, 'load_known_checks', lambda: {'fake_check': check_class()})
+ check_class = fake_check(run_return=check_return_value, run_files=[None])
+ monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {'fake_check': check_class()})
monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
result = plugin.run(tmp=None, task_vars=task_vars)
@@ -136,23 +172,24 @@ def test_action_plugin_run_check_ok(plugin, task_vars, monkeypatch):
def test_action_plugin_run_check_changed(plugin, task_vars, monkeypatch):
- check_return_value = {'ok': 'test', 'changed': True}
- check_class = fake_check(run_return=check_return_value)
- monkeypatch.setattr(plugin, 'load_known_checks', lambda: {'fake_check': check_class()})
+ check_return_value = {'ok': 'test'}
+ check_class = fake_check(run_return=check_return_value, changed=True)
+ monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {'fake_check': check_class()})
monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
result = plugin.run(tmp=None, task_vars=task_vars)
assert result['checks']['fake_check'] == check_return_value
+ assert changed(result['checks']['fake_check'])
assert not failed(result)
assert changed(result)
assert not skipped(result)
def test_action_plugin_run_check_fail(plugin, task_vars, monkeypatch):
- check_return_value = {'failed': True}
+ check_return_value = {'failed': True, 'msg': 'this is a failure'}
check_class = fake_check(run_return=check_return_value)
- monkeypatch.setattr(plugin, 'load_known_checks', lambda: {'fake_check': check_class()})
+ monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {'fake_check': check_class()})
monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
result = plugin.run(tmp=None, task_vars=task_vars)
@@ -163,23 +200,51 @@ def test_action_plugin_run_check_fail(plugin, task_vars, monkeypatch):
assert not skipped(result)
-def test_action_plugin_run_check_exception(plugin, task_vars, monkeypatch):
+@pytest.mark.parametrize('exc_class, expect_traceback', [
+ (OpenShiftCheckException, False),
+ (Exception, True),
+])
+def test_action_plugin_run_check_exception(plugin, task_vars, exc_class, expect_traceback, monkeypatch):
exception_msg = 'fake check has an exception'
- run_exception = OpenShiftCheckException(exception_msg)
- check_class = fake_check(run_exception=run_exception)
- monkeypatch.setattr(plugin, 'load_known_checks', lambda: {'fake_check': check_class()})
+ run_exception = exc_class(exception_msg)
+ check_class = fake_check(run_exception=run_exception, changed=True)
+ monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {'fake_check': check_class()})
monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
result = plugin.run(tmp=None, task_vars=task_vars)
assert failed(result['checks']['fake_check'], msg_has=exception_msg)
+ assert expect_traceback == ("Traceback" in result['checks']['fake_check']['msg'])
assert failed(result, msg_has=['failed'])
- assert not changed(result)
+ assert changed(result['checks']['fake_check'])
+ assert changed(result)
assert not skipped(result)
+def test_action_plugin_run_check_output_dir(plugin, task_vars, tmpdir, monkeypatch):
+ check_class = fake_check(
+ run_return={},
+ run_logs=[('thing', 'note')],
+ run_files=[
+ FileToSave('save.file', 'contents', None),
+ FileToSave('save.file', 'duplicate', None),
+ FileToSave('copy.file', None, 'foo'), # note: copy runs execute_module => exception
+ ],
+ )
+ task_vars['openshift_checks_output_dir'] = str(tmpdir)
+ check_class.get_var = lambda self, name, **_: task_vars.get(name)
+ monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {'fake_check': check_class()})
+ monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
+
+ plugin.run(tmp=None, task_vars=task_vars)
+ assert any(path.basename == task_vars['ansible_host'] for path in tmpdir.listdir())
+ assert any(path.basename == 'fake_check.log.json' for path in tmpdir.visit())
+ assert any(path.basename == 'save.file' for path in tmpdir.visit())
+ assert any(path.basename == 'save.file.2' for path in tmpdir.visit())
+
+
def test_action_plugin_resolve_checks_exception(plugin, task_vars, monkeypatch):
- monkeypatch.setattr(plugin, 'load_known_checks', lambda: {})
+ monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {})
result = plugin.run(tmp=None, task_vars=task_vars)
@@ -214,24 +279,21 @@ def test_resolve_checks_ok(names, all_checks, expected):
assert resolve_checks(names, all_checks) == expected
-@pytest.mark.parametrize('names,all_checks,words_in_exception,words_not_in_exception', [
+@pytest.mark.parametrize('names,all_checks,words_in_exception', [
(
['testA', 'testB'],
[],
['check', 'name', 'testA', 'testB'],
- ['tag', 'group', '@'],
),
(
['@group'],
[],
['tag', 'name', 'group'],
- ['check', '@'],
),
(
['testA', 'testB', '@group'],
[],
['check', 'name', 'testA', 'testB', 'tag', 'group'],
- ['@'],
),
(
['testA', 'testB', '@group'],
@@ -241,13 +303,45 @@ def test_resolve_checks_ok(names, all_checks, expected):
fake_check('from_group_2', ['preflight', 'group']),
],
['check', 'name', 'testA', 'testB'],
- ['tag', 'group', '@'],
),
])
-def test_resolve_checks_failure(names, all_checks, words_in_exception, words_not_in_exception):
+def test_resolve_checks_failure(names, all_checks, words_in_exception):
with pytest.raises(Exception) as excinfo:
resolve_checks(names, all_checks)
for word in words_in_exception:
assert word in str(excinfo.value)
- for word in words_not_in_exception:
- assert word not in str(excinfo.value)
+
+
+@pytest.mark.parametrize('give_output_dir, result, expect_file', [
+ (False, None, False),
+ (True, dict(content="c3BhbQo=", encoding="base64"), True),
+ (True, dict(content="encoding error", encoding="base64"), False),
+ (True, dict(content="spam", no_encoding=None), True),
+ (True, dict(failed=True, msg="could not slurp"), False),
+])
+def test_copy_remote_file_to_dir(give_output_dir, result, expect_file, tmpdir):
+ check = fake_check()()
+ check.execute_module = lambda *args, **_: result
+ copy_remote_file_to_dir(check, "remote_file", str(tmpdir) if give_output_dir else "", "local_file")
+ assert expect_file == any(path.basename == "local_file" for path in tmpdir.listdir())
+
+
+def test_write_to_output_exceptions(tmpdir, monkeypatch, capsys):
+
+ class Spam(object):
+ def __str__(self):
+ raise Exception("break str")
+
+ test = {1: object(), 2: Spam()}
+ test[3] = test
+ write_result_to_output_dir(str(tmpdir), test)
+ assert "Error writing" in test["output_files"]
+
+ output_dir = tmpdir.join("eggs")
+ output_dir.write("spam") # so now it's not a dir
+ write_to_output_file(str(output_dir), "somefile", "somedata")
+ assert "Could not write" in capsys.readouterr()[1]
+
+ monkeypatch.setattr("openshift_health_check.prepare_output_dir", lambda *_: False)
+ write_result_to_output_dir(str(tmpdir), test)
+ assert "Error creating" in test["output_files"]
diff --git a/roles/openshift_health_checker/test/aos_version_test.py b/roles/openshift_health_checker/test/aos_version_test.py
index 697805dd2..4100f6c70 100644
--- a/roles/openshift_health_checker/test/aos_version_test.py
+++ b/roles/openshift_health_checker/test/aos_version_test.py
@@ -18,7 +18,43 @@ expected_pkgs = {
}
-@pytest.mark.parametrize('pkgs, expect_not_found', [
+@pytest.mark.parametrize('pkgs,expected_pkgs_dict', [
+ (
+ # all found
+ [Package('spam', '3.2.1'), Package('eggs', '3.2.1')],
+ expected_pkgs,
+ ),
+ (
+ # found with more specific version
+ [Package('spam', '3.2.1'), Package('eggs', '3.2.1.5')],
+ expected_pkgs,
+ ),
+ (
+ [Package('ovs', '2.6'), Package('ovs', '2.4')],
+ {
+ "ovs": {
+ "name": "ovs",
+ "version": ["2.6", "2.7"],
+ "check_multi": False,
+ }
+ },
+ ),
+ (
+ [Package('ovs', '2.7')],
+ {
+ "ovs": {
+ "name": "ovs",
+ "version": ["2.6", "2.7"],
+ "check_multi": False,
+ }
+ },
+ ),
+])
+def test_check_precise_version_found(pkgs, expected_pkgs_dict):
+ aos_version._check_precise_version_found(pkgs, expected_pkgs_dict)
+
+
+@pytest.mark.parametrize('pkgs,expect_not_found', [
(
[],
{
@@ -55,14 +91,6 @@ expected_pkgs = {
}, # not the right version
),
(
- [Package('spam', '3.2.1'), Package('eggs', '3.2.1')],
- {}, # all found
- ),
- (
- [Package('spam', '3.2.1'), Package('eggs', '3.2.1.5')],
- {}, # found with more specific version
- ),
- (
[Package('eggs', '1.2.3'), Package('eggs', '3.2.1.5')],
{
"spam": {
@@ -73,64 +101,86 @@ expected_pkgs = {
}, # eggs found with multiple versions
),
])
-def test_check_pkgs_for_precise_version(pkgs, expect_not_found):
- if expect_not_found:
- with pytest.raises(aos_version.PreciseVersionNotFound) as e:
- aos_version._check_precise_version_found(pkgs, expected_pkgs)
-
- assert list(expect_not_found.values()) == e.value.problem_pkgs
- else:
+def test_check_precise_version_found_fail(pkgs, expect_not_found):
+ with pytest.raises(aos_version.PreciseVersionNotFound) as e:
aos_version._check_precise_version_found(pkgs, expected_pkgs)
+ assert list(expect_not_found.values()) == e.value.problem_pkgs
-@pytest.mark.parametrize('pkgs, expect_higher', [
+@pytest.mark.parametrize('pkgs,expected_pkgs_dict', [
(
[],
- [],
+ expected_pkgs,
),
(
+ # more precise but not strictly higher
[Package('spam', '3.2.1.9')],
- [], # more precise but not strictly higher
+ expected_pkgs,
),
(
+ [Package('ovs', '2.7')],
+ {
+ "ovs": {
+ "name": "ovs",
+ "version": ["2.6", "2.7"],
+ "check_multi": False,
+ }
+ },
+ ),
+])
+def test_check_higher_version_found(pkgs, expected_pkgs_dict):
+ aos_version._check_higher_version_found(pkgs, expected_pkgs_dict)
+
+
+@pytest.mark.parametrize('pkgs,expected_pkgs_dict,expect_higher', [
+ (
[Package('spam', '3.3')],
+ expected_pkgs,
['spam-3.3'], # lower precision, but higher
),
(
[Package('spam', '3.2.1'), Package('eggs', '3.3.2')],
+ expected_pkgs,
['eggs-3.3.2'], # one too high
),
(
[Package('eggs', '1.2.3'), Package('eggs', '3.2.1.5'), Package('eggs', '3.4')],
+ expected_pkgs,
['eggs-3.4'], # multiple versions, one is higher
),
(
[Package('eggs', '3.2.1'), Package('eggs', '3.4'), Package('eggs', '3.3')],
+ expected_pkgs,
['eggs-3.4'], # multiple versions, two are higher
),
+ (
+ [Package('ovs', '2.8')],
+ {
+ "ovs": {
+ "name": "ovs",
+ "version": ["2.6", "2.7"],
+ "check_multi": False,
+ }
+ },
+ ['ovs-2.8'],
+ ),
])
-def test_check_pkgs_for_greater_version(pkgs, expect_higher):
- if expect_higher:
- with pytest.raises(aos_version.FoundHigherVersion) as e:
- aos_version._check_higher_version_found(pkgs, expected_pkgs)
- assert set(expect_higher) == set(e.value.problem_pkgs)
- else:
- aos_version._check_higher_version_found(pkgs, expected_pkgs)
+def test_check_higher_version_found_fail(pkgs, expected_pkgs_dict, expect_higher):
+ with pytest.raises(aos_version.FoundHigherVersion) as e:
+ aos_version._check_higher_version_found(pkgs, expected_pkgs_dict)
+ assert set(expect_higher) == set(e.value.problem_pkgs)
-@pytest.mark.parametrize('pkgs, expect_to_flag_pkgs', [
- (
- [],
- [],
- ),
- (
- [Package('spam', '3.2.1')],
- [],
- ),
- (
- [Package('spam', '3.2.1'), Package('eggs', '3.2.2')],
- [],
- ),
+@pytest.mark.parametrize('pkgs', [
+ [],
+ [Package('spam', '3.2.1')],
+ [Package('spam', '3.2.1'), Package('eggs', '3.2.2')],
+])
+def test_check_multi_minor_release(pkgs):
+ aos_version._check_multi_minor_release(pkgs, expected_pkgs)
+
+
+@pytest.mark.parametrize('pkgs,expect_to_flag_pkgs', [
(
[Package('spam', '3.2.1'), Package('spam', '3.3.2')],
['spam'],
@@ -140,10 +190,7 @@ def test_check_pkgs_for_greater_version(pkgs, expect_higher):
['eggs'],
),
])
-def test_check_pkgs_for_multi_release(pkgs, expect_to_flag_pkgs):
- if expect_to_flag_pkgs:
- with pytest.raises(aos_version.FoundMultiRelease) as e:
- aos_version._check_multi_minor_release(pkgs, expected_pkgs)
- assert set(expect_to_flag_pkgs) == set(e.value.problem_pkgs)
- else:
+def test_check_multi_minor_release_fail(pkgs, expect_to_flag_pkgs):
+ with pytest.raises(aos_version.FoundMultiRelease) as e:
aos_version._check_multi_minor_release(pkgs, expected_pkgs)
+ assert set(expect_to_flag_pkgs) == set(e.value.problem_pkgs)
diff --git a/roles/openshift_health_checker/test/conftest.py b/roles/openshift_health_checker/test/conftest.py
index 3cbd65507..244a1f0fa 100644
--- a/roles/openshift_health_checker/test/conftest.py
+++ b/roles/openshift_health_checker/test/conftest.py
@@ -7,5 +7,6 @@ openshift_health_checker_path = os.path.dirname(os.path.dirname(__file__))
sys.path[1:1] = [
openshift_health_checker_path,
os.path.join(openshift_health_checker_path, 'action_plugins'),
+ os.path.join(openshift_health_checker_path, 'callback_plugins'),
os.path.join(openshift_health_checker_path, 'library'),
]
diff --git a/roles/openshift_health_checker/test/curator_test.py b/roles/openshift_health_checker/test/curator_test.py
index ae108c96e..62c680b74 100644
--- a/roles/openshift_health_checker/test/curator_test.py
+++ b/roles/openshift_health_checker/test/curator_test.py
@@ -1,22 +1,6 @@
import pytest
-from openshift_checks.logging.curator import Curator
-
-
-def canned_curator(exec_oc=None):
- """Create a Curator check object with canned exec_oc method"""
- check = Curator("dummy") # fails if a module is actually invoked
- if exec_oc:
- check._exec_oc = exec_oc
- return check
-
-
-def assert_error(error, expect_error):
- if expect_error:
- assert error
- assert expect_error in error
- else:
- assert not error
+from openshift_checks.logging.curator import Curator, OpenShiftCheckException
plain_curator_pod = {
@@ -44,25 +28,30 @@ not_running_curator_pod = {
}
+def test_get_curator_pods():
+ check = Curator()
+ check.get_pods_for_component = lambda *_: [plain_curator_pod]
+ result = check.run()
+ assert "failed" not in result or not result["failed"]
+
+
@pytest.mark.parametrize('pods, expect_error', [
(
[],
- "no Curator pods",
- ),
- (
- [plain_curator_pod],
- None,
+ 'MissingComponentPods',
),
(
[not_running_curator_pod],
- "not currently in a running state",
+ 'CuratorNotRunning',
),
(
[plain_curator_pod, plain_curator_pod],
- "more than one Curator pod",
+ 'TooManyCurators',
),
])
-def test_get_curator_pods(pods, expect_error):
- check = canned_curator()
- error = check.check_curator(pods)
- assert_error(error, expect_error)
+def test_get_curator_pods_fail(pods, expect_error):
+ check = Curator()
+ check.get_pods_for_component = lambda *_: pods
+ with pytest.raises(OpenShiftCheckException) as excinfo:
+ check.run()
+ assert excinfo.value.name == expect_error
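
These curator tests switch from matching substrings in returned error text to asserting on a symbolic exception name via excinfo.value.name. A minimal sketch of that pattern, using a hypothetical stand-in rather than the real OpenShiftCheckException from openshift_checks:

    import pytest

    class NamedCheckError(Exception):        # stand-in for illustration only
        def __init__(self, name, msg):
            super(NamedCheckError, self).__init__(msg)
            self.name = name

    def check_curator(pods):
        if not pods:
            raise NamedCheckError('MissingComponentPods', 'no Curator pods found')

    def test_missing_pods():
        with pytest.raises(NamedCheckError) as excinfo:
            check_curator([])
        assert excinfo.value.name == 'MissingComponentPods'
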
diff --git a/roles/openshift_health_checker/test/diagnostics_test.py b/roles/openshift_health_checker/test/diagnostics_test.py
new file mode 100644
index 000000000..800889fa7
--- /dev/null
+++ b/roles/openshift_health_checker/test/diagnostics_test.py
@@ -0,0 +1,50 @@
+import pytest
+
+from openshift_checks.diagnostics import DiagnosticCheck, OpenShiftCheckException
+
+
+@pytest.fixture()
+def task_vars():
+ return dict(
+ openshift=dict(
+ common=dict(config_base="/etc/origin/")
+ )
+ )
+
+
+def test_module_succeeds(task_vars):
+ check = DiagnosticCheck(lambda *_: {"result": "success"}, task_vars)
+ check.is_first_master = lambda: True
+ assert check.is_active()
+ check.exec_diagnostic("spam")
+ assert not check.failures
+
+
+def test_oc_not_there(task_vars):
+ def exec_module(*_):
+ return {"failed": True, "result": "[Errno 2] No such file or directory"}
+
+ check = DiagnosticCheck(exec_module, task_vars)
+ with pytest.raises(OpenShiftCheckException) as excinfo:
+ check.exec_diagnostic("spam")
+ assert excinfo.value.name == "OcNotFound"
+
+
+def test_module_fails(task_vars):
+ def exec_module(*_):
+ return {"failed": True, "result": "something broke"}
+
+ check = DiagnosticCheck(exec_module, task_vars)
+ check.exec_diagnostic("spam")
+ assert check.failures and check.failures[0].name == "OcDiagFailed"
+
+
+def test_names_executed(task_vars):
+ task_vars["openshift_check_diagnostics"] = diagnostics = "ConfigContexts,spam,,eggs"
+
+ def exec_module(module, args, *_):
+ assert "extra_args" in args
+ assert args["extra_args"][0] in diagnostics
+ return {"result": "success"}
+
+ DiagnosticCheck(exec_module, task_vars).run()
diff --git a/roles/openshift_health_checker/test/disk_availability_test.py b/roles/openshift_health_checker/test/disk_availability_test.py
index 945b9eafc..9ae679b79 100644
--- a/roles/openshift_health_checker/test/disk_availability_test.py
+++ b/roles/openshift_health_checker/test/disk_availability_test.py
@@ -17,26 +17,37 @@ def test_is_active(group_names, is_active):
task_vars = dict(
group_names=group_names,
)
- assert DiskAvailability.is_active(task_vars=task_vars) == is_active
+ assert DiskAvailability(None, task_vars).is_active() == is_active
-@pytest.mark.parametrize('ansible_mounts,extra_words', [
- ([], ['none']), # empty ansible_mounts
- ([{'mount': '/mnt'}], ['/mnt']), # missing relevant mount paths
- ([{'mount': '/var'}], ['/var']), # missing size_available
+@pytest.mark.parametrize('desc, ansible_mounts, expect_chunks', [
+ (
+ 'empty ansible_mounts',
+ [],
+ ['determine mount point', 'none'],
+ ),
+ (
+ 'missing relevant mount paths',
+ [{'mount': '/mnt'}],
+ ['determine mount point', '/mnt'],
+ ),
+ (
+ 'missing size_available',
+ [{'mount': '/var'}, {'mount': '/usr'}, {'mount': '/tmp'}],
+ ['missing', 'size_available'],
+ ),
])
-def test_cannot_determine_available_disk(ansible_mounts, extra_words):
+def test_cannot_determine_available_disk(desc, ansible_mounts, expect_chunks):
task_vars = dict(
group_names=['masters'],
ansible_mounts=ansible_mounts,
)
- check = DiskAvailability(execute_module=fake_execute_module)
with pytest.raises(OpenShiftCheckException) as excinfo:
- check.run(tmp=None, task_vars=task_vars)
+ DiskAvailability(fake_execute_module, task_vars).run()
- for word in 'determine disk availability'.split() + extra_words:
- assert word in str(excinfo.value)
+ for chunk in expect_chunks:
+ assert chunk in str(excinfo.value)
@pytest.mark.parametrize('group_names,configured_min,ansible_mounts', [
@@ -93,14 +104,14 @@ def test_succeeds_with_recommended_disk_space(group_names, configured_min, ansib
ansible_mounts=ansible_mounts,
)
- check = DiskAvailability(execute_module=fake_execute_module)
- result = check.run(tmp=None, task_vars=task_vars)
+ result = DiskAvailability(fake_execute_module, task_vars).run()
assert not result.get('failed', False)
-@pytest.mark.parametrize('group_names,configured_min,ansible_mounts,extra_words', [
+@pytest.mark.parametrize('name,group_names,configured_min,ansible_mounts,expect_chunks', [
(
+ 'test with no space available',
['masters'],
0,
[{
@@ -110,6 +121,7 @@ def test_succeeds_with_recommended_disk_space(group_names, configured_min, ansib
['0.0 GB'],
),
(
+ 'test with a higher configured required value',
['masters'],
100, # set a higher threshold
[{
@@ -119,6 +131,7 @@ def test_succeeds_with_recommended_disk_space(group_names, configured_min, ansib
['100.0 GB'],
),
(
+ 'test with 1GB available, but "0" GB space requirement',
['nodes'],
0,
[{
@@ -128,6 +141,7 @@ def test_succeeds_with_recommended_disk_space(group_names, configured_min, ansib
['1.0 GB'],
),
(
+ 'test with no space available, but "0" GB space requirement',
['etcd'],
0,
[{
@@ -137,16 +151,17 @@ def test_succeeds_with_recommended_disk_space(group_names, configured_min, ansib
['0.0 GB'],
),
(
+ 'test with enough space for a node, but not for a master',
['nodes', 'masters'],
0,
[{
'mount': '/',
- # enough space for a node, not enough for a master
'size_available': 15 * 10**9 + 1,
}],
['15.0 GB'],
),
(
+ 'test failure with enough space on "/", but not enough on "/var"',
['etcd'],
0,
[{
@@ -160,20 +175,74 @@ def test_succeeds_with_recommended_disk_space(group_names, configured_min, ansib
}],
['0.0 GB'],
),
-])
-def test_fails_with_insufficient_disk_space(group_names, configured_min, ansible_mounts, extra_words):
+], ids=lambda argval: argval[0])
+def test_fails_with_insufficient_disk_space(name, group_names, configured_min, ansible_mounts, expect_chunks):
task_vars = dict(
group_names=group_names,
openshift_check_min_host_disk_gb=configured_min,
ansible_mounts=ansible_mounts,
)
- check = DiskAvailability(execute_module=fake_execute_module)
- result = check.run(tmp=None, task_vars=task_vars)
+ check = DiskAvailability(fake_execute_module, task_vars)
+ check.run()
+
+ assert check.failures
+ for chunk in 'below recommended'.split() + expect_chunks:
+ assert chunk in str(check.failures[0])
+
+
+@pytest.mark.parametrize('name,group_names,context,ansible_mounts,failed,extra_words', [
+ (
+ 'test without enough space for master under "upgrade" context',
+ ['nodes', 'masters'],
+ "upgrade",
+ [{
+ 'mount': '/',
+ 'size_available': 1 * 10**9 + 1,
+ 'size_total': 21 * 10**9 + 1,
+ }],
+ True,
+ ["1.0 GB"],
+ ),
+ (
+ 'test with enough space for master under "upgrade" context',
+ ['nodes', 'masters'],
+ "upgrade",
+ [{
+ 'mount': '/',
+ 'size_available': 10 * 10**9 + 1,
+ 'size_total': 21 * 10**9 + 1,
+ }],
+ False,
+ [],
+ ),
+ (
+ 'test with not enough space for master, and non-upgrade context',
+ ['nodes', 'masters'],
+ "health",
+ [{
+ 'mount': '/',
+ # not enough space for a master,
+ # "health" context should not lower requirement
+ 'size_available': 20 * 10**9 + 1,
+ }],
+ True,
+ ["20.0 GB", "below minimum"],
+ ),
+], ids=lambda argval: argval[0])
+def test_min_required_space_changes_with_upgrade_context(name, group_names, context, ansible_mounts, failed, extra_words):
+ task_vars = dict(
+ r_openshift_health_checker_playbook_context=context,
+ group_names=group_names,
+ ansible_mounts=ansible_mounts,
+ )
+
+ check = DiskAvailability(fake_execute_module, task_vars)
+ check.run()
- assert result['failed']
- for word in 'below recommended'.split() + extra_words:
- assert word in result['msg']
+ assert bool(check.failures) == failed
+ for word in extra_words:
+ assert word in str(check.failures[0])
def fake_execute_module(*args):
diff --git a/roles/openshift_health_checker/test/docker_image_availability_test.py b/roles/openshift_health_checker/test/docker_image_availability_test.py
index 3b9e097fb..c523ffd5c 100644
--- a/roles/openshift_health_checker/test/docker_image_availability_test.py
+++ b/roles/openshift_health_checker/test/docker_image_availability_test.py
@@ -3,11 +3,26 @@ import pytest
from openshift_checks.docker_image_availability import DockerImageAvailability
+@pytest.fixture()
+def task_vars():
+ return dict(
+ openshift=dict(
+ common=dict(
+ service_type='origin',
+ is_containerized=False,
+ is_atomic=False,
+ ),
+ docker=dict(),
+ ),
+ openshift_deployment_type='origin',
+ openshift_image_tag='',
+ group_names=['nodes', 'masters'],
+ )
+
+
@pytest.mark.parametrize('deployment_type, is_containerized, group_names, expect_active', [
("origin", True, [], True),
("openshift-enterprise", True, [], True),
- ("enterprise", True, [], False),
- ("online", True, [], False),
("invalid", True, [], False),
("", True, [], False),
("origin", False, [], False),
@@ -15,13 +30,11 @@ from openshift_checks.docker_image_availability import DockerImageAvailability
("origin", False, ["nodes", "masters"], True),
("openshift-enterprise", False, ["etcd"], False),
])
-def test_is_active(deployment_type, is_containerized, group_names, expect_active):
- task_vars = dict(
- openshift=dict(common=dict(is_containerized=is_containerized)),
- openshift_deployment_type=deployment_type,
- group_names=group_names,
- )
- assert DockerImageAvailability.is_active(task_vars=task_vars) == expect_active
+def test_is_active(task_vars, deployment_type, is_containerized, group_names, expect_active):
+ task_vars['openshift_deployment_type'] = deployment_type
+ task_vars['openshift']['common']['is_containerized'] = is_containerized
+ task_vars['group_names'] = group_names
+ assert DockerImageAvailability(None, task_vars).is_active() == expect_active
@pytest.mark.parametrize("is_containerized,is_atomic", [
@@ -30,10 +43,10 @@ def test_is_active(deployment_type, is_containerized, group_names, expect_active
(True, False),
(False, True),
])
-def test_all_images_available_locally(is_containerized, is_atomic):
- def execute_module(module_name, module_args, task_vars):
+def test_all_images_available_locally(task_vars, is_containerized, is_atomic):
+ def execute_module(module_name, module_args, *_):
if module_name == "yum":
- return {"changed": True}
+ return {}
assert module_name == "docker_image_facts"
assert 'name' in module_args
@@ -42,19 +55,9 @@ def test_all_images_available_locally(is_containerized, is_atomic):
'images': [module_args['name']],
}
- result = DockerImageAvailability(execute_module=execute_module).run(tmp=None, task_vars=dict(
- openshift=dict(
- common=dict(
- service_type='origin',
- is_containerized=is_containerized,
- is_atomic=is_atomic,
- ),
- docker=dict(additional_registries=["docker.io"]),
- ),
- openshift_deployment_type='origin',
- openshift_image_tag='3.4',
- group_names=['nodes', 'masters'],
- ))
+ task_vars['openshift']['common']['is_containerized'] = is_containerized
+ task_vars['openshift']['common']['is_atomic'] = is_atomic
+ result = DockerImageAvailability(execute_module, task_vars).run()
assert not result.get('failed', False)
@@ -63,31 +66,43 @@ def test_all_images_available_locally(is_containerized, is_atomic):
False,
True,
])
-def test_all_images_available_remotely(available_locally):
- def execute_module(module_name, module_args, task_vars):
+def test_all_images_available_remotely(task_vars, available_locally):
+ def execute_module(module_name, *_):
if module_name == 'docker_image_facts':
return {'images': [], 'failed': available_locally}
- return {'changed': False}
+ return {}
- result = DockerImageAvailability(execute_module=execute_module).run(tmp=None, task_vars=dict(
- openshift=dict(
- common=dict(
- service_type='origin',
- is_containerized=False,
- is_atomic=False,
- ),
- docker=dict(additional_registries=["docker.io", "registry.access.redhat.com"]),
- ),
- openshift_deployment_type='origin',
- openshift_image_tag='v3.4',
- group_names=['nodes', 'masters'],
- ))
+ task_vars['openshift_docker_additional_registries'] = ["docker.io", "registry.access.redhat.com"]
+ task_vars['openshift_image_tag'] = 'v3.4'
+ check = DockerImageAvailability(execute_module, task_vars)
+ check._module_retry_interval = 0
+ result = check.run()
assert not result.get('failed', False)
-def test_all_images_unavailable():
- def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None):
+def test_all_images_unavailable(task_vars):
+ def execute_module(module_name=None, *args):
+ if module_name == "wait_for":
+ return {}
+ elif module_name == "command":
+ return {'failed': True}
+
+ return {} # docker_image_facts failure
+
+ task_vars['openshift_docker_additional_registries'] = ["docker.io"]
+ task_vars['openshift_deployment_type'] = "openshift-enterprise"
+ task_vars['openshift_image_tag'] = 'latest'
+ check = DockerImageAvailability(execute_module, task_vars)
+ check._module_retry_interval = 0
+ actual = check.run()
+
+ assert actual['failed']
+ assert "required Docker images are not available" in actual['msg']
+
+
+def test_no_known_registries():
+ def execute_module(module_name=None, *_):
if module_name == "command":
return {
'failed': True,
@@ -97,23 +112,26 @@ def test_all_images_unavailable():
'changed': False,
}
- check = DockerImageAvailability(execute_module=execute_module)
- actual = check.run(tmp=None, task_vars=dict(
+ def mock_known_docker_registries():
+ return []
+
+ dia = DockerImageAvailability(execute_module, task_vars=dict(
openshift=dict(
common=dict(
service_type='origin',
is_containerized=False,
is_atomic=False,
- ),
- docker=dict(additional_registries=["docker.io"]),
+ )
),
+ openshift_docker_additional_registries=["docker.io"],
openshift_deployment_type="openshift-enterprise",
openshift_image_tag='latest',
group_names=['nodes', 'masters'],
))
-
+ dia.known_docker_registries = mock_known_docker_registries
+ actual = dia.run()
assert actual['failed']
- assert "required Docker images are not available" in actual['msg']
+ assert "Unable to retrieve any docker registries." in actual['msg']
@pytest.mark.parametrize("message,extra_words", [
@@ -126,62 +144,63 @@ def test_all_images_unavailable():
["dependencies can be installed via `yum`"]
),
])
-def test_skopeo_update_failure(message, extra_words):
- def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None):
+def test_skopeo_update_failure(task_vars, message, extra_words):
+ def execute_module(module_name=None, *_):
if module_name == "yum":
return {
"failed": True,
"msg": message,
- "changed": False,
}
- return {'changed': False}
+ return {}
- actual = DockerImageAvailability(execute_module=execute_module).run(tmp=None, task_vars=dict(
- openshift=dict(
- common=dict(
- service_type='origin',
- is_containerized=False,
- is_atomic=False,
- ),
- docker=dict(additional_registries=["unknown.io"]),
- ),
- openshift_deployment_type="openshift-enterprise",
- openshift_image_tag='',
- group_names=['nodes', 'masters'],
- ))
+ task_vars['openshift_docker_additional_registries'] = ["unknown.io"]
+ task_vars['openshift_deployment_type'] = "openshift-enterprise"
+ check = DockerImageAvailability(execute_module, task_vars)
+ check._module_retry_interval = 0
+ actual = check.run()
assert actual["failed"]
for word in extra_words:
assert word in actual["msg"]
-@pytest.mark.parametrize("deployment_type,registries", [
- ("origin", ["unknown.io"]),
- ("openshift-enterprise", ["registry.access.redhat.com"]),
- ("openshift-enterprise", []),
-])
-def test_registry_availability(deployment_type, registries):
- def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None):
- return {
- 'changed': False,
- }
-
- actual = DockerImageAvailability(execute_module=execute_module).run(tmp=None, task_vars=dict(
- openshift=dict(
- common=dict(
- service_type='origin',
- is_containerized=False,
- is_atomic=False,
- ),
- docker=dict(additional_registries=registries),
+@pytest.mark.parametrize(
+ "image, registries, connection_test_failed, skopeo_failed, "
+ "expect_success, expect_registries_reached", [
+ (
+ "spam/eggs:v1", ["test.reg"],
+ True, True,
+ False,
+ {"test.reg": False},
),
- openshift_deployment_type=deployment_type,
- openshift_image_tag='',
- group_names=['nodes', 'masters'],
- ))
+ (
+ "spam/eggs:v1", ["test.reg"],
+ False, True,
+ False,
+ {"test.reg": True},
+ ),
+ (
+ "eggs.reg/spam/eggs:v1", ["test.reg"],
+ False, False,
+ True,
+ {"eggs.reg": True},
+ ),
+ ])
+def test_registry_availability(image, registries, connection_test_failed, skopeo_failed,
+ expect_success, expect_registries_reached):
+ def execute_module(module_name=None, *_):
+ if module_name == "wait_for":
+ return dict(msg="msg", failed=connection_test_failed)
+ elif module_name == "command":
+ return dict(msg="msg", failed=skopeo_failed)
+
+ check = DockerImageAvailability(execute_module, task_vars())
+ check._module_retry_interval = 0
- assert not actual.get("failed", False)
+ available = check.is_available_skopeo_image(image, registries)
+ assert available == expect_success
+ assert expect_registries_reached == check.reachable_registries
@pytest.mark.parametrize("deployment_type, is_containerized, groups, oreg_url, expected", [
@@ -258,7 +277,7 @@ def test_required_images(deployment_type, is_containerized, groups, oreg_url, ex
openshift_image_tag='vtest',
)
- assert expected == DockerImageAvailability("DUMMY").required_images(task_vars)
+ assert expected == DockerImageAvailability(task_vars=task_vars).required_images()
def test_containerized_etcd():
@@ -272,4 +291,4 @@ def test_containerized_etcd():
group_names=['etcd'],
)
expected = set(['registry.access.redhat.com/rhel7/etcd'])
- assert expected == DockerImageAvailability("DUMMY").required_images(task_vars)
+ assert expected == DockerImageAvailability(task_vars=task_vars).required_images()
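
Throughout the image-availability tests, execute_module is replaced by a plain callable that dispatches on the module name, so the check runs without Ansible. A hedged sketch of that test double, with return shapes inferred from the hunks above rather than from the real modules:

    def execute_module(module_name=None, module_args=None, *_):
        if module_name == 'docker_image_facts':
            return {'images': [], 'failed': True}    # image not present locally
        if module_name == 'wait_for':
            return {}                                # registry port reachable
        if module_name == 'command':
            return {'failed': True}                  # skopeo inspect failed
        return {}                                    # any other module "succeeds"
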
diff --git a/roles/openshift_health_checker/test/docker_storage_test.py b/roles/openshift_health_checker/test/docker_storage_test.py
index 876614b1d..e0dccc062 100644
--- a/roles/openshift_health_checker/test/docker_storage_test.py
+++ b/roles/openshift_health_checker/test/docker_storage_test.py
@@ -4,12 +4,6 @@ from openshift_checks import OpenShiftCheckException
from openshift_checks.docker_storage import DockerStorage
-def dummy_check(execute_module=None):
- def dummy_exec(self, status, task_vars):
- raise Exception("dummy executor called")
- return DockerStorage(execute_module=execute_module or dummy_exec)
-
-
@pytest.mark.parametrize('is_containerized, group_names, is_active', [
(False, ["masters", "etcd"], False),
(False, ["masters", "nodes"], True),
@@ -20,10 +14,11 @@ def test_is_active(is_containerized, group_names, is_active):
openshift=dict(common=dict(is_containerized=is_containerized)),
group_names=group_names,
)
- assert DockerStorage.is_active(task_vars=task_vars) == is_active
+ assert DockerStorage(None, task_vars).is_active() == is_active
-non_atomic_task_vars = {"openshift": {"common": {"is_atomic": False}}}
+def non_atomic_task_vars():
+ return {"openshift": {"common": {"is_atomic": False}}}
@pytest.mark.parametrize('docker_info, failed, expect_msg', [
@@ -56,7 +51,7 @@ non_atomic_task_vars = {"openshift": {"common": {"is_atomic": False}}}
(
dict(info={
"Driver": "overlay2",
- "DriverStatus": []
+ "DriverStatus": [("Backing Filesystem", "xfs")],
}),
False,
[],
@@ -64,9 +59,30 @@ non_atomic_task_vars = {"openshift": {"common": {"is_atomic": False}}}
(
dict(info={
"Driver": "overlay",
+ "DriverStatus": [("Backing Filesystem", "btrfs")],
}),
True,
- ["unsupported Docker storage driver"],
+ ["storage is type 'btrfs'", "only supported with\n'xfs'"],
+ ),
+ (
+ dict(info={
+ "Driver": "overlay2",
+ "DriverStatus": [("Backing Filesystem", "xfs")],
+ "OperatingSystem": "Red Hat Enterprise Linux Server release 7.2 (Maipo)",
+ "KernelVersion": "3.10.0-327.22.2.el7.x86_64",
+ }),
+ True,
+ ["Docker reports kernel version 3.10.0-327"],
+ ),
+ (
+ dict(info={
+ "Driver": "overlay",
+ "DriverStatus": [("Backing Filesystem", "xfs")],
+ "OperatingSystem": "CentOS",
+ "KernelVersion": "3.10.0-514",
+ }),
+ False,
+ [],
),
(
dict(info={
@@ -77,16 +93,17 @@ non_atomic_task_vars = {"openshift": {"common": {"is_atomic": False}}}
),
])
def test_check_storage_driver(docker_info, failed, expect_msg):
- def execute_module(module_name, module_args, tmp=None, task_vars=None):
+ def execute_module(module_name, *_):
if module_name == "yum":
return {}
if module_name != "docker_info":
raise ValueError("not expecting module " + module_name)
return docker_info
- check = dummy_check(execute_module=execute_module)
- check._check_dm_usage = lambda status, task_vars: dict() # stub out for this test
- result = check.run(tmp=None, task_vars=non_atomic_task_vars)
+ check = DockerStorage(execute_module, non_atomic_task_vars())
+ check.check_dm_usage = lambda status: dict() # stub out for this test
+ check.check_overlay_usage = lambda info: dict() # stub out for this test
+ result = check.run()
if failed:
assert result["failed"]
@@ -145,9 +162,9 @@ not_enough_space = {
),
])
def test_dm_usage(task_vars, driver_status, vg_free, success, expect_msg):
- check = dummy_check()
- check._get_vg_free = lambda pool, task_vars: vg_free
- result = check._check_dm_usage(driver_status, task_vars)
+ check = DockerStorage(None, task_vars)
+ check.get_vg_free = lambda pool: vg_free
+ result = check.check_dm_usage(driver_status)
result_success = not result.get("failed")
assert result_success is success
@@ -187,18 +204,18 @@ def test_dm_usage(task_vars, driver_status, vg_free, success, expect_msg):
)
])
def test_vg_free(pool, command_returns, raises, returns):
- def execute_module(module_name, module_args, tmp=None, task_vars=None):
+ def execute_module(module_name, *_):
if module_name != "command":
raise ValueError("not expecting module " + module_name)
return command_returns
- check = dummy_check(execute_module=execute_module)
+ check = DockerStorage(execute_module)
if raises:
with pytest.raises(OpenShiftCheckException) as err:
- check._get_vg_free(pool, {})
+ check.get_vg_free(pool)
assert raises in str(err.value)
else:
- ret = check._get_vg_free(pool, {})
+ ret = check.get_vg_free(pool)
assert ret == returns
@@ -209,7 +226,7 @@ def test_vg_free(pool, command_returns, raises, returns):
("12g", 12.0 * 1024**3),
])
def test_convert_to_bytes(string, expect_bytes):
- got = DockerStorage._convert_to_bytes(string)
+ got = DockerStorage.convert_to_bytes(string)
assert got == expect_bytes
@@ -219,6 +236,70 @@ def test_convert_to_bytes(string, expect_bytes):
])
def test_convert_to_bytes_error(string):
with pytest.raises(ValueError) as err:
- DockerStorage._convert_to_bytes(string)
+ DockerStorage.convert_to_bytes(string)
assert "Cannot convert" in str(err.value)
assert string in str(err.value)
+
+
+ansible_mounts_enough = [{
+ 'mount': '/var/lib/docker',
+ 'size_available': 50 * 10**9,
+ 'size_total': 50 * 10**9,
+}]
+ansible_mounts_not_enough = [{
+ 'mount': '/var/lib/docker',
+ 'size_available': 0,
+ 'size_total': 50 * 10**9,
+}]
+ansible_mounts_missing_fields = [dict(mount='/var/lib/docker')]
+ansible_mounts_zero_size = [{
+ 'mount': '/var/lib/docker',
+ 'size_available': 0,
+ 'size_total': 0,
+}]
+
+
+@pytest.mark.parametrize('ansible_mounts, threshold, expect_fail, expect_msg', [
+ (
+ ansible_mounts_enough,
+ None,
+ False,
+ [],
+ ),
+ (
+ ansible_mounts_not_enough,
+ None,
+ True,
+ ["usage percentage", "higher than threshold"],
+ ),
+ (
+ ansible_mounts_not_enough,
+ "bogus percent",
+ True,
+ ["is not a percentage"],
+ ),
+ (
+ ansible_mounts_missing_fields,
+ None,
+ True,
+ ["Ansible bug"],
+ ),
+ (
+ ansible_mounts_zero_size,
+ None,
+ True,
+ ["Ansible bug"],
+ ),
+])
+def test_overlay_usage(ansible_mounts, threshold, expect_fail, expect_msg):
+ task_vars = non_atomic_task_vars()
+ task_vars["ansible_mounts"] = ansible_mounts
+ if threshold is not None:
+ task_vars["max_overlay_usage_percent"] = threshold
+ check = DockerStorage(None, task_vars)
+ docker_info = dict(DockerRootDir="/var/lib/docker", Driver="overlay")
+ result = check.check_overlay_usage(docker_info)
+
+ assert expect_fail == bool(result.get("failed"))
+ for msg in expect_msg:
+ assert msg in result["msg"]
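
The new check_overlay_usage cases above exercise a percentage threshold against ansible_mounts data. A rough sketch of the comparison they imply (assumed logic for illustration; the role's actual implementation may differ):

    def overlay_usage_result(mount, threshold_percent=90.0):
        size_total = mount.get('size_total', 0)
        size_available = mount.get('size_available', 0)
        if size_total <= 0:
            # mirrors the "Ansible bug" cases: mount facts missing or zero-sized
            return {'failed': True, 'msg': 'expected size_total in ansible_mounts (Ansible bug?)'}
        try:
            threshold = float(threshold_percent)
        except ValueError:
            return {'failed': True, 'msg': '"%s" is not a percentage' % threshold_percent}
        usage = 100.0 * (size_total - size_available) / size_total
        if usage > threshold:
            return {'failed': True,
                    'msg': 'usage percentage %.1f is higher than threshold %.1f' % (usage, threshold)}
        return {}
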
diff --git a/roles/openshift_health_checker/test/elasticsearch_test.py b/roles/openshift_health_checker/test/elasticsearch_test.py
index b9d375d8c..3fa5e8929 100644
--- a/roles/openshift_health_checker/test/elasticsearch_test.py
+++ b/roles/openshift_health_checker/test/elasticsearch_test.py
@@ -1,25 +1,26 @@
import pytest
import json
-from openshift_checks.logging.elasticsearch import Elasticsearch
+from openshift_checks.logging.elasticsearch import Elasticsearch, OpenShiftCheckExceptionList
+
task_vars_config_base = dict(openshift=dict(common=dict(config_base='/etc/origin')))
-def canned_elasticsearch(exec_oc=None):
- """Create an Elasticsearch check object with canned exec_oc method"""
- check = Elasticsearch("dummy") # fails if a module is actually invoked
+def canned_elasticsearch(task_vars=None, exec_oc=None):
+ """Create an Elasticsearch check object with stubbed exec_oc method"""
+ check = Elasticsearch(None, task_vars or {})
if exec_oc:
- check._exec_oc = exec_oc
+ check.exec_oc = exec_oc
return check
-def assert_error(error, expect_error):
- if expect_error:
- assert error
- assert expect_error in error
- else:
- assert not error
+def assert_error_in_list(expect_err, errorlist):
+ assert any(err.name == expect_err for err in errorlist), "{} in {}".format(str(expect_err), str(errorlist))
+
+
+def pods_by_name(pods):
+ return {pod['metadata']['name']: pod for pod in pods}
plain_es_pod = {
@@ -27,6 +28,7 @@ plain_es_pod = {
"labels": {"component": "es", "deploymentconfig": "logging-es"},
"name": "logging-es",
},
+ "spec": {},
"status": {
"conditions": [{"status": "True", "type": "Ready"}],
"containerStatuses": [{"ready": True}],
@@ -40,6 +42,7 @@ split_es_pod = {
"labels": {"component": "es", "deploymentconfig": "logging-es-2"},
"name": "logging-es-2",
},
+ "spec": {},
"status": {
"conditions": [{"status": "True", "type": "Ready"}],
"containerStatuses": [{"ready": True}],
@@ -48,12 +51,28 @@ split_es_pod = {
"_test_master_name_str": "name logging-es-2",
}
+unready_es_pod = {
+ "metadata": {
+ "labels": {"component": "es", "deploymentconfig": "logging-es-3"},
+ "name": "logging-es-3",
+ },
+ "spec": {},
+ "status": {
+ "conditions": [{"status": "False", "type": "Ready"}],
+ "containerStatuses": [{"ready": False}],
+ "podIP": "10.10.10.10",
+ },
+ "_test_master_name_str": "BAD_NAME_RESPONSE",
+}
+
def test_check_elasticsearch():
- assert 'No logging Elasticsearch pods' in canned_elasticsearch().check_elasticsearch([], {})
+ with pytest.raises(OpenShiftCheckExceptionList) as excinfo:
+ canned_elasticsearch().check_elasticsearch([])
+ assert_error_in_list('NoRunningPods', excinfo.value)
# canned oc responses to match so all the checks pass
- def _exec_oc(cmd, args, task_vars):
+ def exec_oc(cmd, args, **_):
if '_cat/master' in cmd:
return 'name logging-es'
elif '/_nodes' in cmd:
@@ -65,33 +84,41 @@ def test_check_elasticsearch():
else:
raise Exception(cmd)
- assert not canned_elasticsearch(_exec_oc).check_elasticsearch([plain_es_pod], {})
+ check = canned_elasticsearch({}, exec_oc)
+ check.get_pods_for_component = lambda *_: [plain_es_pod]
+ assert {} == check.run()
-def pods_by_name(pods):
- return {pod['metadata']['name']: pod for pod in pods}
+def test_check_running_es_pods():
+ pods, errors = Elasticsearch().running_elasticsearch_pods([plain_es_pod, unready_es_pod])
+ assert plain_es_pod in pods
+ assert_error_in_list('PodNotRunning', errors)
+
+
+def test_check_elasticsearch_masters():
+ pods = [plain_es_pod]
+ check = canned_elasticsearch(task_vars_config_base, lambda *args, **_: plain_es_pod['_test_master_name_str'])
+ assert not check.check_elasticsearch_masters(pods_by_name(pods))
@pytest.mark.parametrize('pods, expect_error', [
(
[],
- 'No logging Elasticsearch masters',
+ 'NoMasterFound',
),
(
- [plain_es_pod],
- None,
+ [unready_es_pod],
+ 'NoMasterName',
),
(
[plain_es_pod, split_es_pod],
- 'Found multiple Elasticsearch masters',
+ 'SplitBrainMasters',
),
])
-def test_check_elasticsearch_masters(pods, expect_error):
+def test_check_elasticsearch_masters_error(pods, expect_error):
test_pods = list(pods)
- check = canned_elasticsearch(lambda cmd, args, task_vars: test_pods.pop(0)['_test_master_name_str'])
-
- errors = check._check_elasticsearch_masters(pods_by_name(pods), task_vars_config_base)
- assert_error(''.join(errors), expect_error)
+ check = canned_elasticsearch(task_vars_config_base, lambda *args, **_: test_pods.pop(0)['_test_master_name_str'])
+ assert_error_in_list(expect_error, check.check_elasticsearch_masters(pods_by_name(pods)))
es_node_list = {
@@ -101,80 +128,76 @@ es_node_list = {
}}}
+def test_check_elasticsearch_node_list():
+ check = canned_elasticsearch(task_vars_config_base, lambda *args, **_: json.dumps(es_node_list))
+ assert not check.check_elasticsearch_node_list(pods_by_name([plain_es_pod]))
+
+
@pytest.mark.parametrize('pods, node_list, expect_error', [
(
[],
{},
- 'No logging Elasticsearch masters',
- ),
- (
- [plain_es_pod],
- es_node_list,
- None,
+ 'MissingComponentPods',
),
(
[plain_es_pod],
{}, # empty list of nodes triggers KeyError
- "Failed to query",
+ 'MissingNodeList',
),
(
[split_es_pod],
es_node_list,
- 'does not correspond to any known ES pod',
+ 'EsPodNodeMismatch',
),
])
-def test_check_elasticsearch_node_list(pods, node_list, expect_error):
- check = canned_elasticsearch(lambda cmd, args, task_vars: json.dumps(node_list))
+def test_check_elasticsearch_node_list_errors(pods, node_list, expect_error):
+ check = canned_elasticsearch(task_vars_config_base, lambda cmd, args, **_: json.dumps(node_list))
+ assert_error_in_list(expect_error, check.check_elasticsearch_node_list(pods_by_name(pods)))
- errors = check._check_elasticsearch_node_list(pods_by_name(pods), task_vars_config_base)
- assert_error(''.join(errors), expect_error)
+
+def test_check_elasticsearch_cluster_health():
+ test_health_data = [{"status": "green"}]
+ check = canned_elasticsearch(exec_oc=lambda *args, **_: json.dumps(test_health_data.pop(0)))
+ assert not check.check_es_cluster_health(pods_by_name([plain_es_pod]))
@pytest.mark.parametrize('pods, health_data, expect_error', [
(
[plain_es_pod],
- [{"status": "green"}],
- None,
- ),
- (
- [plain_es_pod],
[{"no-status": "should bomb"}],
- 'Could not retrieve cluster health status',
+ 'BadEsResponse',
),
(
[plain_es_pod, split_es_pod],
[{"status": "green"}, {"status": "red"}],
- 'Elasticsearch cluster health status is RED',
+ 'EsClusterHealthRed',
),
])
-def test_check_elasticsearch_cluster_health(pods, health_data, expect_error):
+def test_check_elasticsearch_cluster_health_errors(pods, health_data, expect_error):
test_health_data = list(health_data)
- check = canned_elasticsearch(lambda cmd, args, task_vars: json.dumps(test_health_data.pop(0)))
+ check = canned_elasticsearch(exec_oc=lambda *args, **_: json.dumps(test_health_data.pop(0)))
+ assert_error_in_list(expect_error, check.check_es_cluster_health(pods_by_name(pods)))
- errors = check._check_es_cluster_health(pods_by_name(pods), task_vars_config_base)
- assert_error(''.join(errors), expect_error)
+
+def test_check_elasticsearch_diskspace():
+ check = canned_elasticsearch(exec_oc=lambda *args, **_: 'IUse% Use%\n 3% 4%\n')
+ assert not check.check_elasticsearch_diskspace(pods_by_name([plain_es_pod]))
@pytest.mark.parametrize('disk_data, expect_error', [
(
'df: /elasticsearch/persistent: No such file or directory\n',
- 'Could not retrieve storage usage',
- ),
- (
- 'IUse% Use%\n 3% 4%\n',
- None,
+ 'BadDfResponse',
),
(
'IUse% Use%\n 95% 40%\n',
- 'Inode percent usage on the storage volume',
+ 'InodeUsageTooHigh',
),
(
'IUse% Use%\n 3% 94%\n',
- 'Disk percent usage on the storage volume',
+ 'DiskUsageTooHigh',
),
])
-def test_check_elasticsearch_diskspace(disk_data, expect_error):
- check = canned_elasticsearch(lambda cmd, args, task_vars: disk_data)
-
- errors = check._check_elasticsearch_diskspace(pods_by_name([plain_es_pod]), task_vars_config_base)
- assert_error(''.join(errors), expect_error)
+def test_check_elasticsearch_diskspace_errors(disk_data, expect_error):
+ check = canned_elasticsearch(exec_oc=lambda *args, **_: disk_data)
+ assert_error_in_list(expect_error, check.check_elasticsearch_diskspace(pods_by_name([plain_es_pod])))
diff --git a/roles/openshift_health_checker/test/etcd_imagedata_size_test.py b/roles/openshift_health_checker/test/etcd_imagedata_size_test.py
index df9d52d41..d3aae98f2 100644
--- a/roles/openshift_health_checker/test/etcd_imagedata_size_test.py
+++ b/roles/openshift_health_checker/test/etcd_imagedata_size_test.py
@@ -1,7 +1,8 @@
import pytest
from collections import namedtuple
-from openshift_checks.etcd_imagedata_size import EtcdImageDataSize, OpenShiftCheckException
+from openshift_checks.etcd_imagedata_size import EtcdImageDataSize
+from openshift_checks import OpenShiftCheckException
from etcdkeysize import check_etcd_key_size
@@ -51,12 +52,12 @@ def test_cannot_determine_available_mountpath(ansible_mounts, extra_words):
task_vars = dict(
ansible_mounts=ansible_mounts,
)
- check = EtcdImageDataSize(execute_module=fake_execute_module)
+ check = EtcdImageDataSize(fake_execute_module, task_vars)
with pytest.raises(OpenShiftCheckException) as excinfo:
- check.run(tmp=None, task_vars=task_vars)
+ check.run()
- for word in 'determine valid etcd mountpath'.split() + extra_words:
+ for word in ['Unable to determine mount point'] + extra_words:
assert word in str(excinfo.value)
@@ -111,14 +112,14 @@ def test_cannot_determine_available_mountpath(ansible_mounts, extra_words):
)
])
def test_check_etcd_key_size_calculates_correct_limit(ansible_mounts, tree, size_limit, should_fail, extra_words):
- def execute_module(module_name, args, tmp=None, task_vars=None):
+ def execute_module(module_name, module_args, *_):
if module_name != "etcdkeysize":
return {
"changed": False,
}
client = fake_etcd_client(tree)
- s, limit_exceeded = check_etcd_key_size(client, tree["key"], args["size_limit_bytes"])
+ s, limit_exceeded = check_etcd_key_size(client, tree["key"], module_args["size_limit_bytes"])
return {"size_limit_exceeded": limit_exceeded}
@@ -133,7 +134,7 @@ def test_check_etcd_key_size_calculates_correct_limit(ansible_mounts, tree, size
if size_limit is None:
task_vars.pop("etcd_max_image_data_size_bytes")
- check = EtcdImageDataSize(execute_module=execute_module).run(tmp=None, task_vars=task_vars)
+ check = EtcdImageDataSize(execute_module, task_vars).run()
if should_fail:
assert check["failed"]
@@ -267,14 +268,14 @@ def test_check_etcd_key_size_calculates_correct_limit(ansible_mounts, tree, size
),
])
def test_etcd_key_size_check_calculates_correct_size(ansible_mounts, tree, root_path, expected_size, extra_words):
- def execute_module(module_name, args, tmp=None, task_vars=None):
+ def execute_module(module_name, module_args, *_):
if module_name != "etcdkeysize":
return {
"changed": False,
}
client = fake_etcd_client(tree)
- size, limit_exceeded = check_etcd_key_size(client, root_path, args["size_limit_bytes"])
+ size, limit_exceeded = check_etcd_key_size(client, root_path, module_args["size_limit_bytes"])
assert size == expected_size
return {
@@ -289,12 +290,12 @@ def test_etcd_key_size_check_calculates_correct_size(ansible_mounts, tree, root_
)
)
- check = EtcdImageDataSize(execute_module=execute_module).run(tmp=None, task_vars=task_vars)
+ check = EtcdImageDataSize(execute_module, task_vars).run()
assert not check.get("failed", False)
def test_etcdkeysize_module_failure():
- def execute_module(module_name, tmp=None, task_vars=None):
+ def execute_module(module_name, *_):
if module_name != "etcdkeysize":
return {
"changed": False,
@@ -317,7 +318,7 @@ def test_etcdkeysize_module_failure():
)
)
- check = EtcdImageDataSize(execute_module=execute_module).run(tmp=None, task_vars=task_vars)
+ check = EtcdImageDataSize(execute_module, task_vars).run()
assert check["failed"]
for word in "Failed to retrieve stats":
diff --git a/roles/openshift_health_checker/test/etcd_traffic_test.py b/roles/openshift_health_checker/test/etcd_traffic_test.py
new file mode 100644
index 000000000..fae3e578d
--- /dev/null
+++ b/roles/openshift_health_checker/test/etcd_traffic_test.py
@@ -0,0 +1,72 @@
+import pytest
+
+from openshift_checks.etcd_traffic import EtcdTraffic
+
+
+@pytest.mark.parametrize('group_names,version,is_active', [
+ (['masters'], "3.5", False),
+ (['masters'], "3.6", False),
+ (['nodes'], "3.4", False),
+ (['etcd'], "3.4", True),
+ (['etcd'], "1.5", True),
+ (['etcd'], "3.1", False),
+ (['masters', 'nodes'], "3.5", False),
+ (['masters', 'etcd'], "3.5", True),
+ ([], "3.4", False),
+])
+def test_is_active(group_names, version, is_active):
+ task_vars = dict(
+ group_names=group_names,
+ openshift_image_tag=version,
+ )
+ assert EtcdTraffic(task_vars=task_vars).is_active() == is_active
+
+
+@pytest.mark.parametrize('group_names,matched,failed,extra_words', [
+ (["masters"], True, True, ["Higher than normal", "traffic"]),
+ (["masters", "etcd"], False, False, []),
+ (["etcd"], False, False, []),
+])
+def test_log_matches_high_traffic_msg(group_names, matched, failed, extra_words):
+ def execute_module(module_name, *_):
+ return {
+ "matched": matched,
+ "failed": failed,
+ }
+
+ task_vars = dict(
+ group_names=group_names,
+ openshift=dict(
+ common=dict(service_type="origin", is_containerized=False),
+ )
+ )
+
+ result = EtcdTraffic(execute_module, task_vars).run()
+
+ for word in extra_words:
+ assert word in result.get("msg", "")
+
+ assert result.get("failed", False) == failed
+
+
+@pytest.mark.parametrize('is_containerized,expected_unit_value', [
+ (False, "etcd"),
+ (True, "etcd_container"),
+])
+def test_systemd_unit_matches_deployment_type(is_containerized, expected_unit_value):
+ task_vars = dict(
+ openshift=dict(
+ common=dict(is_containerized=is_containerized),
+ )
+ )
+
+ def execute_module(module_name, args, *_):
+ assert module_name == "search_journalctl"
+ matchers = args["log_matchers"]
+
+ for matcher in matchers:
+ assert matcher["unit"] == expected_unit_value
+
+ return {"failed": False}
+
+ EtcdTraffic(execute_module, task_vars).run()
diff --git a/roles/openshift_health_checker/test/etcd_volume_test.py b/roles/openshift_health_checker/test/etcd_volume_test.py
index 917045526..077cea3ea 100644
--- a/roles/openshift_health_checker/test/etcd_volume_test.py
+++ b/roles/openshift_health_checker/test/etcd_volume_test.py
@@ -1,6 +1,7 @@
import pytest
-from openshift_checks.etcd_volume import EtcdVolume, OpenShiftCheckException
+from openshift_checks.etcd_volume import EtcdVolume
+from openshift_checks import OpenShiftCheckException
@pytest.mark.parametrize('ansible_mounts,extra_words', [
@@ -11,12 +12,11 @@ def test_cannot_determine_available_disk(ansible_mounts, extra_words):
task_vars = dict(
ansible_mounts=ansible_mounts,
)
- check = EtcdVolume(execute_module=fake_execute_module)
with pytest.raises(OpenShiftCheckException) as excinfo:
- check.run(tmp=None, task_vars=task_vars)
+ EtcdVolume(fake_execute_module, task_vars).run()
- for word in 'Unable to find etcd storage mount point'.split() + extra_words:
+ for word in ['Unable to determine mount point'] + extra_words:
assert word in str(excinfo.value)
@@ -76,8 +76,7 @@ def test_succeeds_with_recommended_disk_space(size_limit, ansible_mounts):
if task_vars["etcd_device_usage_threshold_percent"] is None:
task_vars.pop("etcd_device_usage_threshold_percent")
- check = EtcdVolume(execute_module=fake_execute_module)
- result = check.run(tmp=None, task_vars=task_vars)
+ result = EtcdVolume(fake_execute_module, task_vars).run()
assert not result.get('failed', False)
@@ -137,8 +136,7 @@ def test_fails_with_insufficient_disk_space(size_limit_percent, ansible_mounts,
if task_vars["etcd_device_usage_threshold_percent"] is None:
task_vars.pop("etcd_device_usage_threshold_percent")
- check = EtcdVolume(execute_module=fake_execute_module)
- result = check.run(tmp=None, task_vars=task_vars)
+ result = EtcdVolume(fake_execute_module, task_vars).run()
assert result['failed']
for word in extra_words:
diff --git a/roles/openshift_health_checker/test/fluentd_config_test.py b/roles/openshift_health_checker/test/fluentd_config_test.py
new file mode 100644
index 000000000..10db253bc
--- /dev/null
+++ b/roles/openshift_health_checker/test/fluentd_config_test.py
@@ -0,0 +1,348 @@
+import pytest
+
+from openshift_checks.logging.fluentd_config import FluentdConfig, OpenShiftCheckException
+
+
+def canned_fluentd_pod(containers):
+ return {
+ "metadata": {
+ "labels": {"component": "fluentd", "deploymentconfig": "logging-fluentd"},
+ "name": "logging-fluentd-1",
+ },
+ "spec": {
+ "host": "node1",
+ "nodeName": "node1",
+ "containers": containers,
+ },
+ "status": {
+ "phase": "Running",
+ "containerStatuses": [{"ready": True}],
+ "conditions": [{"status": "True", "type": "Ready"}],
+ }
+ }
+
+
+fluentd_pod = {
+ "metadata": {
+ "labels": {"component": "fluentd", "deploymentconfig": "logging-fluentd"},
+ "name": "logging-fluentd-1",
+ },
+ "spec": {
+ "host": "node1",
+ "nodeName": "node1",
+ "containers": [
+ {
+ "name": "container1",
+ "env": [
+ {
+ "name": "USE_JOURNAL",
+ "value": "true",
+ }
+ ],
+ }
+ ],
+ },
+ "status": {
+ "phase": "Running",
+ "containerStatuses": [{"ready": True}],
+ "conditions": [{"status": "True", "type": "Ready"}],
+ }
+}
+
+not_running_fluentd_pod = {
+ "metadata": {
+ "labels": {"component": "fluentd", "deploymentconfig": "logging-fluentd"},
+ "name": "logging-fluentd-2",
+ },
+ "status": {
+ "phase": "Unknown",
+ "containerStatuses": [{"ready": True}, {"ready": False}],
+ "conditions": [{"status": "True", "type": "Ready"}],
+ }
+}
+
+
+@pytest.mark.parametrize('name, use_journald, logging_driver, extra_words', [
+ (
+ 'test success with use_journald=false, and docker config set to use "json-file"',
+ False,
+ "json-file",
+ [],
+ ),
+], ids=lambda argvals: argvals[0])
+def test_check_logging_config_non_master(name, use_journald, logging_driver, extra_words):
+ def execute_module(module_name, args):
+ if module_name == "docker_info":
+ return {
+ "info": {
+ "LoggingDriver": logging_driver,
+ }
+ }
+
+ return {}
+
+ task_vars = dict(
+ group_names=["nodes", "etcd"],
+ openshift_logging_fluentd_use_journal=use_journald,
+ openshift=dict(
+ common=dict(config_base=""),
+ ),
+ )
+
+ check = FluentdConfig(execute_module, task_vars)
+ check.execute_module = execute_module
+ error = check.check_logging_config()
+
+ assert error is None
+
+
+@pytest.mark.parametrize('name, use_journald, logging_driver, words', [
+ (
+ 'test failure with use_journald=false, but docker config set to use "journald"',
+ False,
+ "journald",
+ ['json log files', 'has been set to use "journald"'],
+ ),
+ (
+ 'test failure with use_journald=false, but docker config set to use an "unsupported" driver',
+ False,
+ "unsupported",
+ ["json log files", 'has been set to use "unsupported"'],
+ ),
+ (
+ 'test failure with use_journald=true, but docker config set to use "json-file"',
+ True,
+ "json-file",
+ ['logs from "journald"', 'has been set to use "json-file"'],
+ ),
+], ids=lambda argvals: argvals[0])
+def test_check_logging_config_non_master_failed(name, use_journald, logging_driver, words):
+ def execute_module(module_name, args):
+ if module_name == "docker_info":
+ return {
+ "info": {
+ "LoggingDriver": logging_driver,
+ }
+ }
+
+ return {}
+
+ task_vars = dict(
+ group_names=["nodes", "etcd"],
+ openshift_logging_fluentd_use_journal=use_journald,
+ openshift=dict(
+ common=dict(config_base=""),
+ ),
+ )
+
+ check = FluentdConfig(execute_module, task_vars)
+ check.execute_module = execute_module
+ error = check.check_logging_config()
+
+ assert error is not None
+ for word in words:
+ assert word in error
+
+
+@pytest.mark.parametrize('name, pods, logging_driver, extra_words', [
+ # use_journald returns false (not using journald), but check succeeds
+ # since docker is set to use json-file
+ (
+ 'test success with use_journald=false, and docker config set to use default driver "json-file"',
+ [canned_fluentd_pod(
+ [
+ {
+ "name": "container1",
+ "env": [{
+ "name": "USE_JOURNAL",
+ "value": "false",
+ }],
+ },
+ ]
+ )],
+ "json-file",
+ [],
+ ),
+ (
+ 'test success with USE_JOURNAL env var missing and docker config set to use default driver "json-file"',
+ [canned_fluentd_pod(
+ [
+ {
+ "name": "container1",
+ "env": [{
+ "name": "RANDOM",
+ "value": "value",
+ }],
+ },
+ ]
+ )],
+ "json-file",
+ [],
+ ),
+], ids=lambda argvals: argvals[0])
+def test_check_logging_config_master(name, pods, logging_driver, extra_words):
+ def execute_module(module_name, args):
+ if module_name == "docker_info":
+ return {
+ "info": {
+ "LoggingDriver": logging_driver,
+ }
+ }
+
+ return {}
+
+ task_vars = dict(
+ group_names=["masters"],
+ openshift=dict(
+ common=dict(config_base=""),
+ ),
+ )
+
+ check = FluentdConfig(execute_module, task_vars)
+ check.execute_module = execute_module
+ check.get_pods_for_component = lambda _: pods
+ error = check.check_logging_config()
+
+ assert error is None
+
+
+@pytest.mark.parametrize('name, pods, logging_driver, words', [
+ (
+ 'test failure with use_journald=false, but docker config set to use "journald"',
+ [canned_fluentd_pod(
+ [
+ {
+ "name": "container1",
+ "env": [{
+ "name": "USE_JOURNAL",
+ "value": "false",
+ }],
+ },
+ ]
+ )],
+ "journald",
+ ['json log files', 'has been set to use "journald"'],
+ ),
+ (
+ 'test failure with use_journald=true, but docker config set to use "json-file"',
+ [fluentd_pod],
+ "json-file",
+ ['logs from "journald"', 'has been set to use "json-file"'],
+ ),
+ (
+ 'test failure with use_journald=false, but docker set to use an "unsupported" driver',
+ [canned_fluentd_pod(
+ [
+ {
+ "name": "container1",
+ "env": [{
+ "name": "USE_JOURNAL",
+ "value": "false",
+ }],
+ },
+ ]
+ )],
+ "unsupported",
+ ["json log files", 'has been set to use "unsupported"'],
+ ),
+ (
+ 'test failure with USE_JOURNAL env var missing and docker config set to use "journald"',
+ [canned_fluentd_pod(
+ [
+ {
+ "name": "container1",
+ "env": [{
+ "name": "RANDOM",
+ "value": "value",
+ }],
+ },
+ ]
+ )],
+ "journald",
+ ["configuration is set to", "json log files"],
+ ),
+], ids=lambda argvals: argvals[0])
+def test_check_logging_config_master_failed(name, pods, logging_driver, words):
+ def execute_module(module_name, args):
+ if module_name == "docker_info":
+ return {
+ "info": {
+ "LoggingDriver": logging_driver,
+ }
+ }
+
+ return {}
+
+ task_vars = dict(
+ group_names=["masters"],
+ openshift=dict(
+ common=dict(config_base=""),
+ ),
+ )
+
+ check = FluentdConfig(execute_module, task_vars)
+ check.execute_module = execute_module
+ check.get_pods_for_component = lambda _: pods
+ error = check.check_logging_config()
+
+ assert error is not None
+ for word in words:
+ assert word in error
+
+
+@pytest.mark.parametrize('name, pods, response, logging_driver, extra_words', [
+ (
+ 'test OpenShiftCheckException with no running containers',
+ [canned_fluentd_pod([])],
+ {
+ "failed": True,
+ "result": "unexpected",
+ },
+ "json-file",
+ ['no running containers'],
+ ),
+ (
+ 'test OpenShiftCheckException one container and no env vars set',
+ [canned_fluentd_pod(
+ [
+ {
+ "name": "container1",
+ "env": [],
+ },
+ ]
+ )],
+ {
+ "failed": True,
+ "result": "unexpected",
+ },
+ "json-file",
+ ['no environment variables'],
+ ),
+], ids=lambda argvals: argvals[0])
+def test_check_logging_config_master_fails_on_unscheduled_deployment(name, pods, response, logging_driver, extra_words):
+ def execute_module(module_name, args):
+ if module_name == "docker_info":
+ return {
+ "info": {
+ "LoggingDriver": logging_driver,
+ }
+ }
+
+ return {}
+
+ task_vars = dict(
+ group_names=["masters"],
+ openshift=dict(
+ common=dict(config_base=""),
+ ),
+ )
+
+ check = FluentdConfig(execute_module, task_vars)
+ check.get_pods_for_component = lambda _: pods
+
+ with pytest.raises(OpenShiftCheckException) as error:
+ check.check_logging_config()
+
+ assert error is not None
+ for word in extra_words:
+ assert word in str(error)
diff --git a/roles/openshift_health_checker/test/fluentd_test.py b/roles/openshift_health_checker/test/fluentd_test.py
index d151c0b19..e7bf9818b 100644
--- a/roles/openshift_health_checker/test/fluentd_test.py
+++ b/roles/openshift_health_checker/test/fluentd_test.py
@@ -1,23 +1,11 @@
import pytest
import json
-from openshift_checks.logging.fluentd import Fluentd
+from openshift_checks.logging.fluentd import Fluentd, OpenShiftCheckExceptionList, OpenShiftCheckException
-def canned_fluentd(exec_oc=None):
- """Create a Fluentd check object with canned exec_oc method"""
- check = Fluentd("dummy") # fails if a module is actually invoked
- if exec_oc:
- check._exec_oc = exec_oc
- return check
-
-
-def assert_error(error, expect_error):
- if expect_error:
- assert error
- assert expect_error in error
- else:
- assert not error
+def assert_error_in_list(expect_err, errorlist):
+ assert any(err.name == expect_err for err in errorlist), "{} in {}".format(str(expect_err), str(errorlist))
fluentd_pod_node1 = {
@@ -65,45 +53,60 @@ fluentd_node3_unlabeled = {
}
+def test_get_fluentd_pods():
+ check = Fluentd()
+ check.exec_oc = lambda *_: json.dumps(dict(items=[fluentd_node1]))
+ check.get_pods_for_component = lambda *_: [fluentd_pod_node1]
+ assert not check.run()
+
+
@pytest.mark.parametrize('pods, nodes, expect_error', [
(
[],
[],
- 'No nodes appear to be defined',
+ 'NoNodesDefined',
),
(
[],
[fluentd_node3_unlabeled],
- 'There are no nodes with the fluentd label',
+ 'NoNodesLabeled',
),
(
[],
[fluentd_node1, fluentd_node3_unlabeled],
- 'Fluentd will not aggregate logs from these nodes.',
+ 'NodesUnlabeled',
),
(
[],
[fluentd_node2],
- "nodes are supposed to have a Fluentd pod but do not",
+ 'MissingFluentdPod',
),
(
[fluentd_pod_node1, fluentd_pod_node1],
[fluentd_node1],
- 'more Fluentd pods running than nodes labeled',
+ 'TooManyFluentdPods',
),
(
[fluentd_pod_node2_down],
[fluentd_node2],
- "Fluentd pods are supposed to be running",
- ),
- (
- [fluentd_pod_node1],
- [fluentd_node1],
- None,
+ 'FluentdNotRunning',
),
])
-def test_get_fluentd_pods(pods, nodes, expect_error):
- check = canned_fluentd(lambda cmd, args, task_vars: json.dumps(dict(items=nodes)))
+def test_get_fluentd_pods_errors(pods, nodes, expect_error):
+ check = Fluentd()
+ check.exec_oc = lambda *_: json.dumps(dict(items=nodes))
+
+ with pytest.raises(OpenShiftCheckException) as excinfo:
+ check.check_fluentd(pods)
+ if isinstance(excinfo.value, OpenShiftCheckExceptionList):
+ assert_error_in_list(expect_error, excinfo.value)
+ else:
+ assert expect_error == excinfo.value.name
+
- error = check.check_fluentd(pods, {})
- assert_error(error, expect_error)
+def test_bad_oc_node_list():
+ check = Fluentd()
+ check.exec_oc = lambda *_: "this isn't even json"
+ with pytest.raises(OpenShiftCheckException) as excinfo:
+ check.get_nodes_by_name()
+ assert 'BadOcNodeList' == excinfo.value.name
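# The fluentd tests above rely on checks raising exceptions that carry a short
# `.name` identifier, sometimes bundled into an exception-list type. A minimal
# sketch of that pattern; `NamedCheckError`, `NamedCheckErrorList` and
# `check_that_fails` are hypothetical stand-ins with the same shape, not the real
# openshift_checks classes.

import pytest


class NamedCheckError(Exception):
    def __init__(self, name, msg):
        super(NamedCheckError, self).__init__(msg)
        self.name = name


class NamedCheckErrorList(NamedCheckError):
    def __init__(self, errors):
        super(NamedCheckErrorList, self).__init__("ErrorList", "multiple errors")
        self.errors = errors


def assert_error_in_list(expect_err, errorlist):
    assert any(err.name == expect_err for err in errorlist), \
        "{} not found in {}".format(expect_err, errorlist)


def check_that_fails(how_many):
    errors = [NamedCheckError("NodesUnlabeled", "some nodes lack the fluentd label")
              for _ in range(how_many)]
    if how_many > 1:
        raise NamedCheckErrorList(errors)
    raise errors[0]


def test_single_error_exposes_name():
    with pytest.raises(NamedCheckError) as excinfo:
        check_that_fails(1)
    assert excinfo.value.name == "NodesUnlabeled"


def test_error_list_exposes_names():
    with pytest.raises(NamedCheckError) as excinfo:
        check_that_fails(2)
    assert isinstance(excinfo.value, NamedCheckErrorList)
    assert_error_in_list("NodesUnlabeled", excinfo.value.errors)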
diff --git a/roles/openshift_health_checker/test/kibana_test.py b/roles/openshift_health_checker/test/kibana_test.py
index 19140a1b6..04a5e89c4 100644
--- a/roles/openshift_health_checker/test/kibana_test.py
+++ b/roles/openshift_health_checker/test/kibana_test.py
@@ -8,23 +8,7 @@ except ImportError:
from urllib.error import HTTPError, URLError
import urllib.request as urllib2
-from openshift_checks.logging.kibana import Kibana
-
-
-def canned_kibana(exec_oc=None):
- """Create a Kibana check object with canned exec_oc method"""
- check = Kibana("dummy") # fails if a module is actually invoked
- if exec_oc:
- check._exec_oc = exec_oc
- return check
-
-
-def assert_error(error, expect_error):
- if expect_error:
- assert error
- assert expect_error in error
- else:
- assert not error
+from openshift_checks.logging.kibana import Kibana, OpenShiftCheckException
plain_kibana_pod = {
@@ -49,39 +33,45 @@ not_running_kibana_pod = {
}
+def test_check_kibana():
+ # should run without exception:
+ Kibana().check_kibana([plain_kibana_pod])
+
+
@pytest.mark.parametrize('pods, expect_error', [
(
[],
- "There are no Kibana pods deployed",
- ),
- (
- [plain_kibana_pod],
- None,
+ "MissingComponentPods",
),
(
[not_running_kibana_pod],
- "No Kibana pod is in a running state",
+ "NoRunningPods",
),
(
[plain_kibana_pod, not_running_kibana_pod],
- "The following Kibana pods are not currently in a running state",
+ "PodNotRunning",
),
])
-def test_check_kibana(pods, expect_error):
- check = canned_kibana()
- error = check.check_kibana(pods)
- assert_error(error, expect_error)
+def test_check_kibana_error(pods, expect_error):
+ with pytest.raises(OpenShiftCheckException) as excinfo:
+ Kibana().check_kibana(pods)
+ assert expect_error == excinfo.value.name
-@pytest.mark.parametrize('route, expect_url, expect_error', [
+@pytest.mark.parametrize('comment, route, expect_error', [
(
+ "No route returned",
None,
- None,
- 'no_route_exists',
+ "no_route_exists",
),
- # test route with no ingress
(
+ "broken route response",
+ {"status": {}},
+ "get_route_failed",
+ ),
+ (
+ "route with no ingress",
{
"metadata": {
"labels": {"component": "kibana", "deploymentconfig": "logging-kibana"},
@@ -94,12 +84,11 @@ def test_check_kibana(pods, expect_error):
"host": "hostname",
}
},
- None,
- 'route_not_accepted',
+ "route_not_accepted",
),
- # test route with no host
(
+ "route with no host",
{
"metadata": {
"labels": {"component": "kibana", "deploymentconfig": "logging-kibana"},
@@ -112,12 +101,21 @@ def test_check_kibana(pods, expect_error):
},
"spec": {},
},
- None,
- 'route_missing_host',
+ "route_missing_host",
),
+])
+def test_get_kibana_url_error(comment, route, expect_error):
+ check = Kibana()
+ check.exec_oc = lambda *_: json.dumps(route) if route else ""
- # test route that looks fine
+ with pytest.raises(OpenShiftCheckException) as excinfo:
+ check._get_kibana_url()
+ assert excinfo.value.name == expect_error
+
+
+@pytest.mark.parametrize('comment, route, expect_url', [
(
+ "test route that looks fine",
{
"metadata": {
"labels": {"component": "kibana", "deploymentconfig": "logging-kibana"},
@@ -133,61 +131,57 @@ def test_check_kibana(pods, expect_error):
},
},
"https://hostname/",
- None,
),
])
-def test_get_kibana_url(route, expect_url, expect_error):
- check = canned_kibana(lambda cmd, args, task_vars: json.dumps(route) if route else "")
-
- url, error = check._get_kibana_url({})
- if expect_url:
- assert url == expect_url
- else:
- assert not url
- if expect_error:
- assert error == expect_error
- else:
- assert not error
+def test_get_kibana_url(comment, route, expect_url):
+ check = Kibana()
+ check.exec_oc = lambda *_: json.dumps(route)
+ assert expect_url == check._get_kibana_url()
@pytest.mark.parametrize('exec_result, expect', [
(
'urlopen error [Errno 111] Connection refused',
- 'at least one router routing to it?',
+ 'FailedToConnectInternal',
),
(
'urlopen error [Errno -2] Name or service not known',
- 'DNS configured for the Kibana hostname?',
+ 'FailedToResolveInternal',
),
(
'Status code was not [302]: HTTP Error 500: Server error',
- 'did not return the correct status code',
+ 'WrongReturnCodeInternal',
),
(
'bork bork bork',
- 'bork bork bork', # should pass through
+ 'MiscRouteErrorInternal',
),
])
def test_verify_url_internal_failure(exec_result, expect):
- check = Kibana(execute_module=lambda module_name, args, task_vars: dict(failed=True, msg=exec_result))
- check._get_kibana_url = lambda task_vars: ('url', None)
+ check = Kibana(execute_module=lambda *_: dict(failed=True, msg=exec_result))
+ check._get_kibana_url = lambda: 'url'
- error = check._check_kibana_route({})
- assert_error(error, expect)
+ with pytest.raises(OpenShiftCheckException) as excinfo:
+ check.check_kibana_route()
+ assert expect == excinfo.value.name
@pytest.mark.parametrize('lib_result, expect', [
(
- HTTPError('url', 500, "it broke", hdrs=None, fp=None),
- 'it broke',
+ HTTPError('url', 500, 'it broke', hdrs=None, fp=None),
+ 'MiscRouteError',
),
(
- URLError('it broke'),
- 'it broke',
+ URLError('urlopen error [Errno 111] Connection refused'),
+ 'FailedToConnect',
+ ),
+ (
+ URLError('urlopen error [Errno -2] Name or service not known'),
+ 'FailedToResolve',
),
(
302,
- 'returned the wrong error code',
+ 'WrongReturnCode',
),
(
200,
@@ -210,9 +204,41 @@ def test_verify_url_external_failure(lib_result, expect, monkeypatch):
raise lib_result
monkeypatch.setattr(urllib2, 'urlopen', urlopen)
- check = canned_kibana()
- check._get_kibana_url = lambda task_vars: ('url', None)
- check._verify_url_internal = lambda url, task_vars: None
+ check = Kibana()
+ check._get_kibana_url = lambda: 'url'
+ check._verify_url_internal = lambda url: None
+
+ if not expect:
+ check.check_kibana_route()
+ return
+
+ with pytest.raises(OpenShiftCheckException) as excinfo:
+ check.check_kibana_route()
+ assert expect == excinfo.value.name
+
+
+def test_verify_url_external_skip():
+ check = Kibana(lambda *_: {}, dict(openshift_check_efk_kibana_external="false"))
+ check._get_kibana_url = lambda: 'url'
+ check.check_kibana_route()
+
+
+# this is kind of silly but it adds coverage for the run() method...
+def test_run():
+ pods = ["foo"]
+ ran = dict(check_kibana=False, check_route=False)
+
+ def check_kibana(pod_list):
+ ran["check_kibana"] = True
+ assert pod_list == pods
+
+ def check_kibana_route():
+ ran["check_route"] = True
+
+ check = Kibana()
+ check.get_pods_for_component = lambda *_: pods
+ check.check_kibana = check_kibana
+ check.check_kibana_route = check_kibana_route
- error = check._check_kibana_route({})
- assert_error(error, expect)
+ check.run()
+ assert ran["check_kibana"] and ran["check_route"]
diff --git a/roles/openshift_health_checker/test/logging_check_test.py b/roles/openshift_health_checker/test/logging_check_test.py
index b6db34fe3..59c703214 100644
--- a/roles/openshift_health_checker/test/logging_check_test.py
+++ b/roles/openshift_health_checker/test/logging_check_test.py
@@ -1,18 +1,14 @@
import pytest
import json
-from openshift_checks.logging.logging import LoggingCheck, OpenShiftCheckException
+from openshift_checks.logging.logging import LoggingCheck, MissingComponentPods, CouldNotUseOc
task_vars_config_base = dict(openshift=dict(common=dict(config_base='/etc/origin')))
-logging_namespace = "logging"
-
-
-def canned_loggingcheck(exec_oc=None):
+def canned_loggingcheck(exec_oc=None, execute_module=None):
"""Create a LoggingCheck object with canned exec_oc method"""
- check = LoggingCheck("dummy") # fails if a module is actually invoked
- check.logging_namespace = 'logging'
+ check = LoggingCheck(execute_module)
if exec_oc:
check.exec_oc = exec_oc
return check
@@ -50,6 +46,16 @@ plain_kibana_pod = {
}
}
+plain_kibana_pod_no_containerstatus = {
+ "metadata": {
+ "labels": {"component": "kibana", "deploymentconfig": "logging-kibana"},
+ "name": "logging-kibana-1",
+ },
+ "status": {
+ "conditions": [{"status": "True", "type": "Ready"}],
+ }
+}
+
fluentd_pod_node1 = {
"metadata": {
"labels": {"component": "fluentd", "deploymentconfig": "logging-fluentd"},
@@ -80,58 +86,81 @@ plain_curator_pod = {
("Permission denied", "Unexpected error using `oc`"),
])
def test_oc_failure(problem, expect):
- def execute_module(module_name, args, task_vars):
+ def execute_module(module_name, *_):
if module_name == "ocutil":
return dict(failed=True, result=problem)
return dict(changed=False)
- check = LoggingCheck({})
+ check = LoggingCheck(execute_module, task_vars_config_base)
- with pytest.raises(OpenShiftCheckException) as excinfo:
- check.exec_oc(execute_module, logging_namespace, 'get foo', [], task_vars=task_vars_config_base)
+ with pytest.raises(CouldNotUseOc) as excinfo:
+ check.exec_oc('get foo', [])
assert expect in str(excinfo)
-groups_with_first_master = dict(masters=['this-host', 'other-host'])
-groups_with_second_master = dict(masters=['other-host', 'this-host'])
-groups_not_a_master = dict(masters=['other-host'])
+groups_with_first_master = dict(oo_first_master=['this-host'])
+groups_not_a_master = dict(oo_first_master=['other-host'], oo_masters=['other-host'])
@pytest.mark.parametrize('groups, logging_deployed, is_active', [
(groups_with_first_master, True, True),
(groups_with_first_master, False, False),
(groups_not_a_master, True, False),
- (groups_with_second_master, True, False),
(groups_not_a_master, True, False),
])
def test_is_active(groups, logging_deployed, is_active):
task_vars = dict(
- ansible_ssh_host='this-host',
+ ansible_host='this-host',
groups=groups,
openshift_hosted_logging_deploy=logging_deployed,
)
- assert LoggingCheck.is_active(task_vars=task_vars) == is_active
+ assert LoggingCheck(None, task_vars).is_active() == is_active
-@pytest.mark.parametrize('pod_output, expect_pods, expect_error', [
+@pytest.mark.parametrize('pod_output, expect_pods', [
+ (
+ json.dumps({'items': [plain_es_pod]}),
+ [plain_es_pod],
+ ),
+])
+def test_get_pods_for_component(pod_output, expect_pods):
+ check = canned_loggingcheck(lambda *_: pod_output)
+ pods = check.get_pods_for_component("es")
+ assert pods == expect_pods
+
+
+@pytest.mark.parametrize('exec_oc_output, expect_error', [
(
'No resources found.',
- None,
- 'There are no pods in the logging namespace',
+ MissingComponentPods,
),
(
- json.dumps({'items': [plain_kibana_pod, plain_es_pod, plain_curator_pod, fluentd_pod_node1]}),
- [plain_es_pod],
- None,
+ '{"items": null}',
+ MissingComponentPods,
),
])
-def test_get_pods_for_component(pod_output, expect_pods, expect_error):
- check = canned_loggingcheck(lambda exec_module, namespace, cmd, args, task_vars: pod_output)
- pods, error = check.get_pods_for_component(
- lambda name, args, task_vars: {},
- logging_namespace,
- "es",
- {}
- )
- assert_error(error, expect_error)
+def test_get_pods_for_component_fail(exec_oc_output, expect_error):
+ check = canned_loggingcheck(lambda *_: exec_oc_output)
+ with pytest.raises(expect_error):
+ check.get_pods_for_component("es")
+
+
+@pytest.mark.parametrize('name, pods, expected_pods', [
+ (
+ 'test single pod found, scheduled, but no containerStatuses field',
+ [plain_kibana_pod_no_containerstatus],
+ [plain_kibana_pod_no_containerstatus],
+ ),
+ (
+ 'set of pods has at least one pod with containerStatuses (scheduled); should still fail',
+ [plain_kibana_pod_no_containerstatus, plain_kibana_pod],
+ [plain_kibana_pod_no_containerstatus],
+ ),
+
+], ids=lambda argvals: argvals[0])
+def test_get_not_running_pods_no_container_status(name, pods, expected_pods):
+ check = canned_loggingcheck(lambda *_: '')
+ result = check.not_running_pods(pods)
+
+ assert result == expected_pods
diff --git a/roles/openshift_health_checker/test/logging_index_time_test.py b/roles/openshift_health_checker/test/logging_index_time_test.py
new file mode 100644
index 000000000..c48ade9b8
--- /dev/null
+++ b/roles/openshift_health_checker/test/logging_index_time_test.py
@@ -0,0 +1,170 @@
+import json
+
+import pytest
+
+from openshift_checks.logging.logging_index_time import LoggingIndexTime, OpenShiftCheckException
+
+
+SAMPLE_UUID = "unique-test-uuid"
+
+
+def canned_loggingindextime(exec_oc=None):
+ """Create a check object with a canned exec_oc method"""
+ check = LoggingIndexTime() # fails if a module is actually invoked
+ if exec_oc:
+ check.exec_oc = exec_oc
+ return check
+
+
+plain_running_elasticsearch_pod = {
+ "metadata": {
+ "labels": {"component": "es", "deploymentconfig": "logging-es-data-master"},
+ "name": "logging-es-data-master-1",
+ },
+ "status": {
+ "containerStatuses": [{"ready": True}, {"ready": True}],
+ "phase": "Running",
+ }
+}
+plain_running_kibana_pod = {
+ "metadata": {
+ "labels": {"component": "kibana", "deploymentconfig": "logging-kibana"},
+ "name": "logging-kibana-1",
+ },
+ "status": {
+ "containerStatuses": [{"ready": True}, {"ready": True}],
+ "phase": "Running",
+ }
+}
+not_running_kibana_pod = {
+ "metadata": {
+ "labels": {"component": "kibana", "deploymentconfig": "logging-kibana"},
+ "name": "logging-kibana-2",
+ },
+ "status": {
+ "containerStatuses": [{"ready": True}, {"ready": False}],
+ "conditions": [{"status": "True", "type": "Ready"}],
+ "phase": "pending",
+ }
+}
+
+
+@pytest.mark.parametrize('pods, expect_pods', [
+ (
+ [not_running_kibana_pod],
+ [],
+ ),
+ (
+ [plain_running_kibana_pod],
+ [plain_running_kibana_pod],
+ ),
+ (
+ [],
+ [],
+ )
+])
+def test_check_running_pods(pods, expect_pods):
+ check = canned_loggingindextime()
+ pods = check.running_pods(pods)
+ assert pods == expect_pods
+
+
+def test_bad_config_param():
+ with pytest.raises(OpenShiftCheckException) as error:
+ LoggingIndexTime(task_vars=dict(openshift_check_logging_index_timeout_seconds="foo")).run()
+ assert 'InvalidTimeout' == error.value.name
+
+
+def test_no_running_pods():
+ check = LoggingIndexTime()
+ check.get_pods_for_component = lambda *_: [not_running_kibana_pod]
+ with pytest.raises(OpenShiftCheckException) as error:
+ check.run()
+ assert 'kibanaNoRunningPods' == error.value.name
+
+
+def test_with_running_pods():
+ check = LoggingIndexTime()
+ check.get_pods_for_component = lambda *_: [plain_running_kibana_pod, plain_running_elasticsearch_pod]
+ check.curl_kibana_with_uuid = lambda *_: SAMPLE_UUID
+ check.wait_until_cmd_or_err = lambda *_: None
+ assert not check.run().get("failed")
+
+
+@pytest.mark.parametrize('name, json_response, uuid, timeout', [
+ (
+ 'valid count in response',
+ {
+ "count": 1,
+ },
+ SAMPLE_UUID,
+ 0.001,
+ ),
+], ids=lambda argval: argval[0])
+def test_wait_until_cmd_or_err_succeeds(name, json_response, uuid, timeout):
+ check = canned_loggingindextime(lambda *args, **_: json.dumps(json_response))
+ check.wait_until_cmd_or_err(plain_running_elasticsearch_pod, uuid, timeout)
+
+
+@pytest.mark.parametrize('name, json_response, timeout, expect_error', [
+ (
+ 'invalid json response',
+ {
+ "invalid_field": 1,
+ },
+ 0.001,
+ 'esInvalidResponse',
+ ),
+ (
+ 'empty response',
+ {},
+ 0.001,
+ 'esInvalidResponse',
+ ),
+ (
+ 'valid response but invalid match count',
+ {
+ "count": 0,
+ },
+ 0.005,
+ 'NoMatchFound',
+ )
+], ids=lambda argval: argval[0])
+def test_wait_until_cmd_or_err(name, json_response, timeout, expect_error):
+ check = canned_loggingindextime(lambda *args, **_: json.dumps(json_response))
+ with pytest.raises(OpenShiftCheckException) as error:
+ check.wait_until_cmd_or_err(plain_running_elasticsearch_pod, SAMPLE_UUID, timeout)
+
+ assert expect_error == error.value.name
+
+
+def test_curl_kibana_with_uuid():
+ check = canned_loggingindextime(lambda *args, **_: json.dumps({"statusCode": 404}))
+ check.generate_uuid = lambda: SAMPLE_UUID
+ assert SAMPLE_UUID == check.curl_kibana_with_uuid(plain_running_kibana_pod)
+
+
+@pytest.mark.parametrize('name, json_response, expect_error', [
+ (
+ 'invalid json response',
+ {
+ "invalid_field": "invalid",
+ },
+ 'kibanaInvalidResponse',
+ ),
+ (
+ 'wrong error code in response',
+ {
+ "statusCode": 500,
+ },
+ 'kibanaInvalidReturnCode',
+ ),
+], ids=lambda argval: argval[0])
+def test_failed_curl_kibana_with_uuid(name, json_response, expect_error):
+ check = canned_loggingindextime(lambda *args, **_: json.dumps(json_response))
+ check.generate_uuid = lambda: SAMPLE_UUID
+
+ with pytest.raises(OpenShiftCheckException) as error:
+ check.curl_kibana_with_uuid(plain_running_kibana_pod)
+
+ assert expect_error == error.value.name
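# The wait_until_cmd_or_err tests above exercise a poll-with-timeout loop: keep
# querying Elasticsearch until the expected count appears or the deadline passes,
# then raise a named error. A small sketch of that behaviour; `wait_for_count` and
# `NamedTimeoutError` are hypothetical and only mirror the shape of the real check.

import json
import time

import pytest


class NamedTimeoutError(Exception):
    def __init__(self, name, msg):
        super(NamedTimeoutError, self).__init__(msg)
        self.name = name


def wait_for_count(query, timeout_secs):
    deadline = time.time() + timeout_secs
    while time.time() < deadline:
        response = json.loads(query())
        if "count" not in response:
            raise NamedTimeoutError("esInvalidResponse", "no count field in response")
        if response["count"]:
            return
    raise NamedTimeoutError("NoMatchFound", "timed out waiting for a match")


def test_wait_for_count_times_out():
    with pytest.raises(NamedTimeoutError) as error:
        wait_for_count(lambda: json.dumps({"count": 0}), timeout_secs=0.005)
    assert error.value.name == "NoMatchFound"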
diff --git a/roles/openshift_health_checker/test/memory_availability_test.py b/roles/openshift_health_checker/test/memory_availability_test.py
index 4fbaea0a9..aee2f0416 100644
--- a/roles/openshift_health_checker/test/memory_availability_test.py
+++ b/roles/openshift_health_checker/test/memory_availability_test.py
@@ -17,7 +17,7 @@ def test_is_active(group_names, is_active):
task_vars = dict(
group_names=group_names,
)
- assert MemoryAvailability.is_active(task_vars=task_vars) == is_active
+ assert MemoryAvailability(None, task_vars).is_active() == is_active
@pytest.mark.parametrize('group_names,configured_min,ansible_memtotal_mb', [
@@ -59,8 +59,7 @@ def test_succeeds_with_recommended_memory(group_names, configured_min, ansible_m
ansible_memtotal_mb=ansible_memtotal_mb,
)
- check = MemoryAvailability(execute_module=fake_execute_module)
- result = check.run(tmp=None, task_vars=task_vars)
+ result = MemoryAvailability(fake_execute_module, task_vars).run()
assert not result.get('failed', False)
@@ -117,8 +116,7 @@ def test_fails_with_insufficient_memory(group_names, configured_min, ansible_mem
ansible_memtotal_mb=ansible_memtotal_mb,
)
- check = MemoryAvailability(execute_module=fake_execute_module)
- result = check.run(tmp=None, task_vars=task_vars)
+ result = MemoryAvailability(fake_execute_module, task_vars).run()
assert result.get('failed', False)
for word in 'below recommended'.split() + extra_words:
diff --git a/roles/openshift_health_checker/test/mixins_test.py b/roles/openshift_health_checker/test/mixins_test.py
index 2d83e207d..b1a41ca3c 100644
--- a/roles/openshift_health_checker/test/mixins_test.py
+++ b/roles/openshift_health_checker/test/mixins_test.py
@@ -14,10 +14,10 @@ class NotContainerizedCheck(NotContainerizedMixin, OpenShiftCheck):
(dict(openshift=dict(common=dict(is_containerized=True))), False),
])
def test_is_active(task_vars, expected):
- assert NotContainerizedCheck.is_active(task_vars) == expected
+ assert NotContainerizedCheck(None, task_vars).is_active() == expected
def test_is_active_missing_task_vars():
with pytest.raises(OpenShiftCheckException) as excinfo:
- NotContainerizedCheck.is_active(task_vars={})
+ NotContainerizedCheck().is_active()
assert 'is_containerized' in str(excinfo.value)
diff --git a/roles/openshift_health_checker/test/openshift_check_test.py b/roles/openshift_health_checker/test/openshift_check_test.py
index e3153979c..bc0c3b26c 100644
--- a/roles/openshift_health_checker/test/openshift_check_test.py
+++ b/roles/openshift_health_checker/test/openshift_check_test.py
@@ -1,7 +1,7 @@
import pytest
from openshift_checks import OpenShiftCheck, OpenShiftCheckException
-from openshift_checks import load_checks, get_var
+from openshift_checks import load_checks
# Fixtures
@@ -28,34 +28,23 @@ def test_OpenShiftCheck_init():
name = "test_check"
run = NotImplemented
- # initialization requires at least one argument (apart from self)
- with pytest.raises(TypeError) as excinfo:
- TestCheck()
+ # execute_module required at init if it will be used
+ with pytest.raises(RuntimeError) as excinfo:
+ TestCheck().execute_module("foo")
assert 'execute_module' in str(excinfo.value)
- assert 'module_executor' in str(excinfo.value)
execute_module = object()
# initialize with positional argument
check = TestCheck(execute_module)
- # new recommended name
- assert check.execute_module == execute_module
- # deprecated attribute name
- assert check.module_executor == execute_module
+ assert check._execute_module == execute_module
- # initialize with keyword argument, recommended name
+ # initialize with keyword argument
check = TestCheck(execute_module=execute_module)
- # new recommended name
- assert check.execute_module == execute_module
- # deprecated attribute name
- assert check.module_executor == execute_module
+ assert check._execute_module == execute_module
- # initialize with keyword argument, deprecated name
- check = TestCheck(module_executor=execute_module)
- # new recommended name
- assert check.execute_module == execute_module
- # deprecated attribute name
- assert check.module_executor == execute_module
+ assert check.task_vars == {}
+ assert check.tmp is None
def test_subclasses():
@@ -81,19 +70,76 @@ def test_load_checks():
assert modules
+def dummy_check(task_vars):
+ class TestCheck(OpenShiftCheck):
+ name = "dummy"
+ run = NotImplemented
+
+ return TestCheck(task_vars=task_vars)
+
+
@pytest.mark.parametrize("keys,expected", [
(("foo",), 42),
(("bar", "baz"), "openshift"),
+ (("bar.baz",), "openshift"),
])
def test_get_var_ok(task_vars, keys, expected):
- assert get_var(task_vars, *keys) == expected
+ assert dummy_check(task_vars).get_var(*keys) == expected
def test_get_var_error(task_vars, missing_keys):
with pytest.raises(OpenShiftCheckException):
- get_var(task_vars, *missing_keys)
+ dummy_check(task_vars).get_var(*missing_keys)
def test_get_var_default(task_vars, missing_keys):
default = object()
- assert get_var(task_vars, *missing_keys, default=default) == default
+ assert dummy_check(task_vars).get_var(*missing_keys, default=default) == default
+
+
+@pytest.mark.parametrize("keys, convert, expected", [
+ (("foo",), str, "42"),
+ (("foo",), float, 42.0),
+ (("bar", "baz"), bool, False),
+])
+def test_get_var_convert(task_vars, keys, convert, expected):
+ assert dummy_check(task_vars).get_var(*keys, convert=convert) == expected
+
+
+def convert_oscexc(_):
+ raise OpenShiftCheckException("known failure")
+
+
+def convert_exc(_):
+ raise Exception("failure unknown")
+
+
+@pytest.mark.parametrize("keys, convert, expect_text", [
+ (("bar", "baz"), int, "Cannot convert"),
+ (("bar.baz",), float, "Cannot convert"),
+ (("foo",), "bogus", "TypeError"),
+ (("foo",), lambda a, b: 1, "TypeError"),
+ (("foo",), lambda a: 1 / 0, "ZeroDivisionError"),
+ (("foo",), convert_oscexc, "known failure"),
+ (("foo",), convert_exc, "failure unknown"),
+])
+def test_get_var_convert_error(task_vars, keys, convert, expect_text):
+ with pytest.raises(OpenShiftCheckException) as excinfo:
+ dummy_check(task_vars).get_var(*keys, convert=convert)
+ assert expect_text in str(excinfo.value)
+
+
+def test_register(task_vars):
+ check = dummy_check(task_vars)
+
+ check.register_failure(OpenShiftCheckException("spam"))
+ assert "spam" in str(check.failures[0])
+
+ with pytest.raises(OpenShiftCheckException) as excinfo:
+ check.register_file("spam") # no file contents specified
+ assert "not specified" in str(excinfo.value)
+
+ # normally execute_module registers the result file; test disabling that
+ check._execute_module = lambda *args, **_: dict()
+ check.execute_module("eggs", module_args={}, register=False)
+ assert not check.files_to_save
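# The get_var tests above cover dotted-key lookup ("bar.baz"), a default value, and
# an optional convert callable whose failures are wrapped in a check exception. A
# simplified, hypothetical illustration of that contract (not the actual
# openshift_checks implementation):

import pytest


class CheckVarError(Exception):
    pass


def lookup_var(task_vars, *keys, **kwargs):
    """Look up nested keys, splitting dotted keys, with optional default/convert."""
    keys = [part for key in keys for part in key.split(".")]
    value = task_vars
    try:
        for key in keys:
            value = value[key]
    except (KeyError, TypeError):
        if "default" in kwargs:
            return kwargs["default"]
        raise CheckVarError("missing variable: {}".format(".".join(keys)))
    convert = kwargs.get("convert")
    if convert is None:
        return value
    try:
        return convert(value)
    except Exception as exc:
        raise CheckVarError("Cannot convert {}: {}".format(".".join(keys), exc))


def test_lookup_var_sketch():
    task_vars = dict(foo=42, bar=dict(baz="openshift"))
    assert lookup_var(task_vars, "bar.baz") == "openshift"
    assert lookup_var(task_vars, "foo", convert=str) == "42"
    assert lookup_var(task_vars, "missing", default=None) is None
    with pytest.raises(CheckVarError):
        lookup_var(task_vars, "foo", convert=lambda _: 1 / 0)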
diff --git a/roles/openshift_health_checker/test/ovs_version_test.py b/roles/openshift_health_checker/test/ovs_version_test.py
index 6494e1c06..602f32989 100644
--- a/roles/openshift_health_checker/test/ovs_version_test.py
+++ b/roles/openshift_health_checker/test/ovs_version_test.py
@@ -4,7 +4,7 @@ from openshift_checks.ovs_version import OvsVersion, OpenShiftCheckException
def test_openshift_version_not_supported():
- def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None):
+ def execute_module(*_):
return {}
openshift_release = '111.7.0'
@@ -16,15 +16,14 @@ def test_openshift_version_not_supported():
openshift_deployment_type='origin',
)
- check = OvsVersion(execute_module=execute_module)
with pytest.raises(OpenShiftCheckException) as excinfo:
- check.run(tmp=None, task_vars=task_vars)
+ OvsVersion(execute_module, task_vars).run()
assert "no recommended version of Open vSwitch" in str(excinfo.value)
def test_invalid_openshift_release_format():
- def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None):
+ def execute_module(*_):
return {}
task_vars = dict(
@@ -33,15 +32,14 @@ def test_invalid_openshift_release_format():
openshift_deployment_type='origin',
)
- check = OvsVersion(execute_module=execute_module)
with pytest.raises(OpenShiftCheckException) as excinfo:
- check.run(tmp=None, task_vars=task_vars)
+ OvsVersion(execute_module, task_vars).run()
assert "invalid version" in str(excinfo.value)
@pytest.mark.parametrize('openshift_release,expected_ovs_version', [
- ("3.5", "2.6"),
- ("3.6", "2.6"),
+ ("3.5", ["2.6", "2.7"]),
+ ("3.6", ["2.6", "2.7"]),
("3.4", "2.4"),
("3.3", "2.4"),
("1.0", "2.4"),
@@ -52,9 +50,9 @@ def test_ovs_package_version(openshift_release, expected_ovs_version):
openshift_release=openshift_release,
openshift_image_tag='v' + openshift_release,
)
- return_value = object()
+ return_value = {} # note: check.execute_module modifies return hash contents
- def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None):
+ def execute_module(module_name=None, module_args=None, *_):
assert module_name == 'rpm_version'
assert "package_list" in module_args
@@ -64,8 +62,7 @@ def test_ovs_package_version(openshift_release, expected_ovs_version):
return return_value
- check = OvsVersion(execute_module=execute_module)
- result = check.run(tmp=None, task_vars=task_vars)
+ result = OvsVersion(execute_module, task_vars).run()
assert result is return_value
@@ -86,4 +83,4 @@ def test_ovs_version_skip_when_not_master_nor_node(group_names, is_containerized
group_names=group_names,
openshift=dict(common=dict(is_containerized=is_containerized)),
)
- assert OvsVersion.is_active(task_vars=task_vars) == is_active
+ assert OvsVersion(None, task_vars).is_active() == is_active
diff --git a/roles/openshift_health_checker/test/package_availability_test.py b/roles/openshift_health_checker/test/package_availability_test.py
index f7e916a46..b34e8fbfc 100644
--- a/roles/openshift_health_checker/test/package_availability_test.py
+++ b/roles/openshift_health_checker/test/package_availability_test.py
@@ -14,7 +14,7 @@ def test_is_active(pkg_mgr, is_containerized, is_active):
ansible_pkg_mgr=pkg_mgr,
openshift=dict(common=dict(is_containerized=is_containerized)),
)
- assert PackageAvailability.is_active(task_vars=task_vars) == is_active
+ assert PackageAvailability(None, task_vars).is_active() == is_active
@pytest.mark.parametrize('task_vars,must_have_packages,must_not_have_packages', [
@@ -49,15 +49,14 @@ def test_is_active(pkg_mgr, is_containerized, is_active):
),
])
def test_package_availability(task_vars, must_have_packages, must_not_have_packages):
- return_value = object()
+ return_value = {}
- def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None):
+ def execute_module(module_name=None, module_args=None, *_):
assert module_name == 'check_yum_update'
assert 'packages' in module_args
assert set(module_args['packages']).issuperset(must_have_packages)
assert not set(module_args['packages']).intersection(must_not_have_packages)
- return return_value
+ return {'foo': return_value}
- check = PackageAvailability(execute_module=execute_module)
- result = check.run(tmp=None, task_vars=task_vars)
- assert result is return_value
+ result = PackageAvailability(execute_module, task_vars).run()
+ assert result['foo'] is return_value
diff --git a/roles/openshift_health_checker/test/package_update_test.py b/roles/openshift_health_checker/test/package_update_test.py
index 5e000cff5..85d3c9cab 100644
--- a/roles/openshift_health_checker/test/package_update_test.py
+++ b/roles/openshift_health_checker/test/package_update_test.py
@@ -2,15 +2,14 @@ from openshift_checks.package_update import PackageUpdate
def test_package_update():
- return_value = object()
+ return_value = {}
- def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None):
+ def execute_module(module_name=None, module_args=None, *_):
assert module_name == 'check_yum_update'
assert 'packages' in module_args
# empty list of packages means "generic check if 'yum update' will work"
assert module_args['packages'] == []
- return return_value
+ return {'foo': return_value}
- check = PackageUpdate(execute_module=execute_module)
- result = check.run(tmp=None, task_vars=None)
- assert result is return_value
+ result = PackageUpdate(execute_module).run()
+ assert result['foo'] is return_value
diff --git a/roles/openshift_health_checker/test/package_version_test.py b/roles/openshift_health_checker/test/package_version_test.py
index 91eace512..8564cd4db 100644
--- a/roles/openshift_health_checker/test/package_version_test.py
+++ b/roles/openshift_health_checker/test/package_version_test.py
@@ -3,61 +3,58 @@ import pytest
from openshift_checks.package_version import PackageVersion, OpenShiftCheckException
-@pytest.mark.parametrize('openshift_release, extra_words', [
- ('111.7.0', ["no recommended version of Open vSwitch"]),
- ('0.0.0', ["no recommended version of Docker"]),
-])
-def test_openshift_version_not_supported(openshift_release, extra_words):
- def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None):
- return {}
-
- task_vars = dict(
- openshift=dict(common=dict(service_type='origin')),
+def task_vars_for(openshift_release, deployment_type):
+ return dict(
+ ansible_pkg_mgr='yum',
+ openshift=dict(common=dict(service_type=deployment_type)),
openshift_release=openshift_release,
openshift_image_tag='v' + openshift_release,
- openshift_deployment_type='origin',
+ openshift_deployment_type=deployment_type,
)
- check = PackageVersion(execute_module=execute_module)
+
+def test_openshift_version_not_supported():
+ check = PackageVersion(None, task_vars_for("1.2.3", 'origin'))
+ check.get_openshift_version_tuple = lambda: (3, 4, 1) # won't be in the dict
+
with pytest.raises(OpenShiftCheckException) as excinfo:
- check.run(tmp=None, task_vars=task_vars)
+ check.get_required_ovs_version()
+ assert "no recommended version of Open vSwitch" in str(excinfo.value)
- for word in extra_words:
- assert word in str(excinfo.value)
+ with pytest.raises(OpenShiftCheckException) as excinfo:
+ check.get_required_docker_version()
+ assert "no recommended version of Docker" in str(excinfo.value)
def test_invalid_openshift_release_format():
- def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None):
- return {}
-
task_vars = dict(
+ ansible_pkg_mgr='yum',
openshift=dict(common=dict(service_type='origin')),
openshift_image_tag='v0',
openshift_deployment_type='origin',
)
- check = PackageVersion(execute_module=execute_module)
+ check = PackageVersion(lambda *_: {}, task_vars)
with pytest.raises(OpenShiftCheckException) as excinfo:
- check.run(tmp=None, task_vars=task_vars)
+ check.run()
assert "invalid version" in str(excinfo.value)
@pytest.mark.parametrize('openshift_release', [
- "3.5",
+ "111.7.0",
+ "3.7",
"3.6",
+ "3.5.1.2.3",
+ "3.5",
"3.4",
"3.3",
+ "2.1.0",
])
def test_package_version(openshift_release):
- task_vars = dict(
- openshift=dict(common=dict(service_type='origin')),
- openshift_release=openshift_release,
- openshift_image_tag='v' + openshift_release,
- openshift_deployment_type='origin',
- )
- return_value = object()
- def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None):
+ return_value = {"foo": object()}
+
+ def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None, *_):
assert module_name == 'aos_version'
assert "package_list" in module_args
@@ -67,59 +64,24 @@ def test_package_version(openshift_release):
return return_value
- check = PackageVersion(execute_module=execute_module)
- result = check.run(tmp=None, task_vars=task_vars)
- assert result is return_value
-
-
-@pytest.mark.parametrize('deployment_type,openshift_release,expected_ovs_version', [
- ("openshift-enterprise", "3.5", "2.6"),
- ("origin", "3.6", "2.6"),
- ("openshift-enterprise", "3.4", "2.4"),
- ("origin", "3.3", "2.4"),
-])
-def test_ovs_package_version(deployment_type, openshift_release, expected_ovs_version):
- task_vars = dict(
- openshift=dict(common=dict(service_type='origin')),
- openshift_release=openshift_release,
- openshift_image_tag='v' + openshift_release,
- openshift_deployment_type=deployment_type,
- )
- return_value = object()
-
- def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None):
- assert module_name == 'aos_version'
- assert "package_list" in module_args
-
- for pkg in module_args["package_list"]:
- if pkg["name"] == "openvswitch":
- assert pkg["version"] == expected_ovs_version
-
- return return_value
-
- check = PackageVersion(execute_module=execute_module)
- result = check.run(tmp=None, task_vars=task_vars)
- assert result is return_value
+ check = PackageVersion(execute_module, task_vars_for(openshift_release, 'origin'))
+ result = check.run()
+ assert result == return_value
@pytest.mark.parametrize('deployment_type,openshift_release,expected_docker_version', [
("origin", "3.5", "1.12"),
+ ("origin", "1.3", "1.10"),
+ ("origin", "1.1", "1.8"),
("openshift-enterprise", "3.4", "1.12"),
- ("origin", "3.3", "1.10"),
("openshift-enterprise", "3.2", "1.10"),
- ("origin", "3.1", "1.8"),
("openshift-enterprise", "3.1", "1.8"),
])
def test_docker_package_version(deployment_type, openshift_release, expected_docker_version):
- task_vars = dict(
- openshift=dict(common=dict(service_type='origin')),
- openshift_release=openshift_release,
- openshift_image_tag='v' + openshift_release,
- openshift_deployment_type=deployment_type,
- )
- return_value = object()
- def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None):
+ return_value = {"foo": object()}
+
+ def execute_module(module_name=None, module_args=None, *_):
assert module_name == 'aos_version'
assert "package_list" in module_args
@@ -129,9 +91,9 @@ def test_docker_package_version(deployment_type, openshift_release, expected_doc
return return_value
- check = PackageVersion(execute_module=execute_module)
- result = check.run(tmp=None, task_vars=task_vars)
- assert result is return_value
+ check = PackageVersion(execute_module, task_vars_for(openshift_release, deployment_type))
+ result = check.run()
+ assert result == return_value
@pytest.mark.parametrize('group_names,is_containerized,is_active', [
@@ -151,4 +113,4 @@ def test_package_version_skip_when_not_master_nor_node(group_names, is_container
group_names=group_names,
openshift=dict(common=dict(is_containerized=is_containerized)),
)
- assert PackageVersion.is_active(task_vars=task_vars) == is_active
+ assert PackageVersion(None, task_vars).is_active() == is_active
diff --git a/roles/openshift_health_checker/test/rpm_version_test.py b/roles/openshift_health_checker/test/rpm_version_test.py
index 2f09ef965..2c1bcf876 100644
--- a/roles/openshift_health_checker/test/rpm_version_test.py
+++ b/roles/openshift_health_checker/test/rpm_version_test.py
@@ -49,7 +49,7 @@ def test_check_pkg_found(pkgs, expect_not_found):
},
{
"eggs": {
- "required_version": "3.2",
+ "required_versions": ["3.2"],
"found_versions": ["3.3"],
}
}, # not the right version
@@ -61,11 +61,11 @@ def test_check_pkg_found(pkgs, expect_not_found):
},
{
"eggs": {
- "required_version": "3.2",
+ "required_versions": ["3.2"],
"found_versions": ["3.3", "1.2"],
},
"spam": {
- "required_version": "3.2",
+ "required_versions": ["3.2"],
"found_versions": ["3.1", "3.3"],
}
}, # not the right version
diff --git a/roles/openshift_health_checker/test/search_journalctl_test.py b/roles/openshift_health_checker/test/search_journalctl_test.py
new file mode 100644
index 000000000..724928aa1
--- /dev/null
+++ b/roles/openshift_health_checker/test/search_journalctl_test.py
@@ -0,0 +1,157 @@
+import pytest
+import search_journalctl
+
+
+def canned_search_journalctl(get_log_output=None):
+ """Create a search_journalctl object with canned get_log_output method"""
+ module = search_journalctl
+ if get_log_output:
+ module.get_log_output = get_log_output
+ return module
+
+
+DEFAULT_TIMESTAMP = 1496341364
+
+
+def get_timestamp(modifier=0):
+ return DEFAULT_TIMESTAMP + modifier
+
+
+def get_timestamp_microseconds(modifier=0):
+ return get_timestamp(modifier) * 1000000
+
+
+def create_test_log_object(stamp, msg):
+ return '{{"__REALTIME_TIMESTAMP": "{}", "MESSAGE": "{}"}}'.format(stamp, msg)
+
+
+@pytest.mark.parametrize('name,matchers,log_input,expected_matches,expected_errors', [
+ (
+ 'test with valid params',
+ [
+ {
+ "start_regexp": r"Sample Logs Beginning",
+ "regexp": r"test log message",
+ "unit": "test",
+ },
+ ],
+ [
+ create_test_log_object(get_timestamp_microseconds(), "test log message"),
+ create_test_log_object(get_timestamp_microseconds(), "Sample Logs Beginning"),
+ ],
+ ["test log message"],
+ [],
+ ),
+ (
+ 'test with invalid json in log input',
+ [
+ {
+ "start_regexp": r"Sample Logs Beginning",
+ "regexp": r"test log message",
+ "unit": "test-unit",
+ },
+ ],
+ [
+ '{__REALTIME_TIMESTAMP: ' + str(get_timestamp_microseconds()) + ', "MESSAGE": "test log message"}',
+ ],
+ [],
+ [
+ ["invalid json", "test-unit", "test log message"],
+ ],
+ ),
+ (
+ 'test with invalid regexp',
+ [
+ {
+ "start_regexp": r"Sample Logs Beginning",
+ "regexp": r"test [ log message",
+ "unit": "test",
+ },
+ ],
+ [
+ create_test_log_object(get_timestamp_microseconds(), "test log message"),
+ create_test_log_object(get_timestamp_microseconds(), "sample log message"),
+ create_test_log_object(get_timestamp_microseconds(), "fake log message"),
+ create_test_log_object(get_timestamp_microseconds(), "dummy log message"),
+ create_test_log_object(get_timestamp_microseconds(), "Sample Logs Beginning"),
+ ],
+ [],
+ [
+ ["invalid regular expression"],
+ ],
+ ),
+], ids=lambda argval: argval[0])
+def test_get_log_matches(name, matchers, log_input, expected_matches, expected_errors):
+ def get_log_output(matcher):
+ return log_input
+
+ module = canned_search_journalctl(get_log_output)
+ matched_regexp, errors = module.get_log_matches(matchers, 500, 60 * 60)
+
+ assert set(matched_regexp) == set(expected_matches)
+ assert len(expected_errors) == len(errors)
+
+ for idx, partial_err_set in enumerate(expected_errors):
+ for partial_err_msg in partial_err_set:
+ assert partial_err_msg in errors[idx]
+
+
+@pytest.mark.parametrize('name,matcher,log_count_lim,stamp_lim_seconds,log_input,expected_match', [
+ (
+ 'test with matching log message, but out of bounds of log_count_lim',
+ {
+ "start_regexp": r"Sample Logs Beginning",
+ "regexp": r"dummy log message",
+ "unit": "test",
+ },
+ 3,
+ get_timestamp(-100 * 60 * 60),
+ [
+ create_test_log_object(get_timestamp_microseconds(), "test log message"),
+ create_test_log_object(get_timestamp_microseconds(), "sample log message"),
+ create_test_log_object(get_timestamp_microseconds(), "fake log message"),
+ create_test_log_object(get_timestamp_microseconds(), "dummy log message"),
+ create_test_log_object(get_timestamp_microseconds(), "Sample Logs Beginning"),
+ ],
+ None,
+ ),
+ (
+ 'test with matching log message, but with timestamp too old',
+ {
+ "start_regexp": r"Sample Logs Beginning",
+ "regexp": r"dummy log message",
+ "unit": "test",
+ },
+ 100,
+ get_timestamp(-10),
+ [
+ create_test_log_object(get_timestamp_microseconds(), "test log message"),
+ create_test_log_object(get_timestamp_microseconds(), "sample log message"),
+ create_test_log_object(get_timestamp_microseconds(), "fake log message"),
+ create_test_log_object(get_timestamp_microseconds(-1000), "dummy log message"),
+ create_test_log_object(get_timestamp_microseconds(-1000), "Sample Logs Beginning"),
+ ],
+ None,
+ ),
+ (
+ 'test with matching log message, and timestamp within time limit',
+ {
+ "start_regexp": r"Sample Logs Beginning",
+ "regexp": r"dummy log message",
+ "unit": "test",
+ },
+ 100,
+ get_timestamp(-1010),
+ [
+ create_test_log_object(get_timestamp_microseconds(), "test log message"),
+ create_test_log_object(get_timestamp_microseconds(), "sample log message"),
+ create_test_log_object(get_timestamp_microseconds(), "fake log message"),
+ create_test_log_object(get_timestamp_microseconds(-1000), "dummy log message"),
+ create_test_log_object(get_timestamp_microseconds(-1000), "Sample Logs Beginning"),
+ ],
+ create_test_log_object(get_timestamp_microseconds(-1000), "dummy log message"),
+ ),
+], ids=lambda argval: argval[0])
+def test_find_matches_skips_logs(name, matcher, log_count_lim, stamp_lim_seconds, log_input, expected_match):
+ match = search_journalctl.find_matches(log_input, matcher, log_count_lim, stamp_lim_seconds)
+ assert match == expected_match
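# The fixtures above encode the journald convention that __REALTIME_TIMESTAMP is
# expressed in microseconds while the search window is given in seconds. A small
# sketch of that unit handling; `is_within_window` is a hypothetical helper, not
# part of the search_journalctl module.

import json


def is_within_window(log_line, oldest_allowed_seconds):
    """Return True if the journal entry is newer than the cutoff (in seconds)."""
    entry = json.loads(log_line)
    timestamp_seconds = float(entry["__REALTIME_TIMESTAMP"]) / 1000000.0
    return timestamp_seconds >= oldest_allowed_seconds


def test_is_within_window():
    stamp = 1496341364
    line = '{{"__REALTIME_TIMESTAMP": "{}", "MESSAGE": "dummy log message"}}'.format(stamp * 1000000)
    assert is_within_window(line, stamp - 10)
    assert not is_within_window(line, stamp + 10)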
diff --git a/roles/openshift_health_checker/test/zz_failure_summary_test.py b/roles/openshift_health_checker/test/zz_failure_summary_test.py
new file mode 100644
index 000000000..69f27653c
--- /dev/null
+++ b/roles/openshift_health_checker/test/zz_failure_summary_test.py
@@ -0,0 +1,85 @@
+from zz_failure_summary import deduplicate_failures
+
+import pytest
+
+
+@pytest.mark.parametrize('failures,deduplicated', [
+ (
+ [
+ {
+ 'host': 'master1',
+ 'msg': 'One or more checks failed',
+ },
+ ],
+ [
+ {
+ 'host': ('master1',),
+ 'msg': 'One or more checks failed',
+ },
+ ],
+ ),
+ (
+ [
+ {
+ 'host': 'master1',
+ 'msg': 'One or more checks failed',
+ },
+ {
+ 'host': 'node1',
+ 'msg': 'One or more checks failed',
+ },
+ ],
+ [
+ {
+ 'host': ('master1', 'node1'),
+ 'msg': 'One or more checks failed',
+ },
+ ],
+ ),
+ (
+ [
+ {
+ 'host': 'node1',
+ 'msg': 'One or more checks failed',
+ 'checks': (('test_check', 'error message'),),
+ },
+ {
+ 'host': 'master2',
+ 'msg': 'Some error happened',
+ },
+ {
+ 'host': 'master1',
+ 'msg': 'One or more checks failed',
+ 'checks': (('test_check', 'error message'),),
+ },
+ ],
+ [
+ {
+ 'host': ('master1', 'node1'),
+ 'msg': 'One or more checks failed',
+ 'checks': (('test_check', 'error message'),),
+ },
+ {
+ 'host': ('master2',),
+ 'msg': 'Some error happened',
+ },
+ ],
+ ),
+ # if a failure contain an unhashable value, it will not be deduplicated
+ (
+ [
+ {
+ 'host': 'master1',
+ 'msg': {'unhashable': 'value'},
+ },
+ ],
+ [
+ {
+ 'host': 'master1',
+ 'msg': {'unhashable': 'value'},
+ },
+ ],
+ ),
+])
+def test_deduplicate_failures(failures, deduplicated):
+ assert deduplicate_failures(failures) == deduplicated
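# The cases above pin down the deduplication contract: failures that are identical
# apart from 'host' are merged, their hosts are collected into a sorted tuple, and
# entries containing unhashable values pass through untouched. A simplified sketch
# of one way to satisfy that contract (not the actual zz_failure_summary code);
# it relies on insertion-ordered dicts (Python 3.7+).

from collections import defaultdict


def dedup_failures_sketch(failures):
    groups = defaultdict(list)
    passthrough = []
    for failure in failures:
        key = tuple(sorted((k, v) for k, v in failure.items() if k != 'host'))
        try:
            groups[key].append(failure)
        except TypeError:
            # key contains an unhashable value (e.g. a dict msg): keep failure as-is
            passthrough.append(failure)
    merged = []
    for group in groups.values():
        merged.append(dict(group[0], host=tuple(sorted(f['host'] for f in group))))
    return merged + passthrough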