Skip to content
Snippets Groups Projects
Commit f2d4de72 authored by Janet Goldstein's avatar Janet Goldstein
Browse files

WS-693: Removing broken workflow monitor tests

parent 8297d201
No related branches found
No related tags found
1 merge request!542WS-693: Removing broken workflow monitor tests
Pipeline #3122 passed
# Workflow Monitor Tests: lack thereof
## WorkflowMonitor untestable without extensive refactoring
Due to queue and messenger rework, workflow monitor tests
are broken; test_wf_monitor module has been removed.
Tests were:
* `test_parse_log`: tested that example log file is correctly parsed
by checking for the known order of event types
* `test_parse_log_error`: tested for ValueError when a badly formatted
log is parsed
* `test_read_log`: tested whether or not example log is being correctly read by checking for strings known to
exist in the example log file
* `test_read_log_slow`: tested that wf_monitor can keep up with reading when writing is slow.
* `test_read_log_timeout`: tested timeout functionality of WorkflowMonitor.read_htcondor_log()
* `test_send_events`: mocked core wf_monitor functionality of creating a connection to the RabbitMQ
server, configuring a channel with that connection, and emitting events through that channel
"""Tests removed: all broke when queue and messaging systems were overhauled.
This module is kept so as not to break tests; mocks remain
in case they may be useful Some Fine Day.
"""
"""
Testing suite for wf_monitor. Currently not covered by tests:
- log_decorator_factory
......@@ -5,15 +9,11 @@ Testing suite for wf_monitor. Currently not covered by tests:
"""
import logging
import sys
import threading
import time
from pprint import pprint
from typing import Dict
from unittest.mock import patch
# pylint: disable=E0401, W1203
import pytest
from _pytest.logging import LogCaptureFixture
import wf_monitor.monitor
from messaging.router import Router
......@@ -52,108 +52,6 @@ def mock_monitor_factory(path_to_log: str, timeout: int = 60) -> WorkflowMonitor
with patch("wf_monitor.monitor.Router"):
return WorkflowMonitor(path_to_log, -1, timeout)
# @pytest.mark.skip("Broken due to queue/messenger rework")
# default_mock_monitor = mock_monitor_factory(LOG_PATH)
@pytest.mark.skip("Broken due to queue/messenger rework")
def test_read_log():
"""
Tests whether or not the example log is being correctly read by checking for strings known to
exist in the example log file
"""
test_strs = [
"Image size of job updated: 72",
"72 - ResidentSetSize of job (KB)",
"(1) Normal termination (return value 0)",
"000 (3983.000.000) 08/26 11:06:06 Job submitted from host: "
"<10.64.1.178:9618?addrs=10.64.1.178-9618&noUDP&sock=4050180_7e37_3>",
]
for test_str in test_strs:
assert test_str in default_mock_monitor.log
@pytest.mark.skip(reason="This test sometimes fails. Skipping until it is investigated further.")
def test_read_log_slow(caplog: LogCaptureFixture):
"""
Tests that wf_monitor can keep up with reading when writing is slow.
"""
caplog.set_level(logging.INFO)
slow_log = "logs/slow.log"
# FIXME: This line fails. talk w/ Nathan about this.
thread = threading.Thread(target=mock_monitor_factory, args=slow_log)
thread.start()
# Truncate slow.log
open(slow_log, "w").close()
event = ""
with open(slow_log, "r") as log:
for line in log:
print(line)
event += line
if line == "...\n":
# End of event reached
with open(slow_log, "a") as slow:
slow.write(event)
event = ""
time.sleep(2)
thread.join()
assert f"Finished monitoring {slow_log}.log." in caplog.text
@pytest.mark.skip("HANGS in CI; passes locally")
def test_read_log_timeout():
"""
Tests the timeout functionality of WorkflowMonitor.read_htcondor_log()
"""
with pytest.raises(SystemExit) as sys_ex:
mock_monitor_factory("logs/file-that-does-not-exist.txt", 3)
assert sys_ex.type == SystemExit
assert sys_ex.value.code == -1
@pytest.mark.skip("uses possibly-broken default_mock_monitor")
def test_parse_log():
"""
Tests that the example log file is correctly parsed
by checking for the known order of event types
"""
test_event_types = [
"workflow-executing",
"unknown",
"unknown",
"workflow-complete",
]
for msg, event_type in zip(default_mock_monitor.events, test_event_types):
pprint(msg)
assert msg["event_type"] == event_type
@pytest.mark.skip(reason="Hangs due to infinite loop in read_htcondor_log()")
def test_parse_log_error(caplog: LogCaptureFixture):
"""
Tests that WorkflowMonitor.parse_log() correctly raises a ValueError when a badly formatted
log is parsed
"""
caplog.set_level(logging.INFO)
with pytest.raises(TimeoutError):
mock_monitor_factory("logs/test.log", 5)
msg = (
"HTCondor event not well-formatted. Event contents:\n"
"this will trigger an exception!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n...\n"
)
messages = [record.message for record in caplog.records]
assert msg in messages
@pytest.mark.skip("uses possibly-broken default_mock_monitor")
def test_send_events(caplog: LogCaptureFixture):
"""
Test that mocks the core wf_monitor functionality of creating a connection to the RabbitMQ
server, configuring a channel with that connection, and emitting events through that channel
"""
caplog.set_level(logging.INFO)
for message in default_mock_monitor.events:
mock_send_message(message, default_mock_monitor.message_router)
messages = [record.message for record in caplog.records]
for msg in messages:
assert msg.startswith("Pretending to send message")
@pytest.mark.skip("This is here just to avoid breaking build")
def test_nothing():
pass
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment