Skip to content

Navigation Menu

Sign in
Appearance settings

Search code, repositories, users, issues, pull requests...

Provide feedback

We read every piece of feedback, and take your input very seriously.

Saved searches

Use saved searches to filter your results more quickly

Appearance settings

Commit 43ef15e

Browse files
committed
TST: Add interactive timer tests
This adds more robust interactive timer tests to assert against some of the discrepancies that were found in testing. - The run loop shouldn't depend on callback time. - Slow callbacks shouldn't cause a timer to drift over time; it should continually fire at the requested cadence. - When start() is called again, it should invalidate the previous timer associated with that Timer object.
1 parent a0863db commit 43ef15e
Copy full SHA for 43ef15e

File tree

1 file changed

+29
-18
lines changed
Filter options

1 file changed

+29
-18
lines changed

‎lib/matplotlib/tests/test_backends_interactive.py

Copy file name to clipboard · Expand all lines: lib/matplotlib/tests/test_backends_interactive.py
+29-18Lines changed: 29 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -627,14 +627,12 @@ def _impl_test_interactive_timers():
627627
# NOTE: We run the timer tests in parallel to avoid longer sequential
628628
# delays which adds to the testing time. Add new tests to one of
629629
# the current event loop iterations if possible.
630+
import time
630631
from unittest.mock import Mock
631632
import matplotlib.pyplot as plt
632633

633634
fig = plt.figure()
634-
event_loop_time = 1 # in seconds
635-
expected_200ms_calls = int(event_loop_time / 0.2)
636-
637-
# Start at 2s interval (would only get one firing), then update to 200ms
635+
# Start at 2s interval (wouldn't get any firings), then update to 100ms
638636
timer_repeating = fig.canvas.new_timer(2000)
639637
mock_repeating = Mock()
640638
timer_repeating.add_callback(mock_repeating)
@@ -645,42 +643,55 @@ def _impl_test_interactive_timers():
645643

646644
timer_repeating.start()
647645
# Test updating the interval updates a running timer
648-
timer_repeating.interval = 200
646+
timer_repeating.interval = 100
649647
# Start as a repeating timer then change to singleshot via the attribute
650648
timer_single_shot.start()
651649
timer_single_shot.single_shot = True
652650

653-
fig.canvas.start_event_loop(event_loop_time)
654-
assert 1 < mock_repeating.call_count <= expected_200ms_calls + 1, \
655-
f"Interval update: Expected between 2 and {expected_200ms_calls + 1} calls, " \
656-
f"got {mock_repeating.call_count}"
651+
fig.canvas.start_event_loop(0.5)
652+
assert 2 <= mock_repeating.call_count <= 5, \
653+
f"Interval update: Expected 2-5 calls, got {mock_repeating.call_count}"
657654
assert mock_single_shot.call_count == 1, \
658655
f"Singleshot: Expected 1 call, got {mock_single_shot.call_count}"
659656

660-
# 200ms timer triggers and the callback takes 100ms to run
661-
# Test that we don't drift and that we get called on every 200ms
662-
# interval and not every 300ms
663-
mock_repeating.side_effect = lambda: time.sleep(0.1)
657+
# 250ms timer triggers and the callback takes 150ms to run
658+
# Test that we don't drift and that we get called on every 250ms
659+
# firing and not every 400ms
660+
timer_repeating.interval = 250
661+
mock_repeating.side_effect = lambda: time.sleep(0.15)
662+
# calling start() again on a repeating timer should remove the old
663+
# one, so we don't want double the number of calls here either because
664+
# two timers are potentially running.
665+
timer_repeating.start()
664666
mock_repeating.call_count = 0
665667
# Make sure we can start the timer after stopping a singleshot timer
666668
timer_single_shot.stop()
667669
timer_single_shot.start()
668670

671+
event_loop_time = 2 # in seconds
672+
expected_calls = int(event_loop_time / (timer_repeating.interval / 1000))
673+
674+
t_start = time.perf_counter()
669675
fig.canvas.start_event_loop(event_loop_time)
670-
# Not exact timers, so add a little slop. We really want to make sure we are
671-
# getting more than 3 (every 300ms).
672-
assert mock_repeating.call_count >= expected_200ms_calls - 1, \
673-
f"Slow callback: Expected at least {expected_200ms_calls - 1} calls, " \
676+
t_loop = time.perf_counter() - t_start
677+
# Should be around 2s, but allow for some slop on CI. We want to make sure
678+
# we aren't getting 2 + (callback time) 0.5s/iteration, which would be 4+ s.
679+
assert 1.8 < t_loop < 3, \
680+
f"Event loop: Expected to run for around 2s, but ran for {t_loop:.2f}s"
681+
# Not exact timers, so add some slop. (Quite a bit for CI resources)
682+
assert abs(mock_repeating.call_count - expected_calls) <= 2, \
683+
f"Slow callback: Expected {expected_calls} calls, " \
674684
f"got {mock_repeating.call_count}"
675685
assert mock_single_shot.call_count == 2, \
676686
f"Singleshot: Expected 2 calls, got {mock_single_shot.call_count}"
677-
plt.close("all")
678687

679688

680689
@pytest.mark.parametrize("env", _get_testable_interactive_backends())
681690
def test_interactive_timers(env):
682691
if env["MPLBACKEND"] == "wx":
683692
pytest.skip("wx backend is deprecated; tests failed on appveyor")
693+
if env["MPLBACKEND"].startswith("gtk3") and is_ci_environment():
694+
pytest.xfail("GTK3 backend timer is slow on CI resources")
684695
_run_helper(_impl_test_interactive_timers,
685696
timeout=_test_timeout, extra_env=env)
686697

0 commit comments

Comments
0 (0)
Morty Proxy This is a proxified and sanitized view of the page, visit original site.