Fix crowding-out of tasks in table by skipped and persisted tasks. #226


Merged (2 commits) on Feb 23, 2022
2 changes: 2 additions & 0 deletions docs/source/changes.rst
@@ -18,6 +18,8 @@ all releases are available on `PyPI <https://pypi.org/project/pytask>`_ and
 - :pull:`221` adds more test cases for parametrizations.
 - :pull:`222` adds an automated GitHub Actions job for creating a list of pytask plugins.
 - :pull:`225` fixes a circular import noticeable in plugins created by :pull:`197`.
+- :pull:`226` fixes a bug where skipped and persisted tasks crowded other tasks
+  out of the live table during the execution, showing fewer entries than configured.
 
 
 0.1.8 - 2022-02-07
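
The gist of the fix, as a minimal sketch: plain dicts stand in for pytask's ExecutionReport objects and strings for the TaskOutcome enum (both are stand-ins, not the real API). Before this PR the report list was cut down to the last n entries first, and hidden outcomes were only dropped while rendering, so skipped tasks could crowd out every displayable row.

# Hedged sketch of the crowding-out bug; dicts and string outcomes stand in
# for pytask's ExecutionReport objects and the TaskOutcome enum.
reports = [{"name": "task_success", "outcome": "success"}]
reports += [{"name": f"task_skip_{i}", "outcome": "skip"} for i in range(25)]
n_entries = 20  # rows available in the live table

# Pre-fix ordering: slice first, hide skips while rendering. The one
# displayable report falls outside the last 20 entries and nothing is shown.
crowded_out = [r for r in reports[-n_entries:] if r["outcome"] != "skip"]
assert crowded_out == []

# Post-fix ordering: hide skips first, then slice. The displayable report
# survives the cut.
displayed = [r for r in reports if r["outcome"] != "skip"][-n_entries:]
assert [r["name"] for r in displayed] == ["task_success"]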
48 changes: 26 additions & 22 deletions src/_pytask/live.py
@@ -182,10 +182,26 @@ def _generate_table(self, reduce_table: bool, sort_table: bool) -> Table | None:
 
         """
         n_reports_to_display = self._n_entries_in_table - len(self._running_tasks)
+
+        if self._verbose < 2:
+            reports = [
+                report
+                for report in self._reports
+                if report["outcome"]
+                not in (
+                    TaskOutcome.SKIP,
+                    TaskOutcome.SKIP_UNCHANGED,
+                    TaskOutcome.SKIP_PREVIOUS_FAILED,
+                    TaskOutcome.PERSISTENCE,
+                )
+            ]
+        else:
+            reports = self._reports
+
         if not reduce_table:
-            relevant_reports = self._reports
+            relevant_reports = reports
         elif n_reports_to_display >= 1:
-            relevant_reports = self._reports[-n_reports_to_display:]
+            relevant_reports = reports[-n_reports_to_display:]
         else:
             relevant_reports = []

@@ -198,26 +214,14 @@ def _generate_table(self, reduce_table: bool, sort_table: bool) -> Table | None:
         table.add_column("Task", overflow="fold")
         table.add_column("Outcome")
         for report in relevant_reports:
-            if (
-                report["outcome"]
-                in (
-                    TaskOutcome.SKIP,
-                    TaskOutcome.SKIP_UNCHANGED,
-                    TaskOutcome.SKIP_PREVIOUS_FAILED,
-                    TaskOutcome.PERSISTENCE,
-                )
-                and self._verbose < 2
-            ):
-                pass
-            else:
-                table.add_row(
-                    format_task_id(
-                        report["task"],
-                        editor_url_scheme=self._editor_url_scheme,
-                        short_name=True,
-                    ),
-                    Text(report["outcome"].symbol, style=report["outcome"].style),
-                )
+            table.add_row(
+                format_task_id(
+                    report["task"],
+                    editor_url_scheme=self._editor_url_scheme,
+                    short_name=True,
+                ),
+                Text(report["outcome"].symbol, style=report["outcome"].style),
+            )
         for task in self._running_tasks.values():
             table.add_row(
                 format_task_id(
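
To follow the new control flow without the Rich table plumbing, the selection logic above can be restated as a standalone helper. This is an illustrative refactor, not pytask's API; string outcomes again stand in for the TaskOutcome members.

# Illustrative restatement of the report selection in _generate_table; not
# part of pytask's API. Strings stand in for the TaskOutcome members.
HIDDEN_OUTCOMES = ("skip", "skip_unchanged", "skip_previous_failed", "persistence")


def select_reports(reports, n_reports_to_display, verbose, reduce_table):
    """Pick the reports that the live table will actually render."""
    if verbose < 2:
        # Drop hidden outcomes *before* slicing so they cannot crowd out
        # displayable reports.
        reports = [r for r in reports if r["outcome"] not in HIDDEN_OUTCOMES]
    if not reduce_table:
        # Outside the reduced live view, show everything that is displayable.
        return reports
    if n_reports_to_display >= 1:
        return reports[-n_reports_to_display:]
    return []

With verbose >= 2 the filter is bypassed, which matches the diff: skipped and persisted tasks then count against, and may fill, the available rows.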
70 changes: 70 additions & 0 deletions tests/test_live.py
@@ -205,6 +205,76 @@ def test_live_execution_displays_subset_of_table(capsys, tmp_path, n_entries_in_
     assert "│ ." in captured.out
 
 
+@pytest.mark.unit
+def test_live_execution_skips_do_not_crowd_out_displayed_tasks(capsys, tmp_path):
+    path = tmp_path.joinpath("task_module.py")
+    task = PythonFunctionTask(
+        "task_example", path.as_posix() + "::task_example", path, lambda x: x
+    )
+    task.short_name = "task_module.py::task_example"
+
+    live_manager = LiveManager()
+    live = LiveExecution(live_manager, 20, 1, "no_link")
+
+    live_manager.start()
+    live.update_running_tasks(task)
+    live_manager.stop()
+
+    # Test the table with a running task.
+    captured = capsys.readouterr()
+    assert "Task" in captured.out
+    assert "Outcome" in captured.out
+    assert "task_module.py::task_example" in captured.out
+    assert "running" in captured.out
+
+    # Add one displayed report and many more non-displayed reports to crowd out
+    # the valid one.
+    successful_task = PythonFunctionTask(
+        "task_success", path.as_posix() + "::task_success", path, lambda x: x
+    )
+    successful_task.short_name = "task_module.py::task_success"
+
+    tasks = []
+    for i in range(25):
+        skipped_task = PythonFunctionTask(
+            f"task_skip_{i}", path.as_posix() + f"::task_skip_{i}", path, lambda x: x
+        )
+        skipped_task.short_name = f"task_module.py::task_skip_{i}"
+        tasks.append(skipped_task)
+
+    live_manager.start()
+    live.update_running_tasks(successful_task)
+    for task in tasks:
+        live.update_running_tasks(task)
+    live_manager.stop()
+
+    captured = capsys.readouterr()
+    assert "running" in captured.out
+    assert "task_success" in captured.out
+    for i in range(25):
+        assert f"task_skip_{i}" in captured.out
+
+    live_manager.resume()
+    report = ExecutionReport(
+        task=successful_task, outcome=TaskOutcome.SUCCESS, exc_info=None
+    )
+    live.update_reports(report)
+    for task in tasks:
+        report = ExecutionReport(task=task, outcome=TaskOutcome.SKIP, exc_info=None)
+        live.update_reports(report)
+    live_manager.stop()
+
+    # Test the final table with the reported outcomes.
+    captured = capsys.readouterr()
+    assert "Task" in captured.out
+    assert "Outcome" in captured.out
+    assert "task_module.py::task_example" in captured.out
+    assert "task_module.py::task_success" in captured.out
+    assert "running" in captured.out
+    assert TaskOutcome.SUCCESS.symbol in captured.out
+    assert "task_skip" not in captured.out
+
+
 @pytest.mark.end_to_end
 def test_full_execution_table_is_displayed_at_the_end_of_execution(tmp_path, runner):
     source = """
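
A side note on the opaque positional arguments in LiveExecution(live_manager, 20, 1, "no_link") above: judging by the attributes read in _generate_table (_n_entries_in_table, _verbose, _editor_url_scheme), they plausibly map to the entry budget, the verbosity level, and the editor URL scheme. A keyword spelling, with parameter names assumed from those private attributes rather than confirmed against pytask's signature, would read:

# Hypothetical keyword spelling; parameter names are assumed from the private
# attributes used in _generate_table, not confirmed against pytask's API.
live = LiveExecution(
    live_manager=live_manager,
    n_entries_in_table=20,  # the table renders at most 20 rows
    verbose=1,  # below 2, skipped and persisted tasks are hidden
    editor_url_scheme="no_link",  # render task ids without hyperlinks
)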