From 1bee970957e93199203fea35cd874397329c38fe Mon Sep 17 00:00:00 2001
From: Tobias Raabe
Date: Fri, 6 Jun 2025 20:49:55 +0200
Subject: [PATCH] Remove differentiation between test kinds.

---
 .github/workflows/main.yml    | 28 +++------------------
 tests/test_build.py           |  6 -----
 tests/test_cache.py           |  5 ----
 tests/test_capture.py         | 20 ---------------
 tests/test_clean.py           | 17 -------------
 tests/test_cli.py             |  3 ---
 tests/test_click.py           |  4 ---
 tests/test_collect.py         | 26 --------------------
 tests/test_collect_command.py | 23 ------------------
 tests/test_collect_utils.py   |  3 ---
 tests/test_compat.py          |  6 -----
 tests/test_config.py          |  7 ------
 tests/test_config_utils.py    |  3 ---
 tests/test_console.py         |  8 ------
 tests/test_dag.py             |  5 ----
 tests/test_dag_command.py     |  6 -----
 tests/test_dag_utils.py       |  9 -------
 tests/test_data_catalog.py    | 13 ----------
 tests/test_database.py        |  4 ---
 tests/test_debugging.py       | 19 ---------------
 tests/test_dry_run.py         |  9 -------
 tests/test_execute.py         | 46 -----------------------------
 tests/test_git.py             |  1 -
 tests/test_hashlib.py         |  1 -
 tests/test_hook_module.py     |  5 ----
 tests/test_ignore.py          |  3 ---
 tests/test_live.py            |  7 ------
 tests/test_logging.py         |  5 ----
 tests/test_mark.py            | 20 ---------------
 tests/test_mark_cli.py        |  3 ---
 tests/test_mark_expression.py |  7 ------
 tests/test_mark_structures.py |  2 --
 tests/test_mark_utils.py      | 10 --------
 tests/test_node_protocols.py  |  5 ----
 tests/test_nodes.py           |  6 -----
 tests/test_outcomes.py        |  2 --
 tests/test_path.py            |  8 ------
 tests/test_persist.py         |  4 ---
 tests/test_profile.py         |  6 -----
 tests/test_provisional.py     | 12 ---------
 tests/test_shared.py          |  5 ----
 tests/test_skipping.py        | 12 ---------
 tests/test_task.py            | 32 ------------------------
 tests/test_task_utils.py      |  4 ---
 tests/test_traceback.py       |  3 ---
 tests/test_tree_util.py       |  3 ---
 tests/test_typing.py          |  3 ---
 tests/test_warnings.py        | 10 --------
 48 files changed, 3 insertions(+), 446 deletions(-)

diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index 3a36262f..617b38f9 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -51,31 +51,9 @@ jobs:
           sudo apt-get update
           sudo apt-get install graphviz graphviz-dev
 
-      # Unit, integration, and end-to-end tests.
-
-      - name: Run unit tests and doctests.
-        shell: bash -l {0}
-        run: uv run --group test pytest --nbmake -m "unit or (not integration and not end_to_end)" --cov=src --cov=tests --cov-report=xml -n auto
-
-      - name: Upload unit test coverage reports to Codecov with GitHub Action
-        uses: codecov/codecov-action@v5
-        with:
-          flags: unit
-
-      - name: Run integration tests.
+      - name: Run tests, doctests, and notebook tests
         shell: bash -l {0}
-        run: uv run --group test pytest --nbmake -m integration --cov=src --cov=tests --cov-report=xml -n auto
+        run: uv run --group test pytest --nbmake --cov=src --cov=tests --cov-report=xml -n auto
 
-      - name: Upload integration test coverage reports to Codecov with GitHub Action
+      - name: Upload test coverage reports to Codecov with GitHub Action
         uses: codecov/codecov-action@v5
-        with:
-          flags: integration
-
-      - name: Run end-to-end tests.
-        shell: bash -l {0}
-        run: uv run --group test pytest --nbmake -m end_to_end --cov=src --cov=tests --cov-report=xml -n auto
-
-      - name: Upload end_to_end test coverage reports to Codecov with GitHub Action
-        uses: codecov/codecov-action@v5
-        with:
-          flags: end_to_end
diff --git a/tests/test_build.py b/tests/test_build.py
index c523c516..910f97c5 100644
--- a/tests/test_build.py
+++ b/tests/test_build.py
@@ -2,13 +2,10 @@
 
 import textwrap
 
-import pytest
-
 from pytask import ExitCode
 from pytask import cli
 
 
-@pytest.mark.end_to_end
 def test_execution_failed(runner, tmp_path):
     source = """
     def task_raises():
@@ -20,13 +17,11 @@ def task_raises():
     assert result.exit_code == ExitCode.FAILED
 
 
-@pytest.mark.end_to_end
 def test_configuration_failed(runner, tmp_path):
     result = runner.invoke(cli, [tmp_path.joinpath("non_existent_path").as_posix()])
     assert result.exit_code == ExitCode.CONFIGURATION_FAILED
 
 
-@pytest.mark.end_to_end
 def test_collection_failed(runner, tmp_path):
     source = """
     raise Exception
@@ -37,7 +32,6 @@ def test_collection_failed(runner, tmp_path):
     assert result.exit_code == ExitCode.COLLECTION_FAILED
 
 
-@pytest.mark.end_to_end
 def test_building_dag_failed(runner, tmp_path):
     source = """
     from pathlib import Path
diff --git a/tests/test_cache.py b/tests/test_cache.py
index 75b7e6da..218120f3 100644
--- a/tests/test_cache.py
+++ b/tests/test_cache.py
@@ -2,13 +2,10 @@
 
 import inspect
 
-import pytest
-
 from _pytask.cache import Cache
 from _pytask.cache import _make_memoize_key
 
 
-@pytest.mark.unit
 def test_cache():
     cache = Cache()
@@ -32,7 +29,6 @@ def func(a, b):
     assert func.cache.cache_info.misses == 1
 
 
-@pytest.mark.unit
 def test_cache_add():
     cache = Cache()
@@ -56,7 +52,6 @@ def func(a):
     assert cache.cache_info.misses == 1
 
 
-@pytest.mark.unit
 def test_make_memoize_key():
     def func(a, b):  # pragma: no cover
         return a + b
diff --git a/tests/test_capture.py b/tests/test_capture.py
index 3e69270f..c2aaffe1 100644
--- a/tests/test_capture.py
+++ b/tests/test_capture.py
@@ -27,7 +27,6 @@
     from collections.abc import Generator
 
 
-@pytest.mark.end_to_end
 @pytest.mark.parametrize("show_capture", ["s", "no", "stdout", "stderr", "all"])
 def test_show_capture(tmp_path, runner, show_capture):
     source = """
@@ -66,7 +65,6 @@ def task_show_capture():
     raise NotImplementedError
 
 
-@pytest.mark.end_to_end
 @pytest.mark.parametrize("show_capture", ["no", "stdout", "stderr", "all"])
 @pytest.mark.xfail(
     sys.platform == "win32",
@@ -114,7 +112,6 @@ def task_show_capture():
     raise NotImplementedError
 
 
-@pytest.mark.end_to_end
 @pytest.mark.xfail(
     sys.platform == "win32",
     reason="from pytask ... cannot be found",
 )
@@ -173,7 +170,6 @@ def TeeStdCapture(  # noqa: N802
     )
 
 
-@pytest.mark.end_to_end
 class TestCaptureManager:
     @pytest.mark.parametrize(
         "method", [CaptureMethod.NO, CaptureMethod.SYS, CaptureMethod.FD]
     )
@@ -218,7 +214,6 @@ def test_init_capturing(self):
         capouter.stop_capturing()
 
 
-@pytest.mark.end_to_end
 @pytest.mark.parametrize("method", ["fd", "sys"])
 def test_capturing_unicode(tmp_path, runner, method):
     obj = "'b\u00f6y'"
@@ -239,7 +234,6 @@ def task_unicode():
     assert result.exit_code == ExitCode.OK
 
 
-@pytest.mark.end_to_end
 @pytest.mark.parametrize("method", ["fd", "sys"])
 @pytest.mark.skipif(sys.platform == "win32", reason="Fails on Windows.")
 def test_capturing_unicode_with_build(tmp_path, method):
@@ -266,7 +260,6 @@ def task_unicode():
     assert "1 Succeeded" in result.stdout
 
 
-@pytest.mark.end_to_end
 @pytest.mark.parametrize("method", ["fd", "sys"])
 def test_capturing_bytes_in_utf8_encoding(tmp_path, runner, method):
     source = """
@@ -283,7 +276,6 @@ def task_unicode():
     assert result.exit_code == ExitCode.OK
 
 
-@pytest.mark.end_to_end
 @pytest.mark.xfail(strict=True, reason="pytask cannot capture during collection.")
 def test_collect_capturing(tmp_path, runner):
     source = """
@@ -305,7 +297,6 @@ def test_collect_capturing(tmp_path, runner):
     assert content in result.output
 
 
-@pytest.mark.end_to_end
 def test_capturing_outerr(tmp_path, runner):
     source = """
     import sys
@@ -341,7 +332,6 @@ def task_capturing_error():
     assert content in result.output
 
 
-@pytest.mark.end_to_end
 def test_capture_badoutput_issue412(tmp_path, runner):
     source = """
     import os
@@ -363,7 +353,6 @@ def task_func():
     assert content in result.output
 
 
-@pytest.mark.unit
 class TestCaptureIO:
     def test_text(self):
         f = capture.CaptureIO()
@@ -389,7 +378,6 @@ def test_write_bytes_to_buffer(self):
         assert f.getvalue() == "foo\r\n"
 
 
-@pytest.mark.unit
 class TestTeeCaptureIO(TestCaptureIO):
     def test_text(self):
         sio = io.StringIO()
@@ -409,7 +397,6 @@ def test_unicode_and_str_mixture(self):
         pytest.raises(TypeError, f.write, b"hello")
 
 
-@pytest.mark.integration
 def test_dontreadfrominput():
     from _pytest.capture import DontReadFromInput
@@ -424,7 +411,6 @@ def test_dontreadfrominput():
     f.close()  # just for completeness
 
 
-@pytest.mark.unit
 def test_captureresult() -> None:
     cr = CaptureResult("out", "err")
     assert len(cr) == 2
@@ -482,7 +468,6 @@ def lsof_check():
     assert len2 < len1 + 3, out2
 
 
-@pytest.mark.unit
 class TestFDCapture:
     def test_simple(self, tmpfile):
         fd = tmpfile.fileno()
@@ -589,7 +574,6 @@ def saved_fd(fd):
         os.close(new_fd)
 
 
-@pytest.mark.unit
 class TestStdCapture:
     captureclass = staticmethod(StdCapture)
@@ -708,7 +692,6 @@ def test_stdin_nulled_by_default(self):
         pytest.raises(OSError, sys.stdin.read)
 
 
-@pytest.mark.unit
 class TestTeeStdCapture(TestStdCapture):
     captureclass = staticmethod(TeeStdCapture)
@@ -725,7 +708,6 @@ def test_capturing_error_recursive(self):
         assert out2 == "cap2\n"
 
 
-@pytest.mark.unit
 class TestStdCaptureFD(TestStdCapture):
     captureclass = staticmethod(StdCaptureFD)
@@ -767,7 +749,6 @@ def test_many(self, capfd):  # noqa: ARG002
         cap.stop_capturing()
 
 
-@pytest.mark.unit
 class TestStdCaptureFDinvalidFD:
     @pytest.mark.skipif(
         sys.platform == "darwin" and sys.version_info[:2] == (3, 9),
@@ -851,7 +832,6 @@ def test_fdcapture_invalid_fd_without_fd_reuse(self, tmp_path):
         os.write(2, b"done")
 
 
-@pytest.mark.unit
 def test__get_multicapture() -> None:
     assert isinstance(_get_multicapture(CaptureMethod.NO), MultiCapture)
     pytest.raises(ValueError, _get_multicapture, "unknown").match(
diff --git a/tests/test_clean.py b/tests/test_clean.py
index 1258aadb..4491a303 100644
--- a/tests/test_clean.py
+++ b/tests/test_clean.py
@@ -56,7 +56,6 @@ def task_write_text(path=Path("in_tracked.txt"), produces=Path("out.txt")):
     return tmp_path
 
 
-@pytest.mark.end_to_end
 def test_clean_database_ignored(project, runner):
     with enter_directory(project):
         result = runner.invoke(cli, ["build"])
@@ -70,7 +69,6 @@ def test_clean_database_ignored(project, runner):
     assert "pytask.sqlite3" not in text_without_linebreaks
 
 
-@pytest.mark.end_to_end
 def test_clean_with_auto_collect(project, runner):
     with enter_directory(project):
         result = runner.invoke(cli, ["clean"])
@@ -82,7 +80,6 @@ def test_clean_with_auto_collect(project, runner):
     assert "to_be_deleted_file_2.txt" in text_without_linebreaks
 
 
-@pytest.mark.end_to_end
 @pytest.mark.parametrize("flag", ["-e", "--exclude"])
 @pytest.mark.parametrize("pattern", ["*_1.txt", "to_be_deleted_file_[1]*"])
 def test_clean_with_excluded_file(project, runner, flag, pattern):
@@ -94,7 +91,6 @@ def test_clean_with_excluded_file(project, runner, flag, pattern):
     assert "to_be_deleted_file_2.txt" in text_without_linebreaks
 
 
-@pytest.mark.end_to_end
 @pytest.mark.parametrize("flag", ["-e", "--exclude"])
 @pytest.mark.parametrize("pattern", ["*_1.txt", "to_be_deleted_file_[1]*"])
 def test_clean_with_excluded_file_via_config(project, runner, flag, pattern):
@@ -111,7 +107,6 @@ def test_clean_with_excluded_file_via_config(project, runner, flag, pattern):
     assert "pyproject.toml" in text_without_linebreaks
 
 
-@pytest.mark.end_to_end
 @pytest.mark.parametrize("flag", ["-e", "--exclude"])
 def test_clean_with_excluded_directory(project, runner, flag):
     result = runner.invoke(
@@ -123,7 +118,6 @@ def test_clean_with_excluded_directory(project, runner, flag):
     assert "deleted_file_1.txt" in result.output.replace("\n", "")
 
 
-@pytest.mark.end_to_end
 def test_clean_with_nothing_to_remove(tmp_path, runner):
     result = runner.invoke(cli, ["clean", "--exclude", "*", tmp_path.as_posix()])
@@ -131,7 +125,6 @@ def test_clean_with_nothing_to_remove(tmp_path, runner):
     assert "There are no files and directories which can be deleted." in result.output
 
 
-@pytest.mark.end_to_end
 def test_clean_dry_run(project, runner):
     result = runner.invoke(cli, ["clean", project.as_posix()])
@@ -146,7 +139,6 @@
     ).exists()
 
 
-@pytest.mark.end_to_end
 def test_clean_dry_run_w_directories(project, runner):
     result = runner.invoke(cli, ["clean", "-d", project.as_posix()])
@@ -158,7 +150,6 @@ def test_clean_dry_run_w_directories(project, runner):
     assert "to_be_deleted_folder_1" in text_without_linebreaks
 
 
-@pytest.mark.end_to_end
 def test_clean_force(project, runner):
     result = runner.invoke(cli, ["clean", "--mode", "force", project.as_posix()])
@@ -173,7 +164,6 @@
     ).exists()
 
 
-@pytest.mark.end_to_end
 def test_clean_force_w_directories(project, runner):
     result = runner.invoke(cli, ["clean", "-d", "--mode", "force", project.as_posix()])
@@ -185,7 +175,6 @@ def test_clean_force_w_directories(project, runner):
     assert "to_be_deleted_folder_1" in text_without_linebreaks
 
 
-@pytest.mark.end_to_end
 def test_clean_interactive(project, runner):
     result = runner.invoke(
         cli,
@@ -204,7 +193,6 @@ def test_clean_interactive(project, runner):
     ).exists()
 
 
-@pytest.mark.end_to_end
 def test_clean_interactive_w_directories(project, runner):
     result = runner.invoke(
         cli,
@@ -222,7 +210,6 @@ def test_clean_interactive_w_directories(project, runner):
     assert not project.joinpath("to_be_deleted_folder_1").exists()
 
 
-@pytest.mark.end_to_end
 def test_configuration_failed(runner, tmp_path):
     result = runner.invoke(
         cli, ["clean", tmp_path.joinpath("non_existent_path").as_posix()]
     )
@@ -230,7 +217,6 @@ def test_configuration_failed(runner, tmp_path):
     assert result.exit_code == ExitCode.CONFIGURATION_FAILED
 
 
-@pytest.mark.end_to_end
 def test_collection_failed(runner, tmp_path):
     source = """
     raise Exception
@@ -241,7 +227,6 @@ def test_collection_failed(runner, tmp_path):
     assert result.exit_code == ExitCode.COLLECTION_FAILED
 
 
-@pytest.mark.end_to_end
 def test_dont_remove_files_tracked_by_git(runner, git_project):
     result = runner.invoke(cli, ["clean", git_project.as_posix()])
@@ -251,7 +236,6 @@ def test_dont_remove_files_tracked_by_git(runner, git_project):
     assert ".git" not in result.output
 
 
-@pytest.mark.end_to_end
 def test_clean_git_files_if_git_is_not_installed(monkeypatch, runner, git_project):
     monkeypatch.setattr(
         "_pytask.clean.is_git_installed",
@@ -266,7 +250,6 @@ def test_clean_git_files_if_git_is_not_installed(monkeypatch, runner, git_projec
     assert ".git" not in result.output
 
 
-@pytest.mark.end_to_end
 def test_clean_git_files_if_git_is_installed_but_git_root_is_not_found(
     monkeypatch, runner, git_project
 ):
diff --git a/tests/test_cli.py b/tests/test_cli.py
index 6df79d32..4da1cd97 100644
--- a/tests/test_cli.py
+++ b/tests/test_cli.py
@@ -8,13 +8,11 @@
 from tests.conftest import run_in_subprocess
 
 
-@pytest.mark.end_to_end
 def test_version_option():
     result = run_in_subprocess(("pytask", "--version"))
     assert "pytask, version " + __version__ in result.stdout
 
 
-@pytest.mark.end_to_end
 @pytest.mark.parametrize("help_option", ["-h", "--help"])
 @pytest.mark.parametrize(
     "commands",
@@ -33,7 +31,6 @@ def test_help_pages(runner, commands, help_option):
     assert result.exit_code == ExitCode.OK
 
 
-@pytest.mark.end_to_end
 def test_help_texts_are_modified_by_config(runner, tmp_path):
     tmp_path.joinpath("pyproject.toml").write_text(
         '[tool.pytask.ini_options]\nshow_capture = "stdout"'
     )
diff --git a/tests/test_click.py b/tests/test_click.py
index 28042388..502e69a2 100644
--- a/tests/test_click.py
+++ b/tests/test_click.py
@@ -9,20 +9,17 @@
 from pytask import cli
 
 
-@pytest.mark.end_to_end
 def test_choices_are_displayed_in_help_page(runner):
     result = runner.invoke(cli, ["build", "--help"])
     assert "[no|stdout|stderr|all]" in result.output
     assert "[fd|no|sys|tee-sys]" in result.output
 
 
-@pytest.mark.end_to_end
 def test_defaults_are_displayed(runner):
     result = runner.invoke(cli, ["build", "--help"])
     assert "[default: all]" in result.output
 
 
-@pytest.mark.unit
 @pytest.mark.parametrize("method", ["first", "second"])
 def test_enum_choice(runner, method):
     class Method(enum.Enum):
@@ -40,7 +37,6 @@ def test(method):
     assert f"method=Method.{method.upper()}" in result.output
 
 
-@pytest.mark.unit
 def test_enum_choice_error(runner):
     class Method(enum.Enum):
         FIRST = "first"
diff --git a/tests/test_collect.py b/tests/test_collect.py
index f1889931..0bb12ed9 100644
--- a/tests/test_collect.py
+++ b/tests/test_collect.py
@@ -18,7 +18,6 @@
 from pytask import cli
 
 
-@pytest.mark.end_to_end
 @pytest.mark.parametrize(
     ("depends_on", "produces"),
     [
@@ -42,7 +41,6 @@ def task_write_text(path=Path({depends_on}), produces=Path({produces})):
     assert tmp_path.joinpath("out.txt").read_text() == "Relative paths work."
 
 
-@pytest.mark.end_to_end
 def test_relative_path_of_path_node(runner, tmp_path):
     source = """
     from pathlib import Path
@@ -63,7 +61,6 @@ def task_example(
     assert tmp_path.joinpath("out.txt").exists()
 
 
-@pytest.mark.end_to_end
 def test_collect_nodes_with_the_same_name(runner, tmp_path):
     """Nodes with the same filename, not path, are not mistaken for each other."""
     source = """
@@ -89,7 +86,6 @@ def task_1(path=Path("sub/text.txt"), produces=Path("out_1.txt")):
     assert tmp_path.joinpath("out_1.txt").read_text() == "in sub"
 
 
-@pytest.mark.end_to_end
 @pytest.mark.parametrize("path_extension", ["", "task_module.py"])
 def test_collect_same_task_different_ways(tmp_path, path_extension):
     tmp_path.joinpath("task_module.py").write_text("def task_passes(): pass")
@@ -110,7 +106,6 @@ def test_modules_are_not_collected_twice(runner, tmp_path):
     assert "Collected 1 task" in result.output
 
 
-@pytest.mark.end_to_end
 @pytest.mark.parametrize(
     ("task_files", "pattern", "expected_collected_tasks"),
     [
@@ -152,7 +147,6 @@ def test_error_with_invalid_file_name_pattern_(tmp_path):
     assert session.exit_code == ExitCode.CONFIGURATION_FAILED
 
 
-@pytest.mark.unit
 @pytest.mark.parametrize(
     ("session", "path", "node_info", "expected"),
     [
@@ -193,7 +187,6 @@ def test_pytask_collect_node(session, path, node_info, expected):
     assert str(result.load()) == str(expected)
 
 
-@pytest.mark.unit
 @pytest.mark.skipif(
     sys.platform != "win32", reason="Only works on case-insensitive file systems."
 )
@@ -217,7 +210,6 @@ def test_pytask_collect_node_raises_error_if_path_is_not_correctly_cased(tmp_pat
     )
 
 
-@pytest.mark.unit
 @pytest.mark.parametrize("is_absolute", [True, False])
 def test_pytask_collect_node_does_not_raise_error_if_path_is_not_normalized(
     tmp_path, is_absolute
@@ -246,7 +238,6 @@ def test_pytask_collect_node_does_not_raise_error_if_path_is_not_normalized(
     assert str(result.path) == str(real_node)
 
 
-@pytest.mark.unit
 def test_find_shortest_uniquely_identifiable_names_for_tasks(tmp_path):
     tasks = []
     expected = {}
@@ -287,7 +278,6 @@ def test_find_shortest_uniquely_identifiable_names_for_tasks(tmp_path):
     assert result == expected
 
 
-@pytest.mark.end_to_end
 def test_collect_dependencies_from_args_if_depends_on_is_missing(tmp_path):
     source = """
     from pathlib import Path
@@ -305,7 +295,6 @@ def task_example(path_in = Path("in.txt"), produces = Path("out.txt")):
     assert session.tasks[0].depends_on["path_in"].path == tmp_path.joinpath("in.txt")
 
 
-@pytest.mark.end_to_end
 def test_collect_tasks_from_modules_with_the_same_name(tmp_path):
     """We need to check that task modules can have the same name. See #373 and #374."""
     tmp_path.joinpath("a").mkdir()
@@ -323,7 +312,6 @@ def test_collect_tasks_from_modules_with_the_same_name(tmp_path):
     } == {"a.task_module", "b.task_module"}
 
 
-@pytest.mark.end_to_end
 def test_collect_module_name(tmp_path):
     """We need to add a task module to the sys.modules. See #373 and #374."""
     source = """
@@ -345,7 +333,6 @@ def task_my_task():
     assert outcome == CollectionOutcome.SUCCESS
 
 
-@pytest.mark.end_to_end
 def test_collect_string_product_raises_error_with_annotation(runner, tmp_path):
     """The string is not converted to a path."""
     source = """
@@ -360,7 +347,6 @@ def task_write_text(out: Annotated[str, Product] = "out.txt") -> None:
     assert result.exit_code == ExitCode.FAILED
 
 
-@pytest.mark.end_to_end
 def test_setting_name_for_path_node_via_annotation(tmp_path):
     source = """
     from pathlib import Path
@@ -380,7 +366,6 @@ def task_example(
     assert product.name == "product"
 
 
-@pytest.mark.end_to_end
 def test_error_when_dependency_is_defined_in_kwargs_and_annotation(runner, tmp_path):
     source = """
     from pathlib import Path
@@ -400,7 +385,6 @@ def task_example(
     assert "ValueError: The value for the parameter 'in_'" in result.output
 
 
-@pytest.mark.end_to_end
 def test_error_when_product_is_defined_in_kwargs_and_annotation(runner, tmp_path):
     source = """
     from pathlib import Path
@@ -419,7 +403,6 @@ def task_example(path: Annotated[Path, Product, node]) -> None:
     assert "ValueError: The value for the parameter 'path'" in result.output
 
 
-@pytest.mark.end_to_end
 def test_error_when_using_kwargs_and_node_in_annotation(runner, tmp_path):
     source = """
     from pathlib import Path
@@ -436,7 +419,6 @@ def task_example(path: Annotated[Path, Path("file.txt"), Product]) -> None: ...
     assert "is defined twice" in result.output
 
 
-@pytest.mark.end_to_end
 @pytest.mark.parametrize(
     "node",
     [
@@ -458,7 +440,6 @@ def task_example(path = {node}): ...
     assert all(i in result.output for i in ("only", "files", "are", "allowed"))
 
 
-@pytest.mark.end_to_end
 @pytest.mark.parametrize(
     "node",
     [
@@ -482,7 +463,6 @@ def task_example(path: Annotated[Any, Product] = {node}): ...
     assert all(i in result.output for i in ("only", "files", "are", "allowed"))
 
 
-@pytest.mark.end_to_end
 @pytest.mark.parametrize(
     "node",
     [
@@ -508,7 +488,6 @@ def task_example() -> Annotated[str, {node}]:
     assert session.tasks[0].produces["return"].name == tmp_path.name + "/file.txt"
 
 
-@pytest.mark.end_to_end
 def test_error_when_return_annotation_cannot_be_parsed(runner, tmp_path):
     source = """
     from typing import Annotated
@@ -522,7 +501,6 @@ def task_example() -> Annotated[int, 1]: ...
     assert "The return annotation of the task" in result.output
 
 
-@pytest.mark.end_to_end
 def test_scheduling_w_mixed_priorities(runner, tmp_path):
     source = """
     import pytask
@@ -540,7 +518,6 @@ def task_mixed(): pass
     assert "The task cannot have" in result.output
 
 
-@pytest.mark.end_to_end
 def test_module_can_be_collected(runner, tmp_path):
     source = """
     from pytask import Task, TaskWithoutPath, mark
@@ -557,7 +534,6 @@ def __getattr__(self, name):
     assert "attr_that_definitely_does_not_exist" not in result.output
 
 
-@pytest.mark.end_to_end
 @pytest.mark.parametrize(
     "second_node", ["PythonNode()", "PathNode(path=Path('a.txt'))"]
 )
@@ -578,7 +554,6 @@ def task_example(
     assert "Parameter 'dependency' has multiple node annot" in result.output
 
 
-@pytest.mark.end_to_end
 def test_error_if_multiple_return_annotations_are_used(runner, tmp_path):
     source = """
     from pytask import task
@@ -595,7 +570,6 @@ def task_example() -> Annotated[str, Path("file.txt")]: ...
     assert "The task uses multiple ways to parse" in result.output
 
 
-@pytest.mark.end_to_end
 def test_print_warning_if_non_matching_path_is_passed(runner, tmp_path):
     tmp_path.joinpath("task.py").write_text("def task_example(): pass")
     result = runner.invoke(cli, [tmp_path.as_posix()])
diff --git a/tests/test_collect_command.py b/tests/test_collect_command.py
index 1f6a14b9..a615648c 100644
--- a/tests/test_collect_command.py
+++ b/tests/test_collect_command.py
@@ -17,7 +17,6 @@
 from tests.conftest import enter_directory
 
 
-@pytest.mark.end_to_end
 def test_collect_task(runner, tmp_path):
     source = """
     from pathlib import Path
@@ -50,7 +49,6 @@ def task_example(path=Path("in.txt"), produces=Path("out.txt")): ...
     assert "out.txt>" in captured
 
 
-@pytest.mark.end_to_end
 def test_collect_task_new_interface(runner, tmp_path):
     source = """
     from pathlib import Path
@@ -84,7 +82,6 @@ def task_example(depends_on=Path("in.txt"), arg=1, produces=Path("out.txt")): ..
     assert "arg" in captured
 
 
-@pytest.mark.end_to_end
 def test_collect_task_in_root_dir(runner, tmp_path):
     source = """
     from pathlib import Path
@@ -105,7 +102,6 @@ def task_example(path=Path("in.txt"), produces=Path("out.txt")): ...
     assert "task_example>" in captured
 
 
-@pytest.mark.end_to_end
 def test_collect_parametrized_tasks(runner, tmp_path):
     source = """
     from pytask import task
@@ -131,7 +127,6 @@ def task_example(depends_on=Path("in.txt"), arg=arg, produces=produces):
     assert "[depends_on1-1-out_1.txt]>" in captured
 
 
-@pytest.mark.end_to_end
 def test_collect_task_with_expressions(runner, tmp_path):
     source = """
     from pathlib import Path
@@ -168,7 +163,6 @@ def task_example_2(path=Path("in_2.txt"), produces=Path("out_2.txt")): ...
     assert "out_1.txt>" in captured
 
 
-@pytest.mark.end_to_end
 def test_collect_task_with_marker(runner, tmp_path):
     source = """
     import pytask
@@ -215,7 +209,6 @@ def task_example_2(path=Path("in_2.txt"), produces=Path("out_2.txt")): ...
     assert "out_1.txt>" in captured
 
 
-@pytest.mark.end_to_end
 def test_collect_task_with_ignore_from_config(runner, tmp_path):
     source = """
     from pathlib import Path
@@ -263,7 +256,6 @@ def task_example_2(path=Path("in_2.txt"), produces=Path("out_2.txt")): ...
     assert "out_1.txt>" in captured
 
 
-@pytest.mark.end_to_end
 def test_collect_task_with_ignore_from_cli(runner, tmp_path):
     source = """
     from pathlib import Path
@@ -322,7 +314,6 @@ def state(self): ...
 def function(depends_on, produces): ...
 
 
-@pytest.mark.unit
 def test_print_collected_tasks_without_nodes(capsys):
     dictionary = {
         Path("task_path.py"): [
@@ -345,7 +336,6 @@ def test_print_collected_tasks_without_nodes(capsys):
     assert "" not in captured
 
 
-@pytest.mark.unit
 def test_print_collected_tasks_with_nodes(capsys):
     dictionary = {
         Path("task_path.py"): [
@@ -369,7 +359,6 @@ def test_print_collected_tasks_with_nodes(capsys):
     assert "" in captured
 
 
-@pytest.mark.unit
 @pytest.mark.parametrize(("show_nodes", "expected_add"), [(False, "src"), (True, "..")])
 def test_find_common_ancestor_of_all_nodes(show_nodes, expected_add):
     tasks = [
@@ -392,7 +381,6 @@ def test_find_common_ancestor_of_all_nodes(show_nodes, expected_add):
     assert result == Path.cwd().joinpath(expected_add).resolve()
 
 
-@pytest.mark.end_to_end
 def test_task_name_is_shortened(runner, tmp_path):
     tmp_path.joinpath("a", "b").mkdir(parents=True)
     tmp_path.joinpath("a", "b", "task_example.py").write_text("def task_example(): ...")
@@ -404,7 +392,6 @@ def test_task_name_is_shortened(runner, tmp_path):
     assert "a/b/task_example.py::task_example" not in result.output
 
 
-@pytest.mark.end_to_end
 def test_python_node_is_collected(runner, tmp_path):
     source = """
     from pytask import Product
@@ -429,7 +416,6 @@ def task_example(
     assert "Product" in captured
 
 
-@pytest.mark.end_to_end
 def test_none_is_a_python_node(runner, tmp_path):
     source = """
     from pytask import Product
@@ -454,7 +440,6 @@ def task_example(
     assert "Product" in captured
 
 
-@pytest.mark.end_to_end
 def test_python_nodes_are_aggregated_into_one(runner, tmp_path):
     source = """
     from pytask import Product
@@ -480,7 +465,6 @@ def task_example(
     assert "Product" in captured
 
 
-@pytest.mark.end_to_end
 def test_node_protocol_for_custom_nodes(runner, tmp_path):
     source = """
     from typing import Annotated
@@ -513,7 +497,6 @@ def task_example(
     assert "" in result.output
 
 
-@pytest.mark.end_to_end
 def test_node_protocol_for_custom_nodes_with_paths(runner, tmp_path):
     source = """
     from typing import Annotated
@@ -558,7 +541,6 @@ def task_example(
     assert "in.pkl" in result.output
 
 
-@pytest.mark.end_to_end
 def test_setting_name_for_python_node_via_annotation(runner, tmp_path):
     source = """
     from pathlib import Path
@@ -579,7 +561,6 @@ def task_example(
     assert "Dependency" in result.output
 
 
-@pytest.mark.end_to_end
 def test_more_nested_pytree_and_python_node_as_return(runner, snapshot_cli, tmp_path):
     source = """
     from pathlib import Path
@@ -604,7 +585,6 @@ def task_example() -> Annotated[Dict[str, str], nodes]:
     assert result.output == snapshot_cli()
 
 
-@pytest.mark.end_to_end
 def test_more_nested_pytree_and_python_node_as_return_with_names(
     runner, snapshot_cli, tmp_path
 ):
@@ -631,7 +611,6 @@ def task_example() -> Annotated[Dict[str, str], nodes]:
     assert result.output == snapshot_cli()
 
 
-@pytest.mark.end_to_end
 @pytest.mark.parametrize(
     "node_def",
     [
@@ -671,7 +650,6 @@ def task_example({node_def}: ...
     assert "/*.txt>" in captured
 
 
-@pytest.mark.end_to_end
 def test_collect_task_with_provisional_dependencies(runner, tmp_path):
     source = """
     from typing import Annotated
@@ -691,7 +669,6 @@ def task_example(
     assert "[ab].txt" in result.output
 
 
-@pytest.mark.end_to_end
 def test_collect_custom_node_receives_default_name(runner, tmp_path):
     source = """
     from typing import Annotated
diff --git a/tests/test_collect_utils.py b/tests/test_collect_utils.py
index a2f95735..a48ef9d9 100644
--- a/tests/test_collect_utils.py
+++ b/tests/test_collect_utils.py
@@ -2,13 +2,10 @@
 
 from typing import Annotated
 
-import pytest
-
 from _pytask.collect_utils import _find_args_with_product_annotation
 from pytask import Product
 
 
-@pytest.mark.unit
 def test_find_args_with_product_annotation():
     def func(
         a: Annotated[int, Product], b: float, c, d: Annotated[int, float]
diff --git a/tests/test_compat.py b/tests/test_compat.py
index 96f084c2..08e72fe8 100644
--- a/tests/test_compat.py
+++ b/tests/test_compat.py
@@ -11,7 +11,6 @@
 from pytask import import_optional_dependency
 
 
-@pytest.mark.unit
 @pytest.mark.parametrize(
     ("name", "extra", "errors", "caller", "expectation", "expected"),
     [
@@ -79,7 +78,6 @@ def test_check_for_optional_program(  # noqa: PLR0913
     assert program_exists is expected
 
 
-@pytest.mark.unit
 def test_import_optional():
     match = "pytask requires .*notapackage.* pip .* conda .* 'notapackage'"
     with pytest.raises(ImportError, match=match) as exc_info:
@@ -91,13 +89,11 @@ def test_import_optional():
     assert result is None
 
 
-@pytest.mark.unit
 def test_sqlalchemy_version_fallback():
     pytest.importorskip("sqlalchemy")
     import_optional_dependency("sqlalchemy")
 
 
-@pytest.mark.unit
 def test_bad_version(monkeypatch):
     name = "fakemodule"
     module = types.ModuleType(name)
@@ -122,7 +118,6 @@ def test_bad_version(monkeypatch):
     assert result is module
 
 
-@pytest.mark.unit
 def test_submodule(monkeypatch):
     # Create a fake module with a submodule
     name = "fakemodule"
@@ -148,7 +143,6 @@ def test_submodule(monkeypatch):
     assert result is submodule
 
 
-@pytest.mark.unit
 def test_no_version_raises(monkeypatch):
     name = "fakemodule"
     module = types.ModuleType(name)
diff --git a/tests/test_config.py b/tests/test_config.py
index 07ab99ce..c46f5edb 100644
--- a/tests/test_config.py
+++ b/tests/test_config.py
@@ -12,7 +12,6 @@
 from tests.conftest import run_in_subprocess
 
 
-@pytest.mark.end_to_end
 def test_debug_pytask(capsys, tmp_path):
     session = build(paths=tmp_path, debug_pytask=True)
@@ -28,7 +27,6 @@ def test_debug_pytask(capsys, tmp_path):
     assert "finish pytask_execute --> None [hook]" in captured.out
 
 
-@pytest.mark.end_to_end
 def test_pass_config_to_cli(tmp_path):
     config = """
     [tool.pytask.ini_options]
@@ -42,7 +40,6 @@ def test_pass_config_to_cli(tmp_path):
     assert "elton" in session.config["markers"]
 
 
-@pytest.mark.end_to_end
 @pytest.mark.parametrize(
     "file_or_folder",
     ["folder_a", "folder_a/task_a.py", "folder_b", "folder_b/task_b.py"],
 )
@@ -66,7 +63,6 @@ def test_passing_paths_via_configuration_file(tmp_path, file_or_folder):
     assert len(session.tasks) == 1
 
 
-@pytest.mark.end_to_end
 def test_not_existing_path_in_config(runner, tmp_path):
     config = """
     [tool.pytask.ini_options]
@@ -78,7 +74,6 @@ def test_not_existing_path_in_config(runner, tmp_path):
     assert result.exit_code == ExitCode.CONFIGURATION_FAILED
 
 
-@pytest.mark.end_to_end
 def test_paths_are_relative_to_configuration_file_cli(tmp_path):
     tmp_path.joinpath("src").mkdir()
     tmp_path.joinpath("tasks").mkdir()
@@ -96,7 +91,6 @@ def test_paths_are_relative_to_configuration_file_cli(tmp_path):
     assert "1 Succeeded" in result.stdout
 
 
-@pytest.mark.end_to_end
 @pytest.mark.skipif(
     sys.platform == "win32" and os.environ.get("CI") == "true",
     reason="Windows does not pick up the right Python interpreter.",
 )
@@ -125,7 +119,6 @@ def test_paths_are_relative_to_configuration_file(tmp_path):
     assert "1 Succeeded" in result.stdout
 
 
-@pytest.mark.end_to_end
 def test_create_gitignore_file_in_pytask_directory(tmp_path):
     session = build(paths=tmp_path)
diff --git a/tests/test_config_utils.py b/tests/test_config_utils.py
index 2d0a460c..9378962f 100644
--- a/tests/test_config_utils.py
+++ b/tests/test_config_utils.py
@@ -7,7 +7,6 @@
 from _pytask.config_utils import find_project_root_and_config
 
 
-@pytest.mark.unit
 @pytest.mark.parametrize(
     ("config_filename", "paths", "expected_root", "expected_config"),
     [
@@ -44,7 +43,6 @@ def test_find_project_root_and_config(
     assert config == tmp_path.joinpath(expected_config)
 
 
-@pytest.mark.unit
 @pytest.mark.parametrize(
     ("paths", "expected_root", "expected_config"),
     [(None, Path.cwd(), None), (["/mnt/home/", "C:/Users/"], Path.cwd(), None)],
 )
@@ -57,7 +55,6 @@ def test_find_project_root_and_config_w_no_intersecting_paths(
     assert config == expected_config
 
 
-@pytest.mark.unit
 @pytest.mark.parametrize(
     ("vc_folder", "path", "expected"),
     [
diff --git a/tests/test_console.py b/tests/test_console.py
index f9cd682a..d38cb60d 100644
--- a/tests/test_console.py
+++ b/tests/test_console.py
@@ -33,7 +33,6 @@ def task_func(): ...
 _SOURCE_LINE_TASK_FUNC = inspect.getsourcelines(task_func)[1]
 
 
-@pytest.mark.unit
 @pytest.mark.parametrize(
     ("edtior_url_scheme", "expected"),
     [
@@ -53,7 +52,6 @@ def test_create_url_style_for_task(edtior_url_scheme, expected):
     assert style == Style.parse(expected.format(path=path))
 
 
-@pytest.mark.unit
 @pytest.mark.parametrize(
     ("edtior_url_scheme", "expected"),
     [
@@ -73,7 +71,6 @@ def test_create_url_style_for_path(edtior_url_scheme, expected):
     assert style == Style.parse(expected.format(path=path))
 
 
-@pytest.mark.unit
 @pytest.mark.parametrize(
     ("outcome", "outcome_enum", "total_description"),
     [(outcome, TaskOutcome, "description") for outcome in TaskOutcome]
@@ -93,7 +90,6 @@ def test_create_summary_panel(capsys, outcome, outcome_enum, total_description):
     assert "description" in captured
 
 
-@pytest.mark.unit
 @pytest.mark.parametrize(
     ("color_system", "text", "strip_styles", "expected"),
     [
@@ -111,7 +107,6 @@ def test_render_to_string(color_system, text, strip_styles, expected):
 _THIS_FILE = Path(__file__)
 
 
-@pytest.mark.unit
 @pytest.mark.parametrize(
     (
         "base_name",
@@ -145,7 +140,6 @@ def test_format_task_id(
 _ROOT = Path.cwd()
 
 
-@pytest.mark.integration
 @pytest.mark.parametrize(
     ("node", "paths", "expectation", "expected"),
     [
@@ -188,7 +182,6 @@ def test_reduce_node_name(node, paths, expectation, expected):
 exec("__unknown_lambda = lambda x: x")  # noqa: S102
 
 
-@pytest.mark.unit
 @pytest.mark.parametrize(
     ("task_func", "skipped_paths", "expected"),
     [
@@ -212,7 +205,6 @@ def test_get_file(task_func, skipped_paths, expected):
     assert result == expected
 
 
-@pytest.mark.unit
 @pytest.mark.parametrize(
     ("task_func", "expected"),
     [
diff --git a/tests/test_dag.py b/tests/test_dag.py
index aebf4d49..5503511f 100644
--- a/tests/test_dag.py
+++ b/tests/test_dag.py
@@ -14,7 +14,6 @@
 from pytask import cli
 
 
-@pytest.mark.unit
 @pytest.mark.skipif(sys.platform == "win32", reason="Hashes match only on unix.")
 def test_create_dag():
     root = Path("src")
@@ -37,7 +36,6 @@ def test_create_dag():
     assert signature in dag.nodes
 
 
-@pytest.mark.end_to_end
 def test_cycle_in_dag(tmp_path, runner, snapshot_cli):
     source = """
     from pathlib import Path
@@ -57,7 +55,6 @@ def task_2(path = Path("out_1.txt"), produces = Path("out_2.txt")):
     assert result.output == snapshot_cli()
 
 
-@pytest.mark.end_to_end
 def test_two_tasks_have_the_same_product(tmp_path, runner, snapshot_cli):
     source = """
     from pathlib import Path
@@ -77,7 +74,6 @@ def task_2(produces = Path("out.txt")):
     assert result.output == snapshot_cli()
 
 
-@pytest.mark.end_to_end
 def test_has_node_changed_catches_notnotfounderror(runner, tmp_path):
     """Missing nodes raise NodeNotFoundError when they do not exist and their state
     is requested."""
@@ -98,7 +94,6 @@ def task_example(produces = Path("file.txt")):
     assert result.exit_code == ExitCode.OK
 
 
-@pytest.mark.end_to_end
 def test_python_nodes_are_unique(tmp_path):
     tmp_path.joinpath("a").mkdir()
     tmp_path.joinpath("a", "task_example.py").write_text("def task_example(a=1): pass")
diff --git a/tests/test_dag_command.py b/tests/test_dag_command.py
index bcde3370..db3465b6 100644
--- a/tests/test_dag_command.py
+++ b/tests/test_dag_command.py
@@ -27,7 +27,6 @@
 _TEST_FORMATS = ["dot", "pdf", "png", "jpeg", "svg"]
 
 
-@pytest.mark.end_to_end
 @pytest.mark.skipif(not _TEST_SHOULD_RUN, reason="pygraphviz is required")
 @pytest.mark.parametrize("layout", _GRAPH_LAYOUTS)
 @pytest.mark.parametrize("format_", _TEST_FORMATS)
@@ -62,7 +61,6 @@ def task_example(path=Path("input.txt")): ...
     assert tmp_path.joinpath(f"dag.{format_}").exists()
 
 
-@pytest.mark.end_to_end
 @pytest.mark.skipif(not _TEST_SHOULD_RUN, reason="pygraphviz is required")
 @pytest.mark.parametrize("layout", _GRAPH_LAYOUTS)
 @pytest.mark.parametrize("format_", _TEST_FORMATS)
@@ -105,7 +103,6 @@ def _raise_exc(exc):
     raise exc
 
 
-@pytest.mark.end_to_end
 def test_raise_error_with_graph_via_cli_missing_optional_dependency(
     monkeypatch, tmp_path, runner
 ):
@@ -135,7 +132,6 @@ def task_example(path=Path("input.txt")): ...
     assert not tmp_path.joinpath("dag.png").exists()
 
 
-@pytest.mark.end_to_end
 def test_raise_error_with_graph_via_task_missing_optional_dependency(
     monkeypatch, tmp_path, runner
 ):
@@ -167,7 +163,6 @@ def task_create_graph():
     assert not tmp_path.joinpath("dag.png").exists()
 
 
-@pytest.mark.end_to_end
 def test_raise_error_with_graph_via_cli_missing_optional_program(
     monkeypatch, tmp_path, runner
 ):
@@ -197,7 +192,6 @@ def task_example(path=Path("input.txt")): ...
     assert not tmp_path.joinpath("dag.png").exists()
 
 
-@pytest.mark.end_to_end
 def test_raise_error_with_graph_via_task_missing_optional_program(
     monkeypatch, tmp_path, runner
 ):
diff --git a/tests/test_dag_utils.py b/tests/test_dag_utils.py
index 602cb218..aaeefa41 100644
--- a/tests/test_dag_utils.py
+++ b/tests/test_dag_utils.py
@@ -29,7 +29,6 @@ def dag():
     return dag
 
 
-@pytest.mark.unit
 def test_sort_tasks_topologically(dag):
     sorter = TopologicalSorter.from_dag(dag)
     topo_ordering = []
@@ -41,7 +40,6 @@ def test_sort_tasks_topologically(dag):
     assert topo_names == [f".::{i}" for i in range(5)]
 
 
-@pytest.mark.unit
 def test_descending_tasks(dag):
     for i in range(5):
         task = next(
@@ -54,7 +52,6 @@ def test_descending_tasks(dag):
         assert descendant_names == [f".::{i}" for i in range(i + 1, 5)]
 
 
-@pytest.mark.unit
 def test_task_and_descending_tasks(dag):
     for i in range(5):
         task = next(
@@ -67,7 +64,6 @@ def test_task_and_descending_tasks(dag):
         assert descendant_names == [f".::{i}" for i in range(i, 5)]
 
 
-@pytest.mark.unit
 def test_node_and_neighbors(dag):
     for i in range(1, 4):
         task = next(
@@ -80,7 +76,6 @@ def test_node_and_neighbors(dag):
         assert node_names == [f".::{j}" for j in range(i - 1, i + 2)]
 
 
-@pytest.mark.unit
 @pytest.mark.parametrize(
     ("tasks", "expectation", "expected"),
     [
@@ -147,14 +142,12 @@ def test_extract_priorities_from_tasks(tasks, expectation, expected):
     assert result == expected
 
 
-@pytest.mark.unit
 def test_raise_error_for_undirected_graphs(dag):
     undirected_graph = dag.to_undirected()
     with pytest.raises(ValueError, match="Only directed graphs have a"):
         TopologicalSorter.from_dag(undirected_graph)
 
 
-@pytest.mark.unit
 def test_raise_error_for_cycle_in_graph(dag):
     dag.add_edge(
         "115f685b0af2aef0c7317a0b48562f34cfb7a622549562bd3d34d4d948b4fdab",
@@ -164,14 +157,12 @@ def test_raise_error_for_cycle_in_graph(dag):
         TopologicalSorter.from_dag(dag)
 
 
-@pytest.mark.unit
 def test_ask_for_invalid_number_of_ready_tasks(dag):
     scheduler = TopologicalSorter.from_dag(dag)
     with pytest.raises(ValueError, match="'n' must be"):
         scheduler.get_ready(0)
 
 
-@pytest.mark.unit
 def test_instantiate_sorter_from_other_sorter(dag):
     name_to_sig = {dag.nodes[sig]["task"].name: sig for sig in dag.nodes}
diff --git a/tests/test_data_catalog.py b/tests/test_data_catalog.py
index 7f326263..4ada810c 100644
--- a/tests/test_data_catalog.py
+++ b/tests/test_data_catalog.py
@@ -21,13 +21,11 @@
 IS_PEXPECT_INSTALLED = True
 
 
-@pytest.mark.unit
 def test_data_catalog_knows_path_where_it_is_defined():
     data_catalog = DataCatalog()
     assert Path(__file__).parent == data_catalog._instance_path
 
 
-@pytest.mark.unit
 def test_data_catalog_collects_nodes():
     data_catalog = DataCatalog()
@@ -38,14 +36,12 @@ def test_data_catalog_collects_nodes():
     assert isinstance(data_catalog["node"], PathNode)
 
 
-@pytest.mark.unit
 def test_change_default_node():
     data_catalog = DataCatalog(default_node=PythonNode)
     default_node = data_catalog["new_default_node"]
     assert isinstance(default_node, PythonNode)
 
 
-@pytest.mark.end_to_end
 def test_use_data_catalog_in_workflow(runner, tmp_path):
     source = """
     from pathlib import Path
@@ -85,7 +81,6 @@ def task_save_text(
     )
 
 
-@pytest.mark.end_to_end
 def test_use_data_catalog_w_config(runner, tmp_path):
     source = """
     from pathlib import Path
@@ -118,7 +113,6 @@ def _flush(child):
     assert not child.isalive()
 
 
-@pytest.mark.end_to_end
 @pytest.mark.skipif(not IS_PEXPECT_INSTALLED, reason="pexpect is not installed.")
 @pytest.mark.skipif(sys.platform == "win32", reason="pexpect cannot spawn on Windows.")
 def test_use_data_catalog_in_terminal(runner, tmp_path):
@@ -149,7 +143,6 @@ def task_add_content() -> Annotated[str, data_catalog["new_content"]]:
     _flush(child)
 
 
-@pytest.mark.end_to_end
 def test_use_data_catalog_with_different_name(runner, tmp_path):
     source = """
     from pathlib import Path
@@ -170,7 +163,6 @@ def task_add_content() -> Annotated[str, data_catalog["new_content"]]:
     )
 
 
-@pytest.mark.end_to_end
 def test_use_data_catalog_with_different_path(runner, tmp_path):
     source = """
     from pathlib import Path
@@ -189,27 +181,23 @@ def task_add_content() -> Annotated[str, data_catalog["new_content"]]:
     assert len(list(tmp_path.joinpath(".data").iterdir())) == 2
 
 
-@pytest.mark.unit
 def test_error_when_name_of_node_is_not_string():
     data_catalog = DataCatalog()
     with pytest.raises(TypeError, match="The name of a catalog entry"):
         data_catalog.add(True, Path("file.txt"))
 
 
-@pytest.mark.unit
 def test_requesting_new_node_with_python_node_as_default():
     data_catalog = DataCatalog(default_node=PythonNode)
     assert isinstance(data_catalog["node"], PythonNode)
 
 
-@pytest.mark.unit
 def test_adding_a_python_node():
     data_catalog = DataCatalog()
     data_catalog.add("node", PythonNode(name="node", value=1))
     assert isinstance(data_catalog["node"], PythonNode)
 
 
-@pytest.mark.end_to_end
 def test_use_data_catalog_with_provisional_node(runner, tmp_path):
     source = """
     from pathlib import Path
@@ -237,7 +225,6 @@ def task_add_content(
     assert tmp_path.joinpath("output.txt").read_text() == "Hello, World!"
 
 
-@pytest.mark.end_to_end
 def test_data_catalog_has_invalid_name(runner, tmp_path):
     source = """
     from pytask import DataCatalog
diff --git a/tests/test_database.py b/tests/test_database.py
index f237908b..70981d91 100644
--- a/tests/test_database.py
+++ b/tests/test_database.py
@@ -2,7 +2,6 @@
 
 import textwrap
 
-import pytest
 from sqlalchemy.engine import make_url
 
 from pytask import DatabaseSession
@@ -14,7 +13,6 @@
 from pytask.path import hash_path
 
 
-@pytest.mark.end_to_end
 def test_existence_of_hashes_in_db(tmp_path):
     """Modification dates of input and output files are stored in database."""
     source = """
@@ -53,7 +51,6 @@ def task_write(path=Path("in.txt"), produces=Path("out.txt")):
     assert hash_ == hash_path(path, path.stat().st_mtime)
 
 
-@pytest.mark.end_to_end
 def test_rename_database_w_config(tmp_path, runner):
     """Modification dates of input and output files are stored in database."""
     path_to_db = tmp_path.joinpath(".db.sqlite")
@@ -65,7 +62,6 @@ def test_rename_database_w_config(tmp_path, runner):
     assert path_to_db.exists()
 
 
-@pytest.mark.end_to_end
 def test_rename_database_w_cli(tmp_path, runner):
     """Modification dates of input and output files are stored in database."""
     path_to_db = tmp_path.joinpath(".db.sqlite")
diff --git a/tests/test_debugging.py b/tests/test_debugging.py
index 5c7d8266..fc38a71b 100644
--- a/tests/test_debugging.py
+++ b/tests/test_debugging.py
@@ -27,7 +27,6 @@ def _escape_ansi(line):
     return ansi_escape.sub("", line)
 
 
-@pytest.mark.unit
 @pytest.mark.parametrize(
     ("value", "expected", "expectation"),
     [
@@ -52,7 +51,6 @@ def _flush(child):
     assert not child.isalive()
 
 
-@pytest.mark.end_to_end
 @pytest.mark.skipif(not IS_PEXPECT_INSTALLED, reason="pexpect is not installed.")
 @pytest.mark.skipif(sys.platform == "win32", reason="pexpect cannot spawn on Windows.")
 def test_post_mortem_on_error(tmp_path):
@@ -72,7 +70,6 @@ def task_example():
     _flush(child)
 
 
-@pytest.mark.end_to_end
 @pytest.mark.skipif(not IS_PEXPECT_INSTALLED, reason="pexpect is not installed.")
 @pytest.mark.skipif(sys.platform == "win32", reason="pexpect cannot spawn on Windows.")
 def test_post_mortem_on_error_w_kwargs(tmp_path):
@@ -94,7 +91,6 @@ def task_example(path=Path("in.txt")):
     _flush(child)
 
 
-@pytest.mark.end_to_end
 @pytest.mark.skipif(not IS_PEXPECT_INSTALLED, reason="pexpect is not installed.")
 @pytest.mark.skipif(sys.platform == "win32", reason="pexpect cannot spawn on Windows.")
 def test_trace(tmp_path):
@@ -112,7 +108,6 @@ def task_example():
     _flush(child)
 
 
-@pytest.mark.end_to_end
 @pytest.mark.skipif(not IS_PEXPECT_INSTALLED, reason="pexpect is not installed.")
 @pytest.mark.skipif(sys.platform == "win32", reason="pexpect cannot spawn on Windows.")
 def test_trace_w_kwargs(tmp_path):
@@ -133,7 +128,6 @@ def task_example(path=Path("in.txt")):
     _flush(child)
 
 
-@pytest.mark.end_to_end
 @pytest.mark.skipif(not IS_PEXPECT_INSTALLED, reason="pexpect is not installed.")
 @pytest.mark.skipif(sys.platform == "win32", reason="pexpect cannot spawn on Windows.")
 def test_breakpoint(tmp_path):
@@ -152,7 +146,6 @@ def task_example():
     _flush(child)
 
 
-@pytest.mark.end_to_end
 @pytest.mark.skipif(not IS_PEXPECT_INSTALLED, reason="pexpect is not installed.")
 @pytest.mark.skipif(sys.platform == "win32", reason="pexpect cannot spawn on Windows.")
 def test_pdb_set_trace(tmp_path):
@@ -172,7 +165,6 @@ def task_example():
     _flush(child)
 
 
-@pytest.mark.end_to_end
 @pytest.mark.xfail(os.environ.get("CI") == "true", reason="#312")
 @pytest.mark.skipif(not IS_PEXPECT_INSTALLED, reason="pexpect is not installed.")
 @pytest.mark.skipif(sys.platform == "win32", reason="pexpect cannot spawn on Windows.")
@@ -204,7 +196,6 @@ def task_1():
     _flush(child)
 
 
-@pytest.mark.end_to_end
 @pytest.mark.skipif(not IS_PEXPECT_INSTALLED, reason="pexpect is not installed.")
 @pytest.mark.skipif(sys.platform == "win32", reason="pexpect cannot spawn on Windows.")
 def test_pdb_set_trace_kwargs(tmp_path):
@@ -234,7 +225,6 @@ def task_1():
     _flush(child)
 
 
-@pytest.mark.end_to_end
 @pytest.mark.skipif(not IS_PEXPECT_INSTALLED, reason="pexpect is not installed.")
 @pytest.mark.skipif(sys.platform == "win32", reason="pexpect cannot spawn on Windows.")
 def test_pdb_set_trace_interception(tmp_path):
@@ -261,7 +251,6 @@ def task_1():
     _flush(child)
 
 
-@pytest.mark.end_to_end
 @pytest.mark.skipif(not IS_PEXPECT_INSTALLED, reason="pexpect is not installed.")
 @pytest.mark.skipif(sys.platform == "win32", reason="pexpect cannot spawn on Windows.")
 def test_set_trace_capturing_afterwards(tmp_path):
@@ -284,7 +273,6 @@ def task_2():
     _flush(child)
 
 
-@pytest.mark.end_to_end
 @pytest.mark.xfail(os.environ.get("CI") == "true", reason="#312")
 @pytest.mark.skipif(not IS_PEXPECT_INSTALLED, reason="pexpect is not installed.")
 @pytest.mark.skipif(sys.platform == "win32", reason="pexpect cannot spawn on Windows.")
@@ -325,7 +313,6 @@ def task_1():
     _flush(child)
 
 
-@pytest.mark.end_to_end
 @pytest.mark.skipif(not IS_PEXPECT_INSTALLED, reason="pexpect is not installed.")
 @pytest.mark.skipif(sys.platform == "win32", reason="pexpect cannot spawn on Windows.")
 def test_pdb_with_injected_do_debug(tmp_path):
@@ -407,7 +394,6 @@ def task_1():
     _flush(child)
 
 
-@pytest.mark.end_to_end
 @pytest.mark.skipif(not IS_PEXPECT_INSTALLED, reason="pexpect is not installed.")
 @pytest.mark.skipif(sys.platform == "win32", reason="pexpect cannot spawn on Windows.")
 def test_pdb_without_capture(tmp_path):
@@ -427,7 +413,6 @@ def task_1():
     _flush(child)
 
 
-@pytest.mark.end_to_end
 @pytest.mark.skipif(not IS_PEXPECT_INSTALLED, reason="pexpect is not installed.")
 @pytest.mark.skipif(sys.platform == "win32", reason="pexpect cannot spawn on Windows.")
 def test_pdb_used_outside_task(tmp_path):
@@ -448,7 +433,6 @@ def test_pdb_used_outside_task(tmp_path):
     _flush(child)
 
 
-@pytest.mark.end_to_end
 def test_printing_of_local_variables(tmp_path, runner):
     source = """
     def task_example():
@@ -470,7 +454,6 @@ def helper():
     assert "b = 2" in captured
 
 
-@pytest.mark.end_to_end
 @pytest.mark.skipif(not IS_PEXPECT_INSTALLED, reason="pexpect is not installed.")
 @pytest.mark.skipif(sys.platform == "win32", reason="pexpect cannot spawn on Windows.")
 def test_set_trace_is_returned_after_pytask_finishes(tmp_path):
@@ -492,7 +475,6 @@ def test_function():
     _flush(child)
 
 
-@pytest.mark.end_to_end
 @pytest.mark.skipif(not IS_PEXPECT_INSTALLED, reason="pexpect is not installed.")
 @pytest.mark.skipif(sys.platform == "win32", reason="pexpect cannot spawn on Windows.")
 def test_pdb_with_task_that_returns(tmp_path, runner):
@@ -510,7 +492,6 @@ def task_example() -> Annotated[str, Path("data.txt")]:
     assert tmp_path.joinpath("data.txt").read_text() == "1"
 
 
-@pytest.mark.end_to_end
 @pytest.mark.skipif(not IS_PEXPECT_INSTALLED, reason="pexpect is not installed.")
 @pytest.mark.skipif(sys.platform == "win32", reason="pexpect cannot spawn on Windows.")
 def test_trace_with_task_that_returns(tmp_path):
diff --git a/tests/test_dry_run.py b/tests/test_dry_run.py
index acc6a881..c9062149 100644
--- a/tests/test_dry_run.py
+++ b/tests/test_dry_run.py
@@ -2,13 +2,10 @@
 
 import textwrap
 
-import pytest
-
 from pytask import ExitCode
 from pytask import cli
 
 
-@pytest.mark.end_to_end
 def test_dry_run(runner, tmp_path):
     source = """
     from pathlib import Path
@@ -24,7 +21,6 @@ def task_example(produces=Path("out.txt")): produces.touch()
     assert not tmp_path.joinpath("out.txt").exists()
 
 
-@pytest.mark.end_to_end
 def test_dry_run_w_subsequent_task(runner, tmp_path):
     """Subsequent tasks would be executed if their previous task changed."""
     source = """
@@ -59,7 +55,6 @@ def task_example(produces=Path("out.txt")):
     assert "2 Would be executed" in result.output
 
 
-@pytest.mark.end_to_end
 def test_dry_run_w_subsequent_skipped_task(runner, tmp_path):
     """A skip is more important than a would be run."""
     source_1 = """
@@ -95,7 +90,6 @@ def task_example(path=Path("out.txt"), produces=Path("out_2.txt")):
     assert "1 Skipped" in result.output
 
 
-@pytest.mark.end_to_end
 def test_dry_run_skip(runner, tmp_path):
     source = """
     import pytask
@@ -117,7 +111,6 @@ def task_example(produces=Path("out.txt")):
     assert not tmp_path.joinpath("out.txt").exists()
 
 
-@pytest.mark.end_to_end
 def test_dry_run_skip_all(runner, tmp_path):
     source = """
     import pytask
@@ -137,7 +130,6 @@ def task_example_skip_subsequent(path=Path("out.txt")): ...
     assert "2 Skipped" in result.output
 
 
-@pytest.mark.end_to_end
 def test_dry_run_skipped_successful(runner, tmp_path):
     source = """
     from pathlib import Path
@@ -158,7 +150,6 @@ def task_example(produces=Path("out.txt")):
     assert "1 Skipped because unchanged" in result.output
 
 
-@pytest.mark.end_to_end
 def test_dry_run_persisted(runner, tmp_path):
     source = """
     import pytask
diff --git a/tests/test_execute.py b/tests/test_execute.py
index 1cfe27c1..cc0dab73 100644
--- a/tests/test_execute.py
+++ b/tests/test_execute.py
@@ -23,7 +23,6 @@
 from tests.conftest import run_in_subprocess
 
 
-@pytest.mark.end_to_end
 def test_python_m_pytask(tmp_path):
     tmp_path.joinpath("task_module.py").write_text("def task_example(): pass")
     result = run_in_subprocess(
@@ -32,7 +31,6 @@ def test_python_m_pytask(tmp_path):
     assert result.exit_code == ExitCode.OK
 
 
-@pytest.mark.end_to_end
 def test_execute_w_autocollect(runner, tmp_path):
     tmp_path.joinpath("task_module.py").write_text("def task_example(): pass")
     with enter_directory(tmp_path):
@@ -41,7 +39,6 @@ def test_execute_w_autocollect(runner, tmp_path):
     assert "1 Succeeded" in result.output
 
 
-@pytest.mark.end_to_end
 def test_task_did_not_produce_node(tmp_path):
     source = """
     from pathlib import Path
@@ -57,7 +54,6 @@ def task_example(produces=Path("out.txt")): ...
     assert isinstance(session.execution_reports[0].exc_info[1], NodeNotFoundError)
 
 
-@pytest.mark.end_to_end
 def test_task_did_not_produce_multiple_nodes_and_all_are_shown(runner, tmp_path):
     source = """
     from pathlib import Path
@@ -74,7 +70,6 @@ def task_example(produces=[Path("1.txt"), Path("2.txt")]): ...
     assert "2.txt" in result.output
 
 
-@pytest.mark.end_to_end
 def test_missing_product(runner, tmp_path):
     source = """
     from pathlib import Path
@@ -89,7 +84,6 @@ def task_with_non_path_dependency(path: Annotated[Path, Product]): ...
     assert result.exit_code == ExitCode.FAILED
 
 
-@pytest.mark.end_to_end
 def test_node_not_found_in_task_setup(tmp_path):
     """Test for :class:`_pytask.exceptions.NodeNotFoundError` in task setup.
 
@@ -127,7 +121,6 @@ def task_3(paths = [Path("deleted.txt"), Path("out_2.txt")]):
     assert isinstance(report.exc_info[1], NodeNotFoundError)
 
 
-@pytest.mark.end_to_end
 def test_depends_on_and_produces_can_be_used_in_task(tmp_path):
     source = """
     from pathlib import Path
@@ -145,7 +138,6 @@ def task_example(path=Path("in.txt"), produces=Path("out.txt")):
     assert tmp_path.joinpath("out.txt").read_text() == "Here I am. Once again."
 
 
-@pytest.mark.end_to_end
 @pytest.mark.parametrize("n_failures", [1, 2, 3])
 def test_execution_stops_after_n_failures(tmp_path, n_failures):
     source = """
@@ -161,7 +153,6 @@ def task_3(): raise Exception
     assert len(session.execution_reports) == n_failures
 
 
-@pytest.mark.end_to_end
 @pytest.mark.parametrize("stop_after_first_failure", [False, True])
 def test_execution_stop_after_first_failure(tmp_path, stop_after_first_failure):
     source = """
@@ -177,7 +168,6 @@ def task_3(): raise Exception
     assert len(session.execution_reports) == 1 if stop_after_first_failure else 3
 
 
-@pytest.mark.end_to_end
 def test_scheduling_w_priorities(tmp_path):
     source = """
     import pytask
@@ -200,7 +190,6 @@ def task_y(): pass
     assert session.execution_reports[2].task.name.endswith("task_y")
 
 
-@pytest.mark.end_to_end
 @pytest.mark.parametrize("show_errors_immediately", [True, False])
 def test_show_errors_immediately(runner, tmp_path, show_errors_immediately):
     source = """
@@ -224,7 +213,6 @@ def task_error(): raise ValueError
     assert len(matches_traceback) == 1
 
 
-@pytest.mark.end_to_end
 @pytest.mark.parametrize("verbose", [1, 2])
 def test_traceback_of_previous_task_failed_is_not_shown(runner, tmp_path, verbose):
     source = """
@@ -244,7 +232,6 @@ def task_second(path=Path("in.txt")): ...
     )
 
 
-@pytest.mark.end_to_end
 def test_that_dynamically_creates_tasks_are_captured(runner, tmp_path):
     source = """
     _DEFINITION = '''
@@ -263,7 +250,6 @@ def task_example():
     assert "Collected 1 task" in result.output
 
 
-@pytest.mark.end_to_end
 def test_task_executed_with_force_although_unchanged(tmp_path):
     source = """
     from pytask import task
@@ -280,7 +266,6 @@ def task_example_2(): pass
     assert session.execution_reports[0].outcome == TaskOutcome.SUCCESS
 
 
-@pytest.mark.end_to_end
 def test_task_executed_with_force_although_unchanged_runner(runner, tmp_path):
     tmp_path.joinpath("task_module.py").write_text("def task_example(): pass")
     result = runner.invoke(cli, [tmp_path.as_posix()])
@@ -295,7 +280,6 @@ def test_task_executed_with_force_although_unchanged_runner(runner, tmp_path):
     assert "1 Succeeded" in result.output
 
 
-@pytest.mark.end_to_end
 def test_task_is_not_reexecuted_when_modification_changed_file_not(runner, tmp_path):
     tmp_path.joinpath("task_example.py").write_text("def task_example(): pass")
     result = runner.invoke(cli, [tmp_path.as_posix()])
@@ -308,7 +292,6 @@ def test_task_is_not_reexecuted_when_modification_changed_file_not(runner, tmp_p
     assert "1 Skipped" in result.output
 
 
-@pytest.mark.end_to_end
 @pytest.mark.parametrize("arg_name", ["path", "produces"])
 def test_task_with_product_annotation(tmp_path, arg_name):
     """Using 'produces' with a product annotation should not cause an error."""
@@ -330,7 +313,6 @@ def task_example({arg_name}: Annotated[Path, Product] = Path("out.txt")) -> None
     assert arg_name in task.produces
 
 
-@pytest.mark.end_to_end
 def test_task_errors_with_nested_product_annotation(tmp_path):
     source = """
     from pathlib import Path
@@ -352,7 +334,6 @@ def task_example(
     assert "paths_to_file" not in task.produces
 
 
-@pytest.mark.end_to_end
 @pytest.mark.parametrize(
     "definition",
     [
@@ -389,7 +370,6 @@ def task_example(
     assert tmp_path.joinpath("out.txt").read_text() == "world"
 
 
-@pytest.mark.end_to_end
 def test_return_with_path_annotation_as_return(runner, tmp_path):
     source = """
     from pathlib import Path
@@ -404,7 +384,6 @@ def task_example() -> Annotated[str, Path("file.txt")]:
     assert tmp_path.joinpath("file.txt").read_text() == "Hello, World!"
 
 
-@pytest.mark.end_to_end
 def test_return_with_pathnode_annotation_as_return(runner, tmp_path):
     source = """
     from pathlib import Path
@@ -420,7 +399,6 @@ def task_example() -> Annotated[str, PathNode(path=Path("file.txt"))]:
     assert tmp_path.joinpath("file.txt").read_text() == "Hello, World!"
 
 
-@pytest.mark.end_to_end
 @pytest.mark.parametrize(
     ("product_def", "return_def"),
     [
@@ -473,7 +451,6 @@ def task_example({product_def}:
     assert data == 1
 
 
-@pytest.mark.end_to_end
 def test_return_with_tuple_pathnode_annotation_as_return(runner, tmp_path):
     source = """
     from pathlib import Path
@@ -493,7 +470,6 @@ def task_example() -> Annotated[str, (node1, node2)]:
     assert tmp_path.joinpath("file2.txt").read_text() == "World!"
 
 
-@pytest.mark.end_to_end
 def test_error_when_return_pytree_mismatch(runner, tmp_path):
     source = """
     from pathlib import Path
@@ -514,7 +490,6 @@ def task_example() -> Annotated[str, (node1, node2)]:
     assert "Return annotation: PyTreeSpec((*, *), NoneIsLeaf)" in result.output
 
 
-@pytest.mark.end_to_end
 def test_pytree_and_python_node_as_return(runner, tmp_path):
     source = """
     from pathlib import Path
@@ -537,7 +512,6 @@ def task_example() -> Annotated[Dict[str, str], PythonNode(name="result")]:
     assert "1 Succeeded" in result.output
 
 
-@pytest.mark.end_to_end
 def test_more_nested_pytree_and_python_node_as_return_with_names(runner, tmp_path):
     source = """
     from pathlib import Path
@@ -561,7 +535,6 @@ def task_example() -> Annotated[Dict[str, str], nodes]:
     assert "1 Succeeded" in result.output
 
 
-@pytest.mark.end_to_end
 def test_more_nested_pytree_and_python_node_as_return(runner, tmp_path):
     source = """
     from pathlib import Path
@@ -581,7 +554,6 @@ def task_example() -> Annotated[Dict[str, str], nodes]:
     assert "1 Succeeded" in result.output
 
 
-@pytest.mark.end_to_end
 def test_execute_tasks_and_pass_values_only_by_python_nodes(runner, tmp_path):
     source = """
     from pytask import PythonNode
@@ -604,7 +576,6 @@ def task_create_file(
     assert tmp_path.joinpath("file.txt").read_text() == "This is the text."
 
 
-@pytest.mark.end_to_end
 @pytest.mark.xfail(sys.platform == "win32", reason="Decoding issues in Gitlab Actions.")
 def test_execute_tasks_via_functional_api(tmp_path):
     source = """
@@ -637,7 +608,6 @@ def create_file(
     assert tmp_path.joinpath("file.txt").read_text() == "This is the text."
 
 
-@pytest.mark.end_to_end
 @pytest.mark.skipif(
     sys.platform == "win32" and os.environ.get("CI") == "true",
     reason="Windows does not pick up the right Python interpreter.",
 )
@@ -666,7 +636,6 @@ def task2() -> None: pass
     assert result.exit_code == ExitCode.OK
 
 
-@pytest.mark.end_to_end
 def test_pytask_on_a_module_that_uses_the_functional_api(tmp_path):
     source = """
     from pytask import task, ExitCode, build
@@ -684,13 +653,11 @@ def task_example(): pass
     assert "pytask tried to launch a second live display" in result.stdout.decode()
 
 
-@pytest.mark.end_to_end
 def test_pass_non_task_to_functional_api_that_are_ignored():
     session = pytask.build(tasks=None)
     assert len(session.tasks) == 0
 
 
-@pytest.mark.end_to_end
 def test_multiple_product_annotations(runner, tmp_path):
     source = """
     from pytask import Product
@@ -714,7 +681,6 @@ def task_second(
     assert result.exit_code == ExitCode.OK
 
 
-@pytest.mark.end_to_end
 def test_errors_during_loading_nodes_have_info(runner, tmp_path):
     source = """
     from __future__ import annotations
@@ -756,7 +722,6 @@ def task_example(
     assert "_pytask/execute.py" not in result.output
 
 
-@pytest.mark.end_to_end
 def test_hashing_works(tmp_path):
     """Use subprocess or otherwise the cache is filled from other tests."""
     source = """
@@ -781,7 +746,6 @@ def task_example() -> Annotated[str, Path("file.txt")]:
     assert hashes == hashes_
 
 
-@pytest.mark.end_to_end
 def test_python_node_as_product_with_product_annotation(runner, tmp_path):
     source = """
     from typing import Annotated
@@ -802,7 +766,6 @@ def task_write_file(text: Annotated[str, node]) -> Annotated[str, Path("file.txt
     assert tmp_path.joinpath("file.txt").read_text() == "Hello, World!"
 
 
-@pytest.mark.end_to_end
 def test_pickle_node_as_product_with_product_annotation(runner, tmp_path):
     source = """
     from typing import Annotated
@@ -823,7 +786,6 @@ def task_write_file(text: Annotated[str, node]) -> Annotated[str, Path("file.txt
     assert tmp_path.joinpath("file.txt").read_text() == "Hello, World!"
 
 
-@pytest.mark.end_to_end
 def test_check_if_root_nodes_are_available(tmp_path, runner):
     source = """
     from pathlib import Path
@@ -839,7 +801,6 @@ def task_d(path=Path("in.txt"), produces=Path("out.txt")):
     assert "NodeNotFoundError: 'task_d.py::task_d' requires" in result.output
 
 
-@pytest.mark.end_to_end
 def test_check_if_root_nodes_are_available_w_name(tmp_path, runner):
     source = """
     from pathlib import Path
@@ -860,7 +821,6 @@ def task_e(in1_: Annotated[Path, node1], in2_: Annotated[Any, node2]): ...
     assert "input1" in result.output
 
 
-@pytest.mark.end_to_end
 def test_check_if_root_nodes_are_available_with_separate_build_folder(tmp_path, runner):
     tmp_path.joinpath("src").mkdir()
     tmp_path.joinpath("bld").mkdir()
@@ -879,7 +839,6 @@ def task_d(path=Path("../bld/in.txt"), produces=Path("out.txt")):
     assert "bld/in.txt" in result.output
 
 
-@pytest.mark.end_to_end
 def test_error_when_node_state_throws_error(runner, tmp_path):
     source = """
     from pytask import PythonNode
@@ -894,7 +853,6 @@ def task_example(a = PythonNode(value={"a": 1}, hash=True)):
     assert "TypeError: unhashable type: 'dict'" in result.output
 
 
-@pytest.mark.end_to_end
 def test_task_is_not_reexecuted(runner, tmp_path):
     source = """
     from typing import Annotated
@@ -919,7 +877,6 @@ def task_second(path = Path("out.txt")) -> Annotated[str, Path("copy.txt")]:
     assert "1 Skipped because unchanged" in result.output
 
 
-@pytest.mark.end_to_end
 def test_use_functional_interface_with_task(tmp_path):
     def func(path):
         path.touch()
@@ -938,7 +895,6 @@ def func(path):
     assert session.exit_code == ExitCode.OK
 
 
-@pytest.mark.end_to_end
 def test_collect_task(runner, tmp_path):
     source = """
     from pytask import Task, PathNode
@@ -959,7 +915,6 @@ def func(path): path.touch()
     assert tmp_path.joinpath("out.txt").exists()
 
 
-@pytest.mark.end_to_end
 def test_collect_task_without_path(runner, tmp_path):
     source = """
     from pytask import TaskWithoutPath, PathNode
@@ -980,7 +935,6 @@ def func(path): path.touch()
     assert tmp_path.joinpath("out.txt").exists()
 
 
-@pytest.mark.end_to_end
 def test_download_file(runner, tmp_path):
     source = """
     from pathlib import Path
diff --git a/tests/test_git.py b/tests/test_git.py
index b30bb813..81075399 100644
--- a/tests/test_git.py
+++ b/tests/test_git.py
@@ -5,7 +5,6 @@
 from _pytask.git import is_git_installed
 
 
-@pytest.mark.unit
 @pytest.mark.parametrize(("mock_return", "expected"), [(True, True), (None, False)])
 def test_is_git_installed(monkeypatch, mock_return, expected):
     monkeypatch.setattr(
diff --git a/tests/test_hashlib.py b/tests/test_hashlib.py
index 8e495c02..cbfb6c33 100644
--- a/tests/test_hashlib.py
+++ b/tests/test_hashlib.py
@@ -7,7 +7,6 @@
 from _pytask._hashlib import hash_value
 
 
-@pytest.mark.unit
 @pytest.mark.parametrize(
     ("value", "expected"),
     [
diff --git a/tests/test_hook_module.py b/tests/test_hook_module.py
index 53082897..eba48680 100644
--- a/tests/test_hook_module.py
+++ b/tests/test_hook_module.py
@@ -9,7 +9,6 @@
 from tests.conftest import run_in_subprocess
 
 
-@pytest.mark.end_to_end
 @pytest.mark.parametrize("module_name", [True, False])
 def test_add_new_hook_via_cli(tmp_path, module_name):
     hooks = """
@@ -52,7 +51,6 @@ def pytask_extend_command_line_interface(cli):
     assert "--new-option" in result.stdout
 
 
-@pytest.mark.end_to_end
 @pytest.mark.parametrize("module_name", [True, False])
 def test_add_new_hook_via_config(tmp_path, module_name):
     tmp_path.joinpath("pyproject.toml").write_text(
@@ -89,7 +87,6 @@ def pytask_extend_command_line_interface(cli):
     assert "--new-option" in result.stdout
 
 
-@pytest.mark.end_to_end
 def test_error_when_hook_module_path_does_not_exist(tmp_path):
     result = subprocess.run(  # noqa: PLW1510
         ("pytask", "build", "--hook-module", "hooks.py", "--help"),
@@ -100,7 +97,6 @@ def test_error_when_hook_module_path_does_not_exist(tmp_path):
     assert b"Error: Invalid value for '--hook-module'" in result.stderr
 
 
-@pytest.mark.end_to_end
 def test_error_when_hook_module_module_does_not_exist(tmp_path):
     result = subprocess.run(  # noqa: PLW1510
         ("pytask", "build", "--hook-module", "hooks", "--help"),
@@ -111,7 
+107,6 @@ def test_error_when_hook_module_module_does_not_exist(tmp_path): assert b"Error: Invalid value for '--hook-module':" in result.stderr -@pytest.mark.end_to_end def test_error_when_hook_module_is_no_iterable(tmp_path): tmp_path.joinpath("pyproject.toml").write_text( "[tool.pytask.ini_options]\nhook_module = 'hooks'" diff --git a/tests/test_ignore.py b/tests/test_ignore.py index 88a7278c..ba1964ff 100644 --- a/tests/test_ignore.py +++ b/tests/test_ignore.py @@ -10,7 +10,6 @@ from pytask import build -@pytest.mark.end_to_end @pytest.mark.parametrize("ignored_folder", [*_IGNORED_FOLDERS, "pytask.egg-info"]) def test_ignore_default_paths(tmp_path, ignored_folder): folder = ignored_folder.split("/*")[0] @@ -22,7 +21,6 @@ def test_ignore_default_paths(tmp_path, ignored_folder): assert len(session.tasks) == 0 -@pytest.mark.end_to_end @pytest.mark.parametrize("ignore", ["", "*task_module.py"]) @pytest.mark.parametrize("new_line", [True, False]) def test_ignore_paths(tmp_path, ignore, new_line): @@ -38,7 +36,6 @@ def test_ignore_paths(tmp_path, ignore, new_line): assert len(session.tasks) == 0 if ignore else len(session.tasks) == 1 -@pytest.mark.unit @pytest.mark.parametrize( ("path", "ignored_paths", "expected"), [ diff --git a/tests/test_live.py b/tests/test_live.py index c5727fdc..25431312 100644 --- a/tests/test_live.py +++ b/tests/test_live.py @@ -15,7 +15,6 @@ from pytask import cli -@pytest.mark.end_to_end @pytest.mark.parametrize("verbose", [0, 1]) def test_verbose_mode_execution(tmp_path, runner, verbose): source = "def task_example(): pass" @@ -28,7 +27,6 @@ def test_verbose_mode_execution(tmp_path, runner, verbose): assert ("task_module.py::task_example" in result.output) is (verbose >= 1) -@pytest.mark.unit def test_live_execution_sequentially(capsys, tmp_path): path = tmp_path.joinpath("task_module.py") task = Task(base_name="task_example", path=path, function=lambda x: x) @@ -84,7 +82,6 @@ def test_live_execution_sequentially(capsys, tmp_path): assert "Completed: 1/x" in captured.out -@pytest.mark.unit @pytest.mark.parametrize("verbose", [1, 2]) @pytest.mark.parametrize("outcome", TaskOutcome) def test_live_execution_displays_skips_and_persists(capsys, tmp_path, verbose, outcome): @@ -134,7 +131,6 @@ def test_live_execution_displays_skips_and_persists(capsys, tmp_path, verbose, o assert "running" not in captured.out -@pytest.mark.unit @pytest.mark.parametrize("n_entries_in_table", [1, 2]) def test_live_execution_displays_subset_of_table(capsys, tmp_path, n_entries_in_table): path = tmp_path.joinpath("task_module.py") @@ -188,7 +184,6 @@ def test_live_execution_displays_subset_of_table(capsys, tmp_path, n_entries_in_ assert "│ ." 
in captured.out -@pytest.mark.unit def test_live_execution_skips_do_not_crowd_out_displayed_tasks(capsys, tmp_path): path = tmp_path.joinpath("task_module.py") task = Task(base_name="task_example", path=path, function=lambda x: x) @@ -257,7 +252,6 @@ def test_live_execution_skips_do_not_crowd_out_displayed_tasks(capsys, tmp_path) assert "task_skip" not in captured.out -@pytest.mark.end_to_end def test_full_execution_table_is_displayed_at_the_end_of_execution(tmp_path, runner): source = """ from pytask import task @@ -282,7 +276,6 @@ def task_create_file(produces=Path(f"{i}.txt")): assert f"[produces{i}]" in result.output -@pytest.mark.end_to_end @pytest.mark.parametrize("sort_table", ["true", "false"]) def test_sort_table_option(tmp_path, runner, sort_table): source = """ diff --git a/tests/test_logging.py b/tests/test_logging.py index 6d18bef1..8ff4bf62 100644 --- a/tests/test_logging.py +++ b/tests/test_logging.py @@ -19,7 +19,6 @@ class DummyDist(NamedTuple): version: str -@pytest.mark.unit @pytest.mark.parametrize( ("plugins", "expected"), [ @@ -31,7 +30,6 @@ def test_format_plugin_names_and_versions(plugins, expected): assert _format_plugin_names_and_versions(plugins) == expected -@pytest.mark.unit @pytest.mark.parametrize( ("duration", "outcome", "expected"), [ @@ -63,7 +61,6 @@ def test_pytask_log_session_footer(capsys, duration, outcome, expected): assert expected in captured.out -@pytest.mark.end_to_end @pytest.mark.parametrize( ("func", "expected_1", "expected_2"), [ @@ -102,7 +99,6 @@ def test_logging_of_outcomes(tmp_path, runner, func, expected_1, expected_2): assert expected_2 in result.output -@pytest.mark.unit @pytest.mark.parametrize( ("amount", "unit", "short_label", "expectation", "expected"), [ @@ -134,7 +130,6 @@ def test_humanize_time(amount, unit, short_label, expectation, expected): assert result == expected -@pytest.mark.end_to_end @pytest.mark.parametrize("flag", ["--show-traceback", "--show-no-traceback"]) def test_show_traceback(runner, tmp_path, flag): source = "def task_raises(): raise Exception" diff --git a/tests/test_mark.py b/tests/test_mark.py index 1896fa15..5ed81f0e 100644 --- a/tests/test_mark.py +++ b/tests/test_mark.py @@ -12,20 +12,17 @@ from pytask import cli -@pytest.mark.unit @pytest.mark.parametrize("attribute", ["hookimpl", "mark"]) def test_mark_exists_in_pytask_namespace(attribute): assert attribute in sys.modules["pytask"].__all__ -@pytest.mark.unit def test_pytask_mark_notcallable() -> None: mark = MarkGenerator() with pytest.raises(TypeError): mark() -@pytest.mark.unit @pytest.mark.filterwarnings("ignore:Unknown pytask.mark.foo") def test_mark_with_param(): def some_function(): # pragma: no cover @@ -42,14 +39,12 @@ class SomeClass: assert pytask.mark.foo.with_args(SomeClass) is not SomeClass -@pytest.mark.unit def test_pytask_mark_name_starts_with_underscore(): mark = MarkGenerator() with pytest.raises(AttributeError): _ = mark._some_name -@pytest.mark.end_to_end def test_markers_command(tmp_path, runner): toml = """ [tool.pytask.ini_options] @@ -64,7 +59,6 @@ def test_markers_command(tmp_path, runner): assert out in result.output -@pytest.mark.end_to_end def test_ini_markers_whitespace(runner, tmp_path): tmp_path.joinpath("pyproject.toml").write_text( "[tool.pytask.ini_options]\nmarkers = {'a1 ' = 'this is a whitespace marker'}" @@ -82,7 +76,6 @@ def task_markers(): assert "1 Succeeded" in result.output -@pytest.mark.end_to_end @pytest.mark.filterwarnings("ignore:Unknown pytask.mark.") @pytest.mark.parametrize( ("expr", 
"expected_passed"), @@ -119,7 +112,6 @@ def task_two(): assert set(tasks_that_run) == set(expected_passed) -@pytest.mark.end_to_end @pytest.mark.parametrize( ("expr", "expected_passed"), [ @@ -163,7 +155,6 @@ def task_no_2(): assert set(tasks_that_run) == set(expected_passed) -@pytest.mark.end_to_end @pytest.mark.parametrize( ("expr", "expected_passed"), [ @@ -195,7 +186,6 @@ def task_func(arg=arg): assert set(tasks_that_run) == set(expected_passed) -@pytest.mark.end_to_end @pytest.mark.parametrize( ("expr", "expected_error"), [ @@ -242,7 +232,6 @@ def test_keyword_option_wrong_arguments( ) or expected_error in captured.out.replace("\n", "") -@pytest.mark.end_to_end def test_configuration_failed(runner, tmp_path): result = runner.invoke( cli, ["markers", "-c", tmp_path.joinpath("non_existent_path").as_posix()] @@ -250,7 +239,6 @@ def test_configuration_failed(runner, tmp_path): assert result.exit_code == ExitCode.CONFIGURATION_FAILED -@pytest.mark.end_to_end def test_selecting_task_with_keyword_should_run_predecessor(runner, tmp_path): source = """ from pathlib import Path @@ -268,7 +256,6 @@ def task_second(path=Path("first.txt")): ... assert "2 Succeeded" in result.output -@pytest.mark.end_to_end def test_selecting_task_with_marker_should_run_predecessor(runner, tmp_path): source = """ import pytask @@ -289,7 +276,6 @@ def task_second(path=Path("first.txt")): ... assert "Warnings" in result.output -@pytest.mark.end_to_end def test_selecting_task_with_keyword_ignores_other_task(runner, tmp_path): source = """ from pathlib import Path @@ -307,7 +293,6 @@ def task_second(): ... assert "1 Skipped" in result.output -@pytest.mark.end_to_end def test_selecting_task_with_marker_ignores_other_task(runner, tmp_path): source = """ import pytask @@ -328,7 +313,6 @@ def task_second(): ... assert "Warnings" in result.output -@pytest.mark.end_to_end def test_selecting_task_with_unknown_marker_raises_warning(runner, tmp_path): source = """ import pytask @@ -345,7 +329,6 @@ def task_example(): ... assert "Warnings" in result.output -@pytest.mark.end_to_end def test_different_mark_import(runner, tmp_path): source = """ from pytask import mark @@ -359,7 +342,6 @@ def task_write_text(): ... assert "Skipped" in result.output -@pytest.mark.end_to_end def test_error_with_unknown_marker_and_strict(runner, tmp_path): source = """ from pytask import mark @@ -373,7 +355,6 @@ def task_write_text(): ... assert "Unknown pytask.mark.unknown" in result.output -@pytest.mark.end_to_end @pytest.mark.parametrize("name", ["parametrize", "depends_on", "produces", "task"]) def test_error_with_depreacated_markers(runner, tmp_path, name): source = f""" @@ -388,7 +369,6 @@ def task_write_text(): ... 
assert f"@pytask.mark.{name}" in result.output -@pytest.mark.end_to_end def test_error_with_d(runner, tmp_path): source = """ from pytask import mark diff --git a/tests/test_mark_cli.py b/tests/test_mark_cli.py index 6ff17efa..a94c45dd 100644 --- a/tests/test_mark_cli.py +++ b/tests/test_mark_cli.py @@ -9,7 +9,6 @@ from pytask import cli -@pytest.mark.end_to_end def test_show_markers(runner): result = runner.invoke(cli, ["markers"]) @@ -28,7 +27,6 @@ def test_show_markers(runner): ) -@pytest.mark.end_to_end def test_markers_option(tmp_path, runner): toml = """ [tool.pytask.ini_options.markers] @@ -44,7 +42,6 @@ def test_markers_option(tmp_path, runner): assert all(marker in result.output for marker in ("a1", "a1some", "nodescription")) -@pytest.mark.end_to_end @pytest.mark.parametrize("marker_name", ["lkasd alksds", "1kasd"]) def test_marker_names(tmp_path, marker_name): toml = f""" diff --git a/tests/test_mark_expression.py b/tests/test_mark_expression.py index 241c6d5e..ac1b2dad 100644 --- a/tests/test_mark_expression.py +++ b/tests/test_mark_expression.py @@ -12,7 +12,6 @@ def evaluate(input_: str, matcher: Callable[[str], bool]) -> bool: return Expression.compile_(input_).evaluate(matcher) -@pytest.mark.unit def test_empty_is_false() -> None: assert not evaluate("", lambda ident: False) # noqa: ARG005 assert not evaluate("", lambda ident: True) # noqa: ARG005 @@ -20,7 +19,6 @@ def test_empty_is_false() -> None: assert not evaluate("\t", lambda ident: False) # noqa: ARG005 -@pytest.mark.unit @pytest.mark.parametrize( ("expr", "expected"), [ @@ -54,7 +52,6 @@ def test_basic(expr: str, expected: bool) -> None: assert evaluate(expr, matcher) is expected -@pytest.mark.unit @pytest.mark.parametrize( ("expr", "expected"), [ @@ -71,7 +68,6 @@ def test_syntax_oddeties(expr: str, expected: bool) -> None: assert evaluate(expr, matcher) is expected -@pytest.mark.unit @pytest.mark.parametrize( ("expr", "column", "message"), [ @@ -127,7 +123,6 @@ def test_syntax_errors(expr: str, column: int, message: str) -> None: assert excinfo.value.message == message -@pytest.mark.unit @pytest.mark.parametrize( "ident", [ @@ -161,7 +156,6 @@ def test_valid_idents(ident: str) -> None: assert evaluate(ident, {ident: True}.__getitem__) -@pytest.mark.unit @pytest.mark.parametrize( "ident", [ @@ -189,7 +183,6 @@ def test_invalid_idents(ident: str) -> None: evaluate(ident, lambda ident: True) # noqa: ARG005 -@pytest.mark.unit def test_backslash_not_treated_specially() -> None: r"""When generating nodeids, if the source name contains special characters like a newline, they are escaped into two characters like \n. 
diff --git a/tests/test_mark_structures.py b/tests/test_mark_structures.py index f1bd179d..9e6bbd51 100644 --- a/tests/test_mark_structures.py +++ b/tests/test_mark_structures.py @@ -5,7 +5,6 @@ import pytask -@pytest.mark.unit @pytest.mark.parametrize( ("lhs", "rhs", "expected"), [ @@ -19,7 +18,6 @@ def test__eq__(lhs, rhs, expected) -> None: assert (lhs == rhs) == expected -@pytest.mark.unit @pytest.mark.filterwarnings("ignore:Unknown pytask\\.mark\\.foo") def test_aliases() -> None: md = pytask.mark.foo(1, "2", three=3) diff --git a/tests/test_mark_utils.py b/tests/test_mark_utils.py index 70435de4..a916af51 100644 --- a/tests/test_mark_utils.py +++ b/tests/test_mark_utils.py @@ -14,7 +14,6 @@ from pytask import set_marks -@pytest.mark.unit @pytest.mark.parametrize( ("markers", "expected"), [ @@ -35,7 +34,6 @@ def test_get_all_marks_from_task(markers, expected): assert result == expected -@pytest.mark.unit @pytest.mark.parametrize( ("markers", "expected"), [ @@ -61,7 +59,6 @@ def func(): ... assert result == expected -@pytest.mark.unit @pytest.mark.parametrize( ("markers", "marker_name", "expected"), [ @@ -84,7 +81,6 @@ def test_get_marks_from_task(markers, marker_name, expected): assert result == expected -@pytest.mark.unit @pytest.mark.parametrize( ("markers", "marker_name", "expected"), [ @@ -112,7 +108,6 @@ def func(): ... assert result == expected -@pytest.mark.unit @pytest.mark.parametrize( ("markers", "marker_name", "expected"), [ @@ -135,7 +130,6 @@ def test_has_mark_for_task(markers, marker_name, expected): assert result is expected -@pytest.mark.unit @pytest.mark.parametrize( ("markers", "marker_name", "expected"), [ @@ -159,7 +153,6 @@ def func(): ... assert result == expected -@pytest.mark.unit @pytest.mark.parametrize( ("markers", "marker_name", "expected_markers", "expected_others"), [ @@ -187,7 +180,6 @@ def test_remove_marks_from_task( assert result_markers == expected_markers -@pytest.mark.unit @pytest.mark.parametrize( ("markers", "marker_name", "expected_markers", "expected_others"), [ @@ -221,7 +213,6 @@ def func(): ... 
assert result_markers == expected_markers -@pytest.mark.unit @pytest.mark.parametrize( "markers", [ @@ -236,7 +227,6 @@ def test_set_marks_to_task(markers): assert result.markers == markers -@pytest.mark.unit @pytest.mark.parametrize( "markers", [ diff --git a/tests/test_node_protocols.py b/tests/test_node_protocols.py index d35fbb3e..faa3015b 100644 --- a/tests/test_node_protocols.py +++ b/tests/test_node_protocols.py @@ -3,13 +3,10 @@ import pickle import textwrap -import pytest - from pytask import ExitCode from pytask import cli -@pytest.mark.end_to_end def test_node_protocol_for_custom_nodes(runner, tmp_path): source = """ from typing import Annotated @@ -48,7 +45,6 @@ def task_example( assert "FutureWarning" not in result.output -@pytest.mark.end_to_end def test_node_protocol_for_custom_nodes_with_paths(runner, tmp_path): source = """ from typing import Annotated @@ -94,7 +90,6 @@ def task_example( assert tmp_path.joinpath("out.txt").read_text() == "text" -@pytest.mark.end_to_end def test_node_protocol_for_custom_nodes_adding_attributes(runner, tmp_path): source = """ from typing import Annotated diff --git a/tests/test_nodes.py b/tests/test_nodes.py index 4a75b2c8..63ed12bd 100644 --- a/tests/test_nodes.py +++ b/tests/test_nodes.py @@ -16,7 +16,6 @@ from pytask import TaskWithoutPath -@pytest.mark.unit @pytest.mark.parametrize( ("value", "hash_", "expected"), [ @@ -33,7 +32,6 @@ def test_hash_of_python_node(value, hash_, expected): assert state == expected -@pytest.mark.unit @pytest.mark.parametrize( ("node", "expected"), [ @@ -73,7 +71,6 @@ def test_signature(node, expected): assert node.signature == expected -@pytest.mark.unit @pytest.mark.parametrize( ("value", "exists", "expected"), [ @@ -93,7 +90,6 @@ def test_hash_of_path_node(tmp_path, value, exists, expected): assert state is expected -@pytest.mark.unit @pytest.mark.parametrize( ("value", "exists", "expected"), [ @@ -113,7 +109,6 @@ def test_hash_of_pickle_node(tmp_path, value, exists, expected): assert state is expected -@pytest.mark.unit @pytest.mark.parametrize( ("node", "protocol", "expected"), [ @@ -129,7 +124,6 @@ def test_comply_with_protocol(node, protocol, expected): assert isinstance(node, protocol) is expected -@pytest.mark.unit def test_custom_serializer_deserializer_pickle_node(tmp_path): """Test that PickleNode correctly uses cloudpickle for de-/serialization.""" diff --git a/tests/test_outcomes.py b/tests/test_outcomes.py index cb366ad6..5e417b20 100644 --- a/tests/test_outcomes.py +++ b/tests/test_outcomes.py @@ -9,7 +9,6 @@ from pytask import count_outcomes -@pytest.mark.unit @pytest.mark.parametrize("outcome_in_report", CollectionOutcome) def test_count_outcomes_collection(outcome_in_report): reports = [CollectionReport(outcome_in_report, None, None)] @@ -23,7 +22,6 @@ def test_count_outcomes_collection(outcome_in_report): assert count == 0 -@pytest.mark.unit @pytest.mark.parametrize("outcome_in_report", TaskOutcome) def test_count_outcomes_tasks(outcome_in_report): reports = [ExecutionReport(None, outcome_in_report, None, None)] diff --git a/tests/test_path.py b/tests/test_path.py index d03bf288..4a285f07 100644 --- a/tests/test_path.py +++ b/tests/test_path.py @@ -20,7 +20,6 @@ from pytask.path import import_path -@pytest.mark.unit @pytest.mark.parametrize( ("path", "source", "include_source", "expected"), [ @@ -33,7 +32,6 @@ def test_relative_to(path, source, include_source, expected): assert result == expected -@pytest.mark.unit @pytest.mark.parametrize( ("path", "potential_ancestors", "expected"), [ @@ 
-51,7 +49,6 @@ def test_find_closest_ancestor(monkeypatch, path, potential_ancestors, expected) assert result == expected -@pytest.mark.unit @pytest.mark.parametrize( ("path_1", "path_2", "expectation", "expected"), [ @@ -107,7 +104,6 @@ def test_find_common_ancestor(path_1, path_2, expectation, expected): assert result == expected -@pytest.mark.unit @pytest.mark.skipif(sys.platform != "win32", reason="Only works on Windows.") @pytest.mark.parametrize( ("path", "existing_paths", "expected"), @@ -141,7 +137,6 @@ def simple_module(request, tmp_path: Path) -> Path: sys.modules.pop(module_name, None) -@pytest.mark.unit def test_importmode_importlib(request, simple_module: Path, tmp_path: Path) -> None: """`importlib` mode does not change sys.path.""" module = import_path(simple_module, root=tmp_path) @@ -153,7 +148,6 @@ def test_importmode_importlib(request, simple_module: Path, tmp_path: Path) -> N assert "_src.project" in sys.modules -@pytest.mark.unit def test_remembers_previous_imports(simple_module: Path, tmp_path: Path) -> None: """importlib mode called remembers previous module (pytest#10341, pytest#10811).""" module1 = import_path(simple_module, root=tmp_path) @@ -161,7 +155,6 @@ def test_remembers_previous_imports(simple_module: Path, tmp_path: Path) -> None assert module1 is module2 -@pytest.mark.unit def test_no_meta_path_found( simple_module: Path, monkeypatch: pytest.MonkeyPatch, tmp_path: Path ) -> None: @@ -185,7 +178,6 @@ def test_no_meta_path_found( import_path(simple_module, root=tmp_path) -@pytest.mark.unit class TestImportLibMode: def test_importmode_importlib_with_dataclass(self, tmp_path: Path) -> None: """ diff --git a/tests/test_persist.py b/tests/test_persist.py index be48d43d..c5714412 100644 --- a/tests/test_persist.py +++ b/tests/test_persist.py @@ -21,13 +21,11 @@ class DummyClass: pass -@pytest.mark.end_to_end def test_persist_marker_is_set(tmp_path): session = build(paths=tmp_path) assert "persist" in session.config["markers"] -@pytest.mark.end_to_end def test_multiple_runs_with_persist(tmp_path): """Perform multiple consecutive runs and check intermediate outcomes with persist. @@ -86,7 +84,6 @@ def task_dummy(path=Path("in.txt"), produces=Path("out.txt")): assert isinstance(session.execution_reports[0].exc_info[1], SkippedUnchanged) -@pytest.mark.end_to_end def test_migrating_a_whole_task_with_persist(tmp_path): source = """ import pytask @@ -110,7 +107,6 @@ def task_dummy(depends_on=Path("in.txt"), produces=Path("out.txt")): assert isinstance(session.execution_reports[0].exc_info[1], Persisted) -@pytest.mark.unit @pytest.mark.parametrize( ("exc_info", "expected"), [ diff --git a/tests/test_profile.py b/tests/test_profile.py index 770e711a..0ffde996 100644 --- a/tests/test_profile.py +++ b/tests/test_profile.py @@ -13,7 +13,6 @@ from pytask import create_database -@pytest.mark.end_to_end def test_duration_is_stored_in_task(tmp_path): source = """ import time @@ -38,14 +37,12 @@ def task_example(): time.sleep(2) assert runtime.duration > 2 -@pytest.mark.end_to_end def test_profile_if_no_tasks_are_collected(tmp_path, runner): result = runner.invoke(cli, ["profile", tmp_path.as_posix()]) assert result.exit_code == ExitCode.OK assert "No information is stored on the collected tasks." in result.output -@pytest.mark.end_to_end def test_profile_if_there_is_no_information_on_collected_tasks(tmp_path, runner): source = """ import time @@ -60,7 +57,6 @@ def task_example(): time.sleep(2) assert "No information is stored on the collected tasks." 
in result.output -@pytest.mark.end_to_end def test_profile_if_there_is_information_on_collected_tasks(tmp_path, runner): source = """ import time @@ -78,7 +74,6 @@ def task_example(produces=Path("out.txt")): assert result.exit_code == ExitCode.OK -@pytest.mark.end_to_end @pytest.mark.parametrize("export", ["csv", "json"]) def test_export_of_profile(tmp_path, runner, export): source = """ @@ -94,7 +89,6 @@ def task_example(): time.sleep(2) assert tmp_path.joinpath(f"profile.{export}").exists() -@pytest.mark.unit @pytest.mark.parametrize( ("bytes_", "units", "expected"), [ diff --git a/tests/test_provisional.py b/tests/test_provisional.py index 6a247e1d..203e784f 100644 --- a/tests/test_provisional.py +++ b/tests/test_provisional.py @@ -2,15 +2,12 @@ import textwrap -import pytest - from pytask import ExitCode from pytask import TaskOutcome from pytask import build from pytask import cli -@pytest.mark.end_to_end def test_task_that_produces_provisional_path_node(tmp_path): source = """ from typing import Annotated @@ -36,7 +33,6 @@ def task_example( assert session.execution_reports[0].outcome == TaskOutcome.SKIP_UNCHANGED -@pytest.mark.end_to_end def test_task_that_depends_on_relative_provisional_path_node(tmp_path): source = """ from typing import Annotated @@ -60,7 +56,6 @@ def task_example( assert len(session.tasks[0].depends_on["paths"]) == 2 -@pytest.mark.end_to_end def test_task_that_depends_on_provisional_path_node_with_absolute_root_dir(tmp_path): source = """ from typing import Annotated @@ -87,7 +82,6 @@ def task_example( assert len(session.tasks[0].depends_on["paths"]) == 2 -@pytest.mark.end_to_end def test_task_that_depends_on_provisional_path_node_with_relative_root_dir(tmp_path): source = """ from typing import Annotated @@ -112,7 +106,6 @@ def task_example( assert len(session.tasks[0].depends_on["paths"]) == 2 -@pytest.mark.end_to_end def test_task_that_depends_on_provisional_task(runner, tmp_path): source = """ from typing import Annotated @@ -139,7 +132,6 @@ def task_depends( assert "2 Succeeded" in result.output -@pytest.mark.end_to_end def test_gracefully_fail_when_dag_raises_error(runner, tmp_path): source = """ from typing import Annotated @@ -168,7 +160,6 @@ def task_depends( assert "There are some tasks which produce" in result.output -@pytest.mark.end_to_end def test_provisional_task_generation(runner, tmp_path): source = """ from typing import Annotated @@ -202,7 +193,6 @@ def task_copy( assert tmp_path.joinpath("b-copy.txt").exists() -@pytest.mark.end_to_end def test_gracefully_fail_when_task_generator_raises_error(runner, tmp_path): source = """ from typing import Annotated @@ -223,7 +213,6 @@ def task_example( assert "1 Failed" in result.output -@pytest.mark.end_to_end def test_use_provisional_node_as_product_in_generator_without_rerun(runner, tmp_path): source = """ from typing import Annotated @@ -271,7 +260,6 @@ def task_example(path = DirectoryNode(root_dir=Path("files"), pattern="*.py")): assert result.exit_code == ExitCode.OK -@pytest.mark.end_to_end def test_root_dir_is_created(runner, tmp_path): source = """ from typing import Annotated diff --git a/tests/test_shared.py b/tests/test_shared.py index e340f031..a4c7866e 100644 --- a/tests/test_shared.py +++ b/tests/test_shared.py @@ -14,7 +14,6 @@ from pytask import build -@pytest.mark.unit @pytest.mark.parametrize( ("x", "expected"), [([], set()), ([1, 2, 3, 1, 2], {1, 2}), (["a", "a", "b"], {"a"})], @@ -24,7 +23,6 @@ def test_find_duplicates(x, expected): assert result == expected -@pytest.mark.end_to_end 
def test_parse_markers(tmp_path): toml = """ [tool.pytask.ini_options.markers] @@ -40,7 +38,6 @@ def test_parse_markers(tmp_path): assert "a2" in session.config["markers"] -@pytest.mark.end_to_end @pytest.mark.parametrize( ("value", "enum", "expectation", "expected"), [ @@ -54,7 +51,6 @@ def test_convert_to_enum(value, enum, expectation, expected): assert result == expected -@pytest.mark.unit def test_unwrap_task_function(): def task(): pass @@ -77,7 +73,6 @@ def wrapper(): assert unwrap_task_function(decorated) is task -@pytest.mark.unit def test_no_unwrap_coiled(): coiled = pytest.importorskip("coiled") diff --git a/tests/test_skipping.py b/tests/test_skipping.py index e1d41abd..64547da6 100644 --- a/tests/test_skipping.py +++ b/tests/test_skipping.py @@ -23,7 +23,6 @@ class DummyClass: pass -@pytest.mark.end_to_end def test_skip_unchanged(tmp_path): source = """ def task_dummy(): @@ -38,7 +37,6 @@ def task_dummy(): assert isinstance(session.execution_reports[0].exc_info[1], SkippedUnchanged) -@pytest.mark.end_to_end def test_skip_unchanged_w_dependencies_and_products(tmp_path): source = """ from pathlib import Path @@ -61,7 +59,6 @@ def task_dummy(path=Path("in.txt"), produces=Path("out.txt")): assert tmp_path.joinpath("out.txt").read_text() == "Original content of in.txt." -@pytest.mark.end_to_end def test_skipif_ancestor_failed(tmp_path): source = """ from pathlib import Path @@ -81,7 +78,6 @@ def task_second(path=Path("out.txt")): ... assert isinstance(session.execution_reports[1].exc_info[1], SkippedAncestorFailed) -@pytest.mark.end_to_end def test_if_skip_decorator_is_applied_to_following_tasks(tmp_path): source = """ import pytask @@ -103,7 +99,6 @@ def task_second(path=Path("out.txt")): ... assert isinstance(session.execution_reports[1].exc_info[1], Skipped) -@pytest.mark.end_to_end @pytest.mark.parametrize( "mark_string", ["@pytask.mark.skip", "@pytask.mark.skipif(True, reason='bla')"] ) @@ -124,7 +119,6 @@ def task_first(path=Path("in.txt")): assert isinstance(session.execution_reports[0].exc_info[1], Skipped) -@pytest.mark.end_to_end @pytest.mark.parametrize( "mark_string", ["@pytask.mark.skip", "@pytask.mark.skipif(True, reason='bla')"] ) @@ -150,7 +144,6 @@ def task_second(path=Path("in.txt")): assert "task_second" in result.output -@pytest.mark.end_to_end def test_if_skipif_decorator_is_applied_skipping(tmp_path): source = """ import pytask @@ -179,7 +172,6 @@ def task_second(path=Path("out.txt")): assert session.execution_reports[0].exc_info[1].args[0] == "bla" -@pytest.mark.end_to_end def test_if_skipif_decorator_is_applied_execute(tmp_path): source = """ import pytask @@ -206,7 +198,6 @@ def task_second(path=Path("out.txt")): ... assert session.execution_reports[1].exc_info is None -@pytest.mark.end_to_end def test_if_skipif_decorator_is_applied_any_condition_matches(tmp_path): """Any condition of skipif has to be True and only their message is shown.""" source = """ @@ -240,7 +231,6 @@ def task_second(path=Path("out.txt")): assert session.execution_reports[0].exc_info[1].args[0] == "No, I am not." -@pytest.mark.unit @pytest.mark.parametrize( ("marker_name", "force", "expectation"), [ @@ -261,7 +251,6 @@ def test_pytask_execute_task_setup(marker_name, force, expectation): pytask_execute_task_setup(session=session, task=task) -@pytest.mark.end_to_end def test_skip_has_precedence_over_ancestor_failed(runner, tmp_path): source = """ from pathlib import Path @@ -278,7 +267,6 @@ def task_example_2(path=Path("file.txt")): ... 
assert "1 Skipped" in result.output -@pytest.mark.end_to_end def test_skipif_has_precedence_over_ancestor_failed(runner, tmp_path): source = """ from pathlib import Path diff --git a/tests/test_task.py b/tests/test_task.py index 4ac09a78..ba2df5fe 100644 --- a/tests/test_task.py +++ b/tests/test_task.py @@ -11,7 +11,6 @@ from pytask import cli -@pytest.mark.end_to_end @pytest.mark.parametrize("func_name", ["task_example", "func"]) @pytest.mark.parametrize("task_name", ["the_only_task", None]) def test_task_with_task_decorator(tmp_path, func_name, task_name): @@ -36,7 +35,6 @@ def {func_name}(produces=Path("out.txt")): assert session.tasks[0].name.endswith(f"task_module.py::{func_name}") -@pytest.mark.end_to_end def test_parametrization_in_for_loop(tmp_path, runner): source = """ from pytask import task @@ -57,7 +55,6 @@ def task_example(produces=Path(f"out_{i}.txt")): assert "task_example[produces1]" in result.output -@pytest.mark.end_to_end def test_parametrization_in_for_loop_from_markers(tmp_path, runner): source = """ from pytask import task @@ -80,7 +77,6 @@ def example(path=Path(f"in_{i}.txt"), produces=Path(f"out_{i}.txt")): assert "example[path1-produces1]" in result.output -@pytest.mark.end_to_end def test_parametrization_in_for_loop_from_signature(tmp_path, runner): source = """ from pytask import task @@ -103,7 +99,6 @@ def example(path=Path(f"in_{i}.txt"), produces=Path(f"out_{i}.txt")): assert "example[path1-produces1]" in result.output -@pytest.mark.end_to_end def test_parametrization_in_for_loop_from_markers_and_args(tmp_path, runner): source = """ from pytask import task @@ -124,7 +119,6 @@ def example(produces=Path(f"out_{i}.txt"), i=i): assert "example[produces1-1]" in result.output -@pytest.mark.end_to_end def test_parametrization_in_for_loop_from_decorator(tmp_path, runner): source = """ from pytask import task @@ -145,7 +139,6 @@ def example(produces, i): assert "deco_task[produces1-1]" in result.output -@pytest.mark.end_to_end def test_parametrization_in_for_loop_with_ids(tmp_path, runner): source = """ from pytask import task @@ -166,7 +159,6 @@ def example(produces, i): assert "deco_task[1]" in result.output -@pytest.mark.end_to_end def test_parametrization_in_for_loop_with_error(tmp_path, runner): source = """ from pytask import task @@ -189,7 +181,6 @@ def task_example(produces=Path(f"out_{i}.txt")): assert "task_example[produces1]" in result.output -@pytest.mark.end_to_end def test_parametrization_in_for_loop_from_decorator_w_irregular_dicts(tmp_path, runner): source = """ from pytask import task @@ -218,7 +209,6 @@ def example(produces, i): assert "TypeError: example() missing 1 required" in result.output -@pytest.mark.end_to_end def test_parametrization_in_for_loop_with_one_iteration(tmp_path, runner): source = """ from pytask import task @@ -239,7 +229,6 @@ def task_example(produces=Path(f"out_{i}.txt")): assert "Collected 1 task" in result.output -@pytest.mark.end_to_end def test_parametrization_in_for_loop_and_normal(tmp_path, runner): source = """ from pytask import task @@ -265,7 +254,6 @@ def task_example(produces=Path(f"out_1.txt")): assert "Collected 2 tasks" in result.output -@pytest.mark.end_to_end def test_parametrized_names_without_parametrization(tmp_path, runner): source = """ from pytask import task @@ -292,7 +280,6 @@ def task_example(produces=Path("out_2.txt")): assert "Collected 3 tasks" in result.output -@pytest.mark.end_to_end def test_order_of_decorator_does_not_matter(tmp_path, runner): source = """ from pytask import task, mark @@ -311,7 
+298,6 @@ def task_example(produces=Path(f"out.txt")): assert "1 Skipped" in result.output -@pytest.mark.end_to_end def test_task_function_with_partialed_args(tmp_path, runner): source = """ import functools @@ -332,7 +318,6 @@ def func(produces, content): assert tmp_path.joinpath("out.txt").exists() -@pytest.mark.end_to_end def test_task_function_with_partialed_args_and_task_decorator(tmp_path, runner): source = """ from pytask import task @@ -355,7 +340,6 @@ def func(content): assert tmp_path.joinpath("out.txt").read_text() == "hello" -@pytest.mark.end_to_end def test_parametrized_tasks_without_arguments_in_signature(tmp_path, runner): """This happens when plugins replace the function with its own implementation. @@ -399,7 +383,6 @@ def task_example(produces=Path("out_2.txt")): assert "Collected 3 tasks" in result.output -@pytest.mark.end_to_end def test_that_dynamically_creates_tasks_are_captured(runner, tmp_path): source = """ from pytask import task @@ -423,7 +406,6 @@ def task_example(): assert "Collected 2 tasks" in result.output -@pytest.mark.end_to_end @pytest.mark.parametrize( "irregular_id", [1, (1,), [1], {1}, ["a"], list("abc"), ((1,), (2,)), ({0}, {1})] ) @@ -443,7 +425,6 @@ def task_example(): assert "Argument 'id' of @task" in result.output -@pytest.mark.end_to_end def test_raise_error_if_parametrization_produces_non_unique_tasks(tmp_path): source = """ from pytask import task @@ -460,7 +441,6 @@ def task_func(i=i): assert isinstance(session.collection_reports[0].exc_info[1], ValueError) -@pytest.mark.end_to_end def test_task_receives_unknown_kwarg(runner, tmp_path): source = """ from pytask import task @@ -473,7 +453,6 @@ def task_example(): pass assert result.exit_code == ExitCode.FAILED -@pytest.mark.end_to_end def test_task_receives_namedtuple(runner, tmp_path): source = """ from typing import Annotated, NamedTuple @@ -501,7 +480,6 @@ def task_example( assert tmp_path.joinpath("output.txt").read_text() == "Hello world!" -@pytest.mark.end_to_end def test_task_kwargs_overwrite_default_arguments(runner, tmp_path): source = """ from pytask import Product, task @@ -528,7 +506,6 @@ def task_example( assert not tmp_path.joinpath("not_used_out.txt").exists() -@pytest.mark.end_to_end @pytest.mark.parametrize( "node_def", ["PathNode(path=Path('file.txt'))", "Path('file.txt')"] ) @@ -548,7 +525,6 @@ def task_example(): assert tmp_path.joinpath("file.txt").read_text() == "Hello, World!" -@pytest.mark.end_to_end @pytest.mark.parametrize( "node_def", [ @@ -573,7 +549,6 @@ def task_example(): assert tmp_path.joinpath("file2.txt").read_text() == "World!" 
-@pytest.mark.end_to_end def test_error_when_function_is_defined_outside_loop_body(runner, tmp_path): source = """ from pathlib import Path @@ -593,7 +568,6 @@ def func(path: Annotated[Path, Product]): assert "id=None" in result.output -@pytest.mark.end_to_end def test_error_when_function_is_defined_outside_loop_body_with_id(runner, tmp_path): source = """ from pathlib import Path @@ -614,7 +588,6 @@ def func(path: Annotated[Path, Product]): assert "id=b.txt" in result.output -@pytest.mark.end_to_end def test_task_will_be_executed_after_another_one_with_string(runner, tmp_path): source = """ from pytask import task @@ -643,7 +616,6 @@ def task_first() -> Annotated[str, Path("out.txt")]: assert "1 Skipped because unchanged" in result.output -@pytest.mark.end_to_end @pytest.mark.parametrize("decorator", ["", "@task"]) def test_task_will_be_executed_after_another_one_with_function( runner, tmp_path, decorator @@ -667,7 +639,6 @@ def task_second(): assert result.exit_code == ExitCode.OK -@pytest.mark.end_to_end @pytest.mark.parametrize("decorator", ["", "@task"]) @pytest.mark.xfail( reason="Wrong python interpreter picked up in CI?", @@ -702,7 +673,6 @@ def task_second(): assert result.returncode == ExitCode.OK -@pytest.mark.end_to_end def test_raise_error_for_wrong_after_expression(runner, tmp_path): source = """ from pytask import task @@ -720,7 +690,6 @@ def task_example() -> Annotated[str, Path("out.txt")]: assert "Wrong expression passed to 'after'" in result.output -@pytest.mark.end_to_end def test_raise_error_with_builtin_function_as_task(runner, tmp_path): source = """ from pytask import task @@ -738,7 +707,6 @@ def test_raise_error_with_builtin_function_as_task(runner, tmp_path): assert "Builtin functions cannot be wrapped" in result.output -@pytest.mark.end_to_end def test_task_function_in_another_module(runner, tmp_path): source = """ def func(): diff --git a/tests/test_task_utils.py b/tests/test_task_utils.py index 9a80b1e2..d642a26c 100644 --- a/tests/test_task_utils.py +++ b/tests/test_task_utils.py @@ -16,7 +16,6 @@ from pytask import task -@pytest.mark.unit @pytest.mark.parametrize( ("arg_name", "arg_value", "i", "id_func", "expected"), [ @@ -47,7 +46,6 @@ class ExampleAttrs: b: str = "wonderful" -@pytest.mark.unit @pytest.mark.parametrize( ("kwargs", "expectation", "expected"), [ @@ -65,7 +63,6 @@ def test_parse_task_kwargs(kwargs, expectation, expected): assert result == expected -@pytest.mark.integration def test_default_values_of_pytask_meta(): @task() def task_example(): ... @@ -86,7 +83,6 @@ def task_func(x): # pragma: no cover pass -@pytest.mark.unit @pytest.mark.parametrize( ("func", "name", "expectation", "expected"), [ diff --git a/tests/test_traceback.py b/tests/test_traceback.py index 8e7708ae..ada8ea54 100644 --- a/tests/test_traceback.py +++ b/tests/test_traceback.py @@ -11,7 +11,6 @@ from pytask import console -@pytest.mark.end_to_end @pytest.mark.parametrize( ("value", "exception", "is_hidden"), [ @@ -46,14 +45,12 @@ def helper(): assert ("This variable should not be shown." in result.output) is not is_hidden -@pytest.mark.unit def test_render_traceback_with_string_traceback(): traceback = Traceback((Exception, Exception("Help"), "String traceback.")) rendered = render_to_string(traceback, console) assert "String traceback." 
in rendered -@pytest.mark.unit def test_passing_show_locals(): traceback = Traceback( (Exception, Exception("Help"), "String traceback."), show_locals=True diff --git a/tests/test_tree_util.py b/tests/test_tree_util.py index 3fab2e99..c2978923 100644 --- a/tests/test_tree_util.py +++ b/tests/test_tree_util.py @@ -13,7 +13,6 @@ from pytask.tree_util import tree_structure -@pytest.mark.end_to_end @pytest.mark.parametrize("arg_name", ["depends_on", "produces"]) def test_task_with_complex_product_did_not_produce_node(tmp_path, arg_name): source = f""" @@ -46,7 +45,6 @@ def task_example({arg_name}=complex): assert products == expected -@pytest.mark.end_to_end def test_profile_with_pytree(tmp_path, runner): source = """ import time @@ -75,7 +73,6 @@ def task_example( assert "86 bytes" in result.output -@pytest.mark.unit @pytest.mark.parametrize( ("prefix_tree", "full_tree", "strict", "expected"), [ diff --git a/tests/test_typing.py b/tests/test_typing.py index 51ca7051..55fe52b8 100644 --- a/tests/test_typing.py +++ b/tests/test_typing.py @@ -2,12 +2,9 @@ import functools -import pytest - from pytask import is_task_function -@pytest.mark.unit def test_is_task_function(): def func(): ... diff --git a/tests/test_warnings.py b/tests/test_warnings.py index ddf7cbbf..88110187 100644 --- a/tests/test_warnings.py +++ b/tests/test_warnings.py @@ -10,7 +10,6 @@ from tests.conftest import run_in_subprocess -@pytest.mark.end_to_end @pytest.mark.parametrize( "disable_warnings", [pytest.param(True, marks=pytest.mark.filterwarnings("ignore:warning!!!")), False], @@ -32,7 +31,6 @@ def task_example(): assert ("warning!!!" in result.output) is not disable_warnings -@pytest.mark.end_to_end @pytest.mark.parametrize( "disable_warnings", [pytest.param(True, marks=pytest.mark.filterwarnings("ignore:warning!!!")), False], @@ -56,7 +54,6 @@ def task_example(): assert "warning!!!" in session.warnings[0].message -@pytest.mark.end_to_end @pytest.mark.parametrize("add_marker", [False, True]) def test_disable_warnings_with_mark(tmp_path, runner, add_marker): decorator = "@pytask.mark.filterwarnings('ignore:warning!!!')" if add_marker else "" @@ -78,7 +75,6 @@ def task_example(): assert ("warning!!!" in result.output) is not add_marker -@pytest.mark.end_to_end @pytest.mark.parametrize( "disable_warnings", [pytest.param(True, marks=pytest.mark.filterwarnings("ignore:warning!!!")), False], @@ -102,7 +98,6 @@ def task_example(): assert ("warning!!!" in result.output) is not disable_warnings -@pytest.mark.end_to_end @pytest.mark.parametrize("add_config", [False, True]) def test_disable_warnings_with_config(tmp_path, runner, add_config): if add_config: @@ -125,7 +120,6 @@ def task_example(): assert ("warning!!!" in result.output) is not add_config -@pytest.mark.end_to_end @pytest.mark.parametrize("warning", ["DeprecationWarning", "PendingDeprecationWarning"]) def test_deprecation_warnings_are_not_captured(tmp_path, warning): path_to_warn_module = tmp_path.joinpath("warning.py") @@ -160,7 +154,6 @@ def warn_now(): assert "warning!!!" not in result.stdout -@pytest.mark.end_to_end def test_multiple_occurrences_of_warning_are_reduced(tmp_path, runner): source = """ import warnings @@ -183,7 +176,6 @@ def task_example(): assert result.output.count("task_example") in (30, 31) -@pytest.mark.end_to_end def test_collapsing_of_warnings(tmp_path, runner): source = """ import warnings @@ -201,7 +193,6 @@ def task_example(): assert "... 
in 1 more locations" in result.output -@pytest.mark.end_to_end def test_raise_error_when_filterwarnings_is_misspecified(tmp_path, runner): source = """ import warnings @@ -216,7 +207,6 @@ def task_example(): ... assert "arg is not a string" in result.output -@pytest.mark.end_to_end def test_wrong_value_in_config_in_filterwarnings(tmp_path, runner): tmp_path.joinpath("pyproject.toml").write_text( "[tool.pytask.ini_options]\nfilterwarnings = true"