diff --git a/README.rst b/README.rst
index a81b8322..ebdee082 100644
--- a/README.rst
+++ b/README.rst
@@ -75,8 +75,8 @@ directly in the right directory.
 With a Hash Library
 ^^^^^^^^^^^^^^^^^^^
 
-Instead of comparing to baseline images, you can instead compare against a json
-library of sha256 hashes. This has the advantage of not having to check baseline
+Instead of comparing to baseline images, you can compare against a JSON
+library of SHA-256 hashes. This has the advantage of not having to check baseline
 images into the repository with the tests, or download them from a remote
 source.
 
@@ -91,8 +91,11 @@ Hybrid Mode: Hashes and Images
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 It is possible to configure both hashes and baseline images. In this scenario
-the hashes will be compared first, with the baseline images only used if the hash
-comparison fails.
+only the hash comparison can determine the test result. If the hash comparison
+fails, the test will fail; however, a comparison to the baseline image will be
+carried out so the actual difference can be seen. If the hash comparison passes,
+the comparison to the baseline image is skipped (unless **results always** is
+configured).
 
 This is especially useful if the baseline images are external to the repository
 containing the tests, and are accessed via HTTP. In this situation, if the hashes
@@ -104,7 +107,7 @@ without having to modify the external images.
 Running Tests
 ^^^^^^^^^^^^^
 
-Once tests are written with either baseline images or a hash library to compare
+Once tests are written with baseline images, a hash library, or both to compare
 against, the tests can be run with::
 
     pytest --mpl
 
@@ -118,12 +121,15 @@ Generating a Test Summary
 ^^^^^^^^^^^^^^^^^^^^^^^^^
 
 By specifying the ``--mpl-generate-summary=html`` CLI argument, a HTML summary
-page will be generated showing the result, log entry and RMS of each test,
-and the hashes if configured. The baseline, diff and result image for each
-failing test will be shown. If **Results always** is configured
-(see section below), images for passing tests will also be shown.
-If no baseline images are configured, just the result images will
-be displayed.
+page will be generated showing the test result, log entry and generated result
+image. When in the (default) image comparison mode, the baseline image, diff
+image, RMS (if any) and tolerance of each test will also be shown.
+When in the hash comparison mode, the baseline hash and result hash will
+also be shown. When in hybrid mode, all of these are included.
+
+When generating an HTML summary, the ``--mpl-results-always`` option is
+automatically applied (see section below). Therefore, images for passing
+tests will also be shown.
 
 +---------------+---------------+---------------+
 | |html all|    | |html filter| | |html result| |
@@ -188,28 +194,36 @@ running tests by running ``pytest`` with::
 
     pytest --mpl --mpl-baseline-path=baseline_images
 
-This directory will be interpreted as being relative to where the tests
-are run. In addition, if both this option and the ``baseline_dir``
+This directory will be interpreted as being relative to where pytest
+is run. However, if the ``--mpl-baseline-relative`` option is also
+included, this directory will be interpreted as being relative to
+the current test directory.
+In addition, if both this option and the ``baseline_dir``
 option in the ``mpl_image_compare`` decorator are used, the one in the
 decorator takes precedence.
 
 Results always
 ^^^^^^^^^^^^^^
 
-By default, result images are only generated for tests that fail.
+By default, result images are only saved for tests that fail.
 Passing ``--mpl-results-always`` to pytest will force result images
-to be generated for all tests, even for tests that pass.
-If a baseline image exists, a diff image will also be generated.
-All of these images will be shown in the summary (if requested).
+to be saved for all tests, even for tests that pass.
+
+When in **hybrid mode**, even if a test passes hash comparison,
+a comparison to the baseline image will also be carried out,
+with the baseline image and diff image (if image comparison fails)
+saved for all tests. This secondary comparison will not affect
+the success status of the test.
 
-This option is useful for always *comparing* the result images again
+This option is useful for always *comparing* the result images against
 the baseline images, while only *assessing* the tests against the hash
 library. If you only update your baseline images after merging a PR,
 this option means that the generated summary will always show how the
 PR affects the baseline images, with the success status of each test
 (based on the hash library) also shown in the generated
-summary.
+summary. This option is applied automatically when generating
+an HTML summary.
 
 Base style
 ^^^^^^^^^^
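For reference, a minimal test module combining the options above might look like the following sketch (not part of the diff; ``baseline_dir`` and ``hash_library`` are the ``mpl_image_compare`` keyword arguments documented in this README, and the paths are placeholders)::

    # test_plots.py -- hybrid mode: the hash comparison decides the result,
    # and the image comparison is reported alongside it.
    import matplotlib.pyplot as plt
    import pytest

    @pytest.mark.mpl_image_compare(baseline_dir="baseline",
                                   hash_library="hashes.json")
    def test_line():
        fig, ax = plt.subplots()
        ax.plot([1, 2, 3])
        return fig

Running ``pytest --mpl`` then assesses the test against the hash library, with the baseline-image comparison appended to the report.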
diff --git a/pytest_mpl/plugin.py b/pytest_mpl/plugin.py
index 6efa6a42..d147e57c 100644
--- a/pytest_mpl/plugin.py
+++ b/pytest_mpl/plugin.py
@@ -140,7 +140,7 @@ def pytest_addoption(parser):
     group.addoption('--mpl-results-path', help=results_path_help, action='store')
     parser.addini('mpl-results-path', help=results_path_help)
 
-    results_always_help = ("Always generate result images, not just for failed tests. "
+    results_always_help = ("Always compare to baseline images and save result images, even for passing tests. "
                            "This option is automatically applied when generating a HTML summary.")
     group.addoption('--mpl-results-always', action='store_true',
                     help=results_always_help)
@@ -272,7 +272,7 @@ def __init__(self,
         if len(unsupported_formats) > 0:
             raise ValueError(f"The mpl summary type(s) '{sorted(unsupported_formats)}' "
                              "are not supported.")
-        # Ignore `results_always` and always save result images for HTML output
+        # When generating HTML, always apply `results_always`
         if generate_summary & {'html', 'basic-html'}:
             results_always = True
         self.generate_summary = generate_summary
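A standalone sketch of the configuration logic touched by the ``__init__`` hunk above, with illustrative values standing in for the parsed CLI options (``supported_formats`` is named here only for the example)::

    generate_summary = {'html', 'json'}  # parsed from --mpl-generate-summary
    supported_formats = {'html', 'json', 'basic-html'}

    unsupported_formats = generate_summary - supported_formats
    if len(unsupported_formats) > 0:
        raise ValueError(f"The mpl summary type(s) '{sorted(unsupported_formats)}' "
                         "are not supported.")

    # An HTML summary links the result images of every test, including passing
    # ones, so the plugin behaves as if --mpl-results-always were given.
    if generate_summary & {'html', 'basic-html'}:
        results_always = True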
" "This option is automatically applied when generating a HTML summary.") group.addoption('--mpl-results-always', action='store_true', help=results_always_help) @@ -272,7 +272,7 @@ def __init__(self, if len(unsupported_formats) > 0: raise ValueError(f"The mpl summary type(s) '{sorted(unsupported_formats)}' " "are not supported.") - # Ignore `results_always` and always save result images for HTML output + # When generating HTML always apply `results_always` if generate_summary & {'html', 'basic-html'}: results_always = True self.generate_summary = generate_summary @@ -431,7 +431,7 @@ def compare_image_to_baseline(self, item, fig, result_dir, summary=None): test_image = (result_dir / "result.png").absolute() fig.savefig(str(test_image), **savefig_kwargs) - summary['result_image'] = '%EXISTS%' + summary['result_image'] = test_image.relative_to(self.results_dir).as_posix() if not os.path.exists(baseline_image_ref): summary['status'] = 'failed' @@ -447,7 +447,7 @@ def compare_image_to_baseline(self, item, fig, result_dir, summary=None): # copy to our tmpdir to be sure to keep them in case of failure baseline_image = (result_dir / "baseline.png").absolute() shutil.copyfile(baseline_image_ref, baseline_image) - summary['baseline_image'] = '%EXISTS%' + summary['baseline_image'] = baseline_image.relative_to(self.results_dir).as_posix() # Compare image size ourselves since the Matplotlib # exception is a bit cryptic in this case and doesn't show @@ -472,7 +472,8 @@ def compare_image_to_baseline(self, item, fig, result_dir, summary=None): else: summary['status'] = 'failed' summary['rms'] = results['rms'] - summary['diff_image'] = '%EXISTS%' + diff_image = (result_dir / 'result-failed-diff.png').absolute() + summary['diff_image'] = diff_image.relative_to(self.results_dir).as_posix() template = ['Error: Image files did not match.', 'RMS Value: {rms}', 'Expected: \n {expected}', @@ -488,9 +489,7 @@ def load_hash_library(self, library_path): return json.load(fp) def compare_image_to_hash_library(self, item, fig, result_dir, summary=None): - new_test = False hash_comparison_pass = False - baseline_image_path = None if summary is None: summary = {} @@ -505,87 +504,58 @@ def compare_image_to_hash_library(self, item, fig, result_dir, summary=None): hash_library = self.load_hash_library(hash_library_filename) hash_name = self.generate_test_name(item) + baseline_hash = hash_library.get(hash_name, None) + summary['baseline_hash'] = baseline_hash test_hash = self.generate_image_hash(item, fig) summary['result_hash'] = test_hash - if hash_name not in hash_library: - new_test = True + if baseline_hash is None: # hash-missing summary['status'] = 'failed' - error_message = (f"Hash for test '{hash_name}' not found in {hash_library_filename}. " - f"Generated hash is {test_hash}.") - summary['status_msg'] = error_message - else: - summary['baseline_hash'] = hash_library[hash_name] + summary['status_msg'] = (f"Hash for test '{hash_name}' not found in {hash_library_filename}. " + f"Generated hash is {test_hash}.") + elif test_hash == baseline_hash: # hash-match + hash_comparison_pass = True + summary['status'] = 'passed' + summary['status_msg'] = 'Test hash matches baseline hash.' 
@@ -505,87 +504,58 @@
         hash_library = self.load_hash_library(hash_library_filename)
 
         hash_name = self.generate_test_name(item)
+        baseline_hash = hash_library.get(hash_name, None)
+        summary['baseline_hash'] = baseline_hash
 
         test_hash = self.generate_image_hash(item, fig)
         summary['result_hash'] = test_hash
 
-        if hash_name not in hash_library:
-            new_test = True
+        if baseline_hash is None:  # hash-missing
             summary['status'] = 'failed'
-            error_message = (f"Hash for test '{hash_name}' not found in {hash_library_filename}. "
-                             f"Generated hash is {test_hash}.")
-            summary['status_msg'] = error_message
-        else:
-            summary['baseline_hash'] = hash_library[hash_name]
+            summary['status_msg'] = (f"Hash for test '{hash_name}' not found in {hash_library_filename}. "
+                                     f"Generated hash is {test_hash}.")
+        elif test_hash == baseline_hash:  # hash-match
+            hash_comparison_pass = True
+            summary['status'] = 'passed'
+            summary['status_msg'] = 'Test hash matches baseline hash.'
+        else:  # hash-diff
+            summary['status'] = 'failed'
+            summary['status_msg'] = (f"Hash {test_hash} doesn't match hash "
+                                     f"{baseline_hash} in library "
+                                     f"{hash_library_filename} for test {hash_name}.")
 
         # Save the figure for later summary (will be removed later if not needed)
         test_image = (result_dir / "result.png").absolute()
         fig.savefig(str(test_image), **savefig_kwargs)
-        summary['result_image'] = '%EXISTS%'
+        summary['result_image'] = test_image.relative_to(self.results_dir).as_posix()
 
-        if not new_test:
-            if test_hash == hash_library[hash_name]:
-                hash_comparison_pass = True
-                summary['status'] = 'passed'
-                summary['status_msg'] = 'Test hash matches baseline hash.'
-            else:
-                error_message = (f"Hash {test_hash} doesn't match hash "
-                                 f"{hash_library[hash_name]} in library "
-                                 f"{hash_library_filename} for test {hash_name}.")
-                summary['status'] = 'failed'
-                summary['status_msg'] = 'Test hash does not match baseline hash.'
-
-        # If the compare has only been specified with hash and not baseline
-        # dir, don't attempt to find a baseline image at the default path.
-        if not hash_comparison_pass and not self.baseline_directory_specified(item) or new_test:
-            return error_message
+        # Hybrid mode (hash and image comparison)
+        if self.baseline_directory_specified(item):
-        # If this is not a new test try and get the baseline image.
-        if not new_test:
-            baseline_error = None
-            baseline_summary = {}
-            # Ignore Errors here as it's possible the reference image dosen't exist yet.
-            try:
-                baseline_image_path = self.obtain_baseline_image(item, result_dir)
-                baseline_image = baseline_image_path
-                if baseline_image and not baseline_image.exists():
-                    baseline_image = None
-                # Get the baseline and generate a diff image, always so that
-                # --mpl-results-always can be respected.
+            # Skip image comparison if hash matches (unless `--mpl-results-always`)
+            if hash_comparison_pass and not self.results_always:
+                return
+
+            # Run image comparison
+            baseline_summary = {}  # summary for image comparison to merge with hash comparison summary
+            try:  # Ignore all errors as success does not influence the overall test result
                 baseline_comparison = self.compare_image_to_baseline(item, fig, result_dir,
                                                                      summary=baseline_summary)
-            except Exception as e:
-                baseline_image = None
-                baseline_error = e
-            for k in ['baseline_image', 'diff_image', 'rms', 'tolerance', 'result_image']:
-                summary[k] = summary[k] or baseline_summary.get(k)
-
-        # If the hash comparison passes then return
-        if hash_comparison_pass:
+            except Exception as baseline_error:  # Append to test error later
+                baseline_comparison = str(baseline_error)
+            else:  # Update main summary
+                for k in ['baseline_image', 'diff_image', 'rms', 'tolerance', 'result_image']:
+                    summary[k] = summary[k] or baseline_summary.get(k)
+
+            # Append the log from image comparison
+            r = baseline_comparison or "The comparison to the baseline image succeeded."
+            summary['status_msg'] += ("\n\n"
+                                      "Image comparison test\n"
+                                      "---------------------\n") + r
+
+        if hash_comparison_pass:  # Return None to indicate test passed
             return
-
-        if baseline_image is None:
-            error_message += f"\nUnable to find baseline image for {item}."
-        if baseline_error:
-            error_message += f"\n{baseline_error}"
-            summary['status'] = 'failed'
-            summary['status_msg'] = error_message
-            return error_message
-
-        summary['baseline_image'] = '%EXISTS%'
-
-        # Override the tolerance (if not explicitly set) to 0 as the hashes are not forgiving
-        tolerance = compare.kwargs.get('tolerance', None)
-        if not tolerance:
-            compare.kwargs['tolerance'] = 0
-
-        comparison_error = (baseline_comparison or
-                            "\nHowever, the comparison to the baseline image succeeded.")
-
-        error_message = f"{error_message}\n{comparison_error}"
-        summary['status'] = 'failed'
-        summary['status_msg'] = error_message
-        return error_message
+        return summary['status_msg']
 
     def pytest_runtest_setup(self, item):  # noqa
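The three-way branch above (hash-missing, hash-match, hash-diff) compares SHA-256 hashes produced by ``generate_image_hash``, which this diff leaves unchanged. As a rough, assumed sketch of what producing such a hash involves, the figure is rendered to an in-memory buffer and the resulting bytes are hashed::

    import hashlib
    import io

    import matplotlib.pyplot as plt

    def sha256_of_figure(fig):
        """Hash the rendered PNG bytes of a Matplotlib figure (illustrative only)."""
        buf = io.BytesIO()
        fig.savefig(buf, format='png')
        return hashlib.sha256(buf.getvalue()).hexdigest()

    fig, ax = plt.subplots()
    ax.plot([1, 2, 3])
    print(sha256_of_figure(fig))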
@@ -673,7 +643,7 @@ def item_function_wrapper(*args, **kwargs):
                 if not self.results_always:
                     shutil.rmtree(result_dir)
                     for image_type in ['baseline_image', 'diff_image', 'result_image']:
-                        summary[image_type] = None  # image no longer %EXISTS%
+                        summary[image_type] = None  # image no longer exists
             else:
                 self._test_results[str(pathify(test_name))] = summary
                 pytest.fail(msg, pytrace=False)
@@ -704,21 +674,6 @@ def pytest_unconfigure(self, config):
                 json.dump(self._generated_hash_library, fp, indent=2)
 
         if self.generate_summary:
-            # Generate a list of test directories
-            dir_list = [p.relative_to(self.results_dir)
-                        for p in self.results_dir.iterdir() if p.is_dir()]
-
-            # Resolve image paths
-            for directory in dir_list:
-                test_name = directory.parts[-1]
-                for image_type, filename in [
-                    ('baseline_image', 'baseline.png'),
-                    ('diff_image', 'result-failed-diff.png'),
-                    ('result_image', 'result.png'),
-                ]:
-                    if self._test_results[test_name][image_type] == '%EXISTS%':
-                        self._test_results[test_name][image_type] = str(directory / filename)
-
             if 'json' in self.generate_summary:
                 summary = self.generate_summary_json()
                 print(f"A JSON report can be found at: {summary}")
diff --git a/pytest_mpl/summary/templates/filter.html b/pytest_mpl/summary/templates/filter.html
index 2cf18da6..3cd8432f 100644
--- a/pytest_mpl/summary/templates/filter.html
+++ b/pytest_mpl/summary/templates/filter.html
@@ -16,7 +16,9 @@