diff --git a/.azure-pipelines/windows.yml b/.azure-pipelines/windows.yml index fcdd542223..970a3b788d 100644 --- a/.azure-pipelines/windows.yml +++ b/.azure-pipelines/windows.yml @@ -36,24 +36,6 @@ jobs: python -m pip install .[$(CHECK_TYPE)] SET NIBABEL_DATA_DIR=%CD%\\nibabel-data displayName: 'Install nibabel' - - script: | - mkdir for_testing - cd for_testing - cp ../.coveragerc . - nosetests --with-doctest --with-coverage --cover-package nibabel nibabel ^ - -I test_data ^ - -I test_environment ^ - -I test_euler ^ - -I test_giftiio ^ - -I test_netcdf ^ - -I test_pkg_info ^ - -I test_quaternions ^ - -I test_scaling ^ - -I test_scripts ^ - -I test_spaces ^ - -I test_testing - displayName: 'Nose tests' - condition: and(succeeded(), eq(variables['CHECK_TYPE'], 'nosetests')) - script: | mkdir for_testing cd for_testing diff --git a/.travis.yml b/.travis.yml index 9ca407a757..88bd146c14 100644 --- a/.travis.yml +++ b/.travis.yml @@ -27,10 +27,6 @@ python: jobs: include: - # Old nosetests - Remove soon - - python: 3.7 - env: - - CHECK_TYPE="nosetests" # Basic dependencies only - python: 3.5 env: @@ -127,23 +123,6 @@ script: cd doc make html; make doctest; - elif [ "${CHECK_TYPE}" == "nosetests" ]; then - # Change into an innocuous directory and find tests from installation - mkdir for_testing - cd for_testing - cp ../.coveragerc . - nosetests --with-doctest --with-coverage --cover-package nibabel nibabel \ - -I test_data \ - -I test_environment \ - -I test_euler \ - -I test_giftiio \ - -I test_netcdf \ - -I test_pkg_info \ - -I test_quaternions \ - -I test_scaling \ - -I test_scripts \ - -I test_spaces \ - -I test_testing elif [ "${CHECK_TYPE}" == "test" ]; then # Change into an innocuous directory and find tests from installation mkdir for_testing diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 2ef2539c74..d09c5b7740 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -34,7 +34,3 @@ jobs: py38-x64: PYTHON_VERSION: '3.8' PYTHON_ARCH: 'x64' - nosetests: - PYTHON_VERSION: '3.6' - PYTHON_ARCH: 'x64' - CHECK_TYPE: 'nosetests' diff --git a/dev-requirements.txt b/dev-requirements.txt index aa0980c3b4..69302061bc 100644 --- a/dev-requirements.txt +++ b/dev-requirements.txt @@ -1,4 +1,3 @@ # Requirements for running tests -r requirements.txt -nose pytest diff --git a/doc/source/devel/advanced_testing.rst b/doc/source/devel/advanced_testing.rst index 0dc365ea1d..77b6522cb1 100644 --- a/doc/source/devel/advanced_testing.rst +++ b/doc/source/devel/advanced_testing.rst @@ -25,7 +25,7 @@ Long-running tests Long-running tests are not enabled by default, and can be resource-intensive. To run these tests: * Set environment variable ``NIPY_EXTRA_TESTS=slow``; -* Run ``nosetests``. +* Run ``pytest nibabel``. Note that some tests may require a machine with >4GB of RAM. 
diff --git a/doc/source/devel/make_release.rst b/doc/source/devel/make_release.rst index 25db5210b7..6a09d280b2 100644 --- a/doc/source/devel/make_release.rst +++ b/doc/source/devel/make_release.rst @@ -79,7 +79,7 @@ Release checklist * Make sure all tests pass (from the nibabel root directory):: - nosetests --with-doctest nibabel + pytest --doctest-modules nibabel * Make sure you are set up to use the ``try_branch.py`` - see https://github.com/nipy/nibotmi/blob/master/install.rst#trying-a-set-of-changes-on-the-buildbots diff --git a/doc/source/installation.rst b/doc/source/installation.rst index ed390578ff..fe02bcdbf2 100644 --- a/doc/source/installation.rst +++ b/doc/source/installation.rst @@ -90,7 +90,7 @@ Requirements * h5py_ (optional, for MINC2 support) * PyDICOM_ 0.9.9 or greater (optional, for DICOM support) * `Python Imaging Library`_ (optional, for PNG conversion in DICOMFS) -* nose_ 0.11 or greater and pytest_ (optional, to run the tests) +* pytest_ (optional, to run the tests) * sphinx_ (optional, to build the documentation) Get the development sources @@ -128,7 +128,7 @@ module to see if everything is fine. It should look something like this:: >>> -To run the nibabel test suite, from the terminal run ``nosetests nibabel`` or +To run the nibabel test suite, from the terminal run ``pytest nibabel`` or ``python -c "import nibabel; nibabel.test()``. To run an extended test suite that validates ``nibabel`` for long-running and diff --git a/nibabel/__init__.py b/nibabel/__init__.py index 2d3428289c..7c096a0033 100644 --- a/nibabel/__init__.py +++ b/nibabel/__init__.py @@ -36,29 +36,6 @@ For more detailed information see the :ref:`manual`. """ -# Package-wide test setup and teardown -_test_states = { - # Numpy changed print options in 1.14; we can update docstrings and remove - # these when our minimum for building docs exceeds that - 'legacy_printopt': None, - } - -def setup_package(): - """ Set numpy print style to legacy="1.13" for newer versions of numpy """ - import numpy as np - from distutils.version import LooseVersion - if LooseVersion(np.__version__) >= LooseVersion('1.14'): - if _test_states.get('legacy_printopt') is None: - _test_states['legacy_printopt'] = np.get_printoptions().get('legacy') - np.set_printoptions(legacy="1.13") - -def teardown_package(): - """ Reset print options when tests finish """ - import numpy as np - if _test_states.get('legacy_printopt') is not None: - np.set_printoptions(legacy=_test_states.pop('legacy_printopt')) - - # module imports from . import analyze as ana from . import spm99analyze as spm99 @@ -92,13 +69,105 @@ def teardown_package(): from . import streamlines from . import viewers -from numpy.testing import Tester -test = Tester().test -bench = Tester().bench -del Tester - from .pkg_info import get_pkg_info as _get_pkg_info def get_info(): return _get_pkg_info(os.path.dirname(__file__)) + + +def test(label=None, verbose=1, extra_argv=None, + doctests=False, coverage=False, raise_warnings=None, + timer=False): + """ + Run tests for nibabel using pytest + + The protocol mimics the ``numpy.testing.NoseTester.test()``. + Not all features are currently implemented. + + Parameters + ---------- + label : None + Unused. + verbose: int, optional + Verbosity value for test outputs. Positive values increase verbosity, and + negative values decrease it. Default is 1. + extra_argv : list, optional + List with any extra arguments to pass to pytest. + doctests: bool, optional + If True, run doctests in module. Default is False. 
+    coverage: bool, optional
+        If True, report coverage of nibabel code. Default is False.
+        (This requires the
+        ``coverage`` module).
+    raise_warnings : None
+        Unused.
+    timer : False
+        Unused.
+
+    Returns
+    -------
+    code : ExitCode
+        Returns the result of running the tests as a ``pytest.ExitCode`` enum
+    """
+    import pytest
+    args = []
+
+    if label is not None:
+        raise NotImplementedError("Labels cannot be set at present")
+
+    try:
+        verbose = int(verbose)
+    except ValueError:
+        pass
+    else:
+        if verbose > 0:
+            args.append("-" + "v" * verbose)
+        elif verbose < 0:
+            args.append("-" + "q" * -verbose)
+
+    if extra_argv:
+        args.extend(extra_argv)
+    if doctests:
+        args.append("--doctest-modules")
+    if coverage:
+        args.extend(["--cov", "nibabel"])
+    if raise_warnings:
+        raise NotImplementedError("Warning filters are not implemented")
+    if timer:
+        raise NotImplementedError("Timing is not implemented")
+
+    args.extend(["--pyargs", "nibabel"])
+
+    return pytest.main(args=args)
+
+
+def bench(label=None, verbose=1, extra_argv=None):
+    """
+    Run benchmarks for nibabel using pytest
+
+    The protocol mimics the ``numpy.testing.NoseTester.bench()``.
+    Not all features are currently implemented.
+
+    Parameters
+    ----------
+    label : None
+        Unused.
+    verbose: int, optional
+        Verbosity value for test outputs. Positive values increase verbosity, and
+        negative values decrease it. Default is 1.
+    extra_argv : list, optional
+        List with any extra arguments to pass to pytest.
+
+    Returns
+    -------
+    code : ExitCode
+        Returns the result of running the tests as a ``pytest.ExitCode`` enum
+    """
+    from pkg_resources import resource_filename
+    config = resource_filename("nibabel", "benchmarks/pytest.benchmark.ini")
+    args = []
+    if extra_argv is not None:
+        args.extend(extra_argv)
+    args.extend(["-c", config])
+    return test(label, verbose, extra_argv=args)
diff --git a/nibabel/benchmarks/bench_array_to_file.py b/nibabel/benchmarks/bench_array_to_file.py
index 4908848685..776a93000c 100644
--- a/nibabel/benchmarks/bench_array_to_file.py
+++ b/nibabel/benchmarks/bench_array_to_file.py
@@ -5,13 +5,9 @@
     import nibabel as nib
     nib.bench()
 
-If you have doctests enabled by default in nose (with a noserc file or
-environment variable), and you have a numpy version <= 1.6.1, this will also
-run the doctests, let's hope they pass.
+Run this benchmark with::
 
-Run this benchmark with:
-
-    nosetests -s --match '(?:^|[\\b_\\.//-])[Bb]ench' /path/to/bench_load_save.py
+    pytest -c nibabel/benchmarks/pytest.benchmark.ini nibabel/benchmarks/bench_array_to_file.py
 
 """
 import sys
diff --git a/nibabel/benchmarks/bench_arrayproxy_slicing.py b/nibabel/benchmarks/bench_arrayproxy_slicing.py
index 7fe79763d0..2ed9ec9ccd 100644
--- a/nibabel/benchmarks/bench_arrayproxy_slicing.py
+++ b/nibabel/benchmarks/bench_arrayproxy_slicing.py
@@ -5,13 +5,9 @@
     import nibabel as nib
     nib.bench()
 
-If you have doctests enabled by default in nose (with a noserc file or
-environment variable), and you have a numpy version <= 1.6.1, this will also
-run the doctests, let's hope they pass.
+Run this benchmark with::
 
-Run this benchmark with:
-
-    nosetests -s --match '(?:^|[\\b_\\.//-])[Bb]ench' /path/to/bench_arrayproxy_slicing.py
+    pytest -c nibabel/benchmarks/pytest.benchmark.ini nibabel/benchmarks/bench_arrayproxy_slicing.py
 
 """
 from timeit import timeit
diff --git a/nibabel/benchmarks/bench_fileslice.py b/nibabel/benchmarks/bench_fileslice.py
index 764e0390b5..8763784dc6 100644
--- a/nibabel/benchmarks/bench_fileslice.py
+++ b/nibabel/benchmarks/bench_fileslice.py
@@ -3,13 +3,9 @@
     import nibabel as nib
     nib.bench()
 
-If you have doctests enabled by default in nose (with a noserc file or
-environment variable), and you have a numpy version <= 1.6.1, this will also
-run the doctests, let's hope they pass.
+Run this benchmark with::
 
-Run this benchmark with:
-
-    nosetests -s --match '(?:^|[\\b_\\.//-])[Bb]ench' /path/to/bench_fileslice.py
+    pytest -c nibabel/benchmarks/pytest.benchmark.ini nibabel/benchmarks/bench_fileslice.py
 
 """
 import sys
diff --git a/nibabel/benchmarks/bench_finite_range.py b/nibabel/benchmarks/bench_finite_range.py
index 6aa9d9d861..1ca2bf95d0 100644
--- a/nibabel/benchmarks/bench_finite_range.py
+++ b/nibabel/benchmarks/bench_finite_range.py
@@ -5,13 +5,9 @@
     import nibabel as nib
     nib.bench()
 
-If you have doctests enabled by default in nose (with a noserc file or
-environment variable), and you have a numpy version <= 1.6.1, this will also
-run the doctests, let's hope they pass.
+Run this benchmark with::
 
-Run this benchmark with:
-
-    nosetests -s --match '(?:^|[\\b_\\.//-])[Bb]ench' /path/to/bench_finite_range
+    pytest -c nibabel/benchmarks/pytest.benchmark.ini nibabel/benchmarks/bench_finite_range.py
 
 """
 import sys
diff --git a/nibabel/benchmarks/bench_load_save.py b/nibabel/benchmarks/bench_load_save.py
index 59198eac1a..46118df43e 100644
--- a/nibabel/benchmarks/bench_load_save.py
+++ b/nibabel/benchmarks/bench_load_save.py
@@ -5,13 +5,9 @@
     import nibabel as nib
     nib.bench()
 
-If you have doctests enabled by default in nose (with a noserc file or
-environment variable), and you have a numpy version <= 1.6.1, this will also
-run the doctests, let's hope they pass.
+Run this benchmark with::
 
-Run this benchmark with:
-
-    nosetests -s --match '(?:^|[\\b_\\.//-])[Bb]ench' /path/to/bench_load_save.py
+    pytest -c nibabel/benchmarks/pytest.benchmark.ini nibabel/benchmarks/bench_load_save.py
 
 """
 import sys
diff --git a/nibabel/benchmarks/bench_streamlines.py b/nibabel/benchmarks/bench_streamlines.py
index fc1e39f8ad..5c49c9e177 100644
--- a/nibabel/benchmarks/bench_streamlines.py
+++ b/nibabel/benchmarks/bench_streamlines.py
@@ -5,13 +5,9 @@
     import nibabel as nib
     nib.bench()
 
-If you have doctests enabled by default in nose (with a noserc file or
-environment variable), and you have a numpy version <= 1.6.1, this will also run
-the doctests, let's hope they pass.
+Run this benchmark with::
 
-Run this benchmark with:
-
-    nosetests -s --match '(?:^|[\\b_\\.//-])[Bb]ench' /path/to/bench_streamlines.py
+    pytest -c nibabel/benchmarks/pytest.benchmark.ini nibabel/benchmarks/bench_streamlines.py
 
 """
 import numpy as np
diff --git a/nibabel/benchmarks/pytest.benchmark.ini b/nibabel/benchmarks/pytest.benchmark.ini
new file mode 100644
index 0000000000..734e6c7d4c
--- /dev/null
+++ b/nibabel/benchmarks/pytest.benchmark.ini
@@ -0,0 +1,4 @@
+[pytest]
+python_files = bench_*.py
+python_functions = bench_*
+addopts = --capture=no
diff --git a/nibabel/cmdline/tests/test_utils.py b/nibabel/cmdline/tests/test_utils.py
index eb864c62c0..460f0d40d6 100644
--- a/nibabel/cmdline/tests/test_utils.py
+++ b/nibabel/cmdline/tests/test_utils.py
@@ -5,8 +5,6 @@
 Test running scripts
 """
 
-from numpy.testing import assert_raises
-
 import pytest
 
 import nibabel as nib
@@ -196,10 +194,10 @@ def test_main():
                                             -7.24879837e+00]).astype(dtype="float32")]),
                                    ('DATA(md5)', ['0a2576dd6badbb25bfb3b12076df986b',
                                                   'b0abbc492b4fd533b2c80d82570062cf'])])
-    with assert_raises(SystemExit):
+    with pytest.raises(SystemExit):
         np.testing.assert_equal(main(test_names, StringIO()), expected_difference)
 
     test_names_2 = [pjoin(data_path, f) for f in ('standard.nii.gz', 'standard.nii.gz')]
 
-    with assert_raises(SystemExit):
+    with pytest.raises(SystemExit):
         assert main(test_names_2, StringIO()) == "These files are identical."
diff --git a/nibabel/nicom/tests/test_dwiparams.py b/nibabel/nicom/tests/test_dwiparams.py
index 3b02367951..d0d20e574a 100644
--- a/nibabel/nicom/tests/test_dwiparams.py
+++ b/nibabel/nicom/tests/test_dwiparams.py
@@ -6,10 +6,9 @@
 
 from ..dwiparams import B2q, q2bg
 
-from nose.tools import (assert_true, assert_false, assert_equal, assert_raises)
+import pytest
 
-from numpy.testing import (assert_array_equal, assert_array_almost_equal,
-                           assert_equal as np_assert_equal)
+from numpy.testing import (assert_array_almost_equal, assert_equal as np_assert_equal)
 
 
 def test_b2q():
@@ -27,17 +26,20 @@ def test_b2q():
         assert_array_almost_equal(-q * s, B2q(B))
     # Massive negative eigs
     B = np.eye(3) * -1
-    assert_raises(ValueError, B2q, B)
+    with pytest.raises(ValueError):
+        B2q(B)
     # no error if we up the tolerance
     q = B2q(B, tol=1)
     # Less massive negativity, dropping tol
     B = np.diag([-1e-14, 10., 1])
-    assert_raises(ValueError, B2q, B)
+    with pytest.raises(ValueError):
+        B2q(B)
     assert_array_almost_equal(B2q(B, tol=5e-13), [0, 10, 0])
     # Confirm that we assume symmetric
     B = np.eye(3)
     B[0, 1] = 1e-5
-    assert_raises(ValueError, B2q, B)
+    with pytest.raises(ValueError):
+        B2q(B)
 
 
 def test_q2bg():
diff --git a/nibabel/streamlines/tests/test_streamlines.py b/nibabel/streamlines/tests/test_streamlines.py
index 2e537c63f2..9a2f803117 100644
--- a/nibabel/streamlines/tests/test_streamlines.py
+++ b/nibabel/streamlines/tests/test_streamlines.py
@@ -10,9 +10,7 @@
 from nibabel.tmpdirs import InTemporaryDirectory
 from numpy.compat.py3k import asbytes
 
-from nibabel.testing import data_path
-from nibabel.testing import clear_and_catch_warnings
-from nose.tools import assert_equal, assert_raises, assert_true, assert_false
+from nibabel.testing import data_path, clear_and_catch_warnings
 
 from .test_tractogram import assert_tractogram_equal
 from ..tractogram import Tractogram, LazyTractogram
@@ -82,50 +80,50 @@ def test_is_supported_detect_format():
     # Test is_supported and detect_format functions
     # Empty file/string
     f = BytesIO()
-    assert_false(nib.streamlines.is_supported(f))
-    assert_false(nib.streamlines.is_supported(""))
-
assert_true(nib.streamlines.detect_format(f) is None) - assert_true(nib.streamlines.detect_format("") is None) + assert not nib.streamlines.is_supported(f) + assert not nib.streamlines.is_supported("") + assert nib.streamlines.detect_format(f) is None + assert nib.streamlines.detect_format("") is None # Valid file without extension for tfile_cls in FORMATS.values(): f = BytesIO() f.write(asbytes(tfile_cls.MAGIC_NUMBER)) f.seek(0, os.SEEK_SET) - assert_true(nib.streamlines.is_supported(f)) - assert_true(nib.streamlines.detect_format(f) is tfile_cls) + assert nib.streamlines.is_supported(f) + assert nib.streamlines.detect_format(f) is tfile_cls # Wrong extension but right magic number for tfile_cls in FORMATS.values(): with tempfile.TemporaryFile(mode="w+b", suffix=".txt") as f: f.write(asbytes(tfile_cls.MAGIC_NUMBER)) f.seek(0, os.SEEK_SET) - assert_true(nib.streamlines.is_supported(f)) - assert_true(nib.streamlines.detect_format(f) is tfile_cls) + assert nib.streamlines.is_supported(f) + assert nib.streamlines.detect_format(f) is tfile_cls # Good extension but wrong magic number for ext, tfile_cls in FORMATS.items(): with tempfile.TemporaryFile(mode="w+b", suffix=ext) as f: f.write(b"pass") f.seek(0, os.SEEK_SET) - assert_false(nib.streamlines.is_supported(f)) - assert_true(nib.streamlines.detect_format(f) is None) + assert not nib.streamlines.is_supported(f) + assert nib.streamlines.detect_format(f) is None # Wrong extension, string only f = "my_tractogram.asd" - assert_false(nib.streamlines.is_supported(f)) - assert_true(nib.streamlines.detect_format(f) is None) + assert not nib.streamlines.is_supported(f) + assert nib.streamlines.detect_format(f) is None # Good extension, string only for ext, tfile_cls in FORMATS.items(): f = "my_tractogram" + ext - assert_true(nib.streamlines.is_supported(f)) - assert_equal(nib.streamlines.detect_format(f), tfile_cls) + assert nib.streamlines.is_supported(f) + assert nib.streamlines.detect_format(f) == tfile_cls # Extension should not be case-sensitive. 
for ext, tfile_cls in FORMATS.items(): f = "my_tractogram" + ext.upper() - assert_true(nib.streamlines.detect_format(f) is tfile_cls) + assert nib.streamlines.detect_format(f) is tfile_cls class TestLoadSave(unittest.TestCase): @@ -135,12 +133,12 @@ def test_load_empty_file(self): for empty_filename in DATA['empty_filenames']: tfile = nib.streamlines.load(empty_filename, lazy_load=lazy_load) - assert_true(isinstance(tfile, TractogramFile)) + assert isinstance(tfile, TractogramFile) if lazy_load: - assert_true(type(tfile.tractogram), Tractogram) + assert type(tfile.tractogram), Tractogram else: - assert_true(type(tfile.tractogram), LazyTractogram) + assert type(tfile.tractogram), LazyTractogram assert_tractogram_equal(tfile.tractogram, DATA['empty_tractogram']) @@ -150,12 +148,12 @@ def test_load_simple_file(self): for simple_filename in DATA['simple_filenames']: tfile = nib.streamlines.load(simple_filename, lazy_load=lazy_load) - assert_true(isinstance(tfile, TractogramFile)) + assert isinstance(tfile, TractogramFile) if lazy_load: - assert_true(type(tfile.tractogram), Tractogram) + assert type(tfile.tractogram), Tractogram else: - assert_true(type(tfile.tractogram), LazyTractogram) + assert type(tfile.tractogram), LazyTractogram assert_tractogram_equal(tfile.tractogram, DATA['simple_tractogram']) @@ -165,12 +163,12 @@ def test_load_complex_file(self): for complex_filename in DATA['complex_filenames']: tfile = nib.streamlines.load(complex_filename, lazy_load=lazy_load) - assert_true(isinstance(tfile, TractogramFile)) + assert isinstance(tfile, TractogramFile) if lazy_load: - assert_true(type(tfile.tractogram), Tractogram) + assert type(tfile.tractogram), Tractogram else: - assert_true(type(tfile.tractogram), LazyTractogram) + assert type(tfile.tractogram), LazyTractogram tractogram = Tractogram(DATA['streamlines'], affine_to_rasmm=np.eye(4)) @@ -191,19 +189,19 @@ def test_save_tractogram_file(self): trk_file = trk.TrkFile(tractogram) # No need for keyword arguments. - assert_raises(ValueError, nib.streamlines.save, - trk_file, "dummy.trk", header={}) + with self.assertRaises(ValueError): + nib.streamlines.save(trk_file, "dummy.trk", header={}) # Wrong extension. 
with clear_and_catch_warnings(record=True, modules=[nib.streamlines]) as w: trk_file = trk.TrkFile(tractogram) - assert_raises(ValueError, nib.streamlines.save, - trk_file, "dummy.tck", header={}) + with self.assertRaises(ValueError): + nib.streamlines.save(trk_file, "dummy.tck", header={}) - assert_equal(len(w), 1) - assert_true(issubclass(w[0].category, ExtensionWarning)) - assert_true("extension" in str(w[0].message)) + assert len(w) == 1 + assert issubclass(w[0].category, ExtensionWarning) + assert "extension" in str(w[0].message) with InTemporaryDirectory(): nib.streamlines.save(trk_file, "dummy.trk") @@ -250,9 +248,9 @@ def test_save_complex_file(self): ((not cls.SUPPORTS_DATA_PER_POINT) + (not cls.SUPPORTS_DATA_PER_STREAMLINE)) - assert_equal(len(w), nb_expected_warnings) + assert len(w) == nb_expected_warnings for i in range(nb_expected_warnings): - assert_true(issubclass(w[i].category, Warning)) + assert issubclass(w[i].category, Warning) tractogram = Tractogram(DATA['streamlines'], affine_to_rasmm=np.eye(4)) @@ -281,10 +279,12 @@ def test_save_sliced_tractogram(self): assert_tractogram_equal(tractogram, original_tractogram) def test_load_unknown_format(self): - assert_raises(ValueError, nib.streamlines.load, "") + with self.assertRaises(ValueError): + nib.streamlines.load("") def test_save_unknown_format(self): - assert_raises(ValueError, nib.streamlines.save, Tractogram(), "") + with self.assertRaises(ValueError): + nib.streamlines.save(Tractogram(), "") def test_save_from_generator(self): tractogram = Tractogram(DATA['streamlines'], diff --git a/nibabel/streamlines/tests/test_tractogram_file.py b/nibabel/streamlines/tests/test_tractogram_file.py index da5bce4b3f..2550ecf03d 100644 --- a/nibabel/streamlines/tests/test_tractogram_file.py +++ b/nibabel/streamlines/tests/test_tractogram_file.py @@ -4,7 +4,7 @@ from ..tractogram import Tractogram from ..tractogram_file import TractogramFile -from nose.tools import assert_raises, assert_equal +import pytest def test_subclassing_tractogram_file(): @@ -23,7 +23,8 @@ def load(cls, fileobj, lazy_load=True): def create_empty_header(cls): return None - assert_raises(TypeError, DummyTractogramFile, Tractogram()) + with pytest.raises(TypeError): + DummyTractogramFile(Tractogram()) # Missing 'load' method class DummyTractogramFile(TractogramFile): @@ -38,7 +39,8 @@ def save(self, fileobj): def create_empty_header(cls): return None - assert_raises(TypeError, DummyTractogramFile, Tractogram()) + with pytest.raises(TypeError): + DummyTractogramFile(Tractogram()) # Now we have everything required. class DummyTractogramFile(TractogramFile): @@ -57,12 +59,14 @@ def save(self, fileobj): dtf = DummyTractogramFile(Tractogram()) # Default create_empty_header is empty dict - assert_equal(dtf.header, {}) + assert dtf.header == {} def test_tractogram_file(): - assert_raises(NotImplementedError, TractogramFile.is_correct_format, "") - assert_raises(NotImplementedError, TractogramFile.load, "") + with pytest.raises(NotImplementedError): + TractogramFile.is_correct_format("") + with pytest.raises(NotImplementedError): + TractogramFile.load("") # Testing calling the 'save' method of `TractogramFile` object. 
class DummyTractogramFile(TractogramFile): @@ -78,6 +82,5 @@ def load(cls, fileobj, lazy_load=True): def save(self, fileobj): pass - assert_raises(NotImplementedError, - super(DummyTractogramFile, - DummyTractogramFile(Tractogram)).save, "") + with pytest.raises(NotImplementedError): + super(DummyTractogramFile, DummyTractogramFile(Tractogram)).save("") diff --git a/nibabel/streamlines/tests/test_utils.py b/nibabel/streamlines/tests/test_utils.py index 939ee9bb9e..bcdde6d013 100644 --- a/nibabel/streamlines/tests/test_utils.py +++ b/nibabel/streamlines/tests/test_utils.py @@ -4,7 +4,8 @@ from nibabel.testing import data_path from numpy.testing import assert_array_equal -from nose.tools import assert_raises + +import pytest from ..utils import get_affine_from_reference @@ -17,7 +18,8 @@ def test_get_affine_from_reference(): # Get affine from an numpy array. assert_array_equal(get_affine_from_reference(affine), affine) wrong_ref = np.array([[1, 2, 3], [4, 5, 6]]) - assert_raises(ValueError, get_affine_from_reference, wrong_ref) + with pytest.raises(ValueError): + get_affine_from_reference(wrong_ref) # Get affine from a `SpatialImage`. assert_array_equal(get_affine_from_reference(img), affine) diff --git a/nibabel/tests/test_wrapstruct.py b/nibabel/tests/test_wrapstruct.py index 883c0ec147..d56873d414 100644 --- a/nibabel/tests/test_wrapstruct.py +++ b/nibabel/tests/test_wrapstruct.py @@ -476,14 +476,14 @@ def test_log_checks(self): assert fhdr['an_integer'] == 1 assert (message == 'an_integer should be 1; set an_integer to 1') - assert_raises(*raiser) + pytest.raises(*raiser) # lower case string hdr = HC() hdr['a_str'] = 'Hello' # severity = 20 fhdr, message, raiser = self.log_chk(hdr, 20) assert (message == 'a_str should be lower case; ' 'set a_str to lower case') - assert_raises(*raiser) + pytest.raises(*raiser) def test_logger_error(self): # Check that we can reset the logger and error level diff --git a/setup.cfg b/setup.cfg index 13edca59ab..a180f71c8d 100644 --- a/setup.cfg +++ b/setup.cfg @@ -55,13 +55,8 @@ spm = scipy style = flake8 -nosetests = - coverage - nose >=0.11 - pytest test = coverage - nose >=0.11 pytest !=5.3.4 pytest-cov all = @@ -87,6 +82,7 @@ console_scripts = nibabel = tests/data/* */tests/data/* + benchmarks/pytest.benchmark.ini [flake8] max-line-length = 100
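The patch above replaces the nose-based ``numpy.testing.Tester`` entry points with pytest wrappers. As a rough usage sketch of those new helpers once the change is installed (this example is not part of the patch and assumes only that nibabel and pytest are importable)::

    # Sketch: exercising the pytest-backed test()/bench() helpers added in
    # nibabel/__init__.py above.
    import nibabel as nib

    # Roughly equivalent to running `pytest -v --pyargs nibabel`.
    exit_code = nib.test(verbose=1)

    # doctests/coverage map onto the corresponding pytest options:
    # `pytest --doctest-modules --cov nibabel --pyargs nibabel`.
    nib.test(doctests=True, coverage=True)

    # bench() points pytest at benchmarks/pytest.benchmark.ini, which restricts
    # collection to bench_*.py files and bench_* functions and disables output
    # capture. Extra pytest arguments pass straight through; --durations=0 is a
    # standard pytest flag used here purely as an example.
    nib.bench(extra_argv=["--durations=0"])

The same benchmark configuration can be used directly from the command line, as the updated benchmark docstrings note, by passing ``-c nibabel/benchmarks/pytest.benchmark.ini`` to ``pytest``.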