diff --git a/nibabel/streamlines/tests/test_tck.py b/nibabel/streamlines/tests/test_tck.py
index 573bed02d3..fb29776f75 100644
--- a/nibabel/streamlines/tests/test_tck.py
+++ b/nibabel/streamlines/tests/test_tck.py
@@ -14,17 +14,15 @@
 from .. import tck as tck_module
 from ..tck import TckFile
 
-import pytest; pytestmark = pytest.mark.skip()
-
-from nose.tools import assert_equal, assert_raises, assert_true
+import pytest
 from numpy.testing import assert_array_equal
-from nibabel.testing import data_path, clear_and_catch_warnings
+from ...testing_pytest import data_path, clear_and_catch_warnings
 from .test_tractogram import assert_tractogram_equal
 
 DATA = {}
 
 
-def setup():
+def setup_module():
     global DATA
 
     DATA['empty_tck_fname'] = pjoin(data_path, "empty.tck")
@@ -71,8 +69,8 @@ def test_load_matlab_nan_file(self):
         for lazy_load in [False, True]:
             tck = TckFile.load(DATA['matlab_nan_tck_fname'], lazy_load=lazy_load)
             streamlines = list(tck.tractogram.streamlines)
-            assert_equal(len(streamlines), 1)
-            assert_equal(streamlines[0].shape, (108, 3))
+            assert len(streamlines) == 1
+            assert streamlines[0].shape == (108, 3)
 
     def test_writeable_data(self):
         data = DATA['simple_tractogram']
@@ -82,7 +80,7 @@ def test_writeable_data(self):
                 for actual, expected_tgi in zip(tck.streamlines, data):
                     assert_array_equal(actual, expected_tgi.streamline)
                     # Test we can write to arrays
-                    assert_true(actual.flags.writeable)
+                    assert actual.flags.writeable
                     actual[0, 0] = 99
 
     def test_load_simple_file_in_big_endian(self):
@@ -90,7 +88,7 @@ def test_load_simple_file_in_big_endian(self):
             tck = TckFile.load(DATA['simple_tck_big_endian_fname'],
                                lazy_load=lazy_load)
             assert_tractogram_equal(tck.tractogram, DATA['simple_tractogram'])
-            assert_equal(tck.header['datatype'], 'Float32BE')
+            assert tck.header['datatype'] == 'Float32BE'
 
     def test_load_file_with_wrong_information(self):
         tck_file = open(DATA['simple_tck_fname'], 'rb').read()
@@ -98,12 +96,15 @@ def test_load_file_with_wrong_information(self):
         # Simulate a TCK file where `datatype` has not the right endianness.
         new_tck_file = tck_file.replace(asbytes("Float32LE"),
                                         asbytes("Float32BE"))
-        assert_raises(DataError, TckFile.load, BytesIO(new_tck_file))
+
+        with pytest.raises(DataError):
+            TckFile.load(BytesIO(new_tck_file))
 
         # Simulate a TCK file with unsupported `datatype`.
         new_tck_file = tck_file.replace(asbytes("Float32LE"),
                                         asbytes("int32"))
-        assert_raises(HeaderError, TckFile.load, BytesIO(new_tck_file))
+        with pytest.raises(HeaderError):
+            TckFile.load(BytesIO(new_tck_file))
 
         # Simulate a TCK file with no `datatype` field.
         new_tck_file = tck_file.replace(b"datatype: Float32LE\n", b"")
@@ -111,24 +112,25 @@ def test_load_file_with_wrong_information(self):
         new_tck_file = new_tck_file.replace(b"file: . 67\n", b"file: . 47\n")
         with clear_and_catch_warnings(record=True, modules=[tck_module]) as w:
             tck = TckFile.load(BytesIO(new_tck_file))
-            assert_equal(len(w), 1)
-            assert_true(issubclass(w[0].category, HeaderWarning))
-            assert_true("Missing 'datatype'" in str(w[0].message))
+            assert len(w) == 1
+            assert issubclass(w[0].category, HeaderWarning)
+            assert "Missing 'datatype'" in str(w[0].message)
             assert_array_equal(tck.header['datatype'], "Float32LE")
 
         # Simulate a TCK file with no `file` field.
         new_tck_file = tck_file.replace(b"\nfile: . 67", b"")
         with clear_and_catch_warnings(record=True, modules=[tck_module]) as w:
             tck = TckFile.load(BytesIO(new_tck_file))
-            assert_equal(len(w), 1)
-            assert_true(issubclass(w[0].category, HeaderWarning))
-            assert_true("Missing 'file'" in str(w[0].message))
+            assert len(w) == 1
+            assert issubclass(w[0].category, HeaderWarning)
+            assert "Missing 'file'" in str(w[0].message)
             assert_array_equal(tck.header['file'], ". 56")
 
         # Simulate a TCK file with `file` field pointing to another file.
         new_tck_file = tck_file.replace(b"file: . 67\n",
                                         b"file: dummy.mat 75\n")
-        assert_raises(HeaderError, TckFile.load, BytesIO(new_tck_file))
+        with pytest.raises(HeaderError):
+            TckFile.load(BytesIO(new_tck_file))
 
         # Simulate a TCK file which is missing a streamline delimiter.
         eos = TckFile.FIBER_DELIMITER.tostring()
@@ -139,11 +141,13 @@ def test_load_file_with_wrong_information(self):
         buffer_size = 1. / 1024**2  # 1 bytes
         hdr = TckFile._read_header(BytesIO(new_tck_file))
         tck_reader = TckFile._read(BytesIO(new_tck_file), hdr, buffer_size)
-        assert_raises(DataError, list, tck_reader)
+        with pytest.raises(DataError):
+            list(tck_reader)
 
         # Simulate a TCK file which is missing the end-of-file delimiter.
         new_tck_file = tck_file[:-len(eof)]
-        assert_raises(DataError, TckFile.load, BytesIO(new_tck_file))
+        with pytest.raises(DataError):
+            TckFile.load(BytesIO(new_tck_file))
 
     def test_write_empty_file(self):
         tractogram = Tractogram(affine_to_rasmm=np.eye(4))
@@ -160,8 +164,7 @@ def test_write_empty_file(self):
         assert_tractogram_equal(new_tck.tractogram, new_tck_orig.tractogram)
 
         tck_file.seek(0, os.SEEK_SET)
-        assert_equal(tck_file.read(),
-                     open(DATA['empty_tck_fname'], 'rb').read())
+        assert tck_file.read() == open(DATA['empty_tck_fname'], 'rb').read()
 
     def test_write_simple_file(self):
         tractogram = Tractogram(DATA['streamlines'],
@@ -179,17 +182,18 @@ def test_write_simple_file(self):
         assert_tractogram_equal(new_tck.tractogram, new_tck_orig.tractogram)
 
         tck_file.seek(0, os.SEEK_SET)
-        assert_equal(tck_file.read(),
-                     open(DATA['simple_tck_fname'], 'rb').read())
+        assert tck_file.read() == open(DATA['simple_tck_fname'], 'rb').read()
 
         # TCK file containing not well formatted entries in its header.
         tck_file = BytesIO()
         tck = TckFile(tractogram)
         tck.header['new_entry'] = 'value\n'  # \n not allowed
-        assert_raises(HeaderError, tck.save, tck_file)
+        with pytest.raises(HeaderError):
+            tck.save(tck_file)
 
         tck.header['new_entry'] = 'val:ue'  # : not allowed
-        assert_raises(HeaderError, tck.save, tck_file)
+        with pytest.raises(HeaderError):
+            tck.save(tck_file)
 
     def test_load_write_file(self):
         for fname in [DATA['empty_tck_fname'],
@@ -204,7 +208,7 @@ def test_load_write_file(self):
 
                 # Check that the written file is the same as the one read.
                 tck_file.seek(0, os.SEEK_SET)
-                assert_equal(tck_file.read(), open(fname, 'rb').read())
+                assert tck_file.read() == open(fname, 'rb').read()
 
         # Save tractogram that has an affine_to_rasmm.
         for lazy_load in [False, True]:
diff --git a/nibabel/streamlines/tests/test_trk.py b/nibabel/streamlines/tests/test_trk.py
index 8d4f01d766..f88631965e 100644
--- a/nibabel/streamlines/tests/test_trk.py
+++ b/nibabel/streamlines/tests/test_trk.py
@@ -7,11 +7,10 @@
 from io import BytesIO
 
-from nibabel.testing import data_path
-from nibabel.testing import clear_and_catch_warnings, assert_arr_dict_equal
-from nose.tools import assert_equal, assert_raises, assert_true
+import pytest
+from ...testing_pytest import data_path
+from ...testing_pytest import clear_and_catch_warnings, assert_arr_dict_equal
 from numpy.testing import assert_array_equal
-import pytest; pytestmark = pytest.mark.skip()
 
 from .test_tractogram import assert_tractogram_equal
 
 from ..tractogram import Tractogram
@@ -24,7 +23,7 @@
 DATA = {}
 
 
-def setup():
+def setup_module():
     global DATA
 
     DATA['empty_trk_fname'] = pjoin(data_path, "empty.trk")
@@ -133,45 +132,51 @@ def test_load_file_with_wrong_information(self):
         trk_struct[Field.VOXEL_TO_RASMM] = np.zeros((4, 4))
         with clear_and_catch_warnings(record=True, modules=[trk_module]) as w:
             trk = TrkFile.load(BytesIO(trk_bytes))
-            assert_equal(len(w), 1)
-            assert_true(issubclass(w[0].category, HeaderWarning))
-            assert_true("identity" in str(w[0].message))
+            assert len(w) == 1
+            assert issubclass(w[0].category, HeaderWarning)
+            assert "identity" in str(w[0].message)
             assert_array_equal(trk.affine, np.eye(4))
 
         # Simulate a TRK where `vox_to_ras` is invalid.
         trk_struct, trk_bytes = self.trk_with_bytes()
         trk_struct[Field.VOXEL_TO_RASMM] = np.diag([0, 0, 0, 1])
         with clear_and_catch_warnings(record=True, modules=[trk_module]) as w:
-            assert_raises(HeaderError, TrkFile.load, BytesIO(trk_bytes))
+            with pytest.raises(HeaderError):
+                TrkFile.load(BytesIO(trk_bytes))
 
         # Simulate a TRK file where `voxel_order` was not provided.
         trk_struct, trk_bytes = self.trk_with_bytes()
         trk_struct[Field.VOXEL_ORDER] = b''
         with clear_and_catch_warnings(record=True, modules=[trk_module]) as w:
             TrkFile.load(BytesIO(trk_bytes))
-            assert_equal(len(w), 1)
-            assert_true(issubclass(w[0].category, HeaderWarning))
-            assert_true("LPS" in str(w[0].message))
+            assert len(w) == 1
+            assert issubclass(w[0].category, HeaderWarning)
+            assert "LPS" in str(w[0].message)
 
         # Simulate a TRK file with an unsupported version.
         trk_struct, trk_bytes = self.trk_with_bytes()
         trk_struct['version'] = 123
-        assert_raises(HeaderError, TrkFile.load, BytesIO(trk_bytes))
+        with pytest.raises(HeaderError):
+            TrkFile.load(BytesIO(trk_bytes))
+
         # Simulate a TRK file with a wrong hdr_size.
         trk_struct, trk_bytes = self.trk_with_bytes()
         trk_struct['hdr_size'] = 1234
-        assert_raises(HeaderError, TrkFile.load, BytesIO(trk_bytes))
+        with pytest.raises(HeaderError):
+            TrkFile.load(BytesIO(trk_bytes))
 
         # Simulate a TRK file with a wrong scalar_name.
         trk_struct, trk_bytes = self.trk_with_bytes('complex_trk_fname')
         trk_struct['scalar_name'][0, 0] = b'colors\x003\x004'
-        assert_raises(HeaderError, TrkFile.load, BytesIO(trk_bytes))
+        with pytest.raises(HeaderError):
+            TrkFile.load(BytesIO(trk_bytes))
 
         # Simulate a TRK file with a wrong property_name.
         trk_struct, trk_bytes = self.trk_with_bytes('complex_trk_fname')
         trk_struct['property_name'][0, 0] = b'colors\x003\x004'
-        assert_raises(HeaderError, TrkFile.load, BytesIO(trk_bytes))
+        with pytest.raises(HeaderError):
+            TrkFile.load(BytesIO(trk_bytes))
 
     def test_load_trk_version_1(self):
         # Simulate and test a TRK (version 1).
@@ -184,9 +189,9 @@ def test_load_trk_version_1(self):
         trk_struct['version'] = 1
         with clear_and_catch_warnings(record=True, modules=[trk_module]) as w:
             trk = TrkFile.load(BytesIO(trk_bytes))
-            assert_equal(len(w), 1)
-            assert_true(issubclass(w[0].category, HeaderWarning))
-            assert_true("identity" in str(w[0].message))
+            assert len(w) == 1
+            assert issubclass(w[0].category, HeaderWarning)
+            assert "identity" in str(w[0].message)
             assert_array_equal(trk.affine, np.eye(4))
 
             assert_array_equal(trk.header['version'], 1)
@@ -196,8 +201,8 @@ def test_load_complex_file_in_big_endian(self):
         # We use hdr_size as an indicator of little vs big endian.
         good_orders = '>' if sys.byteorder == 'little' else '>='
         hdr_size = trk_struct['hdr_size']
-        assert_true(hdr_size.dtype.byteorder in good_orders)
-        assert_equal(hdr_size, 1000)
+        assert hdr_size.dtype.byteorder in good_orders
+        assert hdr_size == 1000
 
         for lazy_load in [False, True]:
             trk = TrkFile.load(DATA['complex_trk_big_endian_fname'],
@@ -206,7 +211,7 @@ def test_tractogram_file_properties(self):
         trk = TrkFile.load(DATA['simple_trk_fname'])
-        assert_equal(trk.streamlines, trk.tractogram.streamlines)
+        assert trk.streamlines == trk.tractogram.streamlines
         assert_array_equal(trk.affine, trk.header[Field.VOXEL_TO_RASMM])
 
     def test_write_empty_file(self):
@@ -224,8 +229,7 @@ def test_write_empty_file(self):
         assert_tractogram_equal(new_trk.tractogram, new_trk_orig.tractogram)
 
         trk_file.seek(0, os.SEEK_SET)
-        assert_equal(trk_file.read(),
-                     open(DATA['empty_trk_fname'], 'rb').read())
+        assert trk_file.read() == open(DATA['empty_trk_fname'], 'rb').read()
 
     def test_write_simple_file(self):
         tractogram = Tractogram(DATA['streamlines'],
@@ -243,8 +247,7 @@ def test_write_simple_file(self):
         assert_tractogram_equal(new_trk.tractogram, new_trk_orig.tractogram)
 
         trk_file.seek(0, os.SEEK_SET)
-        assert_equal(trk_file.read(),
-                     open(DATA['simple_trk_fname'], 'rb').read())
+        assert trk_file.read() == open(DATA['simple_trk_fname'], 'rb').read()
 
     def test_write_complex_file(self):
         # With scalars
@@ -293,8 +296,7 @@ def test_write_complex_file(self):
         assert_tractogram_equal(new_trk.tractogram, new_trk_orig.tractogram)
 
         trk_file.seek(0, os.SEEK_SET)
-        assert_equal(trk_file.read(),
-                     open(DATA['complex_trk_fname'], 'rb').read())
+        assert trk_file.read() == open(DATA['complex_trk_fname'], 'rb').read()
 
     def test_load_write_file(self):
         for fname in [DATA['empty_trk_fname'],
@@ -329,8 +331,7 @@ def test_load_write_LPS_file(self):
         assert_tractogram_equal(new_trk.tractogram, new_trk_orig.tractogram)
 
         trk_file.seek(0, os.SEEK_SET)
-        assert_equal(trk_file.read(),
-                     open(DATA['standard_LPS_trk_fname'], 'rb').read())
+        assert trk_file.read() == open(DATA['standard_LPS_trk_fname'], 'rb').read()
 
         # Test writing a file where the header is missing the
         # Field.VOXEL_ORDER.
@@ -353,8 +354,7 @@ def test_load_write_LPS_file(self):
         assert_tractogram_equal(new_trk.tractogram, new_trk_orig.tractogram)
 
         trk_file.seek(0, os.SEEK_SET)
-        assert_equal(trk_file.read(),
-                     open(DATA['standard_LPS_trk_fname'], 'rb').read())
+        assert trk_file.read() == open(DATA['standard_LPS_trk_fname'], 'rb').read()
 
     def test_write_optional_header_fields(self):
         # The TRK file format doesn't support additional header fields.
@@ -368,7 +368,7 @@ def test_write_optional_header_fields(self):
 
         trk_file.seek(0, os.SEEK_SET)
         new_trk = TrkFile.load(trk_file)
-        assert_true("extra" not in new_trk.header)
+        assert "extra" not in new_trk.header
 
     def test_write_too_many_scalars_and_properties(self):
         # TRK supports up to 10 data_per_point.
@@ -396,7 +396,8 @@ def test_write_too_many_scalars_and_properties(self):
                                 affine_to_rasmm=np.eye(4))
 
         trk = TrkFile(tractogram)
-        assert_raises(ValueError, trk.save, BytesIO())
+        with pytest.raises(ValueError):
+            trk.save(BytesIO())
 
         # TRK supports up to 10 data_per_streamline.
         data_per_streamline = {}
@@ -422,7 +423,8 @@ def test_write_too_many_scalars_and_properties(self):
                                 data_per_streamline=data_per_streamline)
 
         trk = TrkFile(tractogram)
-        assert_raises(ValueError, trk.save, BytesIO())
+        with pytest.raises(ValueError):
+            trk.save(BytesIO())
 
     def test_write_scalars_and_properties_name_too_long(self):
         # TRK supports data_per_point name up to 20 characters.
@@ -438,7 +440,8 @@ def test_write_scalars_and_properties_name_too_long(self):
 
             trk = TrkFile(tractogram)
             if nb_chars > 18:
-                assert_raises(ValueError, trk.save, BytesIO())
+                with pytest.raises(ValueError):
+                    trk.save(BytesIO())
             else:
                 trk.save(BytesIO())
 
@@ -449,7 +452,8 @@ def test_write_scalars_and_properties_name_too_long(self):
 
             trk = TrkFile(tractogram)
             if nb_chars > 20:
-                assert_raises(ValueError, trk.save, BytesIO())
+                with pytest.raises(ValueError):
+                    trk.save(BytesIO())
             else:
                 trk.save(BytesIO())
 
@@ -466,7 +470,8 @@ def test_write_scalars_and_properties_name_too_long(self):
 
             trk = TrkFile(tractogram)
             if nb_chars > 18:
-                assert_raises(ValueError, trk.save, BytesIO())
+                with pytest.raises(ValueError):
+                    trk.save(BytesIO())
             else:
                 trk.save(BytesIO())
 
@@ -477,7 +482,8 @@ def test_write_scalars_and_properties_name_too_long(self):
 
             trk = TrkFile(tractogram)
             if nb_chars > 20:
-                assert_raises(ValueError, trk.save, BytesIO())
+                with pytest.raises(ValueError):
+                    trk.save(BytesIO())
             else:
                 trk.save(BytesIO())
 
@@ -499,32 +505,37 @@ def test_header_read_restore(self):
         hdr_from_fname['_offset_data'] += hdr_pos  # Correct for start position
         assert_arr_dict_equal(TrkFile._read_header(bio), hdr_from_fname)
         # Check fileobject file position has not changed
-        assert_equal(bio.tell(), hdr_pos)
+        assert bio.tell() == hdr_pos
 
 
 def test_encode_names():
     # Test function for encoding numbers into property names
     b0 = b'\x00'
-    assert_equal(encode_value_in_name(0, 'foo', 10),
-                 b'foo' + b0 * 7)
-    assert_equal(encode_value_in_name(1, 'foo', 10),
-                 b'foo' + b0 * 7)
-    assert_equal(encode_value_in_name(8, 'foo', 10),
-                 b'foo' + b0 + b'8' + b0 * 5)
-    assert_equal(encode_value_in_name(40, 'foobar', 10),
-                 b'foobar' + b0 + b'40' + b0)
-    assert_equal(encode_value_in_name(1, 'foobarbazz', 10), b'foobarbazz')
-    assert_raises(ValueError, encode_value_in_name, 1, 'foobarbazzz', 10)
-    assert_raises(ValueError, encode_value_in_name, 2, 'foobarbaz', 10)
-    assert_equal(encode_value_in_name(2, 'foobarba', 10), b'foobarba\x002')
+    assert encode_value_in_name(0, 'foo', 10) == b'foo' + b0 * 7
+    assert encode_value_in_name(1, 'foo', 10) == b'foo' + b0 * 7
+    assert encode_value_in_name(8, 'foo', 10) == b'foo' + b0 + b'8' + b0 * 5
+    assert encode_value_in_name(40, 'foobar', 10) == b'foobar' + b0 + b'40' + b0
+    assert encode_value_in_name(1, 'foobarbazz', 10) == b'foobarbazz'
+
+    with pytest.raises(ValueError):
+        encode_value_in_name(1, 'foobarbazzz', 10)
+
+    with pytest.raises(ValueError):
+        encode_value_in_name(2, 'foobarbaz', 10)
+
+    assert encode_value_in_name(2, 'foobarba', 10) == b'foobarba\x002'
 
 
 def test_decode_names():
     # Test function for decoding name string into name, number
     b0 = b'\x00'
-    assert_equal(decode_value_from_name(b''), ('', 0))
-    assert_equal(decode_value_from_name(b'foo' + b0 * 7), ('foo', 1))
-    assert_equal(decode_value_from_name(b'foo\x008' + b0 * 5), ('foo', 8))
-    assert_equal(decode_value_from_name(b'foobar\x0010\x00'), ('foobar', 10))
-    assert_raises(ValueError, decode_value_from_name, b'foobar\x0010\x01')
-    assert_raises(HeaderError, decode_value_from_name, b'foo\x0010\x00111')
+    assert decode_value_from_name(b'') == ('', 0)
+    assert decode_value_from_name(b'foo' + b0 * 7) == ('foo', 1)
+    assert decode_value_from_name(b'foo\x008' + b0 * 5) == ('foo', 8)
+    assert decode_value_from_name(b'foobar\x0010\x00') == ('foobar', 10)
+
+    with pytest.raises(ValueError):
+        decode_value_from_name(b'foobar\x0010\x01')
+
+    with pytest.raises(HeaderError):
+        decode_value_from_name(b'foo\x0010\x00111')