diff --git a/Changelog b/Changelog
index c74d729a67..8a7f93b687 100644
--- a/Changelog
+++ b/Changelog
@@ -26,6 +26,8 @@ References like "pr/298" refer to github pull request numbers.
 
 * Upcoming
 
+  * Read and write support for DICOM tags in NIfTI Extended Header using
+    pydicom (pr/296);
   * Trackvis reader will now allow final streamline to have fewer points
     than the number declared in the header, with ``strict=False`` argument
     to ``read`` function;
diff --git a/doc/source/dicom/dicom.rst b/doc/source/dicom/dicom.rst
index e7fd8a1c8c..28cedde5d6 100644
--- a/doc/source/dicom/dicom.rst
+++ b/doc/source/dicom/dicom.rst
@@ -16,6 +16,7 @@ Contents:
    dicom_mosaic
    siemens_csa
    spm_dicom
+   dicom_niftiheader
 
 .. these documents not yet ready for public advertisement
diff --git a/doc/source/dicom/dicom_niftiheader.rst b/doc/source/dicom/dicom_niftiheader.rst
new file mode 100644
index 0000000000..8e973937ff
--- /dev/null
+++ b/doc/source/dicom/dicom_niftiheader.rst
@@ -0,0 +1,73 @@
+.. _dicom-niftiheader:
+
+##############################
+DICOM Tags in the NIfTI Header
+##############################
+
+NIfTI images include an extended header (see the `NIfTI Extensions Standard`_)
+to store, among other things, DICOM tags and attributes. When NiBabel loads a
+NIfTI file containing DICOM information (a NIfTI extension with
+``ecode == 2``), it parses that extension and returns a pydicom dataset as the
+content of the NIfTI extension. This dataset can be read and written to, in
+order to facilitate communication with software that uses specific DICOM codes
+found in the NIfTI header.
+
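+As a quick orientation, the short sketch below (which assumes a hypothetical
+local file named ``example_with_dicom_ext.nii``, not shipped with nibabel)
+shows how to pick out any DICOM extensions attached to a loaded image::
+
+  >> import nibabel as nib
+  >> nim = nib.load('example_with_dicom_ext.nii')
+  >> dcm_exts = [ext for ext in nim.header.extensions if ext.get_code() == 2]
+
+Each element of ``dcm_exts`` is then an extension whose ``get_content()``
+method returns the parsed pydicom dataset.
+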
+For example, the commercial PMOD software stores the Frame Start and Duration
+times of images using the DICOM tags (0055, 1001) and (0055, 1004). Here's an
+example of an image created in PMOD with those stored times accessed through
+nibabel.
+
+.. code:: python
+
+  >> import nibabel as nib
+  >> nim = nib.load('pmod_pet.nii')
+  >> dcmext = nim.header.extensions[0]
+  >> dcmext
+  Nifti1Extension('dicom', '(0054, 1001) Units CS: 'Bq/ml'
+  (0055, 0010) Private Creator LO: 'PMOD_1'
+  (0055, 1001) [Frame Start Times Vector] FD: [0.0, 30.0, 60.0, ..., 13720.0, 14320.0]
+  (0055, 1004) [Frame Durations (ms) Vector] FD: [30000.0, 30000.0, 30000.0, ..., 600000.0, 600000.0]')
+
++--------------+--------------------------------+----------------------------------------------------------+
+| Tag          | Name                           | Value                                                    |
++==============+================================+==========================================================+
+| (0054, 1001) | Units                          | CS: 'Bq/ml'                                              |
++--------------+--------------------------------+----------------------------------------------------------+
+| (0055, 0010) | Private Creator                | LO: 'PMOD_1'                                             |
++--------------+--------------------------------+----------------------------------------------------------+
+| (0055, 1001) | [Frame Start Times Vector]     | FD: [0.0, 30.0, 60.0, ..., 13720.0, 14320.0]             |
++--------------+--------------------------------+----------------------------------------------------------+
+| (0055, 1004) | [Frame Durations (ms) Vector]  | FD: [30000.0, 30000.0, 30000.0, ..., 600000.0, 600000.0] |
++--------------+--------------------------------+----------------------------------------------------------+
+
+Access each value as you would with pydicom::
+
+  >> ds = dcmext.get_content()
+  >> start_times = ds[0x0055, 0x1001].value
+  >> durations = ds[0x0055, 0x1004].value
+
+Creating a PMOD-compatible header is just as easy::
+
+  >> nim = nib.load('pet.nii')
+  >> nim.header.extensions
+  []
+  >> from dicom.dataset import Dataset
+  >> ds = Dataset()
+  >> ds.add_new((0x0054, 0x1001), 'CS', 'Bq/ml')
+  >> ds.add_new((0x0055, 0x0010), 'LO', 'PMOD_1')
+  >> ds.add_new((0x0055, 0x1001), 'FD', [0., 30., 60., 13720., 14320.])
+  >> ds.add_new((0x0055, 0x1004), 'FD', [30000., 30000., 30000., 600000., 600000.])
+  >> dcmext = nib.nifti1.Nifti1DicomExtension(2, ds)  # Use DICOM ecode 2
+  >> nim.header.extensions.append(dcmext)
+  >> nib.save(nim, 'pet_withdcm.nii')
+
+Be careful! Many imaging tools don't maintain information in the extended
+header, so it's possible (and indeed likely) that this information will be
+lost during routine use. You'll have to keep track of it, and re-write the
+information if required.
+
+Optional Dependency Note: If pydicom is not installed, nibabel uses a generic
+:class:`nibabel.nifti1.Nifti1Extension` header instead of parsing DICOM data.
+
+.. _`NIfTI Extensions Standard`: http://nifti.nimh.nih.gov/nifti-1/documentation/nifti1fields/nifti1fields_pages/extension.html
+
+.. 
include:: links_names.txt diff --git a/nibabel/dft.py b/nibabel/dft.py index 2200355920..c90c18df85 100644 --- a/nibabel/dft.py +++ b/nibabel/dft.py @@ -26,9 +26,7 @@ from .nifti1 import Nifti1Header -# Shield optional dicom import -from .optpkg import optional_package -dicom, have_dicom, _ = optional_package('dicom') +from .pydicom_compat import pydicom, read_file logger = logging.getLogger('nibabel.dft') @@ -236,7 +234,7 @@ def __getattribute__(self, name): return val def dicom(self): - return dicom.read_file(self.files[0]) + return read_file(self.files[0]) class _db_nochange: @@ -383,8 +381,8 @@ def _update_dir(c, dir, files, studies, series, storage_instances): def _update_file(c, path, fname, studies, series, storage_instances): try: - do = dicom.read_file('%s/%s' % (path, fname)) - except dicom.filereader.InvalidDicomError: + do = read_file('%s/%s' % (path, fname)) + except pydicom.filereader.InvalidDicomError: logger.debug(' not a DICOM file') return None try: diff --git a/nibabel/nicom/dicomwrappers.py b/nibabel/nicom/dicomwrappers.py index ec69be32f4..da3b576612 100644 --- a/nibabel/nicom/dicomwrappers.py +++ b/nibabel/nicom/dicomwrappers.py @@ -50,10 +50,7 @@ def wrapper_from_file(file_like, *args, **kwargs): dcm_w : ``dicomwrappers.Wrapper`` or subclass DICOM wrapper corresponding to DICOM data type """ - try: - from dicom import read_file - except ImportError: - from pydicom.dicomio import read_file + from ..pydicom_compat import read_file with ImageOpener(file_like) as fobj: dcm_data = read_file(fobj, *args, **kwargs) diff --git a/nibabel/nicom/tests/test_csareader.py b/nibabel/nicom/tests/test_csareader.py index d9066bff6c..63610a8e8f 100644 --- a/nibabel/nicom/tests/test_csareader.py +++ b/nibabel/nicom/tests/test_csareader.py @@ -14,10 +14,8 @@ from numpy.testing.decorators import skipif -from .test_dicomwrappers import (have_dicom, dicom_test, - IO_DATA_PATH, DATA, DATA_FILE) -if have_dicom: - from .test_dicomwrappers import pydicom +from nibabel.pydicom_compat import dicom_test, pydicom +from .test_dicomwrappers import (IO_DATA_PATH, DATA) CSA2_B0 = open(pjoin(IO_DATA_PATH, 'csa2_b0.bin'), 'rb').read() CSA2_B1000 = open(pjoin(IO_DATA_PATH, 'csa2_b1000.bin'), 'rb').read() diff --git a/nibabel/nicom/tests/test_dicomreaders.py b/nibabel/nicom/tests/test_dicomreaders.py index 07958bd630..8e09c07b47 100644 --- a/nibabel/nicom/tests/test_dicomreaders.py +++ b/nibabel/nicom/tests/test_dicomreaders.py @@ -6,8 +6,9 @@ from .. 
import dicomreaders as didr -from .test_dicomwrappers import (dicom_test, - EXPECTED_AFFINE, +from nibabel.pydicom_compat import dicom_test, pydicom + +from .test_dicomwrappers import (EXPECTED_AFFINE, EXPECTED_PARAMS, IO_DATA_PATH, DATA) @@ -41,10 +42,6 @@ def test_passing_kwds(): # Check that we correctly pass keywords to dicom dwi_glob = 'siemens_dwi_*.dcm.gz' csa_glob = 'csa*.bin' - try: - from dicom.filereader import InvalidDicomError - except ImportError: - from pydicom.filereader import InvalidDicomError for func in (didr.read_mosaic_dwi_dir, didr.read_mosaic_dir): data, aff, bs, gs = func(IO_DATA_PATH, dwi_glob) # This should not raise an error @@ -60,7 +57,7 @@ def test_passing_kwds(): dwi_glob, dicom_kwargs=dict(not_a_parameter=True)) # These are invalid dicoms, so will raise an error unless force=True - assert_raises(InvalidDicomError, + assert_raises(pydicom.filereader.InvalidDicomError, func, IO_DATA_PATH, csa_glob) diff --git a/nibabel/nicom/tests/test_dicomwrappers.py b/nibabel/nicom/tests/test_dicomwrappers.py index 4832ff6729..3faed6f3a4 100644 --- a/nibabel/nicom/tests/test_dicomwrappers.py +++ b/nibabel/nicom/tests/test_dicomwrappers.py @@ -9,19 +9,7 @@ import numpy as np -have_dicom = True -try: - import dicom as pydicom - read_file = pydicom.read_file -except ImportError: - try: - import pydicom - except ImportError: - have_dicom = False - else: - from pydicom.dicomio import read_file -dicom_test = np.testing.dec.skipif(not have_dicom, - 'could not import pydicom') +from nibabel.pydicom_compat import have_dicom, pydicom, read_file, dicom_test from .. import dicomwrappers as didw from .. import dicomreaders as didr diff --git a/nibabel/nicom/tests/test_utils.py b/nibabel/nicom/tests/test_utils.py index 57ea60754f..a7ab9b6bdc 100644 --- a/nibabel/nicom/tests/test_utils.py +++ b/nibabel/nicom/tests/test_utils.py @@ -12,8 +12,8 @@ from ..utils import find_private_section -from .test_dicomwrappers import (have_dicom, dicom_test, - IO_DATA_PATH, DATA, DATA_PHILIPS) +from nibabel.pydicom_compat import dicom_test, pydicom +from .test_dicomwrappers import (DATA, DATA_PHILIPS) @dicom_test @@ -27,11 +27,7 @@ def test_find_private_section_real(): assert_equal(find_private_section(DATA_PHILIPS, 0x29, 'SIEMENS CSA HEADER'), None) # Make fake datasets - try: - from dicom.dataset import Dataset - except ImportError: - from pydicom.dataset import Dataset - ds = Dataset({}) + ds = pydicom.dataset.Dataset({}) ds.add_new((0x11, 0x10), 'LO', b'some section') assert_equal(find_private_section(ds, 0x11, 'some section'), 0x1000) ds.add_new((0x11, 0x11), 'LO', b'anther section') diff --git a/nibabel/nifti1.py b/nibabel/nifti1.py index 5f85ca7783..c90160a23f 100644 --- a/nibabel/nifti1.py +++ b/nibabel/nifti1.py @@ -12,6 +12,7 @@ ''' from __future__ import division, print_function import warnings +from io import BytesIO import numpy as np import numpy.linalg as npl @@ -24,6 +25,7 @@ from . 
import analyze  # module import
 from .spm99analyze import SpmAnalyzeHeader
 from .casting import have_binary128
+from .pydicom_compat import have_dicom, pydicom as pdcm
 
 # nifti1 flat header definition for Analyze-like first 348 bytes
 # first number in comments indicates offset in file header in bytes
@@ -257,7 +259,7 @@ def __init__(self, code, content):
         """
         Parameters
         ----------
-        code : int|str
+        code : int or str
           Canonical extension code as defined in the NIfTI standard, given
           either as integer or corresponding label
           (see :data:`~nibabel.nifti1.extension_codes`)
@@ -379,13 +381,101 @@ def write_to(self, fileobj, byteswap):
             fileobj.write(b'\x00' * (extstart + rawsize - fileobj.tell()))
 
 
+class Nifti1DicomExtension(Nifti1Extension):
+    """NIfTI1 DICOM header extension
+
+    This class is a thin wrapper around pydicom to read a binary DICOM byte
+    string. If pydicom is available, content is exposed as a pydicom Dataset.
+    Otherwise, this silently falls back to the standard Nifti1Extension class
+    and content is the raw bytestring loaded directly from the nifti file
+    header.
+    """
+    def __init__(self, code, content, parent_hdr=None):
+        """
+        Parameters
+        ----------
+        code : int or str
+            Canonical extension code as defined in the NIfTI standard, given
+            either as integer or corresponding label
+            (see :data:`~nibabel.nifti1.extension_codes`)
+        content : bytes or pydicom Dataset or None
+            Extension content - either a bytestring as read from the NIfTI
+            file header or an existing pydicom Dataset. If a bytestring, the
+            content is converted into a Dataset on initialization. If None, a
+            new empty Dataset is created.
+        parent_hdr : :class:`~nibabel.nifti1.Nifti1Header`, optional
+            If a dicom extension belongs to an existing
+            :class:`~nibabel.nifti1.Nifti1Header`, it may be provided here to
+            ensure that the DICOM dataset is written with correctly
+            corresponding endianness; otherwise it is assumed the dataset is
+            little endian.
+
+        Notes
+        -----
+
+        code should always be 2 for DICOM.
+        """
+
+        self._code = code
+        if parent_hdr:
+            self._is_little_endian = parent_hdr.endianness == '<'
+        else:
+            self._is_little_endian = True
+        if isinstance(content, pdcm.dataset.Dataset):
+            self._is_implicit_VR = False
+            self._raw_content = self._mangle(content)
+            self._content = content
+        elif isinstance(content, bytes):  # Got a byte string - unmangle it
+            self._raw_content = content
+            self._is_implicit_VR = self._guess_implicit_VR()
+            ds = self._unmangle(content, self._is_implicit_VR,
+                                self._is_little_endian)
+            self._content = ds
+        elif content is None:  # initialize a new dicom dataset
+            self._is_implicit_VR = False
+            self._content = pdcm.dataset.Dataset()
+        else:
+            raise TypeError("content must be either a bytestring or a pydicom "
+                            "Dataset. Got %s" % content.__class__)
+
+    def _guess_implicit_VR(self):
+        """Try to guess DICOM syntax by checking for valid VRs.
+
+        Without a DICOM Transfer Syntax, it's difficult to tell if Value
+        Representations (VRs) are included in the DICOM encoding or not.
+        This reads the bytes where the first VR would be and checks them
+        against the list of valid VRs.
+        """
+        potential_vr = self._raw_content[4:6].decode()
+        if potential_vr in pdcm.values.converters.keys():
+            implicit_VR = False
+        else:
+            implicit_VR = True
+        return implicit_VR
+
+    def _unmangle(self, value, is_implicit_VR=False, is_little_endian=True):
+        bio = BytesIO(value)
+        ds = pdcm.filereader.read_dataset(bio,
+                                          is_implicit_VR,
+                                          is_little_endian)
+        return ds
+
+    def _mangle(self, dataset):
+        bio = BytesIO()
+        dio = pdcm.filebase.DicomFileLike(bio)
+        dio.is_implicit_VR = self._is_implicit_VR
+        dio.is_little_endian = self._is_little_endian
+        ds_len = pdcm.filewriter.write_dataset(dio, dataset)
+        dio.seek(0)
+        return dio.read(ds_len)
+
+
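+# Implementation note for Nifti1DicomExtension._guess_implicit_VR above: a
+# sketch of the first bytes of an explicit VR, short-form data element, on
+# which the heuristic relies:
+#
+#   bytes 0-1 : group number    (e.g. 0x0010)
+#   bytes 2-3 : element number  (e.g. 0x0020)
+#   bytes 4-5 : VR code         (e.g. b'LO'; only present for explicit VR)
+#   bytes 6-7 : value length    (some VRs instead use a longer, 12-byte form)
+#
+# If bytes 4-5 do not decode to a VR known to pydicom (``values.converters``),
+# the data element is assumed to use implicit VR encoding.
+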
 # NIfTI header extension type codes (ECODE)
 # see nifti1_io.h for a complete list of all known extensions and
 # references to their description or contacts of the respective
 # initiators
 extension_codes = Recoder((
     (0, "ignore", Nifti1Extension),
-    (2, "dicom", Nifti1Extension),
+    (2, "dicom", Nifti1DicomExtension if have_dicom else Nifti1Extension),
     (4, "afni", Nifti1Extension),
     (6, "comment", Nifti1Extension),
     (8, "xcede", Nifti1Extension),
diff --git a/nibabel/parrec.py b/nibabel/parrec.py
index 6951b3f218..7b6507003d 100644
--- a/nibabel/parrec.py
+++ b/nibabel/parrec.py
@@ -95,7 +95,7 @@
 import re
 from io import StringIO
 from locale import getpreferredencoding
-from collections import OrderedDict
+from nibabel.externals import OrderedDict
 from .keywordonly import kw_only_meth
 from .spatialimages import SpatialHeader, SpatialImage
diff --git a/nibabel/pydicom_compat.py b/nibabel/pydicom_compat.py
new file mode 100644
index 0000000000..2e188f90e0
--- /dev/null
+++ b/nibabel/pydicom_compat.py
@@ -0,0 +1,41 @@
+""" Adapter module for working with pydicom < 1.0 and >= 1.0
+
+In what follows, "dicom is available" means we can import either a) ``dicom``
+(pydicom < 1.0) or b) ``pydicom`` (pydicom >= 1.0).
+
+Regardless of whether dicom is available this module should be importable
+without error, and always defines:
+
+* have_dicom : True if we can import pydicom or dicom;
+* pydicom : pydicom module or dicom module or None if not importable;
+* read_file : ``read_file`` function if pydicom or dicom module is importable
+  else None;
+* dicom_test : test decorator that skips test if dicom not available.
+"""
+
+import numpy as np
+
+have_dicom = True
+read_file = None
+pydicom = None
+
+try:
+    import dicom as pydicom
+    # Values not imported by default
+    import dicom.values
+except ImportError:
+    try:
+        import pydicom
+    except ImportError:
+        have_dicom = False
+    else:  # pydicom module available
+        from pydicom.dicomio import read_file
+        # Values not imported by default
+        import pydicom.values
+else:  # dicom module available
+    read_file = pydicom.read_file
+
+
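+# Example usage (a sketch; the DICOM file name below is hypothetical):
+#
+#     from nibabel.pydicom_compat import have_dicom, pydicom, read_file
+#
+#     if have_dicom:
+#         ds = read_file('some_file.dcm')
+#         print(pydicom.__version__)
+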
+# test decorator that skips test if dicom not available.
+dicom_test = np.testing.dec.skipif(not have_dicom,
+                                   'could not import dicom or pydicom')
diff --git a/nibabel/tests/test_dft.py b/nibabel/tests/test_dft.py
index af0029443f..9a164549a9 100644
--- a/nibabel/tests/test_dft.py
+++ b/nibabel/tests/test_dft.py
@@ -17,8 +17,10 @@
 
 # Shield optional package imports
 from ..optpkg import optional_package
+
 # setup_module will raise SkipTest if no dicom to import
-dicom, have_dicom, _ = optional_package('dicom')
+from nibabel.pydicom_compat import have_dicom
+
 PImage, have_pil, _ = optional_package('PIL.Image')
 pil_test = np.testing.dec.skipif(not have_pil, 'could not import PIL.Image')
diff --git a/nibabel/tests/test_nifti1.py b/nibabel/tests/test_nifti1.py
index 7248d12ea7..6aab35aff0 100644
--- a/nibabel/tests/test_nifti1.py
+++ b/nibabel/tests/test_nifti1.py
@@ -10,6 +10,7 @@
 from __future__ import division, print_function, absolute_import
 import os
 import warnings
+import struct
 
 import numpy as np
 
@@ -19,8 +20,8 @@
 from nibabel.eulerangles import euler2mat
 from nibabel.externals.six import BytesIO
 from nibabel.nifti1 import (load, Nifti1Header, Nifti1PairHeader, Nifti1Image,
-                            Nifti1Pair, Nifti1Extension, Nifti1Extensions,
-                            data_type_codes, extension_codes,
+                            Nifti1Pair, Nifti1Extension, Nifti1DicomExtension,
+                            Nifti1Extensions, data_type_codes, extension_codes,
                             slice_order_codes)
 from nibabel.spatialimages import HeaderDataError
 from nibabel.tmpdirs import InTemporaryDirectory
@@ -43,6 +44,8 @@
 header_file = os.path.join(data_path, 'nifti1.hdr')
 image_file = os.path.join(data_path, 'example4d.nii.gz')
 
+from nibabel.pydicom_compat import pydicom, dicom_test
+
 # Example transformation matrix
 R = [[0, -1, 0], [1, 0, 0], [0, 0, 1]]  # rotation matrix
 
@@ -1119,6 +1122,88 @@ def test_nifti_extensions():
     assert_true(exts_container.count('afni') == 1)
 
 
+@dicom_test
+def test_nifti_dicom_extension():
+    nim = load(image_file)
+    hdr = nim.header
+    exts_container = hdr.extensions
+
+    # create an empty dataset if no content provided (to write a new header)
+    dcmext = Nifti1DicomExtension(2, b'')
+    assert_equal(dcmext.get_content().__class__, pydicom.dataset.Dataset)
+    assert_equal(len(dcmext.get_content().values()), 0)
+
+    # create an empty dataset if no content provided (to write a new header)
+    dcmext = Nifti1DicomExtension(2, None)
+    assert_equal(dcmext.get_content().__class__, pydicom.dataset.Dataset)
+    assert_equal(len(dcmext.get_content().values()), 0)
+
+    # use a dataset if provided
+    ds = pydicom.dataset.Dataset()
+    ds.add_new((0x10, 0x20), 'LO', 'NiPy')
+    dcmext = Nifti1DicomExtension(2, ds)
+    assert_equal(dcmext.get_content().__class__, pydicom.dataset.Dataset)
+    assert_equal(len(dcmext.get_content().values()), 1)
+    assert_equal(dcmext.get_content().PatientID, 'NiPy')
+
+    # create a single dicom tag (Patient ID, [0010,0020]) with Explicit VR / LE
+    dcmbytes_explicit = struct.pack('<2H2sH4s', 0x10, 0x20,
+                                    'LO'.encode('utf-8'), 4,
+                                    'NiPy'.encode('utf-8'))
+    dcmext = Nifti1DicomExtension(2, dcmbytes_explicit)
+    assert_equal(dcmext.__class__, Nifti1DicomExtension)
+    assert_equal(dcmext._guess_implicit_VR(), False)
+    assert_equal(dcmext.get_code(), 2)
+    assert_equal(dcmext.get_content().PatientID, 'NiPy')
+    assert_equal(dcmext.get_content()[0x10, 0x20].value, 'NiPy')
+    assert_equal(len(dcmext.get_content().values()), 1)
+    assert_equal(dcmext._mangle(dcmext.get_content()), dcmbytes_explicit)
+    assert_equal(dcmext.get_sizeondisk() % 16, 0)
+
+    # create the same tag with Explicit VR / BE, passing a matching BE header
+    dcmbytes_explicit_be = struct.pack('>2H2sH4s', 0x10, 0x20,
+                                       'LO'.encode('utf-8'), 4,
+                                       'NiPy'.encode('utf-8'))
+    hdr_be = Nifti1Header(endianness='>')  # Big Endian Nifti1Header
+    dcmext = Nifti1DicomExtension(2, dcmbytes_explicit_be, parent_hdr=hdr_be)
+    assert_equal(dcmext.__class__, Nifti1DicomExtension)
+    assert_equal(dcmext._guess_implicit_VR(), False)
+    assert_equal(dcmext.get_code(), 2)
+    assert_equal(dcmext.get_content().PatientID, 'NiPy')
+    assert_equal(dcmext.get_content()[0x10, 0x20].value, 'NiPy')
+    assert_equal(len(dcmext.get_content().values()), 1)
+    assert_equal(dcmext._mangle(dcmext.get_content()), dcmbytes_explicit_be)
+    assert_equal(dcmext.get_sizeondisk() % 16, 0)
+
+    # Check that a 
dicom dataset is written w/ BE encoding when not created + # using BE bytestring when given a BE nifti header + dcmext = Nifti1DicomExtension(2, ds, parent_hdr=hdr_be) + assert_equal(dcmext._mangle(dcmext.get_content()), dcmbytes_explicit_be) + + # dicom extension access from nifti extensions + assert_equal(exts_container.count('dicom'), 0) + exts_container.append(dcmext) + assert_equal(exts_container.count('dicom'), 1) + assert_equal(exts_container.get_codes(), [6, 6, 2]) + assert_equal(dcmext._mangle(dcmext.get_content()), dcmbytes_explicit_be) + assert_equal(dcmext.get_sizeondisk() % 16, 0) + + # creating an extension with bad content should raise + assert_raises(TypeError, Nifti1DicomExtension, 2, 0) + + class TestNifti1General(object): """ Test class to test nifti1 in general diff --git a/nibabel/tests/test_scripts.py b/nibabel/tests/test_scripts.py index a0eb963b4e..3f5c942949 100644 --- a/nibabel/tests/test_scripts.py +++ b/nibabel/tests/test_scripts.py @@ -338,7 +338,7 @@ def test_parrec2nii_with_data(): assert_true(exists('DTI.ordering.csv')) with open('DTI.ordering.csv', 'r') as csvfile: csvreader = csv.reader(csvfile, delimiter=',') - csv_keys = csvreader.__next__() # header row + csv_keys = next(csvreader) # header row nlines = 0 # count number of non-header rows for line in csvreader: nlines += 1